git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/commitdiff
Merge tag 'nfc-next-4.7-1' of git://git.kernel.org/pub/scm/linux/kernel/git/sameo...
author    David S. Miller <davem@davemloft.net>
          Thu, 12 May 2016 00:00:54 +0000 (20:00 -0400)
committer David S. Miller <davem@davemloft.net>
          Thu, 12 May 2016 00:00:54 +0000 (20:00 -0400)
Samuel Ortiz says:

====================
NFC 4.7 pull request

This is the first NFC pull request for 4.7. With this one we
mainly have:

- Support for NXP's pn532 NFC chipset. The pn532 is based on the same
  microcontroller as the pn533, but it talks to the host through i2c
  instead of USB. By separating the pn533 driver into core and PHY
  parts, we can now add the i2c layer and support the pn532 chipset
  (a minimal sketch of this split follows the sign-off below).

- Support for NCI's loopback mode. This is a testing mode where each
  packet received by the NFCC (the NFC controller) is sent back to the
  DH (the device host), allowing the host to test that the controller
  can receive and send data (see the second sketch below).

- A few ACPI-related fixes for the STMicro drivers, in order to match
  the device tree naming scheme.

- A bunch of cleanups for the st-nci and the st21nfca STMicro drivers.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
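
The core/PHY split called out in the pn532 item is the main structural
change in this pull. What follows is a minimal standalone sketch of
that pattern, not the pn533 driver's actual API: the names here
(struct phy_ops, core_send, i2c_send_frame) are illustrative, and the
real driver does considerably more framing work before a transport
ever sees the bytes.

/*
 * Illustrative sketch only -- NOT the in-tree pn533 API. Models the
 * core/PHY split described above: frame logic lives in a shared core,
 * and each transport (USB for pn533, i2c for pn532) supplies a small
 * ops table for moving raw frames.
 */
#include <stdio.h>
#include <stddef.h>

struct phy_ops {
	const char *name;
	int (*send_frame)(const unsigned char *buf, size_t len);
};

/* Hypothetical i2c transport backend. */
static int i2c_send_frame(const unsigned char *buf, size_t len)
{
	printf("%zu-byte frame over i2c, first byte 0x%02x\n", len, buf[0]);
	return 0;
}

static const struct phy_ops i2c_phy = {
	.name		= "pn532-i2c",
	.send_frame	= i2c_send_frame,
};

/*
 * Transport-agnostic core: builds the command once and hands it to
 * whichever PHY was registered (the real core also adds preamble,
 * length and checksum bytes around the payload).
 */
static int core_send(const struct phy_ops *phy,
		     const unsigned char *payload, size_t len)
{
	return phy->send_frame(payload, len);
}

int main(void)
{
	/* 0xd4 0x02 is GetFirmwareVersion per the pn532 manual. */
	const unsigned char cmd[] = { 0xd4, 0x02 };

	return core_send(&i2c_phy, cmd, sizeof(cmd));
}

The point of the pattern is that core_send never learns which bus
carries the frame, so adding the pn532's i2c transport does not touch
the shared command logic.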
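The NCI loopback item is also easy to picture in code. Again this is
an illustrative model, not the kernel's NCI implementation: a stand-in
"NFCC" echoes a data packet and the host compares it byte for byte,
which is exactly the property loopback mode lets the DH verify.

/*
 * Illustrative sketch only -- not the in-tree NCI code. In loopback
 * mode the NFCC returns every data packet unchanged, so a successful
 * compare exercises both the send and the receive path at once.
 */
#include <stdio.h>
#include <string.h>

/* Stand-in for an NFCC in loopback mode. */
static size_t nfcc_loopback(const unsigned char *in, size_t len,
			    unsigned char *out)
{
	memcpy(out, in, len);
	return len;
}

int main(void)
{
	unsigned char tx[] = { 0x01, 0x02, 0x03, 0x04 };
	unsigned char rx[sizeof(tx)];

	size_t n = nfcc_loopback(tx, sizeof(tx), rx);

	if (n == sizeof(tx) && memcmp(tx, rx, n) == 0)
		printf("loopback OK: TX and RX paths both work\n");
	else
		printf("loopback FAILED\n");
	return 0;
}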
2597 files changed:
.mailmap
Documentation/ABI/testing/sysfs-platform-i2c-demux-pinctrl
Documentation/DocBook/80211.tmpl
Documentation/accounting/getdelays.c
Documentation/devicetree/bindings/arc/archs-pct.txt
Documentation/devicetree/bindings/arc/pct.txt
Documentation/devicetree/bindings/arm/cpus.txt
Documentation/devicetree/bindings/ata/ahci-platform.txt
Documentation/devicetree/bindings/btmrvl.txt [deleted file]
Documentation/devicetree/bindings/clock/qca,ath79-pll.txt
Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
Documentation/devicetree/bindings/net/apm-xgene-enet.txt
Documentation/devicetree/bindings/net/cpsw.txt
Documentation/devicetree/bindings/net/dsa/dsa.txt
Documentation/devicetree/bindings/net/dsa/marvell.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt
Documentation/devicetree/bindings/net/hisilicon-hns-nic.txt
Documentation/devicetree/bindings/net/marvell-bt-sd8xxx.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/mediatek-net.txt
Documentation/devicetree/bindings/net/microchip,enc28j60.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/wireless/marvell-sd8xxx.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
Documentation/devicetree/bindings/phy/rockchip-dp-phy.txt
Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt
Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt
Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
Documentation/devicetree/bindings/rtc/s3c-rtc.txt
Documentation/filesystems/cramfs.txt
Documentation/filesystems/tmpfs.txt
Documentation/filesystems/vfs.txt
Documentation/input/event-codes.txt
Documentation/kernel-parameters.txt
Documentation/networking/altera_tse.txt
Documentation/networking/bonding.txt
Documentation/networking/checksum-offloads.txt
Documentation/networking/dsa/bcm_sf2.txt
Documentation/networking/dsa/dsa.txt
Documentation/networking/filter.txt
Documentation/networking/gen_stats.txt
Documentation/networking/ip-sysctl.txt
Documentation/networking/ipvlan.txt
Documentation/networking/netdev-features.txt
Documentation/networking/netdevices.txt
Documentation/networking/pktgen.txt
Documentation/networking/segmentation-offloads.txt [new file with mode: 0644]
Documentation/networking/vrf.txt
Documentation/networking/xfrm_sync.txt
Documentation/power/runtime_pm.txt
Documentation/sysctl/vm.txt
Documentation/usb/gadget_multi.txt
Documentation/x86/protection-keys.txt [new file with mode: 0644]
Documentation/x86/topology.txt [new file with mode: 0644]
Documentation/x86/x86_64/mm.txt
MAINTAINERS
Makefile
arch/arc/Kconfig
arch/arc/boot/dts/axs10x_mb.dtsi
arch/arc/configs/axs103_defconfig
arch/arc/configs/axs103_smp_defconfig
arch/arc/include/asm/fb.h [new file with mode: 0644]
arch/arc/include/asm/io.h
arch/arc/include/asm/irqflags-arcv2.h
arch/arc/include/asm/mmzone.h [new file with mode: 0644]
arch/arc/include/asm/page.h
arch/arc/include/asm/pgtable.h
arch/arc/kernel/entry-arcv2.S
arch/arc/kernel/entry-compact.S
arch/arc/mm/cache.c
arch/arc/mm/init.c
arch/arm/boot/dts/am335x-baltos-ir5221.dts
arch/arm/boot/dts/am33xx.dtsi
arch/arm/boot/dts/am4372.dtsi
arch/arm/boot/dts/am43x-epos-evm.dts
arch/arm/boot/dts/am57xx-beagle-x15.dts
arch/arm/boot/dts/armada-385-linksys.dtsi
arch/arm/boot/dts/dm814x-clocks.dtsi
arch/arm/boot/dts/dra62x-clocks.dtsi
arch/arm/boot/dts/dra7xx-clocks.dtsi
arch/arm/boot/dts/meson8.dtsi
arch/arm/boot/dts/meson8b.dtsi
arch/arm/boot/dts/omap3-n900.dts
arch/arm/boot/dts/omap34xx.dtsi
arch/arm/boot/dts/omap4.dtsi
arch/arm/boot/dts/omap5-board-common.dtsi
arch/arm/boot/dts/omap5-cm-t54.dts
arch/arm/boot/dts/omap5.dtsi
arch/arm/boot/dts/qcom-apq8064.dtsi
arch/arm/boot/dts/qcom-msm8974.dtsi
arch/arm/boot/dts/r8a7791-koelsch.dts
arch/arm/boot/dts/r8a7791-porter.dts
arch/arm/boot/dts/r8a7791.dtsi
arch/arm/boot/dts/sun8i-q8-common.dtsi
arch/arm/configs/multi_v5_defconfig
arch/arm/configs/mvebu_v7_defconfig
arch/arm/configs/orion5x_defconfig
arch/arm/configs/u8500_defconfig
arch/arm/include/asm/cputype.h
arch/arm/include/asm/domain.h
arch/arm/include/asm/unistd.h
arch/arm/include/uapi/asm/unistd.h
arch/arm/kernel/calls.S
arch/arm/kernel/head-nommu.S
arch/arm/kernel/setup.c
arch/arm/kvm/arm.c
arch/arm/kvm/mmu.c
arch/arm/mach-davinci/board-mityomapl138.c
arch/arm/mach-davinci/common.c
arch/arm/mach-exynos/pm_domains.c
arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c
arch/arm/mach-omap2/clockdomains7xx_data.c
arch/arm/mach-omap2/id.c
arch/arm/mach-omap2/io.c
arch/arm/mach-omap2/omap-wakeupgen.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
arch/arm/mach-omap2/pm34xx.c
arch/arm/mach-omap2/soc.h
arch/arm/mach-pxa/devices.c
arch/arm/mach-sa1100/Kconfig
arch/arm/mach-shmobile/timer.c
arch/arm/mach-socfpga/headsmp.S
arch/arm/mach-uniphier/platsmp.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/flush.c
arch/arm/mm/nommu.c
arch/arm/mm/proc-v7.S
arch/arm64/boot/dts/apm/apm-shadowcat.dtsi
arch/arm64/boot/dts/apm/apm-storm.dtsi
arch/arm64/boot/dts/broadcom/vulcan.dtsi
arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi
arch/arm64/boot/dts/renesas/r8a7795.dtsi
arch/arm64/boot/dts/socionext/uniphier-ph1-ld20-ref.dts
arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi
arch/arm64/include/asm/kvm_arm.h
arch/arm64/include/asm/kvm_asm.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/sysreg.h
arch/arm64/kernel/head.S
arch/arm64/kernel/smp_spin_table.c
arch/arm64/kvm/hyp/s2-setup.c
arch/m68k/coldfire/gpio.c
arch/m68k/configs/amiga_defconfig
arch/m68k/configs/apollo_defconfig
arch/m68k/configs/atari_defconfig
arch/m68k/configs/bvme6000_defconfig
arch/m68k/configs/hp300_defconfig
arch/m68k/configs/mac_defconfig
arch/m68k/configs/multi_defconfig
arch/m68k/configs/mvme147_defconfig
arch/m68k/configs/mvme16x_defconfig
arch/m68k/configs/q40_defconfig
arch/m68k/configs/sun3_defconfig
arch/m68k/configs/sun3x_defconfig
arch/m68k/include/asm/unistd.h
arch/m68k/include/uapi/asm/unistd.h
arch/m68k/kernel/syscalltable.S
arch/mips/alchemy/common/dbdma.c
arch/mips/alchemy/devboards/db1000.c
arch/mips/alchemy/devboards/db1550.c
arch/mips/ath79/clock.c
arch/mips/bcm47xx/sprom.c
arch/mips/boot/compressed/Makefile
arch/mips/boot/dts/brcm/bcm7435.dtsi
arch/mips/boot/dts/qca/ar9132.dtsi
arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts
arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c
arch/mips/cavium-octeon/executive/cvmx-pko.c
arch/mips/cavium-octeon/smp.c
arch/mips/configs/ci20_defconfig
arch/mips/dec/int-handler.S
arch/mips/fw/arc/memory.c
arch/mips/include/asm/cpu-info.h
arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
arch/mips/include/asm/mach-generic/kernel-entry-init.h
arch/mips/include/asm/mach-ip27/irq.h
arch/mips/include/asm/mach-ip27/kernel-entry-init.h
arch/mips/include/asm/mach-jz4740/gpio.h
arch/mips/include/asm/mips-cm.h
arch/mips/include/asm/mips-r2-to-r6-emul.h
arch/mips/include/asm/octeon/cvmx-config.h
arch/mips/include/asm/octeon/cvmx.h
arch/mips/include/asm/pci/bridge.h
arch/mips/include/asm/sgi/hpc3.h
arch/mips/include/asm/sgiarcs.h
arch/mips/include/asm/sn/ioc3.h
arch/mips/include/asm/sn/sn0/hubio.h
arch/mips/include/asm/uaccess.h
arch/mips/include/uapi/asm/unistd.h
arch/mips/kernel/mips-cm.c
arch/mips/kernel/mips-r2-to-r6-emul.c
arch/mips/kernel/module-rela.c
arch/mips/kernel/module.c
arch/mips/kernel/perf_event_mipsxx.c
arch/mips/kernel/pm-cps.c
arch/mips/kernel/process.c
arch/mips/kernel/scall32-o32.S
arch/mips/kernel/scall64-64.S
arch/mips/kernel/scall64-n32.S
arch/mips/kernel/scall64-o32.S
arch/mips/kernel/smp.c
arch/mips/kernel/traps.c
arch/mips/kernel/unaligned.c
arch/mips/kvm/tlb.c
arch/mips/kvm/trap_emul.c
arch/mips/math-emu/ieee754dp.c
arch/mips/math-emu/ieee754sp.c
arch/mips/mm/sc-ip22.c
arch/mips/mm/tlb-r4k.c
arch/mips/mm/tlbex.c
arch/mips/sgi-ip27/ip27-memory.c
arch/nios2/lib/memset.c
arch/parisc/Kconfig
arch/parisc/Kconfig.debug
arch/parisc/Makefile
arch/parisc/include/asm/ftrace.h
arch/parisc/include/asm/uaccess.h
arch/parisc/kernel/Makefile
arch/parisc/kernel/asm-offsets.c
arch/parisc/kernel/cache.c
arch/parisc/kernel/entry.S
arch/parisc/kernel/ftrace.c
arch/parisc/kernel/head.S
arch/parisc/kernel/module.c
arch/parisc/kernel/parisc_ksyms.c
arch/parisc/kernel/syscall.S
arch/parisc/kernel/traps.c
arch/parisc/lib/fixup.S
arch/parisc/mm/fault.c
arch/parisc/mm/init.c
arch/powerpc/include/asm/systbl.h
arch/powerpc/include/asm/unistd.h
arch/powerpc/include/asm/word-at-a-time.h
arch/powerpc/include/uapi/asm/cputable.h
arch/powerpc/include/uapi/asm/unistd.h
arch/powerpc/kernel/prom.c
arch/powerpc/platforms/cell/spufs/inode.c
arch/s390/Kconfig
arch/s390/hypfs/inode.c
arch/s390/include/asm/mmu.h
arch/s390/include/asm/mmu_context.h
arch/s390/include/asm/pci.h
arch/s390/include/asm/pgalloc.h
arch/s390/include/asm/processor.h
arch/s390/include/asm/seccomp.h
arch/s390/include/asm/tlbflush.h
arch/s390/lib/spinlock.c
arch/s390/mm/gmap.c
arch/s390/mm/init.c
arch/s390/mm/mmap.c
arch/s390/mm/pgalloc.c
arch/s390/pci/pci_dma.c
arch/sh/include/asm/smp.h
arch/sh/include/asm/topology.h
arch/sh/kernel/cpu/sh4a/smp-shx3.c
arch/sh/kernel/topology.c
arch/sparc/configs/sparc32_defconfig
arch/sparc/configs/sparc64_defconfig
arch/sparc/include/asm/spitfire.h
arch/sparc/include/uapi/asm/unistd.h
arch/sparc/kernel/cherrs.S
arch/sparc/kernel/cpu.c
arch/sparc/kernel/cpumap.c
arch/sparc/kernel/fpu_traps.S
arch/sparc/kernel/head_64.S
arch/sparc/kernel/misctrap.S
arch/sparc/kernel/pci.c
arch/sparc/kernel/setup_64.c
arch/sparc/kernel/spiterrs.S
arch/sparc/kernel/systbls_32.S
arch/sparc/kernel/systbls_64.S
arch/sparc/kernel/utrap.S
arch/sparc/kernel/vio.c
arch/sparc/kernel/vmlinux.lds.S
arch/sparc/kernel/winfixup.S
arch/sparc/mm/init_64.c
arch/tile/configs/tilegx_defconfig
arch/tile/configs/tilepro_defconfig
arch/um/drivers/net_kern.c
arch/x86/boot/compressed/Makefile
arch/x86/boot/compressed/head_32.S
arch/x86/boot/compressed/head_64.S
arch/x86/crypto/sha-mb/sha1_mb.c
arch/x86/events/amd/core.c
arch/x86/events/amd/ibs.c
arch/x86/events/amd/iommu.c
arch/x86/events/intel/core.c
arch/x86/events/intel/lbr.c
arch/x86/events/intel/pt.c
arch/x86/events/intel/pt.h
arch/x86/events/intel/rapl.c
arch/x86/events/perf_event.h
arch/x86/include/asm/hugetlb.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/perf_event.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/smp.h
arch/x86/include/asm/thread_info.h
arch/x86/kernel/amd_nb.c
arch/x86/kernel/apic/vector.c
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/mcheck/mce-genpool.c
arch/x86/kernel/cpu/mshyperv.c
arch/x86/kernel/cpu/powerflags.c
arch/x86/kernel/head_32.S
arch/x86/kernel/smpboot.c
arch/x86/kernel/sysfb_efi.c
arch/x86/kernel/tsc_msr.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/hyperv.c
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.c
arch/x86/kvm/mmu.h
arch/x86/kvm/paging_tmpl.h
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/mm/setup_nx.c
arch/x86/platform/efi/efi-bgrt.c
arch/x86/ras/mce_amd_inj.c
arch/x86/xen/apic.c
arch/x86/xen/smp.c
arch/x86/xen/spinlock.c
arch/xtensa/platforms/iss/network.c
block/bio.c
block/blk-core.c
block/blk-settings.c
block/blk-sysfs.c
block/cfq-iosched.c
block/compat_ioctl.c
block/ioctl.c
block/partition-generic.c
crypto/Kconfig
crypto/ahash.c
crypto/rsa-pkcs1pad.c
drivers/acpi/acpica/dsmethod.c
drivers/acpi/nfit.c
drivers/ata/Kconfig
drivers/ata/Makefile
drivers/ata/ahci_platform.c
drivers/ata/ahci_seattle.c [new file with mode: 0644]
drivers/ata/libahci.c
drivers/base/power/opp/core.c
drivers/base/power/wakeup.c
drivers/base/property.c
drivers/bcma/main.c
drivers/block/aoe/aoeblk.c
drivers/block/brd.c
drivers/block/drbd/drbd_int.h
drivers/block/drbd/drbd_nl.c
drivers/block/loop.c
drivers/block/rbd.c
drivers/bluetooth/ath3k.c
drivers/bluetooth/btmrvl_drv.h
drivers/bluetooth/btmrvl_main.c
drivers/bluetooth/btmrvl_sdio.c
drivers/bluetooth/btmrvl_sdio.h
drivers/bluetooth/btusb.c
drivers/bluetooth/hci_bcm.c
drivers/bluetooth/hci_bcsp.c
drivers/bluetooth/hci_intel.c
drivers/bluetooth/hci_ldisc.c
drivers/bluetooth/hci_uart.h
drivers/bluetooth/hci_vhci.c
drivers/bus/mvebu-mbus.c
drivers/bus/uniphier-system-bus.c
drivers/char/hw_random/bcm63xx-rng.c
drivers/char/pcmcia/synclink_cs.c
drivers/clk/imx/clk-imx6q.c
drivers/clocksource/tango_xtal.c
drivers/cpufreq/cpufreq-dt.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq_governor.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/sti-cpufreq.c
drivers/cpuidle/cpuidle-arm.c
drivers/crypto/ccp/ccp-crypto-aes-cmac.c
drivers/crypto/ccp/ccp-crypto-sha.c
drivers/crypto/qat/qat_common/adf_common_drv.h
drivers/crypto/qat/qat_common/adf_ctl_drv.c
drivers/crypto/qat/qat_common/adf_sriov.c
drivers/crypto/talitos.c
drivers/dma/dw/core.c
drivers/dma/edma.c
drivers/dma/hsu/hsu.c
drivers/dma/hsu/hsu.h
drivers/dma/omap-dma.c
drivers/dma/xilinx/xilinx_vdma.c
drivers/edac/i7core_edac.c
drivers/edac/sb_edac.c
drivers/extcon/extcon-palmas.c
drivers/firewire/net.c
drivers/firmware/efi/arm-init.c
drivers/firmware/efi/vars.c
drivers/firmware/psci.c
drivers/firmware/qemu_fw_cfg.c
drivers/gpio/gpio-pca953x.c
drivers/gpio/gpio-pxa.c
drivers/gpio/gpio-rcar.c
drivers/gpio/gpiolib-acpi.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/tonga_ih.c
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/include/cgs_common.h
drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
drivers/gpu/drm/arm/hdlcd_drv.c
drivers/gpu/drm/armada/armada_gem.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
drivers/gpu/drm/exynos/Kconfig
drivers/gpu/drm/exynos/Makefile
drivers/gpu/drm/exynos/exynos_drm_core.c
drivers/gpu/drm/exynos/exynos_drm_fb.c
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
drivers/gpu/drm/exynos/exynos_drm_fbdev.h
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_mic.c
drivers/gpu/drm/exynos/exynos_drm_plane.c
drivers/gpu/drm/gma500/cdv_intel_dp.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_userptr.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/imx/dw_hdmi-imx.c
drivers/gpu/drm/imx/imx-drm-core.c
drivers/gpu/drm/imx/ipuv3-plane.c
drivers/gpu/drm/imx/ipuv3-plane.h
drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_platform.c
drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
drivers/gpu/drm/qxl/qxl_display.c
drivers/gpu/drm/qxl/qxl_drv.h
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_reg.h
drivers/gpu/drm/radeon/ni_reg.h
drivers/gpu/drm/radeon/radeon_atpx_handler.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_dp_mst.c
drivers/gpu/drm/radeon/radeon_irq_kms.c
drivers/gpu/drm/radeon/radeon_legacy_crtc.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/si_dpm.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_tt.c
drivers/gpu/drm/via/via_dmablit.c
drivers/gpu/drm/virtio/virtgpu_display.c
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
drivers/gpu/ipu-v3/ipu-common.c
drivers/gpu/ipu-v3/ipu-cpmem.c
drivers/gpu/ipu-v3/ipu-dmfc.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-lenovo.c
drivers/hid/hid-microsoft.c
drivers/hid/hid-multitouch.c
drivers/hid/hid-wiimote-modules.c
drivers/hid/usbhid/hid-core.c
drivers/hid/usbhid/hid-quirks.c
drivers/hid/wacom_sys.c
drivers/hid/wacom_wac.c
drivers/hid/wacom_wac.h
drivers/hv/ring_buffer.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-cpm.c
drivers/i2c/busses/i2c-exynos5.c
drivers/i2c/busses/i2c-ismt.c
drivers/i2c/busses/i2c-jz4780.c
drivers/i2c/busses/i2c-rk3x.c
drivers/i2c/i2c-core.c
drivers/i2c/muxes/i2c-demux-pinctrl.c
drivers/idle/intel_idle.c
drivers/iio/accel/bmc150-accel-core.c
drivers/iio/adc/Kconfig
drivers/iio/adc/at91-sama5d2_adc.c
drivers/iio/adc/max1363.c
drivers/iio/gyro/bmg160_core.c
drivers/iio/health/max30100.c
drivers/iio/imu/inv_mpu6050/Kconfig
drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
drivers/iio/industrialio-buffer.c
drivers/iio/light/apds9960.c
drivers/iio/magnetometer/ak8975.c
drivers/iio/magnetometer/st_magn.h
drivers/infiniband/core/cache.c
drivers/infiniband/core/ucm.c
drivers/infiniband/core/ucma.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/core/verbs.c
drivers/infiniband/hw/cxgb3/iwch_provider.c
drivers/infiniband/hw/cxgb4/cq.c
drivers/infiniband/hw/cxgb4/provider.c
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/i40iw/i40iw_cm.c
drivers/infiniband/hw/i40iw/i40iw_main.c
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/nes/nes_nic.c
drivers/infiniband/hw/qib/qib_file_ops.c
drivers/infiniband/sw/rdmavt/qp.c
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/iser/iscsi_iser.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/infiniband/ulp/isert/ib_isert.h
drivers/infiniband/ulp/srpt/ib_srpt.c
drivers/infiniband/ulp/srpt/ib_srpt.h
drivers/input/joystick/xpad.c
drivers/input/misc/arizona-haptics.c
drivers/input/misc/pmic8xxx-pwrkey.c
drivers/input/misc/twl4030-vibra.c
drivers/input/misc/twl6040-vibra.c
drivers/input/tablet/gtco.c
drivers/input/touchscreen/atmel_mxt_ts.c
drivers/input/touchscreen/zforce_ts.c
drivers/iommu/amd_iommu.c
drivers/iommu/arm-smmu.c
drivers/iommu/dma-iommu.c
drivers/iommu/intel-iommu.c
drivers/iommu/iommu.c
drivers/iommu/rockchip-iommu.c
drivers/irqchip/irq-mips-gic.c
drivers/isdn/hardware/eicon/message.c
drivers/isdn/hysdn/hysdn_net.c
drivers/isdn/i4l/isdn_net.c
drivers/isdn/i4l/isdn_x25iface.c
drivers/isdn/mISDN/socket.c
drivers/lguest/interrupts_and_traps.c
drivers/lguest/lg.h
drivers/lguest/x86/core.c
drivers/mailbox/mailbox-test.c
drivers/mailbox/mailbox-xgene-slimpro.c
drivers/mailbox/mailbox.c
drivers/mailbox/pcc.c
drivers/md/bitmap.c
drivers/md/dm-cache-metadata.c
drivers/md/dm.c
drivers/md/md.c
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid5.c
drivers/media/media-device.c
drivers/media/platform/exynos4-is/media-dev.c
drivers/media/platform/s3c-camif/camif-core.c
drivers/media/usb/au0828/au0828-cards.c
drivers/media/usb/au0828/au0828-core.c
drivers/media/usb/au0828/au0828-input.c
drivers/media/usb/au0828/au0828-video.c
drivers/media/usb/au0828/au0828.h
drivers/media/usb/usbvision/usbvision-video.c
drivers/media/v4l2-core/v4l2-mc.c
drivers/media/v4l2-core/videobuf-dma-sg.c
drivers/media/v4l2-core/videobuf2-core.c
drivers/media/v4l2-core/videobuf2-memops.c
drivers/media/v4l2-core/videobuf2-v4l2.c
drivers/message/fusion/mptlan.c
drivers/misc/cxl/context.c
drivers/misc/cxl/cxl.h
drivers/misc/cxl/irq.c
drivers/misc/cxl/native.c
drivers/misc/ibmasm/ibmasmfs.c
drivers/misc/lkdtm.c
drivers/misc/mic/vop/vop_vringh.c
drivers/misc/vmw_vmci/vmci_queue_pair.c
drivers/mmc/card/block.c
drivers/mmc/core/host.c
drivers/mmc/host/Kconfig
drivers/mmc/host/sdhci-acpi.c
drivers/mmc/host/sdhci-pci-core.c
drivers/mmc/host/sdhci-pci.h
drivers/mmc/host/sdhci-pxav3.c
drivers/mmc/host/sdhci-tegra.c
drivers/mmc/host/sdhci.c
drivers/mmc/host/sdhci.h
drivers/mmc/host/sh_mmcif.c
drivers/mmc/host/sunxi-mmc.c
drivers/mmc/host/tmio_mmc_dma.c
drivers/mmc/host/tmio_mmc_pio.c
drivers/mmc/host/usdhi6rol0.c
drivers/mtd/devices/block2mtd.c
drivers/mtd/nand/nand_base.c
drivers/mtd/nand/nandsim.c
drivers/net/Kconfig
drivers/net/Makefile
drivers/net/appletalk/cops.c
drivers/net/arcnet/com90xx.c
drivers/net/can/dev.c
drivers/net/can/ifi_canfd/ifi_canfd.c
drivers/net/can/janz-ican3.c
drivers/net/can/m_can/m_can.c
drivers/net/can/mscan/mscan.c
drivers/net/can/sja1000/plx_pci.c
drivers/net/can/sja1000/sja1000.c
drivers/net/can/spi/mcp251x.c
drivers/net/can/usb/ems_usb.c
drivers/net/can/usb/esd_usb2.c
drivers/net/can/usb/gs_usb.c
drivers/net/can/usb/peak_usb/pcan_usb_core.c
drivers/net/cris/eth_v10.c
drivers/net/dsa/Kconfig
drivers/net/dsa/Makefile
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/mv88e6060.c
drivers/net/dsa/mv88e6060.h
drivers/net/dsa/mv88e6123.c [deleted file]
drivers/net/dsa/mv88e6131.c [deleted file]
drivers/net/dsa/mv88e6171.c [deleted file]
drivers/net/dsa/mv88e6352.c [deleted file]
drivers/net/dsa/mv88e6xxx.c
drivers/net/dsa/mv88e6xxx.h
drivers/net/ethernet/3com/3c509.c
drivers/net/ethernet/3com/3c515.c
drivers/net/ethernet/3com/3c574_cs.c
drivers/net/ethernet/3com/3c589_cs.c
drivers/net/ethernet/3com/3c59x.c
drivers/net/ethernet/8390/axnet_cs.c
drivers/net/ethernet/8390/lib8390.c
drivers/net/ethernet/adaptec/starfire.c
drivers/net/ethernet/adi/bfin_mac.c
drivers/net/ethernet/agere/et131x.c
drivers/net/ethernet/allwinner/sun4i-emac.c
drivers/net/ethernet/amd/7990.c
drivers/net/ethernet/amd/a2065.c
drivers/net/ethernet/amd/atarilance.c
drivers/net/ethernet/amd/au1000_eth.c
drivers/net/ethernet/amd/declance.c
drivers/net/ethernet/amd/lance.c
drivers/net/ethernet/amd/ni65.c
drivers/net/ethernet/amd/nmclan_cs.c
drivers/net/ethernet/amd/pcnet32.c
drivers/net/ethernet/amd/sunlance.c
drivers/net/ethernet/apm/xgene/xgene_enet_cle.c
drivers/net/ethernet/apm/xgene/xgene_enet_cle.h
drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
drivers/net/ethernet/apm/xgene/xgene_enet_main.h
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/atheros/atl1c/atl1c.h
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
drivers/net/ethernet/atheros/atl1e/atl1e.h
drivers/net/ethernet/atheros/atl1e/atl1e_main.c
drivers/net/ethernet/atheros/atlx/atl2.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bgmac.c
drivers/net/ethernet/broadcom/bgmac.h
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
drivers/net/ethernet/broadcom/cnic.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/sb1250-mac.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/cavium/liquidio/lio_main.c
drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
drivers/net/ethernet/cavium/thunder/nicvf_main.c
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
drivers/net/ethernet/chelsio/cxgb/sge.c
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/davicom/dm9000.c
drivers/net/ethernet/dec/tulip/de4x5.c
drivers/net/ethernet/dec/tulip/dmfe.c
drivers/net/ethernet/dec/tulip/pnic.c
drivers/net/ethernet/dec/tulip/tulip_core.c
drivers/net/ethernet/dec/tulip/uli526x.c
drivers/net/ethernet/dec/tulip/winbond-840.c
drivers/net/ethernet/dlink/dl2k.c
drivers/net/ethernet/dlink/sundance.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/fealnx.c
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fec_mpc52xx.c
drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/freescale/gianfar_ethtool.c
drivers/net/ethernet/freescale/ucc_geth_ethtool.c
drivers/net/ethernet/fujitsu/fmvj18x_cs.c
drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/hisilicon/hns/hns_enet.h
drivers/net/ethernet/hp/hp100.c
drivers/net/ethernet/i825xx/82596.c
drivers/net/ethernet/i825xx/lib82596.c
drivers/net/ethernet/i825xx/sun3_82586.c
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/ibm/emac/phy.c
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/fm10k/Makefile
drivers/net/ethernet/intel/fm10k/fm10k.h
drivers/net/ethernet/intel/fm10k/fm10k_common.c
drivers/net/ethernet/intel/fm10k/fm10k_common.h
drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
drivers/net/ethernet/intel/fm10k/fm10k_iov.c
drivers/net/ethernet/intel/fm10k/fm10k_main.c
drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
drivers/net/ethernet/intel/fm10k/fm10k_mbx.h
drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
drivers/net/ethernet/intel/fm10k/fm10k_pf.c
drivers/net/ethernet/intel/fm10k/fm10k_pf.h
drivers/net/ethernet/intel/fm10k/fm10k_ptp.c [deleted file]
drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
drivers/net/ethernet/intel/fm10k/fm10k_type.h
drivers/net/ethernet/intel/fm10k/fm10k_vf.c
drivers/net/ethernet/intel/fm10k/fm10k_vf.h
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_adminq.c
drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
drivers/net/ethernet/intel/i40e/i40e_client.h
drivers/net/ethernet/intel/i40e/i40e_common.c
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
drivers/net/ethernet/intel/i40e/i40e_devids.h
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_nvm.c
drivers/net/ethernet/intel/i40e/i40e_prototype.h
drivers/net/ethernet/intel/i40e/i40e_ptp.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_txrx.h
drivers/net/ethernet/intel/i40e/i40e_type.h
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
drivers/net/ethernet/intel/i40evf/i40e_common.c
drivers/net/ethernet/intel/i40evf/i40e_devids.h
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.h
drivers/net/ethernet/intel/i40evf/i40e_type.h
drivers/net/ethernet/intel/i40evf/i40evf.h
drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
drivers/net/ethernet/intel/i40evf/i40evf_main.c
drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
drivers/net/ethernet/intel/ixgbevf/defines.h
drivers/net/ethernet/intel/ixgbevf/ethtool.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/intel/ixgbevf/mbx.c
drivers/net/ethernet/intel/ixgbevf/vf.c
drivers/net/ethernet/intel/ixgbevf/vf.h
drivers/net/ethernet/jme.c
drivers/net/ethernet/korina.c
drivers/net/ethernet/lantiq_etop.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/pxa168_eth.c
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mediatek/mtk_eth_soc.h
drivers/net/ethernet/mellanox/mlx4/alloc.c
drivers/net/ethernet/mellanox/mlx4/en_cq.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_port.c
drivers/net/ethernet/mellanox/mlx4/en_resources.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/port.c
drivers/net/ethernet/mellanox/mlx5/core/Makefile
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
drivers/net/ethernet/mellanox/mlx5/core/port.c
drivers/net/ethernet/mellanox/mlx5/core/qp.c
drivers/net/ethernet/mellanox/mlx5/core/uar.c
drivers/net/ethernet/mellanox/mlx5/core/vport.c
drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
drivers/net/ethernet/mellanox/mlxsw/core.c
drivers/net/ethernet/mellanox/mlxsw/core.h
drivers/net/ethernet/mellanox/mlxsw/reg.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
drivers/net/ethernet/micrel/ksz884x.c
drivers/net/ethernet/microchip/enc28j60.c
drivers/net/ethernet/microchip/encx24j600.c
drivers/net/ethernet/moxa/moxart_ether.c
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/natsemi/natsemi.c
drivers/net/ethernet/natsemi/sonic.c
drivers/net/ethernet/neterion/s2io.c
drivers/net/ethernet/netronome/nfp/nfp_net.h
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
drivers/net/ethernet/nuvoton/w90p910_ether.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
drivers/net/ethernet/packetengines/hamachi.c
drivers/net/ethernet/packetengines/yellowfin.c
drivers/net/ethernet/qlogic/Kconfig
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/qlogic/qed/Makefile
drivers/net/ethernet/qlogic/qed/qed.h
drivers/net/ethernet/qlogic/qed/qed_dev.c
drivers/net/ethernet/qlogic/qed/qed_dev_api.h
drivers/net/ethernet/qlogic/qed/qed_hsi.h
drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
drivers/net/ethernet/qlogic/qed/qed_l2.c
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qed/qed_mcp.c
drivers/net/ethernet/qlogic/qed/qed_mcp.h
drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
drivers/net/ethernet/qlogic/qed/qed_selftest.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_selftest.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_sp.h
drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
drivers/net/ethernet/qlogic/qede/qede.h
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
drivers/net/ethernet/qlogic/qede/qede_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qlogic/qlge/qlge_main.c
drivers/net/ethernet/qualcomm/qca_spi.c
drivers/net/ethernet/realtek/atp.c
drivers/net/ethernet/renesas/ravb.h
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/renesas/ravb_ptp.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/renesas/sh_eth.h
drivers/net/ethernet/seeq/sgiseeq.c
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sgi/meth.c
drivers/net/ethernet/sis/sis900.c
drivers/net/ethernet/smsc/epic100.c
drivers/net/ethernet/smsc/smc911x.c
drivers/net/ethernet/smsc/smc9194.c
drivers/net/ethernet/smsc/smc91c92_cs.c
drivers/net/ethernet/smsc/smc91x.c
drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/sun/niu.c
drivers/net/ethernet/sun/sungem.c
drivers/net/ethernet/synopsys/dwc_eth_qos.c
drivers/net/ethernet/tehuti/tehuti.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/cpsw.h
drivers/net/ethernet/ti/davinci_emac.c
drivers/net/ethernet/ti/netcp_core.c
drivers/net/ethernet/ti/tlan.c
drivers/net/ethernet/tile/tilepro.c
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
drivers/net/ethernet/toshiba/spider_net.c
drivers/net/ethernet/tundra/tsi108_eth.c
drivers/net/ethernet/via/via-rhine.c
drivers/net/ethernet/wiznet/Kconfig
drivers/net/ethernet/wiznet/Makefile
drivers/net/ethernet/wiznet/w5100-spi.c [new file with mode: 0644]
drivers/net/ethernet/wiznet/w5100.c
drivers/net/ethernet/wiznet/w5100.h [new file with mode: 0644]
drivers/net/ethernet/wiznet/w5300.c
drivers/net/ethernet/xilinx/ll_temac_main.c
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
drivers/net/ethernet/xilinx/xilinx_emaclite.c
drivers/net/ethernet/xircom/xirc2ps_cs.c
drivers/net/fjes/fjes_hw.c
drivers/net/fjes/fjes_hw.h
drivers/net/fjes/fjes_main.c
drivers/net/geneve.c
drivers/net/gtp.c [new file with mode: 0644]
drivers/net/hamradio/baycom_epp.c
drivers/net/hamradio/hdlcdrv.c
drivers/net/hamradio/mkiss.c
drivers/net/hamradio/scc.c
drivers/net/hamradio/yam.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/hyperv/rndis_filter.c
drivers/net/ieee802154/adf7242.c
drivers/net/ieee802154/at86rf230.c
drivers/net/ieee802154/atusb.c
drivers/net/ieee802154/mrf24j40.c
drivers/net/ifb.c
drivers/net/ipvlan/ipvlan_main.c
drivers/net/irda/ali-ircc.c
drivers/net/irda/bfin_sir.c
drivers/net/irda/irda-usb.c
drivers/net/irda/nsc-ircc.c
drivers/net/irda/smsc-ircc2.c
drivers/net/irda/stir4200.c
drivers/net/irda/via-ircc.c
drivers/net/macsec.c
drivers/net/macvlan.c
drivers/net/macvtap.c
drivers/net/phy/at803x.c
drivers/net/phy/fixed_phy.c
drivers/net/phy/mdio-mux.c
drivers/net/phy/mdio_bus.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/phy/spi_ks8995.c
drivers/net/ppp/ppp_generic.c
drivers/net/rionet.c
drivers/net/slip/slip.c
drivers/net/tun.c
drivers/net/usb/catc.c
drivers/net/usb/cdc_mbim.c
drivers/net/usb/kaweth.c
drivers/net/usb/lan78xx.c
drivers/net/usb/pegasus.c
drivers/net/usb/r8152.c
drivers/net/usb/rtl8150.c
drivers/net/usb/smsc75xx.c
drivers/net/usb/smsc95xx.c
drivers/net/usb/usbnet.c
drivers/net/veth.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vmxnet3/vmxnet3_int.h
drivers/net/vrf.c
drivers/net/vxlan.c
drivers/net/wan/cosa.c
drivers/net/wan/farsync.c
drivers/net/wan/lmc/lmc_main.c
drivers/net/wan/sbni.c
drivers/net/wimax/i2400m/netdev.c
drivers/net/wireless/admtek/adm8211.c
drivers/net/wireless/ath/ar5523/ar5523.c
drivers/net/wireless/ath/ath.h
drivers/net/wireless/ath/ath10k/ce.c
drivers/net/wireless/ath/ath10k/ce.h
drivers/net/wireless/ath/ath10k/core.c
drivers/net/wireless/ath/ath10k/core.h
drivers/net/wireless/ath/ath10k/debug.c
drivers/net/wireless/ath/ath10k/debug.h
drivers/net/wireless/ath/ath10k/htc.h
drivers/net/wireless/ath/ath10k/htt.c
drivers/net/wireless/ath/ath10k/htt.h
drivers/net/wireless/ath/ath10k/htt_rx.c
drivers/net/wireless/ath/ath10k/htt_tx.c
drivers/net/wireless/ath/ath10k/hw.h
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath10k/mac.h
drivers/net/wireless/ath/ath10k/pci.c
drivers/net/wireless/ath/ath10k/pci.h
drivers/net/wireless/ath/ath10k/swap.c
drivers/net/wireless/ath/ath10k/swap.h
drivers/net/wireless/ath/ath10k/targaddrs.h
drivers/net/wireless/ath/ath10k/testmode.c
drivers/net/wireless/ath/ath10k/thermal.h
drivers/net/wireless/ath/ath10k/txrx.c
drivers/net/wireless/ath/ath10k/txrx.h
drivers/net/wireless/ath/ath10k/wmi-ops.h
drivers/net/wireless/ath/ath10k/wmi-tlv.c
drivers/net/wireless/ath/ath10k/wmi-tlv.h
drivers/net/wireless/ath/ath10k/wmi.c
drivers/net/wireless/ath/ath10k/wmi.h
drivers/net/wireless/ath/ath10k/wow.c
drivers/net/wireless/ath/ath5k/ani.c
drivers/net/wireless/ath/ath5k/ath5k.h
drivers/net/wireless/ath/ath5k/attach.c
drivers/net/wireless/ath/ath5k/base.c
drivers/net/wireless/ath/ath5k/debug.c
drivers/net/wireless/ath/ath5k/led.c
drivers/net/wireless/ath/ath5k/pcu.c
drivers/net/wireless/ath/ath5k/phy.c
drivers/net/wireless/ath/ath5k/qcu.c
drivers/net/wireless/ath/ath5k/reset.c
drivers/net/wireless/ath/ath6kl/cfg80211.c
drivers/net/wireless/ath/ath6kl/core.h
drivers/net/wireless/ath/ath6kl/wmi.c
drivers/net/wireless/ath/ath6kl/wmi.h
drivers/net/wireless/ath/ath9k/ar5008_phy.c
drivers/net/wireless/ath/ath9k/ar9002_phy.c
drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
drivers/net/wireless/ath/ath9k/ar9003_calib.c
drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
drivers/net/wireless/ath/ath9k/ar9003_mci.c
drivers/net/wireless/ath/ath9k/ar9003_phy.c
drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
drivers/net/wireless/ath/ath9k/ar9340_initvals.h
drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h
drivers/net/wireless/ath/ath9k/ar9485_initvals.h
drivers/net/wireless/ath/ath9k/ar953x_initvals.h
drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
drivers/net/wireless/ath/ath9k/ar956x_initvals.h
drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/btcoex.c
drivers/net/wireless/ath/ath9k/btcoex.h
drivers/net/wireless/ath/ath9k/calib.c
drivers/net/wireless/ath/ath9k/channel.c
drivers/net/wireless/ath/ath9k/common-init.c
drivers/net/wireless/ath/ath9k/common.c
drivers/net/wireless/ath/ath9k/debug.c
drivers/net/wireless/ath/ath9k/debug_sta.c
drivers/net/wireless/ath/ath9k/dynack.c
drivers/net/wireless/ath/ath9k/gpio.c
drivers/net/wireless/ath/ath9k/hif_usb.c
drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
drivers/net/wireless/ath/ath9k/htc_drv_init.c
drivers/net/wireless/ath/ath9k/htc_drv_main.c
drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/hw.h
drivers/net/wireless/ath/ath9k/init.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/pci.c
drivers/net/wireless/ath/ath9k/reg.h
drivers/net/wireless/ath/ath9k/rng.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/carl9170/mac.c
drivers/net/wireless/ath/carl9170/main.c
drivers/net/wireless/ath/carl9170/phy.c
drivers/net/wireless/ath/carl9170/rx.c
drivers/net/wireless/ath/carl9170/tx.c
drivers/net/wireless/ath/regd.c
drivers/net/wireless/ath/regd.h
drivers/net/wireless/ath/wcn36xx/debug.c
drivers/net/wireless/ath/wcn36xx/hal.h
drivers/net/wireless/ath/wcn36xx/main.c
drivers/net/wireless/ath/wcn36xx/pmc.c
drivers/net/wireless/ath/wcn36xx/smd.c
drivers/net/wireless/ath/wcn36xx/smd.h
drivers/net/wireless/ath/wcn36xx/txrx.c
drivers/net/wireless/ath/wcn36xx/wcn36xx.h
drivers/net/wireless/ath/wil6210/Makefile
drivers/net/wireless/ath/wil6210/cfg80211.c
drivers/net/wireless/ath/wil6210/debugfs.c
drivers/net/wireless/ath/wil6210/interrupt.c
drivers/net/wireless/ath/wil6210/ioctl.c
drivers/net/wireless/ath/wil6210/main.c
drivers/net/wireless/ath/wil6210/netdev.c
drivers/net/wireless/ath/wil6210/p2p.c [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/pcie_bus.c
drivers/net/wireless/ath/wil6210/rx_reorder.c
drivers/net/wireless/ath/wil6210/trace.h
drivers/net/wireless/ath/wil6210/txrx.c
drivers/net/wireless/ath/wil6210/txrx.h
drivers/net/wireless/ath/wil6210/wil6210.h
drivers/net/wireless/ath/wil6210/wil_platform.h
drivers/net/wireless/ath/wil6210/wmi.c
drivers/net/wireless/ath/wil6210/wmi.h
drivers/net/wireless/atmel/at76c50x-usb.c
drivers/net/wireless/atmel/atmel.c
drivers/net/wireless/broadcom/b43/b43.h
drivers/net/wireless/broadcom/b43/main.c
drivers/net/wireless/broadcom/b43/phy_ac.c
drivers/net/wireless/broadcom/b43/phy_common.c
drivers/net/wireless/broadcom/b43/phy_ht.c
drivers/net/wireless/broadcom/b43/phy_lcn.c
drivers/net/wireless/broadcom/b43/phy_lp.c
drivers/net/wireless/broadcom/b43/phy_n.c
drivers/net/wireless/broadcom/b43/tables_lpphy.c
drivers/net/wireless/broadcom/b43/tables_nphy.c
drivers/net/wireless/broadcom/b43/tables_phy_lcn.c
drivers/net/wireless/broadcom/b43/xmit.c
drivers/net/wireless/broadcom/b43legacy/main.c
drivers/net/wireless/broadcom/b43legacy/xmit.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
drivers/net/wireless/cisco/airo.c
drivers/net/wireless/intel/ipw2x00/ipw2100.c
drivers/net/wireless/intel/ipw2x00/ipw2200.c
drivers/net/wireless/intel/iwlegacy/3945-mac.c
drivers/net/wireless/intel/iwlegacy/3945-rs.c
drivers/net/wireless/intel/iwlegacy/3945.c
drivers/net/wireless/intel/iwlegacy/4965-mac.c
drivers/net/wireless/intel/iwlegacy/4965-rs.c
drivers/net/wireless/intel/iwlegacy/4965.c
drivers/net/wireless/intel/iwlegacy/4965.h
drivers/net/wireless/intel/iwlegacy/common.c
drivers/net/wireless/intel/iwlegacy/common.h
drivers/net/wireless/intel/iwlegacy/debug.c
drivers/net/wireless/intel/iwlwifi/Kconfig
drivers/net/wireless/intel/iwlwifi/dvm/agn.h
drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
drivers/net/wireless/intel/iwlwifi/dvm/dev.h
drivers/net/wireless/intel/iwlwifi/dvm/devices.c
drivers/net/wireless/intel/iwlwifi/dvm/lib.c
drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/dvm/main.c
drivers/net/wireless/intel/iwlwifi/dvm/rs.c
drivers/net/wireless/intel/iwlwifi/dvm/rs.h
drivers/net/wireless/intel/iwlwifi/dvm/rx.c
drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
drivers/net/wireless/intel/iwlwifi/dvm/scan.c
drivers/net/wireless/intel/iwlwifi/dvm/sta.c
drivers/net/wireless/intel/iwlwifi/dvm/tx.c
drivers/net/wireless/intel/iwlwifi/iwl-1000.c
drivers/net/wireless/intel/iwlwifi/iwl-2000.c
drivers/net/wireless/intel/iwlwifi/iwl-5000.c
drivers/net/wireless/intel/iwlwifi/iwl-6000.c
drivers/net/wireless/intel/iwlwifi/iwl-7000.c
drivers/net/wireless/intel/iwlwifi/iwl-8000.c
drivers/net/wireless/intel/iwlwifi/iwl-9000.c
drivers/net/wireless/intel/iwlwifi/iwl-config.h
drivers/net/wireless/intel/iwlwifi/iwl-drv.c
drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h
drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
drivers/net/wireless/intel/iwlwifi/iwl-fw.h
drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/intel/iwlwifi/iwl-prph.h
drivers/net/wireless/intel/iwlwifi/iwl-trans.h
drivers/net/wireless/intel/iwlwifi/mvm/Makefile
drivers/net/wireless/intel/iwlwifi/mvm/coex.c
drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c [deleted file]
drivers/net/wireless/intel/iwlwifi/mvm/constants.h
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
drivers/net/wireless/intel/iwlwifi/mvm/power.c
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
drivers/net/wireless/intel/iwlwifi/mvm/rs.h
drivers/net/wireless/intel/iwlwifi/mvm/rx.c
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/mvm/scan.c
drivers/net/wireless/intel/iwlwifi/mvm/sf.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.h
drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
drivers/net/wireless/intel/iwlwifi/mvm/tt.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/intel/iwlwifi/mvm/utils.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/intel/iwlwifi/pcie/internal.h
drivers/net/wireless/intel/iwlwifi/pcie/rx.c
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
drivers/net/wireless/intersil/hostap/hostap_hw.c
drivers/net/wireless/intersil/orinoco/cfg.c
drivers/net/wireless/intersil/orinoco/hw.c
drivers/net/wireless/intersil/orinoco/main.c
drivers/net/wireless/intersil/orinoco/orinoco_usb.c
drivers/net/wireless/intersil/orinoco/scan.c
drivers/net/wireless/intersil/p54/eeprom.c
drivers/net/wireless/intersil/p54/main.c
drivers/net/wireless/intersil/p54/p54.h
drivers/net/wireless/intersil/p54/txrx.c
drivers/net/wireless/intersil/prism54/isl_38xx.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mac80211_hwsim.h
drivers/net/wireless/marvell/libertas/cfg.c
drivers/net/wireless/marvell/libertas/cmd.c
drivers/net/wireless/marvell/libertas_tf/main.c
drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
drivers/net/wireless/marvell/mwifiex/cfg80211.c
drivers/net/wireless/marvell/mwifiex/cfp.c
drivers/net/wireless/marvell/mwifiex/cmdevt.c
drivers/net/wireless/marvell/mwifiex/fw.h
drivers/net/wireless/marvell/mwifiex/init.c
drivers/net/wireless/marvell/mwifiex/main.c
drivers/net/wireless/marvell/mwifiex/main.h
drivers/net/wireless/marvell/mwifiex/pcie.c
drivers/net/wireless/marvell/mwifiex/pcie.h
drivers/net/wireless/marvell/mwifiex/scan.c
drivers/net/wireless/marvell/mwifiex/sdio.c
drivers/net/wireless/marvell/mwifiex/sdio.h
drivers/net/wireless/marvell/mwifiex/sta_cmd.c
drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
drivers/net/wireless/marvell/mwifiex/sta_event.c
drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
drivers/net/wireless/marvell/mwifiex/tdls.c
drivers/net/wireless/marvell/mwifiex/txrx.c
drivers/net/wireless/marvell/mwifiex/uap_cmd.c
drivers/net/wireless/marvell/mwifiex/uap_txrx.c
drivers/net/wireless/marvell/mwifiex/usb.c
drivers/net/wireless/marvell/mwl8k.c
drivers/net/wireless/mediatek/mt7601u/init.c
drivers/net/wireless/ralink/rt2x00/rt2800lib.c
drivers/net/wireless/ralink/rt2x00/rt2x00.h
drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
drivers/net/wireless/ralink/rt2x00/rt61pci.c
drivers/net/wireless/ralink/rt2x00/rt73usb.c
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
drivers/net/wireless/realtek/rtlwifi/base.c
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
drivers/net/wireless/realtek/rtlwifi/pci.c
drivers/net/wireless/realtek/rtlwifi/regd.c
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c
drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
drivers/net/wireless/realtek/rtlwifi/wifi.h
drivers/net/wireless/rndis_wlan.c
drivers/net/wireless/rsi/rsi_91x_mac80211.c
drivers/net/wireless/rsi/rsi_91x_mgmt.c
drivers/net/wireless/rsi/rsi_91x_pkt.c
drivers/net/wireless/rsi/rsi_main.h
drivers/net/wireless/st/cw1200/main.c
drivers/net/wireless/st/cw1200/scan.c
drivers/net/wireless/st/cw1200/sta.c
drivers/net/wireless/st/cw1200/txrx.c
drivers/net/wireless/st/cw1200/wsm.c
drivers/net/wireless/ti/wl1251/main.c
drivers/net/wireless/ti/wl1251/ps.c
drivers/net/wireless/ti/wl1251/rx.c
drivers/net/wireless/ti/wl12xx/main.c
drivers/net/wireless/ti/wl12xx/scan.c
drivers/net/wireless/ti/wl18xx/cmd.c
drivers/net/wireless/ti/wl18xx/event.c
drivers/net/wireless/ti/wl18xx/main.c
drivers/net/wireless/ti/wl18xx/scan.c
drivers/net/wireless/ti/wl18xx/tx.c
drivers/net/wireless/ti/wlcore/cmd.c
drivers/net/wireless/ti/wlcore/cmd.h
drivers/net/wireless/ti/wlcore/main.c
drivers/net/wireless/ti/wlcore/ps.c
drivers/net/wireless/ti/wlcore/rx.c
drivers/net/wireless/ti/wlcore/rx.h
drivers/net/wireless/ti/wlcore/scan.c
drivers/net/wireless/ti/wlcore/tx.c
drivers/net/wireless/ti/wlcore/tx.h
drivers/net/wireless/ti/wlcore/wlcore.h
drivers/net/wireless/ti/wlcore/wlcore_i.h
drivers/net/wireless/wl3501_cs.c
drivers/net/wireless/zydas/zd1201.c
drivers/net/wireless/zydas/zd1211rw/zd_mac.c
drivers/nvdimm/btt.c
drivers/nvdimm/bus.c
drivers/nvdimm/core.c
drivers/nvdimm/nd.h
drivers/nvdimm/pfn_devs.c
drivers/nvdimm/pmem.c
drivers/nvme/host/pci.c
drivers/nvmem/mxs-ocotp.c
drivers/of/of_mdio.c
drivers/oprofile/oprofilefs.c
drivers/pci/access.c
drivers/pci/host/pci-imx6.c
drivers/pci/pci-sysfs.c
drivers/pci/pci.h
drivers/pcmcia/db1xxx_ss.c
drivers/perf/arm_pmu.c
drivers/phy/phy-rockchip-dp.c
drivers/phy/phy-rockchip-emmc.c
drivers/pinctrl/freescale/Kconfig
drivers/pinctrl/freescale/pinctrl-imx.c
drivers/pinctrl/intel/pinctrl-intel.c
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
drivers/pinctrl/nomadik/pinctrl-nomadik.c
drivers/pinctrl/pinctrl-pistachio.c
drivers/pinctrl/pinctrl-single.c
drivers/pinctrl/pinctrl-xway.c
drivers/pinctrl/qcom/pinctrl-ipq4019.c
drivers/pinctrl/sh-pfc/core.c
drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
drivers/pinctrl/sunxi/pinctrl-sunxi.c
drivers/pinctrl/sunxi/pinctrl-sunxi.h
drivers/platform/goldfish/goldfish_pipe.c
drivers/platform/x86/hp_accel.c
drivers/platform/x86/intel-hid.c
drivers/platform/x86/intel_pmc_ipc.c
drivers/platform/x86/intel_punit_ipc.c
drivers/platform/x86/intel_telemetry_pltdrv.c
drivers/platform/x86/thinkpad_acpi.c
drivers/platform/x86/toshiba_acpi.c
drivers/powercap/intel_rapl.c
drivers/pwm/pwm-fsl-ftm.c
drivers/rapidio/devices/rio_mport_cdev.c
drivers/remoteproc/st_remoteproc.c
drivers/rtc/rtc-ds1307.c
drivers/s390/block/dcssblk.c
drivers/s390/block/scm_blk.c
drivers/s390/char/sclp_ctl.c
drivers/s390/net/ctcm_main.c
drivers/s390/net/ctcm_mpc.c
drivers/s390/net/netiucv.c
drivers/s390/net/qeth_core_main.c
drivers/scsi/aacraid/linit.c
drivers/scsi/cxgbi/libcxgbi.c
drivers/scsi/cxlflash/main.c
drivers/scsi/cxlflash/main.h
drivers/scsi/device_handler/scsi_dh_alua.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/scsi.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/sd.c
drivers/scsi/sd.h
drivers/scsi/st.c
drivers/soc/mediatek/mtk-scpsys.c
drivers/spi/spi-imx.c
drivers/spi/spi-omap2-mcspi.c
drivers/spi/spi-rockchip.c
drivers/spi/spi.c
drivers/staging/Kconfig
drivers/staging/Makefile
drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
drivers/staging/lustre/include/linux/lnet/types.h
drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
drivers/staging/lustre/lnet/libcfs/debug.c
drivers/staging/lustre/lnet/libcfs/tracefile.c
drivers/staging/lustre/lnet/libcfs/tracefile.h
drivers/staging/lustre/lnet/lnet/lib-md.c
drivers/staging/lustre/lnet/lnet/lib-move.c
drivers/staging/lustre/lnet/lnet/lib-socket.c
drivers/staging/lustre/lnet/lnet/router.c
drivers/staging/lustre/lnet/selftest/brw_test.c
drivers/staging/lustre/lnet/selftest/conctl.c
drivers/staging/lustre/lnet/selftest/conrpc.c
drivers/staging/lustre/lnet/selftest/framework.c
drivers/staging/lustre/lnet/selftest/rpc.c
drivers/staging/lustre/lnet/selftest/selftest.h
drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
drivers/staging/lustre/lustre/include/lu_object.h
drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
drivers/staging/lustre/lustre/include/lustre_mdc.h
drivers/staging/lustre/lustre/include/lustre_net.h
drivers/staging/lustre/lustre/include/obd.h
drivers/staging/lustre/lustre/include/obd_support.h
drivers/staging/lustre/lustre/lclient/lcommon_cl.c
drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
drivers/staging/lustre/lustre/ldlm/ldlm_request.c
drivers/staging/lustre/lustre/llite/dir.c
drivers/staging/lustre/lustre/llite/llite_internal.h
drivers/staging/lustre/lustre/llite/llite_lib.c
drivers/staging/lustre/lustre/llite/llite_mmap.c
drivers/staging/lustre/lustre/llite/lloop.c
drivers/staging/lustre/lustre/llite/lproc_llite.c
drivers/staging/lustre/lustre/llite/rw.c
drivers/staging/lustre/lustre/llite/rw26.c
drivers/staging/lustre/lustre/llite/vvp_io.c
drivers/staging/lustre/lustre/llite/vvp_page.c
drivers/staging/lustre/lustre/lmv/lmv_obd.c
drivers/staging/lustre/lustre/mdc/mdc_request.c
drivers/staging/lustre/lustre/mgc/mgc_request.c
drivers/staging/lustre/lustre/obdclass/cl_page.c
drivers/staging/lustre/lustre/obdclass/class_obd.c
drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
drivers/staging/lustre/lustre/obdclass/lu_object.c
drivers/staging/lustre/lustre/obdecho/echo_client.c
drivers/staging/lustre/lustre/osc/lproc_osc.c
drivers/staging/lustre/lustre/osc/osc_cache.c
drivers/staging/lustre/lustre/osc/osc_page.c
drivers/staging/lustre/lustre/osc/osc_request.c
drivers/staging/lustre/lustre/ptlrpc/client.c
drivers/staging/lustre/lustre/ptlrpc/import.c
drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
drivers/staging/lustre/lustre/ptlrpc/recover.c
drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
drivers/staging/media/davinci_vpfe/vpfe_video.c
drivers/staging/olpc_dcon/Kconfig [new file with mode: 0644]
drivers/staging/olpc_dcon/Makefile [new file with mode: 0644]
drivers/staging/olpc_dcon/TODO [new file with mode: 0644]
drivers/staging/olpc_dcon/olpc_dcon.c [new file with mode: 0644]
drivers/staging/olpc_dcon/olpc_dcon.h [new file with mode: 0644]
drivers/staging/olpc_dcon/olpc_dcon_xo_1.c [new file with mode: 0644]
drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c [new file with mode: 0644]
drivers/staging/rdma/hfi1/Kconfig
drivers/staging/rdma/hfi1/TODO
drivers/staging/rdma/hfi1/file_ops.c
drivers/staging/rdma/hfi1/mmu_rb.c
drivers/staging/rdma/hfi1/mmu_rb.h
drivers/staging/rdma/hfi1/qp.c
drivers/staging/rdma/hfi1/user_exp_rcv.c
drivers/staging/rdma/hfi1/user_sdma.c
drivers/staging/rtl8188eu/os_dep/mon.c
drivers/staging/rtl8192e/rtl8192e/rtl_core.c
drivers/staging/rtl8192e/rtllib_softmac.c
drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
drivers/staging/rtl8192u/r8192U_core.c
drivers/staging/rtl8723au/core/rtw_mlme_ext.c
drivers/staging/rtl8723au/include/ieee80211.h
drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
drivers/staging/vt6655/channel.c
drivers/staging/vt6655/device_main.c
drivers/staging/vt6655/rxtx.c
drivers/staging/vt6656/channel.c
drivers/staging/vt6656/int.c
drivers/staging/vt6656/main_usb.c
drivers/staging/vt6656/rxtx.c
drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
drivers/staging/wlan-ng/cfg80211.c
drivers/staging/wlan-ng/p80211netdev.c
drivers/target/iscsi/iscsi_target_configfs.c
drivers/target/iscsi/iscsi_target_util.c
drivers/target/target_core_fabric_configfs.c
drivers/thermal/Kconfig
drivers/thermal/hisi_thermal.c
drivers/thermal/mtk_thermal.c
drivers/thermal/of-thermal.c
drivers/thermal/power_allocator.c
drivers/thermal/thermal_core.c
drivers/tty/n_gsm.c
drivers/tty/pty.c
drivers/tty/serial/8250/8250_port.c
drivers/tty/serial/8250/Kconfig
drivers/tty/serial/uartlite.c
drivers/tty/synclink.c
drivers/tty/synclink_gt.c
drivers/tty/synclinkmp.c
drivers/tty/tty_io.c
drivers/usb/class/cdc-acm.c
drivers/usb/core/config.c
drivers/usb/core/hcd-pci.c
drivers/usb/core/port.c
drivers/usb/core/usb.c
drivers/usb/dwc2/gadget.c
drivers/usb/dwc3/core.c
drivers/usb/dwc3/debugfs.c
drivers/usb/dwc3/dwc3-keystone.c
drivers/usb/dwc3/dwc3-omap.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/f_midi.c
drivers/usb/gadget/function/u_ether.c
drivers/usb/gadget/legacy/inode.c
drivers/usb/gadget/udc/atmel_usba_udc.c
drivers/usb/gadget/udc/udc-core.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-plat.c
drivers/usb/host/xhci-plat.h
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/musb/jz4740.c
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/musb_host.c
drivers/usb/phy/phy-qcom-8x16-usb.c
drivers/usb/renesas_usbhs/fifo.c
drivers/usb/renesas_usbhs/mod_gadget.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/cypress_m8.c
drivers/usb/serial/digi_acceleport.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/mct_u232.c
drivers/usb/serial/option.c
drivers/usb/storage/scsiglue.c
drivers/usb/storage/uas.c
drivers/usb/storage/unusual_uas.h
drivers/usb/storage/usb.c
drivers/usb/usbip/usbip_common.c
drivers/video/fbdev/amba-clcd.c
drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
drivers/video/fbdev/pvr2fb.c
drivers/virtio/virtio_pci_modern.c
drivers/virtio/virtio_ring.c
drivers/xen/balloon.c
drivers/xen/events/events_base.c
drivers/xen/evtchn.c
fs/9p/vfs_addr.c
fs/9p/vfs_file.c
fs/9p/vfs_super.c
fs/affs/file.c
fs/afs/dir.c
fs/afs/file.c
fs/afs/mntpt.c
fs/afs/rxrpc.c
fs/afs/super.c
fs/afs/write.c
fs/binfmt_elf.c
fs/binfmt_elf_fdpic.c
fs/block_dev.c
fs/btrfs/check-integrity.c
fs/btrfs/compression.c
fs/btrfs/ctree.c
fs/btrfs/dev-replace.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/file-item.c
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode-map.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/lzo.c
fs/btrfs/qgroup.c
fs/btrfs/raid56.c
fs/btrfs/reada.c
fs/btrfs/relocation.c
fs/btrfs/scrub.c
fs/btrfs/send.c
fs/btrfs/struct-funcs.c
fs/btrfs/tests/extent-io-tests.c
fs/btrfs/tests/free-space-tests.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/btrfs/zlib.c
fs/buffer.c
fs/cachefiles/rdwr.c
fs/ceph/addr.c
fs/ceph/caps.c
fs/ceph/dir.c
fs/ceph/file.c
fs/ceph/inode.c
fs/ceph/mds_client.c
fs/ceph/mds_client.h
fs/ceph/super.c
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/inode.c
fs/configfs/mount.c
fs/cramfs/README
fs/cramfs/inode.c
fs/crypto/crypto.c
fs/dax.c
fs/dcache.c
fs/debugfs/inode.c
fs/devpts/inode.c
fs/direct-io.c
fs/dlm/lowcomms.c
fs/ecryptfs/crypto.c
fs/ecryptfs/inode.c
fs/ecryptfs/keystore.c
fs/ecryptfs/main.c
fs/ecryptfs/mmap.c
fs/ecryptfs/read_write.c
fs/efivarfs/super.c
fs/exofs/dir.c
fs/exofs/inode.c
fs/exofs/namei.c
fs/ext2/dir.c
fs/ext2/namei.c
fs/ext4/crypto.c
fs/ext4/dir.c
fs/ext4/ext4.h
fs/ext4/file.c
fs/ext4/inline.c
fs/ext4/inode.c
fs/ext4/mballoc.c
fs/ext4/move_extent.c
fs/ext4/page-io.c
fs/ext4/readpage.c
fs/ext4/super.c
fs/ext4/symlink.c
fs/ext4/xattr.c
fs/f2fs/data.c
fs/f2fs/debug.c
fs/f2fs/dir.c
fs/f2fs/f2fs.h
fs/f2fs/file.c
fs/f2fs/inline.c
fs/f2fs/namei.c
fs/f2fs/node.c
fs/f2fs/recovery.c
fs/f2fs/segment.c
fs/f2fs/super.c
fs/freevxfs/vxfs_immed.c
fs/freevxfs/vxfs_lookup.c
fs/freevxfs/vxfs_subr.c
fs/fs-writeback.c
fs/fscache/page.c
fs/fuse/dev.c
fs/fuse/file.c
fs/fuse/inode.c
fs/gfs2/aops.c
fs/gfs2/bmap.c
fs/gfs2/file.c
fs/gfs2/meta_io.c
fs/gfs2/quota.c
fs/gfs2/rgrp.c
fs/hfs/bnode.c
fs/hfs/btree.c
fs/hfs/inode.c
fs/hfsplus/bitmap.c
fs/hfsplus/bnode.c
fs/hfsplus/btree.c
fs/hfsplus/inode.c
fs/hfsplus/super.c
fs/hfsplus/xattr.c
fs/hostfs/hostfs_kern.c
fs/hugetlbfs/inode.c
fs/isofs/compress.c
fs/isofs/inode.c
fs/jbd2/commit.c
fs/jbd2/journal.c
fs/jbd2/transaction.c
fs/jffs2/debug.c
fs/jffs2/file.c
fs/jffs2/fs.c
fs/jffs2/gc.c
fs/jffs2/nodelist.c
fs/jffs2/write.c
fs/jfs/jfs_metapage.c
fs/jfs/jfs_metapage.h
fs/jfs/super.c
fs/kernfs/mount.c
fs/libfs.c
fs/logfs/dev_bdev.c
fs/logfs/dev_mtd.c
fs/logfs/dir.c
fs/logfs/file.c
fs/logfs/readwrite.c
fs/logfs/segment.c
fs/logfs/super.c
fs/minix/dir.c
fs/minix/namei.c
fs/mpage.c
fs/ncpfs/dir.c
fs/ncpfs/ncplib_kernel.h
fs/nfs/blocklayout/blocklayout.c
fs/nfs/blocklayout/blocklayout.h
fs/nfs/client.c
fs/nfs/dir.c
fs/nfs/direct.c
fs/nfs/file.c
fs/nfs/inode.c
fs/nfs/internal.h
fs/nfs/nfs4file.c
fs/nfs/nfs4xdr.c
fs/nfs/objlayout/objio_osd.c
fs/nfs/pagelist.c
fs/nfs/pnfs.c
fs/nfs/read.c
fs/nfs/write.c
fs/nilfs2/bmap.c
fs/nilfs2/btnode.c
fs/nilfs2/dir.c
fs/nilfs2/gcinode.c
fs/nilfs2/inode.c
fs/nilfs2/mdt.c
fs/nilfs2/namei.c
fs/nilfs2/page.c
fs/nilfs2/recovery.c
fs/nilfs2/segment.c
fs/ntfs/aops.c
fs/ntfs/aops.h
fs/ntfs/attrib.c
fs/ntfs/bitmap.c
fs/ntfs/compress.c
fs/ntfs/dir.c
fs/ntfs/file.c
fs/ntfs/index.c
fs/ntfs/inode.c
fs/ntfs/lcnalloc.c
fs/ntfs/logfile.c
fs/ntfs/mft.c
fs/ntfs/ntfs.h
fs/ntfs/super.c
fs/ocfs2/alloc.c
fs/ocfs2/aops.c
fs/ocfs2/cluster/heartbeat.c
fs/ocfs2/dlm/dlmmaster.c
fs/ocfs2/dlmfs/dlmfs.c
fs/ocfs2/file.c
fs/ocfs2/mmap.c
fs/ocfs2/ocfs2.h
fs/ocfs2/quota_global.c
fs/ocfs2/refcounttree.c
fs/ocfs2/super.c
fs/orangefs/dir.c
fs/orangefs/inode.c
fs/orangefs/orangefs-bufmap.c
fs/orangefs/orangefs-debugfs.c
fs/orangefs/orangefs-utils.c
fs/orangefs/protocol.h
fs/orangefs/xattr.c
fs/overlayfs/super.c
fs/pipe.c
fs/pnode.c
fs/proc/base.c
fs/proc/task_mmu.c
fs/proc/vmcore.c
fs/pstore/inode.c
fs/qnx6/dir.c
fs/qnx6/inode.c
fs/qnx6/qnx6.h
fs/quota/dquot.c
fs/quota/netlink.c
fs/ramfs/inode.c
fs/reiserfs/file.c
fs/reiserfs/inode.c
fs/reiserfs/ioctl.c
fs/reiserfs/journal.c
fs/reiserfs/stree.c
fs/reiserfs/tail_conversion.c
fs/reiserfs/xattr.c
fs/seq_file.c
fs/splice.c
fs/squashfs/block.c
fs/squashfs/cache.c
fs/squashfs/decompressor.c
fs/squashfs/file.c
fs/squashfs/file_direct.c
fs/squashfs/lz4_wrapper.c
fs/squashfs/lzo_wrapper.c
fs/squashfs/page_actor.c
fs/squashfs/page_actor.h
fs/squashfs/super.c
fs/squashfs/symlink.c
fs/squashfs/xz_wrapper.c
fs/squashfs/zlib_wrapper.c
fs/sync.c
fs/sysv/dir.c
fs/sysv/namei.c
fs/ubifs/file.c
fs/ubifs/super.c
fs/ubifs/ubifs.h
fs/udf/file.c
fs/udf/inode.c
fs/udf/super.c
fs/udf/udfdecl.h
fs/udf/unicode.c
fs/ufs/balloc.c
fs/ufs/dir.c
fs/ufs/inode.c
fs/ufs/namei.c
fs/ufs/util.c
fs/ufs/util.h
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/xfs_aops.c
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_file.c
fs/xfs/xfs_linux.h
fs/xfs/xfs_mount.c
fs/xfs/xfs_mount.h
fs/xfs/xfs_pnfs.c
fs/xfs/xfs_super.c
include/acpi/acpi_bus.h
include/asm-generic/futex.h
include/drm/drm_cache.h
include/drm/ttm/ttm_bo_api.h
include/linux/ath9k_platform.h
include/linux/atomic.h
include/linux/backing-dev-defs.h
include/linux/bio.h
include/linux/blkdev.h
include/linux/bpf.h
include/linux/buffer_head.h
include/linux/can/dev.h
include/linux/ceph/auth.h
include/linux/ceph/libceph.h
include/linux/ceph/osd_client.h
include/linux/cgroup-defs.h
include/linux/compiler-gcc.h
include/linux/configfs.h
include/linux/cpuset.h
include/linux/dcache.h
include/linux/devpts_fs.h
include/linux/ethtool.h
include/linux/f2fs_fs.h
include/linux/filter.h
include/linux/fs.h
include/linux/fscrypto.h
include/linux/genl_magic_struct.h
include/linux/hash.h
include/linux/huge_mm.h
include/linux/ieee802154.h
include/linux/if_ether.h
include/linux/iommu.h
include/linux/ipv6.h
include/linux/lockdep.h
include/linux/mdio.h
include/linux/mlx4/device.h
include/linux/mlx5/device.h
include/linux/mlx5/driver.h
include/linux/mlx5/fs.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mlx5/port.h
include/linux/mlx5/qp.h
include/linux/mlx5/vport.h
include/linux/mm.h
include/linux/mm_types.h
include/linux/net.h
include/linux/netdev_features.h
include/linux/netdevice.h
include/linux/netfilter/ipset/ip_set.h
include/linux/netfilter/x_tables.h
include/linux/nfs_page.h
include/linux/nilfs2_fs.h
include/linux/nl802154.h
include/linux/of.h
include/linux/page-flags.h
include/linux/pagemap.h
include/linux/pci.h
include/linux/phy.h
include/linux/pmem.h
include/linux/qed/qed_eth_if.h
include/linux/qed/qed_if.h
include/linux/rculist.h
include/linux/rculist_nulls.h
include/linux/rio_mport_cdev.h [deleted file]
include/linux/sched.h
include/linux/sctp.h
include/linux/seq_file.h
include/linux/skbuff.h
include/linux/soc/qcom/smd.h
include/linux/socket.h
include/linux/sunrpc/svc.h
include/linux/swap.h
include/linux/thermal.h
include/linux/trace_events.h
include/linux/tty_driver.h
include/linux/u64_stats_sync.h
include/linux/usb_usual.h
include/media/videobuf2-core.h
include/net/6lowpan.h
include/net/act_api.h
include/net/af_rxrpc.h
include/net/bluetooth/hci.h
include/net/cfg80211.h
include/net/cls_cgroup.h
include/net/codel.h
include/net/codel_impl.h [new file with mode: 0644]
include/net/codel_qdisc.h [new file with mode: 0644]
include/net/devlink.h
include/net/dsa.h
include/net/dst.h
include/net/fq.h [new file with mode: 0644]
include/net/fq_impl.h [new file with mode: 0644]
include/net/gen_stats.h
include/net/geneve.h
include/net/gre.h
include/net/gtp.h [new file with mode: 0644]
include/net/icmp.h
include/net/ip.h
include/net/ip6_route.h
include/net/ip6_tunnel.h
include/net/ip_tunnels.h
include/net/ip_vs.h
include/net/ipv6.h
include/net/l3mdev.h
include/net/mac80211.h
include/net/mac802154.h
include/net/netfilter/nf_conntrack.h
include/net/netfilter/nf_conntrack_core.h
include/net/netfilter/nf_conntrack_ecache.h
include/net/netfilter/nf_conntrack_expect.h
include/net/netfilter/nf_conntrack_l4proto.h
include/net/netfilter/nf_conntrack_labels.h
include/net/netfilter/nf_tables.h
include/net/netlink.h
include/net/netns/conntrack.h
include/net/netns/ipv4.h
include/net/netns/xfrm.h
include/net/nl802154.h
include/net/route.h
include/net/rtnetlink.h
include/net/sctp/sctp.h
include/net/sctp/structs.h
include/net/snmp.h
include/net/sock.h
include/net/switchdev.h
include/net/tc_act/tc_mirred.h
include/net/tcp.h
include/net/transp_v6.h
include/net/udp.h
include/net/udp_tunnel.h
include/net/vxlan.h
include/net/xfrm.h
include/rdma/ib.h
include/rxrpc/packet.h
include/scsi/scsi_device.h
include/sound/hda_i915.h
include/sound/hda_regmap.h
include/target/target_core_fabric.h
include/trace/events/btrfs.h
include/trace/perf.h
include/uapi/asm-generic/unistd.h
include/uapi/linux/Kbuild
include/uapi/linux/bpf.h
include/uapi/linux/devlink.h
include/uapi/linux/fib_rules.h
include/uapi/linux/gen_stats.h
include/uapi/linux/gtp.h [new file with mode: 0644]
include/uapi/linux/if_bridge.h
include/uapi/linux/if_ether.h
include/uapi/linux/if_link.h
include/uapi/linux/if_macsec.h
include/uapi/linux/ila.h
include/uapi/linux/inet_diag.h
include/uapi/linux/ip_vs.h
include/uapi/linux/l2tp.h
include/uapi/linux/lwtunnel.h
include/uapi/linux/neighbour.h
include/uapi/linux/netfilter/ipset/ip_set.h
include/uapi/linux/netfilter/nf_tables.h
include/uapi/linux/netfilter/nfnetlink_acct.h
include/uapi/linux/netfilter/nfnetlink_conntrack.h
include/uapi/linux/netfilter/nfnetlink_queue.h
include/uapi/linux/nl80211.h
include/uapi/linux/openvswitch.h
include/uapi/linux/pkt_cls.h
include/uapi/linux/pkt_sched.h
include/uapi/linux/qrtr.h [new file with mode: 0644]
include/uapi/linux/quota.h
include/uapi/linux/rio_mport_cdev.h [new file with mode: 0644]
include/uapi/linux/rtnetlink.h
include/uapi/linux/stddef.h
include/uapi/linux/swab.h
include/uapi/linux/tc_act/tc_bpf.h
include/uapi/linux/tc_act/tc_connmark.h
include/uapi/linux/tc_act/tc_csum.h
include/uapi/linux/tc_act/tc_defact.h
include/uapi/linux/tc_act/tc_gact.h
include/uapi/linux/tc_act/tc_ife.h
include/uapi/linux/tc_act/tc_ipt.h
include/uapi/linux/tc_act/tc_mirred.h
include/uapi/linux/tc_act/tc_nat.h
include/uapi/linux/tc_act/tc_pedit.h
include/uapi/linux/tc_act/tc_skbedit.h
include/uapi/linux/tc_act/tc_vlan.h
include/uapi/linux/tcp_metrics.h
include/uapi/linux/udp.h
include/uapi/linux/usb/ch9.h
include/uapi/linux/v4l2-dv-timings.h
include/uapi/linux/virtio_config.h
include/uapi/linux/xfrm.h
include/video/imx-ipu-v3.h
include/xen/page.h
ipc/mqueue.c
kernel/bpf/core.c
kernel/bpf/helpers.c
kernel/bpf/inode.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/cgroup.c
kernel/cpu.c
kernel/cpuset.c
kernel/events/core.c
kernel/events/uprobes.c
kernel/futex.c
kernel/irq/ipi.c
kernel/kcov.c
kernel/kexec_core.c
kernel/locking/lockdep.c
kernel/locking/lockdep_proc.c
kernel/locking/qspinlock_stat.h
kernel/resource.c
kernel/sched/core.c
kernel/taskstats.c
kernel/time/tick-sched.c
kernel/time/tick-sched.h
kernel/trace/bpf_trace.c
kernel/trace/trace_events.c
kernel/workqueue.c
lib/assoc_array.c
lib/lz4/lz4defs.h
lib/nlattr.c
lib/stackdepot.c
lib/test_bpf.c
mm/backing-dev.c
mm/compaction.c
mm/fadvise.c
mm/filemap.c
mm/gup.c
mm/huge_memory.c
mm/hugetlb.c
mm/madvise.c
mm/memcontrol.c
mm/memory-failure.c
mm/memory.c
mm/migrate.c
mm/mincore.c
mm/nommu.c
mm/page-writeback.c
mm/page_alloc.c
mm/page_io.c
mm/readahead.c
mm/rmap.c
mm/shmem.c
mm/swap.c
mm/swap_state.c
mm/swapfile.c
mm/truncate.c
mm/userfaultfd.c
mm/vmscan.c
mm/zswap.c
net/6lowpan/6lowpan_i.h
net/6lowpan/core.c
net/6lowpan/debugfs.c
net/6lowpan/iphc.c
net/6lowpan/nhc_udp.c
net/Kconfig
net/Makefile
net/atm/lec.c
net/batman-adv/bat_iv_ogm.c
net/batman-adv/bat_v.c
net/batman-adv/bat_v_ogm.c
net/batman-adv/bitarray.c
net/batman-adv/bitarray.h
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/bridge_loop_avoidance.h
net/batman-adv/debugfs.c
net/batman-adv/distributed-arp-table.c
net/batman-adv/fragmentation.c
net/batman-adv/gateway_client.c
net/batman-adv/hard-interface.c
net/batman-adv/hard-interface.h
net/batman-adv/hash.h
net/batman-adv/icmp_socket.c
net/batman-adv/main.c
net/batman-adv/main.h
net/batman-adv/multicast.c
net/batman-adv/network-coding.c
net/batman-adv/originator.c
net/batman-adv/originator.h
net/batman-adv/packet.h
net/batman-adv/routing.c
net/batman-adv/routing.h
net/batman-adv/send.c
net/batman-adv/soft-interface.c
net/batman-adv/soft-interface.h
net/batman-adv/sysfs.c
net/batman-adv/translation-table.c
net/batman-adv/types.h
net/bluetooth/6lowpan.c
net/bluetooth/af_bluetooth.c
net/bluetooth/bnep/netdev.c
net/bluetooth/hci_event.c
net/bluetooth/hci_request.c
net/bluetooth/l2cap_sock.c
net/bridge/br_ioctl.c
net/bridge/br_mdb.c
net/bridge/br_multicast.c
net/bridge/br_netfilter_hooks.c
net/bridge/br_netfilter_ipv6.c
net/bridge/br_netlink.c
net/bridge/br_private.h
net/bridge/br_sysfs_br.c
net/bridge/br_sysfs_if.c
net/bridge/br_vlan.c
net/bridge/netfilter/ebtables.c
net/bridge/netfilter/nf_tables_bridge.c
net/ceph/auth.c
net/ceph/auth_none.c
net/ceph/auth_none.h
net/ceph/auth_x.c
net/ceph/auth_x.h
net/ceph/messenger.c
net/ceph/osd_client.c
net/ceph/pagelist.c
net/ceph/pagevec.c
net/core/dev.c
net/core/devlink.c
net/core/ethtool.c
net/core/fib_rules.c
net/core/filter.c
net/core/flow.c
net/core/gen_stats.c
net/core/neighbour.c
net/core/net-procfs.c
net/core/netpoll.c
net/core/pktgen.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/sock.c
net/core/sock_diag.c
net/dccp/dccp.h
net/dccp/input.c
net/dccp/ipv4.c
net/dccp/ipv6.c
net/dccp/minisocks.c
net/dccp/options.c
net/dccp/timer.c
net/decnet/dn_route.c
net/dsa/dsa.c
net/dsa/dsa_priv.h
net/dsa/slave.c
net/hsr/Kconfig
net/hsr/hsr_device.c
net/hsr/hsr_device.h
net/hsr/hsr_forward.c
net/hsr/hsr_framereg.c
net/hsr/hsr_main.h
net/hsr/hsr_netlink.c
net/hsr/hsr_slave.c
net/ieee802154/6lowpan/6lowpan_i.h
net/ieee802154/6lowpan/core.c
net/ieee802154/6lowpan/tx.c
net/ieee802154/nl-mac.c
net/ieee802154/nl802154.c
net/ipv4/af_inet.c
net/ipv4/arp.c
net/ipv4/fib_frontend.c
net/ipv4/fib_semantics.c
net/ipv4/fou.c
net/ipv4/gre_demux.c
net/ipv4/gre_offload.c
net/ipv4/icmp.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_diag.c
net/ipv4/inet_hashtables.c
net/ipv4/inet_timewait_sock.c
net/ipv4/ip_forward.c
net/ipv4/ip_fragment.c
net/ipv4/ip_gre.c
net/ipv4/ip_input.c
net/ipv4/ip_sockglue.c
net/ipv4/ip_tunnel.c
net/ipv4/ip_tunnel_core.c
net/ipv4/ip_vti.c
net/ipv4/ipip.c
net/ipv4/ipmr.c
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/arptable_filter.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
net/ipv4/route.c
net/ipv4/syncookies.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_bic.c
net/ipv4/tcp_cdg.c
net/ipv4/tcp_cubic.c
net/ipv4/tcp_fastopen.c
net/ipv4/tcp_htcp.c
net/ipv4/tcp_illinois.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_lp.c
net/ipv4/tcp_metrics.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_offload.c
net/ipv4/tcp_output.c
net/ipv4/tcp_recovery.c
net/ipv4/tcp_timer.c
net/ipv4/tcp_vegas.c
net/ipv4/tcp_vegas.h
net/ipv4/tcp_veno.c
net/ipv4/tcp_westwood.c
net/ipv4/tcp_yeah.c
net/ipv4/udp.c
net/ipv4/udp_offload.c
net/ipv6/Kconfig
net/ipv6/addrconf.c
net/ipv6/datagram.c
net/ipv6/exthdrs.c
net/ipv6/icmp.c
net/ipv6/ila/ila.h
net/ipv6/ila/ila_common.c
net/ipv6/ila/ila_lwt.c
net/ipv6/ila/ila_xlat.c
net/ipv6/inet6_hashtables.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_flowlabel.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_input.c
net/ipv6/ip6_offload.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6mr.c
net/ipv6/ipv6_sockglue.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/netfilter/ip6t_SYNPROXY.c
net/ipv6/netfilter/nf_reject_ipv6.c
net/ipv6/ping.c
net/ipv6/raw.c
net/ipv6/reassembly.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/syncookies.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/irda/irlan/irlan_eth.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_ip.c
net/l2tp/l2tp_ip6.c
net/l2tp/l2tp_netlink.c
net/l3mdev/l3mdev.c
net/llc/af_llc.c
net/llc/llc_proc.c
net/mac80211/cfg.c
net/mac80211/chan.c
net/mac80211/debugfs_netdev.c
net/mac80211/ibss.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/main.c
net/mac80211/mesh.c
net/mac80211/mesh_hwmp.c
net/mac80211/mesh_plink.c
net/mac80211/mlme.c
net/mac80211/rate.c
net/mac80211/rc80211_minstrel.c
net/mac80211/rc80211_minstrel_ht.c
net/mac80211/rx.c
net/mac80211/scan.c
net/mac80211/spectmgmt.c
net/mac80211/sta_info.c
net/mac80211/sta_info.h
net/mac80211/tdls.c
net/mac80211/trace.h
net/mac80211/tx.c
net/mac80211/util.c
net/mac80211/vht.c
net/mpls/af_mpls.c
net/mpls/mpls_gso.c
net/netfilter/ipvs/ip_vs_conn.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_nfct.c
net/netfilter/ipvs/ip_vs_pe_sip.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_ecache.c
net/netfilter/nf_conntrack_expect.c
net/netfilter/nf_conntrack_helper.c
net/netfilter/nf_conntrack_labels.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_proto_dccp.c
net/netfilter/nf_conntrack_proto_sctp.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_conntrack_proto_udp.c
net/netfilter/nf_conntrack_proto_udplite.c
net/netfilter/nf_conntrack_standalone.c
net/netfilter/nf_nat_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nf_tables_trace.c
net/netfilter/nfnetlink_acct.c
net/netfilter/nfnetlink_cttimeout.c
net/netfilter/nfnetlink_queue.c
net/netfilter/nft_counter.c
net/netfilter/nft_ct.c
net/netfilter/nft_dynset.c
net/netfilter/nft_limit.c
net/netfilter/nft_rbtree.c
net/netfilter/x_tables.c
net/netfilter/xt_connlabel.c
net/netlink/af_netlink.c
net/openvswitch/actions.c
net/openvswitch/conntrack.c
net/openvswitch/datapath.c
net/openvswitch/flow_netlink.c
net/openvswitch/vport-internal_dev.c
net/packet/af_packet.c
net/qrtr/Kconfig [new file with mode: 0644]
net/qrtr/Makefile [new file with mode: 0644]
net/qrtr/qrtr.c [new file with mode: 0644]
net/qrtr/qrtr.h [new file with mode: 0644]
net/qrtr/smd.c [new file with mode: 0644]
net/rds/cong.c
net/rds/ib_cm.c
net/rds/ib_recv.c
net/rds/page.c
net/rds/tcp.c
net/rds/tcp.h
net/rds/tcp_connect.c
net/rds/tcp_listen.c
net/rds/tcp_recv.c
net/rxrpc/Kconfig
net/rxrpc/Makefile
net/rxrpc/af_rxrpc.c
net/rxrpc/ar-accept.c
net/rxrpc/ar-ack.c
net/rxrpc/ar-call.c
net/rxrpc/ar-connection.c
net/rxrpc/ar-connevent.c
net/rxrpc/ar-input.c
net/rxrpc/ar-internal.h
net/rxrpc/ar-output.c
net/rxrpc/ar-proc.c
net/rxrpc/ar-recvmsg.c
net/rxrpc/ar-security.c
net/rxrpc/insecure.c [new file with mode: 0644]
net/rxrpc/misc.c [new file with mode: 0644]
net/rxrpc/rxkad.c
net/sched/act_api.c
net/sched/act_bpf.c
net/sched/act_connmark.c
net/sched/act_csum.c
net/sched/act_gact.c
net/sched/act_ife.c
net/sched/act_ipt.c
net/sched/act_mirred.c
net/sched/act_nat.c
net/sched/act_pedit.c
net/sched/act_simple.c
net/sched/act_skbedit.c
net/sched/act_vlan.c
net/sched/cls_bpf.c
net/sched/cls_u32.c
net/sched/em_meta.c
net/sched/sch_api.c
net/sched/sch_codel.c
net/sched/sch_fq_codel.c
net/sched/sch_generic.c
net/sched/sch_htb.c
net/sched/sch_netem.c
net/sched/sch_tbf.c
net/sctp/Kconfig
net/sctp/Makefile
net/sctp/chunk.c
net/sctp/input.c
net/sctp/inqueue.c
net/sctp/ipv6.c
net/sctp/output.c
net/sctp/outqueue.c
net/sctp/proc.c
net/sctp/sctp_diag.c [new file with mode: 0644]
net/sctp/sm_make_chunk.c
net/sctp/sm_sideeffect.c
net/sctp/socket.c
net/sctp/transport.c
net/sctp/ulpqueue.c
net/socket.c
net/sunrpc/auth_gss/auth_gss.c
net/sunrpc/auth_gss/gss_krb5_crypto.c
net/sunrpc/auth_gss/gss_krb5_mech.c
net/sunrpc/auth_gss/gss_krb5_wrap.c
net/sunrpc/cache.c
net/sunrpc/rpc_pipe.c
net/sunrpc/socklib.c
net/sunrpc/svcsock.c
net/sunrpc/xdr.c
net/sunrpc/xprtsock.c
net/switchdev/switchdev.c
net/tipc/bearer.h
net/tipc/core.c
net/tipc/core.h
net/tipc/link.c
net/tipc/link.h
net/tipc/msg.h
net/tipc/name_distr.c
net/tipc/node.c
net/tipc/node.h
net/tipc/server.c
net/tipc/server.h
net/tipc/socket.c
net/tipc/socket.h
net/tipc/subscr.c
net/vmw_vsock/af_vsock.c
net/vmw_vsock/vmci_transport.c
net/wireless/chan.c
net/wireless/core.c
net/wireless/debugfs.c
net/wireless/ibss.c
net/wireless/mesh.c
net/wireless/mlme.c
net/wireless/nl80211.c
net/wireless/rdev-ops.h
net/wireless/reg.c
net/wireless/reg.h
net/wireless/scan.c
net/wireless/sme.c
net/wireless/trace.h
net/wireless/util.c
net/wireless/wext-compat.c
net/xfrm/xfrm_output.c
net/xfrm/xfrm_user.c
samples/bpf/Makefile
samples/bpf/README.rst [new file with mode: 0644]
samples/bpf/bpf_helpers.h
samples/bpf/map_perf_test_user.c
samples/bpf/offwaketime_kern.c
samples/bpf/parse_ldabs.c [new file with mode: 0644]
samples/bpf/parse_simple.c [new file with mode: 0644]
samples/bpf/parse_varlen.c [new file with mode: 0644]
samples/bpf/spintest_kern.c
samples/bpf/test_cls_bpf.sh [new file with mode: 0755]
samples/bpf/test_verifier.c
samples/bpf/trace_output_kern.c
samples/bpf/tracex1_kern.c
samples/bpf/tracex2_kern.c
samples/bpf/tracex4_kern.c
samples/bpf/tracex5_kern.c
scripts/asn1_compiler.c
scripts/mod/file2alias.c
security/integrity/ima/ima_policy.c
security/selinux/nlmsgtab.c
sound/hda/ext/hdac_ext_stream.c
sound/hda/hdac_device.c
sound/hda/hdac_i915.c
sound/hda/hdac_regmap.c
sound/isa/sscape.c
sound/pci/hda/hda_generic.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_cirrus.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/pcxhr/pcxhr_core.c
sound/soc/codecs/Kconfig
sound/soc/codecs/arizona.c
sound/soc/codecs/arizona.h
sound/soc/codecs/cs35l32.c
sound/soc/codecs/cs47l24.c
sound/soc/codecs/hdac_hdmi.c
sound/soc/codecs/nau8825.c
sound/soc/codecs/rt5640.c
sound/soc/codecs/rt5640.h
sound/soc/codecs/wm5102.c
sound/soc/codecs/wm5110.c
sound/soc/codecs/wm8962.c
sound/soc/codecs/wm8997.c
sound/soc/codecs/wm8998.c
sound/soc/intel/Kconfig
sound/soc/intel/haswell/sst-haswell-ipc.c
sound/soc/intel/skylake/skl-sst-dsp.c
sound/soc/intel/skylake/skl-topology.c
sound/soc/intel/skylake/skl-topology.h
sound/soc/intel/skylake/skl.c
sound/soc/soc-dapm.c
sound/usb/Kconfig
sound/usb/Makefile
sound/usb/card.c
sound/usb/card.h
sound/usb/media.c [deleted file]
sound/usb/media.h [deleted file]
sound/usb/mixer.h
sound/usb/mixer_maps.c
sound/usb/pcm.c
sound/usb/quirks-table.h
sound/usb/quirks.c
sound/usb/stream.c
sound/usb/usbaudio.h
tools/lib/lockdep/run_tests.sh
tools/net/bpf_jit_disasm.c
tools/objtool/Documentation/stack-validation.txt
tools/objtool/builtin-check.c
tools/perf/MANIFEST
tools/perf/arch/powerpc/util/header.c
tools/perf/tests/perf-targz-src-pkg
tools/perf/ui/browsers/hists.c
tools/perf/util/event.c
tools/perf/util/genelf.h
tools/perf/util/intel-bts.c
tools/perf/util/intel-pt.c
tools/perf/util/jitdump.c
tools/power/x86/turbostat/turbostat.c
tools/testing/selftests/net/.gitignore
tools/testing/selftests/net/Makefile
tools/testing/selftests/net/reuseport_dualstack.c [new file with mode: 0644]
tools/testing/selftests/seccomp/seccomp_bpf.c
virt/kvm/arm/arch_timer.c
virt/kvm/arm/pmu.c

index 90c0aefc276d4c2444098fba232e3f8e2f200ae0..08b80428f5837001940381417e0b1d0be1125b2c 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -48,6 +48,9 @@ Felix Kuhling <fxkuehl@gmx.de>
 Felix Moeller <felix@derklecks.de>
 Filipe Lautert <filipe@icewall.org>
 Franck Bui-Huu <vagabon.xyz@gmail.com>
+Frank Rowand <frowand.list@gmail.com> <frowand@mvista.com>
+Frank Rowand <frowand.list@gmail.com> <frank.rowand@am.sony.com>
+Frank Rowand <frowand.list@gmail.com> <frank.rowand@sonymobile.com>
 Frank Zago <fzago@systemfabricworks.com>
 Greg Kroah-Hartman <greg@echidna.(none)>
 Greg Kroah-Hartman <gregkh@suse.de>
@@ -66,6 +69,7 @@ Jean Tourrilhes <jt@hpl.hp.com>
 Jeff Garzik <jgarzik@pretzel.yyz.us>
 Jens Axboe <axboe@suse.de>
 Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
+John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
 John Stultz <johnstul@us.ibm.com>
 <josh@joshtriplett.org> <josh@freedesktop.org>
 <josh@joshtriplett.org> <josh@kernel.org>
@@ -79,6 +83,7 @@ Kay Sievers <kay.sievers@vrfy.org>
 Kenneth W Chen <kenneth.w.chen@intel.com>
 Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
 Koushik <raghavendra.koushik@neterion.com>
+Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski.k@gmail.com>
 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
 Leonid I Ananiev <leonid.i.ananiev@intel.com>
 Linas Vepstas <linas@austin.ibm.com>
index 7ac7d7262bb718f7d9e1d5e60c14491a4d7e7db0..3c3514815cd53bada6852068370d7cd193be9293 100644 (file)
@@ -1,23 +1,18 @@
-What:          /sys/devices/platform/<i2c-demux-name>/cur_master
+What:          /sys/devices/platform/<i2c-demux-name>/available_masters
 Date:          January 2016
 KernelVersion: 4.6
 Contact:       Wolfram Sang <wsa@the-dreams.de>
 Description:
+               Reading the file will give you a list of masters which can be
+               selected for a demultiplexed bus. The format is
+               "<index>:<name>". Example from a Renesas Lager board:
 
-This file selects the active I2C master for a demultiplexed bus.
+               0:/i2c@e6500000 1:/i2c@e6508000
 
-Write 0 there for the first master, 1 for the second etc. Reading the file will
-give you a list with the active master marked. Example from a Renesas Lager
-board:
-
-root@Lager:~# cat /sys/devices/platform/i2c@8/cur_master
-* 0 - /i2c@9
-  1 - /i2c@e6520000
-  2 - /i2c@e6530000
-
-root@Lager:~# echo 2 > /sys/devices/platform/i2c@8/cur_master
-
-root@Lager:~# cat /sys/devices/platform/i2c@8/cur_master
-  0 - /i2c@9
-  1 - /i2c@e6520000
-* 2 - /i2c@e6530000
+What:          /sys/devices/platform/<i2c-demux-name>/current_master
+Date:          January 2016
+KernelVersion: 4.6
+Contact:       Wolfram Sang <wsa@the-dreams.de>
+Description:
+               This file selects/shows the active I2C master for a demultiplexed
+               bus. It uses the <index> value from the file 'available_masters'.
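
As a hedged illustration of this ABI, a user-space helper can switch masters
by writing an <index> taken from 'available_masters' into 'current_master'.
A minimal C sketch; the platform device name "i2c@8" is a placeholder for
the real <i2c-demux-name>:

        #include <stdio.h>

        int main(void)
        {
                /* "i2c@8" is hypothetical; substitute the board's
                 * <i2c-demux-name>. */
                FILE *f = fopen("/sys/devices/platform/i2c@8/current_master",
                                "w");

                if (!f)
                        return 1;
                fprintf(f, "1\n");      /* <index> from available_masters */
                return fclose(f) ? 1 : 0;
        }
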
index f9b9ad7894f580b9d6add880f9bfa4a702e35153..f2a312b35875d9744161ec5cbf307ac8ab9dfe75 100644 (file)
@@ -75,7 +75,6 @@
       <chapter>
       <title>Device registration</title>
 !Pinclude/net/cfg80211.h Device registration
-!Finclude/net/cfg80211.h ieee80211_band
 !Finclude/net/cfg80211.h ieee80211_channel_flags
 !Finclude/net/cfg80211.h ieee80211_channel
 !Finclude/net/cfg80211.h ieee80211_rate_flags
index 7785fb5eb93f594af7518ceee2714bdf6681bbbb..b5ca536e56a8b971060c9b6d83e86f18cc1e2221 100644 (file)
@@ -505,6 +505,8 @@ int main(int argc, char *argv[])
                                                if (!loop)
                                                        goto done;
                                                break;
+                                       case TASKSTATS_TYPE_NULL:
+                                               break;
                                        default:
                                                fprintf(stderr, "Unknown nested"
                                                        " nla_type %d\n",
@@ -512,7 +514,8 @@ int main(int argc, char *argv[])
                                                break;
                                        }
                                        len2 += NLA_ALIGN(na->nla_len);
-                                       na = (struct nlattr *) ((char *) na + len2);
+                                       na = (struct nlattr *)((char *)na +
+                                                              NLA_ALIGN(na->nla_len));
                                }
                                break;
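
Besides ignoring the TASKSTATS_TYPE_NULL padding attribute, the hunk above
fixes the walk over nested attributes: the old code advanced 'na' by the
accumulated offset 'len2' rather than by the aligned length of the current
attribute, so every attribute after the first was read from the wrong
address. A minimal sketch of the corrected iteration pattern, assuming only
struct nlattr and NLA_ALIGN() from <linux/netlink.h>:

        #include <linux/netlink.h>

        /* Walk a run of nested netlink attributes of total payload
         * length 'len'.  Advance by the aligned size of the current
         * attribute only, never by the accumulated offset. */
        static void walk_nested(struct nlattr *na, int len)
        {
                int len2 = 0;

                while (len2 < len) {
                        /* dispatch on na->nla_type here */
                        len2 += NLA_ALIGN(na->nla_len);
                        na = (struct nlattr *)((char *)na +
                                               NLA_ALIGN(na->nla_len));
                }
        }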
 
index 1ae98b87c6402fa7fec85bd5b67950b3be48b54f..e4b9dcee6d41a12579969691dab40289f21bfe58 100644 (file)
@@ -2,7 +2,7 @@
 
 The ARC HS can be configured with a pipeline performance monitor for counting
 CPU and cache events like cache misses and hits. Like conventional PCT there
-are 100+ hardware conditions dynamically mapped to upto 32 counters.
+are 100+ hardware conditions dynamically mapped to up to 32 counters.
 It also supports overflow interrupts.
 
 Required properties:
index 7b9588444f20b7a15cf9e317f243ae616f4631d8..4e874d9a38a6e5cd4a60ba2651fc24a42285dc2a 100644 (file)
@@ -2,7 +2,7 @@
 
 The ARC700 can be configured with a pipeline performance monitor for counting
 CPU and cache events like cache misses and hits. Like conventional PCT there
-are 100+ hardware conditions dynamically mapped to upto 32 counters
+are 100+ hardware conditions dynamically mapped to up to 32 counters
 
 Note that:
  * The ARC 700 PCT does not support interrupts; although HW events may be
index ccc62f1453066fc10ba23cc3f3fa037ee9ccac9c..3f0cbbb8395f84ef1bc1cba898854d386f236b53 100644 (file)
@@ -192,7 +192,6 @@ nodes to be present and contain the properties described below.
                          can be one of:
                            "allwinner,sun6i-a31"
                            "allwinner,sun8i-a23"
-                           "arm,psci"
                            "arm,realview-smp"
                            "brcm,bcm-nsp-smp"
                            "brcm,brahma-b15"
index 30df832a6f2f513f4e23841dd0a0ad888cc13dba..87adfb227ca92facd1b3dfe75501b21e9a610a52 100644 (file)
@@ -32,6 +32,10 @@ Optional properties:
 - target-supply     : regulator for SATA target power
 - phys              : reference to the SATA PHY node
 - phy-names         : must be "sata-phy"
+- ports-implemented : Mask that indicates which ports the HBA supports
+                     are available for software to use. Useful if PORTS_IMPL
+                     is not programmed by the BIOS, which is true on
+                     some embedded SoCs; see the sketch after this hunk.
 
 Required properties when using sub-nodes:
 - #address-cells    : number of cells to encode an address
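
As a hedged sketch only (the address and label are hypothetical), a board
whose firmware leaves PORTS_IMPL unprogrammed but wires up only port 0
could describe that as:

        ahci: sata@29000000 {
                compatible = "generic-ahci";
                reg = <0x29000000 0x180>;
                ports-implemented = <0x1>;      /* one bit per usable port */
        };
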
diff --git a/Documentation/devicetree/bindings/btmrvl.txt b/Documentation/devicetree/bindings/btmrvl.txt
deleted file mode 100644 (file)
index 58f964b..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-btmrvl
-------
-
-Required properties:
-
-  - compatible : must be "btmrvl,cfgdata"
-
-Optional properties:
-
-  - btmrvl,cal-data : Calibration data downloaded to the device during
-                     initialization. This is an array of 28 values(u8).
-
-  - btmrvl,gpio-gap : gpio and gap (in msecs) combination to be
-                     configured.
-
-Example:
-
-GPIO pin 13 is configured as a wakeup source and GAP is set to 100 msecs
-in below example.
-
-btmrvl {
-       compatible = "btmrvl,cfgdata";
-
-       btmrvl,cal-data = /bits/ 8 <
-               0x37 0x01 0x1c 0x00 0xff 0xff 0xff 0xff 0x01 0x7f 0x04 0x02
-               0x00 0x00 0xba 0xce 0xc0 0xc6 0x2d 0x00 0x00 0x00 0x00 0x00
-               0x00 0x00 0xf0 0x00>;
-       btmrvl,gpio-gap = <0x0d64>;
-};
index e0fc2c11dd00bb9bc903b746b709829b3108bc9d..241fb0545b9eebce2bd70277d44fcffd77365a28 100644 (file)
@@ -3,7 +3,7 @@ Binding for Qualcomm Atheros AR7xxx/AR9XXX PLL controller
 The PLL controller provides the 3 main clocks of the SoC: CPU, DDR and AHB.
 
 Required Properties:
-- compatible: has to be "qca,<soctype>-cpu-intc" and one of the following
+- compatible: has to be "qca,<soctype>-pll" and one of the following
   fallbacks:
   - "qca,ar7100-pll"
   - "qca,ar7240-pll"
@@ -21,8 +21,8 @@ Optional properties:
 
 Example:
 
-       memory-controller@18050000 {
-               compatible = "qca,ar9132-ppl", "qca,ar9130-pll";
+       pll-controller@18050000 {
+               compatible = "qca,ar9132-pll", "qca,ar9130-pll";
                reg = <0x18050000 0x20>;
 
                clock-names = "ref";
index f0d71bc52e64be39cea42a7cc04b1e05662ad641..0b4a85fe2d8633a194a7bb38eed7ef4497c8da96 100644 (file)
@@ -6,8 +6,8 @@ RK3xxx SoCs.
 Required properties :
 
  - reg : Offset and length of the register set for the device
- - compatible : should be "rockchip,rk3066-i2c", "rockchip,rk3188-i2c" or
-               "rockchip,rk3288-i2c".
+ - compatible : should be "rockchip,rk3066-i2c", "rockchip,rk3188-i2c",
+               "rockchip,rk3228-i2c" or "rockchip,rk3288-i2c".
  - interrupts : interrupt number
  - clocks : parent clock
 
index 078060a97f95e677789fc7f6fd5f00904424d66b..05f705e32a4a651e4a724b9450560a4a7d343e00 100644 (file)
@@ -18,6 +18,8 @@ Required properties for all the ethernet interfaces:
   - First is the Rx interrupt.  This irq is mandatory.
   - Second is the Tx completion interrupt.
     This is supported only on SGMII based 1GbE and 10GbE interfaces.
+- channel: Ethernet to CPU, start channel (prefetch buffer) number
+  - Must map to the first irq and irqs must be sequential
 - port-id: Port number (0 or 1)
 - clocks: Reference to the clock entry.
 - local-mac-address: MAC address assigned to this device
index 28a4781ab6d7b9d6a1ab553ed96857f0f509e250..0ae06491b4302209607340a5f4b250c7ca1fe100 100644 (file)
@@ -45,13 +45,13 @@ Required properties:
 Optional properties:
 - dual_emac_res_vlan   : Specifies VID to be used to segregate the ports
 - mac-address          : See ethernet.txt file in the same directory
-- phy_id               : Specifies slave phy id
+- phy_id               : Specifies slave phy id (deprecated, use phy-handle; see the sketch below)
 - phy-handle           : See ethernet.txt file in the same directory
 
 Slave sub-nodes:
 - fixed-link           : See fixed-link.txt file in the same directory
-                         Either the property phy_id, or the sub-node
-                         fixed-link can be specified
+
+Note: Exactly one of phy_id, phy-handle, or fixed-link must be specified.
 
 Note: "ti,hwmods" field is used to fetch the base address and irq
 resources from TI, omap hwmod data base during device registration.
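
As a sketch of the preferred style (node labels are hypothetical), a new
design points each slave at its PHY node via phy-handle instead of the
deprecated phy_id:

        cpsw_emac0: slave@0 {
                phy-handle = <&ethphy0>;
        };
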
index 5fdbbcdf8c4b84109cfeb28d0c77397d0f93aa42..9f4807f90c31f3bf35ed9bc97ce4692bd5204c44 100644 (file)
@@ -31,8 +31,6 @@ A switch child node has the following optional property:
                          switch. Must be set if the switch can not detect
                          the presence and/or size of a connected EEPROM,
                          otherwise optional.
-- reset-gpios          : phandle and specifier to a gpio line connected to
-                         reset pin of the switch chip.
 
 A switch may have multiple "port" children nodes
 
diff --git a/Documentation/devicetree/bindings/net/dsa/marvell.txt b/Documentation/devicetree/bindings/net/dsa/marvell.txt
new file mode 100644 (file)
index 0000000..7629189
--- /dev/null
@@ -0,0 +1,35 @@
+Marvell DSA Switch Device Tree Bindings
+---------------------------------------
+
+WARNING: This binding is currently unstable. Do not program it into a
+FLASH never to be changed again. Once this binding is stable, this
+warning will be removed.
+
+If you need a stable binding, use the old dsa.txt binding.
+
+Marvell Switches are MDIO devices. The following properties should be
+placed as a child node of an mdio device.
+
+The properties described here are those specific to Marvell devices.
+Additional required and optional properties can be found in dsa.txt.
+
+Required properties:
+- compatible           : Should be one of "marvell,mv88e6085",
+- reg                  : Address on the MII bus for the switch.
+
+Optional properties:
+
+- reset-gpios          : Should be a gpio specifier for a reset line
+
+Example:
+
+       mdio {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               switch0: switch@0 {
+                       compatible = "marvell,mv88e6085";
+                       reg = <0>;
+                       reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
+               };
+       };
index ecacfa44b1eb9603d8b77c712dcff565ef4f470d..d4b7f2e4998403113158027d99fbaef8d430dcc6 100644 (file)
@@ -7,19 +7,45 @@ Required properties:
 - mode: dsa fabric mode string. Only one of the following dsaf modes is supported:
                "2port-64vf",
                "6port-16rss",
-               "6port-16vf".
+               "6port-16vf",
+               "single-port".
 - interrupt-parent: the interrupt parent of this device.
 - interrupts: should contain the DSA Fabric and rcb interrupt.
 - reg: specifies base physical address(es) and size of the device registers.
-  The first region is external interface control register base and size.
-  The second region is SerDes base register and size.
+  The first region is the external interface control register base and size
+  (optional, only used when subctrl-syscon does not exist). It is recommended
+  to use subctrl-syscon rather than this address.
+  The second region is the SerDes base register and size (optional, only used
+  when serdes-syscon in the port node does not exist). It is recommended to
+  use serdes-syscon rather than this address.
   The third region is the PPE register base and size.
-  The fourth region is dsa fabric base register and size.
-  The fifth region is cpld base register and size, it is not required if do not use cpld.
-- phy-handle: phy handle of physicl port, 0 if not any phy device. see ethernet.txt [1].
+  The fourth region is dsa fabric base register and size. It is not required for
+  single-port mode.
+- reg-names: may be "ppe-base" and/or "dsaf-base". It is used to find the
+  index of the corresponding reg entry.
+
+- phy-handle: phy handle of the physical port, 0 if there is no phy device.
+  This attribute is optional. If port nodes exist, the phy-handle in each
+  port node will be used instead. See ethernet.txt [1].
+- subctrl-syscon: is syscon handle for external interface control register.
+- reset-field-offset: is offset of reset field. Its value depends on the hardware
+  user manual.
 - buf-size: rx buffer size, should be 16-1024.
 - desc-num: number of descriptors in the TX and RX queues, should be 512, 1024, 2048 or 4096.
 
+- port: subnodes of dsaf. A dsaf node may contain several port nodes (depending
+  on the dsaf mode). Each port node contains the attributes listed below:
+- reg: is physical port index in one dsaf.
+- phy-handle: phy handle of the physical port. It is not required if there is
+  no phy device. See ethernet.txt [1].
+- serdes-syscon: is syscon handle for SerDes register.
+- cpld-syscon: is syscon handle + register offset pair for cpld register. It is
+  not required if there is no cpld device.
+- port-rst-offset: is offset of reset field for each port in dsaf. Its value
+  depends on the hardware user manual.
+- port-mode-offset: is offset of port mode field for each port in dsaf. Its
+  value depends on the hardware user manual.
+
 [1] Documentation/devicetree/bindings/net/phy.txt
 
 Example:
@@ -28,11 +54,11 @@ dsaf0: dsa@c7000000 {
        compatible = "hisilicon,hns-dsaf-v1";
        mode = "6port-16rss";
        interrupt-parent = <&mbigen_dsa>;
-       reg = <0x0 0xC0000000 0x0 0x420000
-              0x0 0xC2000000 0x0 0x300000
-              0x0 0xc5000000 0x0 0x890000
+       reg = <0x0 0xc5000000 0x0 0x890000
               0x0 0xc7000000 0x0 0x60000>;
-       phy-handle = <0 0 0 0 &soc0_phy4 &soc0_phy5 0 0>;
+       reg-names = "ppe-base", "dsaf-base";
+       subctrl-syscon = <&subctrl>;
+       reset-field-offset = <0>;
        interrupts = <131 4>,<132 4>, <133 4>,<134 4>,
                     <135 4>,<136 4>, <137 4>,<138 4>,
                     <139 4>,<140 4>, <141 4>,<142 4>,
@@ -43,4 +69,15 @@ dsaf0: dsa@c7000000 {
        buf-size = <4096>;
        desc-num = <1024>;
        dma-coherent;
+
+       port@0 {
+               reg = <0>;
+               phy-handle = <&phy0>;
+               serdes-syscon = <&serdes>;
+       };
+
+       port@1 {
+               reg = <1>;
+               serdes-syscon = <&serdes>;
+       };
 };
index e6a9d1c30878f89ff948d5fcd8cbe308e53327df..b9ff4ba6454e49e98e89ca2f99d1f4d03f09e275 100644 (file)
@@ -36,6 +36,34 @@ Required properties:
                        | | | | | |
                       external port
 
+  This attribute is retained for compatibility purposes. It is not recommended
+  for use in new code.
+
+- port-idx-in-ae: is the index of port provided by AE.
+  In NIC mode of DSAF, all 6 PHYs of service DSAF are taken as ethernet ports
+  to the CPU. The port-idx-in-ae can be 0 to 5. Here is the diagram:
+            +-----+---------------+
+            |            CPU      |
+            +-+-+-+---+-+-+-+-+-+-+
+              |    |   | | | | | |
+           debug debug   service
+           port  port     port
+           (0)   (0)     (0-5)
+
+  In Switch mode of DSAF, all 6 PHYs of the service DSAF are taken as physical
+  ports connected to a LAN switch, while the CPU side assumes it has one
+  single NIC connected to this switch. In this case, the port-idx-in-ae
+  can only be 0.
+            +-----+-----+------+------+
+            |                CPU      |
+            +-+-+-+-+-+-+-+-+-+-+-+-+-+
+              |    |     service| port(0)
+            debug debug  +------------+
+            port  port   |   switch   |
+            (0)   (0)    +-+-+-+-+-+-++
+                          | | | | | |
+                         external port
+
 - local-mac-address: mac addr of the ethernet interface
 
 Example:
@@ -43,6 +71,6 @@ Example:
        ethernet@0{
                compatible = "hisilicon,hns-nic-v1";
                ae-handle = <&dsaf0>;
-               port-id = <0>;
+               port-idx-in-ae = <0>;
                local-mac-address = [a2 14 e4 4b 56 76];
        };
diff --git a/Documentation/devicetree/bindings/net/marvell-bt-sd8xxx.txt b/Documentation/devicetree/bindings/net/marvell-bt-sd8xxx.txt
new file mode 100644 (file)
index 0000000..14aa6cf
--- /dev/null
@@ -0,0 +1,56 @@
+Marvell 8897/8997 (sd8897/sd8997) bluetooth SDIO devices
+------
+
+Required properties:
+
+  - compatible : should be one of the following:
+       * "marvell,sd8897-bt"
+       * "marvell,sd8997-bt"
+
+Optional properties:
+
+  - marvell,cal-data: Calibration data downloaded to the device during
+                     initialization. This is an array of 28 values(u8).
+
+  - marvell,wakeup-pin: the wakeup pin number of the bluetooth chip. The
+                       firmware will use this pin to wake up the host system.
+  - marvell,wakeup-gap-ms: the wakeup gap represents the wakeup latency of the
+                     host platform. The value will be passed to the firmware.
+                     This is needed for the chip's sleep feature to work as
+                     expected.
+  - interrupt-parent: phandle of the parent interrupt controller
+  - interrupts : interrupt pin number to the cpu. The driver will request an
+                irq based on this interrupt number. During system suspend, the
+                irq will be enabled so that the bluetooth chip can wake up the
+                host platform under certain conditions. During system resume,
+                the irq will be disabled to make sure unnecessary interrupts
+                are not received.
+
+Example:
+
+IRQ pin 119 is used as the system wakeup source interrupt.
+Wakeup pin 13 and a gap of 100 ms are configured so that the firmware can wake
+up the host using this device-side pin and wakeup latency.
+Calibration data is also shown in the example below.
+
+&mmc3 {
+       status = "okay";
+       vmmc-supply = <&wlan_en_reg>;
+       bus-width = <4>;
+       cap-power-off-card;
+       keep-power-in-suspend;
+
+       #address-cells = <1>;
+       #size-cells = <0>;
+       btmrvl: bluetooth@2 {
+               compatible = "marvell,sd8897-bt";
+               reg = <2>;
+               interrupt-parent = <&pio>;
+               interrupts = <119 IRQ_TYPE_LEVEL_LOW>;
+
+               marvell,cal-data = /bits/ 8 <
+                       0x37 0x01 0x1c 0x00 0xff 0xff 0xff 0xff 0x01 0x7f 0x04 0x02
+                       0x00 0x00 0xba 0xce 0xc0 0xc6 0x2d 0x00 0x00 0x00 0x00 0x00
+                       0x00 0x00 0xf0 0x00>;
+               marvell,wakeup-pin = <0x0d>;
+               marvell,wakeup-gap-ms = <0x64>;
+       };
+};
index 5ca79290eabf0fc65c1a37c03b10fa60ef9e71c4..32eaaca04d9bf501809b0e5719100463f614d37b 100644 (file)
@@ -9,7 +9,8 @@ have dual GMAC each represented by a child node..
 Required properties:
 - compatible: Should be "mediatek,mt7623-eth"
 - reg: Address and length of the register set for the device
-- interrupts: Should contain the frame engines interrupt
+- interrupts: Should contain the three frame engine interrupts in numeric
+       order. These are fe_int0, fe_int1 and fe_int2.
 - clocks: the clock used by the core
 - clock-names: the names of the clock listed in the clocks property. These are
        "ethif", "esw", "gp2", "gp1"
@@ -42,7 +43,9 @@ eth: ethernet@1b100000 {
                 <&ethsys CLK_ETHSYS_GP2>,
                 <&ethsys CLK_ETHSYS_GP1>;
        clock-names = "ethif", "esw", "gp2", "gp1";
-       interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_LOW>;
+       interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_LOW
+                     GIC_SPI 199 IRQ_TYPE_LEVEL_LOW
+                     GIC_SPI 198 IRQ_TYPE_LEVEL_LOW>;
        power-domains = <&scpsys MT2701_POWER_DOMAIN_ETH>;
        resets = <&ethsys MT2701_ETHSYS_ETH_RST>;
        reset-names = "eth";
diff --git a/Documentation/devicetree/bindings/net/microchip,enc28j60.txt b/Documentation/devicetree/bindings/net/microchip,enc28j60.txt
new file mode 100644 (file)
index 0000000..1dc3bc7
--- /dev/null
@@ -0,0 +1,59 @@
+* Microchip ENC28J60
+
+This is a standalone 10 Mbit/s Ethernet controller with an SPI interface.
+
+For each device connected to a SPI bus, define a child node within
+the SPI master node.
+
+Required properties:
+- compatible: Should be "microchip,enc28j60"
+- reg: Specify the SPI chip select the ENC28J60 is wired to
+- interrupt-parent: Specify the phandle of the source interrupt, see interrupt
+                    binding documentation for details. Usually this is the GPIO bank
+                    the interrupt line is wired to.
+- interrupts: Specify the interrupt index within the interrupt controller (referred
+              to above in interrupt-parent) and interrupt type. The ENC28J60 natively
+              generates falling edge interrupts, however, additional board logic
+              might invert the signal.
+- pinctrl-names: List of assigned state names, see pinctrl binding documentation.
+- pinctrl-0: List of phandles to configure the GPIO pin used as interrupt line,
+             see also generic and your platform specific pinctrl binding
+             documentation.
+
+Optional properties:
+- spi-max-frequency: Maximum frequency of the SPI bus when accessing the ENC28J60.
+  According to the ENC28J60 datasheet, the chip allows a maximum of 20 MHz; however,
+  board designs may need to limit this value.
+- local-mac-address: See ethernet.txt in the same directory.
+
+
+Example (for NXP i.MX28 with pin control setup for the GPIO irq):
+
+        ssp2: ssp@80014000 {
+                compatible = "fsl,imx28-spi";
+                pinctrl-names = "default";
+                pinctrl-0 = <&spi2_pins_b &spi2_sck_cfg>;
+                status = "okay";
+
+                enc28j60: ethernet@0 {
+                        compatible = "microchip,enc28j60";
+                        pinctrl-names = "default";
+                        pinctrl-0 = <&enc28j60_pins>;
+                        reg = <0>;
+                        interrupt-parent = <&gpio3>;
+                        interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
+                        spi-max-frequency = <12000000>;
+                };
+        };
+
+        pinctrl@80018000 {
+                enc28j60_pins: enc28j60_pins@0 {
+                        reg = <0>;
+                        fsl,pinmux-ids = <
+                                MX28_PAD_AUART0_RTS__GPIO_3_3    /* Interrupt */
+                        >;
+                        fsl,drive-strength = <MXS_DRIVE_4mA>;
+                        fsl,voltage = <MXS_VOLTAGE_HIGH>;
+                        fsl,pull-up = <MXS_PULL_DISABLE>;
+                };
+        };
diff --git a/Documentation/devicetree/bindings/net/wireless/marvell-sd8xxx.txt b/Documentation/devicetree/bindings/net/wireless/marvell-sd8xxx.txt
new file mode 100644 (file)
index 0000000..c421aba
--- /dev/null
@@ -0,0 +1,63 @@
+Marvell 8897/8997 (sd8897/sd8997) SDIO devices
+------
+
+This node provides properties for controlling the Marvell SDIO wireless device.
+The node is expected to be specified as a child node to the SDIO controller that
+connects the device to the system.
+
+Required properties:
+
+  - compatible : should be one of the following:
+       * "marvell,sd8897"
+       * "marvell,sd8997"
+
+Optional properties:
+
+  - marvell,caldata* : A series of properties with the marvell,caldata prefix
+                     that represent calibration data downloaded to the device
+                     during initialization. Each is an array of unsigned 8-bit
+                     values. The properties should use the property names and
+                     corresponding array lengths listed below:
+       "marvell,caldata-txpwrlimit-2g" (length = 566).
+       "marvell,caldata-txpwrlimit-5g-sub0" (length = 502).
+       "marvell,caldata-txpwrlimit-5g-sub1" (length = 688).
+       "marvell,caldata-txpwrlimit-5g-sub2" (length = 750).
+       "marvell,caldata-txpwrlimit-5g-sub3" (length = 502).
+  - marvell,wakeup-pin : the wakeup pin number of the wifi chip, which will be
+                     passed to the firmware. The firmware will wake up the host
+                     using this pin during suspend/resume.
+  - interrupt-parent: phandle of the parent interrupt controller
+  - interrupts : interrupt pin number to the cpu. The driver will request an
+                irq based on this interrupt number. During system suspend, the
+                irq will be enabled so that the wifi chip can wake up the host
+                platform under certain conditions. During system resume, the
+                irq will be disabled to make sure unnecessary interrupts are
+                not received.
+
+Example:
+
+Tx power limit calibration data is configured in the example below.
+The calibration data is an array of unsigned values; the length
+can vary between hw versions.
+IRQ pin 38 is used as the system wakeup source interrupt. Wakeup pin 3 is
+configured so that the firmware can wake up the host using this device-side pin.
+
+&mmc3 {
+       status = "okay";
+       vmmc-supply = <&wlan_en_reg>;
+       bus-width = <4>;
+       cap-power-off-card;
+       keep-power-in-suspend;
+
+       #address-cells = <1>;
+       #size-cells = <0>;
+       mwifiex: wifi@1 {
+               compatible = "marvell,sd8897";
+               reg = <1>;
+               interrupt-parent = <&pio>;
+               interrupts = <38 IRQ_TYPE_LEVEL_LOW>;
+
+               marvell,caldata_00_txpwrlimit_2g_cfg_set = /bits/ 8 <
+       0x01 0x00 0x06 0x00 0x08 0x02 0x89 0x01>;
+               marvell,wakeup-pin = <3>;
+       };
+};
index 96aae6b4f736802df34a6524202a5dd2653e4231..74d7f0af209c8178f078b99a9dd460d44d430f8c 100644 (file)
@@ -5,12 +5,18 @@ Required properties:
        * "qcom,ath10k"
        * "qcom,ipq4019-wifi"
 
-PCI based devices uses compatible string "qcom,ath10k" and takes only
-calibration data via "qcom,ath10k-calibration-data". Rest of the properties
-are not applicable for PCI based devices.
+PCI based devices use the compatible string "qcom,ath10k" and take calibration
+data along with board specific data via "qcom,ath10k-calibration-data".
+The rest of the properties are not applicable to PCI based devices.
 
 AHB based devices (i.e. ipq4019) uses compatible string "qcom,ipq4019-wifi"
-and also uses most of the properties defined in this doc.
+and also uses most of the properties defined in this doc (except
+"qcom,ath10k-calibration-data"). It uses "qcom,ath10k-pre-calibration-data"
+to carry pre-calibration data.
+
+In general, the entries "qcom,ath10k-pre-calibration-data" and
+"qcom,ath10k-calibration-data" conflict with each other; only one
+can be provided per device.
 
 Optional properties:
 - reg: Address and length of the register set for the device.
@@ -35,8 +41,11 @@ Optional properties:
 - qcom,msi_addr: MSI interrupt address.
 - qcom,msi_base: Base value to add before writing MSI data into
                MSI address register.
-- qcom,ath10k-calibration-data : calibration data as an array, the
-                                length can vary between hw versions
+- qcom,ath10k-calibration-data : calibration data + board specific data
+                                as an array; the length can vary between
+                                hw versions.
+- qcom,ath10k-pre-calibration-data : pre-calibration data as an array;
+                                    the length can vary between hw versions.
 
 Example (to supply the calibration data alone):
 
@@ -105,5 +114,5 @@ wifi0: wifi@a000000 {
                          "legacy";
        qcom,msi_addr = <0x0b006040>;
        qcom,msi_base = <0x40>;
-       qcom,ath10k-calibration-data = [ 01 02 03 ... ];
+       qcom,ath10k-pre-calibration-data = [ 01 02 03 ... ];
 };
index 50c4f9b00adff971d32cda52189cd29880cf694e..e3b4809fbe82c4798b7e5bef55e666505d6df527 100644 (file)
@@ -8,15 +8,19 @@ Required properties:
        of memory mapped region.
 - clock-names: from common clock binding:
        Required elements: "24m"
-- rockchip,grf: phandle to the syscon managing the "general register files"
 - #phy-cells : from the generic PHY bindings, must be 0;
 
 Example:
 
-edp_phy: edp-phy {
-       compatible = "rockchip,rk3288-dp-phy";
-       rockchip,grf = <&grf>;
-       clocks = <&cru SCLK_EDP_24M>;
-       clock-names = "24m";
-       #phy-cells = <0>;
+grf: syscon@ff770000 {
+       compatible = "rockchip,rk3288-grf", "syscon", "simple-mfd";
+
+...
+
+       edp_phy: edp-phy {
+               compatible = "rockchip,rk3288-dp-phy";
+               clocks = <&cru SCLK_EDP_24M>;
+               clock-names = "24m";
+               #phy-cells = <0>;
+       };
 };
index 61916f15a949cdae63356e7f4c827b8a9e394841..555cb0f406908a0fc5ebfc995a8df97af8a2dcba 100644 (file)
@@ -3,17 +3,23 @@ Rockchip EMMC PHY
 
 Required properties:
  - compatible: rockchip,rk3399-emmc-phy
- - rockchip,grf : phandle to the syscon managing the "general
-   register files"
  - #phy-cells: must be 0
- - reg: PHY configure reg address offset in "general
+ - reg: PHY register address offset and length in "general
    register files"
 
 Example:
 
-emmcphy: phy {
-       compatible = "rockchip,rk3399-emmc-phy";
-       rockchip,grf = <&grf>;
-       reg = <0xf780>;
-       #phy-cells = <0>;
+
+grf: syscon@ff770000 {
+       compatible = "rockchip,rk3399-grf", "syscon", "simple-mfd";
+       #address-cells = <1>;
+       #size-cells = <1>;
+
+...
+
+       emmcphy: phy@f780 {
+               compatible = "rockchip,rk3399-emmc-phy";
+               reg = <0xf780 0x20>;
+               #phy-cells = <0>;
+       };
 };
index 08a4a32c8eb0db76b533259db2c740316e32a9a3..0326154c792548997457fe6f3252867bbd19a6b4 100644 (file)
@@ -134,12 +134,12 @@ mfio80            ddr_debug, mips_trace_data, mips_debug
 mfio81         dreq0, mips_trace_data, eth_debug
 mfio82         dreq1, mips_trace_data, eth_debug
 mfio83         mips_pll_lock, mips_trace_data, usb_debug
-mfio84         sys_pll_lock, mips_trace_data, usb_debug
-mfio85         wifi_pll_lock, mips_trace_data, sdhost_debug
-mfio86         bt_pll_lock, mips_trace_data, sdhost_debug
-mfio87         rpu_v_pll_lock, dreq2, socif_debug
-mfio88         rpu_l_pll_lock, dreq3, socif_debug
-mfio89         audio_pll_lock, dreq4, dreq5
+mfio84         audio_pll_lock, mips_trace_data, usb_debug
+mfio85         rpu_v_pll_lock, mips_trace_data, sdhost_debug
+mfio86         rpu_l_pll_lock, mips_trace_data, sdhost_debug
+mfio87         sys_pll_lock, dreq2, socif_debug
+mfio88         wifi_pll_lock, dreq3, socif_debug
+mfio89         bt_pll_lock, dreq4, dreq5
 tck
 trstn
 tdi
index 3f6a524cc5ffce28dddb0f2d9e0717098ec6a5a9..32f4a2d6d0b31dc76d458e854c51375c291ea3b0 100644 (file)
@@ -1,13 +1,16 @@
 == Amlogic Meson pinmux controller ==
 
 Required properties for the root node:
- - compatible: "amlogic,meson8-pinctrl" or "amlogic,meson8b-pinctrl"
+ - compatible: one of "amlogic,meson8-cbus-pinctrl"
+                     "amlogic,meson8b-cbus-pinctrl"
+                     "amlogic,meson8-aobus-pinctrl"
+                     "amlogic,meson8b-aobus-pinctrl"
  - reg: address and size of registers controlling irq functionality
 
 === GPIO sub-nodes ===
 
-The 2 power domains of the controller (regular and always-on) are
-represented as sub-nodes and each of them acts as a GPIO controller.
+The GPIO bank for the controller is represented as a sub-node and it acts as a
+GPIO controller.
 
 Required properties for sub-nodes are:
  - reg: should contain address and size for mux, pull-enable, pull and
@@ -18,10 +21,6 @@ Required properties for sub-nodes are:
  - gpio-controller: identifies the node as a gpio controller
  - #gpio-cells: must be 2
 
-Valid sub-node names are:
- - "banks" for the regular domain
- - "ao-bank" for the always-on domain
-
 === Other sub-nodes ===
 
 Child nodes without the "gpio-controller" represent some desired
@@ -45,7 +44,7 @@ pinctrl-bindings.txt
 === Example ===
 
        pinctrl: pinctrl@c1109880 {
-               compatible = "amlogic,meson8-pinctrl";
+               compatible = "amlogic,meson8-cbus-pinctrl";
                reg = <0xc1109880 0x10>;
                #address-cells = <1>;
                #size-cells = <1>;
@@ -61,15 +60,6 @@ pinctrl-bindings.txt
                        #gpio-cells = <2>;
                };
 
-               gpio_ao: ao-bank@c1108030 {
-                       reg = <0xc8100014 0x4>,
-                             <0xc810002c 0x4>,
-                             <0xc8100024 0x8>;
-                       reg-names = "mux", "pull", "gpio";
-                       gpio-controller;
-                       #gpio-cells = <2>;
-               };
-
                nand {
                        mux {
                                groups = "nand_io", "nand_io_ce0", "nand_io_ce1",
@@ -79,18 +69,4 @@ pinctrl-bindings.txt
                                function = "nand";
                        };
                };
-
-               uart_ao_a {
-                       mux {
-                               groups = "uart_tx_ao_a", "uart_rx_ao_a",
-                                        "uart_cts_ao_a", "uart_rts_ao_a";
-                               function = "uart_ao";
-                       };
-
-                       conf {
-                               pins = "GPIOAO_0", "GPIOAO_1",
-                                      "GPIOAO_2", "GPIOAO_3";
-                               bias-disable;
-                       };
-               };
        };
index 1068ffce9f9125559e301f60f9c0ce6ca40d2374..fdde63a5419cb1942ea1480a761beb963db08479 100644 (file)
@@ -15,9 +15,10 @@ Required properties:
   is the rtc tick interrupt. The number of cells representing a interrupt
   depends on the parent interrupt controller.
 - clocks: Must contain a list of phandle and clock specifier for the rtc
-          and source clocks.
-- clock-names: Must contain "rtc" and "rtc_src" entries sorted in the
-               same order as the clocks property.
+          clock and, in the case of an s3c6410-compatible controller, also
+          a source clock.
+- clock-names: Must contain "rtc" and, for an s3c6410-compatible controller,
+               "rtc_src", sorted in the same order as the clocks property.
 
 Example:
 
index 31f53f0ab95754a62e5aad6f08e73e3d2715cfac..4006298f670700b1da158616df1718dde422ef48 100644 (file)
@@ -38,7 +38,7 @@ the update lasts only as long as the inode is cached in memory, after
 which the timestamp reverts to 1970, i.e. moves backwards in time.
 
 Currently, cramfs must be written and read with architectures of the
-same endianness, and can be read only by kernels with PAGE_CACHE_SIZE
+same endianness, and can be read only by kernels with PAGE_SIZE
 == 4096.  At least the latter of these is a bug, but it hasn't been
 decided what the best fix is.  For the moment if you have larger pages
 you can just change the #define in mkcramfs.c, so long as you don't
index d392e1505f170b1c6c6e828c37388b2443a38b89..d9c11d25bf02132bebeeceb1b5c2c7e04901d4c7 100644 (file)
@@ -60,7 +60,7 @@ size:      The limit of allocated bytes for this tmpfs instance. The
            default is half of your physical RAM without swap. If you
            oversize your tmpfs instances the machine will deadlock
            since the OOM handler will not be able to free that memory.
-nr_blocks: The same as size, but in blocks of PAGE_CACHE_SIZE.
+nr_blocks: The same as size, but in blocks of PAGE_SIZE.
 nr_inodes: The maximum number of inodes for this instance. The default
            is half of the number of your physical RAM pages, or (on a
            machine with highmem) the number of lowmem RAM pages,
index b02a7d598258542e890eae7ab3b4b4503db251e2..4164bd6397a28efc94f5fc3ff3cd22369d84f4d3 100644 (file)
@@ -708,9 +708,9 @@ struct address_space_operations {
        from the address space.  This generally corresponds to either a
        truncation, punch hole  or a complete invalidation of the address
        space (in the latter case 'offset' will always be 0 and 'length'
-       will be PAGE_CACHE_SIZE). Any private data associated with the page
+       will be PAGE_SIZE). Any private data associated with the page
        should be updated to reflect this truncation.  If offset is 0 and
-       length is PAGE_CACHE_SIZE, then the private data should be released,
+       length is PAGE_SIZE, then the private data should be released,
        because the page must be able to be completely discarded.  This may
        be done by calling the ->releasepage function, but in this case the
        release MUST succeed.
index 3f0f5ce3338b63a8153410c9b08edba739989e22..36ea940e5bb91fe9711544e5d0bbe2e2293c8ca8 100644 (file)
@@ -173,6 +173,10 @@ A few EV_ABS codes have special meanings:
     proximity of the device and while the value of the BTN_TOUCH code is 0. If
     the input device may be used freely in three dimensions, consider ABS_Z
     instead.
+  - BTN_TOOL_<name> should be set to 1 when the tool comes into detectable
+    proximity and set to 0 when the tool leaves detectable proximity.
+    BTN_TOOL_<name> signals the type of tool that is currently detected by the
+    hardware and is otherwise independent of ABS_DISTANCE and/or BTN_TOUCH.
 
 * ABS_MT_<name>:
   - Used to describe multitouch input events. Please see
index ecc74fa4bfde8a55d9fee00d96ae5ae4123560e9..0b3de80ec8f69c1aef9dfaa59fb86c14395ba43e 100644 (file)
@@ -4077,6 +4077,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                                        sector if the number is odd);
                                i = IGNORE_DEVICE (don't bind to this
                                        device);
+                               j = NO_REPORT_LUNS (don't use report luns
+                                       command, uas only);
                                l = NOT_LOCKABLE (don't try to lock and
                                        unlock ejectable media);
                                m = MAX_SECTORS_64 (don't transfer more
index 3f24df8c6e6557cf1cf0d9e25857dead9cd0d94d..50b8589d12fd167371c87ca457708146bb765ae3 100644 (file)
@@ -6,7 +6,7 @@ This is the driver for the Altera Triple-Speed Ethernet (TSE) controllers
 using the SGDMA and MSGDMA soft DMA IP components. The driver uses the
 platform bus to obtain component resources. The designs used to test this
 driver were built for a Cyclone(R) V SOC FPGA board, a Cyclone(R) V FPGA board,
-and tested with ARM and NIOS processor hosts seperately. The anticipated use
+and tested with ARM and NIOS processor hosts separately. The anticipated use
 cases are simple communications between an embedded system and an external peer
 for status and simple configuration of the embedded system.
 
@@ -65,14 +65,14 @@ Driver parameters can be also passed in command line by using:
 4.1) Transmit process
 When the driver's transmit routine is called by the kernel, it sets up a
 transmit descriptor by calling the underlying DMA transmit routine (SGDMA or
-MSGDMA), and initites a transmit operation. Once the transmit is complete, an
+MSGDMA), and initiates a transmit operation. Once the transmit is complete, an
 interrupt is driven by the transmit DMA logic. The driver handles the transmit
 completion in the context of the interrupt handling chain by recycling
 resource required to send and track the requested transmit operation.
 
 4.2) Receive process
 The driver will post receive buffers to the receive DMA logic during driver
-intialization. Receive buffers may or may not be queued depending upon the
+initialization. Receive buffers may or may not be queued depending upon the
 underlying DMA logic (MSGDMA is able queue receive buffers, SGDMA is not able
 to queue receive buffers to the SGDMA receive logic). When a packet is
 received, the DMA logic generates an interrupt. The driver handles a receive
index 334b49ef02d13eea3bbbb6213ec5241881b20fe6..57f52cdce32e42c9d170e8e475a02e8a08e1f454 100644 (file)
@@ -1880,8 +1880,8 @@ or more peers on the local network.
 
        The ARP monitor relies on the device driver itself to verify
 that traffic is flowing.  In particular, the driver must keep up to
-date the last receive time, dev->last_rx, and transmit start time,
-dev->trans_start.  If these are not updated by the driver, then the
+date the last receive time, dev->last_rx.  Drivers that use the NETIF_F_LLTX
+flag must also update netdev_queue->trans_start.  If they do not, then the
 ARP monitor will immediately fail any slaves using that driver, and
 those slaves will stay down.  If networking monitoring (tcpdump, etc)
 shows the ARP requests and replies on the network, then it may be that
index de2a327766a7ed2285b65cf4023c214205f47e70..56e36861245f108595bb340123b2dece1ea2bead 100644 (file)
@@ -69,18 +69,18 @@ LCO: Local Checksum Offload
 LCO is a technique for efficiently computing the outer checksum of an
  encapsulated datagram when the inner checksum is due to be offloaded.
 The ones-complement sum of a correctly checksummed TCP or UDP packet is
- equal to the sum of the pseudo header, because everything else gets
- 'cancelled out' by the checksum field.  This is because the sum was
+ equal to the complement of the sum of the pseudo header, because everything
+ else gets 'cancelled out' by the checksum field.  This is because the sum was
  complemented before being written to the checksum field.
 More generally, this holds in any case where the 'IP-style' ones complement
  checksum is used, and thus any checksum that TX Checksum Offload supports.
 That is, if we have set up TX Checksum Offload with a start/offset pair, we
- know that _after the device has filled in that checksum_, the ones
+ know that after the device has filled in that checksum, the ones
  complement sum from csum_start to the end of the packet will be equal to
- _whatever value we put in the checksum field beforehand_.  This allows us
- to compute the outer checksum without looking at the payload: we simply
- stop summing when we get to csum_start, then add the 16-bit word at
- (csum_start + csum_offset).
+ the complement of whatever value we put in the checksum field beforehand.
+ This allows us to compute the outer checksum without looking at the payload:
+ we simply stop summing when we get to csum_start, then add the complement of
+ the 16-bit word at (csum_start + csum_offset).
 Then, when the true inner checksum is filled in (either by hardware or by
  skb_checksum_help()), the outer checksum will become correct by virtue of
  the arithmetic.
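+
+As a worked illustration, the following is a minimal user-space sketch of
+the LCO recipe above (written for this document; it is not the kernel's
+helper, though the kernel provides one as lco_csum()).  It treats the
+packet as big-endian 16-bit words and assumes csum_start is even:
+
+  #include <stddef.h>
+  #include <stdint.h>
+
+  /* Fold a 16-bit value into a running ones-complement sum. */
+  static uint32_t ones_add(uint32_t sum, uint16_t v)
+  {
+          sum += v;
+          return (sum & 0xffff) + (sum >> 16);
+  }
+
+  /* Sum the outer headers up to csum_start, then add the complement of
+   * the 16-bit inner checksum field; the payload is never read. */
+  static uint16_t lco_sum(const uint8_t *outer, size_t csum_start,
+                          size_t csum_offset)
+  {
+          uint32_t sum = 0;
+          uint16_t inner;
+          size_t i;
+
+          for (i = 0; i + 1 < csum_start; i += 2)
+                  sum = ones_add(sum, (uint16_t)((outer[i] << 8) | outer[i + 1]));
+
+          inner = (uint16_t)((outer[csum_start + csum_offset] << 8) |
+                             outer[csum_start + csum_offset + 1]);
+          return (uint16_t)ones_add(sum, (uint16_t)~inner);
+  }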
index d999d0c1c5b83a19b4406870c478810bfaf0a5f4..eba3a2431e914ffb787eaf08d96c16b405f94d51 100644 (file)
@@ -38,7 +38,7 @@ Implementation details
 ======================
 
 The driver is located in drivers/net/dsa/bcm_sf2.c and is implemented as a DSA
-driver; see Documentation/networking/dsa/dsa.txt for details on the subsytem
+driver; see Documentation/networking/dsa/dsa.txt for details on the subsystem
 and what it provides.
 
 The SF2 switch is configured to enable a Broadcom specific 4-bytes switch tag
index ba698c56919d2f3f8f94b29addcfc4d31e3c9603..631b0f7ae16fd6506a7e171d04f874b00c09da67 100644 (file)
@@ -334,7 +334,7 @@ more specifically with its VLAN filtering portion when configuring VLANs on top
 of per-port slave network devices. Since DSA primarily deals with
 MDIO-connected switches, although not exclusively, SWITCHDEV's
 prepare/abort/commit phases are often simplified into a prepare phase which
-checks whether the operation is supporte by the DSA switch driver, and a commit
+checks whether the operation is supported by the DSA switch driver, and a commit
 phase which applies the changes.
 
 As of today, the only SWITCHDEV objects supported by DSA are the FDB and VLAN
index 96da119a47e70fefa7844137c13ea33f070dc077..6aef0b5f3bc7d51b3ed37760ea151c763642f47e 100644 (file)
@@ -1095,6 +1095,87 @@ all use cases.
 
 See details of eBPF verifier in kernel/bpf/verifier.c
 
+Direct packet access
+--------------------
+In cls_bpf and act_bpf programs the verifier allows direct access to the packet
+data via skb->data and skb->data_end pointers.
+Ex:
+1:  r4 = *(u32 *)(r1 +80)  /* load skb->data_end */
+2:  r3 = *(u32 *)(r1 +76)  /* load skb->data */
+3:  r5 = r3
+4:  r5 += 14
+5:  if r5 > r4 goto pc+16
+R1=ctx R3=pkt(id=0,off=0,r=14) R4=pkt_end R5=pkt(id=0,off=14,r=14) R10=fp
+6:  r0 = *(u16 *)(r3 +12) /* access 12 and 13 bytes of the packet */
+
+This 2-byte load from the packet is safe to do, since the program author
+did check 'if (skb->data + 14 > skb->data_end) goto err' at insn #5 which
+means that in the fall-through case the register R3 (which points to skb->data)
+has at least 14 directly accessible bytes. The verifier marks it
+as R3=pkt(id=0,off=0,r=14).
+id=0 means that no additional variables were added to the register.
+off=0 means that no additional constants were added.
+r=14 is the range of safe access which means that bytes [R3, R3 + 14) are ok.
+Note that R5 is marked as R5=pkt(id=0,off=14,r=14). It also points
+to the packet data, but constant 14 was added to the register, so
+it now points to 'skb->data + 14' and accessible range is [R5, R5 + 14 - 14)
+which is zero bytes.
+
+More complex packet access may look like:
+ R0=imm1 R1=ctx R3=pkt(id=0,off=0,r=14) R4=pkt_end R5=pkt(id=0,off=14,r=14) R10=fp
+ 6:  r0 = *(u8 *)(r3 +7) /* load 7th byte from the packet */
+ 7:  r4 = *(u8 *)(r3 +12)
+ 8:  r4 *= 14
+ 9:  r3 = *(u32 *)(r1 +76) /* load skb->data */
+10:  r3 += r4
+11:  r2 = r1
+12:  r2 <<= 48
+13:  r2 >>= 48
+14:  r3 += r2
+15:  r2 = r3
+16:  r2 += 8
+17:  r1 = *(u32 *)(r1 +80) /* load skb->data_end */
+18:  if r2 > r1 goto pc+2
+ R0=inv56 R1=pkt_end R2=pkt(id=2,off=8,r=8) R3=pkt(id=2,off=0,r=8) R4=inv52 R5=pkt(id=0,off=14,r=14) R10=fp
+19:  r1 = *(u8 *)(r3 +4)
+The state of the register R3 is R3=pkt(id=2,off=0,r=8)
+id=2 means that two 'r3 += rX' instructions were seen, so r3 points to some
+offset within a packet and since the program author did
+'if (r3 + 8 > r1) goto err' at insn #18, the safe range is [R3, R3 + 8).
+The verifier only allows 'add' operation on packet registers. Any other
+operation will set the register state to 'unknown_value' and it won't be
+available for direct packet access.
+Operation 'r3 += rX' may overflow and become less than the original skb->data,
+therefore the verifier has to prevent that. So it tracks the number of
+upper zero bits in all 'unknown_value' registers, so when it sees an
+'r3 += rX' instruction and rX is more than a 16-bit value, it will error as:
+"cannot add integer value with N upper zero bits to ptr_to_packet"
+Ex. after insn 'r4 = *(u8 *)(r3 +12)' (insn #7 above) the state of r4 is
+R4=inv56 which means that upper 56 bits on the register are guaranteed
+to be zero. After insn 'r4 *= 14' the state becomes R4=inv52, since
+multiplying 8-bit value by constant 14 will keep upper 52 bits as zero.
+Similarly 'r2 >>= 48' will make R2=inv48, since the shift is not sign
+extending. This logic is implemented in evaluate_reg_alu() function.
+
+The end result is that a bpf program author can access the packet directly
+using normal C code as:
+  void *data = (void *)(long)skb->data;
+  void *data_end = (void *)(long)skb->data_end;
+  struct eth_hdr *eth = data;
+  struct iphdr *iph = data + sizeof(*eth);
+  struct udphdr *udp = data + sizeof(*eth) + sizeof(*iph);
+
+  if (data + sizeof(*eth) + sizeof(*iph) + sizeof(*udp) > data_end)
+          return 0;
+  if (eth->h_proto != htons(ETH_P_IP))
+          return 0;
+  if (iph->protocol != IPPROTO_UDP || iph->ihl != 5)
+          return 0;
+  if (udp->dest == 53 || udp->source == 9)
+          ...;
+which makes such programs easier to write compared to the LD_ABS insn
+and significantly faster.
+
 eBPF maps
 ---------
 'maps' is a generic storage of different types for sharing data between kernel
@@ -1293,5 +1374,5 @@ to give potential BPF hackers or security auditors a better overview of
 the underlying architecture.
 
 Jay Schulist <jschlst@samba.org>
-Daniel Borkmann <dborkman@redhat.com>
-Alexei Starovoitov <ast@plumgrid.com>
+Daniel Borkmann <daniel@iogearbox.net>
+Alexei Starovoitov <ast@kernel.org>
index 70e6275b757a46c786cd1e48f3532317a6c15b65..ff630a87b511c2e1e9f13918ab3d014442fb3af6 100644 (file)
@@ -33,7 +33,8 @@ my_dumping_routine(struct sk_buff *skb, ...)
 {
        struct gnet_dump dump;
 
-       if (gnet_stats_start_copy(skb, TCA_STATS2, &mystruct->lock, &dump) < 0)
+       if (gnet_stats_start_copy(skb, TCA_STATS2, &mystruct->lock, &dump,
+                                 TCA_PAD) < 0)
                goto rtattr_failure;
 
        if (gnet_stats_copy_basic(&dump, &mystruct->bstats) < 0 ||
@@ -56,7 +57,8 @@ existing TLV types.
 my_dumping_routine(struct sk_buff *skb, ...)
 {
     if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
-               TCA_XSTATS, &mystruct->lock, &dump) < 0)
+                                    TCA_XSTATS, &mystruct->lock, &dump,
+                                    TCA_PAD) < 0)
                goto rtattr_failure;
        ...
 }
index b183e2b606c8a8e39ea8ce9a95857b60224d139d..6c7f365b15157a42538a9fa65a43db06b1a08af9 100644 (file)
@@ -63,6 +63,16 @@ fwmark_reflect - BOOLEAN
        fwmark of the packet they are replying to.
        Default: 0
 
+fib_multipath_use_neigh - BOOLEAN
+       Use status of existing neighbor entry when determining nexthop for
+       multipath routes. If disabled, neighbor information is not used and
+       packets could be directed to a failed nexthop. Only valid for kernels
+       built with CONFIG_IP_ROUTE_MULTIPATH enabled.
+       Default: 0 (disabled)
+       Possible values:
+       0 - disabled
+       1 - enabled
+
 route/max_size - INTEGER
        Maximum number of routes allowed in the kernel.  Increase
        this when using large numbers of interfaces and/or routes.
index cf996394e466b708d5ca02757143205d519ceb5b..14422f8fcdc474f5f32cde7868803cf67fbb0805 100644 (file)
@@ -8,7 +8,7 @@ Initial Release:
        This is conceptually very similar to the macvlan driver with one major
 exception of using L3 for mux-ing /demux-ing among slaves. This property makes
 the master device share the L2 with it's slave devices. I have developed this
-driver in conjuntion with network namespaces and not sure if there is use case
+driver in conjunction with network namespaces and not sure if there is use case
 outside of it.
 
 
@@ -42,7 +42,7 @@ out. In this mode the slaves will RX/TX multicast and broadcast (if applicable)
 as well.
 
 4.2 L3 mode:
-       In this mode TX processing upto L3 happens on the stack instance attached
+       In this mode TX processing up to L3 happens on the stack instance attached
 to the slave device and packets are switched to the stack instance of the
 master device for the L2 processing and routing from that instance will be
 used before packets are queued on the outbound device. In this mode the slaves
@@ -56,7 +56,7 @@ situations defines your use case then you can choose to use ipvlan -
        (a) The Linux host that is connected to the external switch / router has
 policy configured that allows only one mac per port.
        (b) No of virtual devices created on a master exceed the mac capacity and
-puts the NIC in promiscous mode and degraded performance is a concern.
+puts the NIC in promiscuous mode and degraded performance is a concern.
        (c) If the slave device is to be put into the hostile / untrusted network
 namespace where L2 on the slave could be changed / misused.
 
index f310edec8a776f0a8ffb4a943a386ea45990b1a6..7413eb05223b104f4d8a00d9615dbd694b596cf4 100644 (file)
@@ -131,13 +131,11 @@ stack. Driver should not change behaviour based on them.
 
  * LLTX driver (deprecated for hardware drivers)
 
-NETIF_F_LLTX should be set in drivers that implement their own locking in
-transmit path or don't need locking at all (e.g. software tunnels).
-In ndo_start_xmit, it is recommended to use a try_lock and return
-NETDEV_TX_LOCKED when the spin lock fails.  The locking should also properly
-protect against other callbacks (the rules you need to find out).
+NETIF_F_LLTX is meant to be used by drivers that don't need locking at all,
+e.g. software tunnels.
 
-Don't use it for new drivers.
+This is also used in a few legacy drivers that implement their
+own locking; don't use it for new (hardware) drivers.
 
  * netns-local device
 
index 0b1cf6b2a592f4f02f6d7c90f996428ba9e9fead..7fec2061a334fd489acc127ca1630a298eb3bda7 100644 (file)
@@ -69,10 +69,9 @@ ndo_start_xmit:
 
        When the driver sets NETIF_F_LLTX in dev->features this will be
        called without holding netif_tx_lock. In this case the driver
-       has to lock by itself when needed. It is recommended to use a try lock
-       for this and return NETDEV_TX_LOCKED when the spin lock fails.
-       The locking there should also properly protect against 
-       set_rx_mode. Note that the use of NETIF_F_LLTX is deprecated.
+       has to lock by itself when needed.
+       The locking there should also properly protect against
+       set_rx_mode. WARNING: use of NETIF_F_LLTX is deprecated.
        Don't use it for new drivers.
 
        Context: Process with BHs disabled or BH (timer),
@@ -83,8 +82,6 @@ ndo_start_xmit:
        o NETDEV_TX_BUSY Cannot transmit packet, try later 
          Usually a bug, means queue start/stop flow control is broken in
          the driver. Note: the driver must NOT put the skb in its DMA ring.
-       o NETDEV_TX_LOCKED Locking failed, please retry quickly.
-         Only valid when NETIF_F_LLTX is set.
 
 ndo_tx_timeout:
        Synchronization: netif_tx_lock spinlock; all TX queues frozen.
index f4be85e9600578e7411f1baa1ab37041a677fe4a..2c4e3354e12891e755de5986b6f9fa82eff0657b 100644 (file)
@@ -67,12 +67,12 @@ The two basic thread commands are:
  * add_device DEVICE@NAME -- adds a single device
  * rem_device_all         -- remove all associated devices
 
-When adding a device to a thread, a corrosponding procfile is created
+When adding a device to a thread, a corresponding procfile is created
 which is used for configuring this device. Thus, device names need to
 be unique.
 
 To support adding the same device to multiple threads, which is useful
-with multi queue NICs, the device naming scheme is extended with "@":
+with multi queue NICs, the device naming scheme is extended with "@":
  device@something
 
 The part after "@" can be anything, but it is custom to use the thread
@@ -221,7 +221,7 @@ Sample scripts
 
 A collection of tutorial scripts and helpers for pktgen is in the
 samples/pktgen directory. The helper parameters.sh file support easy
-and consistant parameter parsing across the sample scripts.
+and consistent parameter parsing across the sample scripts.
 
 Usage example and help:
  ./pktgen_sample01_simple.sh -i eth4 -m 00:1B:21:3C:9D:F8 -d 192.168.8.2
diff --git a/Documentation/networking/segmentation-offloads.txt b/Documentation/networking/segmentation-offloads.txt
new file mode 100644 (file)
index 0000000..f200467
--- /dev/null
@@ -0,0 +1,130 @@
+Segmentation Offloads in the Linux Networking Stack
+
+Introduction
+============
+
+This document describes a set of techniques in the Linux networking stack
+to take advantage of segmentation offload capabilities of various NICs.
+
+The following technologies are described:
+ * TCP Segmentation Offload - TSO
+ * UDP Fragmentation Offload - UFO
+ * IPIP, SIT, GRE, and UDP Tunnel Offloads
+ * Generic Segmentation Offload - GSO
+ * Generic Receive Offload - GRO
+ * Partial Generic Segmentation Offload - GSO_PARTIAL
+
+TCP Segmentation Offload
+========================
+
+TCP segmentation allows a device to segment a single frame into multiple
+frames with a data payload size specified in skb_shinfo()->gso_size.
+When TCP segmentation is requested, the bit for either SKB_GSO_TCPV4 or
+SKB_GSO_TCPV6 should be set in skb_shinfo()->gso_type and
+skb_shinfo()->gso_size should be set to a non-zero value.
+
+TCP segmentation is dependent on support for the use of partial checksum
+offload.  For this reason TSO is normally disabled if the Tx checksum
+offload for a given device is disabled.
+
+In order to support TCP segmentation offload it is necessary to populate
+the network and transport header offsets of the skbuff so that the device
+drivers will be able to determine the offsets of the IP or IPv6 header and
+the TCP header.  In addition, as CHECKSUM_PARTIAL is required, csum_start
+should also point to the TCP header of the packet.
+
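+As a rough sketch of the setup described above (the helper name is made up
+for this document; it is not a kernel API):
+
+  #include <linux/skbuff.h>
+  #include <linux/tcp.h>
+
+  /* Illustrative only: mark an skb for IPv4 TSO with the given MSS. */
+  static void mark_skb_for_tso(struct sk_buff *skb, unsigned short mss)
+  {
+          skb_shinfo(skb)->gso_size = mss;           /* payload per segment */
+          skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; /* IPv4 TCP segmentation */
+
+          /* TSO depends on partial checksum offload, so csum_start must
+           * point at the TCP header and csum_offset at its check field. */
+          skb->ip_summed = CHECKSUM_PARTIAL;
+          skb->csum_start = skb_transport_header(skb) - skb->head;
+          skb->csum_offset = offsetof(struct tcphdr, check);
+  }
+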
+For IPv4 segmentation we support one of two types in terms of the IP ID.
+The default behavior is to increment the IP ID with every segment.  If the
+GSO type SKB_GSO_TCP_FIXEDID is specified then we will not increment the IP
+ID and all segments will use the same IP ID.  If a device has
+NETIF_F_TSO_MANGLEID set then the IP ID can be ignored when performing TSO
+and we will either increment the IP ID for all frames, or leave it at a
+static value based on driver preference.
+
+UDP Fragmentation Offload
+=========================
+
+UDP fragmentation offload allows a device to fragment an oversized UDP
+datagram into multiple IPv4 fragments.  Many of the requirements for UDP
+fragmentation offload are the same as TSO.  However the IPv4 ID for
+fragments should not increment as a single IPv4 datagram is fragmented.
+
+IPIP, SIT, GRE, UDP Tunnel, and Remote Checksum Offloads
+========================================================
+
+In addition to the offloads described above it is possible for a frame to
+contain additional headers such as an outer tunnel.  In order to account
+for such instances an additional set of segmentation offload types were
+introduced including SKB_GSO_IPIP, SKB_GSO_SIT, SKB_GSO_GRE, and
+SKB_GSO_UDP_TUNNEL.  These extra segmentation types are used to identify
+cases where there are more than just one set of headers.  For example in the
+case of IPIP and SIT we should have the network and transport headers moved
+from the standard list of headers to "inner" header offsets.
+
+Currently only two levels of headers are supported.  The convention is to
+refer to the tunnel headers as the outer headers, while the encapsulated
+data is normally referred to as the inner headers.  Below is the list of
+calls to access the given headers:
+
+IPIP/SIT Tunnel:
+               Outer                   Inner
+MAC            skb_mac_header
+Network                skb_network_header      skb_inner_network_header
+Transport      skb_transport_header
+
+UDP/GRE Tunnel:
+               Outer                   Inner
+MAC            skb_mac_header          skb_inner_mac_header
+Network                skb_network_header      skb_inner_network_header
+Transport      skb_transport_header    skb_inner_transport_header
+
+In addition to the above tunnel types there are also SKB_GSO_GRE_CSUM and
+SKB_GSO_UDP_TUNNEL_CSUM.  These two additional tunnel types indicate that
+a non-zero checksum is also requested in the outer tunnel header.
+
+Finally there is SKB_GSO_REMCSUM which indicates that a given tunnel header
+has requested a remote checksum offload.  In this case the inner headers
+will be left with a partial checksum and only the outer header checksum
+will be computed.
+
+Generic Segmentation Offload
+============================
+
+Generic segmentation offload is a pure software offload that is meant to
+deal with cases where device drivers cannot perform the offloads described
+above.  What occurs in GSO is that a given skbuff will have its data broken
+out over multiple skbuffs that have been resized to match the MSS provided
+via skb_shinfo()->gso_size.
+
+Before enabling any hardware segmentation offload a corresponding software
+offload is required in GSO.  Otherwise it becomes possible for a frame to
+be re-routed between devices and end up being unable to be transmitted.
+
+Generic Receive Offload
+=======================
+
+Generic receive offload is the complement to GSO.  Ideally any frame
+assembled by GRO should be segmented to create an identical sequence of
+frames using GSO, and any sequence of frames segmented by GSO should be
+able to be reassembled back to the original by GRO.  The only exception to
+this is IPv4 ID in the case that the DF bit is set for a given IP header.
+If the value of the IPv4 ID is not sequentially incrementing it will be
+altered so that it is when a frame assembled via GRO is segmented via GSO.
+
+Partial Generic Segmentation Offload
+====================================
+
+Partial generic segmentation offload is a hybrid between TSO and GSO.  What
+it effectively does is take advantage of certain traits of TCP and tunnels
+so that instead of having to rewrite the packet headers for each segment
+only the inner-most transport header and possibly the outer-most network
+header need to be updated.  This allows devices that do not support tunnel
+offloads or tunnel offloads with checksum to still make use of segmentation.
+
+With the partial offload what occurs is that all headers excluding the
+inner transport header are updated such that they will contain the correct
+values as if the header were simply duplicated.  The one exception to this
+is the outer IPv4 ID field.  It is up to the device drivers to guarantee
+that the IPv4 ID field is incremented in the case that a given header does
+not have the DF bit set.
index d52aa10cfe911c88b47927c25cfd8ef596c65986..5da679c573d2326c88cdcd6eccde9805f8dd06dc 100644 (file)
@@ -41,7 +41,7 @@ using an rx_handler which gives the impression that packets flow through
 the VRF device. Similarly on egress routing rules are used to send packets
 to the VRF device driver before getting sent out the actual interface. This
 allows tcpdump on a VRF device to capture all packets into and out of the
-VRF as a whole.[1] Similiarly, netfilter [2] and tc rules can be applied
+VRF as a whole.[1] Similarly, netfilter [2] and tc rules can be applied
 using the VRF device to specify rules that apply to the VRF domain as a whole.
 
 [1] Packets in the forwarded state do not flow through the device, so those
index d7aac9dedeb4266d970e8cc910726a027305a0b3..8d88e0f2ec493be2e519dbaec3e7995f5027a325 100644 (file)
@@ -4,7 +4,7 @@ Krisztian <hidden@balabit.hu> and others and additional patches
 from Jamal <hadi@cyberus.ca>.
 
 The end goal for syncing is to be able to insert attributes + generate
-events so that the an SA can be safely moved from one machine to another
+events so that the SA can be safely moved from one machine to another
 for HA purposes.
 The idea is to synchronize the SA so that the takeover machine can do
 the processing of the SA as accurate as possible if it has access to it.
@@ -13,7 +13,7 @@ We already have the ability to generate SA add/del/upd events.
 These patches add ability to sync and have accurate lifetime byte (to
 ensure proper decay of SAs) and replay counters to avoid replay attacks
 with as minimal loss at failover time.
-This way a backup stays as closely uptodate as an active member.
+This way a backup stays as closely up-to-date as an active member.
 
 Because the above items change for every packet the SA receives,
 it is possible for a lot of the events to be generated.
@@ -163,7 +163,7 @@ If you have an SA that is getting hit by traffic in bursts such that
 there is a period where the timer threshold expires with no packets
 seen, then an odd behavior is seen as follows:
 The first packet arrival after a timer expiry will trigger a timeout
-aevent; i.e we dont wait for a timeout period or a packet threshold
+event; i.e. we don't wait for a timeout period or a packet threshold
 to be reached. This is done for simplicity and efficiency reasons.
 
 -JHS
index 7328cf85236c2b2bc5a4c078dc9f81f2e3febd11..1fd1fbe9ce95adf9b37109cfea9833d586fcfffe 100644 (file)
@@ -586,6 +586,10 @@ drivers to make their ->remove() callbacks avoid races with runtime PM directly,
 but also it allows of more flexibility in the handling of devices during the
 removal of their drivers.
 
+In their ->remove() callbacks, drivers should undo the runtime PM changes
+done in ->probe(). Usually this means calling pm_runtime_disable(),
+pm_runtime_dont_use_autosuspend() etc., as in the sketch below.
+
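+A minimal sketch (the foo_* names are hypothetical):
+
+	#include <linux/platform_device.h>
+	#include <linux/pm_runtime.h>
+
+	static int foo_remove(struct platform_device *pdev)
+	{
+		/* Undo what foo_probe() set up, in reverse order. */
+		pm_runtime_dont_use_autosuspend(&pdev->dev);
+		pm_runtime_disable(&pdev->dev);
+		return 0;
+	}
+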
 The user space can effectively disallow the driver of the device to power manage
 it at run time by changing the value of its /sys/devices/.../power/control
 attribute to "on", which causes pm_runtime_forbid() to be called.  In principle,
index cb0368459da31409996c8e01fa8add867ec0c5d8..34a5fece31216320181fccfbdaf3a85ad91043be 100644 (file)
@@ -581,15 +581,16 @@ Specify "[Nn]ode" for node order
 "Zone Order" orders the zonelists by zone type, then by node within each
 zone.  Specify "[Zz]one" for zone order.
 
-Specify "[Dd]efault" to request automatic configuration.  Autoconfiguration
-will select "node" order in following case.
-(1) if the DMA zone does not exist or
-(2) if the DMA zone comprises greater than 50% of the available memory or
-(3) if any node's DMA zone comprises greater than 70% of its local memory and
-    the amount of local memory is big enough.
-
-Otherwise, "zone" order will be selected. Default order is recommended unless
-this is causing problems for your system/application.
+Specify "[Dd]efault" to request automatic configuration.
+
+On 32-bit, the Normal zone needs to be preserved for allocations accessible
+by the kernel, so "zone" order will be selected.
+
+On 64-bit, devices that require DMA32/DMA are relatively rare, so "node"
+order will be selected.
+
+Default order is recommended unless this is causing problems for your
+system/application.
 
 ==============================================================
 
index 7d66a8636cb5d16530194445d9000b2d9064197e..5faf514047e9f8f9c52d5a1549221cccc0090a45 100644 (file)
@@ -43,7 +43,7 @@ For the gadget two work under Windows two conditions have to be met:
 First of all, Windows need to detect the gadget as an USB composite
 gadget which on its own have some conditions[4].  If they are met,
 Windows lets USB Generic Parent Driver[5] handle the device which then
-tries to much drivers for each individual interface (sort of, don't
+tries to match drivers for each individual interface (sort of, don't
 get into too many details).
 
 The good news is: you do not have to worry about most of the
diff --git a/Documentation/x86/protection-keys.txt b/Documentation/x86/protection-keys.txt
new file mode 100644 (file)
index 0000000..c281ded
--- /dev/null
@@ -0,0 +1,27 @@
+Memory Protection Keys for Userspace (PKU aka PKEYs) is a CPU feature
+which will be found on future Intel CPUs.
+
+Memory Protection Keys provides a mechanism for enforcing page-based
+protections, but without requiring modification of the page tables
+when an application changes protection domains.  It works by
+dedicating 4 previously ignored bits in each page table entry to a
+"protection key", giving 16 possible keys.
+
+There is also a new user-accessible register (PKRU) with two separate
+bits (Access Disable and Write Disable) for each key.  Being a CPU
+register, PKRU is inherently thread-local, potentially giving each
+thread a different set of protections from every other thread.
+
+There are two new instructions (RDPKRU/WRPKRU) for reading and writing
+to the new register.  The feature is only available in 64-bit mode,
+even though there is theoretically space in the PAE PTEs.  These
+permissions are enforced on data access only and have no effect on
+instruction fetches.
+
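+As an illustration only (not part of the kernel; this assumes a PKU-capable
+CPU and an assembler that knows the mnemonics), the register can be read
+and written from user space like this:
+
+  static inline unsigned int rdpkru(void)
+  {
+          unsigned int eax, edx;
+          unsigned int ecx = 0;           /* ECX must be 0 for RDPKRU */
+
+          /* RDPKRU reads PKRU into EAX and zeroes EDX. */
+          asm volatile("rdpkru" : "=a" (eax), "=d" (edx) : "c" (ecx));
+          return eax;
+  }
+
+  static inline void wrpkru(unsigned int pkru)
+  {
+          unsigned int ecx = 0, edx = 0;  /* ECX and EDX must be 0 */
+
+          asm volatile("wrpkru" : : "a" (pkru), "c" (ecx), "d" (edx)
+                       : "memory");
+  }
+
+  /* Each key gets two PKRU bits: Access Disable at bit 2*key and Write
+   * Disable at bit 2*key + 1.  E.g. deny writes through protection key 1: */
+  static void deny_writes_to_pkey1(void)
+  {
+          wrpkru(rdpkru() | (1u << 3));
+  }
+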
+=========================== Config Option ===========================
+
+This config option adds approximately 1.5kb of text, and 50 bytes of
+data to the executable.  A workload which does large O_DIRECT reads
+of holes in XFS files was run to exercise get_user_pages_fast().  No
+performance delta was observed with the config option
+enabled or disabled.
diff --git a/Documentation/x86/topology.txt b/Documentation/x86/topology.txt
new file mode 100644 (file)
index 0000000..06afac2
--- /dev/null
@@ -0,0 +1,208 @@
+x86 Topology
+============
+
+This documents and clarifies the main aspects of x86 topology modelling and
+its representation in the kernel. Update/change it when making changes to
+the respective code.
+
+The architecture-agnostic topology definitions are in
+Documentation/cputopology.txt. This file holds x86-specific
+differences/specialities which do not necessarily apply to the generic
+definitions. Thus, the way to read up on Linux topology on x86 is to start
+with the generic one and look at this one in parallel for the x86 specifics.
+
+Needless to say, code should use the generic functions - this file is *only*
+here to *document* the inner workings of x86 topology.
+
+Started by Thomas Gleixner <tglx@linutronix.de> and Borislav Petkov <bp@alien8.de>.
+
+The main aim of the topology facilities is to present adequate interfaces to
+code which needs to know/query/use the structure of the running system wrt
+threads, cores, packages, etc.
+
+The kernel does not care about the concept of physical sockets because a
+socket has no relevance to software. It's an electromechanical component. In
+the past a socket always contained a single package (see below), but with the
+advent of Multi Chip Modules (MCM) a socket can hold more than one package. So
+there might still be references to sockets in the code, but they are of
+historical nature and should be cleaned up.
+
+The topology of a system is described in the units of:
+
+    - packages
+    - cores
+    - threads
+
+* Package:
+
+  Packages contain a number of cores plus shared resources, e.g. DRAM
+  controller, shared caches etc.
+
+  AMD nomenclature for package is 'Node'.
+
+  Package-related topology information in the kernel:
+
+  - cpuinfo_x86.x86_max_cores:
+
+    The number of cores in a package. This information is retrieved via CPUID.
+
+  - cpuinfo_x86.phys_proc_id:
+
+    The physical ID of the package. This information is retrieved via CPUID
+    and deduced from the APIC IDs of the cores in the package.
+
+  - cpuinfo_x86.logical_proc_id:
+
+    The logical ID of the package. As we do not trust BIOSes to enumerate the
+    packages in a consistent way, we introduced the concept of logical package
+    ID so we can sanely calculate the number of maximum possible packages in
+    the system and have the packages enumerated linearly.
+
+  - topology_max_packages():
+
+    The maximum possible number of packages in the system. Helpful for per
+    package facilities to preallocate per package information.
+
+
+* Cores:
+
+  A core consists of 1 or more threads. It does not matter whether the threads
+  are SMT- or CMT-type threads.
+
+  AMD's nomenclature for a CMT core is "Compute Unit". The kernel always uses
+  "core".
+
+  Core-related topology information in the kernel:
+
+  - smp_num_siblings:
+
+    The number of threads in a core. The number of threads in a package can be
+    calculated by:
+
+       threads_per_package = cpuinfo_x86.x86_max_cores * smp_num_siblings
+
+
+* Threads:
+
+  A thread is a single scheduling unit. It's the equivalent to a logical Linux
+  CPU.
+
+  AMD's nomenclature for CMT threads is "Compute Unit Core". The kernel always
+  uses "thread".
+
+  Thread-related topology information in the kernel:
+
+  - topology_core_cpumask():
+
+    The cpumask contains all online threads in the package to which a thread
+    belongs.
+
+    The number of online threads is also printed in /proc/cpuinfo "siblings."
+
+  - topology_sibling_mask():
+
+    The cpumask contains all online threads in the core to which a thread
+    belongs.
+
+  - topology_logical_package_id():
+
+    The logical package ID to which a thread belongs.
+
+  - topology_physical_package_id():
+
+    The physical package ID to which a thread belongs.
+
+  - topology_core_id():
+
+    The ID of the core to which a thread belongs. It is also printed in /proc/cpuinfo
+    "core_id."
+
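+A small sketch (kernel context assumed; written for this document, not part
+of the topology code) that dumps this mapping using the accessors above:
+
+	#include <linux/cpumask.h>
+	#include <linux/printk.h>
+	#include <linux/topology.h>
+
+	static void dump_cpu_topology(void)
+	{
+		unsigned int cpu;
+
+		for_each_online_cpu(cpu)
+			pr_info("CPU%u: package %d core %d\n", cpu,
+				topology_physical_package_id(cpu),
+				topology_core_id(cpu));
+	}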
+
+
+System topology examples
+
+Note:
+
+The alternative Linux CPU enumeration depends on how the BIOS enumerates the
+threads. Many BIOSes enumerate all threads 0 first and then all threads 1.
+That has the "advantage" that the logical Linux CPU numbers of threads 0 stay
+the same whether threads are enabled or not. That's merely an implementation
+detail and has no practical impact.
+
+1) Single Package, Single Core
+
+   [package 0] -> [core 0] -> [thread 0] -> Linux CPU 0
+
+2) Single Package, Dual Core
+
+   a) One thread per core
+
+       [package 0] -> [core 0] -> [thread 0] -> Linux CPU 0
+                   -> [core 1] -> [thread 0] -> Linux CPU 1
+
+   b) Two threads per core
+
+       [package 0] -> [core 0] -> [thread 0] -> Linux CPU 0
+                               -> [thread 1] -> Linux CPU 1
+                   -> [core 1] -> [thread 0] -> Linux CPU 2
+                               -> [thread 1] -> Linux CPU 3
+
+      Alternative enumeration:
+
+       [package 0] -> [core 0] -> [thread 0] -> Linux CPU 0
+                               -> [thread 1] -> Linux CPU 2
+                   -> [core 1] -> [thread 0] -> Linux CPU 1
+                               -> [thread 1] -> Linux CPU 3
+
+      AMD nomenclature for CMT systems:
+
+       [node 0] -> [Compute Unit 0] -> [Compute Unit Core 0] -> Linux CPU 0
+                                    -> [Compute Unit Core 1] -> Linux CPU 1
+                -> [Compute Unit 1] -> [Compute Unit Core 0] -> Linux CPU 2
+                                    -> [Compute Unit Core 1] -> Linux CPU 3
+
+3) Dual Package, Dual Core
+
+   a) One thread per core
+
+       [package 0] -> [core 0] -> [thread 0] -> Linux CPU 0
+                   -> [core 1] -> [thread 0] -> Linux CPU 1
+
+       [package 1] -> [core 0] -> [thread 0] -> Linux CPU 2
+                   -> [core 1] -> [thread 0] -> Linux CPU 3
+
+   b) Two threads per core
+
+       [package 0] -> [core 0] -> [thread 0] -> Linux CPU 0
+                               -> [thread 1] -> Linux CPU 1
+                   -> [core 1] -> [thread 0] -> Linux CPU 2
+                               -> [thread 1] -> Linux CPU 3
+
+       [package 1] -> [core 0] -> [thread 0] -> Linux CPU 4
+                               -> [thread 1] -> Linux CPU 5
+                   -> [core 1] -> [thread 0] -> Linux CPU 6
+                               -> [thread 1] -> Linux CPU 7
+
+      Alternative enumeration:
+
+       [package 0] -> [core 0] -> [thread 0] -> Linux CPU 0
+                               -> [thread 1] -> Linux CPU 4
+                   -> [core 1] -> [thread 0] -> Linux CPU 1
+                               -> [thread 1] -> Linux CPU 5
+
+       [package 1] -> [core 0] -> [thread 0] -> Linux CPU 2
+                               -> [thread 1] -> Linux CPU 6
+                   -> [core 1] -> [thread 0] -> Linux CPU 3
+                               -> [thread 1] -> Linux CPU 7
+
+      AMD nomenclature for CMT systems:
+
+       [node 0] -> [Compute Unit 0] -> [Compute Unit Core 0] -> Linux CPU 0
+                                    -> [Compute Unit Core 1] -> Linux CPU 1
+                -> [Compute Unit 1] -> [Compute Unit Core 0] -> Linux CPU 2
+                                    -> [Compute Unit Core 1] -> Linux CPU 3
+
+       [node 1] -> [Compute Unit 0] -> [Compute Unit Core 0] -> Linux CPU 4
+                                    -> [Compute Unit Core 1] -> Linux CPU 5
+                -> [Compute Unit 1] -> [Compute Unit Core 0] -> Linux CPU 6
+                                    -> [Compute Unit Core 1] -> Linux CPU 7
index c518dce7da4d62da22b192cbc3595ebf62ae3037..5aa738346062887731679d1376747f2e99db93e9 100644 (file)
@@ -19,7 +19,7 @@ ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
 ffffffef00000000 - ffffffff00000000 (=64 GB) EFI region mapping space
 ... unused hole ...
 ffffffff80000000 - ffffffffa0000000 (=512 MB)  kernel text mapping, from phys 0
-ffffffffa0000000 - ffffffffff5fffff (=1525 MB) module mapping space
+ffffffffa0000000 - ffffffffff5fffff (=1526 MB) module mapping space
 ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls
 ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole
 
@@ -31,8 +31,8 @@ vmalloc space is lazily synchronized into the different PML4 pages of
 the processes using the page fault handler, with init_level4_pgt as
 reference.
 
-Current X86-64 implementations only support 40 bits of address space,
-but we support up to 46 bits. This expands into MBZ space in the page tables.
+Current X86-64 implementations support up to 46 bits of address space (64 TB),
+which is our current limit. This expands into MBZ space in the page tables.
 
 We map EFI runtime services in the 'efi_pgd' PGD in a 64Gb large virtual
 memory window (this size is arbitrary, it can be raised later if needed).
index 67d99dd0e2e5085e787223b869f76c1fca448e15..b57df66532d2828a5e42ccfd7284511726e68334 100644 (file)
@@ -872,9 +872,9 @@ F:  drivers/perf/arm_pmu.c
 F:     include/linux/perf/arm_pmu.h
 
 ARM PORT
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.arm.linux.org.uk/
+W:     http://www.armlinux.org.uk/
 S:     Maintained
 F:     arch/arm/
 
@@ -886,35 +886,35 @@ F:        arch/arm/plat-*/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc.git
 
 ARM PRIMECELL AACI PL041 DRIVER
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 S:     Maintained
 F:     sound/arm/aaci.*
 
 ARM PRIMECELL CLCD PL110 DRIVER
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 S:     Maintained
 F:     drivers/video/fbdev/amba-clcd.*
 
 ARM PRIMECELL KMI PL050 DRIVER
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 S:     Maintained
 F:     drivers/input/serio/ambakmi.*
 F:     include/linux/amba/kmi.h
 
 ARM PRIMECELL MMCI PL180/1 DRIVER
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 S:     Maintained
 F:     drivers/mmc/host/mmci.*
 F:     include/linux/amba/mmci.h
 
 ARM PRIMECELL UART PL010 AND PL011 DRIVERS
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 S:     Maintained
 F:     drivers/tty/serial/amba-pl01*.c
 F:     include/linux/amba/serial.h
 
 ARM PRIMECELL BUS SUPPORT
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 S:     Maintained
 F:     drivers/amba/
 F:     include/linux/amba/bus.h
@@ -1036,7 +1036,7 @@ L:        linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 
 ARM/CLKDEV SUPPORT
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/include/asm/clkdev.h
@@ -1093,9 +1093,9 @@ F:        arch/arm/boot/dts/cx92755*
 N:     digicolor
 
 ARM/EBSA110 MACHINE SUPPORT
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.arm.linux.org.uk/
+W:     http://www.armlinux.org.uk/
 S:     Maintained
 F:     arch/arm/mach-ebsa110/
 F:     drivers/net/ethernet/amd/am79c961a.*
@@ -1124,9 +1124,9 @@ T:        git git://git.berlios.de/gemini-board
 F:     arch/arm/mm/*-fa*
 
 ARM/FOOTBRIDGE ARCHITECTURE
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.arm.linux.org.uk/
+W:     http://www.armlinux.org.uk/
 S:     Maintained
 F:     arch/arm/include/asm/hardware/dec21285.h
 F:     arch/arm/mach-footbridge/
@@ -1457,7 +1457,7 @@ S:        Maintained
 ARM/PT DIGITAL BOARD PORT
 M:     Stefan Eletzhofer <stefan.eletzhofer@eletztrick.de>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.arm.linux.org.uk/
+W:     http://www.armlinux.org.uk/
 S:     Maintained
 
 ARM/QUALCOMM SUPPORT
@@ -1493,9 +1493,9 @@ S:        Supported
 F:     arch/arm64/boot/dts/renesas/
 
 ARM/RISCPC ARCHITECTURE
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.arm.linux.org.uk/
+W:     http://www.armlinux.org.uk/
 S:     Maintained
 F:     arch/arm/include/asm/hardware/entry-macro-iomd.S
 F:     arch/arm/include/asm/hardware/ioc.h
@@ -1773,9 +1773,9 @@ F:        drivers/clk/versatile/clk-vexpress-osc.c
 F:     drivers/clocksource/versatile.c
 
 ARM/VFP SUPPORT
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.arm.linux.org.uk/
+W:     http://www.armlinux.org.uk/
 S:     Maintained
 F:     arch/arm/vfp/
 
@@ -2203,10 +2203,13 @@ BATMAN ADVANCED
 M:     Marek Lindner <mareklindner@neomailbox.ch>
 M:     Simon Wunderlich <sw@simonwunderlich.de>
 M:     Antonio Quartulli <a@unstable.cc>
-L:     b.a.t.m.a.n@lists.open-mesh.org
+L:     b.a.t.m.a.n@lists.open-mesh.org (moderated for non-subscribers)
 W:     https://www.open-mesh.org/
 Q:     https://patchwork.open-mesh.org/project/batman/list/
 S:     Maintained
+F:     Documentation/ABI/testing/sysfs-class-net-batman-adv
+F:     Documentation/ABI/testing/sysfs-class-net-mesh
+F:     Documentation/networking/batman-adv.txt
 F:     net/batman-adv/
 
 BAYCOM/HDLCDRV DRIVERS FOR AX.25
@@ -2921,7 +2924,7 @@ F:        mm/cleancache.c
 F:     include/linux/cleancache.h
 
 CLK API
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 L:     linux-clk@vger.kernel.org
 S:     Maintained
 F:     include/linux/clk.h
@@ -3355,9 +3358,9 @@ S:        Supported
 F:     drivers/net/ethernet/stmicro/stmmac/
 
 CYBERPRO FB DRIVER
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.arm.linux.org.uk/
+W:     http://www.armlinux.org.uk/
 S:     Maintained
 F:     drivers/video/fbdev/cyber2000fb.*
 
@@ -3882,7 +3885,7 @@ F:        Documentation/devicetree/bindings/display/st,stih4xx.txt
 
 DRM DRIVERS FOR VIVANTE GPU IP
 M:     Lucas Stach <l.stach@pengutronix.de>
-R:     Russell King <linux+etnaviv@arm.linux.org.uk>
+R:     Russell King <linux+etnaviv@armlinux.org.uk>
 R:     Christian Gmeiner <christian.gmeiner@gmail.com>
 L:     dri-devel@lists.freedesktop.org
 S:     Maintained
@@ -4224,8 +4227,8 @@ F:        Documentation/efi-stub.txt
 F:     arch/ia64/kernel/efi.c
 F:     arch/x86/boot/compressed/eboot.[ch]
 F:     arch/x86/include/asm/efi.h
-F:     arch/x86/platform/efi/*
-F:     drivers/firmware/efi/*
+F:     arch/x86/platform/efi/
+F:     drivers/firmware/efi/
 F:     include/linux/efi*.h
 
 EFI VARIABLE FILESYSTEM
@@ -4303,7 +4306,7 @@ F:        drivers/net/ethernet/agere/
 
 ETHERNET BRIDGE
 M:     Stephen Hemminger <stephen@networkplumber.org>
-L:     bridge@lists.linux-foundation.org
+L:     bridge@lists.linux-foundation.org (moderated for non-subscribers)
 L:     netdev@vger.kernel.org
 W:     http://www.linuxfoundation.org/en/Net:Bridge
 S:     Maintained
@@ -4745,7 +4748,7 @@ F:        drivers/platform/x86/fujitsu-tablet.c
 
 FUSE: FILESYSTEM IN USERSPACE
 M:     Miklos Szeredi <miklos@szeredi.hu>
-L:     fuse-devel@lists.sourceforge.net
+L:     linux-fsdevel@vger.kernel.org
 W:     http://fuse.sourceforge.net/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse.git
 S:     Maintained
@@ -4904,7 +4907,7 @@ F:        net/ipv4/gre_offload.c
 F:     include/net/gre.h
 
 GRETH 10/100/1G Ethernet MAC device driver
-M:     Kristoffer Glembo <kristoffer@gaisler.com>
+M:     Andreas Larsson <andreas@gaisler.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ethernet/aeroflex/
@@ -5043,6 +5046,7 @@ F:        include/linux/hw_random.h
 HARDWARE SPINLOCK CORE
 M:     Ohad Ben-Cohen <ohad@wizery.com>
 M:     Bjorn Andersson <bjorn.andersson@linaro.org>
+L:     linux-remoteproc@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/hwspinlock.git
 F:     Documentation/hwspinlock.txt
@@ -5744,14 +5748,7 @@ F:       drivers/char/hw_random/ixp4xx-rng.c
 
 INTEL ETHERNET DRIVERS
 M:     Jeff Kirsher <jeffrey.t.kirsher@intel.com>
-R:     Jesse Brandeburg <jesse.brandeburg@intel.com>
-R:     Shannon Nelson <shannon.nelson@intel.com>
-R:     Carolyn Wyborny <carolyn.wyborny@intel.com>
-R:     Don Skidmore <donald.c.skidmore@intel.com>
-R:     Bruce Allan <bruce.w.allan@intel.com>
-R:     John Ronciak <john.ronciak@intel.com>
-R:     Mitch Williams <mitch.a.williams@intel.com>
-L:     intel-wired-lan@lists.osuosl.org
+L:     intel-wired-lan@lists.osuosl.org (moderated for non-subscribers)
 W:     http://www.intel.com/support/feedback.htm
 W:     http://e1000.sourceforge.net/
 Q:     http://patchwork.ozlabs.org/project/intel-wired-lan/list/
@@ -6027,7 +6024,7 @@ F:        include/scsi/*iscsi*
 
 ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR
 M:     Or Gerlitz <ogerlitz@mellanox.com>
-M:     Sagi Grimberg <sagig@mellanox.com>
+M:     Sagi Grimberg <sagi@grimberg.me>
 M:     Roi Dayan <roid@mellanox.com>
 L:     linux-rdma@vger.kernel.org
 S:     Supported
@@ -6037,7 +6034,7 @@ Q:        http://patchwork.kernel.org/project/linux-rdma/list/
 F:     drivers/infiniband/ulp/iser/
 
 ISCSI EXTENSIONS FOR RDMA (ISER) TARGET
-M:     Sagi Grimberg <sagig@mellanox.com>
+M:     Sagi Grimberg <sagi@grimberg.me>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
 L:     linux-rdma@vger.kernel.org
 L:     target-devel@vger.kernel.org
@@ -6252,8 +6249,8 @@ S:        Maintained
 F:     tools/testing/selftests
 
 KERNEL VIRTUAL MACHINE (KVM)
-M:     Gleb Natapov <gleb@kernel.org>
 M:     Paolo Bonzini <pbonzini@redhat.com>
+M:     Radim Krčmář <rkrcmar@redhat.com>
 L:     kvm@vger.kernel.org
 W:     http://www.linux-kvm.org
 T:     git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
@@ -6400,10 +6397,10 @@ F:      mm/kmemleak.c
 F:     mm/kmemleak-test.c
 
 KPROBES
-M:     Ananth N Mavinakayanahalli <ananth@in.ibm.com>
+M:     Ananth N Mavinakayanahalli <ananth@linux.vnet.ibm.com>
 M:     Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 M:     "David S. Miller" <davem@davemloft.net>
-M:     Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
+M:     Masami Hiramatsu <mhiramat@kernel.org>
 S:     Maintained
 F:     Documentation/kprobes.txt
 F:     include/linux/kprobes.h
@@ -6905,7 +6902,7 @@ L:        linux-man@vger.kernel.org
 S:     Maintained
 
 MARVELL ARMADA DRM SUPPORT
-M:     Russell King <rmk+kernel@arm.linux.org.uk>
+M:     Russell King <rmk+kernel@armlinux.org.uk>
 S:     Maintained
 F:     drivers/gpu/drm/armada/
 
@@ -7576,7 +7573,7 @@ F:        drivers/infiniband/hw/nes/
 
 NETEM NETWORK EMULATOR
 M:     Stephen Hemminger <stephen@networkplumber.org>
-L:     netem@lists.linux-foundation.org
+L:     netem@lists.linux-foundation.org (moderated for non-subscribers)
 S:     Maintained
 F:     net/sched/sch_netem.c
 
@@ -7905,7 +7902,7 @@ S:        Supported
 F:     drivers/nfc/nxp-nci
 
 NXP TDA998X DRM DRIVER
-M:     Russell King <rmk+kernel@arm.linux.org.uk>
+M:     Russell King <rmk+kernel@armlinux.org.uk>
 S:     Supported
 F:     drivers/gpu/drm/i2c/tda998x_drv.c
 F:     include/drm/i2c/tda998x.h
@@ -7978,7 +7975,7 @@ F:        arch/arm/*omap*/*pm*
 F:     drivers/cpufreq/omap-cpufreq.c
 
 OMAP POWERDOMAIN SOC ADAPTATION LAYER SUPPORT
-M:     Rajendra Nayak <rnayak@ti.com>
+M:     Rajendra Nayak <rnayak@codeaurora.org>
 M:     Paul Walmsley <paul@pwsan.com>
 L:     linux-omap@vger.kernel.org
 S:     Maintained
@@ -8712,6 +8709,8 @@ F:        drivers/pinctrl/sh-pfc/
 
 PIN CONTROLLER - SAMSUNG
 M:     Tomasz Figa <tomasz.figa@gmail.com>
+M:     Krzysztof Kozlowski <k.kozlowski@samsung.com>
+M:     Sylwester Nawrocki <s.nawrocki@samsung.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 S:     Maintained
@@ -9140,6 +9139,13 @@ T:       git git://github.com/KrasnikovEugene/wcn36xx.git
 S:     Supported
 F:     drivers/net/wireless/ath/wcn36xx/
 
+QEMU MACHINE EMULATOR AND VIRTUALIZER SUPPORT
+M:     Gabriel Somlo <somlo@cmu.edu>
+M:     "Michael S. Tsirkin" <mst@redhat.com>
+L:     qemu-devel@nongnu.org
+S:     Maintained
+F:     drivers/firmware/qemu_fw_cfg.c
+
 RADOS BLOCK DEVICE (RBD)
 M:     Ilya Dryomov <idryomov@gmail.com>
 M:     Sage Weil <sage@redhat.com>
@@ -9315,6 +9321,7 @@ F:        include/linux/regmap.h
 REMOTE PROCESSOR (REMOTEPROC) SUBSYSTEM
 M:     Ohad Ben-Cohen <ohad@wizery.com>
 M:     Bjorn Andersson <bjorn.andersson@linaro.org>
+L:     linux-remoteproc@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/remoteproc.git
 S:     Maintained
 F:     drivers/remoteproc/
@@ -9324,6 +9331,7 @@ F:        include/linux/remoteproc.h
 REMOTE PROCESSOR MESSAGING (RPMSG) SUBSYSTEM
 M:     Ohad Ben-Cohen <ohad@wizery.com>
 M:     Bjorn Andersson <bjorn.andersson@linaro.org>
+L:     linux-remoteproc@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/rpmsg.git
 S:     Maintained
 F:     drivers/rpmsg/
@@ -9478,7 +9486,7 @@ F:        drivers/net/wireless/realtek/rtlwifi/rtl8192ce/
 RTL8XXXU WIRELESS DRIVER (rtl8xxxu)
 M:     Jes Sorensen <Jes.Sorensen@redhat.com>
 L:     linux-wireless@vger.kernel.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jes/linux.git rtl8723au-mac80211
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jes/linux.git rtl8xxxu-devel
 S:     Maintained
 F:     drivers/net/wireless/realtek/rtl8xxxu/
 
@@ -10003,7 +10011,8 @@ F:      drivers/infiniband/hw/ocrdma/
 
 SFC NETWORK DRIVER
 M:     Solarflare linux maintainers <linux-net-drivers@solarflare.com>
-M:     Shradha Shah <sshah@solarflare.com>
+M:     Edward Cree <ecree@solarflare.com>
+M:     Bert Kenward <bkenward@solarflare.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/sfc/
@@ -10584,6 +10593,14 @@ L:     linux-tegra@vger.kernel.org
 S:     Maintained
 F:     drivers/staging/nvec/
 
+STAGING - OLPC SECONDARY DISPLAY CONTROLLER (DCON)
+M:     Jens Frederich <jfrederich@gmail.com>
+M:     Daniel Drake <dsd@laptop.org>
+M:     Jon Nettleton <jon.nettleton@gmail.com>
+W:     http://wiki.laptop.org/go/DCON
+S:     Maintained
+F:     drivers/staging/olpc_dcon/
+
 STAGING - REALTEK RTL8712U DRIVERS
 M:     Larry Finger <Larry.Finger@lwfinger.net>
 M:     Florian Schilhabel <florian.c.schilhabel@googlemail.com>
@@ -11052,6 +11069,15 @@ S:     Maintained
 F:     drivers/clk/ti/
 F:     include/linux/clk/ti.h
 
+TI ETHERNET SWITCH DRIVER (CPSW)
+M:     Mugunthan V N <mugunthanvnm@ti.com>
+R:     Grygorii Strashko <grygorii.strashko@ti.com>
+L:     linux-omap@vger.kernel.org
+L:     netdev@vger.kernel.org
+S:     Maintained
+F:     drivers/net/ethernet/ti/cpsw*
+F:     drivers/net/ethernet/ti/davinci*
+
 TI FLASH MEDIA INTERFACE DRIVER
 M:     Alex Dubov <oakad@yahoo.com>
 S:     Maintained
@@ -12203,9 +12229,9 @@ S:      Maintained
 F:     drivers/media/tuners/tuner-xc2028.*
 
 XEN HYPERVISOR INTERFACE
-M:     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 M:     Boris Ostrovsky <boris.ostrovsky@oracle.com>
 M:     David Vrabel <david.vrabel@citrix.com>
+M:     Juergen Gross <jgross@suse.com>
 L:     xen-devel@lists.xenproject.org (moderated for non-subscribers)
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git
 S:     Supported
@@ -12217,16 +12243,16 @@ F:    include/xen/
 F:     include/uapi/xen/
 
 XEN HYPERVISOR ARM
-M:     Stefano Stabellini <stefano.stabellini@eu.citrix.com>
+M:     Stefano Stabellini <sstabellini@kernel.org>
 L:     xen-devel@lists.xenproject.org (moderated for non-subscribers)
-S:     Supported
+S:     Maintained
 F:     arch/arm/xen/
 F:     arch/arm/include/asm/xen/
 
 XEN HYPERVISOR ARM64
-M:     Stefano Stabellini <stefano.stabellini@eu.citrix.com>
+M:     Stefano Stabellini <sstabellini@kernel.org>
 L:     xen-devel@lists.xenproject.org (moderated for non-subscribers)
-S:     Supported
+S:     Maintained
 F:     arch/arm64/xen/
 F:     arch/arm64/include/asm/xen/
 
index 916b26e999d8f15190afd6136bf0b357eb658c28..acf6155421cc244913b71c6d3b95cd17690e64ff 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 VERSION = 4
 PATCHLEVEL = 6
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
-NAME = Blurry Fish Butt
+EXTRAVERSION = -rc7
+NAME = Charred Weasel
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
@@ -1008,7 +1008,8 @@ prepare0: archprepare FORCE
 prepare: prepare0 prepare-objtool
 
 ifdef CONFIG_STACK_VALIDATION
-  has_libelf := $(shell echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf - &> /dev/null && echo 1 || echo 0)
+  has_libelf := $(call try-run,\
+               echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf -,1,0)
   ifeq ($(has_libelf),1)
     objtool_target := tools/objtool FORCE
   else
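For readers unfamiliar with the helper: try-run (from scripts/Kbuild.include) runs the given command, echoes the second argument ("1") on success and the third ("0") on failure, and removes its temporary output files either way, which the bare $(shell ...) pipeline it replaces did not guarantee. The probe itself is just the empty program below, fed to $(HOSTCC) on stdin and linked against libelf; a minimal sketch of the equivalent standalone source:

/* Equivalent of the inline "int main() {}" probe: if this compiles and
 * links with -lelf, has_libelf evaluates to 1 and objtool gets built. */
int main(void)
{
	return 0;
}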
index 208aae071b378550fa47ade47332ec9351f62e43..a8767430df7d6012463c8a34366d251496d15364 100644 (file)
@@ -35,8 +35,10 @@ config ARC
        select NO_BOOTMEM
        select OF
        select OF_EARLY_FLATTREE
+       select OF_RESERVED_MEM
        select PERF_USE_VMALLOC
        select HAVE_DEBUG_STACKOVERFLOW
+       select HAVE_GENERIC_DMA_COHERENT
 
 config MIGHT_HAVE_PCI
        bool
@@ -56,6 +58,9 @@ config GENERIC_CSUM
 config RWSEM_GENERIC_SPINLOCK
        def_bool y
 
+config ARCH_DISCONTIGMEM_ENABLE
+       def_bool y
+
 config ARCH_FLATMEM_ENABLE
        def_bool y
 
@@ -345,6 +350,15 @@ config ARC_HUGEPAGE_16M
 
 endchoice
 
+config NODES_SHIFT
+       int "Maximum NUMA Nodes (as a power of 2)"
+       default "1" if !DISCONTIGMEM
+       default "2" if DISCONTIGMEM
+       depends on NEED_MULTIPLE_NODES
+       ---help---
+         Accessing memory beyond 1GB (with or w/o PAE) requires 2 memory
+         zones.
+
 if ISA_ARCOMPACT
 
 config ARC_COMPACT_IRQ_LEVELS
@@ -453,6 +467,7 @@ config LINUX_LINK_BASE
 
 config HIGHMEM
        bool "High Memory Support"
+       select DISCONTIGMEM
        help
          With ARC 2G:2G address split, only upper 2G is directly addressable by
          kernel. Enable this to potentially allow access to rest of 2G and PAE
@@ -593,7 +608,6 @@ config PCI_SYSCALL
        def_bool PCI
 
 source "drivers/pci/Kconfig"
-source "drivers/pci/pcie/Kconfig"
 
 endmenu
 
index ab5d5701e11d448200d3010df229c9f5f57875d9..44a578c10732cd1ab79558415e3c66f6825c98bd 100644 (file)
                        clocks = <&apbclk>;
                        clock-names = "stmmaceth";
                        max-speed = <100>;
-                       mdio0 {
-                               #address-cells = <1>;
-                               #size-cells = <0>;
-                               compatible = "snps,dwmac-mdio";
-                               phy1: ethernet-phy@1 {
-                                       reg = <1>;
-                               };
-                       };
                };
 
                ehci@0x40000 {
index f8b396c9aedb77e23da95d5cd10c0272102b13fe..491b3b5f22bdcad4e787f9770448de4544241a53 100644 (file)
@@ -42,6 +42,7 @@ CONFIG_DEVTMPFS=y
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_BLK_DEV_LOOP=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_NETDEVICES=y
index 56128ea2b748783a5f0011a62cca2c6f5c9f7218..b25ee73b2e79a7967bfe9c31b0949ff9ece52b4d 100644 (file)
@@ -43,6 +43,7 @@ CONFIG_DEVTMPFS=y
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_BLK_DEV_LOOP=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_NETDEVICES=y
diff --git a/arch/arc/include/asm/fb.h b/arch/arc/include/asm/fb.h
new file mode 100644 (file)
index 0000000..bd3f68c
--- /dev/null
@@ -0,0 +1,19 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+                               unsigned long off)
+{
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+       return 0;
+}
+
+#endif /* _ASM_FB_H_ */
index 17f85c9c73cfe830a40280ad79bf9decb1b65a17..c22b181e8206f3162c4e0e19214f8b303f13c576 100644 (file)
 #include <asm/byteorder.h>
 #include <asm/page.h>
 
+#ifdef CONFIG_ISA_ARCV2
+#include <asm/barrier.h>
+#define __iormb()              rmb()
+#define __iowmb()              wmb()
+#else
+#define __iormb()              do { } while (0)
+#define __iowmb()              do { } while (0)
+#endif
+
 extern void __iomem *ioremap(phys_addr_t paddr, unsigned long size);
 extern void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
                                  unsigned long flags);
@@ -31,6 +40,15 @@ extern void iounmap(const void __iomem *addr);
 #define ioremap_wc(phy, sz)            ioremap(phy, sz)
 #define ioremap_wt(phy, sz)            ioremap(phy, sz)
 
+/*
+ * io{read,write}{16,32}be() macros
+ */
+#define ioread16be(p)          ({ u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
+#define ioread32be(p)          ({ u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
+
+#define iowrite16be(v,p)       ({ __iowmb(); __raw_writew((__force u16)cpu_to_be16(v), p); })
+#define iowrite32be(v,p)       ({ __iowmb(); __raw_writel((__force u32)cpu_to_be32(v), p); })
+
 /* Change struct page to physical address */
 #define page_to_phys(page)             (page_to_pfn(page) << PAGE_SHIFT)
 
@@ -108,15 +126,6 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
 
 }
 
-#ifdef CONFIG_ISA_ARCV2
-#include <asm/barrier.h>
-#define __iormb()              rmb()
-#define __iowmb()              wmb()
-#else
-#define __iormb()              do { } while (0)
-#define __iowmb()              do { } while (0)
-#endif
-
 /*
  * MMIO can also get buffered/optimized in micro-arch, so barriers needed
  * Based on ARM model for the typical use case
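As a quick illustration of what the new accessors guarantee (value conversion fused with the matching barrier: read barrier after the load, write barrier before the store), here is a hedged userspace model; compiler fences stand in for rmb()/wmb() and a little-endian host is assumed:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for __iormb()/__iowmb(); on ARCv2 they are rmb()/wmb(). */
static void io_rmb(void) { __atomic_thread_fence(__ATOMIC_ACQUIRE); }
static void io_wmb(void) { __atomic_thread_fence(__ATOMIC_RELEASE); }

/* Model of ioread32be(): raw load, byte swap, then the read barrier. */
static uint32_t model_ioread32be(const volatile uint32_t *p)
{
	uint32_t v = __builtin_bswap32(*p);	/* be32_to_cpu on LE hosts */
	io_rmb();
	return v;
}

/* Model of iowrite32be(): write barrier first, then swap and store. */
static void model_iowrite32be(uint32_t v, volatile uint32_t *p)
{
	io_wmb();
	*p = __builtin_bswap32(v);		/* cpu_to_be32 on LE hosts */
}

int main(void)
{
	uint32_t reg = 0;			/* pretend device register */

	model_iowrite32be(0x12345678, &reg);
	printf("readback: 0x%08x\n", model_ioread32be(&reg));
	return 0;
}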
index 37c2f751eebf03d8f9f16d997bacdcb5b8f6323f..d1ec7f6b31e0d986fe9fca340c5c46935008db70 100644 (file)
 #define STATUS_AD_MASK         (1<<STATUS_AD_BIT)
 #define STATUS_IE_MASK         (1<<STATUS_IE_BIT)
 
+/* status32 Bits as encoded/expected by CLRI/SETI */
+#define CLRI_STATUS_IE_BIT     4
+
+#define CLRI_STATUS_E_MASK     0xF
+#define CLRI_STATUS_IE_MASK    (1 << CLRI_STATUS_IE_BIT)
+
 #define AUX_USER_SP            0x00D
 #define AUX_IRQ_CTRL           0x00E
 #define AUX_IRQ_ACT            0x043   /* Active Intr across all levels */
@@ -100,6 +106,13 @@ static inline long arch_local_save_flags(void)
        :
        : "memory");
 
+       /* To be compatible with irq_save()/irq_restore(),
+        * encode the irq bits as expected by CLRI/SETI
+        * (this was needed to make CONFIG_TRACE_IRQFLAGS work)
+        */
+       temp = (1 << 5) |
+               ((!!(temp & STATUS_IE_MASK)) << CLRI_STATUS_IE_BIT) |
+               (temp & CLRI_STATUS_E_MASK);
        return temp;
 }
 
@@ -108,7 +121,7 @@ static inline long arch_local_save_flags(void)
  */
 static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
-       return !(flags & (STATUS_IE_MASK));
+       return !(flags & CLRI_STATUS_IE_MASK);
 }
 
 static inline int arch_irqs_disabled(void)
@@ -128,11 +141,32 @@ static inline void arc_softirq_clear(int irq)
 
 #else
 
+#ifdef CONFIG_TRACE_IRQFLAGS
+
+.macro TRACE_ASM_IRQ_DISABLE
+       bl      trace_hardirqs_off
+.endm
+
+.macro TRACE_ASM_IRQ_ENABLE
+       bl      trace_hardirqs_on
+.endm
+
+#else
+
+.macro TRACE_ASM_IRQ_DISABLE
+.endm
+
+.macro TRACE_ASM_IRQ_ENABLE
+.endm
+
+#endif
 .macro IRQ_DISABLE  scratch
        clri
+       TRACE_ASM_IRQ_DISABLE
 .endm
 
 .macro IRQ_ENABLE  scratch
+       TRACE_ASM_IRQ_ENABLE
        seti
 .endm
 
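A hedged userspace model of the re-encoding arch_local_save_flags() now performs, so that the saved value can be fed straight back to SETI and tested by the fixed arch_irqs_disabled_flags(); STATUS_IE_BIT is assumed to be 31 here, everything else uses the constants from the hunk above:

#include <stdint.h>
#include <stdio.h>

#define STATUS_IE_BIT		31	/* assumption for this sketch */
#define STATUS_IE_MASK		(1u << STATUS_IE_BIT)
#define CLRI_STATUS_IE_BIT	4
#define CLRI_STATUS_E_MASK	0xFu
#define CLRI_STATUS_IE_MASK	(1u << CLRI_STATUS_IE_BIT)

/* Mirror of the new tail of arch_local_save_flags(): fold STATUS32.IE
 * and the priority bits into the layout CLRI/SETI expect. */
static uint32_t encode_for_seti(uint32_t status32)
{
	return (1u << 5) |
	       ((!!(status32 & STATUS_IE_MASK)) << CLRI_STATUS_IE_BIT) |
	       (status32 & CLRI_STATUS_E_MASK);
}

/* Mirror of the fixed arch_irqs_disabled_flags() test. */
static int irqs_disabled_flags(uint32_t flags)
{
	return !(flags & CLRI_STATUS_IE_MASK);
}

int main(void)
{
	uint32_t on  = encode_for_seti(STATUS_IE_MASK | 0x3);
	uint32_t off = encode_for_seti(0x3);

	printf("IE set:   flags=0x%02x disabled=%d\n", on,  irqs_disabled_flags(on));
	printf("IE clear: flags=0x%02x disabled=%d\n", off, irqs_disabled_flags(off));
	return 0;
}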
diff --git a/arch/arc/include/asm/mmzone.h b/arch/arc/include/asm/mmzone.h
new file mode 100644 (file)
index 0000000..8e97136
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_MMZONE_H
+#define _ASM_ARC_MMZONE_H
+
+#ifdef CONFIG_DISCONTIGMEM
+
+extern struct pglist_data node_data[];
+#define NODE_DATA(nid) (&node_data[nid])
+
+static inline int pfn_to_nid(unsigned long pfn)
+{
+       int is_end_low = 1;
+
+       if (IS_ENABLED(CONFIG_ARC_HAS_PAE40))
+               is_end_low = pfn <= virt_to_pfn(0xFFFFFFFFUL);
+
+       /*
+        * node 0: lowmem:             0x8000_0000   to 0xFFFF_FFFF
+        * node 1: HIGHMEM w/o  PAE40: 0x0           to 0x7FFF_FFFF
+        *         HIGHMEM with PAE40: 0x1_0000_0000 to ...
+        */
+       if (pfn >= ARCH_PFN_OFFSET && is_end_low)
+               return 0;
+
+       return 1;
+}
+
+static inline int pfn_valid(unsigned long pfn)
+{
+       int nid = pfn_to_nid(pfn);
+
+       return (pfn <= node_end_pfn(nid));
+}
+#endif /* CONFIG_DISCONTIGMEM  */
+
+#endif
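A hedged userspace model of the node policy this header encodes, for the non-PAE40 case, with the boundaries taken from the comment above and PAGE_SHIFT assumed to be 12:

#include <stdio.h>

#define PAGE_SHIFT	12
#define ARCH_PFN_OFFSET	(0x80000000UL >> PAGE_SHIFT)	/* lowmem base  */
#define LOWMEM_END_PFN	(0xFFFFFFFFUL >> PAGE_SHIFT)	/* last low pfn */

/* Mirror of pfn_to_nid() without PAE40: lowmem pfns land on node 0,
 * everything else (the 0-based highmem window) on node 1. */
static int pfn_to_nid(unsigned long pfn)
{
	if (pfn >= ARCH_PFN_OFFSET && pfn <= LOWMEM_END_PFN)
		return 0;
	return 1;
}

int main(void)
{
	printf("pfn 0x80000 -> node %d\n", pfn_to_nid(0x80000));	/* lowmem: 0  */
	printf("pfn 0x00010 -> node %d\n", pfn_to_nid(0x00010));	/* highmem: 1 */
	return 0;
}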
index 36da89e2c853ef0551bfcee0f145454e23a81dd6..0d53854884d047557a981fece15936715a4cd950 100644 (file)
@@ -72,11 +72,20 @@ typedef unsigned long pgprot_t;
 
 typedef pte_t * pgtable_t;
 
+/*
+ * Use virt_to_pfn with caution:
+ * If used in pte or paddr related macros, it could cause truncation
+ * in PAE40 builds.
+ * As a rule of thumb, only use it in helpers starting with virt_.
+ * You have been warned!
+ */
 #define virt_to_pfn(kaddr)     (__pa(kaddr) >> PAGE_SHIFT)
 
 #define ARCH_PFN_OFFSET                virt_to_pfn(CONFIG_LINUX_LINK_BASE)
 
+#ifdef CONFIG_FLATMEM
 #define pfn_valid(pfn)         (((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+#endif
 
 /*
  * __pa, __va, virt_to_page (ALERT: deprecated, don't use them)
@@ -85,12 +94,10 @@ typedef pte_t * pgtable_t;
  * virt here means link-address/program-address as embedded in object code.
  * And for ARC, link-addr = physical address
  */
-#define __pa(vaddr)  ((unsigned long)vaddr)
+#define __pa(vaddr)  ((unsigned long)(vaddr))
 #define __va(paddr)  ((void *)((unsigned long)(paddr)))
 
-#define virt_to_page(kaddr)    \
-       (mem_map + virt_to_pfn((kaddr) - CONFIG_LINUX_LINK_BASE))
-
+#define virt_to_page(kaddr)    pfn_to_page(virt_to_pfn(kaddr))
 #define virt_addr_valid(kaddr)  pfn_valid(virt_to_pfn(kaddr))
 
 /* Default Permissions for stack/heaps pages (Non Executable) */
index 7d6c93e63adf3af60a0642b03b3ae798061b1e65..10d4b8b8e5450e83468f96f16d9ad18b5c62d3b0 100644 (file)
@@ -278,14 +278,13 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
 #define pmd_present(x)                 (pmd_val(x))
 #define pmd_clear(xp)                  do { pmd_val(*(xp)) = 0; } while (0)
 
-#define pte_page(pte)  \
-       (mem_map + virt_to_pfn(pte_val(pte) - CONFIG_LINUX_LINK_BASE))
-
+#define pte_page(pte)          pfn_to_page(pte_pfn(pte))
 #define mk_pte(page, prot)     pfn_pte(page_to_pfn(page), prot)
-#define pte_pfn(pte)           virt_to_pfn(pte_val(pte))
-#define pfn_pte(pfn, prot)     (__pte(((pte_t)(pfn) << PAGE_SHIFT) | \
-                                pgprot_val(prot)))
-#define __pte_index(addr)      (virt_to_pfn(addr) & (PTRS_PER_PTE - 1))
+#define pfn_pte(pfn, prot)     (__pte(((pte_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+
+/* Don't use virt_to_pfn for the macros below: it could cause truncation on PAE40 */
+#define pte_pfn(pte)           (pte_val(pte) >> PAGE_SHIFT)
+#define __pte_index(addr)      (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 
 /*
  * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
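To make the truncation those comments warn about concrete, a hedged userspace demonstration: with a 32-bit unsigned long and 40-bit physical addresses, routing a high pte value through a virt_to_pfn()-style cast silently drops bits 32..39, which is exactly why pte_pfn() above now shifts pte_val() directly:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint64_t pte_val = 0x100000000ULL;	/* PAE40 address, bit 32 set */

	/* The old virt_to_pfn()-style path on a 32-bit kernel: the pass
	 * through a 32-bit "unsigned long" loses the top 8 address bits. */
	uint32_t bad_pfn  = (uint32_t)pte_val >> PAGE_SHIFT;

	/* The fixed pte_pfn(): shift the full pte value, no truncation. */
	uint64_t good_pfn = pte_val >> PAGE_SHIFT;

	printf("truncated pfn: 0x%x\n", bad_pfn);			/* 0x0      */
	printf("correct pfn:   0x%llx\n", (unsigned long long)good_pfn); /* 0x100000 */
	return 0;
}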
index c1264607bbff3dca457fd47b8f078b62993b7af7..7a1c124ff021d53377d24ff5d1bb9cccbb2831de 100644 (file)
@@ -69,8 +69,11 @@ ENTRY(handle_interrupt)
 
        clri            ; To make status32.IE agree with CPU internal state
 
-       lr  r0, [ICAUSE]
+#ifdef CONFIG_TRACE_IRQFLAGS
+       TRACE_ASM_IRQ_DISABLE
+#endif
 
+       lr  r0, [ICAUSE]
        mov   blink, ret_from_exception
 
        b.d  arch_do_IRQ
@@ -169,6 +172,11 @@ END(EV_TLBProtV)
 
 .Lrestore_regs:
 
+       # Interrupts are actually disabled from this point on, but will get
+       # reenabled after we return from interrupt/exception.
+       # But irq tracer needs to be told now...
+       TRACE_ASM_IRQ_ENABLE
+
        ld      r0, [sp, PT_status32]   ; U/K mode at time of entry
        lr      r10, [AUX_IRQ_ACT]
 
index 431433929189c8b63e8b3efe41ef03030fa8b86f..0cb0abaa0479e53ab1ea7ef8e45ab0cd42d2e965 100644 (file)
@@ -341,6 +341,9 @@ END(call_do_page_fault)
 
 .Lrestore_regs:
 
+       # Interrupts are actually disabled from this point on, but will get
+       # reenabled after we return from interrupt/exception.
+       # But irq tracer needs to be told now...
        TRACE_ASM_IRQ_ENABLE
 
        lr      r10, [status32]
index d7709e3930a3dcaaa848affcb65d65ad946f1c8e..9e5eddbb856f1f32dd25f4b13132dd527725249c 100644 (file)
@@ -628,7 +628,7 @@ void flush_dcache_page(struct page *page)
 
                /* kernel reading from page with U-mapping */
                phys_addr_t paddr = (unsigned long)page_address(page);
-               unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;
+               unsigned long vaddr = page->index << PAGE_SHIFT;
 
                if (addr_not_cache_congruent(paddr, vaddr))
                        __flush_dcache_page(paddr, vaddr);
index 7d2c4fbf4f22eb1402bc666c077c652c40f38361..8be930394750a7cf7039a9dcc85cf6ee1994f4e6 100644 (file)
@@ -13,6 +13,7 @@
 #ifdef CONFIG_BLK_DEV_INITRD
 #include <linux/initrd.h>
 #endif
+#include <linux/of_fdt.h>
 #include <linux/swap.h>
 #include <linux/module.h>
 #include <linux/highmem.h>
@@ -29,11 +30,16 @@ static const unsigned long low_mem_start = CONFIG_LINUX_LINK_BASE;
 static unsigned long low_mem_sz;
 
 #ifdef CONFIG_HIGHMEM
-static unsigned long min_high_pfn;
+static unsigned long min_high_pfn, max_high_pfn;
 static u64 high_mem_start;
 static u64 high_mem_sz;
 #endif
 
+#ifdef CONFIG_DISCONTIGMEM
+struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
+EXPORT_SYMBOL(node_data);
+#endif
+
 /* User can over-ride above with "mem=nnn[KkMm]" in cmdline */
 static int __init setup_mem_sz(char *str)
 {
@@ -108,13 +114,11 @@ void __init setup_arch_memory(void)
        /* Last usable page of low mem */
        max_low_pfn = max_pfn = PFN_DOWN(low_mem_start + low_mem_sz);
 
-#ifdef CONFIG_HIGHMEM
-       min_high_pfn = PFN_DOWN(high_mem_start);
-       max_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
+#ifdef CONFIG_FLATMEM
+       /* pfn_valid() uses this */
+       max_mapnr = max_low_pfn - min_low_pfn;
 #endif
 
-       max_mapnr = max_pfn - min_low_pfn;
-
        /*------------- bootmem allocator setup -----------------------*/
 
        /*
@@ -128,7 +132,7 @@ void __init setup_arch_memory(void)
         * the crash
         */
 
-       memblock_add(low_mem_start, low_mem_sz);
+       memblock_add_node(low_mem_start, low_mem_sz, 0);
        memblock_reserve(low_mem_start, __pa(_end) - low_mem_start);
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -136,6 +140,9 @@ void __init setup_arch_memory(void)
                memblock_reserve(__pa(initrd_start), initrd_end - initrd_start);
 #endif
 
+       early_init_fdt_reserve_self();
+       early_init_fdt_scan_reserved_mem();
+
        memblock_dump_all();
 
        /*----------------- node/zones setup --------------------------*/
@@ -145,13 +152,6 @@ void __init setup_arch_memory(void)
        zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
        zones_holes[ZONE_NORMAL] = 0;
 
-#ifdef CONFIG_HIGHMEM
-       zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
-
-       /* This handles the peripheral address space hole */
-       zones_holes[ZONE_HIGHMEM] = min_high_pfn - max_low_pfn;
-#endif
-
        /*
         * We can't use the helper free_area_init(zones[]) because it uses
         * PAGE_OFFSET to compute the @min_low_pfn which would be wrong
@@ -164,6 +164,34 @@ void __init setup_arch_memory(void)
                            zones_holes);       /* holes */
 
 #ifdef CONFIG_HIGHMEM
+       /*
+        * Populate a new node with highmem
+        *
+        * On ARC (w/o PAE) HIGHMEM addresses are actually smaller (0 based)
+        * than addresses in normal, a.k.a. low, memory (0x8000_0000 based).
+        * Even with PAE, the huge peripheral space hole would waste a lot of
+        * mem with a single mem_map[]. This warrants a mem_map per region design.
+        * Thus HIGHMEM on ARC is implemented with DISCONTIGMEM.
+        *
+        * DISCONTIGMEM in turn requires multiple nodes. Node 0 above is
+        * populated with the normal memory zone while node 1 only has highmem.
+        */
+       node_set_online(1);
+
+       min_high_pfn = PFN_DOWN(high_mem_start);
+       max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
+
+       zones_size[ZONE_NORMAL] = 0;
+       zones_holes[ZONE_NORMAL] = 0;
+
+       zones_size[ZONE_HIGHMEM] = max_high_pfn - min_high_pfn;
+       zones_holes[ZONE_HIGHMEM] = 0;
+
+       free_area_init_node(1,                  /* node-id */
+                           zones_size,         /* num pages per zone */
+                           min_high_pfn,       /* first pfn of node */
+                           zones_holes);       /* holes */
+
        high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
        kmap_init();
 #endif
@@ -181,7 +209,7 @@ void __init mem_init(void)
        unsigned long tmp;
 
        reset_all_zones_managed_pages();
-       for (tmp = min_high_pfn; tmp < max_pfn; tmp++)
+       for (tmp = min_high_pfn; tmp < max_high_pfn; tmp++)
                free_highmem_page(pfn_to_page(tmp));
 #endif
 
index 6c667fb3544923fb32704bdf4c22ff587c016f5d..4e28d87e935637140af712abf5a661f338437db9 100644 (file)
 };
 
 &cpsw_emac0 {
-       phy_id = <&davinci_mdio>, <0>;
        phy-mode = "rmii";
        dual_emac_res_vlan = <1>;
+       fixed-link {
+               speed = <100>;
+               full-duplex;
+       };
 };
 
 &cpsw_emac1 {
index 55ca9c7dcf6aacf6f7a6702a266dc8725be3a4c7..0467846b4cc33cd0e5bf22c1973407fdc38248a9 100644 (file)
                        ti,no-idle-on-init;
                        reg = <0x50000000 0x2000>;
                        interrupts = <100>;
-                       dmas = <&edma 52>;
+                       dmas = <&edma 52 0>;
                        dma-names = "rxtx";
                        gpmc,num-cs = <7>;
                        gpmc,num-waitpins = <2>;
index 6e4f5af3d8f8b607bc0bce45cd419169b0f7945d..ba580a9da390fb3dc1a723b7b77da08e6600e96e 100644 (file)
                        ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 5>,
                                   <&edma_tptc2 0>;
 
-                       ti,edma-memcpy-channels = <32 33>;
+                       ti,edma-memcpy-channels = <58 59>;
                };
 
                edma_tptc0: tptc@49800000 {
                gpmc: gpmc@50000000 {
                        compatible = "ti,am3352-gpmc";
                        ti,hwmods = "gpmc";
-                       dmas = <&edma 52>;
+                       dmas = <&edma 52 0>;
                        dma-names = "rxtx";
                        clocks = <&l3s_gclk>;
                        clock-names = "fck";
index 83dfafaaba1bc019fab7432c8003930c7f8ae1e6..d5dd72047a7ed4a4ea102a571085a2e58c47c0b6 100644 (file)
        tx-num-evt = <32>;
        rx-num-evt = <32>;
 };
+
+&synctimer_32kclk {
+       assigned-clocks = <&mux_synctimer32k_ck>;
+       assigned-clock-parents = <&clkdiv32k_ick>;
+};
index 0a5fc5d02ce2ba9893ac30b6f9997df08d04eb65..4168eb9dd3698c827e8707007ecca0d0f918349a 100644 (file)
                #cooling-cells = <2>;
        };
 
-       extcon_usb1: extcon_usb1 {
-               compatible = "linux,extcon-usb-gpio";
-               id-gpio = <&gpio7 25 GPIO_ACTIVE_HIGH>;
-               pinctrl-names = "default";
-               pinctrl-0 = <&extcon_usb1_pins>;
-       };
-
        hdmi0: connector {
                compatible = "hdmi-connector";
                label = "hdmi";
                >;
        };
 
-       extcon_usb1_pins: extcon_usb1_pins {
-               pinctrl-single,pins = <
-                       DRA7XX_CORE_IOPAD(0x37ec, PIN_INPUT_PULLUP | MUX_MODE14) /* uart1_rtsn.gpio7_25 */
-               >;
-       };
-
        tpd12s015_pins: pinmux_tpd12s015_pins {
                pinctrl-single,pins = <
                        DRA7XX_CORE_IOPAD(0x37b0, PIN_OUTPUT | MUX_MODE14)              /* gpio7_10 CT_CP_HPD */
        pinctrl-0 = <&usb1_pins>;
 };
 
-&omap_dwc3_1 {
-       extcon = <&extcon_usb1>;
-};
-
 &omap_dwc3_2 {
        extcon = <&extcon_usb2>;
 };
index 3710755c6d76b52fe339ea8f8f71d7dd8a1dc0ec..85d2c377c3322f6be3a612d85661534771896153 100644 (file)
                        };
 
                        /* USB part of the eSATA/USB 2.0 port */
-                       usb@50000 {
+                       usb@58000 {
                                status = "okay";
                        };
 
index e0ea6a93a22ed722392295828c21802ac54dae2d..792a64ee0df7fac5d201863ecffb80d7381fec17 100644 (file)
@@ -4,6 +4,157 @@
  * published by the Free Software Foundation.
  */
 
+&pllss {
+       /*
+        * See TRM "2.6.10 Connected outputso DPLLS" and
+        * "2.6.11 Connected Outputs of DPLLJ". Only clkout is
+        * connected except for hdmi and usb.
+        */
+       adpll_mpu_ck: adpll@40 {
+               #clock-cells = <1>;
+               compatible = "ti,dm814-adpll-s-clock";
+               reg = <0x40 0x40>;
+               clocks = <&devosc_ck &devosc_ck &devosc_ck>;
+               clock-names = "clkinp", "clkinpulow", "clkinphif";
+               clock-output-names = "481c5040.adpll.dcoclkldo",
+                                    "481c5040.adpll.clkout",
+                                    "481c5040.adpll.clkoutx2",
+                                    "481c5040.adpll.clkouthif";
+       };
+
+       adpll_dsp_ck: adpll@80 {
+               #clock-cells = <1>;
+               compatible = "ti,dm814-adpll-lj-clock";
+               reg = <0x80 0x30>;
+               clocks = <&devosc_ck &devosc_ck>;
+               clock-names = "clkinp", "clkinpulow";
+               clock-output-names = "481c5080.adpll.dcoclkldo",
+                                    "481c5080.adpll.clkout",
+                                    "481c5080.adpll.clkoutldo";
+       };
+
+       adpll_sgx_ck: adpll@b0 {
+               #clock-cells = <1>;
+               compatible = "ti,dm814-adpll-lj-clock";
+               reg = <0xb0 0x30>;
+               clocks = <&devosc_ck &devosc_ck>;
+               clock-names = "clkinp", "clkinpulow";
+               clock-output-names = "481c50b0.adpll.dcoclkldo",
+                                    "481c50b0.adpll.clkout",
+                                    "481c50b0.adpll.clkoutldo";
+       };
+
+       adpll_hdvic_ck: adpll@e0 {
+               #clock-cells = <1>;
+               compatible = "ti,dm814-adpll-lj-clock";
+               reg = <0xe0 0x30>;
+               clocks = <&devosc_ck &devosc_ck>;
+               clock-names = "clkinp", "clkinpulow";
+               clock-output-names = "481c50e0.adpll.dcoclkldo",
+                                    "481c50e0.adpll.clkout",
+                                    "481c50e0.adpll.clkoutldo";
+       };
+
+       adpll_l3_ck: adpll@110 {
+               #clock-cells = <1>;
+               compatible = "ti,dm814-adpll-lj-clock";
+               reg = <0x110 0x30>;
+               clocks = <&devosc_ck &devosc_ck>;
+               clock-names = "clkinp", "clkinpulow";
+               clock-output-names = "481c5110.adpll.dcoclkldo",
+                                    "481c5110.adpll.clkout",
+                                    "481c5110.adpll.clkoutldo";
+       };
+
+       adpll_isp_ck: adpll@140 {
+               #clock-cells = <1>;
+               compatible = "ti,dm814-adpll-lj-clock";
+               reg = <0x140 0x30>;
+               clocks = <&devosc_ck &devosc_ck>;
+               clock-names = "clkinp", "clkinpulow";
+               clock-output-names = "481c5140.adpll.dcoclkldo",
+                                    "481c5140.adpll.clkout",
+                                    "481c5140.adpll.clkoutldo";
+       };
+
+       adpll_dss_ck: adpll@170 {
+               #clock-cells = <1>;
+               compatible = "ti,dm814-adpll-lj-clock";
+               reg = <0x170 0x30>;
+               clocks = <&devosc_ck &devosc_ck>;
+               clock-names = "clkinp", "clkinpulow";
+               clock-output-names = "481c5170.adpll.dcoclkldo",
+                                    "481c5170.adpll.clkout",
+                                    "481c5170.adpll.clkoutldo";
+       };
+
+       adpll_video0_ck: adpll@1a0 {
+               #clock-cells = <1>;
+               compatible = "ti,dm814-adpll-lj-clock";
+               reg = <0x1a0 0x30>;
+               clocks = <&devosc_ck &devosc_ck>;
+               clock-names = "clkinp", "clkinpulow";
+               clock-output-names = "481c51a0.adpll.dcoclkldo",
+                                    "481c51a0.adpll.clkout",
+                                    "481c51a0.adpll.clkoutldo";
+       };
+
+       adpll_video1_ck: adpll@1d0 {
+               #clock-cells = <1>;
+               compatible = "ti,dm814-adpll-lj-clock";
+               reg = <0x1d0 0x30>;
+               clocks = <&devosc_ck &devosc_ck>;
+               clock-names = "clkinp", "clkinpulow";
+               clock-output-names = "481c51d0.adpll.dcoclkldo",
+                                    "481c51d0.adpll.clkout",
+                                    "481c51d0.adpll.clkoutldo";
+       };
+
+       adpll_hdmi_ck: adpll@200 {
+               #clock-cells = <1>;
+               compatible = "ti,dm814-adpll-lj-clock";
+               reg = <0x200 0x30>;
+               clocks = <&devosc_ck &devosc_ck>;
+               clock-names = "clkinp", "clkinpulow";
+               clock-output-names = "481c5200.adpll.dcoclkldo",
+                                    "481c5200.adpll.clkout",
+                                    "481c5200.adpll.clkoutldo";
+       };
+
+       adpll_audio_ck: adpll@230 {
+               #clock-cells = <1>;
+               compatible = "ti,dm814-adpll-lj-clock";
+               reg = <0x230 0x30>;
+               clocks = <&devosc_ck &devosc_ck>;
+               clock-names = "clkinp", "clkinpulow";
+               clock-output-names = "481c5230.adpll.dcoclkldo",
+                                    "481c5230.adpll.clkout",
+                                    "481c5230.adpll.clkoutldo";
+       };
+
+       adpll_usb_ck: adpll@260 {
+               #clock-cells = <1>;
+               compatible = "ti,dm814-adpll-lj-clock";
+               reg = <0x260 0x30>;
+               clocks = <&devosc_ck &devosc_ck>;
+               clock-names = "clkinp", "clkinpulow";
+               clock-output-names = "481c5260.adpll.dcoclkldo",
+                                    "481c5260.adpll.clkout",
+                                    "481c5260.adpll.clkoutldo";
+       };
+
+       adpll_ddr_ck: adpll@290 {
+               #clock-cells = <1>;
+               compatible = "ti,dm814-adpll-lj-clock";
+               reg = <0x290 0x30>;
+               clocks = <&devosc_ck &devosc_ck>;
+               clock-names = "clkinp", "clkinpulow";
+               clock-output-names = "481c5290.adpll.dcoclkldo",
+                                    "481c5290.adpll.clkout",
+                                    "481c5290.adpll.clkoutldo";
+       };
+};
+
 &pllss_clocks {
        timer1_fck: timer1_fck {
                #clock-cells = <0>;
                reg = <0x2e0>;
        };
 
+       /* CPTS_RFT_CLK in RMII_REFCLK_SRC, usually sourced from auiod */
+       cpsw_cpts_rft_clk: cpsw_cpts_rft_clk {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&adpll_video0_ck 1
+                         &adpll_video1_ck 1
+                         &adpll_audio_ck 1>;
+               ti,bit-shift = <1>;
+               reg = <0x2e8>;
+       };
+
+       /* REVISIT: Set up with a proper mux using RMII_REFCLK_SRC */
+       cpsw_125mhz_gclk: cpsw_125mhz_gclk {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <125000000>;
+       };
+
        sysclk18_ck: sysclk18_ck {
                #clock-cells = <0>;
                compatible = "ti,mux-clock";
                compatible = "fixed-clock";
                clock-frequency = <1000000000>;
        };
-
-       sysclk4_ck: sysclk4_ck {
-               #clock-cells = <0>;
-               compatible = "fixed-clock";
-               clock-frequency = <222000000>;
-       };
-
-       sysclk6_ck: sysclk6_ck {
-               #clock-cells = <0>;
-               compatible = "fixed-clock";
-               clock-frequency = <100000000>;
-       };
-
-       sysclk10_ck: sysclk10_ck {
-               #clock-cells = <0>;
-               compatible = "fixed-clock";
-               clock-frequency = <48000000>;
-       };
-
-        cpsw_125mhz_gclk: cpsw_125mhz_gclk {
-               #clock-cells = <0>;
-               compatible = "fixed-clock";
-               clock-frequency = <125000000>;
-       };
-
-       cpsw_cpts_rft_clk: cpsw_cpts_rft_clk {
-               #clock-cells = <0>;
-               compatible = "fixed-clock";
-               clock-frequency = <250000000>;
-       };
-
 };
 
 &prcm_clocks {
                clock-div = <78125>;
        };
 
+       /* L4_HS 220 MHz */
+       sysclk4_ck: sysclk4_ck {
+               #clock-cells = <0>;
+               compatible = "ti,fixed-factor-clock";
+               clocks = <&adpll_l3_ck 1>;
+               ti,clock-mult = <1>;
+               ti,clock-div = <1>;
+       };
+
+       /* L4_FWCFG */
+       sysclk5_ck: sysclk5_ck {
+               #clock-cells = <0>;
+               compatible = "ti,fixed-factor-clock";
+               clocks = <&adpll_l3_ck 1>;
+               ti,clock-mult = <1>;
+               ti,clock-div = <2>;
+       };
+
+       /* L4_LS 110 MHz */
+       sysclk6_ck: sysclk6_ck {
+               #clock-cells = <0>;
+               compatible = "ti,fixed-factor-clock";
+               clocks = <&adpll_l3_ck 1>;
+               ti,clock-mult = <1>;
+               ti,clock-div = <2>;
+       };
+
+       sysclk8_ck: sysclk8_ck {
+               #clock-cells = <0>;
+               compatible = "ti,fixed-factor-clock";
+               clocks = <&adpll_usb_ck 1>;
+               ti,clock-mult = <1>;
+               ti,clock-div = <1>;
+       };
+
+       sysclk10_ck: sysclk10_ck {
+               compatible = "ti,divider-clock";
+               reg = <0x324>;
+               ti,max-div = <7>;
+               #clock-cells = <0>;
+               clocks = <&adpll_usb_ck 1>;
+       };
+
        aud_clkin0_ck: aud_clkin0_ck {
                #clock-cells = <0>;
                compatible = "fixed-clock";
index 6f98dc8df9dd1fc430adb66e562d4e8bdd1afc9a..0e49741747efb80ffe4a87764713f6398dec58fd 100644 (file)
@@ -6,6 +6,32 @@
 
 #include "dm814x-clocks.dtsi"
 
+/* Compared to dm814x, dra62x does not have hdic, l3 or dss PLLs */
+&adpll_hdvic_ck {
+       status = "disabled";
+};
+
+&adpll_l3_ck {
+       status = "disabled";
+};
+
+&adpll_dss_ck {
+       status = "disabled";
+};
+
+/* Compared to dm814x, dra62x has interconnect clocks on isp PLL */
+&sysclk4_ck {
+       clocks = <&adpll_isp_ck 1>;
+};
+
+&sysclk5_ck {
+       clocks = <&adpll_isp_ck 1>;
+};
+
+&sysclk6_ck {
+       clocks = <&adpll_isp_ck 1>;
+};
+
 /*
  * Compared to dm814x, dra62x has different shifts and more mux options.
  * Please add the extra options for ysclk_14 and 16 if really needed.
index d0bae06b7eb7e8600f75213a576b778704ecacff..ef2164a99d0f0202a8f2c021c626dc55ec3f1390 100644 (file)
                clock-frequency = <32768>;
        };
 
-       sys_32k_ck: sys_32k_ck {
+       sys_clk32_crystal_ck: sys_clk32_crystal_ck {
                #clock-cells = <0>;
                compatible = "fixed-clock";
                clock-frequency = <32768>;
        };
 
+       sys_clk32_pseudo_ck: sys_clk32_pseudo_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&sys_clkin1>;
+               clock-mult = <1>;
+               clock-div = <610>;
+       };
+
        virt_12000000_ck: virt_12000000_ck {
                #clock-cells = <0>;
                compatible = "fixed-clock";
                ti,bit-shift = <22>;
                reg = <0x0558>;
        };
+
+       sys_32k_ck: sys_32k_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clk32_crystal_ck>, <&sys_clk32_pseudo_ck>, <&sys_clk32_pseudo_ck>, <&sys_clk32_pseudo_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x6c4>;
+       };
 };
index a2ddcb8c545a01ebdcc86d3c011e348f5295619b..45619f6162c56a6260b7d832678b4cc94b019de0 100644 (file)
@@ -91,8 +91,8 @@
                clock-frequency = <141666666>;
        };
 
-       pinctrl: pinctrl@c1109880 {
-               compatible = "amlogic,meson8-pinctrl";
+       pinctrl_cbus: pinctrl@c1109880 {
+               compatible = "amlogic,meson8-cbus-pinctrl";
                reg = <0xc1109880 0x10>;
                #address-cells = <1>;
                #size-cells = <1>;
                        #gpio-cells = <2>;
                };
 
-               gpio_ao: ao-bank@c1108030 {
-                       reg = <0xc8100014 0x4>,
-                             <0xc810002c 0x4>,
-                             <0xc8100024 0x8>;
-                       reg-names = "mux", "pull", "gpio";
-                       gpio-controller;
-                       #gpio-cells = <2>;
-               };
-
-               uart_ao_a_pins: uart_ao_a {
-                       mux {
-                               groups = "uart_tx_ao_a", "uart_rx_ao_a";
-                               function = "uart_ao";
-                       };
-               };
-
-               i2c_ao_pins: i2c_mst_ao {
-                       mux {
-                               groups = "i2c_mst_sck_ao", "i2c_mst_sda_ao";
-                               function = "i2c_mst_ao";
-                       };
-               };
-
                spi_nor_pins: nor {
                        mux {
                                groups = "nor_d", "nor_q", "nor_c", "nor_cs";
                };
        };
 
+       pinctrl_aobus: pinctrl@c8100084 {
+               compatible = "amlogic,meson8-aobus-pinctrl";
+               reg = <0xc8100084 0xc>;
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges;
+
+               gpio_ao: ao-bank@c1108030 {
+                       reg = <0xc8100014 0x4>,
+                             <0xc810002c 0x4>,
+                             <0xc8100024 0x8>;
+                       reg-names = "mux", "pull", "gpio";
+                       gpio-controller;
+                       #gpio-cells = <2>;
+               };
+
+               uart_ao_a_pins: uart_ao_a {
+                       mux {
+                               groups = "uart_tx_ao_a", "uart_rx_ao_a";
+                               function = "uart_ao";
+                       };
+               };
+
+               i2c_ao_pins: i2c_mst_ao {
+                       mux {
+                               groups = "i2c_mst_sck_ao", "i2c_mst_sda_ao";
+                               function = "i2c_mst_ao";
+                       };
+               };
+       };
 }; /* end of / */
index 8bad5571af461e86cabc9fe708db6865d0a95774..2bfe401a4da9181a8e029b9a073609ea5856c7fc 100644 (file)
                        reg = <0xc1108000 0x4>, <0xc1104000 0x460>;
                };
 
-               pinctrl: pinctrl@c1109880 {
-                       compatible = "amlogic,meson8b-pinctrl";
+               pinctrl_cbus: pinctrl@c1109880 {
+                       compatible = "amlogic,meson8b-cbus-pinctrl";
                        reg = <0xc1109880 0x10>;
                        #address-cells = <1>;
                        #size-cells = <1>;
                                gpio-controller;
                                #gpio-cells = <2>;
                        };
+               };
+
+               pinctrl_aobus: pinctrl@c8100084 {
+                       compatible = "amlogic,meson8b-aobus-pinctrl";
+                       reg = <0xc8100084 0xc>;
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       ranges;
 
                        gpio_ao: ao-bank@c1108030 {
                                reg = <0xc8100014 0x4>,
index b3c26a96a7262bdc2356060f900cc9b0c8bbd8eb..d9e2d9c6e999e4aa6f3a4c8f582fe9caf74953f4 100644 (file)
        regulator-name = "V28";
        regulator-min-microvolt = <2800000>;
        regulator-max-microvolt = <2800000>;
+       regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
        regulator-always-on; /* due to battery cover sensor */
 };
 
        regulator-name = "VCSI";
        regulator-min-microvolt = <1800000>;
        regulator-max-microvolt = <1800000>;
+       regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
 };
 
 &vaux3 {
        regulator-name = "VMMC2_30";
        regulator-min-microvolt = <2800000>;
        regulator-max-microvolt = <3000000>;
+       regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
 };
 
 &vaux4 {
        regulator-name = "VCAM_ANA_28";
        regulator-min-microvolt = <2800000>;
        regulator-max-microvolt = <2800000>;
+       regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
 };
 
 &vmmc1 {
        regulator-name = "VMMC1";
        regulator-min-microvolt = <1850000>;
        regulator-max-microvolt = <3150000>;
+       regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
 };
 
 &vmmc2 {
        regulator-name = "V28_A";
        regulator-min-microvolt = <2800000>;
        regulator-max-microvolt = <3000000>;
+       regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
        regulator-always-on; /* due VIO leak to AIC34 VDDs */
 };
 
        regulator-name = "VPLL";
        regulator-min-microvolt = <1800000>;
        regulator-max-microvolt = <1800000>;
+       regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
        regulator-always-on;
 };
 
        regulator-name = "VSDI_CSI";
        regulator-min-microvolt = <1800000>;
        regulator-max-microvolt = <1800000>;
+       regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
        regulator-always-on;
 };
 
        regulator-name = "VMMC2_IO_18";
        regulator-min-microvolt = <1800000>;
        regulator-max-microvolt = <1800000>;
+       regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
 };
 
 &vio {
index 387dc31822fe9c8b2729d28da389d410ffb55fe7..96f8ce7bd2afc0aa5507d81cf98e59b4eddd61ec 100644 (file)
@@ -46,7 +46,7 @@
                               0x480bd800 0x017c>;
                        interrupts = <24>;
                        iommus = <&mmu_isp>;
-                       syscon = <&scm_conf 0xdc>;
+                       syscon = <&scm_conf 0x6c>;
                        ti,phy-type = <OMAP3ISP_PHY_TYPE_COMPLEX_IO>;
                        #clock-cells = <1>;
                        ports {
index 2bd9c83300b2bc3810da219e821f79e4385cc98d..421fe9f8a9ebd779fdf79044cde8be1b387cb0dc 100644 (file)
@@ -70,7 +70,7 @@
                compatible = "arm,cortex-a9-twd-timer";
                clocks = <&mpu_periphclk>;
                reg = <0x48240600 0x20>;
-               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_RAW(3) | IRQ_TYPE_LEVEL_HIGH)>;
+               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_RAW(3) | IRQ_TYPE_EDGE_RISING)>;
                interrupt-parent = <&gic>;
        };
 
index 902657d6713b073df82d33158d3d1cd0fac5ef16..914bf4c47404f641f823a122c26ff93127f8d25e 100644 (file)
                                ldo1_reg: ldo1 {
                                        /* VDDAPHY_CAM: vdda_csiport */
                                        regulator-name = "ldo1";
-                                       regulator-min-microvolt = <1500000>;
+                                       regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <1800000>;
                                };
 
                                ldo4_reg: ldo4 {
                                        /* VDDAPHY_DISP: vdda_dsiport/hdmi */
                                        regulator-name = "ldo4";
-                                       regulator-min-microvolt = <1500000>;
+                                       regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <1800000>;
                                };
 
index ecc591dc0778ddbf77222d417884387511d31f0d..4d87d9c6c86d85ec32ae1bc4444fdd06ed5b317e 100644 (file)
                                ldo1_reg: ldo1 {
                                        /* VDDAPHY_CAM: vdda_csiport */
                                        regulator-name = "ldo1";
-                                       regulator-min-microvolt = <1500000>;
+                                       regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <1800000>;
                                };
 
                                ldo4_reg: ldo4 {
                                        /* VDDAPHY_DISP: vdda_dsiport/hdmi */
                                        regulator-name = "ldo4";
-                                       regulator-min-microvolt = <1500000>;
+                                       regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <1800000>;
                                };
 
index 38805ebbe2ba49f5dcd2ee92d80cde1e324cbb56..120b6b80cd39eacc2c874768e89eb14ab147b531 100644 (file)
                        omap5_pmx_wkup: pinmux@c840 {
                                compatible = "ti,omap5-padconf",
                                             "pinctrl-single";
-                               reg = <0xc840 0x0038>;
+                               reg = <0xc840 0x003c>;
                                #address-cells = <1>;
                                #size-cells = <0>;
                                #interrupt-cells = <1>;
index 65d0e8d9825947c68b06e53b162d10116df084c0..04f541bffbdd52d677c77cc717d2d82c63f3c549 100644 (file)
                };
 
                sata0: sata@29000000 {
-                       compatible              = "generic-ahci";
+                       compatible              = "qcom,apq8064-ahci", "generic-ahci";
                        status                  = "disabled";
                        reg                     = <0x29000000 0x180>;
                        interrupts              = <GIC_SPI 209 IRQ_TYPE_NONE>;
 
                        phys                    = <&sata_phy0>;
                        phy-names               = "sata-phy";
+                       ports-implemented       = <0x1>;
                };
 
                /* Temporary fixed regulator */
index ef53305784318a40d692fa7053a631abac28e552..8193139d0d8706037a29b5c03516f8d8dd15d12c 100644 (file)
@@ -1,6 +1,6 @@
 /dts-v1/;
 
-#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/clock/qcom,gcc-msm8974.h>
 #include "skeleton.dtsi"
 
                        clock-names = "core", "iface";
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       dmas = <&blsp2_dma 20>, <&blsp2_dma 21>;
-                       dma-names = "tx", "rx";
                };
 
                spmi_bus: spmi@fc4cf000 {
                        interrupt-controller;
                        #interrupt-cells = <4>;
                };
-
-               blsp2_dma: dma-controller@f9944000 {
-                       compatible = "qcom,bam-v1.4.0";
-                       reg = <0xf9944000 0x19000>;
-                       interrupts = <GIC_SPI 239 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&gcc GCC_BLSP2_AHB_CLK>;
-                       clock-names = "bam_clk";
-                       #dma-cells = <1>;
-                       qcom,ee = <0>;
-               };
        };
 
        smd {
index 0ad71b81d3a25f59aeaa3e329de0ac9f55c0cdad..cc6e28f81fe4f3721352deb9450f6c36a731b30c 100644 (file)
 };
 
 &pcie_bus_clk {
+       clock-frequency = <100000000>;
        status = "okay";
 };
 
index 6c08314427d6aab2e54d016319d8cc532bacdbdc..a9285d9a57cdf914b448d000f7db66f5f8846620 100644 (file)
 };
 
 &pfc {
-       pinctrl-0 = <&scif_clk_pins>;
-       pinctrl-names = "default";
-
        scif0_pins: serial0 {
                renesas,groups = "scif0_data_d";
                renesas,function = "scif0";
        };
 
-       scif_clk_pins: scif_clk {
-               renesas,groups = "scif_clk";
-               renesas,function = "scif_clk";
-       };
-
        ether_pins: ether {
                renesas,groups = "eth_link", "eth_mdio", "eth_rmii";
                renesas,function = "eth";
        status = "okay";
 };
 
-&scif_clk {
-       clock-frequency = <14745600>;
-       status = "okay";
-};
-
 &ether {
        pinctrl-0 = <&ether_pins &phy1_pins>;
        pinctrl-names = "default";
 };
 
 &pcie_bus_clk {
+       clock-frequency = <100000000>;
        status = "okay";
 };
 
index 6439f0569fe2c578bc26e66b97947fb7cf220dd9..1cd1b6a3a72a5c27f7aa43e930347bf7fb53158a 100644 (file)
                pcie_bus_clk: pcie_bus_clk {
                        compatible = "fixed-clock";
                        #clock-cells = <0>;
-                       clock-frequency = <100000000>;
+                       clock-frequency = <0>;
                        clock-output-names = "pcie_bus";
-                       status = "disabled";
                };
 
                /* External SCIF clock */
                        #clock-cells = <0>;
                        /* This value must be overridden by the board. */
                        clock-frequency = <0>;
-                       status = "disabled";
                };
 
                /* External USB clock - can be overridden by the board */
                        /* This value must be overridden by the board. */
                        clock-frequency = <0>;
                        clock-output-names = "can_clk";
-                       status = "disabled";
                };
 
                /* Special CPG clocks */
index 9d2b7e2f5975ed3dae7d9d107a5a0ed5c80369e9..346a49d805a7becd411cda921c7508edb35a3aa8 100644 (file)
 };
 
 &reg_dc1sw {
-       regulator-min-microvolt = <3000000>;
-       regulator-max-microvolt = <3000000>;
        regulator-name = "vcc-lcd";
 };
 
index e11d99d529ee913c589f20d3988b5a0462b3373f..690352d3ba4c4eb01bab7e3d7a40e0b505ef467c 100644 (file)
@@ -91,10 +91,7 @@ CONFIG_SATA_AHCI=y
 CONFIG_SATA_MV=y
 CONFIG_NETDEVICES=y
 CONFIG_NET_DSA_MV88E6060=y
-CONFIG_NET_DSA_MV88E6131=y
-CONFIG_NET_DSA_MV88E6123=y
-CONFIG_NET_DSA_MV88E6171=y
-CONFIG_NET_DSA_MV88E6352=y
+CONFIG_NET_DSA_MV88E6XXX=y
 CONFIG_MV643XX_ETH=y
 CONFIG_R8169=y
 CONFIG_MARVELL_PHY=y
index dc5797a2efab52f5ad701291ce68fda9ebc55996..6492407efd7e8822dc83780faaecad178eb699a8 100644 (file)
@@ -66,7 +66,7 @@ CONFIG_SATA_AHCI=y
 CONFIG_AHCI_MVEBU=y
 CONFIG_SATA_MV=y
 CONFIG_NETDEVICES=y
-CONFIG_NET_DSA_MV88E6171=y
+CONFIG_NET_DSA_MV88E6XXX=y
 CONFIG_MV643XX_ETH=y
 CONFIG_MVNETA=y
 CONFIG_MVPP2=y
index 6a5bc27538f1a9aead773b6c28525482b6ebe552..27a70a7a50f6c4fe5e8587ed551455d2205acf1f 100644 (file)
@@ -85,8 +85,7 @@ CONFIG_ATA=y
 CONFIG_SATA_MV=y
 CONFIG_NETDEVICES=y
 CONFIG_MII=y
-CONFIG_NET_DSA_MV88E6131=y
-CONFIG_NET_DSA_MV88E6123=y
+CONFIG_NET_DSA_MV88E6XXX=y
 CONFIG_MV643XX_ETH=y
 CONFIG_MARVELL_PHY=y
 # CONFIG_INPUT_MOUSEDEV is not set
index 07055eacbb0f24a045b04bf61f923da9c7ebb43c..a691d590fbd142fcc15a8761e311dfa283ee363c 100644 (file)
@@ -63,6 +63,9 @@ CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_TOUCHSCREEN_BU21013=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_AB8500_PONKEY=y
+CONFIG_RMI4_CORE=y
+CONFIG_RMI4_I2C=y
+CONFIG_RMI4_F11=y
 # CONFIG_SERIO is not set
 CONFIG_VT_HW_CONSOLE_BINDING=y
 # CONFIG_LEGACY_PTYS is not set
index b23c6c81c9ad88df191354d926be53cf4810e8f8..1ee94c716a7f87f45c00436fb9b2fa640014c811 100644 (file)
@@ -276,7 +276,7 @@ static inline int __attribute_const__ cpuid_feature_extract_field(u32 features,
        int feature = (features >> field) & 15;
 
        /* feature registers are signed values */
-       if (feature > 8)
+       if (feature > 7)
                feature -= 16;
 
        return feature;
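
The hunk above fixes sign extension of a 4-bit signed feature field: encodings 8..15 have the top bit set and stand for -8..-1, so the wrap threshold must be > 7. With the old > 8 test, the encoding 8 (i.e. -8) was left positive. A minimal standalone sketch of the corrected arithmetic:

	#include <assert.h>

	/* Sign-extend a 4-bit signed field the way the fixed helper does. */
	static int extract_signed4(unsigned int features, int field)
	{
		int feature = (features >> field) & 15;

		if (feature > 7)	/* 8..15 encode -8..-1 */
			feature -= 16;
		return feature;
	}

	int main(void)
	{
		assert(extract_signed4(0x8 << 4, 4) == -8); /* old '> 8' test gave +8 */
		assert(extract_signed4(0xf << 4, 4) == -1);
		assert(extract_signed4(0x7 << 4, 4) == 7);
		return 0;
	}
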
index fc8ba1663601e0743a05b7cda52703df9f9bc07a..99d9f630d6b6838954bde8ddbdd90b0b5ace17b8 100644 (file)
@@ -84,6 +84,7 @@
 
 #ifndef __ASSEMBLY__
 
+#ifdef CONFIG_CPU_CP15_MMU
 static inline unsigned int get_domain(void)
 {
        unsigned int domain;
@@ -103,6 +104,16 @@ static inline void set_domain(unsigned val)
          : : "r" (val) : "memory");
        isb();
 }
+#else
+static inline unsigned int get_domain(void)
+{
+       return 0;
+}
+
+static inline void set_domain(unsigned val)
+{
+}
+#endif
 
 #ifdef CONFIG_CPU_USE_DOMAINS
 #define modify_domain(dom,type)                                        \
index 7b84657fba3577ea3d29ecf1b93e3267feecbb59..194b6992338920680c14c46326999791e33defa6 100644 (file)
@@ -19,7 +19,7 @@
  * This may need to be greater than __NR_last_syscall+1 in order to
  * account for the padding in the syscall table
  */
-#define __NR_syscalls  (392)
+#define __NR_syscalls  (396)
 
 #define __ARCH_WANT_STAT64
 #define __ARCH_WANT_SYS_GETHOSTNAME
index 5dd2528e9e45e369d0879185291e6981dbea15e4..2cb9dc770e1d41e8867f949e1ef13e028568a3d3 100644 (file)
 #define __NR_membarrier                        (__NR_SYSCALL_BASE+389)
 #define __NR_mlock2                    (__NR_SYSCALL_BASE+390)
 #define __NR_copy_file_range           (__NR_SYSCALL_BASE+391)
+#define __NR_preadv2                   (__NR_SYSCALL_BASE+392)
+#define __NR_pwritev2                  (__NR_SYSCALL_BASE+393)
 
 /*
  * The following SWIs are ARM private.
index dfc7cd6851ad4bbd09b11ae94ee6056f691b1fcb..703fa0f3cd8f812907b47ac7c84646ff3e3aff94 100644 (file)
                CALL(sys_execveat)
                CALL(sys_userfaultfd)
                CALL(sys_membarrier)
-               CALL(sys_mlock2)
+/* 390 */      CALL(sys_mlock2)
                CALL(sys_copy_file_range)
+               CALL(sys_preadv2)
+               CALL(sys_pwritev2)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
index 9b8c5a1134347aea35b7c60850d2b671efc9d768..fb1a69eb49c1a842fc604dc2b64f905e30a39aac 100644 (file)
@@ -236,7 +236,7 @@ ENTRY(__setup_mpu)
        mov     r0, #CONFIG_VECTORS_BASE        @ Cover from VECTORS_BASE
        ldr     r5,=(MPU_AP_PL1RW_PL0NA | MPU_RGN_NORMAL)
	/* Writing N to bits 5:1 (RSR_SZ) --> region size 2^(N+1) */
-       mov     r6, #(((PAGE_SHIFT - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN)
+       mov     r6, #(((2 * PAGE_SHIFT - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN)
 
        setup_region r0, r5, r6, MPU_DATA_SIDE  @ VECTORS_BASE, PL0 NA, enabled
        beq     3f                              @ Memory-map not unified
index 139791ed473d5264682c004ea2ea7af8ddd28f5d..2c4bea39cf224f8368cca2b4dba61a5b807a3dde 100644 (file)
@@ -430,11 +430,13 @@ static void __init patch_aeabi_idiv(void)
        pr_info("CPU: div instructions available: patching division code\n");
 
        fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
+       asm ("" : "+g" (fn_addr));
        ((u32 *)fn_addr)[0] = udiv_instruction();
        ((u32 *)fn_addr)[1] = bx_lr_instruction();
        flush_icache_range(fn_addr, fn_addr + 8);
 
        fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
+       asm ("" : "+g" (fn_addr));
        ((u32 *)fn_addr)[0] = sdiv_instruction();
        ((u32 *)fn_addr)[1] = bx_lr_instruction();
        flush_icache_range(fn_addr, fn_addr + 8);
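
The empty asm with a "+g" constraint added above acts as a compiler barrier: it launders fn_addr so the compiler can no longer track what the pointer refers to, and therefore cannot discard or misoptimise the stores that patch the function body. A standalone sketch of the idiom (the function name is illustrative):

	#include <stdint.h>

	extern void target_func(void);

	uint32_t *patch_site(void)
	{
		uintptr_t addr = (uintptr_t)&target_func & ~1UL; /* strip Thumb bit */

		/*
		 * After this the provenance of 'addr' is opaque to the
		 * compiler, so later stores through it must really happen.
		 */
		asm ("" : "+g" (addr));
		return (uint32_t *)addr;
	}
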
@@ -510,7 +512,7 @@ static void __init elf_hwcap_fixup(void)
         */
        if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
            (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
-            cpuid_feature_extract(CPUID_EXT_ISAR3, 20) >= 3))
+            cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
                elf_hwcap &= ~HWCAP_SWP;
 }
 
index 6accd66d26f0edb01a35df2c0628b86266301b62..dded1b763c164c029432860c9a4fb9332ba9a300 100644 (file)
@@ -1061,15 +1061,27 @@ static void cpu_init_hyp_mode(void *dummy)
        kvm_arm_init_debug();
 }
 
+static void cpu_hyp_reinit(void)
+{
+       if (is_kernel_in_hyp_mode()) {
+               /*
+                * cpu_init_stage2() is safe to call even if the PM
+                * event was cancelled before the CPU was reset.
+                */
+               cpu_init_stage2(NULL);
+       } else {
+               if (__hyp_get_vectors() == hyp_default_vectors)
+                       cpu_init_hyp_mode(NULL);
+       }
+}
+
 static int hyp_init_cpu_notify(struct notifier_block *self,
                               unsigned long action, void *cpu)
 {
        switch (action) {
        case CPU_STARTING:
        case CPU_STARTING_FROZEN:
-               if (__hyp_get_vectors() == hyp_default_vectors)
-                       cpu_init_hyp_mode(NULL);
-               break;
+               cpu_hyp_reinit();
        }
 
        return NOTIFY_OK;
@@ -1084,9 +1096,8 @@ static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
                                    unsigned long cmd,
                                    void *v)
 {
-       if (cmd == CPU_PM_EXIT &&
-           __hyp_get_vectors() == hyp_default_vectors) {
-               cpu_init_hyp_mode(NULL);
+       if (cmd == CPU_PM_EXIT) {
+               cpu_hyp_reinit();
                return NOTIFY_OK;
        }
 
@@ -1101,10 +1112,17 @@ static void __init hyp_cpu_pm_init(void)
 {
        cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
 }
+static void __init hyp_cpu_pm_exit(void)
+{
+       cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
+}
 #else
 static inline void hyp_cpu_pm_init(void)
 {
 }
+static inline void hyp_cpu_pm_exit(void)
+{
+}
 #endif
 
 static void teardown_common_resources(void)
@@ -1127,6 +1145,20 @@ static int init_subsystems(void)
 {
        int err;
 
+       /*
+        * Register CPU Hotplug notifier
+        */
+       err = register_cpu_notifier(&hyp_init_cpu_nb);
+       if (err) {
+               kvm_err("Cannot register KVM init CPU notifier (%d)\n", err);
+               return err;
+       }
+
+       /*
+        * Register CPU lower-power notifier
+        */
+       hyp_cpu_pm_init();
+
        /*
         * Init HYP view of VGIC
         */
@@ -1166,6 +1198,8 @@ static void teardown_hyp_mode(void)
        free_hyp_pgds();
        for_each_possible_cpu(cpu)
                free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
+       unregister_cpu_notifier(&hyp_init_cpu_nb);
+       hyp_cpu_pm_exit();
 }
 
 static int init_vhe_mode(void)
@@ -1270,19 +1304,6 @@ static int init_hyp_mode(void)
        free_boot_hyp_pgd();
 #endif
 
-       cpu_notifier_register_begin();
-
-       err = __register_cpu_notifier(&hyp_init_cpu_nb);
-
-       cpu_notifier_register_done();
-
-       if (err) {
-               kvm_err("Cannot register HYP init CPU notifier (%d)\n", err);
-               goto out_err;
-       }
-
-       hyp_cpu_pm_init();
-
        /* set size of VMID supported by CPU */
        kvm_vmid_bits = kvm_get_vmid_bits();
        kvm_info("%d-bit VMID\n", kvm_vmid_bits);
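
Taken together, these hunks move CPU notifier and PM hook registration out of init_hyp_mode() and into init_subsystems(), with teardown_hyp_mode() undoing both, while cpu_hyp_reinit() picks the correct re-init path for VHE and non-VHE hosts. A condensed sketch of the resulting pairing (kernel names, error unwinding elided):

	static int init_subsystems(void)
	{
		int err;

		err = register_cpu_notifier(&hyp_init_cpu_nb);	/* hotplug re-init */
		if (err)
			return err;
		hyp_cpu_pm_init();				/* PM-exit re-init */
		/* ... VGIC and timer init ... */
		return 0;
	}

	static void teardown_hyp_mode(void)
	{
		/* ... free hyp stacks and page tables ... */
		unregister_cpu_notifier(&hyp_init_cpu_nb);
		hyp_cpu_pm_exit();
	}
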
index 58dbd5c439df45bc10497954db0e61b433646cd6..d6d4191e68f23cd1f80d8faf95fa1405d156b30a 100644 (file)
@@ -1004,7 +1004,7 @@ static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
        kvm_pfn_t pfn = *pfnp;
        gfn_t gfn = *ipap >> PAGE_SHIFT;
 
-       if (PageTransCompound(pfn_to_page(pfn))) {
+       if (PageTransCompoundMap(pfn_to_page(pfn))) {
                unsigned long mask;
                /*
                 * The address we faulted on is backed by a transparent huge
index d97c588550ad467775dfe8b05487eb03d540c2f4..bc4e63fa9808932b451931d13906d69dbe2bd3ee 100644 (file)
@@ -121,6 +121,11 @@ static void read_factory_config(struct nvmem_device *nvmem, void *context)
        const char *partnum = NULL;
        struct davinci_soc_info *soc_info = &davinci_soc_info;
 
+       if (!IS_BUILTIN(CONFIG_NVMEM)) {
+               pr_warn("Factory Config not available without CONFIG_NVMEM\n");
+               goto bad_config;
+       }
+
        ret = nvmem_device_read(nvmem, 0, sizeof(factory_config),
                                &factory_config);
        if (ret != sizeof(struct factory_config)) {
index f55ef2ef2f92eb88c9c7485e5bfda537af126d31..742133b7266a64d7ca551a475f64e82ff2fa3841 100644 (file)
@@ -33,6 +33,11 @@ void davinci_get_mac_addr(struct nvmem_device *nvmem, void *context)
        char *mac_addr = davinci_soc_info.emac_pdata->mac_addr;
        off_t offset = (off_t)context;
 
+       if (!IS_BUILTIN(CONFIG_NVMEM)) {
+               pr_warn("Cannot read MAC addr from EEPROM without CONFIG_NVMEM\n");
+               return;
+       }
+
        /* Read MAC addr from EEPROM */
        if (nvmem_device_read(nvmem, offset, ETH_ALEN, mac_addr) == ETH_ALEN)
                pr_info("Read MAC addr from EEPROM: %pM\n", mac_addr);
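
Both davinci hunks guard board callbacks with IS_BUILTIN(CONFIG_NVMEM), which is true only when the option is built in (=y); with NVMEM configured as a module or disabled, the callbacks now warn and bail out rather than call into an unavailable core. A sketch of the kconfig helper semantics relied on here:

	#include <linux/kconfig.h>
	#include <linux/types.h>

	/*
	 * Tristate truth table for the helpers (CONFIG_NVMEM stands for
	 * any tristate option):
	 *
	 *   option   IS_BUILTIN()   IS_MODULE()   IS_ENABLED()
	 *   =y            1              0             1
	 *   =m            0              1             1
	 *   unset         0              0             0
	 */
	static bool can_use_nvmem_from_builtin_code(void)
	{
		return IS_BUILTIN(CONFIG_NVMEM);
	}
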
index 7c21760f590ffd0d4cd47fcafcbcaffe64a85952..875a2bab64f67b1a7dda20e216a264f01679f8a9 100644 (file)
@@ -92,7 +92,7 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
                        if (IS_ERR(pd->clk[i]))
                                break;
 
-                       if (IS_ERR(pd->clk[i]))
+                       if (IS_ERR(pd->pclk[i]))
                                continue; /* Skip on first power up */
                        if (clk_set_parent(pd->clk[i], pd->pclk[i]))
                                pr_err("%s: error setting parent to clock%d\n",
index a5edd7d60266985472ac9de11e6fb9801f8eadeb..3d039ef021e0f0f4945040a01a8d0e79888bfa8f 100644 (file)
@@ -71,6 +71,7 @@ struct platform_device *__init imx_add_sdhci_esdhc_imx(
        if (!pdata)
                pdata = &default_esdhc_pdata;
 
-       return imx_add_platform_device(data->devid, data->id, res,
-                       ARRAY_SIZE(res), pdata, sizeof(*pdata));
+       return imx_add_platform_device_dmamask(data->devid, data->id, res,
+                       ARRAY_SIZE(res), pdata, sizeof(*pdata),
+                       DMA_BIT_MASK(32));
 }
index 7581e036bda62e5b58c01140929d08705f7bbb8e..ef9ed36e8a612154c5e3adbff9439b9a5871f85f 100644 (file)
@@ -461,7 +461,7 @@ static struct clockdomain ipu_7xx_clkdm = {
        .cm_inst          = DRA7XX_CM_CORE_AON_IPU_INST,
        .clkdm_offs       = DRA7XX_CM_CORE_AON_IPU_IPU_CDOFFS,
        .dep_bit          = DRA7XX_IPU_STATDEP_SHIFT,
-       .flags            = CLKDM_CAN_HWSUP_SWSUP,
+       .flags            = CLKDM_CAN_SWSUP,
 };
 
 static struct clockdomain mpu1_7xx_clkdm = {
index d85c24918c177d176fd191aa1ebcfaff5ce90f74..2abd53ae3e7a13f5801b7680ebd65464f2bede5c 100644 (file)
@@ -669,9 +669,9 @@ void __init dra7xxx_check_revision(void)
                case 0:
                        omap_revision = DRA722_REV_ES1_0;
                        break;
+               case 1:
                default:
-                       /* If we have no new revisions */
-                       omap_revision = DRA722_REV_ES1_0;
+                       omap_revision = DRA722_REV_ES2_0;
                        break;
                }
                break;
index 3c87e40650cf374daf3083437c8ec614fdbc136e..49de4dd227be4b7513b2aa7c414016464a86ce38 100644 (file)
@@ -368,6 +368,7 @@ void __init omap5_map_io(void)
 void __init dra7xx_map_io(void)
 {
        iotable_init(dra7xx_io_desc, ARRAY_SIZE(dra7xx_io_desc));
+       omap_barriers_init();
 }
 #endif
 /*
@@ -736,7 +737,8 @@ void __init omap5_init_late(void)
 #ifdef CONFIG_SOC_DRA7XX
 void __init dra7xx_init_early(void)
 {
-       omap2_set_globals_tap(-1, OMAP2_L4_IO_ADDRESS(DRA7XX_TAP_BASE));
+       omap2_set_globals_tap(DRA7XX_CLASS,
+                             OMAP2_L4_IO_ADDRESS(DRA7XX_TAP_BASE));
        omap2_set_globals_prcm_mpu(OMAP2_L4_IO_ADDRESS(OMAP54XX_PRCM_MPU_BASE));
        omap2_control_base_init();
        omap4_pm_init_early();
index f397bd6bd6e30149c525e270853701a916500e96..2c04f274147602c8a9131e14dbf0625a23b0f918 100644 (file)
@@ -274,6 +274,10 @@ static inline void omap5_irq_save_context(void)
  */
 static void irq_save_context(void)
 {
+       /* DRA7 has no SAR to save */
+       if (soc_is_dra7xx())
+               return;
+
        if (!sar_base)
                sar_base = omap4_get_sar_ram_base();
 
@@ -290,6 +294,9 @@ static void irq_sar_clear(void)
 {
        u32 val;
        u32 offset = SAR_BACKUP_STATUS_OFFSET;
+       /* DRA7 has no SAR to save */
+       if (soc_is_dra7xx())
+               return;
 
        if (soc_is_omap54xx())
                offset = OMAP5_SAR_BACKUP_STATUS_OFFSET;
index b6d62e4cdfddaf53f8d3aa7c1768b5e83255673e..2af6ff63e3b483f197d35761d43dfee90dcafe73 100644 (file)
@@ -1416,9 +1416,7 @@ static void _enable_sysc(struct omap_hwmod *oh)
            (sf & SYSC_HAS_CLOCKACTIVITY))
                _set_clockactivity(oh, oh->class->sysc->clockact, &v);
 
-       /* If the cached value is the same as the new value, skip the write */
-       if (oh->_sysc_cache != v)
-               _write_sysconfig(v, oh);
+       _write_sysconfig(v, oh);
 
        /*
         * Set the autoidle bit only after setting the smartidle bit
@@ -1481,7 +1479,9 @@ static void _idle_sysc(struct omap_hwmod *oh)
                _set_master_standbymode(oh, idlemode, &v);
        }
 
-       _write_sysconfig(v, oh);
+       /* If the cached value is the same as the new value, skip the write */
+       if (oh->_sysc_cache != v)
+               _write_sysconfig(v, oh);
 }
 
 /**
index 39736ad2a7548d41cc2ca078084c6e04e414bf56..df8327713d06566eeb58d8691fcc1ffc2e5952d6 100644 (file)
@@ -582,9 +582,11 @@ static struct omap_hwmod_ocp_if dm81xx_alwon_l3_slow__gpmc = {
        .user           = OCP_USER_MPU,
 };
 
+/* USB needs udelay 1 after reset at least on hp t410, use 2 for margin */
 static struct omap_hwmod_class_sysconfig dm81xx_usbhsotg_sysc = {
        .rev_offs       = 0x0,
        .sysc_offs      = 0x10,
+       .srst_udelay    = 2,
        .sysc_flags     = SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
                                SYSC_HAS_SOFTRESET,
        .idlemodes      = SIDLE_SMART | MSTANDBY_FORCE | MSTANDBY_SMART,
index 2dbd3785ee6f0d00b4c3d88bada28d83c0203b2a..d44e0e2f11063134e9dd031c0a55b523dd76befa 100644 (file)
@@ -198,7 +198,6 @@ void omap_sram_idle(void)
        int per_next_state = PWRDM_POWER_ON;
        int core_next_state = PWRDM_POWER_ON;
        int per_going_off;
-       int core_prev_state;
        u32 sdrc_pwr = 0;
 
        mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
@@ -278,16 +277,20 @@ void omap_sram_idle(void)
                sdrc_write_reg(sdrc_pwr, SDRC_POWER);
 
        /* CORE */
-       if (core_next_state < PWRDM_POWER_ON) {
-               core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm);
-               if (core_prev_state == PWRDM_POWER_OFF) {
-                       omap3_core_restore_context();
-                       omap3_cm_restore_context();
-                       omap3_sram_restore_context();
-                       omap2_sms_restore_context();
-               }
+       if (core_next_state < PWRDM_POWER_ON &&
+           pwrdm_read_prev_pwrst(core_pwrdm) == PWRDM_POWER_OFF) {
+               omap3_core_restore_context();
+               omap3_cm_restore_context();
+               omap3_sram_restore_context();
+               omap2_sms_restore_context();
+       } else {
+               /*
+                * In off-mode resume path above, omap3_core_restore_context
+                * also handles the INTC autoidle restore done here so limit
+                * this to non-off mode resume paths so we don't do it twice.
+                */
+               omap3_intc_resume_idle();
        }
-       omap3_intc_resume_idle();
 
        pwrdm_post_transition(NULL);
 
index 70df8f6cddccbf1ad7b8372eb847c2a2bdfc755c..364418c78bf37a07151a3213fec5b8a926ecd65f 100644 (file)
@@ -489,6 +489,7 @@ IS_OMAP_TYPE(3430, 0x3430)
 #define DRA752_REV_ES2_0       (DRA7XX_CLASS | (0x52 << 16) | (0x20 << 8))
 #define DRA722_REV_ES1_0       (DRA7XX_CLASS | (0x22 << 16) | (0x10 << 8))
+#define DRA722_REV_ES2_0       (DRA7XX_CLASS | (0x22 << 16) | (0x20 << 8))
 
 void omap2xxx_check_revision(void);
 void omap3xxx_check_revision(void);
index 913a319c7b005b0d1c5832ed57e2a1185b182543..fffb697bbf0e3aa85c3eace051a23e3015271a8b 100644 (file)
@@ -1235,5 +1235,6 @@ static struct platform_device pxa2xx_pxa_dma = {
 void __init pxa2xx_set_dmac_info(int nb_channels, int nb_requestors)
 {
        pxa_dma_pdata.dma_channels = nb_channels;
+       pxa_dma_pdata.nb_requestors = nb_requestors;
        pxa_register_device(&pxa2xx_pxa_dma, &pxa_dma_pdata);
 }
index c6f6ed1cbed08a7f0ebfa40d670ab95b7b1b2d88..36e3c79f497321f580166a7c332e714fb985537b 100644 (file)
@@ -61,10 +61,7 @@ config SA1100_H3100
        select MFD_IPAQ_MICRO
        help
          Say Y here if you intend to run this kernel on the Compaq iPAQ
-         H3100 handheld computer.  Information about this machine and the
-         Linux port to this machine can be found at:
-
-         <http://www.handhelds.org/Compaq/index.html#iPAQ_H3100>
+         H3100 handheld computer.
 
 config SA1100_H3600
        bool "Compaq iPAQ H3600/H3700"
@@ -73,10 +70,7 @@ config SA1100_H3600
        select MFD_IPAQ_MICRO
        help
          Say Y here if you intend to run this kernel on the Compaq iPAQ
-         H3600 handheld computer.  Information about this machine and the
-         Linux port to this machine can be found at:
-
-         <http://www.handhelds.org/Compaq/index.html#iPAQ_H3600>
+         H3600 and H3700 handheld computers.
 
 config SA1100_BADGE4
        bool "HP Labs BadgePAD 4"
index ad008e4b0c49a4aa1be23992be444198d67e41d5..67d79f9c6bad5be8be0418a331d90f4be42e94f7 100644 (file)
@@ -40,8 +40,7 @@ static void __init shmobile_setup_delay_hz(unsigned int max_cpu_core_hz,
 void __init shmobile_init_delay(void)
 {
        struct device_node *np, *cpus;
-       bool is_a7_a8_a9 = false;
-       bool is_a15 = false;
+       unsigned int div = 0;
        bool has_arch_timer = false;
        u32 max_freq = 0;
 
@@ -55,27 +54,22 @@ void __init shmobile_init_delay(void)
                if (!of_property_read_u32(np, "clock-frequency", &freq))
                        max_freq = max(max_freq, freq);
 
-               if (of_device_is_compatible(np, "arm,cortex-a8") ||
-                   of_device_is_compatible(np, "arm,cortex-a9")) {
-                       is_a7_a8_a9 = true;
-               } else if (of_device_is_compatible(np, "arm,cortex-a7")) {
-                       is_a7_a8_a9 = true;
-                       has_arch_timer = true;
-               } else if (of_device_is_compatible(np, "arm,cortex-a15")) {
-                       is_a15 = true;
+               if (of_device_is_compatible(np, "arm,cortex-a8")) {
+                       div = 2;
+               } else if (of_device_is_compatible(np, "arm,cortex-a9")) {
+                       div = 1;
+               } else if (of_device_is_compatible(np, "arm,cortex-a7") ||
+                        of_device_is_compatible(np, "arm,cortex-a15")) {
+                       div = 1;
                        has_arch_timer = true;
                }
        }
 
        of_node_put(cpus);
 
-       if (!max_freq)
+       if (!max_freq || !div)
                return;
 
-       if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER)) {
-               if (is_a7_a8_a9)
-                       shmobile_setup_delay_hz(max_freq, 1, 3);
-               else if (is_a15)
-                       shmobile_setup_delay_hz(max_freq, 2, 4);
-       }
+       if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER))
+               shmobile_setup_delay_hz(max_freq, 1, div);
 }
index 5d94b7a2fb108dc1bc1c5fcdd4edfa80ab2c0c7f..c160fa3007e943be451b82318149a69dce4d7e54 100644 (file)
@@ -13,6 +13,7 @@
 #include <asm/assembler.h>
 
        .arch   armv7-a
+       .arm
 
 ENTRY(secondary_trampoline)
        /* CPU1 will always fetch from 0x0 when it is brought out of reset.
index 69141357afe81991595f81348df9f2f0964db06f..db04142f88bc3fab0519a0e21db5ed0a022c19c7 100644 (file)
@@ -120,7 +120,7 @@ static int __init uniphier_smp_prepare_trampoline(unsigned int max_cpus)
        if (ret)
                return ret;
 
-       uniphier_smp_rom_boot_rsv2 = ioremap(rom_rsv2_phys, sizeof(SZ_4));
+       uniphier_smp_rom_boot_rsv2 = ioremap(rom_rsv2_phys, SZ_4);
        if (!uniphier_smp_rom_boot_rsv2) {
                pr_err("failed to map ROM_BOOT_RSV2 register\n");
                return -ENOMEM;
index deac58d5f1f7cf053ca215ec718c6902961a7908..c941e93048ad4d2ba09dabc2bb9451eaabcf8760 100644 (file)
@@ -762,7 +762,8 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
        if (!mask)
                return NULL;
 
-       buf = kzalloc(sizeof(*buf), gfp);
+       buf = kzalloc(sizeof(*buf),
+                     gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
        if (!buf)
                return NULL;
 
index d0ba3551d49a4b05371db7a12c02e151be9d4eae..3cced8455727953a2525571c5a62b5ad884e8bee 100644 (file)
@@ -235,7 +235,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
         */
        if (mapping && cache_is_vipt_aliasing())
                flush_pfn_alias(page_to_pfn(page),
-                               page->index << PAGE_CACHE_SHIFT);
+                               page->index << PAGE_SHIFT);
 }
 
 static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
@@ -250,7 +250,7 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
         *   data in the current VM view associated with this page.
         * - aliasing VIPT: we only need to find one mapping of this page.
         */
-       pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+       pgoff = page->index;
 
        flush_dcache_mmap_lock(mapping);
        vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
index 1dd10936d68d0422b328128fbb6a234d77adee1d..d5805e4bf2fc71a0db019c198daedccb847a8376 100644 (file)
@@ -87,7 +87,6 @@ static unsigned long irbar_read(void)
 /* MPU initialisation functions */
 void __init sanity_check_meminfo_mpu(void)
 {
-       int i;
        phys_addr_t phys_offset = PHYS_OFFSET;
        phys_addr_t aligned_region_size, specified_mem_size, rounded_mem_size;
        struct memblock_region *reg;
@@ -110,11 +109,13 @@ void __init sanity_check_meminfo_mpu(void)
                } else {
                        /*
                         * memblock auto merges contiguous blocks, remove
-                        * all blocks afterwards
+                        * all blocks afterwards in one go (we can't remove
+                        * blocks separately while iterating)
                         */
                        pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n",
-                                 &mem_start, &reg->base);
-                       memblock_remove(reg->base, reg->size);
+                                 &mem_end, &reg->base);
+                       memblock_remove(reg->base, 0 - reg->base);
+                       break;
                }
        }
 
@@ -144,7 +145,7 @@ void __init sanity_check_meminfo_mpu(void)
                pr_warn("Truncating memory from %pa to %pa (MPU region constraints)",
                                &specified_mem_size, &aligned_region_size);
                memblock_remove(mem_start + aligned_region_size,
-                               specified_mem_size - aligned_round_size);
+                               specified_mem_size - aligned_region_size);
 
                mem_end = mem_start + aligned_region_size;
        }
@@ -261,7 +262,7 @@ void __init mpu_setup(void)
                return;
 
        region_err = mpu_setup_region(MPU_RAM_REGION, PHYS_OFFSET,
-                                       ilog2(meminfo.bank[0].size),
+                                       ilog2(memblock.memory.regions[0].size),
                                        MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL);
        if (region_err) {
                panic("MPU region initialization failure! %d", region_err);
@@ -285,7 +286,7 @@ void __init arm_mm_memblock_reserve(void)
         * some architectures which the DRAM is the exception vector to trap,
         * alloc_page breaks with error, although it is not NULL, but "0."
         */
-       memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE);
+       memblock_reserve(CONFIG_VECTORS_BASE, 2 * PAGE_SIZE);
 #else /* ifndef CONFIG_CPU_V7M */
        /*
         * There is no dedicated vector page on V7-M. So nothing needs to be
index 0f8963a7e7d9db28465c63a2bab69a615fb6a2cb..6fcaac8e200f888e18207ae6cdfeca8adc000c5e 100644 (file)
@@ -281,12 +281,12 @@ __v7_ca17mp_setup:
        bl      v7_invalidate_l1
        ldmia   r12, {r1-r6, lr}
 #ifdef CONFIG_SMP
+       orr     r10, r10, #(1 << 6)             @ Enable SMP/nAMP mode
        ALT_SMP(mrc     p15, 0, r0, c1, c0, 1)
-       ALT_UP(mov      r0, #(1 << 6))          @ fake it for UP
-       tst     r0, #(1 << 6)                   @ SMP/nAMP mode enabled?
-       orreq   r0, r0, #(1 << 6)               @ Enable SMP/nAMP mode
-       orreq   r0, r0, r10                     @ Enable CPU-specific SMP bits
-       mcreq   p15, 0, r0, c1, c0, 1
+       ALT_UP(mov      r0, r10)                @ fake it for UP
+       orr     r10, r10, r0                    @ Set required bits
+       teq     r10, r0                         @ Were they already set?
+       mcrne   p15, 0, r10, c1, c0, 1          @ No, update register
 #endif
        b       __v7_setup_cont
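
The rework above folds the SMP/nAMP bit into the required CPU-specific bits first, then writes the auxiliary control register only if the result differs from the current value; the register may not be writable (for instance in the non-secure world), so a redundant write is best avoided. The same read-modify-write-if-changed logic in C (the accessors are hypothetical stand-ins for the mrc/mcr pair):

	extern unsigned int read_actlr(void);
	extern void write_actlr(unsigned int val);

	void enable_smp_bits(unsigned int required)
	{
		unsigned int cur  = read_actlr();
		unsigned int want = cur | required;

		if (want != cur)	/* skip the write if bits already set */
			write_actlr(want);
	}
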
 
index a055a5d443b76b83dde3c5f68739e54f9bb9ac2d..ba04877515245bfd4c4649488ce518a868533fdd 100644 (file)
                                     <0 113 4>,
                                     <0 114 4>,
                                     <0 115 4>;
+                       channel = <12>;
                        port-id = <1>;
                        dma-coherent;
                        clocks = <&xge1clk 0>;
index ae4a173df493a08c092b67a2aba4d08121ce8d55..5147d76989241612bb242025487bbb1aa7f55aa4 100644 (file)
                                     <0x0 0x65 0x4>,
                                     <0x0 0x66 0x4>,
                                     <0x0 0x67 0x4>;
+                       channel = <0>;
                        dma-coherent;
                        clocks = <&xge0clk 0>;
                        /* mac address will be overwritten by the bootloader */
index c49b5a85809c5bc96d48f3b8799b965a8f96e860..85820e2bca9df4ce72f63f69670d88586c23930a 100644 (file)
                reg = <0x0 0x30000000  0x0 0x10000000>;
                reg-names = "PCI ECAM";
 
-                         /* IO 0x4000_0000 - 0x4001_0000 */
-               ranges = <0x01000000 0 0x40000000 0 0x40000000 0 0x00010000
-                         /* MEM 0x4800_0000 - 0x5000_0000 */
-                         0x02000000 0 0x48000000 0 0x48000000 0 0x08000000
-                         /* MEM64 pref 0x6_0000_0000 - 0x7_0000_0000 */
-                         0x43000000 6 0x00000000 6 0x00000000 1 0x00000000>;
+               /*
+                * PCI ranges:
+                *   IO         not supported
+                *   MEM        0x4000_0000 - 0x6000_0000
+                *   MEM64 pref 0x40_0000_0000 - 0x60_0000_0000
+                */
+               ranges =
+                 <0x02000000    0 0x40000000    0 0x40000000    0 0x20000000
+                  0x43000000 0x40 0x00000000 0x40 0x00000000 0x20 0x00000000>;
                interrupt-map-mask = <0 0 0 7>;
                interrupt-map =
                      /* addr  pin  ic   icaddr  icintr */
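
The rewritten ranges follow the standard PCI device-tree encoding: in the first cell, bits 25:24 carry the space code (0b01 I/O, 0b10 32-bit MEM, 0b11 64-bit MEM) and bit 30 the prefetchable flag, followed by a 64-bit child address, a 64-bit parent address and a 64-bit size. A small decoder for the two high cells used above:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		const uint32_t hi[] = { 0x02000000, 0x43000000 };

		for (unsigned int i = 0; i < 2; i++) {
			unsigned int space = (hi[i] >> 24) & 0x3; /* 2=MEM32, 3=MEM64 */
			unsigned int pref  = (hi[i] >> 30) & 0x1;

			printf("0x%08x: space=%u prefetchable=%u\n",
			       hi[i], space, pref);
		}
		return 0;	/* prints space=2 pref=0, then space=3 pref=1 */
	}
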
index 933cba3599189c7f85c6a6adea77eb661be459f5..b6a130c2e5a49909f5b52bd53573cf3c633da93c 100644 (file)
@@ -24,17 +24,19 @@ soc0: soc@000000000 {
        };
 
        dsaf0: dsa@c7000000 {
+               #address-cells = <1>;
+               #size-cells = <0>;
                compatible = "hisilicon,hns-dsaf-v1";
                mode = "6port-16rss";
                interrupt-parent = <&mbigen_dsa>;
 
-               reg = <0x0 0xC0000000 0x0 0x420000
-                      0x0 0xC2000000 0x0 0x300000
-                      0x0 0xc5000000 0x0 0x890000
+               reg = <0x0 0xc5000000 0x0 0x890000
                       0x0 0xc7000000 0x0 0x60000
                       >;
 
-               phy-handle = <0 0 0 0 &soc0_phy0 &soc0_phy1 0 0>;
+               reg-names = "ppe-base","dsaf-base";
+               subctrl-syscon = <&dsaf_subctrl>;
+               reset-field-offset = <0>;
                interrupts = <
                        /* [14] ge fifo err 8 / xge 6 */
                        149 0x4 150 0x4 151 0x4 152 0x4
@@ -122,12 +124,31 @@ soc0: soc@000000000 {
                buf-size = <4096>;
                desc-num = <1024>;
                dma-coherent;
+
+               port@0 {
+                       reg = <0>;
+                       serdes-syscon = <&serdes_ctrl0>;
+               };
+               port@1 {
+                       reg = <1>;
+                       serdes-syscon = <&serdes_ctrl0>;
+               };
+               port@4 {
+                       reg = <4>;
+                       phy-handle = <&soc0_phy0>;
+                       serdes-syscon = <&serdes_ctrl1>;
+               };
+               port@5 {
+                       reg = <5>;
+                       phy-handle = <&soc0_phy1>;
+                       serdes-syscon = <&serdes_ctrl1>;
+               };
        };
 
        eth0: ethernet@0{
                compatible = "hisilicon,hns-nic-v1";
                ae-handle = <&dsaf0>;
-               port-id = <0>;
+               port-idx-in-ae = <0>;
                local-mac-address = [00 00 00 01 00 58];
                status = "disabled";
                dma-coherent;
@@ -135,56 +156,25 @@ soc0: soc@000000000 {
        eth1: ethernet@1{
                compatible = "hisilicon,hns-nic-v1";
                ae-handle = <&dsaf0>;
-               port-id = <1>;
+               port-idx-in-ae = <1>;
+               local-mac-address = [00 00 00 01 00 59];
                status = "disabled";
                dma-coherent;
        };
-       eth2: ethernet@2{
+       eth2: ethernet@4{
                compatible = "hisilicon,hns-nic-v1";
                ae-handle = <&dsaf0>;
-               port-id = <2>;
+               port-idx-in-ae = <4>;
                local-mac-address = [00 00 00 01 00 5a];
                status = "disabled";
                dma-coherent;
        };
-       eth3: ethernet@3{
+       eth3: ethernet@5{
                compatible = "hisilicon,hns-nic-v1";
                ae-handle = <&dsaf0>;
-               port-id = <3>;
+               port-idx-in-ae = <5>;
                local-mac-address = [00 00 00 01 00 5b];
                status = "disabled";
                dma-coherent;
        };
-       eth4: ethernet@4{
-               compatible = "hisilicon,hns-nic-v1";
-               ae-handle = <&dsaf0>;
-               port-id = <4>;
-               local-mac-address = [00 00 00 01 00 5c];
-               status = "disabled";
-               dma-coherent;
-       };
-       eth5: ethernet@5{
-               compatible = "hisilicon,hns-nic-v1";
-               ae-handle = <&dsaf0>;
-               port-id = <5>;
-               local-mac-address = [00 00 00 01 00 5d];
-               status = "disabled";
-               dma-coherent;
-       };
-       eth6: ethernet@6{
-               compatible = "hisilicon,hns-nic-v1";
-               ae-handle = <&dsaf0>;
-               port-id = <6>;
-               local-mac-address = [00 00 00 01 00 5e];
-               status = "disabled";
-               dma-coherent;
-       };
-       eth7: ethernet@7{
-               compatible = "hisilicon,hns-nic-v1";
-               ae-handle = <&dsaf0>;
-               port-id = <7>;
-               local-mac-address = [00 00 00 01 00 5f];
-               status = "disabled";
-               dma-coherent;
-       };
 };
index a7315ebe3883f276456280de64d31537099cd292..706d2426024f7f0665c47dc37b92f566cb87a726 100644 (file)
                compatible = "fixed-clock";
                #clock-cells = <0>;
                clock-frequency = <0>;
-               status = "disabled";
        };
 
        soc {
index 727ae5f8c4e716178a4cda1e484b5118a16646c5..b0ed44313a5bc134c4919f5b82b02eedfa01a590 100644 (file)
@@ -70,7 +70,6 @@
                i2c3 = &i2c3;
                i2c4 = &i2c4;
                i2c5 = &i2c5;
-               i2c6 = &i2c6;
        };
 };
 
index e682a3f52791b38caf45defec637e8247e64f6a3..651c9d9d2d54658b7fa4eb73b1e0ed5f57d02995 100644 (file)
 
                i2c2: i2c@58782000 {
                        compatible = "socionext,uniphier-fi2c";
-                       status = "disabled";
                        reg = <0x58782000 0x80>;
                        #address-cells = <1>;
                        #size-cells = <0>;
                        interrupts = <0 43 4>;
-                       pinctrl-names = "default";
-                       pinctrl-0 = <&pinctrl_i2c2>;
                        clocks = <&i2c_clk>;
-                       clock-frequency = <100000>;
+                       clock-frequency = <400000>;
                };
 
                i2c3: i2c@58783000 {
 
                i2c4: i2c@58784000 {
                        compatible = "socionext,uniphier-fi2c";
+                       status = "disabled";
                        reg = <0x58784000 0x80>;
                        #address-cells = <1>;
                        #size-cells = <0>;
                        interrupts = <0 45 4>;
+                       pinctrl-names = "default";
+                       pinctrl-0 = <&pinctrl_i2c4>;
                        clocks = <&i2c_clk>;
-                       clock-frequency = <400000>;
+                       clock-frequency = <100000>;
                };
 
                i2c5: i2c@58785000 {
                        clock-frequency = <400000>;
                };
 
-               i2c6: i2c@58786000 {
-                       compatible = "socionext,uniphier-fi2c";
-                       reg = <0x58786000 0x80>;
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-                       interrupts = <0 26 4>;
-                       clocks = <&i2c_clk>;
-                       clock-frequency = <400000>;
-               };
-
                system_bus: system-bus@58c00000 {
                        compatible = "socionext,uniphier-system-bus";
                        status = "disabled";
index 0e391dbfc42023aad2dabf26b5e9d7b49fa8aec2..3f29887995bcdc6844d29b80fec33e4e1279fc3b 100644 (file)
 #define VTCR_EL2_SL0_LVL1      (1 << 6)
 #define VTCR_EL2_T0SZ_MASK     0x3f
 #define VTCR_EL2_T0SZ_40B      24
-#define VTCR_EL2_VS            19
+#define VTCR_EL2_VS_SHIFT      19
+#define VTCR_EL2_VS_8BIT       (0 << VTCR_EL2_VS_SHIFT)
+#define VTCR_EL2_VS_16BIT      (1 << VTCR_EL2_VS_SHIFT)
 
 /*
  * We configure the Stage-2 page tables to always restrict the IPA space to be
  */
 #define VTCR_EL2_FLAGS         (VTCR_EL2_TG0_64K | VTCR_EL2_SH0_INNER | \
                                 VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
-                                VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B | \
-                                VTCR_EL2_RES1)
+                                VTCR_EL2_SL0_LVL1 | VTCR_EL2_RES1)
 #define VTTBR_X                (38 - VTCR_EL2_T0SZ_40B)
 #else
 /*
  */
 #define VTCR_EL2_FLAGS         (VTCR_EL2_TG0_4K | VTCR_EL2_SH0_INNER | \
                                 VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
-                                VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B | \
-                                VTCR_EL2_RES1)
+                                VTCR_EL2_SL0_LVL1 | VTCR_EL2_RES1)
 #define VTTBR_X                (37 - VTCR_EL2_T0SZ_40B)
 #endif
 
index eb7490d232a0f39328c42fd646de8b5548feb178..40a0a24e6c98c24b3d6aceeb1e979706cf45163c 100644 (file)
@@ -54,7 +54,7 @@ extern void __vgic_v3_init_lrs(void);
 
 extern u32 __kvm_get_mdcr_el2(void);
 
-extern void __init_stage2_translation(void);
+extern u32 __init_stage2_translation(void);
 
 #endif
 
index b7e82a795ac9e087098c957b42e717c0fad0985c..f5c6bd2541ef4d16c6f3b26cee01cc013d50055d 100644 (file)
@@ -369,11 +369,12 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
 int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
                               struct kvm_device_attr *attr);
 
-/* #define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__) */
-
 static inline void __cpu_init_stage2(void)
 {
-       kvm_call_hyp(__init_stage2_translation);
+       u32 parange = kvm_call_hyp(__init_stage2_translation);
+
+       WARN_ONCE(parange < 40,
+                 "PARange is %d bits, unsupported configuration!", parange);
 }
 
 #endif /* __ARM64_KVM_HOST_H__ */
index 1a78d6e2a78b58bf21113de3810785a81a35a29d..12874164b0ae5198cfc9d00d1721d8f58cb8ae51 100644 (file)
 #define ID_AA64MMFR1_VMIDBITS_SHIFT    4
 #define ID_AA64MMFR1_HADBS_SHIFT       0
 
+#define ID_AA64MMFR1_VMIDBITS_8                0
+#define ID_AA64MMFR1_VMIDBITS_16       2
+
 /* id_aa64mmfr2 */
 #define ID_AA64MMFR2_UAO_SHIFT         4
 
index 4203d5f257bcf396f8cdadb4f613d7048b486f59..85da0f599cd6ac174c52eb1a8b376d171686c718 100644 (file)
@@ -588,6 +588,15 @@ set_hcr:
        msr     vpidr_el2, x0
        msr     vmpidr_el2, x1
 
+       /*
+        * When VHE is not in use, early init of EL2 and EL1 needs to be
+        * done here.
+        * When VHE _is_ in use, EL1 will not be used in the host and
+        * requires no configuration, and all non-hyp-specific EL2 setup
+        * will be done via the _EL1 system register aliases in __cpu_setup.
+        */
+       cbnz    x2, 1f
+
        /* sctlr_el1 */
        mov     x0, #0x0800                     // Set/clear RES{1,0} bits
 CPU_BE(        movk    x0, #0x33d0, lsl #16    )       // Set EE and E0E on BE systems
@@ -597,6 +606,7 @@ CPU_LE(     movk    x0, #0x30d0, lsl #16    )       // Clear EE and E0E on LE systems
        /* Coprocessor traps. */
        mov     x0, #0x33ff
        msr     cptr_el2, x0                    // Disable copro. traps to EL2
+1:
 
 #ifdef CONFIG_COMPAT
        msr     hstr_el2, xzr                   // Disable CP15 traps to EL2
@@ -734,7 +744,8 @@ ENDPROC(__secondary_switched)
 
        .macro  update_early_cpu_boot_status status, tmp1, tmp2
        mov     \tmp2, #\status
-       str_l   \tmp2, __early_cpu_boot_status, \tmp1
+       adr_l   \tmp1, __early_cpu_boot_status
+       str     \tmp2, [\tmp1]
        dmb     sy
        dc      ivac, \tmp1                     // Invalidate potentially stale cache line
        .endm
index aef3605a8c47845ce2ce2d32f75e0fd59a8e4405..18a71bcd26ee4a15ea63e9f85102713f644495d5 100644 (file)
@@ -52,6 +52,7 @@ static void write_pen_release(u64 val)
 static int smp_spin_table_cpu_init(unsigned int cpu)
 {
        struct device_node *dn;
+       int ret;
 
        dn = of_get_cpu_node(cpu, NULL);
        if (!dn)
@@ -60,15 +61,15 @@ static int smp_spin_table_cpu_init(unsigned int cpu)
        /*
         * Determine the address from which the CPU is polling.
         */
-       if (of_property_read_u64(dn, "cpu-release-addr",
-                                &cpu_release_addr[cpu])) {
+       ret = of_property_read_u64(dn, "cpu-release-addr",
+                                  &cpu_release_addr[cpu]);
+       if (ret)
                pr_err("CPU %d: missing or invalid cpu-release-addr property\n",
                       cpu);
 
-               return -1;
-       }
+       of_node_put(dn);
 
-       return 0;
+       return ret;
 }
 
 static int smp_spin_table_cpu_prepare(unsigned int cpu)
index bfc54fd82797b08322b99b1cb6f553c5762b8dd4..bcbe761a5a3d1fa579fb211d89c165ace3b7c5d1 100644 (file)
 #include <asm/kvm_asm.h>
 #include <asm/kvm_hyp.h>
 
-void __hyp_text __init_stage2_translation(void)
+u32 __hyp_text __init_stage2_translation(void)
 {
        u64 val = VTCR_EL2_FLAGS;
+       u64 parange;
        u64 tmp;
 
        /*
@@ -30,14 +31,50 @@ void __hyp_text __init_stage2_translation(void)
         * bits in VTCR_EL2. Amusingly, the PARange is 4 bits, while
         * PS is only 3. Fortunately, bit 19 is RES0 in VTCR_EL2...
         */
-       val |= (read_sysreg(id_aa64mmfr0_el1) & 7) << 16;
+       parange = read_sysreg(id_aa64mmfr0_el1) & 7;
+       val |= parange << 16;
+
+       /* Compute the actual PARange... */
+       switch (parange) {
+       case 0:
+               parange = 32;
+               break;
+       case 1:
+               parange = 36;
+               break;
+       case 2:
+               parange = 40;
+               break;
+       case 3:
+               parange = 42;
+               break;
+       case 4:
+               parange = 44;
+               break;
+       case 5:
+       default:
+               parange = 48;
+               break;
+       }
+
+       /*
+        * ... and clamp it to 40 bits, unless we have some braindead
+        * HW that implements less than that. In all cases, we'll
+        * return that value for the rest of the kernel to decide what
+        * to do.
+        */
+       val |= 64 - (parange > 40 ? 40 : parange);
 
        /*
         * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS
         * bit in VTCR_EL2.
         */
-       tmp = (read_sysreg(id_aa64mmfr1_el1) >> 4) & 0xf;
-       val |= (tmp == 2) ? VTCR_EL2_VS : 0;
+       tmp = (read_sysreg(id_aa64mmfr1_el1) >> ID_AA64MMFR1_VMIDBITS_SHIFT) & 0xf;
+       val |= (tmp == ID_AA64MMFR1_VMIDBITS_16) ?
+                       VTCR_EL2_VS_16BIT :
+                       VTCR_EL2_VS_8BIT;
 
        write_sysreg(val, vtcr_el2);
+
+       return parange;
 }
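
The rewritten helper now reports the hardware PARange while programming VTCR_EL2: encodings 0..5 map to {32, 36, 40, 42, 44, 48} physical address bits, and the IPA size is clamped by setting T0SZ = 64 - min(PARange, 40), letting __cpu_init_stage2() warn on parts with fewer than 40 bits. The arithmetic worked through standalone:

	#include <stdio.h>

	int main(void)
	{
		static const unsigned int pa_bits[] = { 32, 36, 40, 42, 44, 48 };

		for (unsigned int enc = 0; enc < 6; enc++) {
			unsigned int pa  = pa_bits[enc];
			unsigned int ipa = pa > 40 ? 40 : pa; /* clamp to 40 bits */

			printf("enc %u: %u PA bits -> T0SZ %u (%u-bit IPA)\n",
			       enc, pa, 64 - ipa, ipa);
		}
		return 0;
	}
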
index 8832083e1cb88f6573c8a1ced391cb3d7aa1c527..b515809be2b961a20260579bdfd6cc8d86208643 100644 (file)
@@ -158,11 +158,6 @@ static int mcfgpio_to_irq(struct gpio_chip *chip, unsigned offset)
                return -EINVAL;
 }
 
-static struct bus_type mcfgpio_subsys = {
-       .name           = "gpio",
-       .dev_name       = "gpio",
-};
-
 static struct gpio_chip mcfgpio_chip = {
        .label                  = "mcfgpio",
        .request                = mcfgpio_request,
@@ -178,8 +173,7 @@ static struct gpio_chip mcfgpio_chip = {
 
 static int __init mcfgpio_sysinit(void)
 {
-       gpiochip_add_data(&mcfgpio_chip, NULL);
-       return subsys_system_register(&mcfgpio_subsys, NULL);
+       return gpiochip_add_data(&mcfgpio_chip, NULL);
 }
 
 core_initcall(mcfgpio_sysinit);
index d1fc4796025edb8769ee01195e64b91821f3625a..3ee6976f60885e5b2e0e4e2a80df6d824de14694 100644 (file)
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-amiga"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -64,7 +63,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -285,7 +283,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -359,6 +359,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -452,6 +453,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -468,6 +470,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -549,6 +552,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -557,7 +561,6 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -565,12 +568,9 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -594,7 +594,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
index 9bfe8be3658c18231ca4473d8c075a1fb2ac4d5e..e96787ffcbced33f9893d2760259177086b13c1f 100644 (file)
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-apollo"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -62,7 +61,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -283,7 +281,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -341,6 +341,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -411,6 +412,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -427,6 +429,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -508,6 +511,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -516,7 +520,6 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -524,12 +527,9 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -553,7 +553,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
index ebdcfae555801cd1c7367d4ca40974b3d39099cb..083fe6beac149da81250ca4fccd50798f9e4ef0d 100644 (file)
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-atari"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -62,7 +61,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -283,7 +281,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -350,6 +350,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -432,6 +433,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -448,6 +450,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -529,6 +532,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -537,7 +541,6 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -545,12 +548,9 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -574,7 +574,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
index 8acc65e54995388614666716febdab1dda706376..475130c06dcba04b646ec2e7c199aee7045dea7d 100644 (file)
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-bvme6000"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -60,7 +59,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -281,7 +279,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -340,6 +340,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -403,6 +404,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -419,6 +421,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -500,6 +503,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -508,7 +512,6 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -516,12 +519,9 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -545,7 +545,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
index 0c6a3d52b26e2b2559f24963040d3ba65a91ed47..4339658c200f3eb8af4bc2645836e109466055cb 100644 (file)
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-hp300"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -62,7 +61,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -283,7 +281,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -341,6 +341,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -413,6 +414,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -429,6 +431,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -510,6 +513,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -518,7 +522,6 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -526,12 +529,9 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -555,7 +555,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
index 12a8a6cb32f4914f06c1f5d4c8e5dd6ba31381ef..831cc8c3a2e259f67d318257e72bcab84e36b8c1 100644 (file)
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-mac"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -61,7 +60,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -285,7 +283,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -357,6 +357,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -435,6 +436,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -451,6 +453,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -532,6 +535,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -540,7 +544,6 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -548,12 +551,9 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -577,7 +577,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
index 64ff2dcb34c89a3e60e26f9c468d654ed943e97f..6377afeb522bbb9a235d1bd57f36cc14ca36ac5b 100644 (file)
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-multi"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -71,7 +70,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -295,7 +293,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -390,6 +390,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -515,6 +516,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -531,6 +533,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -612,6 +615,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -620,7 +624,6 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -628,12 +631,9 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -657,7 +657,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
index 07fc6abcfe0c50e4b656a63a9da36c728b13cec4..4304b3d56262bc677383ba4389b65d5e9a7625cd 100644 (file)
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-mvme147"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -59,7 +58,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -280,7 +278,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -339,6 +339,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -403,6 +404,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -419,6 +421,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -500,6 +503,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -508,7 +512,6 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -516,12 +519,9 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -545,7 +545,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
index 69903ded88f71d1d51ad4b430acbd2f745fdf867..074bda4094ffd5b0913d4411371e2597774faab2 100644 (file)
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-mvme16x"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -60,7 +59,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -281,7 +279,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -340,6 +340,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -403,6 +404,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -419,6 +421,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -500,6 +503,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -508,7 +512,6 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -516,12 +519,9 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -545,7 +545,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
index bd8401686ddef143bf036159cb3f4ea650772f32..07b9fa8d7f2ea71dd09a26c8d9abe4a519baa07c 100644 (file)
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-q40"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -60,7 +59,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -281,7 +279,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -346,6 +346,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -426,6 +427,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -442,6 +444,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -523,6 +526,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -531,7 +535,6 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -539,12 +542,9 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -568,7 +568,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
index 5f9fb3ab9636808d46b75f3696f477ab91e6dd79..36e6fae02d458e2dcb1a96c0caee68a5301f0341 100644 (file)
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-sun3"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -57,7 +56,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -278,7 +276,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -337,6 +337,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -405,6 +406,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -421,6 +423,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -502,6 +505,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -509,7 +513,6 @@ CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -517,12 +520,9 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -546,7 +546,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
index 5d1c674530e2ba73ca43ffc4940f139772962152..903acf929511e5066403bfdd1c4d78bbc4084c35 100644 (file)
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-sun3x"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -57,7 +56,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -278,7 +276,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -337,6 +337,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -405,6 +406,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -421,6 +423,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -502,6 +505,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -510,7 +514,6 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -518,12 +521,9 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -547,7 +547,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
index bafaff6dcd7bda8a28101f140159f9a7a76638db..a857d82ec5094abc30e353f25370365194a01194 100644 (file)
@@ -4,7 +4,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls            377
+#define NR_syscalls            379
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
index 0ca729665f29e9d67851aed2c1c52be75bbd078b..9fe674bf911fd2a4e61d7119f9b91ffbdddf44f5 100644 (file)
 #define __NR_membarrier                374
 #define __NR_mlock2            375
 #define __NR_copy_file_range   376
+#define __NR_preadv2           377
+#define __NR_pwritev2          378
 
 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
index 8bb94261ff97d953fcfbe7c305e6a7ecfce27a97..d6fd6d9ced2474ab477b0becefbd5f695d256e96 100644 (file)
@@ -397,3 +397,5 @@ ENTRY(sys_call_table)
        .long sys_membarrier
        .long sys_mlock2                /* 375 */
        .long sys_copy_file_range
+       .long sys_preadv2
+       .long sys_pwritev2
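
Taken together, the three m68k hunks above show the standard pattern for appending syscalls: bump NR_syscalls, define the new __NR_* numbers, and append matching entries to the syscall table. A compile-time sketch of the bookkeeping invariant, using the values from these hunks (the static_assert itself is illustrative, not kernel code):

#include <assert.h>

/* Values mirror the m68k hunks above: two syscalls appended after
 * copy_file_range (376), so the count moves from 377 to 379. */
#define __NR_copy_file_range 376
#define __NR_preadv2         377
#define __NR_pwritev2        378
#define NR_syscalls          379

/* The table and the count must stay in lockstep: NR_syscalls is always
 * one past the highest assigned number. */
static_assert(NR_syscalls == __NR_pwritev2 + 1,
              "NR_syscalls must be one past the last syscall number");

int main(void) { return 0; }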
index 745695db5ba068d2b0416b879234e2a44174eea7..f2f264b5aafe224c2efe7bc489d34b4b7c145aad 100644 (file)
@@ -261,7 +261,7 @@ u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
        au1x_dma_chan_t *cp;
 
        /*
-        * We do the intialization on the first channel allocation.
+        * We do the initialization on the first channel allocation.
         * We have to wait because of the interrupt handler initialization
         * which can't be done successfully during board set up.
         */
@@ -964,7 +964,7 @@ u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr)
        dp->dscr_source1 = dscr->dscr_source1;
        dp->dscr_cmd1 = dscr->dscr_cmd1;
        nbytes = dscr->dscr_cmd1;
-       /* Allow the caller to specifiy if an interrupt is generated */
+       /* Allow the caller to specify if an interrupt is generated */
        dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
        dp->dscr_cmd0 |= dscr->dscr_cmd0 | DSCR_CMD0_V;
        ctp->chan_ptr->ddma_dbell = 0;
index bdeed9d13c6fe01c0c804b695af44efd16817f46..433c4b9a9f0a92af20e31f1f16f9be5cde955b12 100644 (file)
@@ -503,15 +503,15 @@ int __init db1000_dev_setup(void)
        if (board == BCSR_WHOAMI_DB1500) {
                c0 = AU1500_GPIO2_INT;
                c1 = AU1500_GPIO5_INT;
-               d0 = AU1500_GPIO0_INT;
-               d1 = AU1500_GPIO3_INT;
+               d0 = 0; /* GPIO number, NOT irq! */
+               d1 = 3; /* GPIO number, NOT irq! */
                s0 = AU1500_GPIO1_INT;
                s1 = AU1500_GPIO4_INT;
        } else if (board == BCSR_WHOAMI_DB1100) {
                c0 = AU1100_GPIO2_INT;
                c1 = AU1100_GPIO5_INT;
-               d0 = AU1100_GPIO0_INT;
-               d1 = AU1100_GPIO3_INT;
+               d0 = 0; /* GPIO number, NOT irq! */
+               d1 = 3; /* GPIO number, NOT irq! */
                s0 = AU1100_GPIO1_INT;
                s1 = AU1100_GPIO4_INT;
 
@@ -545,15 +545,15 @@ int __init db1000_dev_setup(void)
        } else if (board == BCSR_WHOAMI_DB1000) {
                c0 = AU1000_GPIO2_INT;
                c1 = AU1000_GPIO5_INT;
-               d0 = AU1000_GPIO0_INT;
-               d1 = AU1000_GPIO3_INT;
+               d0 = 0; /* GPIO number, NOT irq! */
+               d1 = 3; /* GPIO number, NOT irq! */
                s0 = AU1000_GPIO1_INT;
                s1 = AU1000_GPIO4_INT;
                platform_add_devices(db1000_devs, ARRAY_SIZE(db1000_devs));
        } else if ((board == BCSR_WHOAMI_PB1500) ||
                   (board == BCSR_WHOAMI_PB1500R2)) {
                c0 = AU1500_GPIO203_INT;
-               d0 = AU1500_GPIO201_INT;
+               d0 = 1; /* GPIO number, NOT irq! */
                s0 = AU1500_GPIO202_INT;
                twosocks = 0;
                flashsize = 64;
@@ -566,7 +566,7 @@ int __init db1000_dev_setup(void)
                 */
        } else if (board == BCSR_WHOAMI_PB1100) {
                c0 = AU1100_GPIO11_INT;
-               d0 = AU1100_GPIO9_INT;
+               d0 = 9; /* GPIO number, NOT irq! */
                s0 = AU1100_GPIO10_INT;
                twosocks = 0;
                flashsize = 64;
@@ -583,7 +583,6 @@ int __init db1000_dev_setup(void)
        } else
                return 0; /* unknown board, no further dev setup to do */
 
-       irq_set_irq_type(d0, IRQ_TYPE_EDGE_BOTH);
        irq_set_irq_type(c0, IRQ_TYPE_LEVEL_LOW);
        irq_set_irq_type(s0, IRQ_TYPE_LEVEL_LOW);
 
@@ -597,7 +596,6 @@ int __init db1000_dev_setup(void)
                c0, d0, /*s0*/0, 0, 0);
 
        if (twosocks) {
-               irq_set_irq_type(d1, IRQ_TYPE_EDGE_BOTH);
                irq_set_irq_type(c1, IRQ_TYPE_LEVEL_LOW);
                irq_set_irq_type(s1, IRQ_TYPE_LEVEL_LOW);
 
index b518f029f5e7bcd97e62b5017408652fa7a111dd..1c01d6eadb08d89a56519b512cc02cb1a186704b 100644 (file)
@@ -514,7 +514,7 @@ static void __init db1550_devices(void)
                AU1000_PCMCIA_MEM_PHYS_ADDR  + 0x000400000 - 1,
                AU1000_PCMCIA_IO_PHYS_ADDR,
                AU1000_PCMCIA_IO_PHYS_ADDR   + 0x000010000 - 1,
-               AU1550_GPIO3_INT, AU1550_GPIO0_INT,
+               AU1550_GPIO3_INT, 0,
                /*AU1550_GPIO21_INT*/0, 0, 0);
 
        db1x_register_pcmcia_socket(
@@ -524,7 +524,7 @@ static void __init db1550_devices(void)
                AU1000_PCMCIA_MEM_PHYS_ADDR  + 0x004400000 - 1,
                AU1000_PCMCIA_IO_PHYS_ADDR   + 0x004000000,
                AU1000_PCMCIA_IO_PHYS_ADDR   + 0x004010000 - 1,
-               AU1550_GPIO5_INT, AU1550_GPIO1_INT,
+               AU1550_GPIO5_INT, 1,
                /*AU1550_GPIO22_INT*/0, 0, 1);
 
        platform_device_register(&db1550_nand_dev);
index eb5117ced95ac0a49dc92a3caa1cc32b56b19022..618dfd735eede5e771061d6a42943150d19b93a6 100644 (file)
@@ -26,8 +26,7 @@
 #include "common.h"
 
 #define AR71XX_BASE_FREQ       40000000
-#define AR724X_BASE_FREQ       5000000
-#define AR913X_BASE_FREQ       5000000
+#define AR724X_BASE_FREQ       40000000
 
 static struct clk *clks[3];
 static struct clk_onecell_data clk_data = {
@@ -103,8 +102,8 @@ static void __init ar724x_clocks_init(void)
        div = ((pll >> AR724X_PLL_FB_SHIFT) & AR724X_PLL_FB_MASK);
        freq = div * ref_rate;
 
-       div = ((pll >> AR724X_PLL_REF_DIV_SHIFT) & AR724X_PLL_REF_DIV_MASK);
-       freq *= div;
+       div = ((pll >> AR724X_PLL_REF_DIV_SHIFT) & AR724X_PLL_REF_DIV_MASK) * 2;
+       freq /= div;
 
        cpu_rate = freq;
 
@@ -123,39 +122,6 @@ static void __init ar724x_clocks_init(void)
        clk_add_alias("uart", NULL, "ahb", NULL);
 }
 
-static void __init ar913x_clocks_init(void)
-{
-       unsigned long ref_rate;
-       unsigned long cpu_rate;
-       unsigned long ddr_rate;
-       unsigned long ahb_rate;
-       u32 pll;
-       u32 freq;
-       u32 div;
-
-       ref_rate = AR913X_BASE_FREQ;
-       pll = ath79_pll_rr(AR913X_PLL_REG_CPU_CONFIG);
-
-       div = ((pll >> AR913X_PLL_FB_SHIFT) & AR913X_PLL_FB_MASK);
-       freq = div * ref_rate;
-
-       cpu_rate = freq;
-
-       div = ((pll >> AR913X_DDR_DIV_SHIFT) & AR913X_DDR_DIV_MASK) + 1;
-       ddr_rate = freq / div;
-
-       div = (((pll >> AR913X_AHB_DIV_SHIFT) & AR913X_AHB_DIV_MASK) + 1) * 2;
-       ahb_rate = cpu_rate / div;
-
-       ath79_add_sys_clkdev("ref", ref_rate);
-       clks[0] = ath79_add_sys_clkdev("cpu", cpu_rate);
-       clks[1] = ath79_add_sys_clkdev("ddr", ddr_rate);
-       clks[2] = ath79_add_sys_clkdev("ahb", ahb_rate);
-
-       clk_add_alias("wdt", NULL, "ahb", NULL);
-       clk_add_alias("uart", NULL, "ahb", NULL);
-}
-
 static void __init ar933x_clocks_init(void)
 {
        unsigned long ref_rate;
@@ -443,10 +409,8 @@ void __init ath79_clocks_init(void)
 {
        if (soc_is_ar71xx())
                ar71xx_clocks_init();
-       else if (soc_is_ar724x())
+       else if (soc_is_ar724x() || soc_is_ar913x())
                ar724x_clocks_init();
-       else if (soc_is_ar913x())
-               ar913x_clocks_init();
        else if (soc_is_ar933x())
                ar933x_clocks_init();
        else if (soc_is_ar934x())
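
The AR724x hunk above fixes the CPU clock derivation: the base frequency is corrected to 40 MHz (letting AR913x share the same path), and the reference-divider field now divides the feedback product by twice its value instead of multiplying. A standalone sketch of the corrected arithmetic; the field shifts, masks, and sample register value are illustrative assumptions, not the kernel's header definitions:

#include <stdint.h>
#include <stdio.h>

#define BASE_FREQ     40000000u /* 40 MHz reference, per the hunk above */
#define FB_SHIFT      0         /* assumed feedback-field layout */
#define FB_MASK       0x3ffu
#define REF_DIV_SHIFT 10        /* assumed reference-divider layout */
#define REF_DIV_MASK  0xfu

static unsigned long cpu_rate_from_pll(uint32_t pll)
{
	unsigned long div, freq;

	/* Feedback multiplier times the reference gives the VCO rate. */
	div = (pll >> FB_SHIFT) & FB_MASK;
	freq = div * BASE_FREQ;

	/* Corrected step: divide by twice the reference divider. */
	div = ((pll >> REF_DIV_SHIFT) & REF_DIV_MASK) * 2;
	return freq / div;
}

int main(void)
{
	/* feedback = 20, ref_div = 1: 20 * 40 MHz / 2 = 400 MHz */
	uint32_t pll = (20u << FB_SHIFT) | (1u << REF_DIV_SHIFT);
	printf("cpu rate: %lu Hz\n", cpu_rate_from_pll(pll));
	return 0;
}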
index 959c145a0a2c1654d1ac3b58a98db92ef435a713..ca7ad131d05703003ac007a6e76e8b8a358115db 100644 (file)
@@ -714,11 +714,11 @@ void bcm47xx_sprom_register_fallbacks(void)
 {
 #if defined(CONFIG_BCM47XX_SSB)
        if (ssb_arch_register_fallback_sprom(&bcm47xx_get_sprom_ssb))
-               pr_warn("Failed to registered ssb SPROM handler\n");
+               pr_warn("Failed to register ssb SPROM handler\n");
 #endif
 
 #if defined(CONFIG_BCM47XX_BCMA)
        if (bcma_arch_register_fallback_sprom(&bcm47xx_get_sprom_bcma))
-               pr_warn("Failed to registered bcma SPROM handler\n");
+               pr_warn("Failed to register bcma SPROM handler\n");
 #endif
 }
index 4eff1ef02eff9abec4ae03d4248f144ef8fa0058..309d2ad67e4d6ee3d7c8c844506b5ae8bd92b24d 100644 (file)
@@ -39,10 +39,11 @@ vmlinuzobjs-$(CONFIG_SYS_SUPPORTS_ZBOOT_UART_PROM) += $(obj)/uart-prom.o
 vmlinuzobjs-$(CONFIG_MIPS_ALCHEMY)                += $(obj)/uart-alchemy.o
 endif
 
-vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o
+vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o $(obj)/bswapsi.o
 
-$(obj)/ashldi3.o: KBUILD_CFLAGS += -I$(srctree)/arch/mips/lib
-$(obj)/ashldi3.c: $(srctree)/arch/mips/lib/ashldi3.c
+extra-y += ashldi3.c bswapsi.c
+$(obj)/ashldi3.o $(obj)/bswapsi.o: KBUILD_CFLAGS += -I$(srctree)/arch/mips/lib
+$(obj)/ashldi3.c $(obj)/bswapsi.c: $(obj)/%.c: $(srctree)/arch/mips/lib/%.c
        $(call cmd,shipped)
 
 targets := $(notdir $(vmlinuzobjs-y))
index adb33e3550430de99adb3b2e2c211310fabc65a9..56035e5b70084918a121247ace50f19d3fefdfe9 100644 (file)
@@ -82,7 +82,7 @@
                };
 
                gisb-arb@400000 {
-                       compatible = "brcm,bcm7400-gisb-arb";
+                       compatible = "brcm,bcm7435-gisb-arb";
                        reg = <0x400000 0xdc>;
                        native-endian;
                        interrupt-parent = <&sun_l2_intc>;
index 3ad4ba9b12fd6a649eb07b201414f18ba60b83a7..3c2ed9ee5b2f89d3f8b17eef1328b1dbb68702eb 100644 (file)
@@ -83,7 +83,7 @@
                        };
 
                        pll: pll-controller@18050000 {
-                               compatible = "qca,ar9132-ppl",
+                               compatible = "qca,ar9132-pll",
                                                "qca,ar9130-pll";
                                reg = <0x18050000 0x20>;
 
index e535ee3c26a4e8402a5da1dcc8c0f748205dacca..4f1540e5f963493201406f80e11de3e9d198d4c1 100644 (file)
@@ -18,7 +18,7 @@
                reg = <0x0 0x2000000>;
        };
 
-       extosc: oscillator {
+       extosc: ref {
                compatible = "fixed-clock";
                #clock-cells = <0>;
                clock-frequency = <40000000>;
index e59d1b79f24cd54cd083d2466f091279b83110b8..2f415d9d0f3c18690690f61171df7a968e1b4a52 100644 (file)
@@ -68,7 +68,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
                gmx_rx_int_en.s.pause_drp = 1;
                /* Skipping gmx_rx_int_en.s.reserved_16_18 */
                /*gmx_rx_int_en.s.ifgerr = 1; */
-               /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
+               /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
                /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
                /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
                /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -89,7 +89,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
                /*gmx_rx_int_en.s.phy_spd = 1; */
                /*gmx_rx_int_en.s.phy_link = 1; */
                /*gmx_rx_int_en.s.ifgerr = 1; */
-               /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
+               /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
                /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
                /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
                /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -112,7 +112,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
                /*gmx_rx_int_en.s.phy_spd = 1; */
                /*gmx_rx_int_en.s.phy_link = 1; */
                /*gmx_rx_int_en.s.ifgerr = 1; */
-               /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
+               /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
                /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
                /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
                /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -134,7 +134,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
                /*gmx_rx_int_en.s.phy_spd = 1; */
                /*gmx_rx_int_en.s.phy_link = 1; */
                /*gmx_rx_int_en.s.ifgerr = 1; */
-               /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
+               /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
                /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
                /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
                /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -156,7 +156,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
                /*gmx_rx_int_en.s.phy_spd = 1; */
                /*gmx_rx_int_en.s.phy_link = 1; */
                /*gmx_rx_int_en.s.ifgerr = 1; */
-               /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
+               /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
                /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
                /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
                /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -179,7 +179,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
                /*gmx_rx_int_en.s.phy_spd = 1; */
                /*gmx_rx_int_en.s.phy_link = 1; */
                /*gmx_rx_int_en.s.ifgerr = 1; */
-               /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
+               /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
                /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
                /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
                /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -209,7 +209,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
                gmx_rx_int_en.s.pause_drp = 1;
                /* Skipping gmx_rx_int_en.s.reserved_16_18 */
                /*gmx_rx_int_en.s.ifgerr = 1; */
-               /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
+               /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
                /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
                /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
                /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
index 87be167a7a6ae07ce1b71f574d47471f5781f6ee..676fab50dd2b0e3acf31c3599bfeb2e3e2f08894 100644 (file)
@@ -189,7 +189,7 @@ void cvmx_pko_initialize_global(void)
        /*
         * Set the size of the PKO command buffers to an odd number of
         * 64bit words. This allows the normal two word send to stay
-        * aligned and never span a comamnd word buffer.
+        * aligned and never span a command word buffer.
         */
        config.u64 = 0;
        config.s.pool = CVMX_FPA_OUTPUT_BUFFER_POOL;
index b7fa9ae28c3659dbf457aecd7cd17255cd34f5da..42412ba0f3bfd78f1f7f6f6046fd581d4641bc56 100644 (file)
@@ -331,7 +331,7 @@ static int octeon_update_boot_vector(unsigned int cpu)
        }
 
        if (!(avail_coremask & (1 << coreid))) {
-               /* core not available, assume, that catched by simple-executive */
+               /* core not available; assume it was caught by simple-executive */
                cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
                cvmx_write_csr(CVMX_CIU_PP_RST, 0);
        }
index 4e36b6e1869c8b0125ef6a2ddf758d57c9d01fb2..43e0ba24470cd523c39a5ae5465d3ab2fc02f596 100644 (file)
@@ -17,13 +17,12 @@ CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_CGROUPS=y
+CONFIG_MEMCG=y
+CONFIG_CGROUP_SCHED=y
 CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_DEVICE=y
 CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
 CONFIG_CGROUP_CPUACCT=y
-CONFIG_MEMCG=y
-CONFIG_MEMCG_KMEM=y
-CONFIG_CGROUP_SCHED=y
 CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
@@ -52,6 +51,11 @@ CONFIG_DEVTMPFS=y
 # CONFIG_ALLOW_DEV_COREDUMP is not set
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=32
+CONFIG_MTD=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_JZ4780=y
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_FASTMAP=y
 CONFIG_NETDEVICES=y
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
@@ -103,7 +107,7 @@ CONFIG_PROC_KCORE=y
 # CONFIG_PROC_PAGE_MONITOR is not set
 CONFIG_TMPFS=y
 CONFIG_CONFIGFS_FS=y
-# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_UBIFS_FS=y
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_NLS=y
 CONFIG_NLS_CODEPAGE_437=y
index 8c6f508e59de144837fc781aae73987e1fe6280f..d7b99180c6e18bd2af23552629f6e96dc5b8704a 100644 (file)
@@ -5,7 +5,7 @@
  * Written by Ralf Baechle and Andreas Busse, modified for DECstation
  * support by Paul Antoine and Harald Koerfgen.
  *
- * completly rewritten:
+ * completely rewritten:
  * Copyright (C) 1998 Harald Koerfgen
  *
  * Rewritten extensively for controller-driven IRQ support
index 5537b94572b2d29f8da730d96bef202744779e32..0d75b5a0bad42c906ba5fb1b4fdad3ce4b14c02e 100644 (file)
@@ -9,7 +9,7 @@
  * PROM library functions for acquiring/using memory descriptors given to us
  * from the ARCS firmware.  This is only used when CONFIG_ARC_MEMORY is set
  * because on some machines like SGI IP27 the ARC memory configuration data
- * completly bogus and alternate easier to use mechanisms are available.
+ * completely bogus and alternate easier to use mechanisms are available.
  */
 #include <linux/init.h>
 #include <linux/kernel.h>
index e7dc785a91ca6c2e6f64c1dc5206a06a59ba80ac..af12c1f9f1a893d8bc266035e6665bf3b36a52a0 100644 (file)
@@ -102,7 +102,7 @@ extern void cpu_probe(void);
 extern void cpu_report(void);
 
 extern const char *__cpu_name[];
-#define cpu_name_string()      __cpu_name[smp_processor_id()]
+#define cpu_name_string()      __cpu_name[raw_smp_processor_id()]
 
 struct seq_file;
 struct notifier_block;
index cf92fe7339952b43f0a8585bc9a51d351fca0f50..c4873e8594ef126aac8257e92b35e4cb1b3d118b 100644 (file)
@@ -141,7 +141,7 @@ octeon_main_processor:
 .endm
 
 /*
- * Do SMP slave processor setup necessary before we can savely execute C code.
+ * Do SMP slave processor setup necessary before we can safely execute C code.
  */
        .macro  smp_slave_setup
        .endm
index 13b0751b010a71d7ac5b1c05a225220290d04f33..a229297c880b5e41dfe03a2c63c1519b34b7da58 100644 (file)
@@ -16,7 +16,7 @@
        .endm
 
 /*
- * Do SMP slave processor setup necessary before we can savely execute C code.
+ * Do SMP slave processor setup necessary before we can safely execute C code.
  */
        .macro  smp_slave_setup
        .endm
index cf4384bfa8460d64be78b787940391b083238ea5..b0b7261ff3ad26fa8c98bfcff1ae1c868f64c5ab 100644 (file)
@@ -11,7 +11,7 @@
 #define __ASM_MACH_IP27_IRQ_H
 
 /*
- * A hardwired interrupt number is completly stupid for this system - a
+ * A hardwired interrupt number is completely stupid for this system - a
  * large configuration might have thousands if not tenthousands of
  * interrupts.
  */
index b087cb83da3a51fb88af8db48e51f4feb85918b1..f992c1db876b5e7b06985b80f6d3b1289b953399 100644 (file)
@@ -81,7 +81,7 @@
        .endm
 
 /*
- * Do SMP slave processor setup necessary before we can savely execute C code.
+ * Do SMP slave processor setup necessary before we can safely execute C code.
  */
        .macro  smp_slave_setup
        GET_NASID_ASM   t1
index bf8c3e1860e713cb548f18bdb86c0eb822229b4c..7c7708a23baa61e0a19df05d31a6a518817a4ab9 100644 (file)
@@ -27,7 +27,7 @@ enum jz_gpio_function {
 
 /*
  Usually a driver for a SoC component has to request several gpio pins and
- configure them as funcion pins.
+ configure them as function pins.
  jz_gpio_bulk_request can be used to ease this process.
  Usually one would do something like:
 
index b196825a1de9ca1f3b60df1059978569c93ad8c2..d4635391c36a28240c9fbdd900d26fdf309e0ca9 100644 (file)
@@ -28,7 +28,7 @@ extern void __iomem *mips_cm_l2sync_base;
  * This function returns the physical base address of the Coherence Manager
  * global control block, or 0 if no Coherence Manager is present. It provides
  * a default implementation which reads the CMGCRBase register where available,
- * and may be overriden by platforms which determine this address in a
+ * and may be overridden by platforms which determine this address in a
  * different way by defining a function with the same prototype except for the
  * name mips_cm_phys_base (without underscores).
  */
index 1f6ea8352ca90408772f0a7f72f7e213bece28ba..20621e1ca2383a5a1d4154ad99e6cf16ffb9b55c 100644 (file)
@@ -79,7 +79,7 @@ struct r2_decoder_table {
 };
 
 
-extern void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
+extern void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
                          const char *str);
 
 #ifndef CONFIG_MIPSR2_TO_R6_EMULATOR
index f7dd17d0dc22d600c7f42282d42d12ed4cf3b19d..f4f1996e0fac35d193374958683bbc9908685edf 100644 (file)
@@ -33,7 +33,7 @@
 /* Packet buffers */
 #define CVMX_FPA_PACKET_POOL               (0)
 #define CVMX_FPA_PACKET_POOL_SIZE          CVMX_FPA_POOL_0_SIZE
-/* Work queue entrys */
+/* Work queue entries */
 #define CVMX_FPA_WQE_POOL                  (1)
 #define CVMX_FPA_WQE_POOL_SIZE             CVMX_FPA_POOL_1_SIZE
 /* PKO queue command buffers */
index 19e139c9f33791f883046d5e3dcf2c2f76504359..3e982e0c397e8a5d77d32c592d621bd1c31650c3 100644 (file)
@@ -189,7 +189,7 @@ static inline uint64_t cvmx_ptr_to_phys(void *ptr)
 static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
 {
        if (sizeof(void *) == 8) {
-               /* Just set the top bit, avoiding any TLB uglyness */
+               /* Just set the top bit, avoiding any TLB ugliness */
                return CASTPTR(void,
                               CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
                                            physical_address));
index 8d7a63b52ac73bacfd14fcddf838e6cb2758ff6b..3206245d1ed64d94693a743ca4e14976dcb3646a 100644 (file)
@@ -269,16 +269,16 @@ typedef struct bridge_err_cmdword_s {
        union {
                u32             cmd_word;
                struct {
-                       u32     didn:4,         /* Destination ID */
-                               sidn:4,         /* Source ID      */
-                               pactyp:4,       /* Packet type    */
-                               tnum:5,         /* Trans Number   */
-                               coh:1,          /* Coh Transacti  */
-                               ds:2,           /* Data size      */
-                               gbr:1,          /* GBR enable     */
-                               vbpm:1,         /* VBPM message   */
+                       u32     didn:4,         /* Destination ID  */
+                               sidn:4,         /* Source ID       */
+                               pactyp:4,       /* Packet type     */
+                               tnum:5,         /* Trans Number    */
+                               coh:1,          /* Coh Transaction */
+                               ds:2,           /* Data size       */
+                               gbr:1,          /* GBR enable      */
+                               vbpm:1,         /* VBPM message    */
                                error:1,        /* Error occurred  */
-                               barr:1,         /* Barrier op     */
+                               barr:1,         /* Barrier op      */
                                rsvd:8;
                } berr_st;
        } berr_un;
index 59920b3459420192b3f34185bc4293b763293418..4a9c99050c13964a62716b2f2ac81563bc6908be 100644 (file)
@@ -147,7 +147,7 @@ struct hpc3_ethregs {
 #define HPC3_EPCFG_P1   0x000f /* Cycles to spend in P1 state for PIO */
 #define HPC3_EPCFG_P2   0x00f0 /* Cycles to spend in P2 state for PIO */
 #define HPC3_EPCFG_P3   0x0f00 /* Cycles to spend in P3 state for PIO */
-#define HPC3_EPCFG_TST  0x1000 /* Diagnistic ram test feature bit */
+#define HPC3_EPCFG_TST  0x1000 /* Diagnostic ram test feature bit */
 
        u32 _unused2[0x1000/4 - 8];     /* padding */
 
index 26ddfff28c8e27cd53629e840502a771a9d0c6a5..105a9479ac5f70025f6be3c6cf6212fbb0e30076 100644 (file)
@@ -144,7 +144,7 @@ struct linux_tinfo {
 struct linux_vdirent {
        ULONG namelen;
        unsigned char attr;
-       char fname[32]; /* XXX imperical, should be a define */
+       char fname[32]; /* XXX empirical, should be a define */
 };
 
 /* Other stuff for files. */
@@ -179,7 +179,7 @@ struct linux_finfo {
        enum linux_devtypes   dtype;
        unsigned long         namelen;
        unsigned char         attr;
-       char                  name[32]; /* XXX imperical, should be define */
+       char                  name[32]; /* XXX empirical, should be define */
 };
 
 /* This describes the vector containing function pointers to the ARC
index e33f0363235b77e57229846f34fe398078c6f7ca..feb385180f8742f0682149e13fcebfaa762e174b 100644 (file)
@@ -355,7 +355,7 @@ struct ioc3_etxd {
 #define SSCR_PAUSE_STATE 0x40000000    /* sets when PAUSE takes effect */
 #define SSCR_RESET     0x80000000      /* reset DMA channels */
 
-/* all producer/comsumer pointers are the same bitfield */
+/* all producer/consumer pointers are the same bitfield */
 #define PROD_CONS_PTR_4K 0x00000ff8    /* for 4K buffers */
 #define PROD_CONS_PTR_1K 0x000003f8    /* for 1K buffers */
 #define PROD_CONS_PTR_OFF 3
index 5998b13e976498c42b32c34850115bb7388f533a..57ece90f8cf1edef77b2d58f892ae809a8ff6b48 100644 (file)
@@ -628,7 +628,7 @@ typedef union h1_icrbb_u {
 /*
  * Values for field imsgtype
  */
-#define IIO_ICRB_IMSGT_XTALK   0       /* Incoming Meessage from Xtalk */
+#define IIO_ICRB_IMSGT_XTALK   0       /* Incoming Message from Xtalk */
 #define IIO_ICRB_IMSGT_BTE     1       /* Incoming message from BTE    */
 #define IIO_ICRB_IMSGT_SN0NET  2       /* Incoming message from SN0 net */
 #define IIO_ICRB_IMSGT_CRB     3       /* Incoming message from CRB ???  */
index 095ecafe6bd3525444c27812937e2d085bf69b2a..7f109d4f64a4074d294f006def272008e8b71112 100644 (file)
@@ -95,7 +95,7 @@ static inline bool eva_kernel_access(void)
 }
 
 /*
- * Is a address valid? This does a straighforward calculation rather
+ * Is a address valid? This does a straightforward calculation rather
  * than tests.
  *
  * Address valid if:
index 3129795de940b0c370c3eb6926ade8b11f878f91..24ad815c7f38d463f45e90e38417649c91a08f68 100644 (file)
 #define __NR_membarrier                        (__NR_Linux + 358)
 #define __NR_mlock2                    (__NR_Linux + 359)
 #define __NR_copy_file_range           (__NR_Linux + 360)
+#define __NR_preadv2                   (__NR_Linux + 361)
+#define __NR_pwritev2                  (__NR_Linux + 362)
 
 /*
  * Offset of the last Linux o32 flavoured syscall
  */
-#define __NR_Linux_syscalls            360
+#define __NR_Linux_syscalls            362
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux                 4000
-#define __NR_O32_Linux_syscalls                360
+#define __NR_O32_Linux_syscalls                362
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 
 #define __NR_membarrier                        (__NR_Linux + 318)
 #define __NR_mlock2                    (__NR_Linux + 319)
 #define __NR_copy_file_range           (__NR_Linux + 320)
+#define __NR_preadv2                   (__NR_Linux + 321)
+#define __NR_pwritev2                  (__NR_Linux + 322)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls            320
+#define __NR_Linux_syscalls            322
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux                  5000
-#define __NR_64_Linux_syscalls         320
+#define __NR_64_Linux_syscalls         322
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
 #define __NR_membarrier                        (__NR_Linux + 322)
 #define __NR_mlock2                    (__NR_Linux + 323)
 #define __NR_copy_file_range           (__NR_Linux + 324)
+#define __NR_preadv2                   (__NR_Linux + 325)
+#define __NR_pwritev2                  (__NR_Linux + 326)
 
 /*
  * Offset of the last N32 flavoured syscall
  */
-#define __NR_Linux_syscalls            324
+#define __NR_Linux_syscalls            326
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux                 6000
-#define __NR_N32_Linux_syscalls                324
+#define __NR_N32_Linux_syscalls                326
 
 #endif /* _UAPI_ASM_UNISTD_H */
index 1448c1f43d4e4ae6657b819a5c8ee238a7332e34..760217bbb2fa5adfc7c96fe352771ba5807bcedb 100644 (file)
@@ -24,7 +24,7 @@ static char *cm2_tr[8] = {
        "0x04", "cpc", "0x06", "0x07"
 };
 
-/* CM3 Tag ECC transation type */
+/* CM3 Tag ECC transaction type */
 static char *cm3_tr[16] = {
        [0x0] = "ReqNoData",
        [0x1] = "0x1",
index 1f5aac7f9ec3588ec3c48a563b057351b1abfe18..3fff89ae760bac4b2a49ecbcf3877948fd4f85c6 100644 (file)
@@ -940,42 +940,42 @@ repeat:
                switch (rt) {
                case tgei_op:
                        if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst))
-                               do_trap_or_bp(regs, 0, "TGEI");
+                               do_trap_or_bp(regs, 0, 0, "TGEI");
 
                        MIPS_R2_STATS(traps);
 
                        break;
                case tgeiu_op:
                        if (regs->regs[rs] >= MIPSInst_UIMM(inst))
-                               do_trap_or_bp(regs, 0, "TGEIU");
+                               do_trap_or_bp(regs, 0, 0, "TGEIU");
 
                        MIPS_R2_STATS(traps);
 
                        break;
                case tlti_op:
                        if ((long)regs->regs[rs] < MIPSInst_SIMM(inst))
-                               do_trap_or_bp(regs, 0, "TLTI");
+                               do_trap_or_bp(regs, 0, 0, "TLTI");
 
                        MIPS_R2_STATS(traps);
 
                        break;
                case tltiu_op:
                        if (regs->regs[rs] < MIPSInst_UIMM(inst))
-                               do_trap_or_bp(regs, 0, "TLTIU");
+                               do_trap_or_bp(regs, 0, 0, "TLTIU");
 
                        MIPS_R2_STATS(traps);
 
                        break;
                case teqi_op:
                        if (regs->regs[rs] == MIPSInst_SIMM(inst))
-                               do_trap_or_bp(regs, 0, "TEQI");
+                               do_trap_or_bp(regs, 0, 0, "TEQI");
 
                        MIPS_R2_STATS(traps);
 
                        break;
                case tnei_op:
                        if (regs->regs[rs] != MIPSInst_SIMM(inst))
-                               do_trap_or_bp(regs, 0, "TNEI");
+                               do_trap_or_bp(regs, 0, 0, "TNEI");
 
                        MIPS_R2_STATS(traps);
 
index 2b70723071c322018dbb616a0b158bae4da85d0e..9083d63b765cf9532f3a7e42f892a3b90b50811a 100644 (file)
@@ -109,9 +109,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
                       struct module *me)
 {
        Elf_Mips_Rela *rel = (void *) sechdrs[relsec].sh_addr;
+       int (*handler)(struct module *me, u32 *location, Elf_Addr v);
        Elf_Sym *sym;
        u32 *location;
-       unsigned int i;
+       unsigned int i, type;
        Elf_Addr v;
        int res;
 
@@ -134,9 +135,21 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
                        return -ENOENT;
                }
 
-               v = sym->st_value + rel[i].r_addend;
+               type = ELF_MIPS_R_TYPE(rel[i]);
+
+               if (type < ARRAY_SIZE(reloc_handlers_rela))
+                       handler = reloc_handlers_rela[type];
+               else
+                       handler = NULL;
 
-               res = reloc_handlers_rela[ELF_MIPS_R_TYPE(rel[i])](me, location, v);
+               if (!handler) {
+                       pr_err("%s: Unknown relocation type %u\n",
+                              me->name, type);
+                       return -EINVAL;
+               }
+
+               v = sym->st_value + rel[i].r_addend;
+               res = handler(me, location, v);
                if (res)
                        return res;
        }
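
The RELA hunk above (and the REL variant in the next file) replaces an unchecked array index with a bounds test plus a NULL-slot rejection before dispatching. A self-contained user-space sketch of that guarded-dispatch pattern; the handler names and table contents here are invented for illustration:

#include <stddef.h>
#include <stdio.h>

typedef int (*reloc_handler_t)(unsigned int *location, unsigned long v);

static int apply_none(unsigned int *location, unsigned long v)
{
	(void)location; (void)v;
	return 0;
}

static int apply_32(unsigned int *location, unsigned long v)
{
	*location += (unsigned int)v; /* toy absolute relocation */
	return 0;
}

static reloc_handler_t handlers[] = {
	apply_none, /* type 0 */
	apply_32,   /* type 1 */
	NULL,       /* type 2: known but unsupported slot */
};

static int apply_one(unsigned int type, unsigned int *location, unsigned long v)
{
	reloc_handler_t handler = NULL;

	/* Bounds-check before indexing, then reject empty slots --
	 * the two failure modes the hunk above turns into -EINVAL. */
	if (type < sizeof(handlers) / sizeof(handlers[0]))
		handler = handlers[type];
	if (!handler) {
		fprintf(stderr, "unknown relocation type %u\n", type);
		return -1;
	}
	return handler(location, v);
}

int main(void)
{
	unsigned int word = 0x1000;

	apply_one(1, &word, 0x20); /* dispatched */
	apply_one(7, &word, 0x20); /* rejected: out of range */
	return 0;
}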
index 1833f5171ccda092a74f6ac6a902d30e397fc985..f9b2936d598def20f5f9be027a4b1e1a8af758c4 100644 (file)
@@ -197,9 +197,10 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
                   struct module *me)
 {
        Elf_Mips_Rel *rel = (void *) sechdrs[relsec].sh_addr;
+       int (*handler)(struct module *me, u32 *location, Elf_Addr v);
        Elf_Sym *sym;
        u32 *location;
-       unsigned int i;
+       unsigned int i, type;
        Elf_Addr v;
        int res;
 
@@ -223,9 +224,21 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
                        return -ENOENT;
                }
 
-               v = sym->st_value;
+               type = ELF_MIPS_R_TYPE(rel[i]);
+
+               if (type < ARRAY_SIZE(reloc_handlers_rel))
+                       handler = reloc_handlers_rel[type];
+               else
+                       handler = NULL;
 
-               res = reloc_handlers_rel[ELF_MIPS_R_TYPE(rel[i])](me, location, v);
+               if (!handler) {
+                       pr_err("%s: Unknown relocation type %u\n",
+                              me->name, type);
+                       return -EINVAL;
+               }
+
+               v = sym->st_value;
+               res = handler(me, location, v);
                if (res)
                        return res;
        }
index d7b8dd43147a44e7a400efc4b5e2019e015f82b9..9bc1191b1ab0d32c05d7f7d15e2a6e4995262933 100644 (file)
@@ -530,7 +530,7 @@ static void mipspmu_enable(struct pmu *pmu)
 
 /*
  * MIPS performance counters can be per-TC. The control registers can
- * not be directly accessed accross CPUs. Hence if we want to do global
+ * not be directly accessed across CPUs. Hence if we want to do global
  * control, we need cross CPU calls. on_each_cpu() can help us, but we
  * can not make sure this function is called with interrupts enabled. So
  * here we pause local counters and then grab a rwlock and leave the
index f63a289977cc5f34d5ee52daf61f1065153166a1..fa3f9ebad8f40503ccd6622c8c91c56c6e6188ee 100644 (file)
@@ -472,7 +472,7 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
        /*
         * Disable all but self interventions. The load from COHCTL is defined
         * by the interAptiv & proAptiv SUMs as ensuring that the operation
-        * resulting from the preceeding store is complete.
+        * resulting from the preceding store is complete.
         */
        uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core);
        uasm_i_sw(&p, t0, 0, r_pcohctl);
index eddd5fd6fdfa2ee20f50886b8fbbad9045fd496d..92880cee449e147043f4007204f286b6cd869f88 100644 (file)
@@ -615,7 +615,7 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
         * allows us to only worry about whether an FP mode switch is in
         * progress when FP is first used in a tasks time slice. Pretty much all
         * of the mode switch overhead can thus be confined to cases where mode
-        * switches are actually occuring. That is, to here. However for the
+        * switches are actually occurring. That is, to here. However for the
         * thread performing the mode switch it may take a while...
         */
        if (num_online_cpus() > 1) {
index a56317444bdad94c73325eb1f72d75ff46af1b82..d01fe53a663850bccbea14e30208357df5aa074b 100644 (file)
@@ -596,3 +596,5 @@ EXPORT(sys_call_table)
        PTR     sys_membarrier
        PTR     sys_mlock2
        PTR     sys_copy_file_range             /* 4360 */
+       PTR     sys_preadv2
+       PTR     sys_pwritev2
index 2b2dc14610d02b58dc704ee63de6520d45ee5156..6b73ecc02597c008c0025a89b4ebf252f73c8a38 100644 (file)
@@ -434,4 +434,6 @@ EXPORT(sys_call_table)
        PTR     sys_membarrier
        PTR     sys_mlock2
        PTR     sys_copy_file_range             /* 5320 */
+       PTR     sys_preadv2
+       PTR     sys_pwritev2
        .size   sys_call_table,.-sys_call_table
index 2bf5c8593d91daad1a9ee5ffea26eb59318854d9..71f99d5f7a068220c17c9ab4da4cd6419f356b67 100644 (file)
@@ -424,4 +424,6 @@ EXPORT(sysn32_call_table)
        PTR     sys_membarrier
        PTR     sys_mlock2
        PTR     sys_copy_file_range
+       PTR     compat_sys_preadv2              /* 6325 */
+       PTR     compat_sys_pwritev2
        .size   sysn32_call_table,.-sysn32_call_table
index c5b759e584c758a9d56acf44d7677ff74dc7ecf8..91b43eea2d5a073026ef8f4928d74e41ca0bfb07 100644 (file)
@@ -579,4 +579,6 @@ EXPORT(sys32_call_table)
        PTR     sys_membarrier
        PTR     sys_mlock2
        PTR     sys_copy_file_range             /* 4360 */
+       PTR     compat_sys_preadv2
+       PTR     compat_sys_pwritev2
        .size   sys32_call_table,.-sys32_call_table
index 37708d9af6381f8772c190569b5d64de40c7281d..27cb638f082414048b6655eb04fc36b3b064eec1 100644 (file)
@@ -243,6 +243,18 @@ static int __init mips_smp_ipi_init(void)
        struct irq_domain *ipidomain;
        struct device_node *node;
 
+       /*
+        * In some cases like qemu-malta, it is desired to try SMP with
+        * a single core. Qemu-malta has no GIC, so an attempt to set any IPIs
+        * would cause a BUG_ON() to be triggered since there's no ipidomain.
+        *
+        * Since IPIs aren't really required for a single core system, skip the
+        * initialisation, which should generally keep any such configurations
+        * happy and only fail hard when trying to truly run SMP.
+        */
+       if (cpumask_weight(cpu_possible_mask) == 1)
+               return 0;
+
        node = of_irq_find_parent(of_root);
        ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);
 
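
The early return added above avoids a BUG_ON() on single-core configurations such as qemu-malta, where no IPI irqdomain exists. A trivial user-space sketch of the same shape, with the cpumask query reduced to a plain counter for illustration:

#include <stdio.h>

/* Stand-in for cpumask_weight(cpu_possible_mask). */
static unsigned int possible_cpus(void)
{
	return 1; /* pretend we are a uniprocessor configuration */
}

static int smp_ipi_init_sketch(void)
{
	/* With a single possible CPU there is nobody to IPI, so skip
	 * the irqdomain lookup entirely instead of failing hard. */
	if (possible_cpus() == 1)
		return 0;

	printf("setting up IPIs for %u CPUs\n", possible_cpus());
	return 0;
}

int main(void)
{
	return smp_ipi_init_sketch();
}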
index bf14da9f3e33b74473d2a23c3b3b388456c4bdc5..ae0c89d23ad7d3e41a068f4178b69fec492e2252 100644 (file)
@@ -56,6 +56,7 @@
 #include <asm/pgtable.h>
 #include <asm/ptrace.h>
 #include <asm/sections.h>
+#include <asm/siginfo.h>
 #include <asm/tlbdebug.h>
 #include <asm/traps.h>
 #include <asm/uaccess.h>
@@ -871,7 +872,7 @@ out:
        exception_exit(prev_state);
 }
 
-void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
+void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
        const char *str)
 {
        siginfo_t info = { 0 };
@@ -928,7 +929,13 @@ void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
        default:
                scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
                die_if_kernel(b, regs);
-               force_sig(SIGTRAP, current);
+               if (si_code) {
+                       info.si_signo = SIGTRAP;
+                       info.si_code = si_code;
+                       force_sig_info(SIGTRAP, &info, current);
+               } else {
+                       force_sig(SIGTRAP, current);
+               }
        }
 }
 
@@ -1012,7 +1019,7 @@ asmlinkage void do_bp(struct pt_regs *regs)
                break;
        }
 
-       do_trap_or_bp(regs, bcode, "Break");
+       do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");
 
 out:
        set_fs(seg);
@@ -1054,7 +1061,7 @@ asmlinkage void do_tr(struct pt_regs *regs)
                        tcode = (opcode >> 6) & ((1 << 10) - 1);
        }
 
-       do_trap_or_bp(regs, tcode, "Trap");
+       do_trap_or_bp(regs, tcode, 0, "Trap");
 
 out:
        set_fs(seg);
@@ -1115,19 +1122,7 @@ no_r2_instr:
        if (unlikely(compute_return_epc(regs) < 0))
                goto out;
 
-       if (get_isa16_mode(regs->cp0_epc)) {
-               unsigned short mmop[2] = { 0 };
-
-               if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
-                       status = SIGSEGV;
-               if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
-                       status = SIGSEGV;
-               opcode = mmop[0];
-               opcode = (opcode << 16) | mmop[1];
-
-               if (status < 0)
-                       status = simulate_rdhwr_mm(regs, opcode);
-       } else {
+       if (!get_isa16_mode(regs->cp0_epc)) {
                if (unlikely(get_user(opcode, epc) < 0))
                        status = SIGSEGV;
 
@@ -1142,6 +1137,18 @@ no_r2_instr:
 
                if (status < 0)
                        status = simulate_fp(regs, opcode, old_epc, old31);
+       } else if (cpu_has_mmips) {
+               unsigned short mmop[2] = { 0 };
+
+               if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
+                       status = SIGSEGV;
+               if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
+                       status = SIGSEGV;
+               opcode = mmop[0];
+               opcode = (opcode << 16) | mmop[1];
+
+               if (status < 0)
+                       status = simulate_rdhwr_mm(regs, opcode);
        }
 
        if (status < 0)
@@ -1492,6 +1499,7 @@ asmlinkage void do_mdmx(struct pt_regs *regs)
  */
 asmlinkage void do_watch(struct pt_regs *regs)
 {
+       siginfo_t info = { .si_signo = SIGTRAP, .si_code = TRAP_HWBKPT };
        enum ctx_state prev_state;
        u32 cause;
 
@@ -1512,7 +1520,7 @@ asmlinkage void do_watch(struct pt_regs *regs)
        if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
                mips_read_watch_registers();
                local_irq_enable();
-               force_sig(SIGTRAP, current);
+               force_sig_info(SIGTRAP, &info, current);
        } else {
                mips_clear_watch_registers();
                local_irq_enable();
@@ -2214,7 +2222,7 @@ void __init trap_init(void)
 
        /*
         * Copy the generic exception handlers to their final destination.
-        * This will be overriden later as suitable for a particular
+        * This will be overridden later as suitable for a particular
         * configuration.
         */
        set_handler(0x180, &except_vec3_generic, 0x80);
index 490cea569d57d0088e50e241f3465c91f07bbcc7..5c62065cbf22d610323331b8d3bd56e6e53fa793 100644 (file)
@@ -885,7 +885,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
 {
        union mips_instruction insn;
        unsigned long value;
-       unsigned int res;
+       unsigned int res, preempted;
        unsigned long origpc;
        unsigned long orig31;
        void __user *fault_addr = NULL;
@@ -1226,27 +1226,36 @@ static void emulate_load_store_insn(struct pt_regs *regs,
                        if (!access_ok(VERIFY_READ, addr, sizeof(*fpr)))
                                goto sigbus;
 
-                       /*
-                        * Disable preemption to avoid a race between copying
-                        * state from userland, migrating to another CPU and
-                        * updating the hardware vector register below.
-                        */
-                       preempt_disable();
-
-                       res = __copy_from_user_inatomic(fpr, addr,
-                                                       sizeof(*fpr));
-                       if (res)
-                               goto fault;
-
-                       /*
-                        * Update the hardware register if it is in use by the
-                        * task in this quantum, in order to avoid having to
-                        * save & restore the whole vector context.
-                        */
-                       if (test_thread_flag(TIF_USEDMSA))
-                               write_msa_wr(wd, fpr, df);
+                       do {
+                               /*
+                                * If we have live MSA context keep track of
+                                * whether we get preempted in order to avoid
+                                * the register context we load being clobbered
+                                * by the live context as it's saved during
+                                * preemption. If we don't have live context
+                                * then it can't be saved to clobber the value
+                                * we load.
+                                */
+                               preempted = test_thread_flag(TIF_USEDMSA);
+
+                               res = __copy_from_user_inatomic(fpr, addr,
+                                                               sizeof(*fpr));
+                               if (res)
+                                       goto fault;
 
-                       preempt_enable();
+                               /*
+                                * Update the hardware register if it is in use
+                                * by the task in this quantum, in order to
+                                * avoid having to save & restore the whole
+                                * vector context.
+                                */
+                               preempt_disable();
+                               if (test_thread_flag(TIF_USEDMSA)) {
+                                       write_msa_wr(wd, fpr, df);
+                                       preempted = 0;
+                               }
+                               preempt_enable();
+                       } while (preempted);
                        break;
 
                case msa_st_op:
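The loop above replaces a plain preempt-disabled copy with a retry: if the task had live MSA state and was preempted mid-copy, the context save may have clobbered the freshly loaded value, so the copy is redone. A hedged userspace analogue of the same pattern, with 'generation' standing in for "was preempted with live state":

#include <stdatomic.h>
#include <string.h>

static _Atomic unsigned long generation;	/* bumped by the "saver" */

/* Copy into a buffer that a concurrent context save may clobber, and
 * retry until a whole copy completes without an intervening save. */
void copy_stable(void *dst, const void *src, size_t len)
{
	unsigned long gen;

	do {
		gen = atomic_load(&generation);
		memcpy(dst, src, len);
	} while (atomic_load(&generation) != gen);
}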
index a08c439462472e3a2e441c1238d62cdee2988ed7..e0e1d0a611fc2bd102d10ad2ecec3ae3f6951551 100644 (file)
@@ -632,7 +632,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
        kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
 
-       /* Alocate new kernel and user ASIDs if needed */
+       /* Allocate new kernel and user ASIDs if needed */
 
        local_irq_save(flags);
 
index ad988000563f264d443b92bd0785e40ba78fe95d..c4038d2a724c0df9746a75c67c358deb3d620071 100644 (file)
@@ -500,7 +500,7 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
        kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
 
        /*
-        * Setup IntCtl defaults, compatibilty mode for timer interrupts (HW5)
+        * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
         */
        kvm_write_c0_guest_intctl(cop0, 0xFC000000);
 
index ad3c73436777f0c3103c887827c4547d888f5941..47d26c805eac5b4f91d600eee67075ed980ab250 100644 (file)
@@ -97,7 +97,7 @@ union ieee754dp ieee754dp_format(int sn, int xe, u64 xm)
 {
        assert(xm);             /* we don't gen exact zeros (probably should) */
 
-       assert((xm >> (DP_FBITS + 1 + 3)) == 0);        /* no execess */
+       assert((xm >> (DP_FBITS + 1 + 3)) == 0);        /* no excess */
        assert(xm & (DP_HIDDEN_BIT << 3));
 
        if (xe < DP_EMIN) {
@@ -165,7 +165,7 @@ union ieee754dp ieee754dp_format(int sn, int xe, u64 xm)
        /* strip grs bits */
        xm >>= 3;
 
-       assert((xm >> (DP_FBITS + 1)) == 0);    /* no execess */
+       assert((xm >> (DP_FBITS + 1)) == 0);    /* no excess */
        assert(xe >= DP_EMIN);
 
        if (xe > DP_EMAX) {
@@ -198,7 +198,7 @@ union ieee754dp ieee754dp_format(int sn, int xe, u64 xm)
                        ieee754_setcx(IEEE754_UNDERFLOW);
                return builddp(sn, DP_EMIN - 1 + DP_EBIAS, xm);
        } else {
-               assert((xm >> (DP_FBITS + 1)) == 0);    /* no execess */
+               assert((xm >> (DP_FBITS + 1)) == 0);    /* no excess */
                assert(xm & DP_HIDDEN_BIT);
 
                return builddp(sn, xe + DP_EBIAS, xm & ~DP_HIDDEN_BIT);
index def00ffc50fcc7bc4740933215cdf5872940a450..e0b2c450b9634caadcd24d5a25d3f58d4ad2f805 100644 (file)
@@ -97,7 +97,7 @@ union ieee754sp ieee754sp_format(int sn, int xe, unsigned xm)
 {
        assert(xm);             /* we don't gen exact zeros (probably should) */
 
-       assert((xm >> (SP_FBITS + 1 + 3)) == 0);        /* no execess */
+       assert((xm >> (SP_FBITS + 1 + 3)) == 0);        /* no excess */
        assert(xm & (SP_HIDDEN_BIT << 3));
 
        if (xe < SP_EMIN) {
@@ -163,7 +163,7 @@ union ieee754sp ieee754sp_format(int sn, int xe, unsigned xm)
        /* strip grs bits */
        xm >>= 3;
 
-       assert((xm >> (SP_FBITS + 1)) == 0);    /* no execess */
+       assert((xm >> (SP_FBITS + 1)) == 0);    /* no excess */
        assert(xe >= SP_EMIN);
 
        if (xe > SP_EMAX) {
@@ -196,7 +196,7 @@ union ieee754sp ieee754sp_format(int sn, int xe, unsigned xm)
                        ieee754_setcx(IEEE754_UNDERFLOW);
                return buildsp(sn, SP_EMIN - 1 + SP_EBIAS, xm);
        } else {
-               assert((xm >> (SP_FBITS + 1)) == 0);    /* no execess */
+               assert((xm >> (SP_FBITS + 1)) == 0);    /* no excess */
                assert(xm & SP_HIDDEN_BIT);
 
                return buildsp(sn, xe + SP_EBIAS, xm & ~SP_HIDDEN_BIT);
index dc7c5a5214a9e1a9fb06600ba67f55b7054e1425..026cb59a914dbc5f52a2c7200ae85f843081441c 100644 (file)
@@ -158,7 +158,7 @@ static inline int __init indy_sc_probe(void)
        return 1;
 }
 
-/* XXX Check with wje if the Indy caches can differenciate between
+/* XXX Check with wje if the Indy caches can differentiate between
    writeback + invalidate and just invalidate. */
 static struct bcache_ops indy_sc_ops = {
        .bc_enable = indy_sc_enable,
index 5037d5868cef7ef47f1baaa74dc84b7da410c7d5..c17d7627f872bffd148dc093386a3cf8efd4ebd8 100644 (file)
@@ -19,6 +19,7 @@
 #include <asm/cpu.h>
 #include <asm/cpu-type.h>
 #include <asm/bootinfo.h>
+#include <asm/hazards.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/tlb.h>
@@ -486,6 +487,10 @@ static void r4k_tlb_configure(void)
         *     be set to fixed-size pages.
         */
        write_c0_pagemask(PM_DEFAULT_MASK);
+       back_to_back_c0_hazard();
+       if (read_c0_pagemask() != PM_DEFAULT_MASK)
+               panic("MMU doesn't support PAGE_SIZE=0x%lx", PAGE_SIZE);
+
        write_c0_wired(0);
        if (current_cpu_type() == CPU_R10000 ||
            current_cpu_type() == CPU_R12000 ||
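The added read-back catches MMUs that silently ignore the requested page mask: write, wait out the CP0 hazard, then compare and panic on mismatch. A sketch of the write-then-verify pattern, with a fake register and hypothetical bit masks standing in for the real PageMask accessors:

#include <stdio.h>
#include <stdlib.h>

static unsigned int pagemask_reg;

static void write_pagemask(unsigned int v)
{
	pagemask_reg = v & 0x1fffe000;	/* hardware keeps only legal bits */
}

int main(void)
{
	const unsigned int wanted = 0xffffe000;	/* illustrative value */

	write_pagemask(wanted);
	/* the real code issues back_to_back_c0_hazard() here */
	if (pagemask_reg != wanted) {
		fprintf(stderr, "MMU rejected page mask 0x%x\n", wanted);
		exit(1);	/* the kernel panics instead */
	}
	return 0;
}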
index 5a04b6f5c6fb8a2cc52d986f59042ad8cd585850..84c6e3fda84af1f87f025c63709ecd48327fb75e 100644 (file)
@@ -12,7 +12,7 @@
  * Copyright (C) 2011  MIPS Technologies, Inc.
  *
  * ... and the days got worse and worse and now you see
- * I've gone completly out of my mind.
+ * I've gone completely out of my mind.
  *
  * They're coming to take me a away haha
  * they're coming to take me a away hoho hihi haha
index 8d0eb264324809b828f1463cd604e044e2e1df42..f1f88291451ec59adb4a5f28f9d19ea336669932 100644 (file)
@@ -7,7 +7,7 @@
  * Copyright (C) 2000 by Silicon Graphics, Inc.
  * Copyright (C) 2004 by Christoph Hellwig
  *
- * On SGI IP27 the ARC memory configuration data is completly bogus but
+ * On SGI IP27 the ARC memory configuration data is completely bogus but
  * alternate easier to use mechanisms are available.
  */
 #include <linux/init.h>
index c2cfcb121e346590f0de5d805d9952ac69b42d96..2fcefe720283001789483f9be39b8924099c647d 100644 (file)
@@ -68,7 +68,7 @@ void *memset(void *s, int c, size_t count)
                  "=r" (charcnt),       /* %1  Output */
                  "=r" (dwordcnt),      /* %2  Output */
                  "=r" (fill8reg),      /* %3  Output */
-                 "=r" (wrkrega)        /* %4  Output */
+                 "=&r" (wrkrega)       /* %4  Output only */
                : "r" (c),              /* %5  Input */
                  "0" (s),              /* %0  Input/Output */
                  "1" (count)           /* %1  Input/Output */
index bd3c873951a1ad2e890a77a57172d2ea7aedee71..88cfaa8af78ea2af3192472920100a4ac1de8207 100644 (file)
@@ -4,8 +4,8 @@ config PARISC
        select ARCH_MIGHT_HAVE_PC_PARPORT
        select HAVE_IDE
        select HAVE_OPROFILE
-       select HAVE_FUNCTION_TRACER if 64BIT
-       select HAVE_FUNCTION_GRAPH_TRACER if 64BIT
+       select HAVE_FUNCTION_TRACER
+       select HAVE_FUNCTION_GRAPH_TRACER
        select ARCH_WANT_FRAME_POINTERS
        select RTC_CLASS
        select RTC_DRV_GENERIC
index bc989e522a045c17ca4514478b7cb5ef9d77fc4d..68b7cbd0810a77bf321f524b9c8809b508f864d6 100644 (file)
@@ -2,9 +2,13 @@ menu "Kernel hacking"
 
 source "lib/Kconfig.debug"
 
+config TRACE_IRQFLAGS_SUPPORT
+       def_bool y
+
 config DEBUG_RODATA
        bool "Write protect kernel read-only data structures"
        depends on DEBUG_KERNEL
+       default y
        help
          Mark the kernel read-only data as write-protected in the pagetables,
          in order to catch accidental (and incorrect) writes to such const
index 965a0999fc4c081228a34a61daec7f1ef032a107..75cb451b1f03069ccd2e77fae86cb1e4da6b7922 100644 (file)
@@ -62,9 +62,7 @@ cflags-y      += -mdisable-fpregs
 
 # Without this, "ld -r" results in .text sections that are too big
 # (> 0x40000) for branches to reach stubs.
-ifndef CONFIG_FUNCTION_TRACER
-  cflags-y     += -ffunction-sections
-endif
+cflags-y       += -ffunction-sections
 
 # Use long jumps instead of long branches (needed if your linker fails to
 # link a too big vmlinux executable). Not enabled for building modules.
index 544ed8ef87ebbb4da274c7d8fb8e82464fe3fcda..24cd81d58d706faafe469c10a85f1182a28b958a 100644 (file)
@@ -4,23 +4,7 @@
 #ifndef __ASSEMBLY__
 extern void mcount(void);
 
-/*
- * Stack of return addresses for functions of a thread.
- * Used in struct thread_info
- */
-struct ftrace_ret_stack {
-       unsigned long ret;
-       unsigned long func;
-       unsigned long long calltime;
-};
-
-/*
- * Primary handler of a function return.
- * It relays on ftrace_return_to_handler.
- * Defined in entry.S
- */
-extern void return_to_handler(void);
-
+#define MCOUNT_INSN_SIZE 4
 
 extern unsigned long return_address(unsigned int);
 
index d4dd6e58682ce582f9b6581095fb11e59dee2766..7955e43f3f3f27558da65b100b9c3f64ebe26b02 100644 (file)
@@ -44,20 +44,18 @@ static inline long access_ok(int type, const void __user * addr,
 #define LDD_USER(ptr)          BUILD_BUG()
 #define STD_KERNEL(x, ptr)     __put_kernel_asm64(x, ptr)
 #define STD_USER(x, ptr)       __put_user_asm64(x, ptr)
-#define ASM_WORD_INSN          ".word\t"
 #else
 #define LDD_KERNEL(ptr)                __get_kernel_asm("ldd", ptr)
 #define LDD_USER(ptr)          __get_user_asm("ldd", ptr)
 #define STD_KERNEL(x, ptr)     __put_kernel_asm("std", x, ptr)
 #define STD_USER(x, ptr)       __put_user_asm("std", x, ptr)
-#define ASM_WORD_INSN          ".dword\t"
 #endif
 
 /*
- * The exception table contains two values: the first is an address
- * for an instruction that is allowed to fault, and the second is
- * the address to the fixup routine. Even on a 64bit kernel we could
- * use a 32bit (unsigned int) address here.
+ * The exception table contains two values: the first is the relative offset to
+ * the address of the instruction that is allowed to fault, and the second is
+ * the relative offset to the address of the fixup routine. Since relative
+ * addresses are used, 32bit values are sufficient even on 64bit kernel.
  */
 
 #define ARCH_HAS_RELATIVE_EXTABLE
@@ -77,6 +75,7 @@ struct exception_table_entry {
  */
 struct exception_data {
        unsigned long fault_ip;
+       unsigned long fault_gp;
        unsigned long fault_space;
        unsigned long fault_addr;
 };
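With ARCH_HAS_RELATIVE_EXTABLE, each table field stores the distance from its own address to the target, which is why 32-bit entries suffice even on a 64-bit kernel. A minimal sketch of how such an entry resolves back to absolute addresses:

#include <stdint.h>

struct rel_extable_entry {
	int32_t insn;	/* target_insn - &this->insn  */
	int32_t fixup;	/* target_fixup - &this->fixup */
};

static inline uintptr_t extable_insn(const struct rel_extable_entry *e)
{
	return (uintptr_t)&e->insn + e->insn;
}

static inline uintptr_t extable_fixup(const struct rel_extable_entry *e)
{
	return (uintptr_t)&e->fixup + e->fixup;
}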
index ff87b4603e3dc7d2b60caa2a0047e3eada177397..69a11183d48d4da5cf6f9d961d31b7cb91589480 100644 (file)
@@ -15,11 +15,7 @@ ifdef CONFIG_FUNCTION_TRACER
 # Do not profile debug and lowlevel utilities
 CFLAGS_REMOVE_ftrace.o = -pg
 CFLAGS_REMOVE_cache.o = -pg
-CFLAGS_REMOVE_irq.o = -pg
-CFLAGS_REMOVE_pacache.o = -pg
 CFLAGS_REMOVE_perf.o = -pg
-CFLAGS_REMOVE_traps.o = -pg
-CFLAGS_REMOVE_unaligned.o = -pg
 CFLAGS_REMOVE_unwind.o = -pg
 endif
 
index d2f62570a7b16d4f4c6321515d980f048cd278f2..78d30d2ea2d8bb24116a7639a3d4bc5f90400058 100644 (file)
@@ -299,6 +299,7 @@ int main(void)
 #endif
        BLANK();
        DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
+       DEFINE(EXCDATA_GP, offsetof(struct exception_data, fault_gp));
        DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
        DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr));
        BLANK();
index 91c2a39cd5aab98547424685940d2b099369fbf9..67001277256c6a51c1bf67d692d29ef1d480aadf 100644 (file)
@@ -319,7 +319,7 @@ void flush_dcache_page(struct page *page)
        if (!mapping)
                return;
 
-       pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+       pgoff = page->index;
 
        /* We have carefully arranged in arch_get_unmapped_area() that
         * *any* mappings of a file are always congruently mapped (whether
index 623496c117564cdbc7f939dea4ff777e114212ac..39127d3e70e56f2295b6e288f6642ed9908bcfbd 100644 (file)
@@ -1970,43 +1970,98 @@ pt_regs_ok:
        b       intr_restore
        copy    %r25,%r16
 
-       .import schedule,code
 syscall_do_resched:
-       BL      schedule,%r2
+       load32  syscall_check_resched,%r2 /* if resched, we start over again */
+       load32  schedule,%r19
+       bv      %r0(%r19)               /* jumps to schedule() */
 #ifdef CONFIG_64BIT
        ldo     -16(%r30),%r29          /* Reference param save area */
 #else
        nop
 #endif
-       b       syscall_check_resched   /* if resched, we start over again */
-       nop
 ENDPROC(syscall_exit)
 
 
 #ifdef CONFIG_FUNCTION_TRACER
+
        .import ftrace_function_trampoline,code
-ENTRY(_mcount)
-       copy    %r3, %arg2
+       .align L1_CACHE_BYTES
+       .globl mcount
+       .type  mcount, @function
+ENTRY(mcount)
+_mcount:
+       .export _mcount,data
+       .proc
+       .callinfo caller,frame=0
+       .entry
+       /*
+        * The 64bit mcount() function pointer needs 4 dwords, of which the
+        * first two are free.  We optimize it here and put 2 instructions for
+        * calling mcount(), and 2 instructions for ftrace_stub().  That way we
+        * have all on one L1 cacheline.
+        */
        b       ftrace_function_trampoline
+       copy    %r3, %arg2      /* caller original %sp */
+ftrace_stub:
+       .globl ftrace_stub
+        .type  ftrace_stub, @function
+#ifdef CONFIG_64BIT
+       bve     (%rp)
+#else
+       bv      %r0(%rp)
+#endif
        nop
-ENDPROC(_mcount)
+#ifdef CONFIG_64BIT
+       .dword mcount
+       .dword 0 /* code in head.S puts value of global gp here */
+#endif
+       .exit
+       .procend
+ENDPROC(mcount)
 
+       .align 8
+       .globl return_to_handler
+       .type  return_to_handler, @function
 ENTRY(return_to_handler)
-       load32  return_trampoline, %rp
-       copy    %ret0, %arg0
-       copy    %ret1, %arg1
-       b       ftrace_return_to_handler
-       nop
-return_trampoline:
-       copy    %ret0, %rp
-       copy    %r23, %ret0
-       copy    %r24, %ret1
+       .proc
+       .callinfo caller,frame=FRAME_SIZE
+       .entry
+       .export parisc_return_to_handler,data
+parisc_return_to_handler:
+       copy %r3,%r1
+       STREG %r0,-RP_OFFSET(%sp)       /* store 0 as %rp */
+       copy %sp,%r3
+       STREGM %r1,FRAME_SIZE(%sp)
+       STREG %ret0,8(%r3)
+       STREG %ret1,16(%r3)
 
-.globl ftrace_stub
-ftrace_stub:
+#ifdef CONFIG_64BIT
+       loadgp
+#endif
+
+       /* call ftrace_return_to_handler(0) */
+#ifdef CONFIG_64BIT
+       ldo -16(%sp),%ret1              /* Reference param save area */
+#endif
+       BL ftrace_return_to_handler,%r2
+       ldi 0,%r26
+       copy %ret0,%rp
+
+       /* restore original return values */
+       LDREG 8(%r3),%ret0
+       LDREG 16(%r3),%ret1
+
+       /* return from function */
+#ifdef CONFIG_64BIT
+       bve     (%rp)
+#else
        bv      %r0(%rp)
-       nop
+#endif
+       LDREGM -FRAME_SIZE(%sp),%r3
+       .exit
+       .procend
 ENDPROC(return_to_handler)
+
 #endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_IRQSTACKS
index 559d400f93859ac2ffe096006a2099f0b2137e62..b13f9ec6f2946506c2b42ef4748b447652db81c1 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Code for tracing calls in Linux kernel.
- * Copyright (C) 2009 Helge Deller <deller@gmx.de>
+ * Copyright (C) 2009-2016 Helge Deller <deller@gmx.de>
  *
  * based on code for x86 which is:
  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 #include <linux/init.h>
 #include <linux/ftrace.h>
 
+#include <asm/assembly.h>
 #include <asm/sections.h>
 #include <asm/ftrace.h>
 
 
-
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-
-/* Add a function return address to the trace stack on thread info.*/
-static int push_return_trace(unsigned long ret, unsigned long long time,
-                               unsigned long func, int *depth)
-{
-       int index;
-
-       if (!current->ret_stack)
-               return -EBUSY;
-
-       /* The return trace stack is full */
-       if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
-               atomic_inc(&current->trace_overrun);
-               return -EBUSY;
-       }
-
-       index = ++current->curr_ret_stack;
-       barrier();
-       current->ret_stack[index].ret = ret;
-       current->ret_stack[index].func = func;
-       current->ret_stack[index].calltime = time;
-       *depth = index;
-
-       return 0;
-}
-
-/* Retrieve a function return address to the trace stack on thread info.*/
-static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
-{
-       int index;
-
-       index = current->curr_ret_stack;
-
-       if (unlikely(index < 0)) {
-               ftrace_graph_stop();
-               WARN_ON(1);
-               /* Might as well panic, otherwise we have no where to go */
-               *ret = (unsigned long)
-                       dereference_function_descriptor(&panic);
-               return;
-       }
-
-       *ret = current->ret_stack[index].ret;
-       trace->func = current->ret_stack[index].func;
-       trace->calltime = current->ret_stack[index].calltime;
-       trace->overrun = atomic_read(&current->trace_overrun);
-       trace->depth = index;
-       barrier();
-       current->curr_ret_stack--;
-
-}
-
-/*
- * Send the trace to the ring-buffer.
- * @return the original return address.
- */
-unsigned long ftrace_return_to_handler(unsigned long retval0,
-                                      unsigned long retval1)
-{
-       struct ftrace_graph_ret trace;
-       unsigned long ret;
-
-       pop_return_trace(&trace, &ret);
-       trace.rettime = local_clock();
-       ftrace_graph_return(&trace);
-
-       if (unlikely(!ret)) {
-               ftrace_graph_stop();
-               WARN_ON(1);
-               /* Might as well panic. What else to do? */
-               ret = (unsigned long)
-                       dereference_function_descriptor(&panic);
-       }
-
-       /* HACK: we hand over the old functions' return values
-          in %r23 and %r24. Assembly in entry.S will take care
-          and move those to their final registers %ret0 and %ret1 */
-       asm( "copy %0, %%r23 \n\t"
-            "copy %1, %%r24 \n" : : "r" (retval0), "r" (retval1) );
-
-       return ret;
-}
-
 /*
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.
  */
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+static void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 {
        unsigned long old;
-       unsigned long long calltime;
        struct ftrace_graph_ent trace;
+       extern int parisc_return_to_handler;
 
        if (unlikely(ftrace_graph_is_dead()))
                return;
@@ -119,64 +36,47 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
                return;
 
        old = *parent;
-       *parent = (unsigned long)
-                 dereference_function_descriptor(&return_to_handler);
 
-       if (unlikely(!__kernel_text_address(old))) {
-               ftrace_graph_stop();
-               *parent = old;
-               WARN_ON(1);
-               return;
-       }
-
-       calltime = local_clock();
+       trace.func = self_addr;
+       trace.depth = current->curr_ret_stack + 1;
 
-       if (push_return_trace(old, calltime,
-                               self_addr, &trace.depth) == -EBUSY) {
-               *parent = old;
+       /* Only trace if the calling function expects to */
+       if (!ftrace_graph_entry(&trace))
                return;
-       }
 
-       trace.func = self_addr;
+        if (ftrace_push_return_trace(old, self_addr, &trace.depth,
+                       0 ) == -EBUSY)
+                return;
 
-       /* Only trace if the calling function expects to */
-       if (!ftrace_graph_entry(&trace)) {
-               current->curr_ret_stack--;
-               *parent = old;
-       }
+       /* activate parisc_return_to_handler() as return point */
+       *parent = (unsigned long) &parisc_return_to_handler;
 }
-
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
-
-void ftrace_function_trampoline(unsigned long parent,
+void notrace ftrace_function_trampoline(unsigned long parent,
                                unsigned long self_addr,
                                unsigned long org_sp_gr3)
 {
-       extern ftrace_func_t ftrace_trace_function;
+       extern ftrace_func_t ftrace_trace_function;  /* depends on CONFIG_DYNAMIC_FTRACE */
+       extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);
 
        if (ftrace_trace_function != ftrace_stub) {
-               ftrace_trace_function(parent, self_addr);
+               /* struct ftrace_ops *op, struct pt_regs *regs); */
+               ftrace_trace_function(parent, self_addr, NULL, NULL);
                return;
        }
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       if (ftrace_graph_entry && ftrace_graph_return) {
-               unsigned long sp;
+       if (ftrace_graph_return != (trace_func_graph_ret_t) ftrace_stub ||
+               ftrace_graph_entry != ftrace_graph_entry_stub) {
                unsigned long *parent_rp;
 
-                asm volatile ("copy %%r30, %0" : "=r"(sp));
-               /* sanity check: is stack pointer which we got from
-                  assembler function in entry.S in a reasonable
-                  range compared to current stack pointer? */
-               if ((sp - org_sp_gr3) > 0x400)
-                       return;
-
                /* calculate pointer to %rp in stack */
-               parent_rp = (unsigned long *) org_sp_gr3 - 0x10;
+               parent_rp = (unsigned long *) (org_sp_gr3 - RP_OFFSET);
                /* sanity check: parent_rp should hold parent */
                if (*parent_rp != parent)
                        return;
-               
+
                prepare_ftrace_return(parent_rp, self_addr);
                return;
        }
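The rewritten hook above drops the private parisc return stack in favor of the generic ftrace_push_return_trace(), and it only redirects the saved return address once the push has succeeded. A hedged distillation of that ordering, with push_frame() as a hypothetical stand-in for the generic helper:

#define MAX_DEPTH 64

static unsigned long ret_stack[MAX_DEPTH];
static int depth = -1;

/* Record the real return address; fail when the per-task stack is full. */
static int push_frame(unsigned long ret)
{
	if (depth + 1 >= MAX_DEPTH)
		return -1;
	ret_stack[++depth] = ret;
	return 0;
}

static void hook_return(unsigned long *parent, unsigned long trampoline)
{
	unsigned long old = *parent;

	/* only redirect the caller once the frame is safely recorded */
	if (push_frame(old))
		return;
	*parent = trampoline;
}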
index 75aa0db9f69efe2fb3fa00d10d6fc66bd3f2b923..bbbe360b458f511c068620db2dd670a770ea8362 100644 (file)
@@ -129,6 +129,15 @@ $pgt_fill_loop:
        /* And the stack pointer too */
        ldo             THREAD_SZ_ALGN(%r6),%sp
 
+#if defined(CONFIG_64BIT) && defined(CONFIG_FUNCTION_TRACER)
+       .import _mcount,data
+       /* initialize mcount FPTR */
+       /* Get the global data pointer */
+       loadgp
+       load32          PA(_mcount), %r10
+       std             %dp,0x18(%r10)
+#endif
+
 #ifdef CONFIG_SMP
        /* Set the smp rendezvous address into page zero.
        ** It would be safer to do this in init_smp_config() but
index b9d75d9fa9ace520e472874b5dcb0f0b0aad440e..a0ecdb4abcc878b3805d7a2d0f845272b1fc372d 100644 (file)
@@ -660,6 +660,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
                        }
                        *loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
                        break;
+               case R_PARISC_PCREL32:
+                       /* 32-bit PC relative address */
+                       *loc = val - dot - 8 + addend;
+                       break;
 
                default:
                        printk(KERN_ERR "module %s: Unknown relocation: %u\n",
@@ -788,6 +792,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
                        CHECK_RELOC(val, 22);
                        *loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
                        break;
+               case R_PARISC_PCREL32:
+                       /* 32-bit PC relative address */
+                       *loc = val - dot - 8 + addend;
+                       break;
                case R_PARISC_DIR64:
                        /* 64-bit effective address */
                        *loc64 = val + addend;
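Both apply_relocate_add() paths gain the same R_PARISC_PCREL32 case: the value written is the target minus the location being patched, minus 8 for the PA-RISC PC-offset convention, plus the addend. The arithmetic as a standalone helper:

#include <stdint.h>

/* val: resolved symbol address; dot: address being patched;
 * addend: relocation addend from the ELF entry. */
static uint32_t pcrel32(uint64_t val, uint64_t dot, int64_t addend)
{
	return (uint32_t)(val - dot - 8 + addend);
}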
index 568b2c61ea0208de80bcd1f5fdc54f7aafa95a30..3cad8aadc69e7a1159c829a7e5e5cea9f6a1297b 100644 (file)
@@ -47,11 +47,11 @@ EXPORT_SYMBOL(__cmpxchg_u64);
 EXPORT_SYMBOL(lclear_user);
 EXPORT_SYMBOL(lstrnlen_user);
 
-/* Global fixups */
-extern void fixup_get_user_skip_1(void);
-extern void fixup_get_user_skip_2(void);
-extern void fixup_put_user_skip_1(void);
-extern void fixup_put_user_skip_2(void);
+/* Global fixups - defined as int to avoid creation of function pointers */
+extern int fixup_get_user_skip_1;
+extern int fixup_get_user_skip_2;
+extern int fixup_put_user_skip_1;
+extern int fixup_put_user_skip_2;
 EXPORT_SYMBOL(fixup_get_user_skip_1);
 EXPORT_SYMBOL(fixup_get_user_skip_2);
 EXPORT_SYMBOL(fixup_put_user_skip_1);
index c976ebfe2269db83e7efcc803bcd14e390695e3b..57b4836b7ecd898e10197aa0d473ea6107b9fb42 100644 (file)
@@ -344,7 +344,7 @@ tracesys_next:
 #endif
 
        cmpib,COND(=),n -1,%r20,tracesys_exit /* seccomp may have returned -1 */
-       comiclr,>>=     __NR_Linux_syscalls, %r20, %r0
+       comiclr,>>      __NR_Linux_syscalls, %r20, %r0
        b,n     .Ltracesys_nosys
 
        LDREGX  %r20(%r19), %r19
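The comiclr condition change from ,>>= to ,>> tightens the syscall bounds check by one: __NR_Linux_syscalls itself is now out of range, since valid table indices run from 0 to __NR_Linux_syscalls - 1. The same predicate in C terms:

/* Old condition effectively accepted nr <= nr_syscalls, indexing one
 * slot past the end of the table; the fix restores nr < nr_syscalls. */
static int syscall_in_range(unsigned long nr, unsigned long nr_syscalls)
{
	return nr < nr_syscalls;
}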
index 16e0735e2f46bebb2b3a806d4b9d7d2f1a01c786..97d6b208e1294d23f52d825ee14ca28694f7876e 100644 (file)
@@ -795,6 +795,9 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
 
            if (fault_space == 0 && !faulthandler_disabled())
            {
+               /* Clean up and return if in exception table. */
+               if (fixup_exception(regs))
+                       return;
                pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
                parisc_terminate("Kernel Fault", regs, code, fault_address);
            }
index 536ef66bb94b5aa8c46800a968514184b5e2d85d..1052b747e011336621c6ca9f7c9eaf78af7af6df 100644 (file)
@@ -26,6 +26,7 @@
 
 #ifdef CONFIG_SMP
        .macro  get_fault_ip t1 t2
+       loadgp
        addil LT%__per_cpu_offset,%r27
        LDREG RT%__per_cpu_offset(%r1),\t1
        /* t2 = smp_processor_id() */
        LDREG RT%exception_data(%r1),\t1
        /* t1 = this_cpu_ptr(&exception_data) */
        add,l \t1,\t2,\t1
+       /* %r27 = t1->fault_gp - restore gp */
+       LDREG EXCDATA_GP(\t1), %r27
        /* t1 = t1->fault_ip */
        LDREG EXCDATA_IP(\t1), \t1
        .endm
 #else
        .macro  get_fault_ip t1 t2
+       loadgp
        /* t1 = this_cpu_ptr(&exception_data) */
        addil LT%exception_data,%r27
        LDREG RT%exception_data(%r1),\t2
+       /* %r27 = t2->fault_gp - restore gp */
+       LDREG EXCDATA_GP(\t2), %r27
        /* t1 = t2->fault_ip */
        LDREG EXCDATA_IP(\t2), \t1
        .endm
index 26fac9c671c9375ab9ae1663617c237186f55aba..16dbe81c97c9005df3cbb91045ccb6ed20878930 100644 (file)
@@ -145,6 +145,7 @@ int fixup_exception(struct pt_regs *regs)
                struct exception_data *d;
                d = this_cpu_ptr(&exception_data);
                d->fault_ip = regs->iaoq[0];
+               d->fault_gp = regs->gr[27];
                d->fault_space = regs->isr;
                d->fault_addr = regs->ior;
 
index 3c07d6b968772bc6de99bada8d83578ffe172c3d..6b3e7c6ee096efb9c1df64915de09d2a29dcef13 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/swap.h>
 #include <linux/unistd.h>
 #include <linux/nodemask.h>    /* for node_online_map */
-#include <linux/pagemap.h>     /* for release_pages and page_cache_release */
+#include <linux/pagemap.h>     /* for release_pages */
 #include <linux/compat.h>
 
 #include <asm/pgalloc.h>
index 3fa9df70aa20dfa01e90eb1a6af06171a1d8fbde..2fc5d4db503ccd03dfb40923267a2b763500cd22 100644 (file)
@@ -384,3 +384,5 @@ SYSCALL(ni_syscall)
 SYSCALL(ni_syscall)
 SYSCALL(mlock2)
 SYSCALL(copy_file_range)
+COMPAT_SYS_SPU(preadv2)
+COMPAT_SYS_SPU(pwritev2)
index 1f2594d456054262bd2bc4a9eef265c983971afa..cf12c580f6b286b957b0280d8174cc3d8c203d2f 100644 (file)
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls            380
+#define NR_syscalls            382
 
 #define __NR__exit __NR_exit
 
index e4396a7d0f7cf5627a92ea8c07756aba6bc52c7a..4afe66aa1400d5d7e3783e696a0a1de12d3a316c 100644 (file)
@@ -82,7 +82,7 @@ static inline unsigned long create_zero_mask(unsigned long bits)
            "andc       %1,%1,%2\n\t"
            "popcntd    %0,%1"
                : "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask)
-               : "r" (bits));
+               : "b" (bits));
 
        return leading_zero_bits;
 }
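The constraint change from "r" to "b" matters because the first instruction of this asm (an addi, above the excerpted context) reads its base operand, and Power hardware treats r0 in that position as the constant 0; "b" restricts the allocation to GPRs excluding r0. A hedged, PowerPC-only example of the rule:

#include <stdint.h>

/* addi treats rA == r0 as the literal 0, so any input feeding an addi
 * base operand must use "b" rather than plain "r". */
static inline uint64_t sub_one(uint64_t base)
{
#if defined(__powerpc__) || defined(__powerpc64__)
	uint64_t out;

	asm ("addi %0,%1,-1" : "=r" (out) : "b" (base));
	return out;
#else
	return base - 1;	/* portable fallback for illustration */
#endif
}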
index 8dde19962a5b49fbce13685656911c8f47622f3f..f63c96cd360863ac312aca929475de7629695169 100644 (file)
@@ -31,6 +31,7 @@
 #define PPC_FEATURE_PSERIES_PERFMON_COMPAT \
                                        0x00000040
 
+/* Reserved - do not use               0x00000004 */
 #define PPC_FEATURE_TRUE_LE            0x00000002
 #define PPC_FEATURE_PPC_LE             0x00000001
 
index 940290d45b087220711cd0a508ae5c4223b21951..e9f5f41aa55a1bc206749d56e75bd8edbb1d4068 100644 (file)
 #define __NR_membarrier                365
 #define __NR_mlock2            378
 #define __NR_copy_file_range   379
+#define __NR_preadv2           380
+#define __NR_pwritev2          381
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
index 7030b035905dbf85b03bd36fa4bdc2a4a14982de..a15fe1d4e84aec9955622b603823fe44633da98d 100644 (file)
@@ -148,23 +148,25 @@ static struct ibm_pa_feature {
        unsigned long   cpu_features;   /* CPU_FTR_xxx bit */
        unsigned long   mmu_features;   /* MMU_FTR_xxx bit */
        unsigned int    cpu_user_ftrs;  /* PPC_FEATURE_xxx bit */
+       unsigned int    cpu_user_ftrs2; /* PPC_FEATURE2_xxx bit */
        unsigned char   pabyte;         /* byte number in ibm,pa-features */
        unsigned char   pabit;          /* bit number (big-endian) */
        unsigned char   invert;         /* if 1, pa bit set => clear feature */
 } ibm_pa_features[] __initdata = {
-       {0, 0, PPC_FEATURE_HAS_MMU,     0, 0, 0},
-       {0, 0, PPC_FEATURE_HAS_FPU,     0, 1, 0},
-       {CPU_FTR_CTRL, 0, 0,            0, 3, 0},
-       {CPU_FTR_NOEXECUTE, 0, 0,       0, 6, 0},
-       {CPU_FTR_NODSISRALIGN, 0, 0,    1, 1, 1},
-       {0, MMU_FTR_CI_LARGE_PAGE, 0,   1, 2, 0},
-       {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
+       {0, 0, PPC_FEATURE_HAS_MMU, 0,          0, 0, 0},
+       {0, 0, PPC_FEATURE_HAS_FPU, 0,          0, 1, 0},
+       {CPU_FTR_CTRL, 0, 0, 0,                 0, 3, 0},
+       {CPU_FTR_NOEXECUTE, 0, 0, 0,            0, 6, 0},
+       {CPU_FTR_NODSISRALIGN, 0, 0, 0,         1, 1, 1},
+       {0, MMU_FTR_CI_LARGE_PAGE, 0, 0,                1, 2, 0},
+       {CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 0, 5, 0, 0},
        /*
-        * If the kernel doesn't support TM (ie. CONFIG_PPC_TRANSACTIONAL_MEM=n),
-        * we don't want to turn on CPU_FTR_TM here, so we use CPU_FTR_TM_COMP
-        * which is 0 if the kernel doesn't support TM.
+        * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
+        * we don't want to turn on TM here, so we use the *_COMP versions
+        * which are 0 if the kernel doesn't support TM.
         */
-       {CPU_FTR_TM_COMP, 0, 0,         22, 0, 0},
+       {CPU_FTR_TM_COMP, 0, 0,
+        PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0},
 };
 
 static void __init scan_features(unsigned long node, const unsigned char *ftrs,
@@ -195,10 +197,12 @@ static void __init scan_features(unsigned long node, const unsigned char *ftrs,
                if (bit ^ fp->invert) {
                        cur_cpu_spec->cpu_features |= fp->cpu_features;
                        cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
+                       cur_cpu_spec->cpu_user_features2 |= fp->cpu_user_ftrs2;
                        cur_cpu_spec->mmu_features |= fp->mmu_features;
                } else {
                        cur_cpu_spec->cpu_features &= ~fp->cpu_features;
                        cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
+                       cur_cpu_spec->cpu_user_features2 &= ~fp->cpu_user_ftrs2;
                        cur_cpu_spec->mmu_features &= ~fp->mmu_features;
                }
        }
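scan_features() now mirrors every ibm,pa-features match into cpu_user_features2 as well, so the PPC_FEATURE2_* bits (such as the HTM ones) track firmware properly. A sketch of the bit test the table drives, assuming pabit counts from the big-endian end of the byte as the struct comment says:

#include <stdint.h>

static int pa_feature_present(const uint8_t *ftrs, uint8_t pabyte,
			      uint8_t pabit, uint8_t invert)
{
	int bit = (ftrs[pabyte] >> (7 - pabit)) & 1;

	return bit ^ invert;	/* invert == 1: pa bit set clears feature */
}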
index dfa863876778144baa5cb99ac65e61c4ca60ec70..6ca5f0525e5701518b5ebba4cc4b40d387b3a68b 100644 (file)
@@ -732,8 +732,8 @@ spufs_fill_super(struct super_block *sb, void *data, int silent)
                return -ENOMEM;
 
        sb->s_maxbytes = MAX_LFS_FILESIZE;
-       sb->s_blocksize = PAGE_CACHE_SIZE;
-       sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+       sb->s_blocksize = PAGE_SIZE;
+       sb->s_blocksize_bits = PAGE_SHIFT;
        sb->s_magic = SPUFS_MAGIC;
        sb->s_op = &s_ops;
        sb->s_fs_info = info;
index aad23e3dff2c17b0bba5a7d2740ea47c21839480..bf24ab1889215abed3b8759dae2d45177ab960ab 100644 (file)
@@ -4,6 +4,9 @@ config MMU
 config ZONE_DMA
        def_bool y
 
+config CPU_BIG_ENDIAN
+       def_bool y
+
 config LOCKDEP_SUPPORT
        def_bool y
 
index 0f3da2cb2bd63c0d26649776fd53d844f3153d18..255c7eec44810420ae1b4d7608142db58fbad7e7 100644 (file)
@@ -278,8 +278,8 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
        sbi->uid = current_uid();
        sbi->gid = current_gid();
        sb->s_fs_info = sbi;
-       sb->s_blocksize = PAGE_CACHE_SIZE;
-       sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+       sb->s_blocksize = PAGE_SIZE;
+       sb->s_blocksize_bits = PAGE_SHIFT;
        sb->s_magic = HYPFS_MAGIC;
        sb->s_op = &hypfs_s_ops;
        if (hypfs_parse_options(data, sb))
index d29ad9545b4187a18c660e1ad62922b052447478..081b2ad99d737780a9d7d31006a51228896172ba 100644 (file)
@@ -11,7 +11,7 @@ typedef struct {
        spinlock_t list_lock;
        struct list_head pgtable_list;
        struct list_head gmap_list;
-       unsigned long asce_bits;
+       unsigned long asce;
        unsigned long asce_limit;
        unsigned long vdso_base;
        /* The mmu context allocates 4K page tables. */
index d321469eeda7316205f019b7201f54f68a71d79b..c837b79b455dc8615f55957e3475c00c35f56a17 100644 (file)
@@ -26,12 +26,28 @@ static inline int init_new_context(struct task_struct *tsk,
        mm->context.has_pgste = 0;
        mm->context.use_skey = 0;
 #endif
-       if (mm->context.asce_limit == 0) {
+       switch (mm->context.asce_limit) {
+       case 1UL << 42:
+               /*
+                * forked 3-level task, fall through to set new asce with new
+                * mm->pgd
+                */
+       case 0:
                /* context created by exec, set asce limit to 4TB */
-               mm->context.asce_bits = _ASCE_TABLE_LENGTH |
-                       _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
                mm->context.asce_limit = STACK_TOP_MAX;
-       } else if (mm->context.asce_limit == (1UL << 31)) {
+               mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+                                  _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
+               break;
+       case 1UL << 53:
+               /* forked 4-level task, set new asce with new mm->pgd */
+               mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+                                  _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
+               break;
+       case 1UL << 31:
+               /* forked 2-level compat task, set new asce with new mm->pgd */
+               mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+                                  _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
+               /* pgd_alloc() did not increase mm->nr_pmds */
                mm_inc_nr_pmds(mm);
        }
        crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
@@ -42,7 +58,7 @@ static inline int init_new_context(struct task_struct *tsk,
 
 static inline void set_user_asce(struct mm_struct *mm)
 {
-       S390_lowcore.user_asce = mm->context.asce_bits | __pa(mm->pgd);
+       S390_lowcore.user_asce = mm->context.asce;
        if (current->thread.mm_segment.ar4)
                __ctl_load(S390_lowcore.user_asce, 7, 7);
        set_cpu_flag(CIF_ASCE);
@@ -71,7 +87,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 {
        int cpu = smp_processor_id();
 
-       S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
+       S390_lowcore.user_asce = next->context.asce;
        if (prev == next)
                return;
        if (MACHINE_HAS_TLB_LC)
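Storing the finished ASCE (table origin ORed with the type/length bits) in the context means set_user_asce() and switch_mm() load one precomputed word instead of recombining asce_bits with __pa(pgd) on every switch. A hedged sketch of the shape of that change:

#include <stdint.h>

struct ctx_sketch {
	uint64_t asce;		/* origin | _ASCE_* bits, ready to load */
};

/* Computed once, whenever the page table origin or level changes. */
static void ctx_set_pgd(struct ctx_sketch *c, uint64_t pgd_phys,
			uint64_t asce_bits)
{
	c->asce = pgd_phys | asce_bits;
}

static uint64_t ctx_switch_to(const struct ctx_sketch *next)
{
	return next->asce;	/* previously: asce_bits | __pa(pgd) here */
}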
index b6bfa169a002c051649a2909281fe8c203a00ee7..535a46d46d2894e486284b4a8eb74df38738305f 100644 (file)
@@ -44,7 +44,8 @@ struct zpci_fmb {
        u64 rpcit_ops;
        u64 dma_rbytes;
        u64 dma_wbytes;
-} __packed __aligned(64);
+       u64 pad[2];
+} __packed __aligned(128);
 
 enum zpci_state {
        ZPCI_FN_STATE_RESERVED,
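The two pad words grow the function measurement block to a full 128 bytes, and the alignment bump to 128 keeps it from straddling a 128-byte boundary, which the measurement hardware appears to require. A sketch with an illustrative fmb_like layout (the real struct has more named counters) and a compile-time size check:

#include <stdint.h>

struct fmb_like {
	uint64_t counters[14];	/* stand-in for the named u64 counters */
	uint64_t pad[2];	/* rounds the block up to 128 bytes */
} __attribute__((packed, aligned(128)));

_Static_assert(sizeof(struct fmb_like) == 128,
	       "block must exactly fill its 128-byte slot");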
index 9b3d9b6099f2a8dd76a14f874dad8cc2715c2267..da34cb6b1f3b0347f8faef06a91d5a050b2140b9 100644 (file)
@@ -52,8 +52,8 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm)
        return _REGION2_ENTRY_EMPTY;
 }
 
-int crst_table_upgrade(struct mm_struct *, unsigned long limit);
-void crst_table_downgrade(struct mm_struct *, unsigned long limit);
+int crst_table_upgrade(struct mm_struct *);
+void crst_table_downgrade(struct mm_struct *);
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
 {
index d6fd22ea270db0e5446a41aaabc3d9ce1445e350..18cdede1aedab9bfb841471a51dd241751421110 100644 (file)
@@ -175,7 +175,7 @@ extern __vector128 init_task_fpu_regs[__NUM_VXRS];
        regs->psw.mask  = PSW_USER_BITS | PSW_MASK_BA;                  \
        regs->psw.addr  = new_psw;                                      \
        regs->gprs[15]  = new_stackp;                                   \
-       crst_table_downgrade(current->mm, 1UL << 31);                   \
+       crst_table_downgrade(current->mm);                              \
        execve_tail();                                                  \
 } while (0)
 
index 781a9cf9b002930429697dd973b214b844a5c244..e10f8337367b31c3de87b045937070e5dd975f94 100644 (file)
@@ -13,4 +13,6 @@
 #define __NR_seccomp_exit_32   __NR_exit
 #define __NR_seccomp_sigreturn_32 __NR_sigreturn
 
+#include <asm-generic/seccomp.h>
+
 #endif /* _ASM_S390_SECCOMP_H */
index ca148f7c3eaae0d5c7f5483aef08f26af1bc9ca0..a2e6ef32e05445b190b444cb249db44f638e10d2 100644 (file)
@@ -110,8 +110,7 @@ static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
 static inline void __tlb_flush_kernel(void)
 {
        if (MACHINE_HAS_IDTE)
-               __tlb_flush_idte((unsigned long) init_mm.pgd |
-                                init_mm.context.asce_bits);
+               __tlb_flush_idte(init_mm.context.asce);
        else
                __tlb_flush_global();
 }
@@ -133,8 +132,7 @@ static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
 static inline void __tlb_flush_kernel(void)
 {
        if (MACHINE_HAS_TLB_LC)
-               __tlb_flush_idte_local((unsigned long) init_mm.pgd |
-                                      init_mm.context.asce_bits);
+               __tlb_flush_idte_local(init_mm.context.asce);
        else
                __tlb_flush_local();
 }
@@ -148,8 +146,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
         * only ran on the local cpu.
         */
        if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
-               __tlb_flush_asce(mm, (unsigned long) mm->pgd |
-                                mm->context.asce_bits);
+               __tlb_flush_asce(mm, mm->context.asce);
        else
                __tlb_flush_full(mm);
 }
index d4549c9645892aef684c60ac73a2c8d22886e58e..e5f50a7d2f4eb07fd2037c605e4c7e87160455b2 100644 (file)
@@ -105,6 +105,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
                        if (_raw_compare_and_swap(&lp->lock, 0, cpu))
                                return;
                        local_irq_restore(flags);
+                       continue;
                }
                /* Check if the lock owner is running. */
                if (first_diag && cpu_is_preempted(~owner)) {
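The added 'continue' restarts the wait loop after a failed compare-and-swap in the irq-enabled window; falling through would run the owner-preemption heuristics against an owner value read before the cmpxchg attempt. A hedged distillation of the control flow:

#include <sched.h>
#include <stdatomic.h>

static _Atomic int lock_owner;	/* 0 == unlocked */

static void lock_sketch(int cpu)	/* cpu is a nonzero ticket */
{
	for (;;) {
		int owner = atomic_load(&lock_owner);

		if (owner == 0) {
			int expected = 0;

			if (atomic_compare_exchange_strong(&lock_owner,
							   &expected, cpu))
				return;
			continue;	/* the missing statement: re-read
					 * owner before the checks below */
		}
		/* "is the owner still running?" heuristics go here */
		sched_yield();
	}
}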
index 69247b4dcc43ab94a3ea7427c5f3998c7696ca86..cace818d86eb95bae2cd6f36038f177a5548a926 100644 (file)
@@ -23,7 +23,7 @@
 /**
  * gmap_alloc - allocate a guest address space
  * @mm: pointer to the parent mm_struct
- * @limit: maximum size of the gmap address space
+ * @limit: maximum address of the gmap address space
  *
  * Returns a guest address space structure.
  */
@@ -292,7 +292,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
        if ((from | to | len) & (PMD_SIZE - 1))
                return -EINVAL;
        if (len == 0 || from + len < from || to + len < to ||
-           from + len > TASK_MAX_SIZE || to + len > gmap->asce_end)
+           from + len - 1 > TASK_MAX_SIZE || to + len - 1 > gmap->asce_end)
                return -EINVAL;
 
        flush = 0;
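Checking 'from + len - 1' instead of 'from + len' tests the address of the last byte, so a mapping that ends exactly at the limit is accepted and a range ending at the very top of the address space no longer trips a spurious overflow; the 'from + len < from' test still catches genuine wraparound. The corrected predicate as a helper:

#include <stdint.h>

/* limit is the maximum valid address (inclusive). */
static int range_ok(uint64_t addr, uint64_t len, uint64_t limit)
{
	if (len == 0 || addr + len < addr)	/* empty or wrapping */
		return 0;
	return addr + len - 1 <= limit;
}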
index c7b0451397d6fbf6ac65f75c408c079601d0ad0a..2489b2e917c81a84789558b363707442989275c1 100644 (file)
@@ -89,7 +89,8 @@ void __init paging_init(void)
                asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
                pgd_type = _REGION3_ENTRY_EMPTY;
        }
-       S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
+       init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
+       S390_lowcore.kernel_asce = init_mm.context.asce;
        clear_table((unsigned long *) init_mm.pgd, pgd_type,
                    sizeof(unsigned long)*2048);
        vmem_map_init();
index 45c4daa49930f9a2bdfaf1d07a6ab5e002aa99c2..89cf09e5f16806a1422f49d52c59c3fda6bcaab7 100644 (file)
@@ -174,7 +174,7 @@ int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
        if (!(flags & MAP_FIXED))
                addr = 0;
        if ((addr + len) >= TASK_SIZE)
-               return crst_table_upgrade(current->mm, TASK_MAX_SIZE);
+               return crst_table_upgrade(current->mm);
        return 0;
 }
 
@@ -191,7 +191,7 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
                return area;
        if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) {
                /* Upgrade the page table to 4 levels and retry. */
-               rc = crst_table_upgrade(mm, TASK_MAX_SIZE);
+               rc = crst_table_upgrade(mm);
                if (rc)
                        return (unsigned long) rc;
                area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
@@ -213,7 +213,7 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
                return area;
        if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) {
                /* Upgrade the page table to 4 levels and retry. */
-               rc = crst_table_upgrade(mm, TASK_MAX_SIZE);
+               rc = crst_table_upgrade(mm);
                if (rc)
                        return (unsigned long) rc;
                area = arch_get_unmapped_area_topdown(filp, addr, len,
index f6c3de26cda85629350350c4d33069c53e9713c4..e8b5962ac12ab8035797829f6940b9c1c7cfe227 100644 (file)
@@ -76,81 +76,52 @@ static void __crst_table_upgrade(void *arg)
        __tlb_flush_local();
 }
 
-int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
+int crst_table_upgrade(struct mm_struct *mm)
 {
        unsigned long *table, *pgd;
-       unsigned long entry;
-       int flush;
 
-       BUG_ON(limit > TASK_MAX_SIZE);
-       flush = 0;
-repeat:
+       /* upgrade should only happen from 3 to 4 levels */
+       BUG_ON(mm->context.asce_limit != (1UL << 42));
+
        table = crst_table_alloc(mm);
        if (!table)
                return -ENOMEM;
+
        spin_lock_bh(&mm->page_table_lock);
-       if (mm->context.asce_limit < limit) {
-               pgd = (unsigned long *) mm->pgd;
-               if (mm->context.asce_limit <= (1UL << 31)) {
-                       entry = _REGION3_ENTRY_EMPTY;
-                       mm->context.asce_limit = 1UL << 42;
-                       mm->context.asce_bits = _ASCE_TABLE_LENGTH |
-                                               _ASCE_USER_BITS |
-                                               _ASCE_TYPE_REGION3;
-               } else {
-                       entry = _REGION2_ENTRY_EMPTY;
-                       mm->context.asce_limit = 1UL << 53;
-                       mm->context.asce_bits = _ASCE_TABLE_LENGTH |
-                                               _ASCE_USER_BITS |
-                                               _ASCE_TYPE_REGION2;
-               }
-               crst_table_init(table, entry);
-               pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
-               mm->pgd = (pgd_t *) table;
-               mm->task_size = mm->context.asce_limit;
-               table = NULL;
-               flush = 1;
-       }
+       pgd = (unsigned long *) mm->pgd;
+       crst_table_init(table, _REGION2_ENTRY_EMPTY);
+       pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
+       mm->pgd = (pgd_t *) table;
+       mm->context.asce_limit = 1UL << 53;
+       mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+                          _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
+       mm->task_size = mm->context.asce_limit;
        spin_unlock_bh(&mm->page_table_lock);
-       if (table)
-               crst_table_free(mm, table);
-       if (mm->context.asce_limit < limit)
-               goto repeat;
-       if (flush)
-               on_each_cpu(__crst_table_upgrade, mm, 0);
+
+       on_each_cpu(__crst_table_upgrade, mm, 0);
        return 0;
 }
 
-void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
+void crst_table_downgrade(struct mm_struct *mm)
 {
        pgd_t *pgd;
 
+       /* downgrade should only happen from 3 to 2 levels (compat only) */
+       BUG_ON(mm->context.asce_limit != (1UL << 42));
+
        if (current->active_mm == mm) {
                clear_user_asce();
                __tlb_flush_mm(mm);
        }
-       while (mm->context.asce_limit > limit) {
-               pgd = mm->pgd;
-               switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
-               case _REGION_ENTRY_TYPE_R2:
-                       mm->context.asce_limit = 1UL << 42;
-                       mm->context.asce_bits = _ASCE_TABLE_LENGTH |
-                                               _ASCE_USER_BITS |
-                                               _ASCE_TYPE_REGION3;
-                       break;
-               case _REGION_ENTRY_TYPE_R3:
-                       mm->context.asce_limit = 1UL << 31;
-                       mm->context.asce_bits = _ASCE_TABLE_LENGTH |
-                                               _ASCE_USER_BITS |
-                                               _ASCE_TYPE_SEGMENT;
-                       break;
-               default:
-                       BUG();
-               }
-               mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
-               mm->task_size = mm->context.asce_limit;
-               crst_table_free(mm, (unsigned long *) pgd);
-       }
+
+       pgd = mm->pgd;
+       mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
+       mm->context.asce_limit = 1UL << 31;
+       mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+                          _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
+       mm->task_size = mm->context.asce_limit;
+       crst_table_free(mm, (unsigned long *) pgd);
+
        if (current->active_mm == mm)
                set_user_asce(mm);
 }
index e595e89eac65d1f59831b5766d96507fbdbbd75a..1ea8c07eab849e467e5678c7e3dc662c49090860 100644 (file)
@@ -457,7 +457,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
        zdev->dma_table = dma_alloc_cpu_table();
        if (!zdev->dma_table) {
                rc = -ENOMEM;
-               goto out_clean;
+               goto out;
        }
 
        /*
@@ -477,18 +477,22 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
        zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
        if (!zdev->iommu_bitmap) {
                rc = -ENOMEM;
-               goto out_reg;
+               goto free_dma_table;
        }
 
        rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
                                (u64) zdev->dma_table);
        if (rc)
-               goto out_reg;
-       return 0;
+               goto free_bitmap;
 
-out_reg:
+       return 0;
+free_bitmap:
+       vfree(zdev->iommu_bitmap);
+       zdev->iommu_bitmap = NULL;
+free_dma_table:
        dma_free_cpu_table(zdev->dma_table);
-out_clean:
+       zdev->dma_table = NULL;
+out:
        return rc;
 }
 
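The relabelled error path unwinds exactly what succeeded (the old out_reg label skipped freeing the bitmap) and NULLs the pointers so a later teardown cannot free them twice. A minimal userspace sketch of the same goto-unwinding style:

#include <stdlib.h>

struct dev_state {
	void *table;
	void *bitmap;
};

static int init_dev(struct dev_state *d)
{
	int rc;

	d->table = malloc(4096);
	if (!d->table) {
		rc = -1;
		goto out;
	}
	d->bitmap = calloc(1, 512);
	if (!d->bitmap) {
		rc = -1;
		goto free_table;
	}
	return 0;

free_table:			/* each label undoes one earlier step */
	free(d->table);
	d->table = NULL;	/* prevent a double free in teardown */
out:
	return rc;
}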
index 1baf0ba962426f8e92b6789aabacaf5c47787014..c9f8bbdb1bf8e901505aca6378705a25db9cb46f 100644 (file)
@@ -34,11 +34,6 @@ enum {
 DECLARE_PER_CPU(int, cpu_state);
 
 void smp_message_recv(unsigned int msg);
-void smp_timer_broadcast(const struct cpumask *mask);
-
-void local_timer_interrupt(void);
-void local_timer_setup(unsigned int cpu);
-void local_timer_stop(unsigned int cpu);
 
 void arch_send_call_function_single_ipi(int cpu);
 void arch_send_call_function_ipi_mask(const struct cpumask *mask);
index b0a282d65f6a16bc0753faac736e67adc00f62e7..358e3f516ef6ca78b3c7844d174140cc99f759c8 100644 (file)
@@ -17,7 +17,7 @@
 
 #define mc_capable()    (1)
 
-const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
+const struct cpumask *cpu_coregroup_mask(int cpu);
 
 extern cpumask_t cpu_core_map[NR_CPUS];
 
index 4a298808789c46cc3502348b283f3b22b02be7dd..839612c8a0a052e1a1a674ece5dcb7541ce6c467 100644 (file)
@@ -73,8 +73,6 @@ static void shx3_prepare_cpus(unsigned int max_cpus)
 {
        int i;
 
-       local_timer_setup(0);
-
        BUILD_BUG_ON(SMP_MSG_NR >= 8);
 
        for (i = 0; i < SMP_MSG_NR; i++)
index 772caffba22fb644a077bfe8707148f75a1dc14a..c82912a61d74f26a3d3b1b3e47913c899329ecf6 100644 (file)
@@ -21,7 +21,7 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices);
 cpumask_t cpu_core_map[NR_CPUS];
 EXPORT_SYMBOL(cpu_core_map);
 
-static cpumask_t cpu_coregroup_map(unsigned int cpu)
+static cpumask_t cpu_coregroup_map(int cpu)
 {
        /*
         * Presently all SH-X3 SMP cores are multi-cores, so just keep it
@@ -30,7 +30,7 @@ static cpumask_t cpu_coregroup_map(unsigned int cpu)
        return *cpu_possible_mask;
 }
 
-const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
+const struct cpumask *cpu_coregroup_mask(int cpu)
 {
        return &cpu_core_map[cpu];
 }
index fb23fd6b186a1b0ffeeace7cca9e9a5a136fbfa6..c74d3701ad6830fe88338c185be374f7a6730f07 100644 (file)
@@ -24,7 +24,6 @@ CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
 CONFIG_INET_IPCOMP=y
 # CONFIG_INET_LRO is not set
-CONFIG_IPV6_PRIVACY=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
index 04920ab8e292b75a99986b1a2d10ac6273e2db94..3583d676a9161f8197e826553162f011f390c1cd 100644 (file)
@@ -48,7 +48,6 @@ CONFIG_SYN_COOKIES=y
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
 CONFIG_INET_IPCOMP=y
-CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_IPV6_OPTIMISTIC_DAD=y
index 56f933816144d47bf96506d09d3249445bc0fe32..1d8321c827a8821bb4e9f4989eb883cd761370db 100644 (file)
@@ -48,6 +48,7 @@
 #define SUN4V_CHIP_SPARC_M6    0x06
 #define SUN4V_CHIP_SPARC_M7    0x07
 #define SUN4V_CHIP_SPARC64X    0x8a
+#define SUN4V_CHIP_SPARC_SN    0x8b
 #define SUN4V_CHIP_UNKNOWN     0xff
 
 #ifndef __ASSEMBLY__
index b6de8b10a55b8b8f09eedb2c90d86906d5445686..36eee8132c22bac329e99fb7284211e11310ff26 100644 (file)
 #define __NR_setsockopt                355
 #define __NR_mlock2            356
 #define __NR_copy_file_range   357
+#define __NR_preadv2           358
+#define __NR_pwritev2          359
 
-#define NR_syscalls            358
+#define NR_syscalls            360
 
 /* Bitmask values returned from kern_features system call.  */
 #define KERN_FEATURE_MIXED_MODE_STACK  0x00000001
index 4ee1ad420862d425cff03ba7aad8e395fcb75907..655628def68e6be60b7cd31bda369b2fbbfd6c6b 100644 (file)
@@ -214,8 +214,7 @@ do_dcpe_tl1_nonfatal:       /* Ok we may use interrupt globals safely. */
        subcc           %g1, %g2, %g1           ! Next cacheline
        bge,pt          %icc, 1b
         nop
-       ba,pt           %xcc, dcpe_icpe_tl1_common
-        nop
+       ba,a,pt         %xcc, dcpe_icpe_tl1_common
 
 do_dcpe_tl1_fatal:
        sethi           %hi(1f), %g7
@@ -224,8 +223,7 @@ do_dcpe_tl1_fatal:
        mov             0x2, %o0
        call            cheetah_plus_parity_error
         add            %sp, PTREGS_OFF, %o1
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           do_dcpe_tl1,.-do_dcpe_tl1
 
        .globl          do_icpe_tl1
@@ -259,8 +257,7 @@ do_icpe_tl1_nonfatal:       /* Ok we may use interrupt globals safely. */
        subcc           %g1, %g2, %g1
        bge,pt          %icc, 1b
         nop
-       ba,pt           %xcc, dcpe_icpe_tl1_common
-        nop
+       ba,a,pt         %xcc, dcpe_icpe_tl1_common
 
 do_icpe_tl1_fatal:
        sethi           %hi(1f), %g7
@@ -269,8 +266,7 @@ do_icpe_tl1_fatal:
        mov             0x3, %o0
        call            cheetah_plus_parity_error
         add            %sp, PTREGS_OFF, %o1
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           do_icpe_tl1,.-do_icpe_tl1
        
        .type           dcpe_icpe_tl1_common,#function
@@ -456,7 +452,7 @@ __cheetah_log_error:
         cmp            %g2, 0x63
        be              c_cee
         nop
-       ba,pt           %xcc, c_deferred
+       ba,a,pt         %xcc, c_deferred
        .size           __cheetah_log_error,.-__cheetah_log_error
 
        /* Cheetah FECC trap handling, we get here from tl{0,1}_fecc
index dfad8b1aea9fb042a40290c766a3bb7ede4e0480..493e023a468a919c61d77451e43e0a4a2e414bbe 100644 (file)
@@ -506,6 +506,12 @@ static void __init sun4v_cpu_probe(void)
                sparc_pmu_type = "sparc-m7";
                break;
 
+       case SUN4V_CHIP_SPARC_SN:
+               sparc_cpu_type = "SPARC-SN";
+               sparc_fpu_type = "SPARC-SN integrated FPU";
+               sparc_pmu_type = "sparc-sn";
+               break;
+
        case SUN4V_CHIP_SPARC64X:
                sparc_cpu_type = "SPARC64-X";
                sparc_fpu_type = "SPARC64-X integrated FPU";
index e69ec0e3f15527705b3ce28fc89ff5c0341f03fb..45c820e1cba5d949ff936f15392ca3c0c8578a34 100644 (file)
@@ -328,6 +328,7 @@ static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
        case SUN4V_CHIP_NIAGARA5:
        case SUN4V_CHIP_SPARC_M6:
        case SUN4V_CHIP_SPARC_M7:
+       case SUN4V_CHIP_SPARC_SN:
        case SUN4V_CHIP_SPARC64X:
                rover_inc_table = niagara_iterate_method;
                break;
index a6864826a4bd963f0188bd1f5d64218cbdee1981..336d2750fe78c3d4d79175e3427e94859e0cca95 100644 (file)
@@ -100,8 +100,8 @@ do_fpdis:
        fmuld           %f0, %f2, %f26
        faddd           %f0, %f2, %f28
        fmuld           %f0, %f2, %f30
-       b,pt            %xcc, fpdis_exit
-        nop
+       ba,a,pt         %xcc, fpdis_exit
+
 2:     andcc           %g5, FPRS_DU, %g0
        bne,pt          %icc, 3f
         fzero          %f32
@@ -144,8 +144,8 @@ do_fpdis:
        fmuld           %f32, %f34, %f58
        faddd           %f32, %f34, %f60
        fmuld           %f32, %f34, %f62
-       ba,pt           %xcc, fpdis_exit
-        nop
+       ba,a,pt         %xcc, fpdis_exit
+
 3:     mov             SECONDARY_CONTEXT, %g3
        add             %g6, TI_FPREGS, %g1
 
@@ -197,8 +197,7 @@ fpdis_exit2:
 fp_other_bounce:
        call            do_fpother
         add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           fp_other_bounce,.-fp_other_bounce
 
        .align          32
index cd1f592cd3479f8c94c599bfc589087184d4d180..a076b4249e622a4ebfe47eab6c9bcd7cb98b36ce 100644 (file)
@@ -414,6 +414,8 @@ sun4v_chip_type:
        cmp     %g2, 'T'
        be,pt   %xcc, 70f
         cmp    %g2, 'M'
+       be,pt   %xcc, 70f
+        cmp    %g2, 'S'
        bne,pn  %xcc, 49f
         nop
 
@@ -433,6 +435,9 @@ sun4v_chip_type:
        cmp     %g2, '7'
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_SPARC_M7, %g4
+       cmp     %g2, 'N'
+       be,pt   %xcc, 5f
+        mov    SUN4V_CHIP_SPARC_SN, %g4
        ba,pt   %xcc, 49f
         nop
 
@@ -461,9 +466,8 @@ sun4v_chip_type:
        subcc   %g3, 1, %g3
        bne,pt  %xcc, 41b
        add     %g1, 1, %g1
-       mov     SUN4V_CHIP_SPARC64X, %g4
        ba,pt   %xcc, 5f
-       nop
+        mov    SUN4V_CHIP_SPARC64X, %g4
 
 49:
        mov     SUN4V_CHIP_UNKNOWN, %g4
@@ -548,8 +552,7 @@ sun4u_init:
        stxa            %g0, [%g7] ASI_DMMU
        membar  #Sync
 
-       ba,pt           %xcc, sun4u_continue
-        nop
+       ba,a,pt         %xcc, sun4u_continue
 
 sun4v_init:
        /* Set ctx 0 */
@@ -560,14 +563,12 @@ sun4v_init:
        mov             SECONDARY_CONTEXT, %g7
        stxa            %g0, [%g7] ASI_MMU
        membar          #Sync
-       ba,pt           %xcc, niagara_tlb_fixup
-        nop
+       ba,a,pt         %xcc, niagara_tlb_fixup
 
 sun4u_continue:
        BRANCH_IF_ANY_CHEETAH(g1, g7, cheetah_tlb_fixup)
 
-       ba,pt   %xcc, spitfire_tlb_fixup
-        nop
+       ba,a,pt %xcc, spitfire_tlb_fixup
 
 niagara_tlb_fixup:
        mov     3, %g2          /* Set TLB type to hypervisor. */
@@ -595,6 +596,9 @@ niagara_tlb_fixup:
        be,pt   %xcc, niagara4_patch
         nop
        cmp     %g1, SUN4V_CHIP_SPARC_M7
+       be,pt   %xcc, niagara4_patch
+        nop
+       cmp     %g1, SUN4V_CHIP_SPARC_SN
        be,pt   %xcc, niagara4_patch
         nop
 
@@ -639,8 +643,7 @@ niagara_patch:
        call    hypervisor_patch_cachetlbops
         nop
 
-       ba,pt   %xcc, tlb_fixup_done
-        nop
+       ba,a,pt %xcc, tlb_fixup_done
 
 cheetah_tlb_fixup:
        mov     2, %g2          /* Set TLB type to cheetah+. */
@@ -659,8 +662,7 @@ cheetah_tlb_fixup:
        call    cheetah_patch_cachetlbops
         nop
 
-       ba,pt   %xcc, tlb_fixup_done
-        nop
+       ba,a,pt %xcc, tlb_fixup_done
 
 spitfire_tlb_fixup:
        /* Set TLB type to spitfire. */
@@ -774,8 +776,7 @@ setup_trap_table:
        call    %o1
         add    %sp, (2047 + 128), %o0
 
-       ba,pt   %xcc, 2f
-        nop
+       ba,a,pt %xcc, 2f
 
 1:     sethi   %hi(sparc64_ttable_tl0), %o0
        set     prom_set_trap_table_name, %g2
@@ -814,8 +815,7 @@ setup_trap_table:
 
        BRANCH_IF_ANY_CHEETAH(o2, o3, 1f)
 
-       ba,pt   %xcc, 2f
-        nop
+       ba,a,pt %xcc, 2f
 
        /* Disable STICK_INT interrupts. */
 1:
index 753b4f031bfb710b77cefa78b74ade61a9d8850c..34b4933900bf7665b30e791565795d5782351b60 100644 (file)
@@ -18,8 +18,7 @@ __do_privact:
 109:   or              %g7, %lo(109b), %g7
        call            do_privact
         add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           __do_privact,.-__do_privact
 
        .type           do_mna,#function
@@ -46,8 +45,7 @@ do_mna:
        mov             %l5, %o2
        call            mem_address_unaligned
         add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           do_mna,.-do_mna
 
        .type           do_lddfmna,#function
@@ -65,8 +63,7 @@ do_lddfmna:
        mov             %l5, %o2
        call            handle_lddfmna
         add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           do_lddfmna,.-do_lddfmna
 
        .type           do_stdfmna,#function
@@ -84,8 +81,7 @@ do_stdfmna:
        mov             %l5, %o2
        call            handle_stdfmna
         add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           do_stdfmna,.-do_stdfmna
 
        .type           breakpoint_trap,#function
index badf0951d73c8f2d875aa5cbc093caef04616aeb..c2b202d763a16ae74d1d150ed6e7dd940341d7dd 100644 (file)
@@ -245,6 +245,18 @@ static void pci_parse_of_addrs(struct platform_device *op,
        }
 }
 
+static void pci_init_dev_archdata(struct dev_archdata *sd, void *iommu,
+                                 void *stc, void *host_controller,
+                                 struct platform_device  *op,
+                                 int numa_node)
+{
+       sd->iommu = iommu;
+       sd->stc = stc;
+       sd->host_controller = host_controller;
+       sd->op = op;
+       sd->numa_node = numa_node;
+}
+
 static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
                                         struct device_node *node,
                                         struct pci_bus *bus, int devfn)
@@ -259,13 +271,10 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
        if (!dev)
                return NULL;
 
+       op = of_find_device_by_node(node);
        sd = &dev->dev.archdata;
-       sd->iommu = pbm->iommu;
-       sd->stc = &pbm->stc;
-       sd->host_controller = pbm;
-       sd->op = op = of_find_device_by_node(node);
-       sd->numa_node = pbm->numa_node;
-
+       pci_init_dev_archdata(sd, pbm->iommu, &pbm->stc, pbm, op,
+                             pbm->numa_node);
        sd = &op->dev.archdata;
        sd->iommu = pbm->iommu;
        sd->stc = &pbm->stc;
@@ -994,6 +1003,27 @@ void pcibios_set_master(struct pci_dev *dev)
        /* No special bus mastering setup handling */
 }
 
+#ifdef CONFIG_PCI_IOV
+int pcibios_add_device(struct pci_dev *dev)
+{
+       struct pci_dev *pdev;
+
+       /* Add SR-IOV arch-specific initialization here:
+        * copy dev_archdata from PF to VF.
+        */
+       if (dev->is_virtfn) {
+               struct dev_archdata *psd;
+
+               pdev = dev->physfn;
+               psd = &pdev->dev.archdata;
+               pci_init_dev_archdata(&dev->dev.archdata, psd->iommu,
+                                     psd->stc, psd->host_controller, NULL,
+                                     psd->numa_node);
+       }
+       return 0;
+}
+#endif /* CONFIG_PCI_IOV */
+
 static int __init pcibios_init(void)
 {
        pci_dfl_cache_line_size = 64 >> 2;
index 26db95b54ee94c44537590a1b94a4870bd9227ce..599f1207eed2e469157e2475631d2537543b5794 100644 (file)
@@ -285,7 +285,8 @@ static void __init sun4v_patch(void)
 
        sun4v_patch_2insn_range(&__sun4v_2insn_patch,
                                &__sun4v_2insn_patch_end);
-       if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7)
+       if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+           sun4v_chip_type == SUN4V_CHIP_SPARC_SN)
                sun_m7_patch_2insn_range(&__sun_m7_2insn_patch,
                                         &__sun_m7_2insn_patch_end);
 
@@ -524,6 +525,7 @@ static void __init init_sparc64_elf_hwcap(void)
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+                   sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                        cap |= HWCAP_SPARC_BLKINIT;
                if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
@@ -532,6 +534,7 @@ static void __init init_sparc64_elf_hwcap(void)
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+                   sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                        cap |= HWCAP_SPARC_N2;
        }
@@ -561,6 +564,7 @@ static void __init init_sparc64_elf_hwcap(void)
                            sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+                           sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                                cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
                                        AV_SPARC_ASI_BLK_INIT |
@@ -570,6 +574,7 @@ static void __init init_sparc64_elf_hwcap(void)
                            sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+                           sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                                cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
                                        AV_SPARC_FMAF);
index c357e40ffd01526ca38a824c12ac077cdba135c6..4a73009f66a5727e43ad45dbe1a2f7bded8cd48f 100644 (file)
@@ -85,8 +85,7 @@ __spitfire_cee_trap_continue:
        ba,pt           %xcc, etraptl1
         rd             %pc, %g7
 
-       ba,pt           %xcc, 2f
-        nop
+       ba,a,pt         %xcc, 2f
 
 1:     ba,pt           %xcc, etrap_irq
         rd             %pc, %g7
@@ -100,8 +99,7 @@ __spitfire_cee_trap_continue:
        mov             %l5, %o2
        call            spitfire_access_error
         add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           __spitfire_access_error,.-__spitfire_access_error
 
        /* This is the trap handler entry point for ECC correctable
@@ -179,8 +177,7 @@ __spitfire_data_access_exception_tl1:
        mov             %l5, %o2
        call            spitfire_data_access_exception_tl1
         add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           __spitfire_data_access_exception_tl1,.-__spitfire_data_access_exception_tl1
 
        .type           __spitfire_data_access_exception,#function
@@ -200,8 +197,7 @@ __spitfire_data_access_exception:
        mov             %l5, %o2
        call            spitfire_data_access_exception
         add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           __spitfire_data_access_exception,.-__spitfire_data_access_exception
 
        .type           __spitfire_insn_access_exception_tl1,#function
@@ -220,8 +216,7 @@ __spitfire_insn_access_exception_tl1:
        mov             %l5, %o2
        call            spitfire_insn_access_exception_tl1
         add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           __spitfire_insn_access_exception_tl1,.-__spitfire_insn_access_exception_tl1
 
        .type           __spitfire_insn_access_exception,#function
@@ -240,6 +235,5 @@ __spitfire_insn_access_exception:
        mov             %l5, %o2
        call            spitfire_insn_access_exception
         add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           __spitfire_insn_access_exception,.-__spitfire_insn_access_exception
index 6c3dd6c52f8bd09135e81f1d56704602d10c4f4e..eac7f0db5c8c6269a913152a11941f66f5f3e8d0 100644 (file)
@@ -88,4 +88,4 @@ sys_call_table:
 /*340*/        .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
 /*345*/        .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
 /*350*/        .long sys_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
-/*355*/        .long sys_setsockopt, sys_mlock2, sys_copy_file_range
+/*355*/        .long sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2
index 12b524cfcfa0120caabdf7aa5b8606ecc3978c96..b0f17ff2ddba2daa75a8e8a646b5661dfe0d36e9 100644 (file)
@@ -89,7 +89,7 @@ sys_call_table32:
 /*340*/        .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
        .word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
 /*350*/        .word sys32_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
-       .word compat_sys_setsockopt, sys_mlock2, sys_copy_file_range
+       .word compat_sys_setsockopt, sys_mlock2, sys_copy_file_range, compat_sys_preadv2, compat_sys_pwritev2
 
 #endif /* CONFIG_COMPAT */
 
@@ -170,4 +170,4 @@ sys_call_table:
 /*340*/        .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
        .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
 /*350*/        .word sys64_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
-       .word sys_setsockopt, sys_mlock2, sys_copy_file_range
+       .word sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2
index b7f0f3f3a909b05b544da1a7b3d3e21d12bfa357..c731e8023d3e7c6333ae091f39ca76aedf332603 100644 (file)
@@ -11,8 +11,7 @@ utrap_trap:           /* %g3=handler,%g4=level */
        mov             %l4, %o1
         call           bad_trap
         add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
 
 invoke_utrap:
        sllx            %g3, 3, %g3
index cb5789c9f9613ed692733d50dcf0e2c39784b1f7..f6bb857254fcfa170155d4cd8dc8cb717c5bfb97 100644 (file)
@@ -45,6 +45,14 @@ static const struct vio_device_id *vio_match_device(
        return NULL;
 }
 
+static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
+{
+       const struct vio_dev *vio_dev = to_vio_dev(dev);
+
+       add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, vio_dev->compat);
+       return 0;
+}
+
 static int vio_bus_match(struct device *dev, struct device_driver *drv)
 {
        struct vio_dev *vio_dev = to_vio_dev(dev);
@@ -105,15 +113,25 @@ static ssize_t type_show(struct device *dev,
        return sprintf(buf, "%s\n", vdev->type);
 }
 
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+                            char *buf)
+{
+       const struct vio_dev *vdev = to_vio_dev(dev);
+
+       return sprintf(buf, "vio:T%sS%s\n", vdev->type, vdev->compat);
+}
+
 static struct device_attribute vio_dev_attrs[] = {
        __ATTR_RO(devspec),
        __ATTR_RO(type),
+       __ATTR_RO(modalias),
        __ATTR_NULL
 };
 
 static struct bus_type vio_bus_type = {
        .name           = "vio",
        .dev_attrs      = vio_dev_attrs,
+       .uevent         = vio_hotplug,
        .match          = vio_bus_match,
        .probe          = vio_device_probe,
        .remove         = vio_device_remove,
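
With the uevent callback and modalias attribute above in place, userspace can
autoload vio drivers from the "vio:T<type>S<compat>" alias. A minimal sketch of
the driver side, in the spirit of existing sparc vio drivers; the
"example-port"/"example,port" strings are hypothetical, not from this patch:

    /* Aliases matching the MODALIAS format emitted by vio_hotplug(). */
    static const struct vio_device_id example_vio_ids[] = {
            { .type = "example-port", .compat = "example,port" },
            { /* sentinel */ },
    };
    MODULE_DEVICE_TABLE(vio, example_vio_ids);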
index aadd321aa05db983af75a13ee14f2693ad75ed5c..7d02b1fef0256bb84c218e55b4d2328d037f5903 100644 (file)
@@ -33,6 +33,10 @@ ENTRY(_start)
 jiffies = jiffies_64;
 #endif
 
+#ifdef CONFIG_SPARC64
+ASSERT((swapper_tsb == 0x0000000000408000), "Error: sparc64 early assembler too large")
+#endif
+
 SECTIONS
 {
 #ifdef CONFIG_SPARC64
index 1e67ce95836972d439b50c88aa77d8395a543652..855019a8590ea5d556b71c9ae9356f7dcb629588 100644 (file)
@@ -32,8 +32,7 @@ fill_fixup:
         rd     %pc, %g7
        call    do_sparc64_fault
         add    %sp, PTREGS_OFF, %o0
-       ba,pt   %xcc, rtrap
-        nop
+       ba,a,pt %xcc, rtrap
 
        /* Be very careful about usage of the trap globals here.
         * You cannot touch %g5 as that has the fault information.
index 1cfe6aab7a11572d54f6848fe4656b7b40af8cf2..09e838801e397b4071b6604852e24a2a799e0a86 100644 (file)
@@ -1769,6 +1769,7 @@ static void __init setup_page_offset(void)
                        max_phys_bits = 47;
                        break;
                case SUN4V_CHIP_SPARC_M7:
+               case SUN4V_CHIP_SPARC_SN:
                default:
                        /* M7 and later support 52-bit virtual addresses.  */
                        sparc64_va_hole_top =    0xfff8000000000000UL;
@@ -1986,6 +1987,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
         */
        switch (sun4v_chip_type) {
        case SUN4V_CHIP_SPARC_M7:
+       case SUN4V_CHIP_SPARC_SN:
                pagecv_flag = 0x00;
                break;
        default:
@@ -2138,6 +2140,7 @@ void __init paging_init(void)
         */
        switch (sun4v_chip_type) {
        case SUN4V_CHIP_SPARC_M7:
+       case SUN4V_CHIP_SPARC_SN:
                page_cache4v_flag = _PAGE_CP_4V;
                break;
        default:
index 3f3dfb8b150a114adb5d0e29d2f453b89f8194ad..718905557f7e44a86690d55b3326f705b8a5207b 100644 (file)
@@ -221,8 +221,7 @@ CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_TUN=y
 CONFIG_VETH=m
 CONFIG_NET_DSA_MV88E6060=y
-CONFIG_NET_DSA_MV88E6131=y
-CONFIG_NET_DSA_MV88E6123=y
+CONFIG_NET_DSA_MV88E6XXX=y
 CONFIG_SKY2=y
 CONFIG_PTP_1588_CLOCK_TILEGX=y
 # CONFIG_WLAN is not set
index ef9e27eb2f50cfa11b2c931bbbdc31928ba49385..dc85468afd5ec33c978904b06358761eaacaf852 100644 (file)
@@ -340,8 +340,7 @@ CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_TUN=y
 CONFIG_VETH=m
 CONFIG_NET_DSA_MV88E6060=y
-CONFIG_NET_DSA_MV88E6131=y
-CONFIG_NET_DSA_MV88E6123=y
+CONFIG_NET_DSA_MV88E6XXX=y
 # CONFIG_NET_VENDOR_3COM is not set
 CONFIG_E1000E=y
 # CONFIG_WLAN is not set
index 9ef669d24bb22098f42ca8bd5253616a54647151..2cd5b6874c7bb65fa75e6f6c686fc72a6d8aff0c 100644 (file)
@@ -223,7 +223,7 @@ static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if (len == skb->len) {
                dev->stats.tx_packets++;
                dev->stats.tx_bytes += skb->len;
-               dev->trans_start = jiffies;
+               netif_trans_update(dev);
                netif_start_queue(dev);
 
                /* this is normally done in the interrupt when tx finishes */
@@ -252,7 +252,7 @@ static void uml_net_set_multicast_list(struct net_device *dev)
 
 static void uml_net_tx_timeout(struct net_device *dev)
 {
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
        netif_wake_queue(dev);
 }
 
index 6915ff2bd9962e0b495d0af05c18f51a9382c36e..8774cb23064fe417cf35935c33e876eb9faa9670 100644 (file)
@@ -26,7 +26,7 @@ targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
        vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4
 
 KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
-KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
+KBUILD_CFLAGS += -fno-strict-aliasing $(call cc-option, -fPIE, -fPIC)
 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
 cflags-$(CONFIG_X86_32) := -march=i386
 cflags-$(CONFIG_X86_64) := -mcmodel=small
@@ -40,6 +40,18 @@ GCOV_PROFILE := n
 UBSAN_SANITIZE :=n
 
 LDFLAGS := -m elf_$(UTS_MACHINE)
+ifeq ($(CONFIG_RELOCATABLE),y)
+# If the kernel is relocatable, build the compressed kernel as PIE.
+ifeq ($(CONFIG_X86_32),y)
+LDFLAGS += $(call ld-option, -pie) $(call ld-option, --no-dynamic-linker)
+else
+# To build the 64-bit compressed kernel as PIE, we disable the relocation
+# overflow check to avoid relocation overflow errors, using the newer linker
+# command-line option -z noreloc-overflow.
+LDFLAGS += $(shell $(LD) --help 2>&1 | grep -q "\-z noreloc-overflow" \
+       && echo "-z noreloc-overflow -pie --no-dynamic-linker")
+endif
+endif
 LDFLAGS_vmlinux := -T
 
 hostprogs-y    := mkpiggy
index 8ef964ddc18ec656b1e3b0f038adf134484dbdd2..0256064da8da38c69098cbccc75a19cc0493ca82 100644 (file)
 #include <asm/asm-offsets.h>
 #include <asm/bootparam.h>
 
+/*
+ * The 32-bit x86 assembler in binutils 2.26 will generate R_386_GOT32X
+ * relocation to get the symbol address in PIC.  When the compressed x86
+ * kernel isn't built as PIC, the linker optimizes R_386_GOT32X
+ * relocations to their fixed symbol addresses.  However, when the
+ * compressed x86 kernel is loaded at a different address, it leads
+ * to the following load failure:
+ *
+ *   Failed to allocate space for phdrs
+ *
+ * during the decompression stage.
+ *
+ * If the compressed x86 kernel is relocatable at run-time, it should be
+ * compiled with -fPIE instead of -fPIC if possible, and should be built as
+ * a Position Independent Executable (PIE) so that the linker won't optimize
+ * R_386_GOT32X relocations to their fixed symbol addresses.  Older
+ * linkers generate R_386_32 relocations against locally defined symbols,
+ * _bss, _ebss, _got and _egot, in PIE.  It isn't wrong, just less
+ * optimal than R_386_RELATIVE.  But the x86 kernel fails to properly handle
+ * R_386_32 relocations when relocating the kernel.  To generate
+ * R_386_RELATIVE relocations, we mark _bss, _ebss, _got and _egot as
+ * hidden:
+ */
+       .hidden _bss
+       .hidden _ebss
+       .hidden _got
+       .hidden _egot
+
        __HEAD
 ENTRY(startup_32)
 #ifdef CONFIG_EFI_STUB
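
The same effect can be illustrated at the C level: hidden visibility tells the
toolchain a symbol is resolved within the final executable, so PIC/PIE code can
reference it PC-relatively (yielding R_386_RELATIVE) instead of going through
the GOT. A sketch using the GCC visibility attribute, not part of this patch:

    /* Equivalent C-level marking of a locally defined symbol. */
    extern char _bss[] __attribute__((visibility("hidden")));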
index b0c0d16ef58d1099342c97aff83767dd35c73691..86558a1991393c509bc3005c021c2ab490b26b2a 100644 (file)
 #include <asm/asm-offsets.h>
 #include <asm/bootparam.h>
 
+/*
+ * Locally defined symbols should be marked hidden:
+ */
+       .hidden _bss
+       .hidden _ebss
+       .hidden _got
+       .hidden _egot
+
        __HEAD
        .code32
 ENTRY(startup_32)
index a8a0224fa0f8a4682f76281034a3172001f50200..081255cea1ee5d442a75529172e097afce7f396c 100644 (file)
@@ -453,10 +453,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
 
                        req = cast_mcryptd_ctx_to_req(req_ctx);
                        if (irqs_disabled())
-                               rctx->complete(&req->base, ret);
+                               req_ctx->complete(&req->base, ret);
                        else {
                                local_bh_disable();
-                               rctx->complete(&req->base, ret);
+                               req_ctx->complete(&req->base, ret);
                                local_bh_enable();
                        }
                }
index 049ada8d4e9c98b9c187b62528a55d9206a3d7cc..bd3e8421b57c8beeeb9b6d70ec2ad65e613b4f75 100644 (file)
@@ -115,7 +115,7 @@ static __initconst const u64 amd_hw_cache_event_ids
 /*
  * AMD Performance Monitor K7 and later.
  */
-static const u64 amd_perfmon_event_map[] =
+static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
 {
   [PERF_COUNT_HW_CPU_CYCLES]                   = 0x0076,
   [PERF_COUNT_HW_INSTRUCTIONS]                 = 0x00c0,
@@ -369,7 +369,7 @@ static int amd_pmu_cpu_prepare(int cpu)
 
        WARN_ON_ONCE(cpuc->amd_nb);
 
-       if (boot_cpu_data.x86_max_cores < 2)
+       if (!x86_pmu.amd_nb_constraints)
                return NOTIFY_OK;
 
        cpuc->amd_nb = amd_alloc_nb(cpu);
@@ -388,7 +388,7 @@ static void amd_pmu_cpu_starting(int cpu)
 
        cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
 
-       if (boot_cpu_data.x86_max_cores < 2)
+       if (!x86_pmu.amd_nb_constraints)
                return;
 
        nb_id = amd_get_nb_id(cpu);
@@ -414,7 +414,7 @@ static void amd_pmu_cpu_dead(int cpu)
 {
        struct cpu_hw_events *cpuhw;
 
-       if (boot_cpu_data.x86_max_cores < 2)
+       if (!x86_pmu.amd_nb_constraints)
                return;
 
        cpuhw = &per_cpu(cpu_hw_events, cpu);
@@ -648,6 +648,8 @@ static __initconst const struct x86_pmu amd_pmu = {
        .cpu_prepare            = amd_pmu_cpu_prepare,
        .cpu_starting           = amd_pmu_cpu_starting,
        .cpu_dead               = amd_pmu_cpu_dead,
+
+       .amd_nb_constraints     = 1,
 };
 
 static int __init amd_core_pmu_init(void)
@@ -674,6 +676,11 @@ static int __init amd_core_pmu_init(void)
        x86_pmu.eventsel        = MSR_F15H_PERF_CTL;
        x86_pmu.perfctr         = MSR_F15H_PERF_CTR;
        x86_pmu.num_counters    = AMD64_NUM_COUNTERS_CORE;
+       /*
+        * AMD Core perfctr has separate MSRs for the NB events; see
+        * the amd/uncore.c driver.
+        */
+       x86_pmu.amd_nb_constraints = 0;
 
        pr_cont("core perfctr, ");
        return 0;
@@ -693,6 +700,14 @@ __init int amd_pmu_init(void)
        if (ret)
                return ret;
 
+       if (num_possible_cpus() == 1) {
+               /*
+                * No point in allocating data structures to serialize
+                * against other CPUs when there is only one CPU.
+                */
+               x86_pmu.amd_nb_constraints = 0;
+       }
+
        /* Events are common for all AMDs */
        memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
               sizeof(hw_cache_event_ids));
index 3ea25c3917c07463360d457471e734548fb4e023..feb90f6730e8aebc06780090dcb6738750dd0a43 100644 (file)
@@ -28,10 +28,46 @@ static u32 ibs_caps;
 #define IBS_FETCH_CONFIG_MASK  (IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
 #define IBS_OP_CONFIG_MASK     IBS_OP_MAX_CNT
 
+
+/*
+ * IBS states:
+ *
+ * ENABLED; tracks the pmu::add(), pmu::del() state: when set, the counter is
+ * taken and any further add()s must fail.
+ *
+ * STARTED/STOPPING/STOPPED; deal with pmu::start(), pmu::stop() state but are
+ * complicated by the fact that the IBS hardware can send late NMIs (i.e. after
+ * we've cleared the EN bit).
+ *
+ * In order to consume these late NMIs we have the STOPPED state; any NMI that
+ * happens after we've cleared the EN state will clear this bit and report the
+ * NMI handled (this is fundamentally racy in the face of multiple NMI sources:
+ * someone else can consume our bit and our NMI will go unhandled).
+ *
+ * And since we cannot set/clear this separate bit together with the EN bit,
+ * there are races; if we cleared STARTED early, an NMI could land in
+ * between clearing STARTED and clearing the EN bit (in fact multiple NMIs
+ * could happen if the period is small enough), and consume our STOPPED bit
+ * and trigger streams of unhandled NMIs.
+ *
+ * If, however, we clear STARTED late, an NMI can hit between clearing the
+ * EN bit and clearing STARTED, still see STARTED set and process the event.
+ * If that event has the VALID bit clear, we bail properly, but this
+ * is not a given. With VALID set we can end up calling pmu::stop() again
+ * (the throttle logic) and trigger the WARNs in there.
+ *
+ * So what we do is set STOPPING before clearing EN to avoid the pmu::stop()
+ * nesting, and clear STARTED late, so that we have a well-defined state over
+ * the clearing of the EN bit.
+ *
+ * XXX: we could probably be using !atomic bitops for all this.
+ */
+
 enum ibs_states {
        IBS_ENABLED     = 0,
        IBS_STARTED     = 1,
        IBS_STOPPING    = 2,
+       IBS_STOPPED     = 3,
 
        IBS_MAX_STATES,
 };
@@ -377,11 +413,10 @@ static void perf_ibs_start(struct perf_event *event, int flags)
 
        perf_ibs_set_period(perf_ibs, hwc, &period);
        /*
-        * Set STARTED before enabling the hardware, such that
-        * a subsequent NMI must observe it. Then clear STOPPING
-        * such that we don't consume NMIs by accident.
+        * Set STARTED before enabling the hardware, such that a subsequent NMI
+        * must observe it.
         */
-       set_bit(IBS_STARTED, pcpu->state);
+       set_bit(IBS_STARTED,    pcpu->state);
        clear_bit(IBS_STOPPING, pcpu->state);
        perf_ibs_enable_event(perf_ibs, hwc, period >> 4);
 
@@ -396,6 +431,9 @@ static void perf_ibs_stop(struct perf_event *event, int flags)
        u64 config;
        int stopping;
 
+       if (test_and_set_bit(IBS_STOPPING, pcpu->state))
+               return;
+
        stopping = test_bit(IBS_STARTED, pcpu->state);
 
        if (!stopping && (hwc->state & PERF_HES_UPTODATE))
@@ -405,12 +443,12 @@ static void perf_ibs_stop(struct perf_event *event, int flags)
 
        if (stopping) {
                /*
-                * Set STOPPING before disabling the hardware, such that it
+                * Set STOPPED before disabling the hardware, such that it
                 * must be visible to NMIs the moment we clear the EN bit,
                 * at which point we can generate an !VALID sample which
                 * we need to consume.
                 */
-               set_bit(IBS_STOPPING, pcpu->state);
+               set_bit(IBS_STOPPED, pcpu->state);
                perf_ibs_disable_event(perf_ibs, hwc, config);
                /*
                 * Clear STARTED after disabling the hardware; if it were
@@ -556,7 +594,7 @@ fail:
                 * with samples that even have the valid bit cleared.
                 * Mark all this NMIs as handled.
                 */
-               if (test_and_clear_bit(IBS_STOPPING, pcpu->state))
+               if (test_and_clear_bit(IBS_STOPPED, pcpu->state))
                        return 1;
 
                return 0;
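
Condensed, the ordering the comment block above prescribes is: serialize
pmu::stop() with STOPPING, publish STOPPED before clearing the EN bit so a late
NMI can be consumed, and clear STARTED only after the hardware is off. A sketch
using the bit names from this hunk, simplified from the full driver logic:

    /* stop path (sketch) */
    if (test_and_set_bit(IBS_STOPPING, pcpu->state))
            return;                                 /* pmu::stop() nesting */
    if (test_bit(IBS_STARTED, pcpu->state)) {
            set_bit(IBS_STOPPED, pcpu->state);      /* visible before EN clears */
            perf_ibs_disable_event(perf_ibs, hwc, config);
            clear_bit(IBS_STARTED, pcpu->state);    /* late, after EN is clear */
    }

    /* NMI path (sketch): consume a late, !VALID sample */
    if (test_and_clear_bit(IBS_STOPPED, pcpu->state))
            return 1;                               /* report the NMI handled */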
index 40625ca7a190953fc87c386a03017bab96903435..6011a573dd64995f0818667c1bb426518d689b78 100644 (file)
@@ -474,6 +474,7 @@ static __init int _init_perf_amd_iommu(
 
 static struct perf_amd_iommu __perf_iommu = {
        .pmu = {
+               .task_ctx_nr    = perf_invalid_context,
                .event_init     = perf_iommu_event_init,
                .add            = perf_iommu_add,
                .del            = perf_iommu_del,
index 68fa55b4d42e672ff466eecaed9782ff4e404795..a6fd4dbcf820abf727b6118c0084a6877ec0340d 100644 (file)
@@ -3637,8 +3637,11 @@ __init int intel_pmu_init(void)
                pr_cont("Knights Landing events, ");
                break;
 
+       case 142: /* 14nm Kabylake Mobile */
+       case 158: /* 14nm Kabylake Desktop */
        case 78: /* 14nm Skylake Mobile */
        case 94: /* 14nm Skylake Desktop */
+       case 85: /* 14nm Skylake Server */
                x86_pmu.late_ack = true;
                memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
index 6c3b7c1780c983627d866584f4c2c6c2379cc65a..1ca5d1e7d4f253429fc1c44968d8219dff4086cd 100644 (file)
@@ -63,7 +63,7 @@ static enum {
 
 #define LBR_PLM (LBR_KERNEL | LBR_USER)
 
-#define LBR_SEL_MASK   0x1ff   /* valid bits in LBR_SELECT */
+#define LBR_SEL_MASK   0x3ff   /* valid bits in LBR_SELECT */
 #define LBR_NOT_SUPP   -1      /* LBR filter not supported */
 #define LBR_IGN                0       /* ignored */
 
@@ -610,8 +610,10 @@ static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
         * The first 9 bits (LBR_SEL_MASK) in LBR_SELECT operate
         * in suppress mode. So LBR_SELECT should be set to
         * (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK)
+        * The 10th bit, LBR_CALL_STACK, however, does not operate
+        * in suppress mode.
         */
-       reg->config = mask ^ x86_pmu.lbr_sel_mask;
+       reg->config = mask ^ (x86_pmu.lbr_sel_mask & ~LBR_CALL_STACK);
 
        if ((br_type & PERF_SAMPLE_BRANCH_NO_CYCLES) &&
            (br_type & PERF_SAMPLE_BRANCH_NO_FLAGS) &&
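
Concretely, assuming lbr_sel_mask is LBR_SEL_MASK (0x3ff) and LBR_CALL_STACK is
the 10th bit (0x200), the new expression inverts only the nine suppress-mode
bits and passes the call-stack bit through unchanged:

    /* Sketch of the bit math above. */
    u64 suppress = x86_pmu.lbr_sel_mask & ~LBR_CALL_STACK;  /* 0x1ff */
    reg->config  = mask ^ suppress;  /* flip suppress-mode bits only */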
index 6af7cf71d6b2e8dde7eb042ab3930d557f298efe..09a77dbc73c93110a40d2afbf2dedc86e50ea44f 100644 (file)
@@ -136,9 +136,21 @@ static int __init pt_pmu_hw_init(void)
        struct dev_ext_attribute *de_attrs;
        struct attribute **attrs;
        size_t size;
+       u64 reg;
        int ret;
        long i;
 
+       if (boot_cpu_has(X86_FEATURE_VMX)) {
+               /*
+                * Intel SDM, 36.5 "Tracing post-VMXON" says that
+                * "IA32_VMX_MISC[bit 14]" being 1 means PT can trace
+                * post-VMXON.
+                */
+               rdmsrl(MSR_IA32_VMX_MISC, reg);
+               if (reg & BIT(14))
+                       pt_pmu.vmx = true;
+       }
+
        attrs = NULL;
 
        for (i = 0; i < PT_CPUID_LEAVES; i++) {
@@ -269,20 +281,23 @@ static void pt_config(struct perf_event *event)
 
        reg |= (event->attr.config & PT_CONFIG_MASK);
 
+       event->hw.config = reg;
        wrmsrl(MSR_IA32_RTIT_CTL, reg);
 }
 
-static void pt_config_start(bool start)
+static void pt_config_stop(struct perf_event *event)
 {
-       u64 ctl;
+       u64 ctl = READ_ONCE(event->hw.config);
+
+       /* may be already stopped by a PMI */
+       if (!(ctl & RTIT_CTL_TRACEEN))
+               return;
 
-       rdmsrl(MSR_IA32_RTIT_CTL, ctl);
-       if (start)
-               ctl |= RTIT_CTL_TRACEEN;
-       else
-               ctl &= ~RTIT_CTL_TRACEEN;
+       ctl &= ~RTIT_CTL_TRACEEN;
        wrmsrl(MSR_IA32_RTIT_CTL, ctl);
 
+       WRITE_ONCE(event->hw.config, ctl);
+
        /*
         * A wrmsr that disables trace generation serializes other PT
         * registers and causes all data packets to be written to memory,
@@ -291,8 +306,7 @@ static void pt_config_start(bool start)
         * The below WMB, separating data store and aux_head store matches
         * the consumer's RMB that separates aux_head load and data load.
         */
-       if (!start)
-               wmb();
+       wmb();
 }
 
 static void pt_config_buffer(void *buf, unsigned int topa_idx,
@@ -942,11 +956,17 @@ void intel_pt_interrupt(void)
        if (!ACCESS_ONCE(pt->handle_nmi))
                return;
 
-       pt_config_start(false);
+       /*
+        * If VMX is on and PT does not support it, don't touch anything.
+        */
+       if (READ_ONCE(pt->vmx_on))
+               return;
 
        if (!event)
                return;
 
+       pt_config_stop(event);
+
        buf = perf_get_aux(&pt->handle);
        if (!buf)
                return;
@@ -983,6 +1003,35 @@ void intel_pt_interrupt(void)
        }
 }
 
+void intel_pt_handle_vmx(int on)
+{
+       struct pt *pt = this_cpu_ptr(&pt_ctx);
+       struct perf_event *event;
+       unsigned long flags;
+
+       /* PT plays nice with VMX; do nothing */
+       if (pt_pmu.vmx)
+               return;
+
+       /*
+        * VMXON will clear RTIT_CTL.TraceEn; we need to make
+        * sure to not try to set it while VMX is on. Disable
+        * interrupts to avoid racing with pmu callbacks;
+        * concurrent PMI should be handled fine.
+        */
+       local_irq_save(flags);
+       WRITE_ONCE(pt->vmx_on, on);
+
+       if (on) {
+               /* prevent pt_config_stop() from writing RTIT_CTL */
+               event = pt->handle.event;
+               if (event)
+                       event->hw.config = 0;
+       }
+       local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(intel_pt_handle_vmx);
+
 /*
  * PMU callbacks
  */
@@ -992,6 +1041,9 @@ static void pt_event_start(struct perf_event *event, int mode)
        struct pt *pt = this_cpu_ptr(&pt_ctx);
        struct pt_buffer *buf = perf_get_aux(&pt->handle);
 
+       if (READ_ONCE(pt->vmx_on))
+               return;
+
        if (!buf || pt_buffer_is_full(buf, pt)) {
                event->hw.state = PERF_HES_STOPPED;
                return;
@@ -1014,7 +1066,8 @@ static void pt_event_stop(struct perf_event *event, int mode)
         * see comment in intel_pt_interrupt().
         */
        ACCESS_ONCE(pt->handle_nmi) = 0;
-       pt_config_start(false);
+
+       pt_config_stop(event);
 
        if (event->hw.state == PERF_HES_STOPPED)
                return;
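
The intended pairing for the new intel_pt_handle_vmx() export is to bracket the
VMX-on window so PT never writes RTIT_CTL while VMXON has cleared TraceEn; the
KVM hunks later in this diff do exactly this. A sketch:

    intel_pt_handle_vmx(1);   /* before VMXON: stop touching RTIT_CTL */
    /* ... VMXON, guest runs, VMXOFF ... */
    intel_pt_handle_vmx(0);   /* after VMXOFF: PT events may run again */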
index 336878a5d205a1fc12e024d4b48586f88a10a138..3abb5f5cccc87d0a00cd4103cdad0f2674ca8423 100644 (file)
@@ -65,6 +65,7 @@ enum pt_capabilities {
 struct pt_pmu {
        struct pmu              pmu;
        u32                     caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
+       bool                    vmx;
 };
 
 /**
@@ -107,10 +108,12 @@ struct pt_buffer {
  * struct pt - per-cpu pt context
  * @handle:    perf output handle
  * @handle_nmi:        do handle PT PMI on this cpu, there's an active event
+ * @vmx_on:    1 if VMX is ON on this cpu
  */
 struct pt {
        struct perf_output_handle handle;
        int                     handle_nmi;
+       int                     vmx_on;
 };
 
 #endif /* __INTEL_PT_H__ */
index 70c93f9b03acc49e245154323b2b2787d362d76d..1705c9d75e4477e1c246d608fa1e9ebb36f6f50b 100644 (file)
@@ -718,6 +718,7 @@ static int __init rapl_pmu_init(void)
                break;
        case 60: /* Haswell */
        case 69: /* Haswell-Celeron */
+       case 70: /* Haswell GT3e */
        case 61: /* Broadwell */
        case 71: /* Broadwell-H */
                rapl_cntr_mask = RAPL_IDX_HSW;
index ba6ef18528c906444049587506535115687ce01c..ad4dc7ffffb5eb44eeb79c08e924697547f14ebd 100644 (file)
@@ -607,6 +607,11 @@ struct x86_pmu {
         */
        atomic_t        lbr_exclusive[x86_lbr_exclusive_max];
 
+       /*
+        * AMD bits
+        */
+       unsigned int    amd_nb_constraints : 1;
+
        /*
         * Extra registers for events
         */
@@ -795,6 +800,9 @@ ssize_t intel_event_sysfs_show(char *page, u64 config);
 
 struct attribute **merge_attr(struct attribute **a, struct attribute **b);
 
+ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
+                         char *page);
+
 #ifdef CONFIG_CPU_SUP_AMD
 
 int amd_pmu_init(void);
@@ -925,9 +933,6 @@ int p6_pmu_init(void);
 
 int knc_pmu_init(void);
 
-ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
-                         char *page);
-
 static inline int is_ht_workaround_enabled(void)
 {
        return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
index f8a29d2c97b012febc3728809a5daa53de22cf0f..e6a8613fbfb0ea19f8d507c4ce6ae1f0d4a35be6 100644 (file)
@@ -4,6 +4,7 @@
 #include <asm/page.h>
 #include <asm-generic/hugetlb.h>
 
+#define hugepages_supported() cpu_has_pse
 
 static inline int is_hugepage_only_range(struct mm_struct *mm,
                                         unsigned long addr,
index f62a9f37f79f6c15221aed106f34508d05940be8..b7e394485a5f2a641bc466eef1bc210c348a936c 100644 (file)
@@ -43,7 +43,7 @@
 
 #define KVM_PIO_PAGE_OFFSET 1
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 2
-#define KVM_HALT_POLL_NS_DEFAULT 500000
+#define KVM_HALT_POLL_NS_DEFAULT 400000
 
 #define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS
 
index 2da46ac16e3750441294d983d82f359fdccdb0a4..5b3c9a55f51cbeda86cb52dedac1f3f494042e55 100644 (file)
 #define MSR_PKG_C9_RESIDENCY           0x00000631
 #define MSR_PKG_C10_RESIDENCY          0x00000632
 
+/* Interrupt Response Limit */
+#define MSR_PKGC3_IRTL                 0x0000060a
+#define MSR_PKGC6_IRTL                 0x0000060b
+#define MSR_PKGC7_IRTL                 0x0000060c
+#define MSR_PKGC8_IRTL                 0x00000633
+#define MSR_PKGC9_IRTL                 0x00000634
+#define MSR_PKGC10_IRTL                        0x00000635
+
 /* Run Time Average Power Limiting (RAPL) Interface */
 
 #define MSR_RAPL_POWER_UNIT            0x00000606
 #define MSR_PP1_ENERGY_STATUS          0x00000641
 #define MSR_PP1_POLICY                 0x00000642
 
+/* Config TDP MSRs */
 #define MSR_CONFIG_TDP_NOMINAL         0x00000648
 #define MSR_CONFIG_TDP_LEVEL_1         0x00000649
 #define MSR_CONFIG_TDP_LEVEL_2         0x0000064A
 #define MSR_GFX_PERF_LIMIT_REASONS     0x000006B0
 #define MSR_RING_PERF_LIMIT_REASONS    0x000006B1
 
-/* Config TDP MSRs */
-#define MSR_CONFIG_TDP_NOMINAL         0x00000648
-#define MSR_CONFIG_TDP_LEVEL1          0x00000649
-#define MSR_CONFIG_TDP_LEVEL2          0x0000064A
-#define MSR_CONFIG_TDP_CONTROL         0x0000064B
-#define MSR_TURBO_ACTIVATION_RATIO     0x0000064C
-
 /* Hardware P state interface */
 #define MSR_PPERF                      0x0000064e
 #define MSR_PERF_LIMIT_REASONS         0x0000064f
index 5a2ed3ed2f261893d5de08022ea3259198c8c0fe..f353061bba1d0ff9f61f9f09ef6c150dae501266 100644 (file)
@@ -285,6 +285,10 @@ static inline void perf_events_lapic_init(void)    { }
 static inline void perf_check_microcode(void) { }
 #endif
 
+#ifdef CONFIG_CPU_SUP_INTEL
+ extern void intel_pt_handle_vmx(int on);
+#endif
+
 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
  extern void amd_pmu_enable_virt(void);
  extern void amd_pmu_disable_virt(void);
index 983738ac014c6508f499b26650d44b6f5b3b1b2b..9264476f3d578e8fa346411aad4900e85afcb41a 100644 (file)
@@ -132,8 +132,6 @@ struct cpuinfo_x86 {
        u16                     logical_proc_id;
        /* Core id: */
        u16                     cpu_core_id;
-       /* Compute unit id */
-       u8                      compute_unit_id;
        /* Index into per_cpu list: */
        u16                     cpu_index;
        u32                     microcode;
index 20a3de5cb3b0dd5e3362833baebd752c1142ae4e..66b057306f404718c233c18c5603ec30e9e535ba 100644 (file)
@@ -155,6 +155,7 @@ static inline int wbinvd_on_all_cpus(void)
        wbinvd();
        return 0;
 }
+#define smp_num_siblings       1
 #endif /* CONFIG_SMP */
 
 extern unsigned disabled_cpus;
index 82866697fcf186ac7f63f1f6b5f1f77385dc8ab4..ffae84df8a9313cd3cc7b12953c0c24a809cd792 100644 (file)
@@ -276,11 +276,9 @@ static inline bool is_ia32_task(void)
  */
 #define force_iret() set_thread_flag(TIF_NOTIFY_RESUME)
 
-#endif /* !__ASSEMBLY__ */
-
-#ifndef __ASSEMBLY__
 extern void arch_task_cache_init(void);
 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
 extern void arch_release_task_struct(struct task_struct *tsk);
-#endif
+#endif /* !__ASSEMBLY__ */
+
 #endif /* _ASM_X86_THREAD_INFO_H */
index 29fa475ec51823e61a9e94f34142e37341a1208b..a147e676fc7b3439d156534472d63be5c2d10f4a 100644 (file)
@@ -170,15 +170,13 @@ int amd_get_subcaches(int cpu)
 {
        struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
        unsigned int mask;
-       int cuid;
 
        if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
                return 0;
 
        pci_read_config_dword(link, 0x1d4, &mask);
 
-       cuid = cpu_data(cpu).compute_unit_id;
-       return (mask >> (4 * cuid)) & 0xf;
+       return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
 }
 
 int amd_set_subcaches(int cpu, unsigned long mask)
@@ -204,7 +202,7 @@ int amd_set_subcaches(int cpu, unsigned long mask)
                pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
        }
 
-       cuid = cpu_data(cpu).compute_unit_id;
+       cuid = cpu_data(cpu).cpu_core_id;
        mask <<= 4 * cuid;
        mask |= (0xf ^ (1 << cuid)) << 26;
 
index ad59d70bcb1a6109742773e5f69a7235bca712f5..ef495511f019f0a899325d6a28ed5da98e1c939e 100644 (file)
@@ -256,7 +256,8 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
        struct irq_desc *desc;
        int cpu, vector;
 
-       BUG_ON(!data->cfg.vector);
+       if (!data->cfg.vector)
+               return;
 
        vector = data->cfg.vector;
        for_each_cpu_and(cpu, data->domain, cpu_online_mask)
index 8f4942e2bcbb21584a78225b1208adefe86e0aa9..d7ce96a7dacaedc630a0e9a5ef5b761edbd0ffc7 100644 (file)
@@ -891,9 +891,7 @@ void __init uv_system_init(void)
        }
        pr_info("UV: Found %s hub\n", hub);
 
-       /* We now only need to map the MMRs on UV1 */
-       if (is_uv1_hub())
-               map_low_mmrs();
+       map_low_mmrs();
 
        m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR );
        m_val = m_n_config.s.m_skt;
index 6e47e3a916f12a03069a7c38ef21e9b2df5edcdf..7b76eb67a9b3dcb84bb8e6cd6e40945924d32938 100644 (file)
@@ -300,7 +300,6 @@ static int nearby_node(int apicid)
 #ifdef CONFIG_SMP
 static void amd_get_topology(struct cpuinfo_x86 *c)
 {
-       u32 cores_per_cu = 1;
        u8 node_id;
        int cpu = smp_processor_id();
 
@@ -313,8 +312,8 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
 
                /* get compute unit information */
                smp_num_siblings = ((ebx >> 8) & 3) + 1;
-               c->compute_unit_id = ebx & 0xff;
-               cores_per_cu += ((ebx >> 8) & 3);
+               c->x86_max_cores /= smp_num_siblings;
+               c->cpu_core_id = ebx & 0xff;
        } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
                u64 value;
 
@@ -325,19 +324,16 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
 
        /* fixup multi-node processor information */
        if (nodes_per_socket > 1) {
-               u32 cores_per_node;
                u32 cus_per_node;
 
                set_cpu_cap(c, X86_FEATURE_AMD_DCM);
-               cores_per_node = c->x86_max_cores / nodes_per_socket;
-               cus_per_node = cores_per_node / cores_per_cu;
+               cus_per_node = c->x86_max_cores / nodes_per_socket;
 
                /* store NodeID, use llc_shared_map to store sibling info */
                per_cpu(cpu_llc_id, cpu) = node_id;
 
                /* core id has to be in the [0 .. cores_per_node - 1] range */
-               c->cpu_core_id %= cores_per_node;
-               c->compute_unit_id %= cus_per_node;
+               c->cpu_core_id %= cus_per_node;
        }
 }
 #endif
index 0a850100c5944641717c797e5d4f129b2cf9a79f..2658e2af74ec4c3f433b9958b4498d7f7d603fed 100644 (file)
@@ -29,7 +29,7 @@ static char gen_pool_buf[MCE_POOLSZ];
 void mce_gen_pool_process(void)
 {
        struct llist_node *head;
-       struct mce_evt_llist *node;
+       struct mce_evt_llist *node, *tmp;
        struct mce *mce;
 
        head = llist_del_all(&mce_event_llist);
@@ -37,7 +37,7 @@ void mce_gen_pool_process(void)
                return;
 
        head = llist_reverse_order(head);
-       llist_for_each_entry(node, head, llnode) {
+       llist_for_each_entry_safe(node, tmp, head, llnode) {
                mce = &node->mce;
                atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
                gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node));
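
The _safe variant matters here because the loop body frees the node it stands
on; the plain iterator would read the freed node to advance. A sketch of why,
assuming the names from this hunk:

    /* llist_for_each_entry() would dereference node->llnode.next
     * *after* gen_pool_free() below, a use-after-free; the _safe
     * form caches the next entry in 'tmp' before the body runs. */
    llist_for_each_entry_safe(node, tmp, head, llnode) {
            atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, &node->mce);
            gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node));
    }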
index 4e7c6933691cc8de42aa4a82473482127bd0faab..10c11b4da31df0ded6a7a788b8647f22067e95be 100644 (file)
@@ -152,6 +152,11 @@ static struct clocksource hyperv_cs = {
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
+static unsigned char hv_get_nmi_reason(void)
+{
+       return 0;
+}
+
 static void __init ms_hyperv_init_platform(void)
 {
        /*
@@ -191,6 +196,13 @@ static void __init ms_hyperv_init_platform(void)
        machine_ops.crash_shutdown = hv_machine_crash_shutdown;
 #endif
        mark_tsc_unstable("running on Hyper-V");
+
+       /*
+        * Generation 2 instances don't support reading the NMI status from
+        * port 0x61.
+        */
+       if (efi_enabled(EFI_BOOT))
+               x86_platform.get_nmi_reason = hv_get_nmi_reason;
 }
 
 const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
index 31f0f335ed2241d6d561e6b4edc98f8e03216e96..1dd8294fd7301c979744deedc1922f226b0e0346 100644 (file)
@@ -18,4 +18,6 @@ const char *const x86_power_flags[32] = {
        "",     /* tsc invariant mapped to constant_tsc */
        "cpb",  /* core performance boost */
        "eff_freq_ro", /* Readonly aperf/mperf */
+       "proc_feedback", /* processor feedback interface */
+       "acc_power", /* accumulated power mechanism */
 };
index 54cdbd2003fe0930ff0a91158f55d00723e2400e..af1112980dd411334ef59d7d0a7b818946f2137d 100644 (file)
@@ -389,12 +389,6 @@ default_entry:
        /* Make changes effective */
        wrmsr
 
-       /*
-        * And make sure that all the mappings we set up have NX set from
-        * the beginning.
-        */
-       orl $(1 << (_PAGE_BIT_NX - 32)), pa(__supported_pte_mask + 4)
-
 enable_paging:
 
 /*
index b2c99f811c3ff437ddfb45c7c4a56ed73101eccd..a2065d3b3b396f4503f4e4f42acc2af2bd4b307b 100644 (file)
@@ -422,7 +422,7 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 
                if (c->phys_proc_id == o->phys_proc_id &&
                    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) &&
-                   c->compute_unit_id == o->compute_unit_id)
+                   c->cpu_core_id == o->cpu_core_id)
                        return topology_sane(c, o, "smt");
 
        } else if (c->phys_proc_id == o->phys_proc_id &&
index b285d4e8c68e33bf3d4f2b070fdbcbbce7036d9a..5da924bbf0a0f22aa676f5be5a3f97fe4811b087 100644 (file)
@@ -106,14 +106,24 @@ static int __init efifb_set_system(const struct dmi_system_id *id)
                                        continue;
                                for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
                                        resource_size_t start, end;
+                                       unsigned long flags;
+
+                                       flags = pci_resource_flags(dev, i);
+                                       if (!(flags & IORESOURCE_MEM))
+                                               continue;
+
+                                       if (flags & IORESOURCE_UNSET)
+                                               continue;
+
+                                       if (pci_resource_len(dev, i) == 0)
+                                               continue;
 
                                        start = pci_resource_start(dev, i);
-                                       if (start == 0)
-                                               break;
                                        end = pci_resource_end(dev, i);
                                        if (screen_info.lfb_base >= start &&
                                            screen_info.lfb_base < end) {
                                                found_bar = 1;
+                                               break;
                                        }
                                }
                        }
index 92ae6acac8a7fbcb9b91cb386b3c23015c5377ea..6aa0f4d9eea6816bbf0c78514e0b76d8f4dc7def 100644 (file)
@@ -92,7 +92,7 @@ unsigned long try_msr_calibrate_tsc(void)
 
        if (freq_desc_tables[cpu_index].msr_plat) {
                rdmsr(MSR_PLATFORM_INFO, lo, hi);
-               ratio = (lo >> 8) & 0x1f;
+               ratio = (lo >> 8) & 0xff;
        } else {
                rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
                ratio = (hi >> 8) & 0x1f;
index 8efb839948e512e9aac6aaf544230195614297c1..bbbaa802d13efc8b1e57f7defa351cacb2aaab78 100644 (file)
@@ -534,6 +534,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                        do_cpuid_1_ent(&entry[i], function, idx);
                        if (idx == 1) {
                                entry[i].eax &= kvm_cpuid_D_1_eax_x86_features;
+                               cpuid_mask(&entry[i].eax, CPUID_D_1_EAX);
                                entry[i].ebx = 0;
                                if (entry[i].eax & (F(XSAVES)|F(XSAVEC)))
                                        entry[i].ebx =
index 5ff3485acb60b2ad37a93084cd91a13c888c6fbc..01bd7b7a6866ec0f93f30f62091ec384d783660a 100644 (file)
@@ -1116,6 +1116,11 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
                break;
        case HVCALL_POST_MESSAGE:
        case HVCALL_SIGNAL_EVENT:
+               /* don't bother userspace if it has no way to handle it */
+               if (!vcpu_to_synic(vcpu)->active) {
+                       res = HV_STATUS_INVALID_HYPERCALL_CODE;
+                       break;
+               }
                vcpu->run->exit_reason = KVM_EXIT_HYPERV;
                vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
                vcpu->run->hyperv.u.hcall.input = param;
index 443d2a57ad3d9620246097a48ed3cd7de9e02f50..1a2da0e5a373284f6397c3b5485986f086995e1a 100644 (file)
@@ -1369,7 +1369,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
 
                hrtimer_start(&apic->lapic_timer.timer,
                              ktime_add_ns(now, apic->lapic_timer.period),
-                             HRTIMER_MODE_ABS);
+                             HRTIMER_MODE_ABS_PINNED);
 
                apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
                           PRIx64 ", "
@@ -1402,7 +1402,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
                        expire = ktime_add_ns(now, ns);
                        expire = ktime_sub_ns(expire, lapic_timer_advance_ns);
                        hrtimer_start(&apic->lapic_timer.timer,
-                                     expire, HRTIMER_MODE_ABS);
+                                     expire, HRTIMER_MODE_ABS_PINNED);
                } else
                        apic_timer_expired(apic);
 
@@ -1868,7 +1868,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
        apic->vcpu = vcpu;
 
        hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
-                    HRTIMER_MODE_ABS);
+                    HRTIMER_MODE_ABS_PINNED);
        apic->lapic_timer.timer.function = apic_timer_fn;
 
        /*
@@ -2003,7 +2003,7 @@ void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
 
        timer = &vcpu->arch.apic->lapic_timer.timer;
        if (hrtimer_cancel(timer))
-               hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
+               hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
 }
 
 /*
index 70e95d097ef104ac489a41a17dce2296dad5123c..b6f50e8b0a393675009a5dcaad7f30af315bc91d 100644 (file)
@@ -557,8 +557,15 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
              !is_writable_pte(new_spte))
                ret = true;
 
-       if (!shadow_accessed_mask)
+       if (!shadow_accessed_mask) {
+               /*
+                * We don't mark the page dirty when dropping a non-writable
+                * spte, so do it now if the new spte is becoming non-writable.
+                */
+               if (ret)
+                       kvm_set_pfn_dirty(spte_to_pfn(old_spte));
                return ret;
+       }
 
        /*
         * Flush TLB when accessed/dirty bits are changed in the page tables,
@@ -605,7 +612,8 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
 
        if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
                kvm_set_pfn_accessed(pfn);
-       if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
+       if (old_spte & (shadow_dirty_mask ? shadow_dirty_mask :
+                                           PT_WRITABLE_MASK))
                kvm_set_pfn_dirty(pfn);
        return 1;
 }
@@ -2815,7 +2823,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
         */
        if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
            level == PT_PAGE_TABLE_LEVEL &&
-           PageTransCompound(pfn_to_page(pfn)) &&
+           PageTransCompoundMap(pfn_to_page(pfn)) &&
            !mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
                unsigned long mask;
                /*
@@ -4777,7 +4785,7 @@ restart:
                 */
                if (sp->role.direct &&
                        !kvm_is_reserved_pfn(pfn) &&
-                       PageTransCompound(pfn_to_page(pfn))) {
+                       PageTransCompoundMap(pfn_to_page(pfn))) {
                        drop_spte(kvm, sptep);
                        need_tlb_flush = 1;
                        goto restart;
index b70df72e2b33d417307d01f57bca57388650d34f..66b33b96a31b47138c9fef94c20e034d07678cb7 100644 (file)
@@ -173,10 +173,9 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
        int index = (pfec >> 1) +
                    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
        bool fault = (mmu->permissions[index] >> pte_access) & 1;
+       u32 errcode = PFERR_PRESENT_MASK;
 
        WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
-       pfec |= PFERR_PRESENT_MASK;
-
        if (unlikely(mmu->pkru_mask)) {
                u32 pkru_bits, offset;
 
@@ -189,15 +188,15 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                pkru_bits = (kvm_read_pkru(vcpu) >> (pte_pkey * 2)) & 3;
 
                /* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
-               offset = pfec - 1 +
+               offset = (pfec & ~1) +
                        ((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));
 
                pkru_bits &= mmu->pkru_mask >> offset;
-               pfec |= -pkru_bits & PFERR_PK_MASK;
+               errcode |= -pkru_bits & PFERR_PK_MASK;
                fault |= (pkru_bits != 0);
        }
 
-       return -(uint32_t)fault & pfec;
+       return -(u32)fault & errcode;
 }
 
 void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
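The rewritten permission_fault() above computes its result without a branch: fault is 0 or 1, so -(u32)fault is either 0 or all ones, and the AND passes the accumulated error bits through only on a real fault. A standalone restatement of the trick (plain C; the error-code value is chosen arbitrarily):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t errcode = 0x1;   /* e.g. PFERR_PRESENT_MASK */

        for (int fault = 0; fault <= 1; fault++)
            /* -(uint32_t)0 == 0x0, -(uint32_t)1 == 0xffffffff */
            printf("fault=%d -> errcode 0x%x\n", fault,
                   -(uint32_t)fault & errcode);
        return 0;
    }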
index 1d971c7553c3847f0d1335487ce551a19875b709..bc019f70e0b6bb374d851bdb3be32ea09e49768b 100644 (file)
@@ -360,7 +360,7 @@ retry_walk:
                        goto error;
 
                if (unlikely(is_rsvd_bits_set(mmu, pte, walker->level))) {
-                       errcode |= PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
+                       errcode = PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
                        goto error;
                }
 
index ee1c8a93871c551f9cddc93de56197806d0c0cfd..133679d520afee3934bd5dc155d9675d69198a8d 100644 (file)
@@ -3103,6 +3103,8 @@ static __init int vmx_disabled_by_bios(void)
 
 static void kvm_cpu_vmxon(u64 addr)
 {
+       intel_pt_handle_vmx(1);
+
        asm volatile (ASM_VMX_VMXON_RAX
                        : : "a"(&addr), "m"(addr)
                        : "memory", "cc");
@@ -3172,6 +3174,8 @@ static void vmclear_local_loaded_vmcss(void)
 static void kvm_cpu_vmxoff(void)
 {
        asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
+
+       intel_pt_handle_vmx(0);
 }
 
 static void hardware_disable(void)
index 742d0f7d3556e143e219c4ea474e85605be33cde..9b7798c7b210e75499644ed1ca35b643fe743208 100644 (file)
@@ -700,7 +700,6 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
                if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
                        return 1;
        }
-       kvm_put_guest_xcr0(vcpu);
        vcpu->arch.xcr0 = xcr0;
 
        if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
@@ -6095,12 +6094,10 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
        }
 
        /* try to inject new event if pending */
-       if (vcpu->arch.nmi_pending) {
-               if (kvm_x86_ops->nmi_allowed(vcpu)) {
-                       --vcpu->arch.nmi_pending;
-                       vcpu->arch.nmi_injected = true;
-                       kvm_x86_ops->set_nmi(vcpu);
-               }
+       if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
+               --vcpu->arch.nmi_pending;
+               vcpu->arch.nmi_injected = true;
+               kvm_x86_ops->set_nmi(vcpu);
        } else if (kvm_cpu_has_injectable_intr(vcpu)) {
                /*
                 * Because interrupts can be injected asynchronously, we are
@@ -6569,10 +6566,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                if (inject_pending_event(vcpu, req_int_win) != 0)
                        req_immediate_exit = true;
                /* enable NMI/IRQ window open exits if needed */
-               else if (vcpu->arch.nmi_pending)
-                       kvm_x86_ops->enable_nmi_window(vcpu);
-               else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
-                       kvm_x86_ops->enable_irq_window(vcpu);
+               else {
+                       if (vcpu->arch.nmi_pending)
+                               kvm_x86_ops->enable_nmi_window(vcpu);
+                       if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
+                               kvm_x86_ops->enable_irq_window(vcpu);
+               }
 
                if (kvm_lapic_enabled(vcpu)) {
                        update_cr8_intercept(vcpu);
@@ -6590,8 +6589,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        kvm_x86_ops->prepare_guest_switch(vcpu);
        if (vcpu->fpu_active)
                kvm_load_guest_fpu(vcpu);
-       kvm_load_guest_xcr0(vcpu);
-
        vcpu->mode = IN_GUEST_MODE;
 
        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
@@ -6618,6 +6615,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                goto cancel_injection;
        }
 
+       kvm_load_guest_xcr0(vcpu);
+
        if (req_immediate_exit)
                smp_send_reschedule(vcpu->cpu);
 
@@ -6667,6 +6666,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        vcpu->mode = OUTSIDE_GUEST_MODE;
        smp_wmb();
 
+       kvm_put_guest_xcr0(vcpu);
+
        /* Interrupt is enabled by handle_external_intr() */
        kvm_x86_ops->handle_external_intr(vcpu);
 
@@ -7314,7 +7315,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
         * and assume host would use all available bits.
         * Guest xcr0 would be loaded later.
         */
-       kvm_put_guest_xcr0(vcpu);
        vcpu->guest_fpu_loaded = 1;
        __kernel_fpu_begin();
        __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
@@ -7323,8 +7323,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 {
-       kvm_put_guest_xcr0(vcpu);
-
        if (!vcpu->guest_fpu_loaded) {
                vcpu->fpu_counter = 0;
                return;
index 8bea84724a7da4bd1c6d3a0509166618c39e5349..f65a33f505b683a02aa7543ea653e54a8cdedc28 100644 (file)
@@ -32,8 +32,9 @@ early_param("noexec", noexec_setup);
 
 void x86_configure_nx(void)
 {
-       /* If disable_nx is set, clear NX on all new mappings going forward. */
-       if (disable_nx)
+       if (boot_cpu_has(X86_FEATURE_NX) && !disable_nx)
+               __supported_pte_mask |= _PAGE_NX;
+       else
                __supported_pte_mask &= ~_PAGE_NX;
 }
 
index a2433817c987833f525380d8425bb6e902873bbd..6a2f5691b1ab57261809617ca9a9b375adfe135e 100644 (file)
@@ -43,40 +43,40 @@ void __init efi_bgrt_init(void)
                return;
 
        if (bgrt_tab->header.length < sizeof(*bgrt_tab)) {
-               pr_err("Ignoring BGRT: invalid length %u (expected %zu)\n",
+               pr_notice("Ignoring BGRT: invalid length %u (expected %zu)\n",
                       bgrt_tab->header.length, sizeof(*bgrt_tab));
                return;
        }
        if (bgrt_tab->version != 1) {
-               pr_err("Ignoring BGRT: invalid version %u (expected 1)\n",
+               pr_notice("Ignoring BGRT: invalid version %u (expected 1)\n",
                       bgrt_tab->version);
                return;
        }
        if (bgrt_tab->status & 0xfe) {
-               pr_err("Ignoring BGRT: reserved status bits are non-zero %u\n",
+               pr_notice("Ignoring BGRT: reserved status bits are non-zero %u\n",
                       bgrt_tab->status);
                return;
        }
        if (bgrt_tab->image_type != 0) {
-               pr_err("Ignoring BGRT: invalid image type %u (expected 0)\n",
+               pr_notice("Ignoring BGRT: invalid image type %u (expected 0)\n",
                       bgrt_tab->image_type);
                return;
        }
        if (!bgrt_tab->image_address) {
-               pr_err("Ignoring BGRT: null image address\n");
+               pr_notice("Ignoring BGRT: null image address\n");
                return;
        }
 
        image = memremap(bgrt_tab->image_address, sizeof(bmp_header), MEMREMAP_WB);
        if (!image) {
-               pr_err("Ignoring BGRT: failed to map image header memory\n");
+               pr_notice("Ignoring BGRT: failed to map image header memory\n");
                return;
        }
 
        memcpy(&bmp_header, image, sizeof(bmp_header));
        memunmap(image);
        if (bmp_header.id != 0x4d42) {
-               pr_err("Ignoring BGRT: Incorrect BMP magic number 0x%x (expected 0x4d42)\n",
+               pr_notice("Ignoring BGRT: Incorrect BMP magic number 0x%x (expected 0x4d42)\n",
                        bmp_header.id);
                return;
        }
@@ -84,14 +84,14 @@ void __init efi_bgrt_init(void)
 
        bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL | __GFP_NOWARN);
        if (!bgrt_image) {
-               pr_err("Ignoring BGRT: failed to allocate memory for image (wanted %zu bytes)\n",
+               pr_notice("Ignoring BGRT: failed to allocate memory for image (wanted %zu bytes)\n",
                       bgrt_image_size);
                return;
        }
 
        image = memremap(bgrt_tab->image_address, bmp_header.size, MEMREMAP_WB);
        if (!image) {
-               pr_err("Ignoring BGRT: failed to map image memory\n");
+               pr_notice("Ignoring BGRT: failed to map image memory\n");
                kfree(bgrt_image);
                bgrt_image = NULL;
                return;
index 55d38cfa46c2626c6d2f85587da27c05ca3e6bf7..9e02dcaef68311ed376f8fcd0579d6c207e80103 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/pci.h>
 
 #include <asm/mce.h>
+#include <asm/smp.h>
 #include <asm/amd_nb.h>
 #include <asm/irq_vectors.h>
 
@@ -206,7 +207,7 @@ static u32 get_nbc_for_node(int node_id)
        struct cpuinfo_x86 *c = &boot_cpu_data;
        u32 cores_per_node;
 
-       cores_per_node = c->x86_max_cores / amd_get_nodes_per_socket();
+       cores_per_node = (c->x86_max_cores * smp_num_siblings) / amd_get_nodes_per_socket();
 
        return cores_per_node * node_id;
 }
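The fix scales x86_max_cores by smp_num_siblings because the node base core number is in units of logical CPUs, while x86_max_cores counts cores only. A worked example under a hypothetical SMT topology:

    #include <stdio.h>

    int main(void)
    {
        /* hypothetical: 8 cores, 2 threads each, 2 nodes per socket */
        unsigned x86_max_cores = 8, smp_num_siblings = 2, nodes = 2;
        unsigned cores_per_node = (x86_max_cores * smp_num_siblings) / nodes;

        /* node 1's base core is 8; without the siblings factor it
         * would have been computed as 4 */
        printf("node 1 base core: %u\n", cores_per_node * 1);
        return 0;
    }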
index abf4901c917bacd58e8172b4566ad8b3eb10a467..db52a7fafcc2ce7dc9b4294065ef9807f9a311a1 100644 (file)
@@ -66,7 +66,7 @@ static u32 xen_apic_read(u32 reg)
 
        ret = HYPERVISOR_platform_op(&op);
        if (ret)
-               return 0;
+               op.u.pcpu_info.apic_id = BAD_APICID;
 
        return op.u.pcpu_info.apic_id << 24;
 }
@@ -142,6 +142,14 @@ static void xen_silent_inquire(int apicid)
 {
 }
 
+static int xen_cpu_present_to_apicid(int cpu)
+{
+       if (cpu_present(cpu))
+               return xen_get_apic_id(xen_apic_read(APIC_ID));
+       else
+               return BAD_APICID;
+}
+
 static struct apic xen_pv_apic = {
        .name                           = "Xen PV",
        .probe                          = xen_apic_probe_pv,
@@ -162,7 +170,7 @@ static struct apic xen_pv_apic = {
 
        .ioapic_phys_id_map             = default_ioapic_phys_id_map, /* Used on 32-bit */
        .setup_apic_routing             = NULL,
-       .cpu_present_to_apicid          = default_cpu_present_to_apicid,
+       .cpu_present_to_apicid          = xen_cpu_present_to_apicid,
        .apicid_to_cpu_present          = physid_set_mask_of_physid, /* Used on 32-bit */
        .check_phys_apicid_present      = default_check_phys_apicid_present, /* smp_sanity_check needs it */
        .phys_pkg_id                    = xen_phys_pkg_id, /* detect_ht */
index 3c6d17fd423a82006d89c71e7345bb6a566aec09..719cf291dcdf195e68e52c97dd054686192aa936 100644 (file)
@@ -545,6 +545,8 @@ static void xen_play_dead(void) /* used only with HOTPLUG_CPU */
         * data back is to call:
         */
        tick_nohz_idle_enter();
+
+       cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 
 #else /* !CONFIG_HOTPLUG_CPU */
index 9e2ba5c6e1dd7be4a0b10a70b315cf5f0f20c081..f42e78de1e107d662e3d806d1dc88ceeaf77ca02 100644 (file)
@@ -27,6 +27,12 @@ static bool xen_pvspin = true;
 
 static void xen_qlock_kick(int cpu)
 {
+       int irq = per_cpu(lock_kicker_irq, cpu);
+
+       /* Don't kick if the target's kicker interrupt is not initialized. */
+       if (irq == -1)
+               return;
+
        xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
 }
 
index 976a3859453707b5d2acaa1e7f80b0e81a16a74e..66a5d15a9e0e27b613a0fa4694b46f96aaadf9d1 100644 (file)
@@ -428,7 +428,7 @@ static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if (len == skb->len) {
                lp->stats.tx_packets++;
                lp->stats.tx_bytes += skb->len;
-               dev->trans_start = jiffies;
+               netif_trans_update(dev);
                netif_start_queue(dev);
 
                /* this is normally done in the interrupt when tx finishes */
index f124a0a624fcbeea867865e7a53f634275dfb4b8..807d25e466ec2ab7ae8c319e62a5621dc4fc29a7 100644 (file)
@@ -1339,7 +1339,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
                 * release the pages we didn't map into the bio, if any
                 */
                while (j < page_limit)
-                       page_cache_release(pages[j++]);
+                       put_page(pages[j++]);
        }
 
        kfree(pages);
@@ -1365,7 +1365,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
        for (j = 0; j < nr_pages; j++) {
                if (!pages[j])
                        break;
-               page_cache_release(pages[j]);
+               put_page(pages[j]);
        }
  out:
        kfree(pages);
@@ -1385,7 +1385,7 @@ static void __bio_unmap_user(struct bio *bio)
                if (bio_data_dir(bio) == READ)
                        set_page_dirty_lock(bvec->bv_page);
 
-               page_cache_release(bvec->bv_page);
+               put_page(bvec->bv_page);
        }
 
        bio_put(bio);
@@ -1615,8 +1615,8 @@ static void bio_release_pages(struct bio *bio)
  * the BIO and the offending pages and re-dirty the pages in process context.
  *
  * It is expected that bio_check_pages_dirty() will wholly own the BIO from
- * here on.  It will run one page_cache_release() against each page and will
- * run one bio_put() against the BIO.
+ * here on.  It will run one put_page() against each page and will run one
+ * bio_put() against the BIO.
  */
 
 static void bio_dirty_fn(struct work_struct *work);
@@ -1658,7 +1658,7 @@ void bio_check_pages_dirty(struct bio *bio)
                struct page *page = bvec->bv_page;
 
                if (PageDirty(page) || PageCompound(page)) {
-                       page_cache_release(page);
+                       put_page(page);
                        bvec->bv_page = NULL;
                } else {
                        nr_clean_pages++;
index 827f8badd143fbf7571b608264571691af6686c5..b60537b2c35b4152343c0239374d8ba332865b09 100644 (file)
@@ -706,7 +706,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
                goto fail_id;
 
        q->backing_dev_info.ra_pages =
-                       (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
+                       (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
        q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK;
        q->backing_dev_info.name = "block";
        q->node = node_id;
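This hunk is part of the tree-wide PAGE_CACHE_SIZE to PAGE_SIZE conversion; the two constants were defined identically, so the computed value does not change. For the default readahead the arithmetic works out as follows (VM_MAX_READAHEAD was 128 kbytes in this era; a 4 KiB page is assumed):

    #include <stdio.h>

    #define VM_MAX_READAHEAD 128UL   /* kbytes, as in include/linux/mm.h */
    #define PAGE_SIZE        4096UL  /* typical x86 page size */

    int main(void)
    {
        /* same arithmetic as blk_alloc_queue_node() above */
        printf("ra_pages = %lu\n",
               (VM_MAX_READAHEAD * 1024) / PAGE_SIZE);   /* 32 pages */
        return 0;
    }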
index c7bb666aafd100a329c67b97e616c9f9037c508e..331e4eee0dda0c29cc673b63c7e7341ae45e5859 100644 (file)
@@ -239,8 +239,8 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
        struct queue_limits *limits = &q->limits;
        unsigned int max_sectors;
 
-       if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
-               max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
+       if ((max_hw_sectors << 9) < PAGE_SIZE) {
+               max_hw_sectors = 1 << (PAGE_SHIFT - 9);
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_hw_sectors);
        }
@@ -329,8 +329,8 @@ EXPORT_SYMBOL(blk_queue_max_segments);
  **/
 void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
 {
-       if (max_size < PAGE_CACHE_SIZE) {
-               max_size = PAGE_CACHE_SIZE;
+       if (max_size < PAGE_SIZE) {
+               max_size = PAGE_SIZE;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_size);
        }
@@ -760,8 +760,8 @@ EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
  **/
 void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
 {
-       if (mask < PAGE_CACHE_SIZE - 1) {
-               mask = PAGE_CACHE_SIZE - 1;
+       if (mask < PAGE_SIZE - 1) {
+               mask = PAGE_SIZE - 1;
                printk(KERN_INFO "%s: set to minimum %lx\n",
                       __func__, mask);
        }
index dd93763057ce0187dcc1b81602b75a43310d451d..995b58d46ed109b0c7241b6db5b870ed911dc845 100644 (file)
@@ -76,7 +76,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 static ssize_t queue_ra_show(struct request_queue *q, char *page)
 {
        unsigned long ra_kb = q->backing_dev_info.ra_pages <<
-                                       (PAGE_CACHE_SHIFT - 10);
+                                       (PAGE_SHIFT - 10);
 
        return queue_var_show(ra_kb, (page));
 }
@@ -90,7 +90,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
        if (ret < 0)
                return ret;
 
-       q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
+       q->backing_dev_info.ra_pages = ra_kb >> (PAGE_SHIFT - 10);
 
        return ret;
 }
@@ -117,7 +117,7 @@ static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
        if (blk_queue_cluster(q))
                return queue_var_show(queue_max_segment_size(q), (page));
 
-       return queue_var_show(PAGE_CACHE_SIZE, (page));
+       return queue_var_show(PAGE_SIZE, (page));
 }
 
 static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
@@ -198,7 +198,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 {
        unsigned long max_sectors_kb,
                max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
-                       page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
+                       page_kb = 1 << (PAGE_SHIFT - 10);
        ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
 
        if (ret < 0)
index e3c591dd8f19d0b46fe42dc842510f9d220af143..4a349787bc6280b30d224c87d6800bef0ddd9a60 100644 (file)
@@ -4075,7 +4075,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                 * idle timer unplug to continue working.
                 */
                if (cfq_cfqq_wait_request(cfqq)) {
-                       if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
+                       if (blk_rq_bytes(rq) > PAGE_SIZE ||
                            cfqd->busy_queues > 1) {
                                cfq_del_timer(cfqd, cfqq);
                                cfq_clear_cfqq_wait_request(cfqq);
index f678c733df404189bdff2a87a01225c6aa124816..556826ac7cb4837c44d15d9f5576469d578055b3 100644 (file)
@@ -710,7 +710,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
                        return -EINVAL;
                bdi = blk_get_backing_dev_info(bdev);
                return compat_put_long(arg,
-                                      (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
+                                      (bdi->ra_pages * PAGE_SIZE) / 512);
        case BLKROGET: /* compatible */
                return compat_put_int(arg, bdev_read_only(bdev) != 0);
        case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */
@@ -729,7 +729,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
                if (!capable(CAP_SYS_ADMIN))
                        return -EACCES;
                bdi = blk_get_backing_dev_info(bdev);
-               bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
+               bdi->ra_pages = (arg * 512) / PAGE_SIZE;
                return 0;
        case BLKGETSIZE:
                size = i_size_read(bdev->bd_inode);
index d8996bbd7f12805b9c74429813f5b3ee3d744bdc..4ff1f92f89ca0cec08830a71104e38d928592583 100644 (file)
@@ -550,7 +550,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
                if (!arg)
                        return -EINVAL;
                bdi = blk_get_backing_dev_info(bdev);
-               return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
+               return put_long(arg, (bdi->ra_pages * PAGE_SIZE) / 512);
        case BLKROGET:
                return put_int(arg, bdev_read_only(bdev) != 0);
        case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */
@@ -578,7 +578,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
                if(!capable(CAP_SYS_ADMIN))
                        return -EACCES;
                bdi = blk_get_backing_dev_info(bdev);
-               bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
+               bdi->ra_pages = (arg * 512) / PAGE_SIZE;
                return 0;
        case BLKBSZSET:
                return blkdev_bszset(bdev, mode, argp);
index 5d87019410542951c8c94afffe9e4df62fdc8755..d7eb77e1e3a8f4be13f6016ed0e7f7ab7c051da8 100644 (file)
@@ -361,15 +361,20 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
                        goto out_del;
        }
 
+       err = hd_ref_init(p);
+       if (err) {
+               if (flags & ADDPART_FLAG_WHOLEDISK)
+                       goto out_remove_file;
+               goto out_del;
+       }
+
        /* everything is up and running, commence */
        rcu_assign_pointer(ptbl->part[partno], p);
 
        /* suppress uevent if the disk suppresses it */
        if (!dev_get_uevent_suppress(ddev))
                kobject_uevent(&pdev->kobj, KOBJ_ADD);
-
-       if (!hd_ref_init(p))
-               return p;
+       return p;
 
 out_free_info:
        free_part_info(p);
@@ -378,6 +383,8 @@ out_free_stats:
 out_free:
        kfree(p);
        return ERR_PTR(err);
+out_remove_file:
+       device_remove_file(pdev, &dev_attr_whole_disk);
 out_del:
        kobject_put(p->holder_dir);
        device_del(pdev);
@@ -566,8 +573,8 @@ static struct page *read_pagecache_sector(struct block_device *bdev, sector_t n)
 {
        struct address_space *mapping = bdev->bd_inode->i_mapping;
 
-       return read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)),
-                       NULL);
+       return read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_SHIFT-9)),
+                                NULL);
 }
 
 unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
@@ -584,9 +591,9 @@ unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
                if (PageError(page))
                        goto fail;
                p->v = page;
-               return (unsigned char *)page_address(page) +  ((n & ((1 << (PAGE_CACHE_SHIFT - 9)) - 1)) << 9);
+               return (unsigned char *)page_address(page) + ((n & ((1 << (PAGE_SHIFT - 9)) - 1)) << 9);
 fail:
-               page_cache_release(page);
+               put_page(page);
        }
        p->v = NULL;
        return NULL;
index 93a1fdc1feee68c9a8b15bef682886015884ce98..1d33beb6a1ae5a9378905cca7b9fa7323a96ff2b 100644 (file)
@@ -96,6 +96,7 @@ config CRYPTO_AKCIPHER
 config CRYPTO_RSA
        tristate "RSA algorithm"
        select CRYPTO_AKCIPHER
+       select CRYPTO_MANAGER
        select MPILIB
        select ASN1
        help
index 5fc1f172963dc6914f0f6def8435943acd67dfe7..3887a98abcc3c255ccc37de87f11411b17522086 100644 (file)
@@ -69,8 +69,9 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
        struct scatterlist *sg;
 
        sg = walk->sg;
-       walk->pg = sg_page(sg);
        walk->offset = sg->offset;
+       walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
+       walk->offset = offset_in_page(walk->offset);
        walk->entrylen = sg->length;
 
        if (walk->entrylen > walk->total)
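The ahash fix above handles scatterlist entries whose offset reaches past the first page: the offset is split into whole pages (offset >> PAGE_SHIFT) and an in-page remainder (offset_in_page()). A small userspace restatement of that arithmetic, assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define offset_in_page(o) ((unsigned long)(o) & (PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long offset = 5000;   /* hypothetical sg offset */

        /* skip 1 page and start 904 bytes into it */
        printf("pages to skip = %lu, in-page offset = %lu\n",
               offset >> PAGE_SHIFT, offset_in_page(offset));
        return 0;
    }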
index 1cea67d43e1db25c55db64b2f805f035428faa29..ead8dc0d084e749e35733abc164f1e209aca3f6f 100644 (file)
@@ -387,16 +387,16 @@ static int pkcs1pad_decrypt(struct akcipher_request *req)
        req_ctx->child_req.src = req->src;
        req_ctx->child_req.src_len = req->src_len;
        req_ctx->child_req.dst = req_ctx->out_sg;
-       req_ctx->child_req.dst_len = ctx->key_size - 1;
+       req_ctx->child_req.dst_len = ctx->key_size;
 
-       req_ctx->out_buf = kmalloc(ctx->key_size - 1,
+       req_ctx->out_buf = kmalloc(ctx->key_size,
                        (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                        GFP_KERNEL : GFP_ATOMIC);
        if (!req_ctx->out_buf)
                return -ENOMEM;
 
        pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
-                       ctx->key_size - 1, NULL);
+                           ctx->key_size, NULL);
 
        akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
        akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
@@ -595,16 +595,16 @@ static int pkcs1pad_verify(struct akcipher_request *req)
        req_ctx->child_req.src = req->src;
        req_ctx->child_req.src_len = req->src_len;
        req_ctx->child_req.dst = req_ctx->out_sg;
-       req_ctx->child_req.dst_len = ctx->key_size - 1;
+       req_ctx->child_req.dst_len = ctx->key_size;
 
-       req_ctx->out_buf = kmalloc(ctx->key_size - 1,
+       req_ctx->out_buf = kmalloc(ctx->key_size,
                        (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                        GFP_KERNEL : GFP_ATOMIC);
        if (!req_ctx->out_buf)
                return -ENOMEM;
 
        pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
-                       ctx->key_size - 1, NULL);
+                           ctx->key_size, NULL);
 
        akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
        akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
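Both pkcs1pad hunks grow the decrypt/verify bounce buffer from key_size - 1 to key_size bytes, since the child RSA transform may legitimately produce a result that occupies the full key width. For a hypothetical 2048-bit key:

    #include <stdio.h>

    int main(void)
    {
        unsigned key_bits = 2048;           /* hypothetical RSA key */
        unsigned key_size = key_bits / 8;   /* 256 bytes */

        /* the child transform may fill all key_size bytes, so a
         * key_size - 1 buffer is one byte too small */
        printf("bounce buffer: %u bytes\n", key_size);
        return 0;
    }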
index 1982310e6d83a3aa6a6e510e8f72682fb1c05226..da198b8641074cff2830687be53ffa79ba65df0c 100644 (file)
@@ -428,6 +428,9 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
                                obj_desc->method.mutex->mutex.
                                    original_sync_level =
                                    obj_desc->method.mutex->mutex.sync_level;
+
+                               obj_desc->method.mutex->mutex.thread_id =
+                                   acpi_os_get_thread_id();
                        }
                }
 
index d0f35e63640ba78b956dd6ed3b870a542fedaac5..63cc9dbe4f3b4d1db7037fb4ca1df532b8dc5534 100644 (file)
@@ -287,8 +287,11 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
                                        offset);
                        rc = -ENXIO;
                }
-       } else
+       } else {
                rc = 0;
+               if (cmd_rc)
+                       *cmd_rc = xlat_status(buf, cmd);
+       }
 
  out:
        ACPI_FREE(out_obj);
index 5083f85efea76edb7f2968ffa967becbf4d4943a..cfa936a32513b1d70d74834fa743b6f612665dc1 100644 (file)
@@ -202,6 +202,14 @@ config SATA_FSL
 
          If unsure, say N.
 
+config SATA_AHCI_SEATTLE
+       tristate "AMD Seattle 6.0Gbps AHCI SATA host controller support"
+       depends on ARCH_SEATTLE
+       help
+        This option enables support for the AMD Seattle SATA host controller.
+
+        If unsure, say N.
+
 config SATA_INIC162X
        tristate "Initio 162x SATA support (Very Experimental)"
        depends on PCI
index 18579521464e48e22001838ab4d0abf3efbc4b0a..0b2afb7e5f359979f7a54cdadb3178b174c885b0 100644 (file)
@@ -4,6 +4,7 @@ obj-$(CONFIG_ATA)               += libata.o
 # non-SFF interface
 obj-$(CONFIG_SATA_AHCI)                += ahci.o libahci.o
 obj-$(CONFIG_SATA_ACARD_AHCI)  += acard-ahci.o libahci.o
+obj-$(CONFIG_SATA_AHCI_SEATTLE)        += ahci_seattle.o libahci.o libahci_platform.o
 obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o libahci_platform.o
 obj-$(CONFIG_SATA_FSL)         += sata_fsl.o
 obj-$(CONFIG_SATA_INIC162X)    += sata_inic162x.o
index 40442332bfa7c154b93b540eae3eb8ae94e6d9f3..62a04c8fb5c99c9ab0614bb6dc8f0146c47329e6 100644 (file)
@@ -51,6 +51,9 @@ static int ahci_probe(struct platform_device *pdev)
        if (rc)
                return rc;
 
+       of_property_read_u32(dev->of_node,
+                            "ports-implemented", &hpriv->force_port_map);
+
        if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci"))
                hpriv->flags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ;
 
diff --git a/drivers/ata/ahci_seattle.c b/drivers/ata/ahci_seattle.c
new file mode 100644 (file)
index 0000000..6e702ab
--- /dev/null
@@ -0,0 +1,210 @@
+/*
+ * AMD Seattle AHCI SATA driver
+ *
+ * Copyright (c) 2015, Advanced Micro Devices
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ *
+ * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/libata.h>
+#include <linux/ahci_platform.h>
+#include <linux/acpi.h>
+#include <linux/pci_ids.h>
+#include "ahci.h"
+
+/* SGPIO Control Register definition
+ *
+ * Bit         Type            Description
+ * 31          RW              OD7.2 (activity)
+ * 30          RW              OD7.1 (locate)
+ * 29          RW              OD7.0 (fault)
+ * 28...8      RW              OD6.2...OD0.0 (3 bits per port, 1 bit per LED)
+ * 7           RO              SGPIO feature flag
+ * 6:4         RO              Reserved
+ * 3:0         RO              Number of ports (0 means no port supported)
+ */
+#define ACTIVITY_BIT_POS(x)            (8 + (3 * x))
+#define LOCATE_BIT_POS(x)              (ACTIVITY_BIT_POS(x) + 1)
+#define FAULT_BIT_POS(x)               (LOCATE_BIT_POS(x) + 1)
+
+#define ACTIVITY_MASK                  0x00010000
+#define LOCATE_MASK                    0x00080000
+#define FAULT_MASK                     0x00400000
+
+#define DRV_NAME "ahci-seattle"
+
+static ssize_t seattle_transmit_led_message(struct ata_port *ap, u32 state,
+                                           ssize_t size);
+
+struct seattle_plat_data {
+       void __iomem *sgpio_ctrl;
+};
+
+static struct ata_port_operations ahci_port_ops = {
+       .inherits               = &ahci_ops,
+};
+
+static const struct ata_port_info ahci_port_info = {
+       .flags          = AHCI_FLAG_COMMON,
+       .pio_mask       = ATA_PIO4,
+       .udma_mask      = ATA_UDMA6,
+       .port_ops       = &ahci_port_ops,
+};
+
+static struct ata_port_operations ahci_seattle_ops = {
+       .inherits               = &ahci_ops,
+       .transmit_led_message   = seattle_transmit_led_message,
+};
+
+static const struct ata_port_info ahci_port_seattle_info = {
+       .flags          = AHCI_FLAG_COMMON | ATA_FLAG_EM | ATA_FLAG_SW_ACTIVITY,
+       .link_flags     = ATA_LFLAG_SW_ACTIVITY,
+       .pio_mask       = ATA_PIO4,
+       .udma_mask      = ATA_UDMA6,
+       .port_ops       = &ahci_seattle_ops,
+};
+
+static struct scsi_host_template ahci_platform_sht = {
+       AHCI_SHT(DRV_NAME),
+};
+
+static ssize_t seattle_transmit_led_message(struct ata_port *ap, u32 state,
+                                           ssize_t size)
+{
+       struct ahci_host_priv *hpriv = ap->host->private_data;
+       struct ahci_port_priv *pp = ap->private_data;
+       struct seattle_plat_data *plat_data = hpriv->plat_data;
+       unsigned long flags;
+       int pmp;
+       struct ahci_em_priv *emp;
+       u32 val;
+
+       /* get the slot number from the message */
+       pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
+       if (pmp >= EM_MAX_SLOTS)
+               return -EINVAL;
+       emp = &pp->em_priv[pmp];
+
+       val = ioread32(plat_data->sgpio_ctrl);
+       if (state & ACTIVITY_MASK)
+               val |= 1 << ACTIVITY_BIT_POS((ap->port_no));
+       else
+               val &= ~(1 << ACTIVITY_BIT_POS((ap->port_no)));
+
+       if (state & LOCATE_MASK)
+               val |= 1 << LOCATE_BIT_POS((ap->port_no));
+       else
+               val &= ~(1 << LOCATE_BIT_POS((ap->port_no)));
+
+       if (state & FAULT_MASK)
+               val |= 1 << FAULT_BIT_POS((ap->port_no));
+       else
+               val &= ~(1 << FAULT_BIT_POS((ap->port_no)));
+
+       iowrite32(val, plat_data->sgpio_ctrl);
+
+       spin_lock_irqsave(ap->lock, flags);
+
+       /* save off new led state for port/slot */
+       emp->led_state = state;
+
+       spin_unlock_irqrestore(ap->lock, flags);
+
+       return size;
+}
+
+static const struct ata_port_info *ahci_seattle_get_port_info(
+               struct platform_device *pdev, struct ahci_host_priv *hpriv)
+{
+       struct device *dev = &pdev->dev;
+       struct seattle_plat_data *plat_data;
+       u32 val;
+
+       plat_data = devm_kzalloc(dev, sizeof(*plat_data), GFP_KERNEL);
+       if (!plat_data)
+               return &ahci_port_info;
+
+       plat_data->sgpio_ctrl = devm_ioremap_resource(dev,
+                             platform_get_resource(pdev, IORESOURCE_MEM, 1));
+       if (IS_ERR(plat_data->sgpio_ctrl))
+               return &ahci_port_info;
+
+       val = ioread32(plat_data->sgpio_ctrl);
+
+       if (!(val & 0xf))
+               return &ahci_port_info;
+
+       hpriv->em_loc = 0;
+       hpriv->em_buf_sz = 4;
+       hpriv->em_msg_type = EM_MSG_TYPE_LED;
+       hpriv->plat_data = plat_data;
+
+       dev_info(dev, "SGPIO LED control is enabled.\n");
+       return &ahci_port_seattle_info;
+}
+
+static int ahci_seattle_probe(struct platform_device *pdev)
+{
+       int rc;
+       struct ahci_host_priv *hpriv;
+
+       hpriv = ahci_platform_get_resources(pdev);
+       if (IS_ERR(hpriv))
+               return PTR_ERR(hpriv);
+
+       rc = ahci_platform_enable_resources(hpriv);
+       if (rc)
+               return rc;
+
+       rc = ahci_platform_init_host(pdev, hpriv,
+                                    ahci_seattle_get_port_info(pdev, hpriv),
+                                    &ahci_platform_sht);
+       if (rc)
+               goto disable_resources;
+
+       return 0;
+disable_resources:
+       ahci_platform_disable_resources(hpriv);
+       return rc;
+}
+
+static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_platform_suspend,
+                        ahci_platform_resume);
+
+static const struct acpi_device_id ahci_acpi_match[] = {
+       { "AMDI0600", 0 },
+       {}
+};
+MODULE_DEVICE_TABLE(acpi, ahci_acpi_match);
+
+static struct platform_driver ahci_seattle_driver = {
+       .probe = ahci_seattle_probe,
+       .remove = ata_platform_remove_one,
+       .driver = {
+               .name = DRV_NAME,
+               .acpi_match_table = ahci_acpi_match,
+               .pm = &ahci_pm_ops,
+       },
+};
+module_platform_driver(ahci_seattle_driver);
+
+MODULE_DESCRIPTION("Seattle AHCI SATA platform driver");
+MODULE_AUTHOR("Brijesh Singh <brijesh.singh@amd.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
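The new driver's SGPIO register packs three LED control bits per port starting at bit 8, as the table in the header comment describes. A quick check of the layout (macros restated with their arguments parenthesized):

    #include <stdio.h>

    #define ACTIVITY_BIT_POS(x) (8 + (3 * (x)))
    #define LOCATE_BIT_POS(x)   (ACTIVITY_BIT_POS(x) + 1)
    #define FAULT_BIT_POS(x)    (LOCATE_BIT_POS(x) + 1)

    int main(void)
    {
        for (int port = 0; port < 3; port++)
            /* port 0 -> 8/9/10, port 1 -> 11/12/13, port 2 -> 14/15/16 */
            printf("port %d: activity=%d locate=%d fault=%d\n",
                   port, ACTIVITY_BIT_POS(port),
                   LOCATE_BIT_POS(port), FAULT_BIT_POS(port));
        return 0;
    }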
index 3982054060b81bb0d253b001883b32e346c6dc74..a5d7c1c2a05ee26dc80a41d440357c4a7be96a20 100644 (file)
@@ -507,6 +507,7 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
                dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
                         port_map, hpriv->force_port_map);
                port_map = hpriv->force_port_map;
+               hpriv->saved_port_map = port_map;
        }
 
        if (hpriv->mask_port_map) {
index 433b60092972d56abba55897158d6c22156cf631..d8f4cc22856c924b1be7bf1aa97f175b6579c554 100644 (file)
@@ -259,9 +259,6 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
        reg = opp_table->regulator;
        if (IS_ERR(reg)) {
                /* Regulator may not be required for device */
-               if (reg)
-                       dev_err(dev, "%s: Invalid regulator (%ld)\n", __func__,
-                               PTR_ERR(reg));
                rcu_read_unlock();
                return 0;
        }
index a1e0b9ab847a345c6a09adab3ff9f2fd9af3ad3e..5fb7718f256cf9d7bd45229dcc6e8a0b2225b1c5 100644 (file)
@@ -246,6 +246,8 @@ static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
                return -EEXIST;
        }
        dev->power.wakeup = ws;
+       if (dev->power.wakeirq)
+               device_wakeup_attach_irq(dev, dev->power.wakeirq);
        spin_unlock_irq(&dev->power.lock);
        return 0;
 }
index 9b1a65debd49e645d41343eac7d54a4369ddc2fd..7f692accdc90ec296c55ba738b2cd7f65426ba26 100644 (file)
@@ -21,7 +21,7 @@
 
 static inline bool is_pset_node(struct fwnode_handle *fwnode)
 {
-       return fwnode && fwnode->type == FWNODE_PDATA;
+       return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_PDATA;
 }
 
 static inline struct property_set *to_pset_node(struct fwnode_handle *fwnode)
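Switching is_pset_node() to IS_ERR_OR_NULL() matters because fwnode pointers may carry ERR_PTR-encoded errors rather than plain NULL, and the old non-NULL test would go on to dereference them. A userspace restatement of the ERR_PTR convention (MAX_ERRNO as in the kernel; the example values are arbitrary):

    #include <stdio.h>

    #define MAX_ERRNO 4095
    #define IS_ERR_VALUE(x) \
            ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

    int main(void)
    {
        void *null_node = NULL;
        void *err_node = (void *)(unsigned long)-22;   /* ERR_PTR(-EINVAL) */

        /* both must be rejected before fwnode->type is read */
        printf("reject NULL: %d, reject ERR_PTR: %d\n",
               null_node == NULL || IS_ERR_VALUE(null_node),
               err_node == NULL || IS_ERR_VALUE(err_node));
        return 0;
    }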
index 786be8fed39e980add0b9e1cddc9e0682171dc9a..1f635471f318cb9d9395e26466ea9abebf6ddb7f 100644 (file)
@@ -136,7 +136,6 @@ static bool bcma_is_core_needed_early(u16 core_id)
        return false;
 }
 
-#if defined(CONFIG_OF) && defined(CONFIG_OF_ADDRESS)
 static struct device_node *bcma_of_find_child_device(struct platform_device *parent,
                                                     struct bcma_device *core)
 {
@@ -184,7 +183,7 @@ static unsigned int bcma_of_get_irq(struct platform_device *parent,
        struct of_phandle_args out_irq;
        int ret;
 
-       if (!parent || !parent->dev.of_node)
+       if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent || !parent->dev.of_node)
                return 0;
 
        ret = bcma_of_irq_parse(parent, core, &out_irq, num);
@@ -202,23 +201,15 @@ static void bcma_of_fill_device(struct platform_device *parent,
 {
        struct device_node *node;
 
+       if (!IS_ENABLED(CONFIG_OF_IRQ))
+               return;
+
        node = bcma_of_find_child_device(parent, core);
        if (node)
                core->dev.of_node = node;
 
        core->irq = bcma_of_get_irq(parent, core, 0);
 }
-#else
-static void bcma_of_fill_device(struct platform_device *parent,
-                               struct bcma_device *core)
-{
-}
-static inline unsigned int bcma_of_get_irq(struct platform_device *parent,
-                                          struct bcma_device *core, int num)
-{
-       return 0;
-}
-#endif /* CONFIG_OF */
 
 unsigned int bcma_core_irq(struct bcma_device *core, int num)
 {
index dd73e1ff1759c902db1734ac94975abea11e1b02..ec9d8610b25f772eefe20917cc8921db17c29016 100644 (file)
@@ -397,7 +397,7 @@ aoeblk_gdalloc(void *vp)
        WARN_ON(d->flags & DEVFL_UP);
        blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
        q->backing_dev_info.name = "aoe";
-       q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_CACHE_SIZE;
+       q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_SIZE;
        d->bufpool = mp;
        d->blkq = gd->queue = q;
        q->queuedata = d;
index f7ecc287d733b6b76a0fb0411f16c7f55907671f..51a071e322213982247aa7ff01a15d6057d3fb34 100644 (file)
@@ -374,7 +374,7 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector,
                       struct page *page, int rw)
 {
        struct brd_device *brd = bdev->bd_disk->private_data;
-       int err = brd_do_bvec(brd, page, PAGE_CACHE_SIZE, 0, rw, sector);
+       int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, rw, sector);
        page_endio(page, rw & WRITE, err);
        return err;
 }
index c227fd4cad75fe62eba5bcfd8b0ef22742a080e3..7a1cf7eaa71dc8adccd1a5a5d332edacda8df40f 100644 (file)
@@ -1327,8 +1327,8 @@ struct bm_extent {
 #endif
 #endif
 
-/* BIO_MAX_SIZE is 256 * PAGE_CACHE_SIZE,
- * so for typical PAGE_CACHE_SIZE of 4k, that is (1<<20) Byte.
+/* BIO_MAX_SIZE is 256 * PAGE_SIZE,
+ * so for typical PAGE_SIZE of 4k, that is (1<<20) Byte.
  * Since we may live in a mixed-platform cluster,
  * we limit us to a platform agnostic constant here for now.
  * A followup commit may allow even bigger BIO sizes,
index 226eb0c9f0fb33a7ee795dbf3661ec5bac58da77..0bac9c8246bc40cf47c330aa8db7cb7410932aaf 100644 (file)
@@ -1178,7 +1178,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
        blk_queue_max_hw_sectors(q, max_hw_sectors);
        /* This is the workaround for "bio would need to, but cannot, be split" */
        blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
-       blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);
+       blk_queue_segment_boundary(q, PAGE_SIZE-1);
 
        if (b) {
                struct drbd_connection *connection = first_peer_device(device)->connection;
@@ -3633,14 +3633,15 @@ static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
                goto nla_put_failure;
        if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
            nla_put_u32(skb, T_current_state, device->state.i) ||
-           nla_put_u64(skb, T_ed_uuid, device->ed_uuid) ||
-           nla_put_u64(skb, T_capacity, drbd_get_capacity(device->this_bdev)) ||
-           nla_put_u64(skb, T_send_cnt, device->send_cnt) ||
-           nla_put_u64(skb, T_recv_cnt, device->recv_cnt) ||
-           nla_put_u64(skb, T_read_cnt, device->read_cnt) ||
-           nla_put_u64(skb, T_writ_cnt, device->writ_cnt) ||
-           nla_put_u64(skb, T_al_writ_cnt, device->al_writ_cnt) ||
-           nla_put_u64(skb, T_bm_writ_cnt, device->bm_writ_cnt) ||
+           nla_put_u64_0pad(skb, T_ed_uuid, device->ed_uuid) ||
+           nla_put_u64_0pad(skb, T_capacity,
+                            drbd_get_capacity(device->this_bdev)) ||
+           nla_put_u64_0pad(skb, T_send_cnt, device->send_cnt) ||
+           nla_put_u64_0pad(skb, T_recv_cnt, device->recv_cnt) ||
+           nla_put_u64_0pad(skb, T_read_cnt, device->read_cnt) ||
+           nla_put_u64_0pad(skb, T_writ_cnt, device->writ_cnt) ||
+           nla_put_u64_0pad(skb, T_al_writ_cnt, device->al_writ_cnt) ||
+           nla_put_u64_0pad(skb, T_bm_writ_cnt, device->bm_writ_cnt) ||
            nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&device->ap_bio_cnt)) ||
            nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&device->ap_pending_cnt)) ||
            nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&device->rs_pending_cnt)))
@@ -3657,13 +3658,16 @@ static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
                        goto nla_put_failure;
 
                if (nla_put_u32(skb, T_disk_flags, device->ldev->md.flags) ||
-                   nla_put_u64(skb, T_bits_total, drbd_bm_bits(device)) ||
-                   nla_put_u64(skb, T_bits_oos, drbd_bm_total_weight(device)))
+                   nla_put_u64_0pad(skb, T_bits_total, drbd_bm_bits(device)) ||
+                   nla_put_u64_0pad(skb, T_bits_oos,
+                                    drbd_bm_total_weight(device)))
                        goto nla_put_failure;
                if (C_SYNC_SOURCE <= device->state.conn &&
                    C_PAUSED_SYNC_T >= device->state.conn) {
-                       if (nla_put_u64(skb, T_bits_rs_total, device->rs_total) ||
-                           nla_put_u64(skb, T_bits_rs_failed, device->rs_failed))
+                       if (nla_put_u64_0pad(skb, T_bits_rs_total,
+                                            device->rs_total) ||
+                           nla_put_u64_0pad(skb, T_bits_rs_failed,
+                                            device->rs_failed))
                                goto nla_put_failure;
                }
        }
index 423f4ca7d712dda6f012c32954f19c9ce3af9d9c..80cf8add46ff3667d896fca88aaea3fbf338ad27 100644 (file)
@@ -488,6 +488,12 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
        bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
        iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
                      bio_segments(bio), blk_rq_bytes(cmd->rq));
+       /*
+        * This bio may have been started from the middle of the 'bvec'
+        * because of bio splitting, so the offset from the bvec must be
+        * passed to the iov iterator.
+        */
+       iter.iov_offset = bio->bi_iter.bi_bvec_done;
 
        cmd->iocb.ki_pos = pos;
        cmd->iocb.ki_filp = file;
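After a bio split, bio->bi_iter.bi_bvec_done records how many bytes of the current bvec were already consumed, which is why the iterator's iov_offset must be seeded before the aio read or write. A sketch of the effect with hypothetical numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned bv_len = 4096;     /* hypothetical bvec length */
        unsigned bvec_done = 512;   /* consumed before the split */

        /* without seeding iov_offset the aio would re-read the first
         * 512 bytes instead of the remaining 3584 */
        printf("start at offset %u, %u bytes left in segment\n",
               bvec_done, bv_len - bvec_done);
        return 0;
    }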
index 9c6234428607fb60a6d65a3ea0c1eebfa72b6b96..0ede6d7e25686cf768e3e74d7611e5e61319d6e7 100644 (file)
@@ -538,7 +538,6 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
                                u8 *order, u64 *snap_size);
 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
                u64 *snap_features);
-static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
 
 static int rbd_open(struct block_device *bdev, fmode_t mode)
 {
@@ -1953,7 +1952,7 @@ static struct ceph_osd_request *rbd_osd_req_create(
 
        osdc = &rbd_dev->rbd_client->client->osdc;
        osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
-                                         GFP_ATOMIC);
+                                         GFP_NOIO);
        if (!osd_req)
                return NULL;    /* ENOMEM */
 
@@ -2002,7 +2001,7 @@ rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
        rbd_dev = img_request->rbd_dev;
        osdc = &rbd_dev->rbd_client->client->osdc;
        osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
-                                               false, GFP_ATOMIC);
+                                               false, GFP_NOIO);
        if (!osd_req)
                return NULL;    /* ENOMEM */
 
@@ -2504,7 +2503,7 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
                                        bio_chain_clone_range(&bio_list,
                                                                &bio_offset,
                                                                clone_size,
-                                                               GFP_ATOMIC);
+                                                               GFP_NOIO);
                        if (!obj_request->bio_list)
                                goto out_unwind;
                } else if (type == OBJ_REQUEST_PAGES) {
@@ -3127,9 +3126,6 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
        struct rbd_device *rbd_dev = (struct rbd_device *)data;
        int ret;
 
-       if (!rbd_dev)
-               return;
-
        dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
                rbd_dev->header_name, (unsigned long long)notify_id,
                (unsigned int)opcode);
@@ -3263,6 +3259,9 @@ static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
 
        ceph_osdc_cancel_event(rbd_dev->watch_event);
        rbd_dev->watch_event = NULL;
+
+       dout("%s flushing notifies\n", __func__);
+       ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
 }
 
 /*
@@ -3642,21 +3641,14 @@ static void rbd_exists_validate(struct rbd_device *rbd_dev)
 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
 {
        sector_t size;
-       bool removing;
 
        /*
-        * Don't hold the lock while doing disk operations,
-        * or lock ordering will conflict with the bdev mutex via:
-        * rbd_add() -> blkdev_get() -> rbd_open()
+        * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
+        * try to update its size.  If REMOVING is set, updating size
+        * is just useless work since the device can't be opened.
         */
-       spin_lock_irq(&rbd_dev->lock);
-       removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
-       spin_unlock_irq(&rbd_dev->lock);
-       /*
-        * If the device is being removed, rbd_dev->disk has
-        * been destroyed, so don't try to update its size
-        */
-       if (!removing) {
+       if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
+           !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
                size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
                dout("setting size to %llu sectors", (unsigned long long)size);
                set_capacity(rbd_dev->disk, size);
@@ -4191,7 +4183,7 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
                __le64 features;
                __le64 incompat;
        } __attribute__ ((packed)) features_buf = { 0 };
-       u64 incompat;
+       u64 unsup;
        int ret;
 
        ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
@@ -4204,9 +4196,12 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
        if (ret < sizeof (features_buf))
                return -ERANGE;
 
-       incompat = le64_to_cpu(features_buf.incompat);
-       if (incompat & ~RBD_FEATURES_SUPPORTED)
+       unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
+       if (unsup) {
+               rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
+                        unsup);
                return -ENXIO;
+       }
 
        *snap_features = le64_to_cpu(features_buf.features);
 
@@ -5187,6 +5182,10 @@ out_err:
        return ret;
 }
 
+/*
+ * rbd_dev->header_rwsem must be locked for write and will be unlocked
+ * upon return.
+ */
 static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
 {
        int ret;
@@ -5195,7 +5194,7 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
 
        ret = rbd_dev_id_get(rbd_dev);
        if (ret)
-               return ret;
+               goto err_out_unlock;
 
        BUILD_BUG_ON(DEV_NAME_LEN
                        < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
@@ -5236,8 +5235,9 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
        /* Everything's ready.  Announce the disk to the world. */
 
        set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
-       add_disk(rbd_dev->disk);
+       up_write(&rbd_dev->header_rwsem);
 
+       add_disk(rbd_dev->disk);
        pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
                (unsigned long long) rbd_dev->mapping.size);
 
@@ -5252,6 +5252,8 @@ err_out_blkdev:
                unregister_blkdev(rbd_dev->major, rbd_dev->name);
 err_out_id:
        rbd_dev_id_put(rbd_dev);
+err_out_unlock:
+       up_write(&rbd_dev->header_rwsem);
        return ret;
 }
 
@@ -5442,6 +5444,7 @@ static ssize_t do_rbd_add(struct bus_type *bus,
        spec = NULL;            /* rbd_dev now owns this */
        rbd_opts = NULL;        /* rbd_dev now owns this */
 
+       down_write(&rbd_dev->header_rwsem);
        rc = rbd_dev_image_probe(rbd_dev, 0);
        if (rc < 0)
                goto err_out_rbd_dev;
@@ -5471,6 +5474,7 @@ out:
        return rc;
 
 err_out_rbd_dev:
+       up_write(&rbd_dev->header_rwsem);
        rbd_dev_destroy(rbd_dev);
 err_out_client:
        rbd_put_client(rbdc);
@@ -5577,12 +5581,6 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
                return ret;
 
        rbd_dev_header_unwatch_sync(rbd_dev);
-       /*
-        * flush remaining watch callbacks - these must be complete
-        * before the osd_client is shutdown
-        */
-       dout("%s: flushing notifies", __func__);
-       ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
 
        /*
         * Don't free anything from rbd_dev->disk until after all
index 47ca4b39d3065658c04d6b95add0b648da617a07..641c2d19fc5797f62ea6ee33c7ef8e64cc4ecc6c 100644 (file)
@@ -206,7 +206,8 @@ static int ath3k_load_firmware(struct usb_device *udev,
                                const struct firmware *firmware)
 {
        u8 *send_buf;
-       int err, pipe, len, size, sent = 0;
+       int len = 0;
+       int err, pipe, size, sent = 0;
        int count = firmware->size;
 
        BT_DBG("udev %p", udev);
@@ -302,7 +303,8 @@ static int ath3k_load_fwfile(struct usb_device *udev,
                const struct firmware *firmware)
 {
        u8 *send_buf;
-       int err, pipe, len, size, count, sent = 0;
+       int len = 0;
+       int err, pipe, size, count, sent = 0;
        int ret;
 
        count = firmware->size;
index 05904732e6f1502a630a45f842120da0f6dff68d..f742384b53f7d1067fb15c21f35f9088689167a1 100644 (file)
 #include <linux/bitops.h>
 #include <linux/slab.h>
 #include <net/bluetooth/bluetooth.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_irq.h>
 
 #define BTM_HEADER_LEN                 4
 #define BTM_UPLD_SIZE                  2312
index f25a825a693fa790dfb626b00367f4704a9a7f0b..7ad8d61c0c61c6a1b4c5623b8e2621fee82fe745 100644 (file)
@@ -510,34 +510,39 @@ static int btmrvl_download_cal_data(struct btmrvl_private *priv,
 static int btmrvl_check_device_tree(struct btmrvl_private *priv)
 {
        struct device_node *dt_node;
+       struct btmrvl_sdio_card *card = priv->btmrvl_dev.card;
        u8 cal_data[BT_CAL_HDR_LEN + BT_CAL_DATA_SIZE];
-       int ret;
-       u32 val;
+       int ret = 0;
+       u16 gpio, gap;
+
+       if (card->plt_of_node) {
+               dt_node = card->plt_of_node;
+               ret = of_property_read_u16(dt_node, "marvell,wakeup-pin",
+                                          &gpio);
+               if (ret)
+                       gpio = (priv->btmrvl_dev.gpio_gap & 0xff00) >> 8;
+
+               ret = of_property_read_u16(dt_node, "marvell,wakeup-gap-ms",
+                                          &gap);
+               if (ret)
+                       gap = (u8)(priv->btmrvl_dev.gpio_gap & 0x00ff);
 
-       for_each_compatible_node(dt_node, NULL, "btmrvl,cfgdata") {
-               ret = of_property_read_u32(dt_node, "btmrvl,gpio-gap", &val);
-               if (!ret)
-                       priv->btmrvl_dev.gpio_gap = val;
+               priv->btmrvl_dev.gpio_gap = (gpio << 8) + gap;
 
-               ret = of_property_read_u8_array(dt_node, "btmrvl,cal-data",
+               ret = of_property_read_u8_array(dt_node, "marvell,cal-data",
                                                cal_data + BT_CAL_HDR_LEN,
                                                BT_CAL_DATA_SIZE);
-               if (ret) {
-                       of_node_put(dt_node);
+               if (ret)
                        return ret;
-               }
 
                BT_DBG("Use cal data from device tree");
                ret = btmrvl_download_cal_data(priv, cal_data,
                                               BT_CAL_DATA_SIZE);
-               if (ret) {
+               if (ret)
                        BT_ERR("Fail to download calibrate data");
-                       of_node_put(dt_node);
-                       return ret;
-               }
        }
 
-       return 0;
+       return ret;
 }
 
 static int btmrvl_setup(struct hci_dev *hdev)
index c6ef248de5e44880e2c7c5269010f95eb22fbf23..f425ddf91a2459c7af330584da6ab9b5018a84a6 100644 (file)
@@ -52,6 +52,69 @@ static struct memory_type_mapping mem_type_mapping_tbl[] = {
        {"EXTLAST", NULL, 0, 0xFE},
 };
 
+static const struct of_device_id btmrvl_sdio_of_match_table[] = {
+       { .compatible = "marvell,sd8897-bt" },
+       { .compatible = "marvell,sd8997-bt" },
+       { }
+};
+
+static irqreturn_t btmrvl_wake_irq_bt(int irq, void *priv)
+{
+       struct btmrvl_plt_wake_cfg *cfg = priv;
+
+       if (cfg->irq_bt >= 0) {
+               pr_info("%s: wake by bt", __func__);
+               cfg->wake_by_bt = true;
+               disable_irq_nosync(irq);
+       }
+
+       return IRQ_HANDLED;
+}
+
+/* This function parses the device tree node using the mmc subnode
+ * devicetree API.  The device node is saved in card->plt_of_node.
+ * If the node exists and includes an interrupts property, this function
+ * requests the platform-specific wakeup interrupt.
+ */
+static int btmrvl_sdio_probe_of(struct device *dev,
+                               struct btmrvl_sdio_card *card)
+{
+       struct btmrvl_plt_wake_cfg *cfg;
+       int ret;
+
+       if (!dev->of_node ||
+           !of_match_node(btmrvl_sdio_of_match_table, dev->of_node)) {
+               pr_err("sdio platform data not available");
+               return -1;
+       }
+
+       card->plt_of_node = dev->of_node;
+
+       card->plt_wake_cfg = devm_kzalloc(dev, sizeof(*card->plt_wake_cfg),
+                                         GFP_KERNEL);
+       cfg = card->plt_wake_cfg;
+       if (cfg && card->plt_of_node) {
+               cfg->irq_bt = irq_of_parse_and_map(card->plt_of_node, 0);
+               if (!cfg->irq_bt) {
+                       dev_err(dev, "fail to parse irq_bt from device tree");
+               } else {
+                       ret = devm_request_irq(dev, cfg->irq_bt,
+                                              btmrvl_wake_irq_bt,
+                                              IRQF_TRIGGER_LOW,
+                                              "bt_wake", cfg);
+                       if (ret) {
+                               dev_err(dev,
+                                       "Failed to request irq_bt %d (%d)\n",
+                                       cfg->irq_bt, ret);
+                       }
+                       disable_irq(cfg->irq_bt);
+               }
+       }
+
+       return 0;
+}
+
 /* The btmrvl_sdio_remove() callback function is called
  * when user removes this module from kernel space or ejects
  * the card from the slot. The driver handles these 2 cases
@@ -1464,6 +1526,9 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
 
        btmrvl_sdio_enable_host_int(card);
 
+       /* Device tree node parsing and platform specific configuration */
+       btmrvl_sdio_probe_of(&func->dev, card);
+
        priv = btmrvl_add_card(card);
        if (!priv) {
                BT_ERR("Initializing card failed!");
@@ -1544,6 +1609,13 @@ static int btmrvl_sdio_suspend(struct device *dev)
                return 0;
        }
 
+       /* Enable platform specific wakeup interrupt */
+       if (card->plt_wake_cfg && card->plt_wake_cfg->irq_bt >= 0) {
+               card->plt_wake_cfg->wake_by_bt = false;
+               enable_irq(card->plt_wake_cfg->irq_bt);
+               enable_irq_wake(card->plt_wake_cfg->irq_bt);
+       }
+
        priv = card->priv;
        priv->adapter->is_suspending = true;
        hcidev = priv->btmrvl_dev.hcidev;
@@ -1606,6 +1678,13 @@ static int btmrvl_sdio_resume(struct device *dev)
        BT_DBG("%s: SDIO resume", hcidev->name);
        hci_resume_dev(hcidev);
 
+       /* Disable platform specific wakeup interrupt */
+       if (card->plt_wake_cfg && card->plt_wake_cfg->irq_bt >= 0) {
+               disable_irq_wake(card->plt_wake_cfg->irq_bt);
+               if (!card->plt_wake_cfg->wake_by_bt)
+                       disable_irq(card->plt_wake_cfg->irq_bt);
+       }
+
        return 0;
 }
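
Taken together, the probe, suspend and resume hunks implement one pattern: the wake IRQ is requested at probe time but kept masked, armed only across suspend, and disarmed on resume unless the handler already masked it with disable_irq_nosync(). A condensed sketch, using the same field names the diff introduces:

    /* Probe: request the IRQ but keep it masked until suspend. */
    ret = devm_request_irq(dev, cfg->irq_bt, btmrvl_wake_irq_bt,
                           IRQF_TRIGGER_LOW, "bt_wake", cfg);
    if (!ret)
            disable_irq(cfg->irq_bt);

    /* Suspend: arm the IRQ and mark it wakeup-capable. */
    cfg->wake_by_bt = false;
    enable_irq(cfg->irq_bt);
    enable_irq_wake(cfg->irq_bt);

    /* Resume: disarm; if the IRQ fired, the handler already ran
     * disable_irq_nosync(), so mask it again only when it did not. */
    disable_irq_wake(cfg->irq_bt);
    if (!cfg->wake_by_bt)
            disable_irq(cfg->irq_bt);
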
 
index 1a3bd064c44249a7f543d6e8c9eccdb7eefec92d..3a522d23ee6e52c3d5e708f973e25ba5e03a425f 100644 (file)
 
 #define FIRMWARE_READY                         0xfedc
 
+struct btmrvl_plt_wake_cfg {
+       int irq_bt;
+       bool wake_by_bt;
+};
 
 struct btmrvl_sdio_card_reg {
        u8 cfg;
@@ -97,6 +101,8 @@ struct btmrvl_sdio_card {
        u16 sd_blksz_fw_dl;
        u8 rx_unit;
        struct btmrvl_private *priv;
+       struct device_node *plt_of_node;
+       struct btmrvl_plt_wake_cfg *plt_wake_cfg;
 };
 
 struct btmrvl_sdio_device {
index 0d4e372e426d8e15c846c3bcdca7e410aa7604e8..6aae9590511a11d0880cb29105502c14cdd62faf 100644 (file)
@@ -2001,12 +2001,13 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
                return -EINVAL;
        }
 
-       /* At the moment only the hardware variant iBT 3.0 (LnP/SfP) is
-        * supported by this firmware loading method. This check has been
-        * put in place to ensure correct forward compatibility options
-        * when newer hardware variants come along.
+       /* At the moment the iBT 3.0 hardware variants 0x0b (LnP/SfP)
+        * and 0x0c (WsP) are supported by this firmware loading method.
+        *
+        * This check has been put in place to ensure correct forward
+        * compatibility options when newer hardware variants come along.
         */
-       if (ver.hw_variant != 0x0b) {
+       if (ver.hw_variant != 0x0b && ver.hw_variant != 0x0c) {
                BT_ERR("%s: Unsupported Intel hardware variant (%u)",
                       hdev->name, ver.hw_variant);
                return -EINVAL;
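
An equivalent way to express this growing allow-list of hardware variants is a switch; this is a sketch only, the patch itself keeps the chained comparison:

    switch (ver.hw_variant) {
    case 0x0b:      /* LnP/SfP */
    case 0x0c:      /* WsP */
            break;  /* supported by this firmware loading method */
    default:
            BT_ERR("%s: Unsupported Intel hardware variant (%u)",
                   hdev->name, ver.hw_variant);
            return -EINVAL;
    }
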
index d8881dc0600cdad24c2e144cd692dd5b51680550..1c97eda8bae3e19e0014d36222b1cd0d31281bbf 100644 (file)
@@ -825,6 +825,7 @@ static const struct acpi_device_id bcm_acpi_match[] = {
        { "BCM2E64", 0 },
        { "BCM2E65", 0 },
        { "BCM2E67", 0 },
+       { "BCM2E71", 0 },
        { "BCM2E7B", 0 },
        { "BCM2E7C", 0 },
        { },
index 064f2fefad6228748f789364a66437ecb527353c..d7d23ceba4d15eb5927436f5279a401222f679d9 100644 (file)
@@ -102,13 +102,12 @@ static const u16 crc_table[] = {
 /* Initialise the crc calculator */
 #define BCSP_CRC_INIT(x) x = 0xffff
 
-/*
-   Update crc with next data byte
-
-   Implementation note
-        The data byte is treated as two nibbles.  The crc is generated
-        in reverse, i.e., bits are fed into the register from the top.
-*/
+/* Update crc with next data byte
+ *
+ * Implementation note
+ *     The data byte is treated as two nibbles.  The crc is generated
+ *     in reverse, i.e., bits are fed into the register from the top.
+ */
 static void bcsp_crc_update(u16 *crc, u8 d)
 {
        u16 reg = *crc;
@@ -223,9 +222,10 @@ static struct sk_buff *bcsp_prepare_pkt(struct bcsp_struct *bcsp, u8 *data,
        }
 
        /* Max len of packet: (original len +4(bcsp hdr) +2(crc))*2
-          (because bytes 0xc0 and 0xdb are escaped, worst case is
-          when the packet is all made of 0xc0 and 0xdb :) )
-          + 2 (0xc0 delimiters at start and end). */
+        * (because bytes 0xc0 and 0xdb are escaped, worst case is
+        * when the packet is all made of 0xc0 and 0xdb :) )
+        * + 2 (0xc0 delimiters at start and end).
+        */
 
        nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC);
        if (!nskb)
@@ -285,7 +285,7 @@ static struct sk_buff *bcsp_dequeue(struct hci_uart *hu)
        struct bcsp_struct *bcsp = hu->priv;
        unsigned long flags;
        struct sk_buff *skb;
-       
+
        /* First of all, check for unreliable messages in the queue,
           since they have priority */
 
@@ -305,8 +305,9 @@ static struct sk_buff *bcsp_dequeue(struct hci_uart *hu)
        }
 
        /* Now, try to send a reliable pkt. We can only send a
-          reliable packet if the number of packets sent but not yet ack'ed
-          is < than the winsize */
+        * reliable packet if the number of packets sent but not yet ack'ed
+        * is < than the winsize
+        */
 
        spin_lock_irqsave_nested(&bcsp->unack.lock, flags, SINGLE_DEPTH_NESTING);
 
@@ -332,12 +333,14 @@ static struct sk_buff *bcsp_dequeue(struct hci_uart *hu)
        spin_unlock_irqrestore(&bcsp->unack.lock, flags);
 
        /* We could not send a reliable packet, either because there are
-          none or because there are too many unack'ed pkts. Did we receive
-          any packets we have not acknowledged yet ? */
+        * none or because there are too many unack'ed pkts. Did we receive
+        * any packets we have not acknowledged yet ?
+        */
 
        if (bcsp->txack_req) {
                /* if so, craft an empty ACK pkt and send it on BCSP unreliable
-                  channel 0 */
+                * channel 0
+                */
                struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, NULL, 0, BCSP_ACK_PKT);
                return nskb;
        }
@@ -399,8 +402,9 @@ static void bcsp_pkt_cull(struct bcsp_struct *bcsp)
 }
 
 /* Handle BCSP link-establishment packets. When we
-   detect a "sync" packet, symptom that the BT module has reset,
-   we do nothing :) (yet) */
+ * detect a "sync" packet, symptom that the BT module has reset,
+ * we do nothing :) (yet)
+ */
 static void bcsp_handle_le_pkt(struct hci_uart *hu)
 {
        struct bcsp_struct *bcsp = hu->priv;
@@ -462,7 +466,7 @@ static inline void bcsp_unslip_one_byte(struct bcsp_struct *bcsp, unsigned char
                case 0xdd:
                        memcpy(skb_put(bcsp->rx_skb, 1), &db, 1);
                        if ((bcsp->rx_skb->data[0] & 0x40) != 0 &&
-                                       bcsp->rx_state != BCSP_W4_CRC) 
+                                       bcsp->rx_state != BCSP_W4_CRC)
                                bcsp_crc_update(&bcsp->message_crc, 0xdb);
                        bcsp->rx_esc_state = BCSP_ESCSTATE_NOESC;
                        bcsp->rx_count--;
@@ -534,7 +538,7 @@ static void bcsp_complete_rx_pkt(struct hci_uart *hu)
                        } else {
                                BT_ERR("Packet for unknown channel (%u %s)",
                                        bcsp->rx_skb->data[1] & 0x0f,
-                                       bcsp->rx_skb->data[0] & 0x80 ? 
+                                       bcsp->rx_skb->data[0] & 0x80 ?
                                        "reliable" : "unreliable");
                                kfree_skb(bcsp->rx_skb);
                        }
@@ -562,7 +566,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
        struct bcsp_struct *bcsp = hu->priv;
        const unsigned char *ptr;
 
-       BT_DBG("hu %p count %d rx_state %d rx_count %ld", 
+       BT_DBG("hu %p count %d rx_state %d rx_count %ld",
                hu, count, bcsp->rx_state, bcsp->rx_count);
 
        ptr = data;
@@ -591,7 +595,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
                                continue;
                        }
                        if (bcsp->rx_skb->data[0] & 0x80        /* reliable pkt */
-                                       && (bcsp->rx_skb->data[0] & 0x07) != bcsp->rxseq_txack) {
+                                               && (bcsp->rx_skb->data[0] & 0x07) != bcsp->rxseq_txack) {
                                BT_ERR("Out-of-order packet arrived, got %u expected %u",
                                        bcsp->rx_skb->data[0] & 0x07, bcsp->rxseq_txack);
 
@@ -601,7 +605,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
                                continue;
                        }
                        bcsp->rx_state = BCSP_W4_DATA;
-                       bcsp->rx_count = (bcsp->rx_skb->data[1] >> 4) + 
+                       bcsp->rx_count = (bcsp->rx_skb->data[1] >> 4) +
                                        (bcsp->rx_skb->data[2] << 4);   /* May be 0 */
                        continue;
 
@@ -615,7 +619,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
 
                case BCSP_W4_CRC:
                        if (bitrev16(bcsp->message_crc) != bscp_get_crc(bcsp)) {
-                               BT_ERR ("Checksum failed: computed %04x received %04x",
+                               BT_ERR("Checksum failed: computed %04x received %04x",
                                        bitrev16(bcsp->message_crc),
                                        bscp_get_crc(bcsp));
 
@@ -653,8 +657,9 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
                                BCSP_CRC_INIT(bcsp->message_crc);
 
                                /* Do not increment ptr or decrement count
-                                * Allocate packet. Max len of a BCSP pkt= 
-                                * 0xFFF (payload) +4 (header) +2 (crc) */
+                                * Allocate packet. Max len of a BCSP pkt=
+                                * 0xFFF (payload) +4 (header) +2 (crc)
+                                */
 
                                bcsp->rx_skb = bt_skb_alloc(0x1005, GFP_ATOMIC);
                                if (!bcsp->rx_skb) {
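
The comment cleanups above touch bcsp_crc_update(), whose note ("treated as two nibbles, bits fed in from the top") is worth seeing in isolation. A standalone rendition, assuming the table values are the standard nibble table for the reflected CRC-CCITT polynomial 0x8408; the printed value is the raw register before the final bit reversal that bitrev16() performs in the driver:

    #include <stdint.h>
    #include <stdio.h>

    /* Nibble-wise table for the reflected CRC-CCITT polynomial 0x8408 */
    static const uint16_t crc_table[] = {
            0x0000, 0x1081, 0x2102, 0x3183, 0x4204, 0x5285, 0x6306, 0x7387,
            0x8408, 0x9489, 0xa50a, 0xb58b, 0xc60c, 0xd68d, 0xe70e, 0xf78f,
    };

    /* Feed one byte as two nibbles, low nibble first, with bits
     * entering the register from the top, mirroring bcsp_crc_update(). */
    static void crc_update(uint16_t *crc, uint8_t d)
    {
            uint16_t reg = *crc;

            reg = (reg >> 4) ^ crc_table[(reg ^ d) & 0x000f];
            reg = (reg >> 4) ^ crc_table[(reg ^ (d >> 4)) & 0x000f];
            *crc = reg;
    }

    int main(void)
    {
            const uint8_t msg[] = "123456789";
            uint16_t crc = 0xffff;  /* BCSP_CRC_INIT */

            for (size_t i = 0; i < sizeof(msg) - 1; i++)
                    crc_update(&crc, msg[i]);
            printf("raw crc register: 0x%04x\n", (unsigned)crc);
            return 0;
    }
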
index 91d605147b10e45a721941582ac366185afb0da3..f6f2b01a1fea513a93d46d83b3c46f37ca6246d9 100644 (file)
@@ -1210,8 +1210,7 @@ static int intel_probe(struct platform_device *pdev)
 
        idev->pdev = pdev;
 
-       idev->reset = devm_gpiod_get_optional(&pdev->dev, "reset",
-                                             GPIOD_OUT_LOW);
+       idev->reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
        if (IS_ERR(idev->reset)) {
                dev_err(&pdev->dev, "Unable to retrieve gpio\n");
                return PTR_ERR(idev->reset);
@@ -1223,8 +1222,7 @@ static int intel_probe(struct platform_device *pdev)
 
                dev_err(&pdev->dev, "No IRQ, falling back to gpio-irq\n");
 
-               host_wake = devm_gpiod_get_optional(&pdev->dev, "host-wake",
-                                                   GPIOD_IN);
+               host_wake = devm_gpiod_get(&pdev->dev, "host-wake", GPIOD_IN);
                if (IS_ERR(host_wake)) {
                        dev_err(&pdev->dev, "Unable to retrieve IRQ\n");
                        goto no_irq;
index c00168a5bb800f3e74f6924f197e668020f02eb9..49b3e1e2d236ef6fa45c652b8c917532ba62ba06 100644 (file)
@@ -227,7 +227,7 @@ static int hci_uart_flush(struct hci_dev *hdev)
        tty_ldisc_flush(tty);
        tty_driver_flush_buffer(tty);
 
-       if (test_bit(HCI_UART_PROTO_SET, &hu->flags))
+       if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
                hu->proto->flush(hu);
 
        return 0;
@@ -492,7 +492,7 @@ static void hci_uart_tty_close(struct tty_struct *tty)
 
        cancel_work_sync(&hu->write_work);
 
-       if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) {
+       if (test_and_clear_bit(HCI_UART_PROTO_READY, &hu->flags)) {
                if (hdev) {
                        if (test_bit(HCI_UART_REGISTERED, &hu->flags))
                                hci_unregister_dev(hdev);
@@ -500,6 +500,7 @@ static void hci_uart_tty_close(struct tty_struct *tty)
                }
                hu->proto->close(hu);
        }
+       clear_bit(HCI_UART_PROTO_SET, &hu->flags);
 
        kfree(hu);
 }
@@ -526,7 +527,7 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty)
        if (tty != hu->tty)
                return;
 
-       if (test_bit(HCI_UART_PROTO_SET, &hu->flags))
+       if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
                hci_uart_tx_wakeup(hu);
 }
 
@@ -550,7 +551,7 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data,
        if (!hu || tty != hu->tty)
                return;
 
-       if (!test_bit(HCI_UART_PROTO_SET, &hu->flags))
+       if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
                return;
 
        /* It does not need a lock here as it is already protected by a mutex in
@@ -638,9 +639,11 @@ static int hci_uart_set_proto(struct hci_uart *hu, int id)
                return err;
 
        hu->proto = p;
+       set_bit(HCI_UART_PROTO_READY, &hu->flags);
 
        err = hci_uart_register_dev(hu);
        if (err) {
+               clear_bit(HCI_UART_PROTO_READY, &hu->flags);
                p->close(hu);
                return err;
        }
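
The flag split is the heart of this hci_ldisc fix: HCI_UART_PROTO_SET now only marks that a protocol slot was claimed, while the new HCI_UART_PROTO_READY is set once hu->proto is fully initialised and is the only bit the flush, wakeup and receive paths test. The resulting publish/consume ordering, condensed from the hunks above:

    /* publisher (hci_uart_set_proto) */
    hu->proto = p;
    set_bit(HCI_UART_PROTO_READY, &hu->flags);  /* publish after init */
    err = hci_uart_register_dev(hu);
    if (err) {
            clear_bit(HCI_UART_PROTO_READY, &hu->flags);
            p->close(hu);
    }

    /* consumers (flush, wakeup, receive) */
    if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
            return;         /* proto not safe to dereference yet */
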
index 4814ff08f4270156c70aaced894ec0cb2a5d5f63..839bad1d81522a2718245517c1e1dca74e2d336b 100644 (file)
@@ -95,6 +95,7 @@ struct hci_uart {
 /* HCI_UART proto flag bits */
 #define HCI_UART_PROTO_SET     0
 #define HCI_UART_REGISTERED    1
+#define HCI_UART_PROTO_READY   2
 
 /* TX states  */
 #define HCI_UART_SENDING       1
index 80783dcb7f57525411cb33c4f3d8076dd92920ea..aba31210c802769206435fe2bd214991df449f02 100644 (file)
@@ -50,6 +50,7 @@ struct vhci_data {
        wait_queue_head_t read_wait;
        struct sk_buff_head readq;
 
+       struct mutex open_mutex;
        struct delayed_work open_timeout;
 };
 
@@ -87,12 +88,15 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
        return 0;
 }
 
-static int vhci_create_device(struct vhci_data *data, __u8 opcode)
+static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
 {
        struct hci_dev *hdev;
        struct sk_buff *skb;
        __u8 dev_type;
 
+       if (data->hdev)
+               return -EBADFD;
+
        /* bits 0-1 are dev_type (BR/EDR or AMP) */
        dev_type = opcode & 0x03;
 
@@ -151,6 +155,17 @@ static int vhci_create_device(struct vhci_data *data, __u8 opcode)
        return 0;
 }
 
+static int vhci_create_device(struct vhci_data *data, __u8 opcode)
+{
+       int err;
+
+       mutex_lock(&data->open_mutex);
+       err = __vhci_create_device(data, opcode);
+       mutex_unlock(&data->open_mutex);
+
+       return err;
+}
+
 static inline ssize_t vhci_get_user(struct vhci_data *data,
                                    struct iov_iter *from)
 {
@@ -189,11 +204,6 @@ static inline ssize_t vhci_get_user(struct vhci_data *data,
                break;
 
        case HCI_VENDOR_PKT:
-               if (data->hdev) {
-                       kfree_skb(skb);
-                       return -EBADFD;
-               }
-
                cancel_delayed_work_sync(&data->open_timeout);
 
                opcode = *((__u8 *) skb->data);
@@ -320,6 +330,7 @@ static int vhci_open(struct inode *inode, struct file *file)
        skb_queue_head_init(&data->readq);
        init_waitqueue_head(&data->read_wait);
 
+       mutex_init(&data->open_mutex);
        INIT_DELAYED_WORK(&data->open_timeout, vhci_open_timeout);
 
        file->private_data = data;
@@ -333,15 +344,18 @@ static int vhci_open(struct inode *inode, struct file *file)
 static int vhci_release(struct inode *inode, struct file *file)
 {
        struct vhci_data *data = file->private_data;
-       struct hci_dev *hdev = data->hdev;
+       struct hci_dev *hdev;
 
        cancel_delayed_work_sync(&data->open_timeout);
 
+       hdev = data->hdev;
+
        if (hdev) {
                hci_unregister_dev(hdev);
                hci_free_dev(hdev);
        }
 
+       skb_queue_purge(&data->readq);
        file->private_data = NULL;
        kfree(data);
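
The vhci change wraps device creation in open_mutex so the "already created?" check and the creation itself form one atomic step; two writers racing on HCI_VENDOR_PKT can no longer both pass the check. The same locking discipline in a standalone form, with pthreads standing in for the kernel mutex and a hypothetical struct:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    struct vdev {
            pthread_mutex_t open_mutex;
            void *hdev;             /* non-NULL once the device exists */
    };

    /* Unlocked worker: the hdev check and the creation must not be
     * separated, or two racing writers can both pass the check. */
    static int __create_device(struct vdev *d)
    {
            if (d->hdev)
                    return -EBADFD;
            d->hdev = (void *)1;    /* stand-in for hci_alloc_dev() etc. */
            return 0;
    }

    static int create_device(struct vdev *d)
    {
            int err;

            pthread_mutex_lock(&d->open_mutex);
            err = __create_device(d);
            pthread_mutex_unlock(&d->open_mutex);
            return err;
    }

    int main(void)
    {
            struct vdev d = { .open_mutex = PTHREAD_MUTEX_INITIALIZER };

            printf("first:  %d\n", create_device(&d));  /* 0 */
            printf("second: %d\n", create_device(&d));  /* -EBADFD */
            return 0;
    }
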
 
index c2e52864bb03a5d862c00f712527df2d25e9217b..ce54a0160faaf4f5a48289e395f98a8c316bdc58 100644 (file)
@@ -972,7 +972,7 @@ int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, u8 *attr)
                }
        }
 
-       pr_err("invalid dram address 0x%x\n", phyaddr);
+       pr_err("invalid dram address %pa\n", &phyaddr);
        return -EINVAL;
 }
 EXPORT_SYMBOL_GPL(mvebu_mbus_get_dram_win_info);
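
The mvebu-mbus change swaps a raw %x for the kernel's %pa specifier, which takes a phys_addr_t by address and prints it at the correct width whether the platform defines phys_addr_t as 32 or 64 bits; with %x, a 64-bit phys_addr_t provokes a format warning and truncates. The idiom, as used above:

    phys_addr_t phyaddr = 0xd0000000;

    pr_err("invalid dram address %pa\n", &phyaddr); /* pass by address */
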
index 834a2aeaf27a8d80b9dd1df110473dcab4ef154b..350b7309c26d714c1a80c2a56b2a2d9acf1040bb 100644 (file)
@@ -108,7 +108,7 @@ static int uniphier_system_bus_check_overlap(
 
        for (i = 0; i < ARRAY_SIZE(priv->bank); i++) {
                for (j = i + 1; j < ARRAY_SIZE(priv->bank); j++) {
-                       if (priv->bank[i].end > priv->bank[j].base ||
+                       if (priv->bank[i].end > priv->bank[j].base &&
                            priv->bank[i].base < priv->bank[j].end) {
                                dev_err(priv->dev,
                                        "region overlap between bank%d and bank%d\n",
index ca9c40309757a7c0f89e510577e9597314dcfcc1..5132c9cde50dd9ed0d635419725961d28d42ccdf 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/clk.h>
 #include <linux/platform_device.h>
 #include <linux/hw_random.h>
+#include <linux/of.h>
 
 #define RNG_CTRL                       0x00
 #define RNG_EN                         (1 << 0)
index 22c27652e46a8dd6851a56edd408e39095eaa4ad..e524e8302da689141f46832d42616ff0c1fcec08 100644 (file)
@@ -3969,7 +3969,7 @@ static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
        dev_kfree_skb(skb);
 
        /* save start time for transmit timeout detection */
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
 
        /* start hardware transmitter if necessary */
        spin_lock_irqsave(&info->lock, flags);
@@ -4032,7 +4032,7 @@ static int hdlcdev_open(struct net_device *dev)
        tty_kref_put(tty);
 
        /* enable network layer transmit */
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
        netif_start_queue(dev);
 
        /* inform generic HDLC layer of current DCD status */
index 02e18182fcb5506da4c4981a0d5f1926376099e6..2beb396fe6523cb332ec369d6a65a1c7bf9d3e25 100644 (file)
@@ -394,7 +394,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
                clk[IMX6QDL_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1", 2, 7);
        } else {
                clk[IMX6QDL_CLK_ECSPI_ROOT] = imx_clk_divider("ecspi_root", "pll3_60m", base + 0x38, 19, 6);
-               clk[IMX6QDL_CLK_CAN_ROOT] = imx_clk_divider("can_root", "pll3_60", base + 0x20, 2, 6);
+               clk[IMX6QDL_CLK_CAN_ROOT] = imx_clk_divider("can_root", "pll3_60m", base + 0x20, 2, 6);
                clk[IMX6QDL_CLK_IPG_PER] = imx_clk_fixup_divider("ipg_per", "ipg", base + 0x1c, 0, 6, imx_cscmr1_fixup);
                clk[IMX6QDL_CLK_UART_SERIAL_PODF] = imx_clk_divider("uart_serial_podf", "pll3_80m",          base + 0x24, 0,  6);
                clk[IMX6QDL_CLK_LDB_DI0_DIV_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
index 2bcecafdeaeac76b2e4decf3dda66d8d7a6f027b..c407c47a32326a568f11b2f586c8e29aa3588572 100644 (file)
@@ -42,7 +42,7 @@ static void __init tango_clocksource_init(struct device_node *np)
 
        ret = clocksource_mmio_init(xtal_in_cnt, "tango-xtal", xtal_freq, 350,
                                    32, clocksource_mmio_readl_up);
-       if (!ret) {
+       if (ret) {
                pr_err("%s: registration failed\n", np->full_name);
                return;
        }
index f951f911786e086b2b6dc9d615018eb235347dbe..5f8dbe640a202baa2b12d26267ca78b983c8b7fc 100644 (file)
@@ -4,9 +4,6 @@
  * Copyright (C) 2014 Linaro.
  * Viresh Kumar <viresh.kumar@linaro.org>
  *
- * The OPP code in function set_target() is reused from
- * drivers/cpufreq/omap-cpufreq.c
- *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
index b87596b591b3ca9fdf332884700e5a3cc96ebf10..c4acfc5273b3c1f8c5efc321f5ea8407a4b7f02b 100644 (file)
@@ -1491,6 +1491,9 @@ static unsigned int cpufreq_update_current_freq(struct cpufreq_policy *policy)
 {
        unsigned int new_freq;
 
+       if (cpufreq_suspended)
+               return 0;
+
        new_freq = cpufreq_driver->get(policy->cpu);
        if (!new_freq)
                return 0;
@@ -1554,21 +1557,25 @@ void cpufreq_suspend(void)
        if (!cpufreq_driver)
                return;
 
-       if (!has_target())
+       if (!has_target() && !cpufreq_driver->suspend)
                goto suspend;
 
        pr_debug("%s: Suspending Governors\n", __func__);
 
        for_each_active_policy(policy) {
-               down_write(&policy->rwsem);
-               ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-               up_write(&policy->rwsem);
+               if (has_target()) {
+                       down_write(&policy->rwsem);
+                       ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+                       up_write(&policy->rwsem);
 
-               if (ret)
-                       pr_err("%s: Failed to stop governor for policy: %p\n",
-                               __func__, policy);
-               else if (cpufreq_driver->suspend
-                   && cpufreq_driver->suspend(policy))
+                       if (ret) {
+                               pr_err("%s: Failed to stop governor for policy: %p\n",
+                                       __func__, policy);
+                               continue;
+                       }
+               }
+
+               if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
                        pr_err("%s: Failed to suspend driver: %p\n", __func__,
                                policy);
        }
@@ -1593,7 +1600,7 @@ void cpufreq_resume(void)
 
        cpufreq_suspended = false;
 
-       if (!has_target())
+       if (!has_target() && !cpufreq_driver->resume)
                return;
 
        pr_debug("%s: Resuming Governors\n", __func__);
@@ -1602,7 +1609,7 @@ void cpufreq_resume(void)
                if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
                        pr_err("%s: Failed to resume driver: %p\n", __func__,
                                policy);
-               } else {
+               } else if (has_target()) {
                        down_write(&policy->rwsem);
                        ret = cpufreq_start_governor(policy);
                        up_write(&policy->rwsem);
index 10a5cfeae8c5e34317805402c63d0c11e3aaac5e..5f1147fa9239cdc325eb8fae871d151a4b36337f 100644 (file)
@@ -193,12 +193,8 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
                wall_time = cur_wall_time - j_cdbs->prev_cpu_wall;
                j_cdbs->prev_cpu_wall = cur_wall_time;
 
-               if (cur_idle_time <= j_cdbs->prev_cpu_idle) {
-                       idle_time = 0;
-               } else {
-                       idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
-                       j_cdbs->prev_cpu_idle = cur_idle_time;
-               }
+               idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
+               j_cdbs->prev_cpu_idle = cur_idle_time;
 
                if (ignore_nice) {
                        u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
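
The dbs_update() simplification leans on unsigned modular arithmetic: cur_idle_time - prev_cpu_idle on a u64 yields the correct elapsed amount even across a counter wrap, so the old clamp-to-zero branch (which also skipped updating prev_cpu_idle, corrupting the next delta) is unnecessary. The modular-difference property in isolation:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t prev = UINT64_MAX - 5; /* counter just before wrap */
            uint64_t cur  = 10;             /* counter after wrapping */

            /* Modular subtraction gives the true elapsed amount: 16 */
            printf("delta = %llu\n", (unsigned long long)(cur - prev));
            return 0;
    }
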
index 4b644526fd5977943761d5430e1967d26997567d..b230ebaae66cb7ee0def1d228ff33c778343f344 100644 (file)
@@ -64,6 +64,25 @@ static inline int ceiling_fp(int32_t x)
        return ret;
 }
 
+/**
+ * struct sample -     Store performance sample
+ * @core_pct_busy:     Ratio of APERF/MPERF in percent, which is actual
+ *                     performance during last sample period
+ * @busy_scaled:       Scaled busy value which is used to calculate the next
+ *                     P state. This can be different from core_pct_busy
+ *                     to account for cpu idle periods
+ * @aperf:             Difference of actual performance frequency clock count
+ *                     read from APERF MSR between last and current sample
+ * @mperf:             Difference of maximum performance frequency clock count
+ *                     read from MPERF MSR between last and current sample
+ * @tsc:               Difference of time stamp counter between last and
+ *                     current sample
+ * @freq:              Effective frequency calculated from APERF/MPERF
+ * @time:              Current time from scheduler
+ *
+ * This structure is used in the cpudata structure to store performance sample
+ * data for choosing the next P state.
+ */
 struct sample {
        int32_t core_pct_busy;
        int32_t busy_scaled;
@@ -74,6 +93,20 @@ struct sample {
        u64 time;
 };
 
+/**
+ * struct pstate_data - Store P state data
+ * @current_pstate:    Current requested P state
+ * @min_pstate:                Min P state possible for this platform
+ * @max_pstate:                Max P state possible for this platform
+ * @max_pstate_physical: Maximum physical P state for a processor.
+ *                     This can be higher than max_pstate, which may be
+ *                     limited by platform thermal design power limits
+ * @scaling:           Scaling factor to convert frequency to cpufreq
+ *                     frequency units
+ * @turbo_pstate:      Max Turbo P state possible for this platform
+ *
+ * Stores the per cpu model P state limits and current P state.
+ */
 struct pstate_data {
        int     current_pstate;
        int     min_pstate;
@@ -83,6 +116,19 @@ struct pstate_data {
        int     turbo_pstate;
 };
 
+/**
+ * struct vid_data -   Stores voltage information data
+ * @min:               VID data for this platform corresponding to
+ *                     the lowest P state
+ * @max:               VID data corresponding to the highest P State.
+ * @turbo:             VID data for turbo P state
+ * @ratio:             Ratio of (vid max - vid min) /
+ *                     (max P state - Min P State)
+ *
+ * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling).
+ * This data is used on Atom platforms, where in addition to the target P
+ * state, voltage data must be specified to select the next P state.
+ */
 struct vid_data {
        int min;
        int max;
@@ -90,6 +136,18 @@ struct vid_data {
        int32_t ratio;
 };
 
+/**
+ * struct _pid -       Stores PID data
+ * @setpoint:          Target set point for busyness or performance
+ * @integral:          Storage for accumulated error values
+ * @p_gain:            PID proportional gain
+ * @i_gain:            PID integral gain
+ * @d_gain:            PID derivative gain
+ * @deadband:          PID deadband
+ * @last_err:          Last error storage for integral part of PID calculation
+ *
+ * Stores PID coefficients and last error for PID controller.
+ */
 struct _pid {
        int setpoint;
        int32_t integral;
@@ -100,6 +158,23 @@ struct _pid {
        int32_t last_err;
 };
 
+/**
+ * struct cpudata -    Per CPU instance data storage
+ * @cpu:               CPU number for this instance data
+ * @update_util:       CPUFreq utility callback information
+ * @pstate:            Stores P state limits for this CPU
+ * @vid:               Stores VID limits for this CPU
+ * @pid:               Stores PID parameters for this CPU
+ * @last_sample_time:  Last Sample time
+ * @prev_aperf:                Last APERF value read from APERF MSR
+ * @prev_mperf:                Last MPERF value read from MPERF MSR
+ * @prev_tsc:          Last timestamp counter (TSC) value
+ * @prev_cummulative_iowait: IO wait time difference between last and
+ *                     current sample
+ * @sample:            Storage for the last sample data
+ *
+ * This structure stores per CPU instance data for all CPUs.
+ */
 struct cpudata {
        int cpu;
 
@@ -118,6 +193,19 @@ struct cpudata {
 };
 
 static struct cpudata **all_cpu_data;
+
+/**
+ * struct pstate_adjust_policy - Stores static PID configuration data
+ * @sample_rate_ms:    PID calculation sample rate in ms
+ * @sample_rate_ns:    Sample rate calculation in ns
+ * @deadband:          PID deadband
+ * @setpoint:          PID Setpoint
+ * @p_gain_pct:                PID proportional gain
+ * @i_gain_pct:                PID integral gain
+ * @d_gain_pct:                PID derivative gain
+ *
+ * Stores per CPU model static PID configuration data.
+ */
 struct pstate_adjust_policy {
        int sample_rate_ms;
        s64 sample_rate_ns;
@@ -128,6 +216,20 @@ struct pstate_adjust_policy {
        int i_gain_pct;
 };
 
+/**
+ * struct pstate_funcs - Per CPU model specific callbacks
+ * @get_max:           Callback to get maximum non turbo effective P state
+ * @get_max_physical:  Callback to get maximum non turbo physical P state
+ * @get_min:           Callback to get minimum P state
+ * @get_turbo:         Callback to get turbo P state
+ * @get_scaling:       Callback to get frequency scaling factor
+ * @get_val:           Callback to convert P state to actual MSR write value
+ * @get_vid:           Callback to get VID data for Atom platforms
+ * @get_target_pstate: Callback to calculate the next P state to use
+ *
+ * Core and Atom CPU models have different ways to get P state limits. This
+ * structure is used to store those callbacks.
+ */
 struct pstate_funcs {
        int (*get_max)(void);
        int (*get_max_physical)(void);
@@ -139,6 +241,11 @@ struct pstate_funcs {
        int32_t (*get_target_pstate)(struct cpudata *);
 };
 
+/**
+ * struct cpu_defaults - Per CPU model default config data
+ * @pid_policy:        PID config data
+ * @funcs:             Callback function data
+ */
 struct cpu_defaults {
        struct pstate_adjust_policy pid_policy;
        struct pstate_funcs funcs;
@@ -151,6 +258,34 @@ static struct pstate_adjust_policy pid_params;
 static struct pstate_funcs pstate_funcs;
 static int hwp_active;
 
+
+/**
+ * struct perf_limits - Store user and policy limits
+ * @no_turbo:          User requested turbo state from intel_pstate sysfs
+ * @turbo_disabled:    Platform turbo status either from msr
+ *                     MSR_IA32_MISC_ENABLE or when maximum available pstate
+ *                     matches the maximum turbo pstate
+ * @max_perf_pct:      Effective maximum performance limit in percentage; the
+ *                     lower of the limit enforced by the cpufreq policy and
+ *                     the user limit set via intel_pstate sysfs
+ * @min_perf_pct:      Effective minimum performance limit in percentage; the
+ *                     higher of the limit enforced by the cpufreq policy and
+ *                     the user limit set via intel_pstate sysfs
+ * @max_perf:          Scaled value between 0 and 255 for max_perf_pct,
+ *                     used to limit the max pstate
+ * @min_perf:          Scaled value between 0 and 255 for min_perf_pct,
+ *                     used to limit the min pstate
+ * @max_policy_pct:    The maximum performance in percentage enforced by
+ *                     cpufreq setpolicy interface
+ * @max_sysfs_pct:     The maximum performance in percentage enforced by
+ *                     intel pstate sysfs interface
+ * @min_policy_pct:    The minimum performance in percentage enforced by
+ *                     cpufreq setpolicy interface
+ * @min_sysfs_pct:     The minimum performance in percentage enforced by
+ *                     intel pstate sysfs interface
+ *
+ * Storage for user and policy defined limits.
+ */
 struct perf_limits {
        int no_turbo;
        int turbo_disabled;
@@ -318,6 +453,14 @@ static void intel_pstate_hwp_set(const struct cpumask *cpumask)
        }
 }
 
+static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
+{
+       if (hwp_active)
+               intel_pstate_hwp_set(policy->cpus);
+
+       return 0;
+}
+
 static void intel_pstate_hwp_set_online_cpus(void)
 {
        get_online_cpus();
@@ -678,6 +821,11 @@ static int core_get_max_pstate(void)
                        if (err)
                                goto skip_tar;
 
+                       /* For level 1 and 2, bits[23:16] contain the ratio */
+                       if (tdp_ctrl)
+                               tdp_ratio >>= 16;
+
+                       tdp_ratio &= 0xff; /* ratios are only 8 bits long */
                        if (tdp_ratio - 1 == tar) {
                                max_pstate = tar;
                                pr_debug("max_pstate=TAC %x\n", max_pstate);
@@ -910,13 +1058,21 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
        cpu->prev_aperf = aperf;
        cpu->prev_mperf = mperf;
        cpu->prev_tsc = tsc;
-       return true;
+       /*
+        * First time this function is invoked in a given cycle, all of the
+        * previous sample data fields are equal to zero or stale and they must
+        * be populated with meaningful numbers for things to work, so assume
+        * that sample.time will always be reset before setting the utilization
+        * update hook and make the caller skip the sample then.
+        */
+       return !!cpu->last_sample_time;
 }
 
 static inline int32_t get_avg_frequency(struct cpudata *cpu)
 {
-       return div64_u64(cpu->pstate.max_pstate_physical * cpu->sample.aperf *
-               cpu->pstate.scaling, cpu->sample.mperf);
+       return fp_toint(mul_fp(cpu->sample.core_pct_busy,
+                              int_tofp(cpu->pstate.max_pstate_physical *
+                                               cpu->pstate.scaling / 100)));
 }
 
 static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
@@ -959,8 +1115,6 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
        int32_t core_busy, max_pstate, current_pstate, sample_ratio;
        u64 duration_ns;
 
-       intel_pstate_calc_busy(cpu);
-
        /*
         * core_busy is the ratio of actual performance to max
         * max_pstate is the max non turbo pstate available
@@ -984,11 +1138,14 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
         * enough period of time to adjust our busyness.
         */
        duration_ns = cpu->sample.time - cpu->last_sample_time;
-       if ((s64)duration_ns > pid_params.sample_rate_ns * 3
-           && cpu->last_sample_time > 0) {
+       if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
                sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns),
                                      int_tofp(duration_ns));
                core_busy = mul_fp(core_busy, sample_ratio);
+       } else {
+               sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
+               if (sample_ratio < int_tofp(1))
+                       core_busy = 0;
        }
 
        cpu->sample.busy_scaled = core_busy;
@@ -1041,8 +1198,11 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
        if ((s64)delta_ns >= pid_params.sample_rate_ns) {
                bool sample_taken = intel_pstate_sample(cpu, time);
 
-               if (sample_taken && !hwp_active)
-                       intel_pstate_adjust_busy_pstate(cpu);
+               if (sample_taken) {
+                       intel_pstate_calc_busy(cpu);
+                       if (!hwp_active)
+                               intel_pstate_adjust_busy_pstate(cpu);
+               }
        }
 }
 
@@ -1100,10 +1260,8 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
        intel_pstate_get_cpu_pstates(cpu);
 
        intel_pstate_busy_pid_reset(cpu);
-       intel_pstate_sample(cpu, 0);
 
        cpu->update_util.func = intel_pstate_update_util;
-       cpufreq_set_update_util_data(cpunum, &cpu->update_util);
 
        pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);
 
@@ -1122,22 +1280,54 @@ static unsigned int intel_pstate_get(unsigned int cpu_num)
        return get_avg_frequency(cpu);
 }
 
+static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
+{
+       struct cpudata *cpu = all_cpu_data[cpu_num];
+
+       /* Prevent intel_pstate_update_util() from using stale data. */
+       cpu->sample.time = 0;
+       cpufreq_set_update_util_data(cpu_num, &cpu->update_util);
+}
+
+static void intel_pstate_clear_update_util_hook(unsigned int cpu)
+{
+       cpufreq_set_update_util_data(cpu, NULL);
+       synchronize_sched();
+}
+
+static void intel_pstate_set_performance_limits(struct perf_limits *limits)
+{
+       limits->no_turbo = 0;
+       limits->turbo_disabled = 0;
+       limits->max_perf_pct = 100;
+       limits->max_perf = int_tofp(1);
+       limits->min_perf_pct = 100;
+       limits->min_perf = int_tofp(1);
+       limits->max_policy_pct = 100;
+       limits->max_sysfs_pct = 100;
+       limits->min_policy_pct = 0;
+       limits->min_sysfs_pct = 0;
+}
+
 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 {
        if (!policy->cpuinfo.max_freq)
                return -ENODEV;
 
-       if (policy->policy == CPUFREQ_POLICY_PERFORMANCE &&
-           policy->max >= policy->cpuinfo.max_freq) {
-               pr_debug("intel_pstate: set performance\n");
+       intel_pstate_clear_update_util_hook(policy->cpu);
+
+       if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
                limits = &performance_limits;
-               if (hwp_active)
-                       intel_pstate_hwp_set(policy->cpus);
-               return 0;
+               if (policy->max >= policy->cpuinfo.max_freq) {
+                       pr_debug("intel_pstate: set performance\n");
+                       intel_pstate_set_performance_limits(limits);
+                       goto out;
+               }
+       } else {
+               pr_debug("intel_pstate: set powersave\n");
+               limits = &powersave_limits;
        }
 
-       pr_debug("intel_pstate: set powersave\n");
-       limits = &powersave_limits;
        limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
        limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0 , 100);
        limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
@@ -1163,8 +1353,10 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
        limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
                                  int_tofp(100));
 
-       if (hwp_active)
-               intel_pstate_hwp_set(policy->cpus);
+ out:
+       intel_pstate_set_update_util_hook(policy->cpu);
+
+       intel_pstate_hwp_set_policy(policy);
 
        return 0;
 }
@@ -1187,8 +1379,7 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
 
        pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);
 
-       cpufreq_set_update_util_data(cpu_num, NULL);
-       synchronize_sched();
+       intel_pstate_clear_update_util_hook(cpu_num);
 
        if (hwp_active)
                return;
@@ -1229,6 +1420,7 @@ static struct cpufreq_driver intel_pstate_driver = {
        .flags          = CPUFREQ_CONST_LOOPS,
        .verify         = intel_pstate_verify_policy,
        .setpolicy      = intel_pstate_set_policy,
+       .resume         = intel_pstate_hwp_set_policy,
        .get            = intel_pstate_get,
        .init           = intel_pstate_cpu_init,
        .stop_cpu       = intel_pstate_stop_cpu,
@@ -1455,8 +1647,7 @@ out:
        get_online_cpus();
        for_each_online_cpu(cpu) {
                if (all_cpu_data[cpu]) {
-                       cpufreq_set_update_util_data(cpu, NULL);
-                       synchronize_sched();
+                       intel_pstate_clear_update_util_hook(cpu);
                        kfree(all_cpu_data[cpu]);
                }
        }
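
A pattern worth noting in this intel_pstate rework: every path that retunes limits now brackets the work with the clear-hook/set-hook pair, where the clear side waits out in-flight callbacks with synchronize_sched() and the set side zeroes sample.time so intel_pstate_sample() treats the next callback as a priming sample (last_sample_time == 0 makes it return false). Condensed from the hunks above:

    intel_pstate_clear_update_util_hook(policy->cpu);   /* quiesce */

    /* ... safely rewrite limits and per-CPU state here ... */

    intel_pstate_set_update_util_hook(policy->cpu);     /* re-arm */
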
index a9c659f589747a2140f2b91e0c07cd7fe1cbcb69..04042038ec4b75e78631eda625f0f1bb2a4b4d95 100644 (file)
@@ -259,6 +259,10 @@ static int sti_cpufreq_init(void)
 {
        int ret;
 
+       if ((!of_machine_is_compatible("st,stih407")) &&
+               (!of_machine_is_compatible("st,stih410")))
+               return -ENODEV;
+
        ddata.cpu = get_cpu_device(0);
        if (!ddata.cpu) {
                dev_err(ddata.cpu, "Failed to get device for CPU0\n");
index 545069d5fdfba3e7ab07f0ec296404c64ad2a977..e342565e8715e95af6c76a80ca393d8fcfada66a 100644 (file)
@@ -50,7 +50,7 @@ static int arm_enter_idle_state(struct cpuidle_device *dev,
                 * call the CPU ops suspend protocol with idle index as a
                 * parameter.
                 */
-               arm_cpuidle_suspend(idx);
+               ret = arm_cpuidle_suspend(idx);
 
                cpu_pm_exit();
        }
index 3d9acc53d2473818d1e37c4af432a4afcbdf4ec4..60fc0fa26fd3b19ab557888d9bd6e61c208fe0f6 100644 (file)
@@ -225,6 +225,9 @@ static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
        struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
        struct ccp_aes_cmac_exp_ctx state;
 
+       /* Don't let anything leak to 'out' */
+       memset(&state, 0, sizeof(state));
+
        state.null_msg = rctx->null_msg;
        memcpy(state.iv, rctx->iv, sizeof(state.iv));
        state.buf_count = rctx->buf_count;
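
Both ccp export fixes close the same infoleak: a struct assembled on the stack field by field still carries old stack bytes in its padding, and copying sizeof(state) out ships those bytes to the caller. The memset-first rule in a standalone form (struct layout hypothetical, chosen so padding exists):

    #include <stdint.h>
    #include <string.h>

    struct exp_ctx {
            uint8_t null_msg;       /* padding bytes follow on most ABIs */
            uint32_t buf_count;
            uint8_t buf[16];
    };

    static void export_state(void *out, const struct exp_ctx *rctx)
    {
            struct exp_ctx state;

            /* Don't let stack garbage leak through padding or unused fields */
            memset(&state, 0, sizeof(state));

            state.null_msg = rctx->null_msg;
            state.buf_count = rctx->buf_count;
            memcpy(state.buf, rctx->buf, sizeof(state.buf));

            memcpy(out, &state, sizeof(state));
    }

    int main(void)
    {
            struct exp_ctx src = { .null_msg = 1, .buf_count = 4,
                                   .buf = "abcd" };
            unsigned char out[sizeof(struct exp_ctx)];

            export_state(out, &src);
            return 0;
    }
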
index b5ad72897dc25b8d11642315f3e087af32d1c845..8f36af62fe951032d22dfd9f7ea0150e43d6360b 100644 (file)
@@ -212,6 +212,9 @@ static int ccp_sha_export(struct ahash_request *req, void *out)
        struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
        struct ccp_sha_exp_ctx state;
 
+       /* Don't let anything leak to 'out' */
+       memset(&state, 0, sizeof(state));
+
        state.type = rctx->type;
        state.msg_bits = rctx->msg_bits;
        state.first = rctx->first;
index 0e82ce3c383e8c6c1f1f39378fbff03cc17e0336..976b01e58afbfd7f6690b47108232540aac0ad6e 100644 (file)
@@ -236,6 +236,8 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
                                 uint32_t vf_mask);
 void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
 void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+int adf_init_pf_wq(void);
+void adf_exit_pf_wq(void);
 #else
 static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
 {
@@ -253,5 +255,14 @@ static inline void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
 static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
 {
 }
+
+static inline int adf_init_pf_wq(void)
+{
+       return 0;
+}
+
+static inline void adf_exit_pf_wq(void)
+{
+}
 #endif
 #endif
index 5c897e6e799408df06e28a0df7363f2fa87cde87..3c3f948290ca057c29edc1297f731abe3b9ebc63 100644 (file)
@@ -462,12 +462,17 @@ static int __init adf_register_ctl_device_driver(void)
        if (adf_init_aer())
                goto err_aer;
 
+       if (adf_init_pf_wq())
+               goto err_pf_wq;
+
        if (qat_crypto_register())
                goto err_crypto_register;
 
        return 0;
 
 err_crypto_register:
+       adf_exit_pf_wq();
+err_pf_wq:
        adf_exit_aer();
 err_aer:
        adf_chr_drv_destroy();
@@ -480,6 +485,7 @@ static void __exit adf_unregister_ctl_device_driver(void)
 {
        adf_chr_drv_destroy();
        adf_exit_aer();
+       adf_exit_pf_wq();
        qat_crypto_unregister();
        adf_clean_vf_map(false);
        mutex_destroy(&adf_ctl_lock);
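
Beyond moving the workqueue to module init, the qat_common change illustrates the kernel's unwind-ordering idiom: each successful init step gets an error label immediately below the next step, so teardown runs in exact reverse order of setup. A skeleton of the pattern with stubbed steps (names hypothetical):

    #include <stdio.h>

    static int step_a(void) { return 0; }
    static int step_b(void) { return 0; }   /* e.g. adf_init_pf_wq() */
    static int step_c(void) { return -1; }  /* simulate a late failure */
    static void undo_a(void) { puts("undo a"); }
    static void undo_b(void) { puts("undo b"); }

    static int init_all(void)
    {
            if (step_a())
                    goto err_a;
            if (step_b())
                    goto err_b;
            if (step_c())
                    goto err_c;
            return 0;

    err_c:
            undo_b();       /* unwind in reverse order of setup */
    err_b:
            undo_a();
    err_a:
            return -1;
    }

    int main(void)
    {
            printf("init_all: %d\n", init_all());
            return 0;
    }
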
index 1117a8b58280a084396f696e77f14b9899386981..38a0415e767da3dea08e673eab1b402d0a978a71 100644 (file)
@@ -119,11 +119,6 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
        int i;
        u32 reg;
 
-       /* Workqueue for PF2VF responses */
-       pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
-       if (!pf2vf_resp_wq)
-               return -ENOMEM;
-
        for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
             i++, vf_info++) {
                /* This ptr will be populated when VFs will be created */
@@ -216,11 +211,6 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
 
        kfree(accel_dev->pf.vf_info);
        accel_dev->pf.vf_info = NULL;
-
-       if (pf2vf_resp_wq) {
-               destroy_workqueue(pf2vf_resp_wq);
-               pf2vf_resp_wq = NULL;
-       }
 }
 EXPORT_SYMBOL_GPL(adf_disable_sriov);
 
@@ -304,3 +294,19 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
        return numvfs;
 }
 EXPORT_SYMBOL_GPL(adf_sriov_configure);
+
+int __init adf_init_pf_wq(void)
+{
+       /* Workqueue for PF2VF responses */
+       pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
+
+       return !pf2vf_resp_wq ? -ENOMEM : 0;
+}
+
+void adf_exit_pf_wq(void)
+{
+       if (pf2vf_resp_wq) {
+               destroy_workqueue(pf2vf_resp_wq);
+               pf2vf_resp_wq = NULL;
+       }
+}
index a0d4a08313ae895d1595428a6e70f4c1c528b6d4..aae05547b924bf86b518245ced599e02c787bf35 100644 (file)
@@ -63,6 +63,14 @@ static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
                ptr->eptr = upper_32_bits(dma_addr);
 }
 
+static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
+                            struct talitos_ptr *src_ptr, bool is_sec1)
+{
+       dst_ptr->ptr = src_ptr->ptr;
+       if (!is_sec1)
+               dst_ptr->eptr = src_ptr->eptr;
+}
+
 static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
                               bool is_sec1)
 {
@@ -1083,21 +1091,20 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
        sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
                              (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
                                                           : DMA_TO_DEVICE);
-
        /* hmac data */
        desc->ptr[1].len = cpu_to_be16(areq->assoclen);
        if (sg_count > 1 &&
            (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
                                         areq->assoclen,
                                         &edesc->link_tbl[tbl_off])) > 1) {
-               tbl_off += ret;
-
                to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
                               sizeof(struct talitos_ptr), 0);
                desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
 
                dma_sync_single_for_device(dev, edesc->dma_link_tbl,
                                           edesc->dma_len, DMA_BIDIRECTIONAL);
+
+               tbl_off += ret;
        } else {
                to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
                desc->ptr[1].j_extent = 0;
@@ -1126,11 +1133,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
        if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
                sg_link_tbl_len += authsize;
 
-       if (sg_count > 1 &&
-           (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen,
-                                        sg_link_tbl_len,
-                                        &edesc->link_tbl[tbl_off])) > 1) {
-               tbl_off += ret;
+       if (sg_count == 1) {
+               to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
+                              areq->assoclen, 0);
+       } else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
+                                               areq->assoclen, sg_link_tbl_len,
+                                               &edesc->link_tbl[tbl_off])) >
+                  1) {
                desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
                to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
                                              tbl_off *
@@ -1138,8 +1147,10 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
                dma_sync_single_for_device(dev, edesc->dma_link_tbl,
                                           edesc->dma_len,
                                           DMA_BIDIRECTIONAL);
-       } else
-               to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
+               tbl_off += ret;
+       } else {
+               copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
+       }
 
        /* cipher out */
        desc->ptr[5].len = cpu_to_be16(cryptlen);
@@ -1151,11 +1162,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 
        edesc->icv_ool = false;
 
-       if (sg_count > 1 &&
-           (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count,
+       if (sg_count == 1) {
+               to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
+                              areq->assoclen, 0);
+       } else if ((sg_count =
+                       sg_to_link_tbl_offset(areq->dst, sg_count,
                                              areq->assoclen, cryptlen,
-                                             &edesc->link_tbl[tbl_off])) >
-           1) {
+                                             &edesc->link_tbl[tbl_off])) > 1) {
                struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
 
                to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
@@ -1178,8 +1191,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
                                           edesc->dma_len, DMA_BIDIRECTIONAL);
 
                edesc->icv_ool = true;
-       } else
-               to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
+       } else {
+               copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
+       }
 
        /* iv out */
        map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
@@ -2629,21 +2643,11 @@ struct talitos_crypto_alg {
        struct talitos_alg_template algt;
 };
 
-static int talitos_cra_init(struct crypto_tfm *tfm)
+static int talitos_init_common(struct talitos_ctx *ctx,
+                              struct talitos_crypto_alg *talitos_alg)
 {
-       struct crypto_alg *alg = tfm->__crt_alg;
-       struct talitos_crypto_alg *talitos_alg;
-       struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
        struct talitos_private *priv;
 
-       if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
-               talitos_alg = container_of(__crypto_ahash_alg(alg),
-                                          struct talitos_crypto_alg,
-                                          algt.alg.hash);
-       else
-               talitos_alg = container_of(alg, struct talitos_crypto_alg,
-                                          algt.alg.crypto);
-
        /* update context with ptr to dev */
        ctx->dev = talitos_alg->dev;
 
@@ -2661,10 +2665,33 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
        return 0;
 }
 
+static int talitos_cra_init(struct crypto_tfm *tfm)
+{
+       struct crypto_alg *alg = tfm->__crt_alg;
+       struct talitos_crypto_alg *talitos_alg;
+       struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
+               talitos_alg = container_of(__crypto_ahash_alg(alg),
+                                          struct talitos_crypto_alg,
+                                          algt.alg.hash);
+       else
+               talitos_alg = container_of(alg, struct talitos_crypto_alg,
+                                          algt.alg.crypto);
+
+       return talitos_init_common(ctx, talitos_alg);
+}
+
 static int talitos_cra_init_aead(struct crypto_aead *tfm)
 {
-       talitos_cra_init(crypto_aead_tfm(tfm));
-       return 0;
+       struct aead_alg *alg = crypto_aead_alg(tfm);
+       struct talitos_crypto_alg *talitos_alg;
+       struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
+
+       talitos_alg = container_of(alg, struct talitos_crypto_alg,
+                                  algt.alg.aead);
+
+       return talitos_init_common(ctx, talitos_alg);
 }
 
 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
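
talitos_init_common() exists because each tfm type needs a different container_of() walk from its generic alg to the wrapping talitos_crypto_alg before sharing the same init body. The container_of() idiom itself, outside the kernel:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct alg { const char *name; };

    struct wrapped_alg {
            int priority;
            struct alg alg;         /* embedded generic member */
    };

    int main(void)
    {
            struct wrapped_alg w = { .priority = 3000, .alg = { "demo" } };
            struct alg *a = &w.alg; /* all a callback gets handed */

            /* Recover the wrapper from the embedded member */
            struct wrapped_alg *back =
                    container_of(a, struct wrapped_alg, alg);
            printf("%s prio %d\n", back->alg.name, back->priority);
            return 0;
    }
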
index 5ad0ec1f0e29f750eee5c70889df5427d7413556..97199b3c25a22cd907c1a73648c76a96a594b370 100644 (file)
@@ -130,26 +130,14 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
 static void dwc_initialize(struct dw_dma_chan *dwc)
 {
        struct dw_dma *dw = to_dw_dma(dwc->chan.device);
-       struct dw_dma_slave *dws = dwc->chan.private;
        u32 cfghi = DWC_CFGH_FIFO_MODE;
        u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
 
        if (dwc->initialized == true)
                return;
 
-       if (dws) {
-               /*
-                * We need controller-specific data to set up slave
-                * transfers.
-                */
-               BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
-
-               cfghi |= DWC_CFGH_DST_PER(dws->dst_id);
-               cfghi |= DWC_CFGH_SRC_PER(dws->src_id);
-       } else {
-               cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
-               cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
-       }
+       cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
+       cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
 
        channel_writel(dwc, CFG_LO, cfglo);
        channel_writel(dwc, CFG_HI, cfghi);
@@ -941,7 +929,7 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
        struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
        struct dw_dma_slave *dws = param;
 
-       if (!dws || dws->dma_dev != chan->device->dev)
+       if (dws->dma_dev != chan->device->dev)
                return false;
 
        /* We have to copy data since dws can be temporary storage */
@@ -1165,6 +1153,14 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
         * doesn't mean what you think it means), and status writeback.
         */
 
+       /*
+        * We need controller-specific data to set up slave transfers.
+        */
+       if (chan->private && !dw_dma_filter(chan, chan->private)) {
+               dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
+               return -EINVAL;
+       }
+
        /* Enable controller here if needed */
        if (!dw->in_use)
                dw_dma_on(dw);
@@ -1226,6 +1222,14 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
        spin_lock_irqsave(&dwc->lock, flags);
        list_splice_init(&dwc->free_list, &list);
        dwc->descs_allocated = 0;
+
+       /* Clear custom channel configuration */
+       dwc->src_id = 0;
+       dwc->dst_id = 0;
+
+       dwc->src_master = 0;
+       dwc->dst_master = 0;
+
        dwc->initialized = false;
 
        /* Disable interrupts */
index ee3463e774f8e4dc5c46e2614719f9d0723cfd06..04070baab78ab0cc7772085dd70828741317dd5a 100644 (file)
@@ -1238,6 +1238,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
        struct edma_desc *edesc;
        dma_addr_t src_addr, dst_addr;
        enum dma_slave_buswidth dev_width;
+       bool use_intermediate = false;
        u32 burst;
        int i, ret, nslots;
 
@@ -1279,8 +1280,21 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
         * but the synchronization is difficult to achieve with Cyclic and
         * cannot be guaranteed, so we error out early.
         */
-       if (nslots > MAX_NR_SG)
-               return NULL;
+       if (nslots > MAX_NR_SG) {
+               /*
+                * If the burst and period sizes are the same, we can put
+                * the full buffer into a single period and activate
+                * intermediate interrupts. This will produce interrupts
+                * after each burst, which is also after each desired period.
+                */
+               if (burst == period_len) {
+                       period_len = buf_len;
+                       nslots = 2;
+                       use_intermediate = true;
+               } else {
+                       return NULL;
+               }
+       }
 
        edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
                        GFP_ATOMIC);
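A standalone sketch of the slot-count fallback above, assuming MAX_NR_SG is 16 and that nslots is the period count plus one link slot as in this driver (plain ISO C, not kernel code):

    #include <stdbool.h>
    #include <stddef.h>

    #define MAX_NR_SG 16

    static int pick_nslots(size_t buf_len, size_t *period_len,
                           size_t burst, bool *use_intermediate)
    {
            size_t nslots = buf_len / *period_len + 1;

            if (nslots <= MAX_NR_SG)
                    return (int)nslots;
            if (burst != *period_len)
                    return -1;              /* error out, as before */
            *period_len = buf_len;          /* one period = whole buffer */
            *use_intermediate = true;       /* one interrupt per burst */
            return 2;
    }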
@@ -1358,8 +1372,13 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
                /*
                 * Enable period interrupt only if it is requested
                 */
-               if (tx_flags & DMA_PREP_INTERRUPT)
+               if (tx_flags & DMA_PREP_INTERRUPT) {
                        edesc->pset[i].param.opt |= TCINTEN;
+
+                       /* Also enable intermediate interrupts if necessary */
+                       if (use_intermediate)
+                               edesc->pset[i].param.opt |= ITCINTEN;
+               }
        }
 
        /* Place the cyclic channel to highest priority queue */
@@ -1570,32 +1589,6 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable)
-{
-       struct platform_device *tc_pdev;
-       int ret;
-
-       if (!IS_ENABLED(CONFIG_OF) || !tc)
-               return;
-
-       tc_pdev = of_find_device_by_node(tc->node);
-       if (!tc_pdev) {
-               pr_err("%s: TPTC device is not found\n", __func__);
-               return;
-       }
-       if (!pm_runtime_enabled(&tc_pdev->dev))
-               pm_runtime_enable(&tc_pdev->dev);
-
-       if (enable)
-               ret = pm_runtime_get_sync(&tc_pdev->dev);
-       else
-               ret = pm_runtime_put_sync(&tc_pdev->dev);
-
-       if (ret < 0)
-               pr_err("%s: pm_runtime_%s_sync() failed for %s\n", __func__,
-                      enable ? "get" : "put", dev_name(&tc_pdev->dev));
-}
-
 /* Alloc channel resources */
 static int edma_alloc_chan_resources(struct dma_chan *chan)
 {
@@ -1632,8 +1625,6 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
                EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id,
                echan->hw_triggered ? "HW" : "SW");
 
-       edma_tc_set_pm_state(echan->tc, true);
-
        return 0;
 
 err_slot:
@@ -1670,7 +1661,6 @@ static void edma_free_chan_resources(struct dma_chan *chan)
                echan->alloced = false;
        }
 
-       edma_tc_set_pm_state(echan->tc, false);
        echan->tc = NULL;
        echan->hw_triggered = false;
 
@@ -2417,10 +2407,8 @@ static int edma_pm_suspend(struct device *dev)
        int i;
 
        for (i = 0; i < ecc->num_channels; i++) {
-               if (echan[i].alloced) {
+               if (echan[i].alloced)
                        edma_setup_interrupt(&echan[i], false);
-                       edma_tc_set_pm_state(echan[i].tc, false);
-               }
        }
 
        return 0;
@@ -2450,8 +2438,6 @@ static int edma_pm_resume(struct device *dev)
 
                        /* Set up channel -> slot mapping for the entry slot */
                        edma_set_chmap(&echan[i], echan[i].slot[0]);
-
-                       edma_tc_set_pm_state(echan[i].tc, true);
                }
        }
 
@@ -2475,7 +2461,8 @@ static struct platform_driver edma_driver = {
 
 static int edma_tptc_probe(struct platform_device *pdev)
 {
-       return 0;
+       pm_runtime_enable(&pdev->dev);
+       return pm_runtime_get_sync(&pdev->dev);
 }
 
 static struct platform_driver edma_tptc_driver = {
index eef145edb936816f3686d343742a709392a5dffa..ee510515ce187af75d3ba873b65d5de26cc3acb4 100644 (file)
@@ -64,10 +64,10 @@ static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
 
        if (hsuc->direction == DMA_MEM_TO_DEV) {
                bsr = config->dst_maxburst;
-               mtsr = config->dst_addr_width;
+               mtsr = config->src_addr_width;
        } else if (hsuc->direction == DMA_DEV_TO_MEM) {
                bsr = config->src_maxburst;
-               mtsr = config->src_addr_width;
+               mtsr = config->dst_addr_width;
        }
 
        hsu_chan_disable(hsuc);
@@ -135,7 +135,7 @@ static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
        sr = hsu_chan_readl(hsuc, HSU_CH_SR);
        spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
 
-       return sr;
+       return sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
 }
 
 irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
@@ -254,10 +254,13 @@ static void hsu_dma_issue_pending(struct dma_chan *chan)
 static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
 {
        struct hsu_dma_desc *desc = hsuc->desc;
-       size_t bytes = desc->length;
+       size_t bytes = 0;
        int i;
 
-       i = desc->active % HSU_DMA_CHAN_NR_DESC;
+       for (i = desc->active; i < desc->nents; i++)
+               bytes += desc->sg[i].len;
+
+       i = HSU_DMA_CHAN_NR_DESC - 1;
        do {
                bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
        } while (--i >= 0);
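The residue computed above is now the sum of the sg entries that have not been submitted yet plus whatever each of the HSU_DMA_CHAN_NR_DESC hardware descriptor slots still reports in its transfer-size register, rather than starting from the whole desc->length and reading only from the active slot downwards.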
index 578a8ee8cd054429b3e3fd6da2341b49020721cc..6b070c22b1dfc2bca1dc04ca12218de17d1f0d70 100644 (file)
@@ -41,6 +41,9 @@
 #define HSU_CH_SR_DESCTO(x)    BIT(8 + (x))
 #define HSU_CH_SR_DESCTO_ANY   (BIT(11) | BIT(10) | BIT(9) | BIT(8))
 #define HSU_CH_SR_CHE          BIT(15)
+#define HSU_CH_SR_DESCE(x)     BIT(16 + (x))
+#define HSU_CH_SR_DESCE_ANY    (BIT(19) | BIT(18) | BIT(17) | BIT(16))
+#define HSU_CH_SR_CDESC_ANY    (BIT(31) | BIT(30))
 
 /* Bits in HSU_CH_CR */
 #define HSU_CH_CR_CHA          BIT(0)
index 43bd5aee7ffe093d78e6f8c1f43b75084f8f1c8e..1e984e18c1266b74fc01c247fad36f3e132ad587 100644 (file)
@@ -48,6 +48,7 @@ struct omap_chan {
        unsigned dma_sig;
        bool cyclic;
        bool paused;
+       bool running;
 
        int dma_ch;
        struct omap_desc *desc;
@@ -294,6 +295,8 @@ static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
 
        /* Enable channel */
        omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
+
+       c->running = true;
 }
 
 static void omap_dma_stop(struct omap_chan *c)
@@ -355,6 +358,8 @@ static void omap_dma_stop(struct omap_chan *c)
 
                omap_dma_chan_write(c, CLNK_CTRL, val);
        }
+
+       c->running = false;
 }
 
 static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
@@ -673,15 +678,20 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
        struct omap_chan *c = to_omap_dma_chan(chan);
        struct virt_dma_desc *vd;
        enum dma_status ret;
-       uint32_t ccr;
        unsigned long flags;
 
-       ccr = omap_dma_chan_read(c, CCR);
-       /* The channel is no longer active, handle the completion right away */
-       if (!(ccr & CCR_ENABLE))
-               omap_dma_callback(c->dma_ch, 0, c);
-
        ret = dma_cookie_status(chan, cookie, txstate);
+
+       if (!c->paused && c->running) {
+               uint32_t ccr = omap_dma_chan_read(c, CCR);
+               /*
+                * The channel is no longer active, set the return value
+                * accordingly
+                */
+               if (!(ccr & CCR_ENABLE))
+                       ret = DMA_COMPLETE;
+       }
+
        if (ret == DMA_COMPLETE || !txstate)
                return ret;
 
@@ -945,9 +955,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
        d->ccr = c->ccr;
        d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;
 
-       d->cicr = CICR_DROP_IE;
-       if (tx_flags & DMA_PREP_INTERRUPT)
-               d->cicr |= CICR_FRAME_IE;
+       d->cicr = CICR_DROP_IE | CICR_FRAME_IE;
 
        d->csdp = data_type;
 
index 0ee0321868d36b3054acac0a02356e09cc0749a1..ef67f278e076da61c3e08428a70bffd0d8f9341c 100644 (file)
@@ -1236,7 +1236,7 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
        struct xilinx_vdma_device *xdev = ofdma->of_dma_data;
        int chan_id = dma_spec->args[0];
 
-       if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE)
+       if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE || !xdev->chan[chan_id])
                return NULL;
 
        return dma_get_slave_channel(&xdev->chan[chan_id]->common);
index 01087a38da226d08bd7e08da5a183885fbf420b1..792bdae2b91dfcf28a8a7fca199212d4990aa8af 100644 (file)
@@ -1866,7 +1866,7 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
 
        i7_dev = get_i7core_dev(mce->socketid);
        if (!i7_dev)
-               return NOTIFY_BAD;
+               return NOTIFY_DONE;
 
        mci = i7_dev->mci;
        pvt = mci->pvt_info;
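The same change appears in sb_edac below: NOTIFY_BAD carries NOTIFY_STOP_MASK and aborts the notifier chain, so returning it for an MCE that simply belongs to no known memory controller starved every notifier registered after this one; NOTIFY_DONE lets the chain continue.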
index 93f0d4120289fa92d01a6873fac6d2efd37337e7..8bf745d2da7e1750571860d58be00d053579f301 100644 (file)
@@ -362,6 +362,7 @@ struct sbridge_pvt {
 
        /* Memory type detection */
        bool                    is_mirrored, is_lockstep, is_close_pg;
+       bool                    is_chan_hash;
 
        /* Fifo double buffers */
        struct mce              mce_entry[MCE_LOG_LEN];
@@ -1060,6 +1061,20 @@ static inline u8 sad_pkg_ha(u8 pkg)
        return (pkg >> 2) & 0x1;
 }
 
+static int haswell_chan_hash(int idx, u64 addr)
+{
+       int i;
+
+       /*
+        * XOR even bits from 12:26 to bit0 of idx,
+        *     odd bits from 13:27 to bit1
+        */
+       for (i = 12; i < 28; i += 2)
+               idx ^= (addr >> i) & 3;
+
+       return idx;
+}
+
 /****************************************************************************
                        Memory check routines
  ****************************************************************************/
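A userspace spot-check of the fold above (plain ISO C harness, not driver code): even address bits 12..26 land in idx bit 0 and odd bits 13..27 in idx bit 1, two bits per step.

    #include <stdint.h>
    #include <stdio.h>

    static int haswell_chan_hash(int idx, uint64_t addr)
    {
            int i;

            for (i = 12; i < 28; i += 2)
                    idx ^= (addr >> i) & 3;

            return idx;
    }

    int main(void)
    {
            /* 0x3000 sets address bits 12 and 13, flipping both idx bits */
            printf("%d\n", haswell_chan_hash(0, 0x3000));   /* prints 3 */
            return 0;
    }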
@@ -1616,6 +1631,10 @@ static int get_dimm_config(struct mem_ctl_info *mci)
                KNL_MAX_CHANNELS : NUM_CHANNELS;
        u64 knl_mc_sizes[KNL_MAX_CHANNELS];
 
+       if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
+               pci_read_config_dword(pvt->pci_ha0, HASWELL_HASYSDEFEATURE2, &reg);
+               pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
+       }
        if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL ||
                        pvt->info.type == KNIGHTS_LANDING)
                pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
@@ -2118,12 +2137,15 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
        }
 
        ch_way = TAD_CH(reg) + 1;
-       sck_way = 1 << TAD_SOCK(reg);
+       sck_way = TAD_SOCK(reg);
 
        if (ch_way == 3)
                idx = addr >> 6;
-       else
+       else {
                idx = (addr >> (6 + sck_way + shiftup)) & 0x3;
+               if (pvt->is_chan_hash)
+                       idx = haswell_chan_hash(idx, addr);
+       }
        idx = idx % ch_way;
 
        /*
@@ -2157,7 +2179,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
                switch(ch_way) {
                case 2:
                case 4:
-                       sck_xch = 1 << sck_way * (ch_way >> 1);
+                       sck_xch = (1 << sck_way) * (ch_way >> 1);
                        break;
                default:
                        sprintf(msg, "Invalid mirror set. Can't decode addr");
@@ -2193,7 +2215,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
 
        ch_addr = addr - offset;
        ch_addr >>= (6 + shiftup);
-       ch_addr /= ch_way * sck_way;
+       ch_addr /= sck_xch;
        ch_addr <<= (6 + shiftup);
        ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
 
@@ -3146,7 +3168,7 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
 
        mci = get_mci_for_node_id(mce->socketid);
        if (!mci)
-               return NOTIFY_BAD;
+               return NOTIFY_DONE;
        pvt = mci->pvt_info;
 
        /*
index 841a4b58639543a4d5107dfaab9544b0a5425d37..8b3226dca1d9d25e004bbb2a030a7d77e9ff48d2 100644 (file)
@@ -348,8 +348,7 @@ static int palmas_usb_probe(struct platform_device *pdev)
                                                palmas_vbus_irq_handler,
                                                IRQF_TRIGGER_FALLING |
                                                IRQF_TRIGGER_RISING |
-                                               IRQF_ONESHOT |
-                                               IRQF_EARLY_RESUME,
+                                               IRQF_ONESHOT,
                                                "palmas_usb_vbus",
                                                palmas_usb);
                if (status < 0) {
index f4ea80d602f73bd6bc5adac2a32bbbe2b58de716..309311b1faae18752a9bf24b0d350933aad4627c 100644 (file)
@@ -1023,7 +1023,7 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
 
        spin_unlock_irqrestore(&dev->lock, flags);
 
-       dev->netdev->trans_start = jiffies;
+       netif_trans_update(dev->netdev);
  out:
        if (free)
                fwnet_free_ptask(ptask);
index aa1f743152a2fd317a82c8ac7ca3daf36e865b46..8714f8c271babfff5566765605b5429f33bfa5d8 100644 (file)
@@ -203,7 +203,19 @@ void __init efi_init(void)
 
        reserve_regions();
        early_memunmap(memmap.map, params.mmap_size);
-       memblock_mark_nomap(params.mmap & PAGE_MASK,
-                           PAGE_ALIGN(params.mmap_size +
-                                      (params.mmap & ~PAGE_MASK)));
+
+       if (IS_ENABLED(CONFIG_ARM)) {
+               /*
+                * ARM currently does not allow ioremap_cache() to be called on
+                * memory regions that are covered by struct page. So remove the
+                * UEFI memory map from the linear mapping.
+                */
+               memblock_mark_nomap(params.mmap & PAGE_MASK,
+                                   PAGE_ALIGN(params.mmap_size +
+                                              (params.mmap & ~PAGE_MASK)));
+       } else {
+               memblock_reserve(params.mmap & PAGE_MASK,
+                                PAGE_ALIGN(params.mmap_size +
+                                           (params.mmap & ~PAGE_MASK)));
+       }
 }
index 0ac594c0a234c81e025a0ee980c92a8596a2c017..34b741940494a24682e6cbf0e96d3246468a4fd6 100644 (file)
@@ -202,29 +202,44 @@ static const struct variable_validate variable_validate[] = {
        { NULL_GUID, "", NULL },
 };
 
+/*
+ * Check if @var_name matches the pattern given in @match_name.
+ *
+ * @var_name: an array of @len non-NUL characters.
+ * @match_name: a NUL-terminated pattern string, optionally ending in "*". A
+ *              final "*" character matches any trailing characters in @var_name,
+ *              including the case when there are none left in @var_name.
+ * @match: on output, the number of non-wildcard characters in @match_name
+ *         that @var_name matches, regardless of the return value.
+ * @return: whether @var_name fully matches @match_name.
+ */
 static bool
 variable_matches(const char *var_name, size_t len, const char *match_name,
                 int *match)
 {
        for (*match = 0; ; (*match)++) {
                char c = match_name[*match];
-               char u = var_name[*match];
 
-               /* Wildcard in the matching name means we've matched */
-               if (c == '*')
+               switch (c) {
+               case '*':
+                       /* Wildcard in @match_name means we've matched. */
                        return true;
 
-               /* Case sensitive match */
-               if (!c && *match == len)
-                       return true;
+               case '\0':
+                       /* @match_name has ended. Has @var_name too? */
+                       return (*match == len);
 
-               if (c != u)
+               default:
+                       /*
+                        * We've reached a non-wildcard char in @match_name.
+                        * Continue only if there's an identical character in
+                        * @var_name.
+                        */
+                       if (*match < len && c == var_name[*match])
+                               continue;
                        return false;
-
-               if (!c)
-                       return true;
+               }
        }
-       return true;
 }
 
 bool
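A hedged usage sketch of the rewritten matcher (userspace harness with the same body as above; the pattern and variable names are illustrative):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    static bool variable_matches(const char *var_name, size_t len,
                                 const char *match_name, int *match)
    {
            for (*match = 0; ; (*match)++) {
                    char c = match_name[*match];

                    switch (c) {
                    case '*':
                            return true;
                    case '\0':
                            return *match == len;
                    default:
                            if (*match < len && c == var_name[*match])
                                    continue;   /* advances *match */
                            return false;
                    }
            }
    }

    int main(void)
    {
            int m;

            printf("%d\n", variable_matches("Boot0001", 8, "Boot*", &m)); /* 1 */
            printf("%d\n", variable_matches("Boot", 4, "Boot*", &m));     /* 1 */
            printf("%d\n", variable_matches("Timeout", 7, "Boot*", &m));  /* 0 */
            return 0;
    }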
index 11bfee8b79a9f65418bcbe53dfc708edb220e69d..b5d05807e6ecd509058fcb85a3f4810ce17a8adc 100644 (file)
@@ -360,7 +360,7 @@ static struct cpuidle_ops psci_cpuidle_ops __initdata = {
        .init = psci_dt_cpu_init_idle,
 };
 
-CPUIDLE_METHOD_OF_DECLARE(psci, "arm,psci", &psci_cpuidle_ops);
+CPUIDLE_METHOD_OF_DECLARE(psci, "psci", &psci_cpuidle_ops);
 #endif
 #endif
 
index fedbff55a7f3848872b7d0196086bc0f56caa0c7..1b95475b6aefefd5144bca29ab464304826c6377 100644 (file)
@@ -77,12 +77,28 @@ static inline u16 fw_cfg_sel_endianness(u16 key)
 static inline void fw_cfg_read_blob(u16 key,
                                    void *buf, loff_t pos, size_t count)
 {
+       u32 glk = -1U;
+       acpi_status status;
+
+       /* If we have ACPI, ensure mutual exclusion against any potential
+        * device access by the firmware, e.g. via AML methods:
+        */
+       status = acpi_acquire_global_lock(ACPI_WAIT_FOREVER, &glk);
+       if (ACPI_FAILURE(status) && status != AE_NOT_CONFIGURED) {
+               /* Should never get here */
+               WARN(1, "fw_cfg_read_blob: Failed to lock ACPI!\n");
+               memset(buf, 0, count);
+               return;
+       }
+
        mutex_lock(&fw_cfg_dev_lock);
        iowrite16(fw_cfg_sel_endianness(key), fw_cfg_reg_ctrl);
        while (pos-- > 0)
                ioread8(fw_cfg_reg_data);
        ioread8_rep(fw_cfg_reg_data, buf, count);
        mutex_unlock(&fw_cfg_dev_lock);
+
+       acpi_release_global_lock(glk);
 }
 
 /* clean up fw_cfg device i/o */
@@ -727,12 +743,18 @@ device_param_cb(mmio, &fw_cfg_cmdline_param_ops, NULL, S_IRUSR);
 
 static int __init fw_cfg_sysfs_init(void)
 {
+       int ret;
+
        /* create /sys/firmware/qemu_fw_cfg/ top level directory */
        fw_cfg_top_ko = kobject_create_and_add("qemu_fw_cfg", firmware_kobj);
        if (!fw_cfg_top_ko)
                return -ENOMEM;
 
-       return platform_driver_register(&fw_cfg_sysfs_driver);
+       ret = platform_driver_register(&fw_cfg_sysfs_driver);
+       if (ret)
+               fw_cfg_kobj_cleanup(fw_cfg_top_ko);
+
+       return ret;
 }
 
 static void __exit fw_cfg_sysfs_exit(void)
index d0d3065a755767dcfeec9108921ac431fc490235..e66084c295fbff18a115927ecc28822aae6b3e63 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/i2c.h>
 #include <linux/platform_data/pca953x.h>
 #include <linux/slab.h>
+#include <asm/unaligned.h>
 #include <linux/of_platform.h>
 #include <linux/acpi.h>
 
@@ -159,7 +160,7 @@ static int pca953x_write_regs(struct pca953x_chip *chip, int reg, u8 *val)
                switch (chip->chip_type) {
                case PCA953X_TYPE:
                        ret = i2c_smbus_write_word_data(chip->client,
-                                                       reg << 1, (u16) *val);
+                           reg << 1, cpu_to_le16(get_unaligned((u16 *)val)));
                        break;
                case PCA957X_TYPE:
                        ret = i2c_smbus_write_byte_data(chip->client, reg << 1,
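The old PCA953X branch cast *val, a single u8, to u16, so only the first byte ever reached the chip; the fix reads both bytes regardless of buffer alignment and pins the wire order with cpu_to_le16(). A userspace equivalent of the unaligned load, as an illustrative sketch:

    #include <stdint.h>
    #include <string.h>

    static uint16_t get_unaligned_u16(const uint8_t *p)
    {
            uint16_t v;

            memcpy(&v, p, sizeof(v));   /* legal at any alignment */
            return v;                   /* host order, like get_unaligned() */
    }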
index b2b7b78664b8058d9be9314e2298765fe772c0e9..76ac906b4d78d262ab5473771e377c9c244b76b3 100644 (file)
@@ -283,8 +283,8 @@ static int pxa_gpio_direction_output(struct gpio_chip *chip,
        writel_relaxed(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));
 
        ret = pinctrl_gpio_direction_output(chip->base + offset);
-       if (!ret)
-               return 0;
+       if (ret)
+               return ret;
 
        spin_lock_irqsave(&gpio_lock, flags);
 
index d9ab0cd1d205963528d7022d66213c26bf8304e5..4d9a315cfd43ea8fb9d97b0578f9c6bd4c38ef00 100644 (file)
@@ -196,44 +196,6 @@ static int gpio_rcar_irq_set_wake(struct irq_data *d, unsigned int on)
        return 0;
 }
 
-static void gpio_rcar_irq_bus_lock(struct irq_data *d)
-{
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-       struct gpio_rcar_priv *p = gpiochip_get_data(gc);
-
-       pm_runtime_get_sync(&p->pdev->dev);
-}
-
-static void gpio_rcar_irq_bus_sync_unlock(struct irq_data *d)
-{
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-       struct gpio_rcar_priv *p = gpiochip_get_data(gc);
-
-       pm_runtime_put(&p->pdev->dev);
-}
-
-
-static int gpio_rcar_irq_request_resources(struct irq_data *d)
-{
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-       struct gpio_rcar_priv *p = gpiochip_get_data(gc);
-       int error;
-
-       error = pm_runtime_get_sync(&p->pdev->dev);
-       if (error < 0)
-               return error;
-
-       return 0;
-}
-
-static void gpio_rcar_irq_release_resources(struct irq_data *d)
-{
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-       struct gpio_rcar_priv *p = gpiochip_get_data(gc);
-
-       pm_runtime_put(&p->pdev->dev);
-}
-
 static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
 {
        struct gpio_rcar_priv *p = dev_id;
@@ -280,32 +242,18 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip,
 
 static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset)
 {
-       struct gpio_rcar_priv *p = gpiochip_get_data(chip);
-       int error;
-
-       error = pm_runtime_get_sync(&p->pdev->dev);
-       if (error < 0)
-               return error;
-
-       error = pinctrl_request_gpio(chip->base + offset);
-       if (error)
-               pm_runtime_put(&p->pdev->dev);
-
-       return error;
+       return pinctrl_request_gpio(chip->base + offset);
 }
 
 static void gpio_rcar_free(struct gpio_chip *chip, unsigned offset)
 {
-       struct gpio_rcar_priv *p = gpiochip_get_data(chip);
-
        pinctrl_free_gpio(chip->base + offset);
 
-       /* Set the GPIO as an input to ensure that the next GPIO request won't
+       /*
+        * Set the GPIO as an input to ensure that the next GPIO request won't
         * drive the GPIO pin as an output.
         */
        gpio_rcar_config_general_input_output_mode(chip, offset, false);
-
-       pm_runtime_put(&p->pdev->dev);
 }
 
 static int gpio_rcar_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -452,6 +400,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
        }
 
        pm_runtime_enable(dev);
+       pm_runtime_get_sync(dev);
 
        io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -488,10 +437,6 @@ static int gpio_rcar_probe(struct platform_device *pdev)
        irq_chip->irq_unmask = gpio_rcar_irq_enable;
        irq_chip->irq_set_type = gpio_rcar_irq_set_type;
        irq_chip->irq_set_wake = gpio_rcar_irq_set_wake;
-       irq_chip->irq_bus_lock = gpio_rcar_irq_bus_lock;
-       irq_chip->irq_bus_sync_unlock = gpio_rcar_irq_bus_sync_unlock;
-       irq_chip->irq_request_resources = gpio_rcar_irq_request_resources;
-       irq_chip->irq_release_resources = gpio_rcar_irq_release_resources;
        irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND;
 
        ret = gpiochip_add_data(gpio_chip, p);
@@ -522,6 +467,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
 err1:
        gpiochip_remove(gpio_chip);
 err0:
+       pm_runtime_put(dev);
        pm_runtime_disable(dev);
        return ret;
 }
@@ -532,6 +478,7 @@ static int gpio_rcar_remove(struct platform_device *pdev)
 
        gpiochip_remove(&p->gpio_chip);
 
+       pm_runtime_put(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        return 0;
 }
index 682070d20f001dce1aef51147d99aca97cb2938c..2dc52585e3f2f1cc79664beba50ef453dcea1950 100644 (file)
@@ -977,7 +977,7 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
                lookup = kmalloc(sizeof(*lookup), GFP_KERNEL);
                if (lookup) {
                        lookup->adev = adev;
-                       lookup->con_id = con_id;
+                       lookup->con_id = kstrdup(con_id, GFP_KERNEL);
                        list_add_tail(&lookup->node, &acpi_crs_lookup_list);
                }
        }
index 72065532c1c7bfda9796c55e2d350d2a1da00349..b747c76fd2b1f4f8f1e117b6ce2836525b1166fd 100644 (file)
@@ -68,6 +68,7 @@ LIST_HEAD(gpio_devices);
 static void gpiochip_free_hogs(struct gpio_chip *chip);
 static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
 
+static bool gpiolib_initialized;
 
 static inline void desc_set_label(struct gpio_desc *d, const char *label)
 {
@@ -440,9 +441,63 @@ static void gpiodevice_release(struct device *dev)
        cdev_del(&gdev->chrdev);
        list_del(&gdev->list);
        ida_simple_remove(&gpio_ida, gdev->id);
+       kfree(gdev->label);
+       kfree(gdev->descs);
        kfree(gdev);
 }
 
+static int gpiochip_setup_dev(struct gpio_device *gdev)
+{
+       int status;
+
+       cdev_init(&gdev->chrdev, &gpio_fileops);
+       gdev->chrdev.owner = THIS_MODULE;
+       gdev->chrdev.kobj.parent = &gdev->dev.kobj;
+       gdev->dev.devt = MKDEV(MAJOR(gpio_devt), gdev->id);
+       status = cdev_add(&gdev->chrdev, gdev->dev.devt, 1);
+       if (status < 0)
+               chip_warn(gdev->chip, "failed to add char device %d:%d\n",
+                         MAJOR(gpio_devt), gdev->id);
+       else
+               chip_dbg(gdev->chip, "added GPIO chardev (%d:%d)\n",
+                        MAJOR(gpio_devt), gdev->id);
+       status = device_add(&gdev->dev);
+       if (status)
+               goto err_remove_chardev;
+
+       status = gpiochip_sysfs_register(gdev);
+       if (status)
+               goto err_remove_device;
+
+       /* From this point, the .release() function cleans up gpio_device */
+       gdev->dev.release = gpiodevice_release;
+       get_device(&gdev->dev);
+       pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n",
+                __func__, gdev->base, gdev->base + gdev->ngpio - 1,
+                dev_name(&gdev->dev), gdev->chip->label ? : "generic");
+
+       return 0;
+
+err_remove_device:
+       device_del(&gdev->dev);
+err_remove_chardev:
+       cdev_del(&gdev->chrdev);
+       return status;
+}
+
+static void gpiochip_setup_devs(void)
+{
+       struct gpio_device *gdev;
+       int err;
+
+       list_for_each_entry(gdev, &gpio_devices, list) {
+               err = gpiochip_setup_dev(gdev);
+               if (err)
+                       pr_err("%s: Failed to initialize gpio device (%d)\n",
+                              dev_name(&gdev->dev), err);
+       }
+}
+
 /**
  * gpiochip_add_data() - register a gpio_chip
  * @chip: the chip to register, with chip->base initialized
@@ -457,6 +512,9 @@ static void gpiodevice_release(struct device *dev)
  * the gpio framework's arch_initcall().  Otherwise sysfs initialization
  * for GPIOs will fail rudely.
  *
+ * gpiochip_add_data() must only be called after gpiolib initialization,
+ * i.e. after core_initcall().
+ *
  * If chip->base is negative, this requests dynamic assignment of
  * a range of valid GPIOs.
  */
@@ -504,8 +562,7 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
        else
                gdev->owner = THIS_MODULE;
 
-       gdev->descs = devm_kcalloc(&gdev->dev, chip->ngpio,
-                                  sizeof(gdev->descs[0]), GFP_KERNEL);
+       gdev->descs = kcalloc(chip->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL);
        if (!gdev->descs) {
                status = -ENOMEM;
                goto err_free_gdev;
@@ -514,16 +571,16 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
        if (chip->ngpio == 0) {
                chip_err(chip, "tried to insert a GPIO chip with zero lines\n");
                status = -EINVAL;
-               goto err_free_gdev;
+               goto err_free_descs;
        }
 
        if (chip->label)
-               gdev->label = devm_kstrdup(&gdev->dev, chip->label, GFP_KERNEL);
+               gdev->label = kstrdup(chip->label, GFP_KERNEL);
        else
-               gdev->label = devm_kstrdup(&gdev->dev, "unknown", GFP_KERNEL);
+               gdev->label = kstrdup("unknown", GFP_KERNEL);
        if (!gdev->label) {
                status = -ENOMEM;
-               goto err_free_gdev;
+               goto err_free_descs;
        }
 
        gdev->ngpio = chip->ngpio;
@@ -543,7 +600,7 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
                if (base < 0) {
                        status = base;
                        spin_unlock_irqrestore(&gpio_lock, flags);
-                       goto err_free_gdev;
+                       goto err_free_label;
                }
                /*
                 * TODO: it should not be necessary to reflect the assigned
@@ -558,7 +615,7 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
        status = gpiodev_add_to_list(gdev);
        if (status) {
                spin_unlock_irqrestore(&gpio_lock, flags);
-               goto err_free_gdev;
+               goto err_free_label;
        }
 
        for (i = 0; i < chip->ngpio; i++) {
@@ -596,39 +653,16 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
         * we get a device node entry in sysfs under
         * /sys/bus/gpio/devices/gpiochipN/dev that can be used for
         * coldplug of device nodes and other udev business.
+        * We can do this only if gpiolib has been initialized.
+        * Otherwise, defer until later.
         */
-       cdev_init(&gdev->chrdev, &gpio_fileops);
-       gdev->chrdev.owner = THIS_MODULE;
-       gdev->chrdev.kobj.parent = &gdev->dev.kobj;
-       gdev->dev.devt = MKDEV(MAJOR(gpio_devt), gdev->id);
-       status = cdev_add(&gdev->chrdev, gdev->dev.devt, 1);
-       if (status < 0)
-               chip_warn(chip, "failed to add char device %d:%d\n",
-                         MAJOR(gpio_devt), gdev->id);
-       else
-               chip_dbg(chip, "added GPIO chardev (%d:%d)\n",
-                        MAJOR(gpio_devt), gdev->id);
-       status = device_add(&gdev->dev);
-       if (status)
-               goto err_remove_chardev;
-
-       status = gpiochip_sysfs_register(gdev);
-       if (status)
-               goto err_remove_device;
-
-       /* From this point, the .release() function cleans up gpio_device */
-       gdev->dev.release = gpiodevice_release;
-       get_device(&gdev->dev);
-       pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n",
-                __func__, gdev->base, gdev->base + gdev->ngpio - 1,
-                dev_name(&gdev->dev), chip->label ? : "generic");
-
+       if (gpiolib_initialized) {
+               status = gpiochip_setup_dev(gdev);
+               if (status)
+                       goto err_remove_chip;
+       }
        return 0;
 
-err_remove_device:
-       device_del(&gdev->dev);
-err_remove_chardev:
-       cdev_del(&gdev->chrdev);
 err_remove_chip:
        acpi_gpiochip_remove(chip);
        gpiochip_free_hogs(chip);
@@ -637,6 +671,10 @@ err_remove_from_list:
        spin_lock_irqsave(&gpio_lock, flags);
        list_del(&gdev->list);
        spin_unlock_irqrestore(&gpio_lock, flags);
+err_free_label:
+       kfree(gdev->label);
+err_free_descs:
+       kfree(gdev->descs);
 err_free_gdev:
        ida_simple_remove(&gpio_ida, gdev->id);
        /* failures here can mean systems won't boot... */
@@ -2231,9 +2269,11 @@ static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
        return desc;
 }
 
-static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id,
+static struct gpio_desc *acpi_find_gpio(struct device *dev,
+                                       const char *con_id,
                                        unsigned int idx,
-                                       enum gpio_lookup_flags *flags)
+                                       enum gpiod_flags flags,
+                                       enum gpio_lookup_flags *lookupflags)
 {
        struct acpi_device *adev = ACPI_COMPANION(dev);
        struct acpi_gpio_info info;
@@ -2264,10 +2304,16 @@ static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id,
                desc = acpi_get_gpiod_by_index(adev, NULL, idx, &info);
                if (IS_ERR(desc))
                        return desc;
+
+               if ((flags == GPIOD_OUT_LOW || flags == GPIOD_OUT_HIGH) &&
+                   info.gpioint) {
+                       dev_dbg(dev, "refusing GpioInt() entry when doing GPIOD_OUT_* lookup\n");
+                       return ERR_PTR(-ENOENT);
+               }
        }
 
        if (info.polarity == GPIO_ACTIVE_LOW)
-               *flags |= GPIO_ACTIVE_LOW;
+               *lookupflags |= GPIO_ACTIVE_LOW;
 
        return desc;
 }
@@ -2530,7 +2576,7 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
                        desc = of_find_gpio(dev, con_id, idx, &lookupflags);
                } else if (ACPI_COMPANION(dev)) {
                        dev_dbg(dev, "using ACPI for GPIO lookup\n");
-                       desc = acpi_find_gpio(dev, con_id, idx, &lookupflags);
+                       desc = acpi_find_gpio(dev, con_id, idx, flags, &lookupflags);
                }
        }
 
@@ -2829,6 +2875,9 @@ static int __init gpiolib_dev_init(void)
        if (ret < 0) {
                pr_err("gpiolib: failed to allocate char dev region\n");
                bus_unregister(&gpio_bus_type);
+       } else {
+               gpiolib_initialized = true;
+               gpiochip_setup_devs();
        }
        return ret;
 }
index c4a21c6428f5f526289c1da50b77d4a77157fe60..1bcbade479dc2e384d4abf0481f5571c95699a16 100644 (file)
@@ -1591,6 +1591,8 @@ struct amdgpu_uvd {
        struct amdgpu_bo        *vcpu_bo;
        void                    *cpu_addr;
        uint64_t                gpu_addr;
+       unsigned                fw_version;
+       void                    *saved_bo;
        atomic_t                handles[AMDGPU_MAX_UVD_HANDLES];
        struct drm_file         *filp[AMDGPU_MAX_UVD_HANDLES];
        struct delayed_work     idle_work;
@@ -2033,6 +2035,7 @@ struct amdgpu_device {
 
        /* tracking pinned memory */
        u64 vram_pin_size;
+       u64 invisible_pin_size;
        u64 gart_pin_size;
 
        /* amdkfd interface */
index d6b0bff510aaa719dbb0f9cce269645b06716127..b7b583c42ea82410b2c80825185da124e385219c 100644 (file)
@@ -425,6 +425,10 @@ static int acp_resume(void *handle)
        struct acp_pm_domain *apd;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       /* return early if no ACP */
+       if (!adev->acp.acp_genpd)
+               return 0;
+
        /* SMU block will power on ACP irrespective of ACP runtime status.
         * Power off explicitly based on genpd ACP runtime status so that ACP
         * hw and ACP-genpd status are in sync.
index 0020a0ea43ffbfe029e0501f955a3054c2480881..35a1248aaa7778a04fa64957d4d2b65a3911ab9c 100644 (file)
@@ -63,10 +63,6 @@ bool amdgpu_has_atpx(void) {
        return amdgpu_atpx_priv.atpx_detected;
 }
 
-bool amdgpu_has_atpx_dgpu_power_cntl(void) {
-       return amdgpu_atpx_priv.atpx.functions.power_cntl;
-}
-
 /**
  * amdgpu_atpx_call - call an ATPX method
  *
@@ -146,6 +142,13 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
  */
 static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
 {
+       /* make sure required functions are enabled */
+       /* dGPU power control is required */
+       if (atpx->functions.power_cntl == false) {
+               printk("ATPX dGPU power cntl not present, forcing\n");
+               atpx->functions.power_cntl = true;
+       }
+
        if (atpx->functions.px_params) {
                union acpi_object *info;
                struct atpx_px_params output;
index 7a4b101e10c63564aa3b0b8fffc638c171c1d199..6043dc7c3a94d75350a6e8760376b772f0fc4baf 100644 (file)
@@ -816,10 +816,13 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
        struct drm_device *ddev = adev->ddev;
        struct drm_crtc *crtc;
        uint32_t line_time_us, vblank_lines;
+       struct cgs_mode_info *mode_info;
 
        if (info == NULL)
                return -EINVAL;
 
+       mode_info = info->mode_info;
+
        if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
                list_for_each_entry(crtc,
                                &ddev->mode_config.crtc_list, head) {
@@ -828,7 +831,7 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
                                info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
                                info->display_count++;
                        }
-                       if (info->mode_info != NULL &&
+                       if (mode_info != NULL &&
                                crtc->enabled && amdgpu_crtc->enabled &&
                                amdgpu_crtc->hw_mode.clock) {
                                line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
@@ -836,10 +839,10 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
                                vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
                                                        amdgpu_crtc->hw_mode.crtc_vdisplay +
                                                        (amdgpu_crtc->v_border * 2);
-                               info->mode_info->vblank_time_us = vblank_lines * line_time_us;
-                               info->mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
-                               info->mode_info->ref_clock = adev->clock.spll.reference_freq;
-                               info->mode_info++;
+                               mode_info->vblank_time_us = vblank_lines * line_time_us;
+                               mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
+                               mode_info->ref_clock = adev->clock.spll.reference_freq;
+                               mode_info = NULL;
                        }
                }
        }
@@ -847,6 +850,16 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
        return 0;
 }
 
+
+static int amdgpu_cgs_notify_dpm_enabled(void *cgs_device, bool enabled)
+{
+       CGS_FUNC_ADEV;
+
+       adev->pm.dpm_enabled = enabled;
+
+       return 0;
+}
+
 /** \brief evaluate acpi namespace object, handle or pathname must be valid
  *  \param cgs_device
  *  \param info input/output arguments for the control method
@@ -1097,6 +1110,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
        amdgpu_cgs_set_powergating_state,
        amdgpu_cgs_set_clockgating_state,
        amdgpu_cgs_get_active_displays_info,
+       amdgpu_cgs_notify_dpm_enabled,
        amdgpu_cgs_call_acpi_method,
        amdgpu_cgs_query_system_info,
 };
index 612117478b5701a8b0ddbd76f3bdd36ea4099797..2139da773da68d410f4310a321d73634166d89d2 100644 (file)
@@ -62,12 +62,6 @@ static const char *amdgpu_asic_name[] = {
        "LAST",
 };
 
-#if defined(CONFIG_VGA_SWITCHEROO)
-bool amdgpu_has_atpx_dgpu_power_cntl(void);
-#else
-static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
-#endif
-
 bool amdgpu_device_is_px(struct drm_device *dev)
 {
        struct amdgpu_device *adev = dev->dev_private;
@@ -1485,7 +1479,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 
        if (amdgpu_runtime_pm == 1)
                runtime = true;
-       if (amdgpu_device_is_px(ddev) && amdgpu_has_atpx_dgpu_power_cntl())
+       if (amdgpu_device_is_px(ddev))
                runtime = true;
        vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
        if (runtime)
index f0ed974bd4e090f65881d5f3604475d7e44f38ee..3fb405b3a61457e4622dfe4b788ac3925d088f4a 100644 (file)
@@ -57,7 +57,7 @@ static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
        if (!fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
                return true;
 
-       fence_put(*f);
+       fence_put(fence);
        return false;
 }
 
index 4303b447efe898145433985079a6cd10273acc81..d81f1f4883a6c4b1adff92ba0c32b52f1999b2fd 100644 (file)
@@ -121,7 +121,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
 {
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_fence *fence;
-       struct fence **ptr;
+       struct fence *old, **ptr;
        uint32_t seq;
 
        fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
@@ -141,7 +141,11 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
        /* This function can't be called concurrently anyway, otherwise
         * emitting the fence would mess up the hardware ring buffer.
         */
-       BUG_ON(rcu_dereference_protected(*ptr, 1));
+       old = rcu_dereference_protected(*ptr, 1);
+       if (old && !fence_is_signaled(old)) {
+               DRM_INFO("rcu slot is busy\n");
+               fence_wait(old, false);
+       }
 
        rcu_assign_pointer(*ptr, fence_get(&fence->base));
 
index f594cfaa97e513f0bc6d436e26430abc8783e5ce..762cfdb85147471855418fff997877951d4aca6d 100644 (file)
@@ -219,6 +219,8 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
        if (r) {
                return r;
        }
+       adev->ddev->vblank_disable_allowed = true;
+
        /* enable msi */
        adev->irq.msi_enabled = false;
 
index 7805a8706af7bfb9cde33e30a5761db9eab0abd4..b04337de65d193884ba823f511df802b08aadec6 100644 (file)
@@ -303,7 +303,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                        fw_info.feature = adev->vce.fb_version;
                        break;
                case AMDGPU_INFO_FW_UVD:
-                       fw_info.ver = 0;
+                       fw_info.ver = adev->uvd.fw_version;
                        fw_info.feature = 0;
                        break;
                case AMDGPU_INFO_FW_GMC:
@@ -382,8 +382,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                struct drm_amdgpu_info_vram_gtt vram_gtt;
 
                vram_gtt.vram_size = adev->mc.real_vram_size;
+               vram_gtt.vram_size -= adev->vram_pin_size;
                vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size;
-               vram_gtt.vram_cpu_accessible_size -= adev->vram_pin_size;
+               vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
                vram_gtt.gtt_size  = adev->mc.gtt_size;
                vram_gtt.gtt_size -= adev->gart_pin_size;
                return copy_to_user(out, &vram_gtt,
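The pinned bookkeeping above works out as follows: visible pinned memory is vram_pin_size - invisible_pin_size, so with illustrative figures of 256 MiB visible VRAM and 96 MiB pinned, 64 MiB of it CPU-invisible, userspace now sees 256 - (96 - 64) = 224 MiB of CPU-accessible VRAM where the old code reported 256 - 96 = 160 MiB.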
index 8d432e6901af50d2267089757685f2e6f16f4f40..81bd964d3dfc2f12cba2bf0e64b63c677e964d37 100644 (file)
@@ -53,7 +53,7 @@ struct amdgpu_hpd;
 
 #define AMDGPU_MAX_HPD_PINS 6
 #define AMDGPU_MAX_CRTCS 6
-#define AMDGPU_MAX_AFMT_BLOCKS 7
+#define AMDGPU_MAX_AFMT_BLOCKS 9
 
 enum amdgpu_rmx_type {
        RMX_OFF,
@@ -309,8 +309,8 @@ struct amdgpu_mode_info {
        struct atom_context *atom_context;
        struct card_info *atom_card_info;
        bool mode_config_initialized;
-       struct amdgpu_crtc *crtcs[6];
-       struct amdgpu_afmt *afmt[7];
+       struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
+       struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
        /* DVI-I properties */
        struct drm_property *coherent_mode_property;
        /* DAC enable load detect */
index 56d1458393ccfd86992be99d5b19db235ac40f11..7ecea83ce453cae868306b95b1600a01653a6de7 100644 (file)
@@ -424,9 +424,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                bo->pin_count = 1;
                if (gpu_addr != NULL)
                        *gpu_addr = amdgpu_bo_gpu_offset(bo);
-               if (domain == AMDGPU_GEM_DOMAIN_VRAM)
+               if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
                        bo->adev->vram_pin_size += amdgpu_bo_size(bo);
-               else
+                       if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+                               bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
+               } else
                        bo->adev->gart_pin_size += amdgpu_bo_size(bo);
        } else {
                dev_err(bo->adev->dev, "%p pin failed\n", bo);
@@ -456,9 +458,11 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
        }
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (likely(r == 0)) {
-               if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+               if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
                        bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
-               else
+                       if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+                               bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
+               } else
                        bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
        } else {
                dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
@@ -476,6 +480,17 @@ int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
        return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
 }
 
+static const char *amdgpu_vram_names[] = {
+       "UNKNOWN",
+       "GDDR1",
+       "DDR2",
+       "GDDR3",
+       "GDDR4",
+       "GDDR5",
+       "HBM",
+       "DDR3"
+};
+
 int amdgpu_bo_init(struct amdgpu_device *adev)
 {
        /* Add an MTRR for the VRAM */
@@ -484,8 +499,8 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
        DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
                adev->mc.mc_vram_size >> 20,
                (unsigned long long)adev->mc.aper_size >> 20);
-       DRM_INFO("RAM width %dbits DDR\n",
-                       adev->mc.vram_width);
+       DRM_INFO("RAM width %dbits %s\n",
+                adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]);
        return amdgpu_ttm_init(adev);
 }
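With the lookup table above, a board whose vram_type decodes to index 5 on a 256-bit bus would log, for example, "RAM width 256bits GDDR5" (sample values) instead of the previous unconditional "DDR" suffix.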
 
@@ -526,6 +541,7 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
        if (!metadata_size) {
                if (bo->metadata_size) {
                        kfree(bo->metadata);
+                       bo->metadata = NULL;
                        bo->metadata_size = 0;
                }
                return 0;
index 3cb6d6c413c71b0207fe1766d26ca190a083e880..e9c6ae6ed2f73357d762f9aa75406c010238dd4a 100644 (file)
@@ -143,7 +143,7 @@ static int amdgpu_pp_late_init(void *handle)
                                        adev->powerplay.pp_handle);
 
 #ifdef CONFIG_DRM_AMD_POWERPLAY
-       if (adev->pp_enabled) {
+       if (adev->pp_enabled && adev->pm.dpm_enabled) {
                amdgpu_pm_sysfs_init(adev);
                amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL);
        }
@@ -161,12 +161,8 @@ static int amdgpu_pp_sw_init(void *handle)
                                        adev->powerplay.pp_handle);
 
 #ifdef CONFIG_DRM_AMD_POWERPLAY
-       if (adev->pp_enabled) {
-               if (amdgpu_dpm == 0)
-                       adev->pm.dpm_enabled = false;
-               else
-                       adev->pm.dpm_enabled = true;
-       }
+       if (adev->pp_enabled)
+               adev->pm.dpm_enabled = true;
 #endif
 
        return ret;
index f1a55d1888cbbd343db0a69f02ad9ce52aa67084..11af4492b4bee2e3bf29d7910e9d25b3adb5f843 100644 (file)
@@ -223,6 +223,8 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
        struct amdgpu_bo *rbo = container_of(bo, struct amdgpu_bo, tbo);
 
+       if (amdgpu_ttm_tt_get_usermm(bo->ttm))
+               return -EPERM;
        return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
 }
 
@@ -622,7 +624,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
                        set_page_dirty(page);
 
                mark_page_accessed(page);
-               page_cache_release(page);
+               put_page(page);
        }
 
        sg_free_table(ttm->sg);
index c1a5810444174522193111d9a41ff26d6fc0eb16..871018c634e0af896aa5a864bd87ee0653e35396 100644 (file)
@@ -158,6 +158,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
        DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
                version_major, version_minor, family_id);
 
+       adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
+                               (family_id << 8));
+
        bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
                 +  AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
        r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
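The packed value mirrors the printed version; with illustrative numbers, major 1, minor 87, family 9 packs to (1 << 24) | (87 << 16) | (9 << 8) = 0x01570900, which the AMDGPU_INFO_FW_UVD query above now reports instead of 0.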
@@ -241,32 +244,30 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 
 int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *ring = &adev->uvd.ring;
-       int i, r;
+       unsigned size;
+       void *ptr;
+       int i;
 
        if (adev->uvd.vcpu_bo == NULL)
                return 0;
 
-       for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
-               uint32_t handle = atomic_read(&adev->uvd.handles[i]);
-               if (handle != 0) {
-                       struct fence *fence;
+       for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
+               if (atomic_read(&adev->uvd.handles[i]))
+                       break;
 
-                       amdgpu_uvd_note_usage(adev);
+       if (i == AMDGPU_MAX_UVD_HANDLES)
+               return 0;
 
-                       r = amdgpu_uvd_get_destroy_msg(ring, handle, false, &fence);
-                       if (r) {
-                               DRM_ERROR("Error destroying UVD (%d)!\n", r);
-                               continue;
-                       }
+       cancel_delayed_work_sync(&adev->uvd.idle_work);
 
-                       fence_wait(fence, false);
-                       fence_put(fence);
+       size = amdgpu_bo_size(adev->uvd.vcpu_bo);
+       ptr = adev->uvd.cpu_addr;
 
-                       adev->uvd.filp[i] = NULL;
-                       atomic_set(&adev->uvd.handles[i], 0);
-               }
-       }
+       adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
+       if (!adev->uvd.saved_bo)
+               return -ENOMEM;
+
+       memcpy(adev->uvd.saved_bo, ptr, size);
 
        return 0;
 }
@@ -275,23 +276,29 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
 {
        unsigned size;
        void *ptr;
-       const struct common_firmware_header *hdr;
-       unsigned offset;
 
        if (adev->uvd.vcpu_bo == NULL)
                return -EINVAL;
 
-       hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
-       offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
-       memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
-               (adev->uvd.fw->size) - offset);
-
        size = amdgpu_bo_size(adev->uvd.vcpu_bo);
-       size -= le32_to_cpu(hdr->ucode_size_bytes);
        ptr = adev->uvd.cpu_addr;
-       ptr += le32_to_cpu(hdr->ucode_size_bytes);
 
-       memset(ptr, 0, size);
+       if (adev->uvd.saved_bo != NULL) {
+               memcpy(ptr, adev->uvd.saved_bo, size);
+               kfree(adev->uvd.saved_bo);
+               adev->uvd.saved_bo = NULL;
+       } else {
+               const struct common_firmware_header *hdr;
+               unsigned offset;
+
+               hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
+               offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
+               memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
+                       (adev->uvd.fw->size) - offset);
+               size -= le32_to_cpu(hdr->ucode_size_bytes);
+               ptr += le32_to_cpu(hdr->ucode_size_bytes);
+               memset(ptr, 0, size);
+       }
 
        return 0;
 }
index 4bec0c108cea9887ed612b252fc6b990547f029a..481a64fa9b470bfa7b2c2290bf6f85b882b4066a 100644 (file)
@@ -234,6 +234,7 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
        if (i == AMDGPU_MAX_VCE_HANDLES)
                return 0;
 
+       cancel_delayed_work_sync(&adev->vce.idle_work);
        /* TODO: suspending running encoding sessions isn't supported */
        return -EINVAL;
 }
index 1e0bba29e16796f97c8eee38fc9d3100c405f6bc..1cd6de575305aa3f657fa1a7f5a70232b272d739 100644 (file)
@@ -298,6 +298,10 @@ bool amdgpu_atombios_encoder_mode_fixup(struct drm_encoder *encoder,
            && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
                adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
 
+       /* vertical FP must be at least 1 */
+       if (mode->crtc_vsync_start == mode->crtc_vdisplay)
+               adjusted_mode->crtc_vsync_start++;
+
        /* get the native mode for scaling */
        if (amdgpu_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
                amdgpu_panel_mode_fixup(encoder, adjusted_mode);
index 82ce7d9438843283d6afd1d7eb764fb26de266ae..a4a2e6cc61bb975a190036317f474a27651dc7ee 100644 (file)
@@ -903,14 +903,6 @@ static int gmc_v7_0_early_init(void *handle)
        gmc_v7_0_set_gart_funcs(adev);
        gmc_v7_0_set_irq_funcs(adev);
 
-       if (adev->flags & AMD_IS_APU) {
-               adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
-       } else {
-               u32 tmp = RREG32(mmMC_SEQ_MISC0);
-               tmp &= MC_SEQ_MISC0__MT__MASK;
-               adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
-       }
-
        return 0;
 }
 
@@ -918,7 +910,10 @@ static int gmc_v7_0_late_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+       if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
+               return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+       else
+               return 0;
 }
 
 static int gmc_v7_0_sw_init(void *handle)
@@ -927,6 +922,14 @@ static int gmc_v7_0_sw_init(void *handle)
        int dma_bits;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       if (adev->flags & AMD_IS_APU) {
+               adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+       } else {
+               u32 tmp = RREG32(mmMC_SEQ_MISC0);
+               tmp &= MC_SEQ_MISC0__MT__MASK;
+               adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
+       }
+
        r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
        if (r)
                return r;
index 29bd7b57dc912be1b33fdc06ce7ab8944527c804..7a9db2c72c894853478693da97576b9b8b9cebbc 100644 (file)
@@ -863,14 +863,6 @@ static int gmc_v8_0_early_init(void *handle)
        gmc_v8_0_set_gart_funcs(adev);
        gmc_v8_0_set_irq_funcs(adev);
 
-       if (adev->flags & AMD_IS_APU) {
-               adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
-       } else {
-               u32 tmp = RREG32(mmMC_SEQ_MISC0);
-               tmp &= MC_SEQ_MISC0__MT__MASK;
-               adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
-       }
-
        return 0;
 }
 
@@ -878,15 +870,33 @@ static int gmc_v8_0_late_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+       if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
+               return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+       else
+               return 0;
 }
 
+#define mmMC_SEQ_MISC0_FIJI 0xA71
+
 static int gmc_v8_0_sw_init(void *handle)
 {
        int r;
        int dma_bits;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       if (adev->flags & AMD_IS_APU) {
+               adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+       } else {
+               u32 tmp;
+
+               if (adev->asic_type == CHIP_FIJI)
+                       tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
+               else
+                       tmp = RREG32(mmMC_SEQ_MISC0);
+               tmp &= MC_SEQ_MISC0__MT__MASK;
+               adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
+       }
+
        r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
        if (r)
                return r;
index b6f7d7bff92989737511e8cc20452b6eecf89f05..0f14199cf716e157aa5638b7adebc041958225ac 100644 (file)
@@ -307,7 +307,7 @@ static int tonga_ih_sw_fini(void *handle)
 
        amdgpu_irq_fini(adev);
        amdgpu_ih_ring_fini(adev);
-       amdgpu_irq_add_domain(adev);
+       amdgpu_irq_remove_domain(adev);
 
        return 0;
 }
index c606ccb38d8b98bd8e05fd588352102cd3601cbc..cb463753115b8d4f8266464a9d69956be21a5750 100644 (file)
@@ -224,11 +224,11 @@ static int uvd_v4_2_suspend(void *handle)
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       r = amdgpu_uvd_suspend(adev);
+       r = uvd_v4_2_hw_fini(adev);
        if (r)
                return r;
 
-       r = uvd_v4_2_hw_fini(adev);
+       r = amdgpu_uvd_suspend(adev);
        if (r)
                return r;
 
index e3c852d9d79a45285c0dcb9445e43896be763eae..16476d80f475a77c17ae4bb8a5f60aa05c4562c8 100644 (file)
@@ -220,11 +220,11 @@ static int uvd_v5_0_suspend(void *handle)
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       r = amdgpu_uvd_suspend(adev);
+       r = uvd_v5_0_hw_fini(adev);
        if (r)
                return r;
 
-       r = uvd_v5_0_hw_fini(adev);
+       r = amdgpu_uvd_suspend(adev);
        if (r)
                return r;
 
index 3375e614ac67121f5832f56e0e480a1166601481..d49379145ef22e8468866a1b60bb17d40ebb5e70 100644 (file)
@@ -214,15 +214,16 @@ static int uvd_v6_0_suspend(void *handle)
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       r = uvd_v6_0_hw_fini(adev);
+       if (r)
+               return r;
+
        /* Skip this for APU for now */
        if (!(adev->flags & AMD_IS_APU)) {
                r = amdgpu_uvd_suspend(adev);
                if (r)
                        return r;
        }
-       r = uvd_v6_0_hw_fini(adev);
-       if (r)
-               return r;
 
        return r;
 }
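All three UVD suspend paths (v4.2, v5.0 and v6.0, above and below) get the same reordering: quiesce the hardware before saving UVD state. A composite sketch of the resulting flow, with uvd_vX_0 standing in for any of the three versions:

        static int uvd_vX_0_suspend(void *handle)   /* illustrative composite */
        {
                struct amdgpu_device *adev = (struct amdgpu_device *)handle;
                int r;

                r = uvd_vX_0_hw_fini(adev);       /* stop the block first ... */
                if (r)
                        return r;
                return amdgpu_uvd_suspend(adev);  /* ... then save its state */
        }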
index aec38fc3834f9d867b4477c2f1d1d1166285f1fe..ab84d494724774d77100756c85c6d2a689b25516 100644 (file)
@@ -589,6 +589,8 @@ typedef int(*cgs_get_active_displays_info)(
                                        void *cgs_device,
                                        struct cgs_display_info *info);
 
+typedef int (*cgs_notify_dpm_enabled)(void *cgs_device, bool enabled);
+
 typedef int (*cgs_call_acpi_method)(void *cgs_device,
                                        uint32_t acpi_method,
                                        uint32_t acpi_function,
@@ -644,6 +646,8 @@ struct cgs_ops {
        cgs_set_clockgating_state set_clockgating_state;
        /* display manager */
        cgs_get_active_displays_info get_active_displays_info;
+       /* notify dpm enabled */
+       cgs_notify_dpm_enabled notify_dpm_enabled;
        /* ACPI */
        cgs_call_acpi_method call_acpi_method;
        /* get system info */
@@ -734,8 +738,12 @@ struct cgs_device
        CGS_CALL(set_powergating_state, dev, block_type, state)
 #define cgs_set_clockgating_state(dev, block_type, state)      \
        CGS_CALL(set_clockgating_state, dev, block_type, state)
+#define cgs_notify_dpm_enabled(dev, enabled)   \
+       CGS_CALL(notify_dpm_enabled, dev, enabled)
+
 #define cgs_get_active_displays_info(dev, info)        \
        CGS_CALL(get_active_displays_info, dev, info)
+
 #define cgs_call_acpi_method(dev, acpi_method, acpi_function, pintput, poutput, output_count, input_size, output_size) \
        CGS_CALL(call_acpi_method, dev, acpi_method, acpi_function, pintput, poutput, output_count, input_size, output_size)
 #define cgs_query_system_info(dev, sys_info)   \
index 6b52c78cb404870ae019b3cd262f7d9711b590b3..56856a2864d108c89cb2a961398f8f3613159fcc 100644 (file)
@@ -137,14 +137,14 @@ static const pem_event_action *resume_event[] = {
        reset_display_configCounter_tasks,
        update_dal_configuration_tasks,
        vari_bright_resume_tasks,
-       block_adjust_power_state_tasks,
        setup_asic_tasks,
        enable_stutter_mode_tasks, /*must do this in boot state and before SMC is started */
        enable_dynamic_state_management_tasks,
        enable_clock_power_gatings_tasks,
        enable_disable_bapm_tasks,
        initialize_thermal_controller_tasks,
-       reset_boot_state_tasks,
+       get_2d_performance_state_tasks,
+       set_performance_state_tasks,
        adjust_power_state_tasks,
        enable_disable_fps_tasks,
        notify_hw_power_source_tasks,
index 51dedf84623c6b7473e3c753143e95fa7791e1e8..89f31bc5b68b9e061d31a2fb2494c7e4f60d276f 100644 (file)
@@ -2389,6 +2389,7 @@ static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
 
        for(count = 0; count < table->VceLevelCount; count++) {
                table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
+               table->VceLevel[count].MinVoltage = 0;
                table->VceLevel[count].MinVoltage |=
                                (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
                table->VceLevel[count].MinVoltage |=
@@ -2465,6 +2466,7 @@ static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
 
        for (count = 0; count < table->SamuLevelCount; count++) {
                /* not sure whether we need evclk or not */
+               table->SamuLevel[count].MinVoltage = 0;
                table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
                table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
                                VOLTAGE_SCALE) << VDDC_SHIFT;
@@ -2562,6 +2564,7 @@ static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
        table->UvdBootLevel = 0;
 
        for (count = 0; count < table->UvdLevelCount; count++) {
+               table->UvdLevel[count].MinVoltage = 0;
                table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
                table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
                table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
@@ -2900,6 +2903,8 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
        if(FIJI_VOLTAGE_CONTROL_NONE != data->voltage_control)
                fiji_populate_smc_voltage_tables(hwmgr, table);
 
+       table->SystemFlags = 0;
+
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                        PHM_PlatformCaps_AutomaticDCTransition))
                table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
@@ -2997,6 +3002,7 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
        table->MemoryThermThrottleEnable = 1;
        table->PCIeBootLinkLevel = 0;      /* 0:Gen1 1:Gen2 2:Gen3*/
        table->PCIeGenInterval = 1;
+       table->VRConfig = 0;
 
        result = fiji_populate_vr_config(hwmgr, table);
        PP_ASSERT_WITH_CODE(0 == result,
@@ -5195,6 +5201,67 @@ static int fiji_print_clock_levels(struct pp_hwmgr *hwmgr,
        return size;
 }
 
+static inline bool fiji_are_power_levels_equal(const struct fiji_performance_level *pl1,
+                                                          const struct fiji_performance_level *pl2)
+{
+       return ((pl1->memory_clock == pl2->memory_clock) &&
+                 (pl1->engine_clock == pl2->engine_clock) &&
+                 (pl1->pcie_gen == pl2->pcie_gen) &&
+                 (pl1->pcie_lane == pl2->pcie_lane));
+}
+
+int fiji_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
+{
+       const struct fiji_power_state *psa = cast_const_phw_fiji_power_state(pstate1);
+       const struct fiji_power_state *psb = cast_const_phw_fiji_power_state(pstate2);
+       int i;
+
+       if (equal == NULL || psa == NULL || psb == NULL)
+               return -EINVAL;
+
+       /* If the two states don't even have the same number of performance levels they cannot be the same state. */
+       if (psa->performance_level_count != psb->performance_level_count) {
+               *equal = false;
+               return 0;
+       }
+
+       for (i = 0; i < psa->performance_level_count; i++) {
+               if (!fiji_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
+                       /* If we have found even one performance level pair that is different the states are different. */
+                       *equal = false;
+                       return 0;
+               }
+       }
+
+       /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
+       *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
+       *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
+       *equal &= (psa->sclk_threshold == psb->sclk_threshold);
+       *equal &= (psa->acp_clk == psb->acp_clk);
+
+       return 0;
+}
+
+bool fiji_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
+{
+       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+       bool is_update_required = false;
+       struct cgs_display_info info = {0,0,NULL};
+
+       cgs_get_active_displays_info(hwmgr->device, &info);
+
+       if (data->display_timing.num_existing_displays != info.display_count)
+               is_update_required = true;
+/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL
+       if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
+               cgs_get_min_clock_settings(hwmgr->device, &min_clocks);
+               if(min_clocks.engineClockInSR != data->display_timing.minClockInSR)
+                       is_update_required = true;
+*/
+       return is_update_required;
+}
+
+
 static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
        .backend_init = &fiji_hwmgr_backend_init,
        .backend_fini = &tonga_hwmgr_backend_fini,
@@ -5230,6 +5297,8 @@ static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
        .register_internal_thermal_interrupt = fiji_register_internal_thermal_interrupt,
        .set_fan_control_mode = fiji_set_fan_control_mode,
        .get_fan_control_mode = fiji_get_fan_control_mode,
+       .check_states_equal = fiji_check_states_equal,
+       .check_smc_update_required_for_display_configuration = fiji_check_smc_update_required_for_display_configuration,
        .get_pp_table = fiji_get_pp_table,
        .set_pp_table = fiji_set_pp_table,
        .force_clock_level = fiji_force_clock_level,
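fiji_check_states_equal() compares level counts first, then every performance level, and only falls back to the UVD/VCE clocks, sclk threshold and ACP clock as tie-breakers. A hypothetical caller in the state-transition path might use the new hook like this (ps_a, ps_b and the early-out are illustrative, not part of this patch):

        bool equal = false;

        /* Skip the expensive reprogramming when nothing actually changed. */
        if (!fiji_check_states_equal(hwmgr, &ps_a->hardware,
                                     &ps_b->hardware, &equal) && equal)
                return 0;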
index be31bed2538ade909b95b4cc95c6c24602466c35..fa208ada689219f4eb072ebb5e655e0a03cc9ab7 100644 (file)
@@ -58,6 +58,9 @@ void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr)
 
        phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VpuRecoveryInProgress);
 
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);
+
        if (acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST) &&
                acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION))
                phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
@@ -130,18 +133,25 @@ int phm_set_power_state(struct pp_hwmgr *hwmgr,
 
 int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
 {
+       int ret = 1;
+       bool enabled;
        PHM_FUNC_CHECK(hwmgr);
 
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                PHM_PlatformCaps_TablelessHardwareInterface)) {
                if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable)
-                       return hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);
+                       ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);
        } else {
-               return phm_dispatch_table(hwmgr,
+               ret = phm_dispatch_table(hwmgr,
                                &(hwmgr->enable_dynamic_state_management),
                                NULL, NULL);
        }
-       return 0;
+
+       enabled = ret == 0 ? true : false;
+
+       cgs_notify_dpm_enabled(hwmgr->device, enabled);
+
+       return ret;
 }
 
 int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level)
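phm_enable_dynamic_state_management() now reports the outcome to the CGS layer regardless of which path ran, so the device glue can record whether DPM actually came up. A sketch of a backend hook matching the new cgs_notify_dpm_enabled typedef; the dpm_enabled field and the glue layout are assumptions, not shown in this diff:

        static int amdgpu_cgs_notify_dpm_enabled(void *cgs_device, bool enabled)
        {
                struct amdgpu_device *adev = cgs_device;   /* assumed glue layout */

                adev->pm.dpm_enabled = enabled;            /* hypothetical field */
                return 0;
        }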
index 56b829f9769952e08aee6938bc26af6d74c3ccb5..3ac1ae4d8cafb69993035ab505727c3d127cc962 100644 (file)
@@ -57,14 +57,13 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags)
                DRM_ERROR("failed to map control registers area\n");
                ret = PTR_ERR(hdlcd->mmio);
                hdlcd->mmio = NULL;
-               goto fail;
+               return ret;
        }
 
        version = hdlcd_read(hdlcd, HDLCD_REG_VERSION);
        if ((version & HDLCD_PRODUCT_MASK) != HDLCD_PRODUCT_ID) {
                DRM_ERROR("unknown product id: 0x%x\n", version);
-               ret = -EINVAL;
-               goto fail;
+               return -EINVAL;
        }
        DRM_INFO("found ARM HDLCD version r%dp%d\n",
                (version & HDLCD_VERSION_MAJOR_MASK) >> 8,
@@ -73,7 +72,7 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags)
        /* Get the optional framebuffer memory resource */
        ret = of_reserved_mem_device_init(drm->dev);
        if (ret && ret != -ENODEV)
-               goto fail;
+               return ret;
 
        ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32));
        if (ret)
@@ -101,8 +100,6 @@ irq_fail:
        drm_crtc_cleanup(&hdlcd->crtc);
 setup_fail:
        of_reserved_mem_device_release(drm->dev);
-fail:
-       devm_clk_put(drm->dev, hdlcd->clk);
 
        return ret;
 }
@@ -412,7 +409,6 @@ err_unload:
        pm_runtime_put_sync(drm->dev);
        pm_runtime_disable(drm->dev);
        of_reserved_mem_device_release(drm->dev);
-       devm_clk_put(dev, hdlcd->clk);
 err_free:
        drm_dev_unref(drm);
 
@@ -436,10 +432,6 @@ static void hdlcd_drm_unbind(struct device *dev)
        pm_runtime_put_sync(drm->dev);
        pm_runtime_disable(drm->dev);
        of_reserved_mem_device_release(drm->dev);
-       if (!IS_ERR(hdlcd->clk)) {
-               devm_clk_put(drm->dev, hdlcd->clk);
-               hdlcd->clk = NULL;
-       }
        drm_mode_config_cleanup(drm);
        drm_dev_unregister(drm);
        drm_dev_unref(drm);
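The dropped devm_clk_put() calls were double releases: a clock obtained through the devres API is put automatically when probe fails or the device is unbound. A minimal sketch of the ownership rule, assuming the clock was acquired along these lines (the "pxlclk" name is an assumption):

        hdlcd->clk = devm_clk_get(drm->dev, "pxlclk");
        if (IS_ERR(hdlcd->clk))
                return PTR_ERR(hdlcd->clk);
        /* No matching put on error paths: devres releases it for us. */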
index 6e731db31aa49c9c65e9f6eebd3b789ca4daa78b..aca7f9cc610929e5e75fa032b77b1ba79c4dfcc4 100644 (file)
@@ -481,7 +481,7 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
 
  release:
        for_each_sg(sgt->sgl, sg, num, i)
-               page_cache_release(sg_page(sg));
+               put_page(sg_page(sg));
  free_table:
        sg_free_table(sgt);
  free_sgt:
@@ -502,7 +502,7 @@ static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
        if (dobj->obj.filp) {
                struct scatterlist *sg;
                for_each_sg(sgt->sgl, sg, sgt->nents, i)
-                       page_cache_release(sg_page(sg));
+                       put_page(sg_page(sg));
        }
 
        sg_free_table(sgt);
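page_cache_release() was a thin alias for put_page() and is removed tree-wide this cycle, so callers now drop the page reference directly:

        /* Previously: page_cache_release(page) — the old wrapper expanded
         * to put_page(page), so the substitution is purely mechanical.  */
        put_page(sg_page(sg));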
index 27fbd79d0daf0003be2014cee00c9dcdb49fdb43..71ea0521ea96a94673752d56371516c33be84e1b 100644 (file)
@@ -1672,13 +1672,19 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
        u8 sinks[DRM_DP_MAX_SDP_STREAMS];
        int i;
 
+       port = drm_dp_get_validated_port_ref(mgr, port);
+       if (!port)
+               return -EINVAL;
+
        port_num = port->port_num;
        mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
        if (!mstb) {
                mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
 
-               if (!mstb)
+               if (!mstb) {
+                       drm_dp_put_port(port);
                        return -EINVAL;
+               }
        }
 
        txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@@ -1707,6 +1713,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
        kfree(txmsg);
 fail_put:
        drm_dp_put_mst_branch_device(mstb);
+       drm_dp_put_port(port);
        return ret;
 }
 
@@ -1789,6 +1796,11 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
                req_payload.start_slot = cur_slots;
                if (mgr->proposed_vcpis[i]) {
                        port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
+                       port = drm_dp_get_validated_port_ref(mgr, port);
+                       if (!port) {
+                               mutex_unlock(&mgr->payload_lock);
+                               return -EINVAL;
+                       }
                        req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
                        req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
                } else {
@@ -1816,6 +1828,9 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
                        mgr->payloads[i].payload_state = req_payload.payload_state;
                }
                cur_slots += req_payload.num_slots;
+
+               if (port)
+                       drm_dp_put_port(port);
        }
 
        for (i = 0; i < mgr->max_payloads; i++) {
@@ -2121,6 +2136,8 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
 
        if (mgr->mst_primary) {
                int sret;
+               u8 guid[16];
+
                sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
                if (sret != DP_RECEIVER_CAP_SIZE) {
                        DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
@@ -2135,6 +2152,16 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
                        ret = -1;
                        goto out_unlock;
                }
+
+               /* Some hubs forget their guids after they resume */
+               sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
+               if (sret != 16) {
+                       DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+                       ret = -1;
+                       goto out_unlock;
+               }
+               drm_dp_check_mstb_guid(mgr->mst_primary, guid);
+
                ret = 0;
        } else
                ret = -1;
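The MST fixes above follow one pattern: validate that a port is still part of the topology before dereferencing it, hold a reference across the operation, and drop it on every exit path; on resume, additionally re-read the GUID because some hubs forget it across suspend. The reference pattern in sketch form:

        port = drm_dp_get_validated_port_ref(mgr, port);
        if (!port)
                return -EINVAL;         /* port vanished from the topology */

        /* ... use the port; it cannot be freed underneath us ... */

        drm_dp_put_port(port);          /* balance on every exit path */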
index 414d7f61aa05cbacb78542b1e3154ea700f6e3bd..558ef9fc39e6c2104b90725950fb7a562e9eefa4 100644 (file)
@@ -205,7 +205,7 @@ static const struct drm_display_mode drm_dmt_modes[] = {
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
        /* 0x0f - 1024x768@43Hz, interlace */
        { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
-                  1208, 1264, 0, 768, 768, 772, 817, 0,
+                  1208, 1264, 0, 768, 768, 776, 817, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
                   DRM_MODE_FLAG_INTERLACE) },
        /* 0x10 - 1024x768@60Hz */
@@ -522,12 +522,12 @@ static const struct drm_display_mode edid_est_modes[] = {
                   720, 840, 0, 480, 481, 484, 500, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
        { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
-                  704,  832, 0, 480, 489, 491, 520, 0,
+                  704,  832, 0, 480, 489, 492, 520, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
        { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
                   768,  864, 0, 480, 483, 486, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
-       { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
+       { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
                   752, 800, 0, 480, 490, 492, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
        { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
@@ -539,7 +539,7 @@ static const struct drm_display_mode edid_est_modes[] = {
        { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
                   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
-       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
+       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
                   1136, 1312, 0,  768, 769, 772, 800, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
        { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
@@ -2241,7 +2241,7 @@ drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
 {
        int i, j, m, modes = 0;
        struct drm_display_mode *mode;
-       u8 *est = ((u8 *)timing) + 5;
+       u8 *est = ((u8 *)timing) + 6;
 
        for (i = 0; i < 6; i++) {
                for (j = 7; j >= 0; j--) {
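Two of the EDID corrections are easy to sanity-check. The 640x480@60Hz pixel clock is defined by VESA as 25.175 MHz, which the refresh-rate arithmetic confirms; and the est3 pointer fix reflects that the timing bitmap of an Established Timings III descriptor starts at byte 6 (byte 5 is the revision), matching the six-byte i/j loop just above:

        /* refresh = clock / (htotal * vtotal)
         *   25175 kHz / (800 * 525) = 59.94 Hz  (canonical VESA rate)
         *   25200 kHz / (800 * 525) = 60.00 Hz  (plausible, but wrong)  */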
index 2e8c77e71e1f699fc2837f2b5d67d24e52729a6d..da0c5320789fbf96a44869618e2cbdda704d2d3f 100644 (file)
@@ -534,7 +534,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 
 fail:
        while (i--)
-               page_cache_release(pages[i]);
+               put_page(pages[i]);
 
        drm_free_large(pages);
        return ERR_CAST(p);
@@ -569,7 +569,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
                        mark_page_accessed(pages[i]);
 
                /* Undo the reference we took when populating the table */
-               page_cache_release(pages[i]);
+               put_page(pages[i]);
        }
 
        drm_free_large(pages);
index 09198d0b58140c8d00499588d641c7116011bc77..306dde18a94a01c8cc623a96bd7743c441087309 100644 (file)
@@ -572,6 +572,24 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
                goto fail;
        }
 
+       /*
+        * Set the GPU linear window to be at the end of the DMA window, where
+        * the CMA area is likely to reside. This ensures that we are able to
+        * map the command buffers while having the linear window overlap as
+        * much RAM as possible, so we can optimize mappings for other buffers.
+        *
+        * For 3D cores only do this if MC2.0 is present, as with MC1.0 it leads
+        * to different views of the memory on the individual engines.
+        */
+       if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
+           (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
+               u32 dma_mask = (u32)dma_get_required_mask(gpu->dev);
+               if (dma_mask < PHYS_OFFSET + SZ_2G)
+                       gpu->memory_base = PHYS_OFFSET;
+               else
+                       gpu->memory_base = dma_mask - SZ_2G + 1;
+       }
+
        ret = etnaviv_hw_reset(gpu);
        if (ret)
                goto fail;
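The relocated etnaviv code computes where a 2 GiB linear window should start so that it ends exactly at the top of the DMA window. With illustrative numbers:

        /* dma_get_required_mask() == 0xffffffff  (a full 4 GiB window)
         *   memory_base = 0xffffffff - SZ_2G + 1 = 0x80000000
         * i.e. the linear window spans the upper 2 GiB, where the CMA
         * area is most likely to live.                                  */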
@@ -1566,7 +1584,6 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct etnaviv_gpu *gpu;
-       u32 dma_mask;
        int err = 0;
 
        gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
@@ -1576,18 +1593,6 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
        gpu->dev = &pdev->dev;
        mutex_init(&gpu->lock);
 
-       /*
-        * Set the GPU linear window to be at the end of the DMA window, where
-        * the CMA area is likely to reside. This ensures that we are able to
-        * map the command buffers while having the linear window overlap as
-        * much RAM as possible, so we can optimize mappings for other buffers.
-        */
-       dma_mask = (u32)dma_get_required_mask(dev);
-       if (dma_mask < PHYS_OFFSET + SZ_2G)
-               gpu->memory_base = PHYS_OFFSET;
-       else
-               gpu->memory_base = dma_mask - SZ_2G + 1;
-
        /* Map registers: */
        gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
        if (IS_ERR(gpu->mmio))
index f17d3927959604301e9e53c7614fcb1e4982e80c..baddf33fb475863379bcffcd7befb4da085e37f2 100644 (file)
@@ -94,7 +94,7 @@ comment "Sub-drivers"
 
 config DRM_EXYNOS_G2D
        bool "G2D"
-       depends on !VIDEO_SAMSUNG_S5P_G2D
+       depends on VIDEO_SAMSUNG_S5P_G2D=n
        select FRAME_VECTOR
        help
          Choose this option if you want to use Exynos G2D for DRM.
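The switch from "!VIDEO_SAMSUNG_S5P_G2D" to "VIDEO_SAMSUNG_S5P_G2D=n" matters for tristate symbols: the negated form is still satisfied when the V4L2 driver is built as a module, while the "=n" form only holds when it is disabled outright, which appears to be the intent here.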
index 968b31c522b2ddaa9cc8ed7fc4f675f8e39964d8..23d2f958739b187b7bd28ae4e0f7f7ec7522da6f 100644 (file)
@@ -2,10 +2,10 @@
 # Makefile for the drm device driver.  This driver provides support for the
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
-exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fbdev.o \
-               exynos_drm_fb.o exynos_drm_gem.o exynos_drm_core.o \
-               exynos_drm_plane.o
+exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fb.o \
+               exynos_drm_gem.o exynos_drm_core.o exynos_drm_plane.o
 
+exynosdrm-$(CONFIG_DRM_FBDEV_EMULATION) += exynos_drm_fbdev.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD)    += exynos_drm_fimd.o
 exynosdrm-$(CONFIG_DRM_EXYNOS5433_DECON)       += exynos5433_drm_decon.o
index 7f55ba6771c6b94e5f45bee6bdec078c27c74f5b..011211e4167d41dce17d56e8baee0b168b37e7b6 100644 (file)
@@ -101,7 +101,7 @@ int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
        return 0;
 
 err:
-       list_for_each_entry_reverse(subdrv, &subdrv->list, list) {
+       list_for_each_entry_continue_reverse(subdrv, &exynos_drm_subdrv_list, list) {
                if (subdrv->close)
                        subdrv->close(dev, subdrv->dev, file);
        }
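The unwind loop previously iterated from &subdrv->list, treating the failing entry itself as the list head. What the replacement accomplishes:

        /* list_for_each_entry_continue_reverse() resumes from the current
         * position — the subdrv whose open() just failed — and walks
         * backwards over the real head, exynos_drm_subdrv_list, so only
         * the subdrvs that were successfully opened get closed again.   */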
index d614194644c814211ee069054a492fa1032c8fb0..81cc5537cf2577899bd5c1d011beb404584e08a6 100644 (file)
@@ -199,17 +199,6 @@ dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index)
        return exynos_fb->dma_addr[index];
 }
 
-static void exynos_drm_output_poll_changed(struct drm_device *dev)
-{
-       struct exynos_drm_private *private = dev->dev_private;
-       struct drm_fb_helper *fb_helper = private->fb_helper;
-
-       if (fb_helper)
-               drm_fb_helper_hotplug_event(fb_helper);
-       else
-               exynos_drm_fbdev_init(dev);
-}
-
 static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
        .fb_create = exynos_user_fb_create,
        .output_poll_changed = exynos_drm_output_poll_changed,
index 4ae860c44f1d8e052c1ce3a721e1c7348d14adf0..72d7c0b7c216531c7f3d7af2d1136ee5a3ecb01b 100644 (file)
@@ -317,3 +317,14 @@ void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
 
        drm_fb_helper_restore_fbdev_mode_unlocked(private->fb_helper);
 }
+
+void exynos_drm_output_poll_changed(struct drm_device *dev)
+{
+       struct exynos_drm_private *private = dev->dev_private;
+       struct drm_fb_helper *fb_helper = private->fb_helper;
+
+       if (fb_helper)
+               drm_fb_helper_hotplug_event(fb_helper);
+       else
+               exynos_drm_fbdev_init(dev);
+}
index e16d7f0ae1920eb944364747b3f009e7976654f4..330eef87f7180b1b726ff41933874a6568b0474d 100644 (file)
 #ifndef _EXYNOS_DRM_FBDEV_H_
 #define _EXYNOS_DRM_FBDEV_H_
 
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+
 int exynos_drm_fbdev_init(struct drm_device *dev);
-int exynos_drm_fbdev_reinit(struct drm_device *dev);
 void exynos_drm_fbdev_fini(struct drm_device *dev);
 void exynos_drm_fbdev_restore_mode(struct drm_device *dev);
+void exynos_drm_output_poll_changed(struct drm_device *dev);
+
+#else
+
+static inline int exynos_drm_fbdev_init(struct drm_device *dev)
+{
+       return 0;
+}
+
+static inline void exynos_drm_fbdev_fini(struct drm_device *dev)
+{
+}
+
+static inline void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
+{
+}
+
+#define exynos_drm_output_poll_changed (NULL)
+
+#endif
 
 #endif
index 51d484ae9f498c17a07ba43a0891ef8ebe0dd3a4..018449f8d55750d0546ac0891e78662a67a65ab3 100644 (file)
@@ -888,7 +888,7 @@ static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
         * clock. On these SoCs the bootloader may enable it but any
         * power domain off/on will reset it to disable state.
         */
-       if (ctx->driver_data != &exynos5_fimd_driver_data ||
+       if (ctx->driver_data != &exynos5_fimd_driver_data &&
            ctx->driver_data != &exynos5420_fimd_driver_data)
                return;
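The operator fix is worth spelling out: a pointer can never equal two different addresses at once, so the old '||' condition was true for every driver_data value and the function always returned before touching the clock. In sketch form:

        /* old: (data != A || data != B)  — always true, always bails out
         * new: (data != A && data != B)  — bails out only when the SoC is
         *      neither of the two Exynos5 variants that need the clock   */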
 
index 9869d70e9e54af32abb775dc0e17f0c1da83999b..a0def0be6d650de02fad25fc3cf3248d89b8fce0 100644 (file)
@@ -129,7 +129,7 @@ static void mic_set_path(struct exynos_mic *mic, bool enable)
        } else
                val &= ~(MIC0_RGB_MUX | MIC0_I80_MUX | MIC0_ON_MUX);
 
-       regmap_write(mic->sysreg, DSD_CFG_MUX, val);
+       ret = regmap_write(mic->sysreg, DSD_CFG_MUX, val);
        if (ret)
                DRM_ERROR("mic: Failed to read system register\n");
 }
@@ -457,6 +457,7 @@ static int exynos_mic_probe(struct platform_device *pdev)
                                                        "samsung,disp-syscon");
        if (IS_ERR(mic->sysreg)) {
                DRM_ERROR("mic: Failed to get system register.\n");
+               ret = PTR_ERR(mic->sysreg);
                goto err;
        }
 
index d86227236f5519f93099565622956f00530b7e24..50185ac347b24352202b7d38b314222438ca9b20 100644 (file)
 
 #include <drm/drmP.h>
 
-#include <drm/exynos_drm.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/exynos_drm.h>
 #include "exynos_drm_drv.h"
 #include "exynos_drm_crtc.h"
 #include "exynos_drm_fb.h"
@@ -57,11 +58,12 @@ static int exynos_plane_get_size(int start, unsigned length, unsigned last)
 }
 
 static void exynos_plane_mode_set(struct exynos_drm_plane_state *exynos_state)
-
 {
        struct drm_plane_state *state = &exynos_state->base;
-       struct drm_crtc *crtc = exynos_state->base.crtc;
-       struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+       struct drm_crtc *crtc = state->crtc;
+       struct drm_crtc_state *crtc_state =
+                       drm_atomic_get_existing_crtc_state(state->state, crtc);
+       struct drm_display_mode *mode = &crtc_state->adjusted_mode;
        int crtc_x, crtc_y;
        unsigned int crtc_w, crtc_h;
        unsigned int src_x, src_y;
index 7bb1f1aff932f3dc13b9884661e37982a5678c26..c52f9adf5e04c620cee1043edaa84f828ccffb43 100644 (file)
@@ -220,7 +220,7 @@ i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
  * FIXME: This is the old dp aux helper, gma500 is the last driver that needs to
  * be ported over to the new helper code in drm_dp_helper.c like i915 or radeon.
  */
-static int __deprecated
+static int
 i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
 {
        int error;
index 20e82008b8b6c683b9189d25699dc33a054b874a..6d2fb3f4ac628fddaaa5f6da8f724c3787330c5b 100644 (file)
@@ -758,10 +758,10 @@ static int i915_drm_resume(struct drm_device *dev)
                dev_priv->display.hpd_irq_setup(dev);
        spin_unlock_irq(&dev_priv->irq_lock);
 
-       intel_display_resume(dev);
-
        intel_dp_mst_resume(dev);
 
+       intel_display_resume(dev);
+
        /*
         * ... but also need to make sure that hotplug processing
         * doesn't cause havoc. Like in the driver load code we don't
@@ -792,7 +792,7 @@ static int i915_drm_resume(struct drm_device *dev)
 static int i915_drm_resume_early(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret = 0;
+       int ret;
 
        /*
         * We have a resume ordering issue with the snd-hda driver also
@@ -803,6 +803,36 @@ static int i915_drm_resume_early(struct drm_device *dev)
         * FIXME: This should be solved with a special hdmi sink device or
         * similar so that power domains can be employed.
         */
+
+       /*
+        * Note that we need to set the power state explicitly, since we
+        * powered off the device during freeze and the PCI core won't power
+        * it back up for us during thaw. Powering off the device during
+        * freeze is not a hard requirement though, and during the
+        * suspend/resume phases the PCI core makes sure we get here with the
+        * device powered on. So in case we change our freeze logic and keep
+        * the device powered we can also remove the following set power state
+        * call.
+        */
+       ret = pci_set_power_state(dev->pdev, PCI_D0);
+       if (ret) {
+               DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
+               goto out;
+       }
+
+       /*
+        * Note that pci_enable_device() first enables any parent bridge
+        * device and only then sets the power state for this device. The
+        * bridge enabling is a nop though, since bridge devices are resumed
+        * first. The order of enabling power and enabling the device is
+        * imposed by the PCI core as described above, so here we preserve the
+        * same order for the freeze/thaw phases.
+        *
+        * TODO: eventually we should remove pci_disable_device() /
+        * pci_enable_device() from suspend/resume. Due to how they
+        * depend on the device enable refcount we can't anyway depend on them
+        * disabling/enabling the device.
+        */
        if (pci_enable_device(dev->pdev)) {
                ret = -EIO;
                goto out;
index 10480939159c24435768a535b9f234fe00cbf387..daba7ebb969903d11dbf6d20a0e932bfc922f128 100644 (file)
@@ -2634,8 +2634,9 @@ struct drm_i915_cmd_table {
 
 /* WaRsDisableCoarsePowerGating:skl,bxt */
 #define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \
-                                                ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && \
-                                                 IS_SKL_REVID(dev, 0, SKL_REVID_F0)))
+                                                IS_SKL_GT3(dev) || \
+                                                IS_SKL_GT4(dev))
+
 /*
  * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
  * even when in MSI mode. This results in spurious interrupt warnings if the
index 3d31d3ac589e8a10f4c446294a738ee10955f581..dabc08987b5e20389fa6b57872902bd4706d71cc 100644 (file)
@@ -177,7 +177,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
                drm_clflush_virt_range(vaddr, PAGE_SIZE);
                kunmap_atomic(src);
 
-               page_cache_release(page);
+               put_page(page);
                vaddr += PAGE_SIZE;
        }
 
@@ -243,7 +243,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
                        set_page_dirty(page);
                        if (obj->madv == I915_MADV_WILLNEED)
                                mark_page_accessed(page);
-                       page_cache_release(page);
+                       put_page(page);
                        vaddr += PAGE_SIZE;
                }
                obj->dirty = 0;
@@ -2206,7 +2206,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
                if (obj->madv == I915_MADV_WILLNEED)
                        mark_page_accessed(page);
 
-               page_cache_release(page);
+               put_page(page);
        }
        obj->dirty = 0;
 
@@ -2346,7 +2346,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 err_pages:
        sg_mark_end(sg);
        for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
-               page_cache_release(sg_page_iter_page(&sg_iter));
+               put_page(sg_page_iter_page(&sg_iter));
        sg_free_table(st);
        kfree(st);
 
index 6be40f3ba2c79ea08b7b144b6a0de907767ddf93..4d30b60defda44adcbc24eea0be77d1e9278e804 100644 (file)
@@ -501,19 +501,24 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
        if (pvec != NULL) {
                struct mm_struct *mm = obj->userptr.mm->mm;
 
-               down_read(&mm->mmap_sem);
-               while (pinned < npages) {
-                       ret = get_user_pages_remote(work->task, mm,
-                                       obj->userptr.ptr + pinned * PAGE_SIZE,
-                                       npages - pinned,
-                                       !obj->userptr.read_only, 0,
-                                       pvec + pinned, NULL);
-                       if (ret < 0)
-                               break;
-
-                       pinned += ret;
+               ret = -EFAULT;
+               if (atomic_inc_not_zero(&mm->mm_users)) {
+                       down_read(&mm->mmap_sem);
+                       while (pinned < npages) {
+                               ret = get_user_pages_remote
+                                       (work->task, mm,
+                                        obj->userptr.ptr + pinned * PAGE_SIZE,
+                                        npages - pinned,
+                                        !obj->userptr.read_only, 0,
+                                        pvec + pinned, NULL);
+                               if (ret < 0)
+                                       break;
+
+                               pinned += ret;
+                       }
+                       up_read(&mm->mmap_sem);
+                       mmput(mm);
                }
-               up_read(&mm->mmap_sem);
        }
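The userptr worker now pins the address space before touching it. The key idiom is atomic_inc_not_zero() on mm_users: it succeeds only while the mm is still alive, and the matching mmput() releases the pin; a bare sketch:

        ret = -EFAULT;                          /* default if the mm died */
        if (atomic_inc_not_zero(&mm->mm_users)) {
                down_read(&mm->mmap_sem);
                /* ... fault pages in with get_user_pages_remote() ... */
                up_read(&mm->mmap_sem);
                mmput(mm);                      /* balance the pin */
        }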
 
        mutex_lock(&dev->struct_mutex);
@@ -683,7 +688,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
                        set_page_dirty(page);
 
                mark_page_accessed(page);
-               page_cache_release(page);
+               put_page(page);
        }
        obj->dirty = 0;
 
index d1a46ef5ab3f4b051b42599805266cd5fbd2bc7e..1c212205d0e7fd856e3298a10f6c039210bb570f 100644 (file)
@@ -1829,7 +1829,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        disable_rpm_wakeref_asserts(dev_priv);
 
-       for (;;) {
+       do {
                master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
                iir = I915_READ(VLV_IIR);
 
@@ -1857,7 +1857,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
 
                I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
                POSTING_READ(GEN8_MASTER_IRQ);
-       }
+       } while (0);
 
        enable_rpm_wakeref_asserts(dev_priv);
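Converting the for (;;) into do { ... } while (0) bounds the handler to a single pass: it services whatever interrupt state it saw at entry and returns, rather than looping for as long as new interrupts keep arriving.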
 
index f76cbf3e5d1e1999afa61b62bf9740c308e552bb..fffdac801d3b0da03abdd65b2c90e50fb1913e54 100644 (file)
@@ -2907,7 +2907,14 @@ enum skl_disp_power_wells {
 #define GEN6_RP_STATE_CAP      _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998)
 #define BXT_RP_STATE_CAP        _MMIO(0x138170)
 
-#define INTERVAL_1_28_US(us)   (((us) * 100) >> 7)
+/*
+ * Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS
+ * 8300) freezing up around GPU hangs. Looks as if even
+ * scheduling/timer interrupts start misbehaving if the RPS
+ * EI/thresholds are "bad", leading to a very sluggish or even
+ * frozen machine.
+ */
+#define INTERVAL_1_28_US(us)   roundup(((us) * 100) >> 7, 25)
 #define INTERVAL_1_33_US(us)   (((us) * 3)   >> 2)
 #define INTERVAL_0_833_US(us)  (((us) * 6) / 5)
 #define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
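The roundup makes the programmed intervals multiples of 25, per the comment above. Working the macro through with an illustrative 12500 us input:

        /* (12500 * 100) >> 7  = 9765   (old INTERVAL_1_28_US value)
         * roundup(9765, 25)   = 9775   (new value, a multiple of 25) */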
index 62de9f4bce09959a8deb756c0e51ecb4e202315a..3b57bf06abe8598c1c3b6fba0dc8e19b7f192619 100644 (file)
@@ -443,9 +443,17 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
        } else if (IS_BROADWELL(dev_priv)) {
                ddi_translations_fdi = bdw_ddi_translations_fdi;
                ddi_translations_dp = bdw_ddi_translations_dp;
-               ddi_translations_edp = bdw_ddi_translations_edp;
+
+               if (dev_priv->edp_low_vswing) {
+                       ddi_translations_edp = bdw_ddi_translations_edp;
+                       n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+               } else {
+                       ddi_translations_edp = bdw_ddi_translations_dp;
+                       n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
+               }
+
                ddi_translations_hdmi = bdw_ddi_translations_hdmi;
-               n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+
                n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
                n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
                hdmi_default_entry = 7;
@@ -3201,12 +3209,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
        intel_ddi_clock_get(encoder, pipe_config);
 }
 
-static void intel_ddi_destroy(struct drm_encoder *encoder)
-{
-       /* HDMI has nothing special to destroy, so we can go with this. */
-       intel_dp_encoder_destroy(encoder);
-}
-
 static bool intel_ddi_compute_config(struct intel_encoder *encoder,
                                     struct intel_crtc_state *pipe_config)
 {
@@ -3225,7 +3227,8 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
 }
 
 static const struct drm_encoder_funcs intel_ddi_funcs = {
-       .destroy = intel_ddi_destroy,
+       .reset = intel_dp_encoder_reset,
+       .destroy = intel_dp_encoder_destroy,
 };
 
 static struct intel_connector *
@@ -3324,6 +3327,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
        intel_encoder->post_disable = intel_ddi_post_disable;
        intel_encoder->get_hw_state = intel_ddi_get_hw_state;
        intel_encoder->get_config = intel_ddi_get_config;
+       intel_encoder->suspend = intel_dp_encoder_suspend;
 
        intel_dig_port->port = port;
        intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
index 6e0d8283daa66d6383516c28542850d5c7364a48..182f84937345d5b3d778d9521ca17f4b04608037 100644 (file)
@@ -13351,6 +13351,9 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
        }
 
        for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               if (state->legacy_cursor_update)
+                       continue;
+
                ret = intel_crtc_wait_for_pending_flips(crtc);
                if (ret)
                        return ret;
index f069a82deb57a42a814e2e5a30f5f1b1e4c51ad8..412a34c39522fc8474b87eaa02a5532f8a32d859 100644 (file)
@@ -4898,7 +4898,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
        kfree(intel_dig_port);
 }
 
-static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
+void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
 
@@ -4940,7 +4940,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
        edp_panel_vdd_schedule_off(intel_dp);
 }
 
-static void intel_dp_encoder_reset(struct drm_encoder *encoder)
+void intel_dp_encoder_reset(struct drm_encoder *encoder)
 {
        struct intel_dp *intel_dp;
 
index a2bd698fe2f78f801813c99b097d7576e2f0c869..937e77228466eb22e66ac765d001078a418badfd 100644 (file)
@@ -506,6 +506,8 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct drm_device *dev = connector->dev;
 
+       intel_connector->unregister(intel_connector);
+
        /* need to nuke the connector */
        drm_modeset_lock_all(dev);
        if (connector->state->crtc) {
@@ -519,11 +521,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 
                WARN(ret, "Disabling mst crtc failed with %i\n", ret);
        }
-       drm_modeset_unlock_all(dev);
 
-       intel_connector->unregister(intel_connector);
-
-       drm_modeset_lock_all(dev);
        intel_connector_remove_from_fbdev(intel_connector);
        drm_connector_cleanup(connector);
        drm_modeset_unlock_all(dev);
index 4c027d69fac92c99d6a67b6a98f98d9f67d84e60..7d3af3a72abea7ac557f5f107aaf597bc38d5165 100644 (file)
@@ -1238,6 +1238,8 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
 void intel_dp_start_link_train(struct intel_dp *intel_dp);
 void intel_dp_stop_link_train(struct intel_dp *intel_dp);
 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+void intel_dp_encoder_reset(struct drm_encoder *encoder);
+void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
 void intel_dp_encoder_destroy(struct drm_encoder *encoder);
 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
 bool intel_dp_compute_config(struct intel_encoder *encoder,
index a0d8daed24701cf279ec6a452538eb814e092022..1ab6f687f6408ff9d72956fec2ebee33640c1755 100644 (file)
@@ -1415,8 +1415,16 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
                                hdmi_to_dig_port(intel_hdmi));
        }
 
-       if (!live_status)
-               DRM_DEBUG_KMS("Live status not up!");
+       if (!live_status) {
+               DRM_DEBUG_KMS("HDMI live status down\n");
+               /*
+                * Live status register is not reliable on all intel platforms.
+                * So consider live_status only for certain platforms, for
+                * others, read EDID to determine presence of sink.
+                */
+               if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv))
+                       live_status = true;
+       }
 
        intel_hdmi_unset_edid(connector);
 
index 6a978ce8024436251009979cf1b1b47fa14ff549..5c6080fd09688aee82c24841fa9e4c114164f384 100644 (file)
@@ -841,11 +841,11 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
                if (unlikely(total_bytes > remain_usable)) {
                        /*
                         * The base request will fit but the reserved space
-                        * falls off the end. So only need to to wait for the
-                        * reserved size after flushing out the remainder.
+                        * falls off the end. So no immediate wrap is needed;
+                        * we only have to wait for the reserved amount of
+                        * space from the start of the ringbuffer.
                         */
                        wait_bytes = remain_actual + ringbuf->reserved_size;
-                       need_wrap = true;
                } else if (total_bytes > ringbuf->space) {
                        /* No wrapping required, just waiting. */
                        wait_bytes = total_bytes;
@@ -1913,15 +1913,18 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
        struct intel_ringbuffer *ringbuf = request->ringbuf;
        int ret;
 
-       ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
+       ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS);
        if (ret)
                return ret;
 
+       /* We're using qword write, seqno should be aligned to 8 bytes. */
+       BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
+
        /* w/a for post sync ops following a GPGPU operation we
         * need a prior CS_STALL, which is emitted by the flush
         * following the batch.
         */
-       intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(5));
+       intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
        intel_logical_ring_emit(ringbuf,
                                (PIPE_CONTROL_GLOBAL_GTT_IVB |
                                 PIPE_CONTROL_CS_STALL |
@@ -1929,7 +1932,10 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
        intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring));
        intel_logical_ring_emit(ringbuf, 0);
        intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+       /* We're thrashing one dword of HWS. */
+       intel_logical_ring_emit(ringbuf, 0);
        intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
+       intel_logical_ring_emit(ringbuf, MI_NOOP);
        return intel_logical_ring_advance_and_submit(request);
 }
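The new BUILD_BUG_ON encodes the alignment argument directly: the hardware status page is indexed in dwords, so an even index lands on an 8-byte boundary, which is exactly what the qword seqno write requires.

        /* byte offset = I915_GEM_HWS_INDEX * 4; with an even index,
         * offset % 8 == 0, satisfying the qword-write alignment rule. */
        BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);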
 
index 30a8403a8f4fbaa25e8f55ca3655596bc298768e..cd9fe609aefbc2487ce94ab01e0d495dd12d8167 100644 (file)
@@ -478,11 +478,8 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
         * and as part of the cleanup in the hw state restore we also redisable
         * the vga plane.
         */
-       if (!HAS_PCH_SPLIT(dev)) {
-               drm_modeset_lock_all(dev);
+       if (!HAS_PCH_SPLIT(dev))
                intel_display_resume(dev);
-               drm_modeset_unlock_all(dev);
-       }
 
        dev_priv->modeset_restore = MODESET_DONE;
 
index 347d4df49a9bf37cc751d9bfdedde6701709b2f8..8ed3cf34f82d31bbefa98847d1413f625e2b2f74 100644 (file)
@@ -2876,25 +2876,28 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
                             const struct drm_plane_state *pstate,
                             int y)
 {
-       struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+       struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
        struct drm_framebuffer *fb = pstate->fb;
+       uint32_t width = 0, height = 0;
+
+       width = drm_rect_width(&intel_pstate->src) >> 16;
+       height = drm_rect_height(&intel_pstate->src) >> 16;
+
+       if (intel_rotation_90_or_270(pstate->rotation))
+               swap(width, height);
 
        /* for planar format */
        if (fb->pixel_format == DRM_FORMAT_NV12) {
                if (y)  /* y-plane data rate */
-                       return intel_crtc->config->pipe_src_w *
-                               intel_crtc->config->pipe_src_h *
+                       return width * height *
                                drm_format_plane_cpp(fb->pixel_format, 0);
                else    /* uv-plane data rate */
-                       return (intel_crtc->config->pipe_src_w/2) *
-                               (intel_crtc->config->pipe_src_h/2) *
+                       return (width / 2) * (height / 2) *
                                drm_format_plane_cpp(fb->pixel_format, 1);
        }
 
        /* for packed formats */
-       return intel_crtc->config->pipe_src_w *
-               intel_crtc->config->pipe_src_h *
-               drm_format_plane_cpp(fb->pixel_format, 0);
+       return width * height * drm_format_plane_cpp(fb->pixel_format, 0);
 }
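The watermark code now sizes planes from the source rectangle, which drm stores in 16.16 fixed point; the >> 16 shifts recover whole pixels, and the swap() accounts for 90/270 degree scanout. For example:

        /* a 1920-pixel-wide source is stored as 1920 << 16 = 0x07800000,
         * so drm_rect_width(&intel_pstate->src) >> 16 == 1920; after a
         * 90/270 rotation width and height trade places, hence swap(). */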
 
 /*
@@ -2973,8 +2976,9 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
                struct drm_framebuffer *fb = plane->state->fb;
                int id = skl_wm_plane_id(intel_plane);
 
-               if (fb == NULL)
+               if (!to_intel_plane_state(plane->state)->visible)
                        continue;
+
                if (plane->type == DRM_PLANE_TYPE_CURSOR)
                        continue;
 
@@ -3000,7 +3004,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
                uint16_t plane_blocks, y_plane_blocks = 0;
                int id = skl_wm_plane_id(intel_plane);
 
-               if (pstate->fb == NULL)
+               if (!to_intel_plane_state(pstate)->visible)
                        continue;
                if (plane->type == DRM_PLANE_TYPE_CURSOR)
                        continue;
@@ -3123,26 +3127,36 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 {
        struct drm_plane *plane = &intel_plane->base;
        struct drm_framebuffer *fb = plane->state->fb;
+       struct intel_plane_state *intel_pstate =
+                                       to_intel_plane_state(plane->state);
        uint32_t latency = dev_priv->wm.skl_latency[level];
        uint32_t method1, method2;
        uint32_t plane_bytes_per_line, plane_blocks_per_line;
        uint32_t res_blocks, res_lines;
        uint32_t selected_result;
        uint8_t cpp;
+       uint32_t width = 0, height = 0;
 
-       if (latency == 0 || !cstate->base.active || !fb)
+       if (latency == 0 || !cstate->base.active || !intel_pstate->visible)
                return false;
 
+       width = drm_rect_width(&intel_pstate->src) >> 16;
+       height = drm_rect_height(&intel_pstate->src) >> 16;
+
+       if (intel_rotation_90_or_270(plane->state->rotation))
+               swap(width, height);
+
        cpp = drm_format_plane_cpp(fb->pixel_format, 0);
        method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
                                 cpp, latency);
        method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
                                 cstate->base.adjusted_mode.crtc_htotal,
-                                cstate->pipe_src_w,
-                                cpp, fb->modifier[0],
+                                width,
+                                cpp,
+                                fb->modifier[0],
                                 latency);
 
-       plane_bytes_per_line = cstate->pipe_src_w * cpp;
+       plane_bytes_per_line = width * cpp;
        plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
 
        if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
index 45ce45a5e122046047b2fadc75f5f6f89f34c379..9121646d7c4dd98c4c32ed74343948e662059d06 100644 (file)
@@ -968,7 +968,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
 
        /* WaForceContextSaveRestoreNonCoherent:skl,bxt */
        tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
-       if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) ||
+       if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
            IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
                tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
        WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
@@ -1085,7 +1085,8 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
                WA_SET_BIT_MASKED(HIZ_CHICKEN,
                                  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
 
-       if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) {
+       /* This is tied to WaForceContextSaveRestoreNonCoherent */
+       if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
                /*
                 *Use Force Non-Coherent whenever executing a 3D context. This
                 * is a workaround for a possible hang in the unlikely event
@@ -2090,10 +2091,12 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj = ringbuf->obj;
+       /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
+       unsigned flags = PIN_OFFSET_BIAS | 4096;
        int ret;
 
        if (HAS_LLC(dev_priv) && !obj->stolen) {
-               ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 0);
+               ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags);
                if (ret)
                        return ret;
 
@@ -2109,7 +2112,8 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
                        return -ENOMEM;
                }
        } else {
-               ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
+               ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
+                                           flags | PIN_MAPPABLE);
                if (ret)
                        return ret;
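PIN_OFFSET_BIAS ORs a minimum placement offset into the pin flags, so the ringbuffer can land anywhere in the GGTT except the first page; the comment above records why offset 0 is avoided. The flag's meaning in one line, with the semantics assumed from the i915 pinning API:

        unsigned flags = PIN_OFFSET_BIAS | 4096;   /* place at offset >= 4096 */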
 
@@ -2454,11 +2458,11 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
                if (unlikely(total_bytes > remain_usable)) {
                        /*
                         * The base request will fit but the reserved space
-                        * falls off the end. So only need to to wait for the
-                        * reserved size after flushing out the remainder.
+                        * falls off the end. So no immediate wrap is needed;
+                        * we only have to wait for the reserved amount of
+                        * space from the start of the ringbuffer.
                         */
                        wait_bytes = remain_actual + ringbuf->reserved_size;
-                       need_wrap = true;
                } else if (total_bytes > ringbuf->space) {
                        /* No wrapping required, just waiting. */
                        wait_bytes = total_bytes;
index 436d8f2b86823d30bd7f33bc2247d7d905a787ab..68b6f69aa6820d88952173e4aca801ef56370a7c 100644 (file)
@@ -1189,7 +1189,11 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
        } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                dev_priv->uncore.funcs.force_wake_get =
                        fw_domains_get_with_thread_status;
-               dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+               if (IS_HASWELL(dev))
+                       dev_priv->uncore.funcs.force_wake_put =
+                               fw_domains_put_with_fifo;
+               else
+                       dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
        } else if (IS_IVYBRIDGE(dev)) {
index 2a95d10e9d928d45877d5491c0b94e6ec18711d3..a24631fdf4add310de0eeadc7001c6a12a711866 100644 (file)
@@ -225,8 +225,6 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
        if (!iores)
                return -ENXIO;
 
-       platform_set_drvdata(pdev, hdmi);
-
        encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
        /*
         * If we failed to find the CRTC(s) which this encoder is
@@ -245,7 +243,16 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
        drm_encoder_init(drm, encoder, &dw_hdmi_imx_encoder_funcs,
                         DRM_MODE_ENCODER_TMDS, NULL);
 
-       return dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
+       ret = dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
+
+       /*
+        * If dw_hdmi_bind() fails we'll never call dw_hdmi_unbind(),
+        * which would have called the encoder cleanup.  Do it manually.
+        */
+       if (ret)
+               drm_encoder_cleanup(encoder);
+
+       return ret;
 }
 
 static void dw_hdmi_imx_unbind(struct device *dev, struct device *master,
index 9876e0f0c3e1833592308f1a8a38de5c3e44f2b0..e26dcdec2aba0e51233910c82f1788e49ea6f2ce 100644 (file)
@@ -326,7 +326,6 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
 {
        struct imx_drm_device *imxdrm = drm->dev_private;
        struct imx_drm_crtc *imx_drm_crtc;
-       int ret;
 
        /*
         * The vblank arrays are dimensioned by MAX_CRTC - we can't
@@ -351,10 +350,6 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
 
        *new_crtc = imx_drm_crtc;
 
-       ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256);
-       if (ret)
-               goto err_register;
-
        drm_crtc_helper_add(crtc,
                        imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
 
@@ -362,11 +357,6 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
                        imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs, NULL);
 
        return 0;
-
-err_register:
-       imxdrm->crtc[--imxdrm->pipes] = NULL;
-       kfree(imx_drm_crtc);
-       return ret;
 }
 EXPORT_SYMBOL_GPL(imx_drm_add_crtc);
 
index 588827844f30c7717b4afbb900de196411752990..681ec6eb77d916fc3be6a47d783fab54ed1f07fb 100644 (file)
@@ -72,22 +72,101 @@ static inline int calc_bandwidth(int width, int height, unsigned int vref)
 int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
                       int x, int y)
 {
-       struct drm_gem_cma_object *cma_obj;
-       unsigned long eba;
-       int active;
-
-       cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
-       if (!cma_obj) {
-               DRM_DEBUG_KMS("entry is null.\n");
-               return -EFAULT;
+       struct drm_gem_cma_object *cma_obj[3];
+       unsigned long eba, ubo, vbo;
+       int active, i;
+
+       for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
+               cma_obj[i] = drm_fb_cma_get_gem_obj(fb, i);
+               if (!cma_obj[i]) {
+                       DRM_DEBUG_KMS("plane %d entry is null.\n", i);
+                       return -EFAULT;
+               }
        }
 
-       dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d",
-               &cma_obj->paddr, x, y);
-
-       eba = cma_obj->paddr + fb->offsets[0] +
+       eba = cma_obj[0]->paddr + fb->offsets[0] +
              fb->pitches[0] * y + (fb->bits_per_pixel >> 3) * x;
 
+       if (eba & 0x7) {
+               DRM_DEBUG_KMS("base address must be a multiple of 8.\n");
+               return -EINVAL;
+       }
+
+       if (fb->pitches[0] < 1 || fb->pitches[0] > 16384) {
+               DRM_DEBUG_KMS("pitches out of range.\n");
+               return -EINVAL;
+       }
+
+       if (ipu_plane->enabled && fb->pitches[0] != ipu_plane->stride[0]) {
+               DRM_DEBUG_KMS("pitches must not change while plane is enabled.\n");
+               return -EINVAL;
+       }
+
+       ipu_plane->stride[0] = fb->pitches[0];
+
+       switch (fb->pixel_format) {
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+               /*
+                * Multiplanar formats have to meet the following restrictions:
+                * - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO
+                * - EBA, UBO and VBO are a multiple of 8
+                * - UBO and VBO are unsigned and not larger than 0xfffff8
+                * - Only EBA may be changed while scanout is active
+                * - The strides of U and V planes must be identical.
+                */
+               ubo = cma_obj[1]->paddr + fb->offsets[1] +
+                     fb->pitches[1] * y / 2 + x / 2 - eba;
+               vbo = cma_obj[2]->paddr + fb->offsets[2] +
+                     fb->pitches[2] * y / 2 + x / 2 - eba;
+
+               if ((ubo & 0x7) || (vbo & 0x7)) {
+                       DRM_DEBUG_KMS("U/V buffer offsets must be a multiple of 8.\n");
+                       return -EINVAL;
+               }
+
+               if ((ubo > 0xfffff8) || (vbo > 0xfffff8)) {
+                       DRM_DEBUG_KMS("U/V buffer offsets must be positive and not larger than 0xfffff8.\n");
+                       return -EINVAL;
+               }
+
+               if (ipu_plane->enabled && ((ipu_plane->u_offset != ubo) ||
+                                          (ipu_plane->v_offset != vbo))) {
+                       DRM_DEBUG_KMS("U/V buffer offsets must not change while plane is enabled.\n");
+                       return -EINVAL;
+               }
+
+               if (fb->pitches[1] != fb->pitches[2]) {
+                       DRM_DEBUG_KMS("U/V pitches must be identical.\n");
+                       return -EINVAL;
+               }
+
+               if (fb->pitches[1] < 1 || fb->pitches[1] > 16384) {
+                       DRM_DEBUG_KMS("U/V pitches out of range.\n");
+                       return -EINVAL;
+               }
+
+               if (ipu_plane->enabled &&
+                   (ipu_plane->stride[1] != fb->pitches[1])) {
+                       DRM_DEBUG_KMS("U/V pitches must not change while plane is enabled.\n");
+                       return -EINVAL;
+               }
+
+               ipu_plane->u_offset = ubo;
+               ipu_plane->v_offset = vbo;
+               ipu_plane->stride[1] = fb->pitches[1];
+
+               dev_dbg(ipu_plane->base.dev->dev,
+                       "phys = %pad %pad %pad, x = %d, y = %d",
+                       &cma_obj[0]->paddr, &cma_obj[1]->paddr,
+                       &cma_obj[2]->paddr, x, y);
+               break;
+       default:
+               dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d",
+                       &cma_obj[0]->paddr, x, y);
+               break;
+       }
+
        if (ipu_plane->enabled) {
                active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
                ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
@@ -201,12 +280,6 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
                }
        }
 
-       ret = ipu_dmfc_init_channel(ipu_plane->dmfc, crtc_w);
-       if (ret) {
-               dev_err(dev, "initializing dmfc channel failed with %d\n", ret);
-               return ret;
-       }
-
        ret = ipu_dmfc_alloc_bandwidth(ipu_plane->dmfc,
                        calc_bandwidth(crtc_w, crtc_h,
                                       calc_vref(mode)), 64);
@@ -215,6 +288,8 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
                return ret;
        }
 
+       ipu_dmfc_config_wait4eot(ipu_plane->dmfc, crtc_w);
+
        ipu_cpmem_zero(ipu_plane->ipu_ch);
        ipu_cpmem_set_resolution(ipu_plane->ipu_ch, src_w, src_h);
        ret = ipu_cpmem_set_fmt(ipu_plane->ipu_ch, fb->pixel_format);
@@ -233,6 +308,18 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
        if (interlaced)
                ipu_cpmem_interlaced_scan(ipu_plane->ipu_ch, fb->pitches[0]);
 
+       if (fb->pixel_format == DRM_FORMAT_YUV420) {
+               ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
+                                             ipu_plane->stride[1],
+                                             ipu_plane->u_offset,
+                                             ipu_plane->v_offset);
+       } else if (fb->pixel_format == DRM_FORMAT_YVU420) {
+               ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
+                                             ipu_plane->stride[1],
+                                             ipu_plane->v_offset,
+                                             ipu_plane->u_offset);
+       }
+
        ipu_plane->w = src_w;
        ipu_plane->h = src_h;
 
index 3a443b413c60caa9734883f7aaa95a69bb7f3e29..4448fd4ad4eb503422a750f374c6b20a64b9f643 100644 (file)
@@ -29,6 +29,10 @@ struct ipu_plane {
        int                     w;
        int                     h;
 
+       unsigned int            u_offset;
+       unsigned int            v_offset;
+       unsigned int            stride[2];
+
        bool                    enabled;
 };
 
index 16641cec18a28dbb1a5af0dd344313eea56b7646..b5370cb56e3c2c4fbb539ffd9f0156c569af38c1 100644 (file)
@@ -11,6 +11,7 @@ struct nvkm_device_tegra {
 
        struct reset_control *rst;
        struct clk *clk;
+       struct clk *clk_ref;
        struct clk *clk_pwr;
 
        struct regulator *vdd;
@@ -36,6 +37,10 @@ struct nvkm_device_tegra_func {
         * bypassed). A value of 0 means an IOMMU is never used.
         */
        u8 iommu_bit;
+       /*
+        * Whether the chip requires a reference clock
+        */
+       bool require_ref_clk;
 };
 
 int nvkm_device_tegra_new(const struct nvkm_device_tegra_func *,
index ae96ebc490fb2b80cbed26bacca113325c52fc82..e81aefe5ffa7c3c278cdda8253cdd228f228369e 100644 (file)
@@ -1276,18 +1276,18 @@ nouveau_connector_create(struct drm_device *dev, int index)
                break;
        default:
                if (disp->dithering_mode) {
+                       nv_connector->dithering_mode = DITHERING_MODE_AUTO;
                        drm_object_attach_property(&connector->base,
                                                   disp->dithering_mode,
                                                   nv_connector->
                                                   dithering_mode);
-                       nv_connector->dithering_mode = DITHERING_MODE_AUTO;
                }
                if (disp->dithering_depth) {
+                       nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
                        drm_object_attach_property(&connector->base,
                                                   disp->dithering_depth,
                                                   nv_connector->
                                                   dithering_depth);
-                       nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
                }
                break;
        }
index 2dfe58af12e4ee05103d14946dc4a0d001f7f9c6..4c4cc2260257cc1f8fb84f954ab6449d6f11c277 100644 (file)
@@ -55,6 +55,11 @@ static const struct nvkm_device_tegra_func gk20a_platform_data = {
        .iommu_bit = 34,
 };
 
+static const struct nvkm_device_tegra_func gm20b_platform_data = {
+       .iommu_bit = 34,
+       .require_ref_clk = true,
+};
+
 static const struct of_device_id nouveau_platform_match[] = {
        {
                .compatible = "nvidia,gk20a",
@@ -62,7 +67,7 @@ static const struct of_device_id nouveau_platform_match[] = {
        },
        {
                .compatible = "nvidia,gm20b",
-               .data = &gk20a_platform_data,
+               .data = &gm20b_platform_data,
        },
        { }
 };
index 9afa5f3e3c1c2d23e57ba076025bc8de1c2d2e25..ec12efb4689a7ea8a50fd7ee8626f5df4440f0a3 100644 (file)
@@ -35,6 +35,11 @@ nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
        ret = clk_prepare_enable(tdev->clk);
        if (ret)
                goto err_clk;
+       if (tdev->clk_ref) {
+               ret = clk_prepare_enable(tdev->clk_ref);
+               if (ret)
+                       goto err_clk_ref;
+       }
        ret = clk_prepare_enable(tdev->clk_pwr);
        if (ret)
                goto err_clk_pwr;
@@ -57,6 +62,9 @@ nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
 err_clamp:
        clk_disable_unprepare(tdev->clk_pwr);
 err_clk_pwr:
+       if (tdev->clk_ref)
+               clk_disable_unprepare(tdev->clk_ref);
+err_clk_ref:
        clk_disable_unprepare(tdev->clk);
 err_clk:
        regulator_disable(tdev->vdd);
@@ -71,6 +79,8 @@ nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
        udelay(10);
 
        clk_disable_unprepare(tdev->clk_pwr);
+       if (tdev->clk_ref)
+               clk_disable_unprepare(tdev->clk_ref);
        clk_disable_unprepare(tdev->clk);
        udelay(10);
 
@@ -274,6 +284,13 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
                goto free;
        }
 
+       if (func->require_ref_clk)
+               tdev->clk_ref = devm_clk_get(&pdev->dev, "ref");
+       if (IS_ERR(tdev->clk_ref)) {
+               ret = PTR_ERR(tdev->clk_ref);
+               goto free;
+       }
+
        tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
        if (IS_ERR(tdev->clk_pwr)) {
                ret = PTR_ERR(tdev->clk_pwr);
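
The optional reference clock slots into the usual goto-based unwind: each successfully enabled resource gets a label, so a later failure releases everything acquired so far in reverse order. A condensed sketch of that shape (same structure as the hunk above, not the full driver function):

static int tegra_power_up(struct nvkm_device_tegra *tdev)
{
	int ret;

	ret = regulator_enable(tdev->vdd);
	if (ret)
		return ret;
	ret = clk_prepare_enable(tdev->clk);
	if (ret)
		goto err_clk;
	if (tdev->clk_ref) {                    /* only present on gm20b */
		ret = clk_prepare_enable(tdev->clk_ref);
		if (ret)
			goto err_clk_ref;
	}
	ret = clk_prepare_enable(tdev->clk_pwr);
	if (ret)
		goto err_clk_pwr;
	return 0;

err_clk_pwr:
	if (tdev->clk_ref)
		clk_disable_unprepare(tdev->clk_ref);
err_clk_ref:
	clk_disable_unprepare(tdev->clk);
err_clk:
	regulator_disable(tdev->vdd);
	return ret;
}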
index c56a886229f196e86340a726e5b64431ba30dac2..b2de290da16feedf3834f31a32879ab315a586b5 100644 (file)
@@ -1832,6 +1832,8 @@ gf100_gr_init(struct gf100_gr *gr)
 
        gf100_gr_mmio(gr, gr->func->mmio);
 
+       nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
+
        memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
        for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
                do {
index 43e5f503d1c5c4dd9662a7b20aec4db5c061817d..030409a3ee4e014ab512b5141d4b4e0ededbfe5d 100644 (file)
@@ -375,10 +375,15 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
 
        qxl_bo_kunmap(user_bo);
 
+       qcrtc->cur_x += qcrtc->hot_spot_x - hot_x;
+       qcrtc->cur_y += qcrtc->hot_spot_y - hot_y;
+       qcrtc->hot_spot_x = hot_x;
+       qcrtc->hot_spot_y = hot_y;
+
        cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
        cmd->type = QXL_CURSOR_SET;
-       cmd->u.set.position.x = qcrtc->cur_x;
-       cmd->u.set.position.y = qcrtc->cur_y;
+       cmd->u.set.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
+       cmd->u.set.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
 
        cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
 
@@ -441,8 +446,8 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
 
        cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
        cmd->type = QXL_CURSOR_MOVE;
-       cmd->u.position.x = qcrtc->cur_x;
-       cmd->u.position.y = qcrtc->cur_y;
+       cmd->u.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
+       cmd->u.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
        qxl_release_unmap(qdev, release, &cmd->release_info);
 
        qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
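
The hotspot bookkeeping is worth spelling out: qxl stores the top-left corner of the cursor image in cur_x/cur_y, while the device wants the hotspot position, so every command sends cur_x + hot_spot_x. When userspace changes the hotspot within the image, the corner is shifted by the old-minus-new delta so the on-screen pointer does not move. A tiny standalone illustration:

struct cursor {
	int cur_x, cur_y;              /* top-left corner of the image */
	int hot_spot_x, hot_spot_y;    /* hotspot within the image */
};

/* Adopt a new hotspot without moving the visible pointer position:
 * corner + hotspot must stay constant across the change. */
static void set_hotspot(struct cursor *c, int hot_x, int hot_y)
{
	c->cur_x += c->hot_spot_x - hot_x;
	c->cur_y += c->hot_spot_y - hot_y;
	c->hot_spot_x = hot_x;
	c->hot_spot_y = hot_y;
}

For example, with the corner at (100, 100) and hotspot (0, 0), switching to hotspot (4, 4) moves the corner to (96, 96); the device position 96 + 4 = 100 is unchanged.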
index 6e6b9b1519b8edd376fbed8341dbe99374dba8cf..3f3897eb458ce326f39a32d6b324c4361d70747d 100644 (file)
@@ -135,6 +135,8 @@ struct qxl_crtc {
        int index;
        int cur_x;
        int cur_y;
+       int hot_spot_x;
+       int hot_spot_y;
 };
 
 struct qxl_output {
index cf61e0856f4af2faa699118cf08bbf097cc24763..b80b08f71cb46e8d69d7bd94f6d951008267500e 100644 (file)
@@ -275,13 +275,15 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
                if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
                        atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
                atombios_blank_crtc(crtc, ATOM_DISABLE);
-               drm_vblank_on(dev, radeon_crtc->crtc_id);
+               if (dev->num_crtcs > radeon_crtc->crtc_id)
+                       drm_vblank_on(dev, radeon_crtc->crtc_id);
                radeon_crtc_load_lut(crtc);
                break;
        case DRM_MODE_DPMS_STANDBY:
        case DRM_MODE_DPMS_SUSPEND:
        case DRM_MODE_DPMS_OFF:
-               drm_vblank_off(dev, radeon_crtc->crtc_id);
+               if (dev->num_crtcs > radeon_crtc->crtc_id)
+                       drm_vblank_off(dev, radeon_crtc->crtc_id);
                if (radeon_crtc->enabled)
                        atombios_blank_crtc(crtc, ATOM_ENABLE);
                if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
index edd05cdb0cd843dfe9ee53ecbac7f4a8908d3dd2..587cae4e73c9abeeb65d2187b9e700bb8f0fa442 100644 (file)
@@ -310,6 +310,10 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
            && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
                adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
 
+       /* vertical FP must be at least 1 */
+       if (mode->crtc_vsync_start == mode->crtc_vdisplay)
+               adjusted_mode->crtc_vsync_start++;
+
        /* get the native mode for scaling */
        if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
                radeon_panel_mode_fixup(encoder, adjusted_mode);
index 76c4bdf21b2071043657aeebe152a06cd19600f3..34f7a29d9366755c12d9d270ce4c170c67219180 100644 (file)
@@ -2608,10 +2608,152 @@ static void evergreen_agp_enable(struct radeon_device *rdev)
        WREG32(VM_CONTEXT1_CNTL, 0);
 }
 
+static const unsigned ni_dig_offsets[] =
+{
+       NI_DIG0_REGISTER_OFFSET,
+       NI_DIG1_REGISTER_OFFSET,
+       NI_DIG2_REGISTER_OFFSET,
+       NI_DIG3_REGISTER_OFFSET,
+       NI_DIG4_REGISTER_OFFSET,
+       NI_DIG5_REGISTER_OFFSET
+};
+
+static const unsigned ni_tx_offsets[] =
+{
+       NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1,
+       NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1,
+       NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1,
+       NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1,
+       NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1,
+       NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1
+};
+
+static const unsigned evergreen_dp_offsets[] =
+{
+       EVERGREEN_DP0_REGISTER_OFFSET,
+       EVERGREEN_DP1_REGISTER_OFFSET,
+       EVERGREEN_DP2_REGISTER_OFFSET,
+       EVERGREEN_DP3_REGISTER_OFFSET,
+       EVERGREEN_DP4_REGISTER_OFFSET,
+       EVERGREEN_DP5_REGISTER_OFFSET
+};
+
+
+/*
+ * Assumption: EVERGREEN_CRTC_MASTER_EN is enabled for the requested crtc.
+ * We go from crtc to connector, which is not reliable since it is the
+ * opposite of the real direction. If the crtc is enabled, find the dig_fe
+ * which selects this crtc and ensure that it is enabled. If such a dig_fe
+ * is found, find the dig_be which selects that dig_fe and ensure that it
+ * is enabled and in DP_SST mode.
+ * If UNIPHY_PLL_CONTROL1.enable is set, we should disconnect the timing
+ * from the DP symbol clocks.
+ */
+static bool evergreen_is_dp_sst_stream_enabled(struct radeon_device *rdev,
+                                              unsigned crtc_id, unsigned *ret_dig_fe)
+{
+       unsigned i;
+       unsigned dig_fe;
+       unsigned dig_be;
+       unsigned dig_en_be;
+       unsigned uniphy_pll;
+       unsigned digs_fe_selected;
+       unsigned dig_be_mode;
+       unsigned dig_fe_mask;
+       bool is_enabled = false;
+       bool found_crtc = false;
+
+       /* loop through all running dig_fe to find selected crtc */
+       for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
+               dig_fe = RREG32(NI_DIG_FE_CNTL + ni_dig_offsets[i]);
+               if (dig_fe & NI_DIG_FE_CNTL_SYMCLK_FE_ON &&
+                   crtc_id == NI_DIG_FE_CNTL_SOURCE_SELECT(dig_fe)) {
+                       /* found running pipe */
+                       found_crtc = true;
+                       dig_fe_mask = 1 << i;
+                       dig_fe = i;
+                       break;
+               }
+       }
+
+       if (found_crtc) {
+               /* loop through all running dig_be to find selected dig_fe */
+               for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
+                       dig_be = RREG32(NI_DIG_BE_CNTL + ni_dig_offsets[i]);
+                       /* is this dig_fe selected by this dig_be? */
+                       digs_fe_selected = NI_DIG_BE_CNTL_FE_SOURCE_SELECT(dig_be);
+                       dig_be_mode = NI_DIG_FE_CNTL_MODE(dig_be);
+                       if (dig_fe_mask & digs_fe_selected &&
+                           /* is the dig_be in SST mode? */
+                           dig_be_mode == NI_DIG_BE_DPSST) {
+                               dig_en_be = RREG32(NI_DIG_BE_EN_CNTL +
+                                                  ni_dig_offsets[i]);
+                               uniphy_pll = RREG32(NI_DCIO_UNIPHY0_PLL_CONTROL1 +
+                                                   ni_tx_offsets[i]);
+                               /* dig_be enabled and tx running */
+                               if (dig_en_be & NI_DIG_BE_EN_CNTL_ENABLE &&
+                                   dig_en_be & NI_DIG_BE_EN_CNTL_SYMBCLK_ON &&
+                                   uniphy_pll & NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE) {
+                                       is_enabled = true;
+                                       *ret_dig_fe = dig_fe;
+                                       break;
+                               }
+                       }
+               }
+       }
+
+       return is_enabled;
+}
+
+/*
+ * Blank the dig when it is in DP SST mode;
+ * the dig ignores the crtc timing.
+ */
+static void evergreen_blank_dp_output(struct radeon_device *rdev,
+                                     unsigned dig_fe)
+{
+       unsigned stream_ctrl;
+       unsigned fifo_ctrl;
+       unsigned counter = 0;
+
+       if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) {
+               DRM_ERROR("invalid dig_fe %d\n", dig_fe);
+               return;
+       }
+
+       stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+                            evergreen_dp_offsets[dig_fe]);
+       if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) {
+               DRM_ERROR("dig %d should be enabled\n", dig_fe);
+               return;
+       }
+
+       stream_ctrl &= ~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
+       WREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+              evergreen_dp_offsets[dig_fe], stream_ctrl);
+
+       stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+                            evergreen_dp_offsets[dig_fe]);
+       while (counter < 32 && stream_ctrl & EVERGREEN_DP_VID_STREAM_STATUS) {
+               msleep(1);
+               counter++;
+               stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+                                    evergreen_dp_offsets[dig_fe]);
+       }
+       if (counter >= 32)
+               DRM_ERROR("counter exceeds %d\n", counter);
+
+       fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
+       fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET;
+       WREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe], fifo_ctrl);
+
+}
+
 void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
 {
        u32 crtc_enabled, tmp, frame_count, blackout;
        int i, j;
+       unsigned dig_fe;
 
        if (!ASIC_IS_NODCE(rdev)) {
                save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
@@ -2651,7 +2793,17 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
                                        break;
                                udelay(1);
                        }
-
+                       /*
+                        * We should disable the dig if it drives a DP SST
+                        * stream, but we are in radeon_device_init() and the
+                        * display topology is unknown; it only becomes
+                        * available after radeon_modeset_init().
+                        * radeon_atom_encoder_dpms_dig() would do the job
+                        * once initialized properly; for now do it manually. */
+                       if (ASIC_IS_DCE5(rdev) &&
+                           evergreen_is_dp_sst_stream_enabled(rdev, i, &dig_fe))
+                               evergreen_blank_dp_output(rdev, dig_fe);
+                       /* we could remove the 6 lines below */
                        /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
                        WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
                        tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
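
evergreen_blank_dp_output() above is an instance of a common pattern: clear an enable bit, then poll a status bit with a bounded retry count before touching dependent state (here, resetting the steer FIFO). A generic sketch with hypothetical register helpers; the driver itself uses RREG32/WREG32 on EVERGREEN_DP_VID_STREAM_CNTL:

#include <stdbool.h>
#include <stdint.h>

#define STREAM_ENABLE  (1u << 0)
#define STREAM_STATUS  (1u << 16)

uint32_t reg_read(uint32_t addr);             /* assumed MMIO helpers */
void reg_write(uint32_t addr, uint32_t val);
void sleep_ms(unsigned int ms);

/* Disable the stream, then wait up to 32 ms for the hardware to report
 * that it has actually stopped. Returns true on success. */
static bool blank_stream(uint32_t ctrl_reg)
{
	uint32_t v = reg_read(ctrl_reg);
	unsigned int tries;

	if (!(v & STREAM_ENABLE))
		return false;          /* stream was not running */

	reg_write(ctrl_reg, v & ~STREAM_ENABLE);

	for (tries = 0; tries < 32; tries++) {
		if (!(reg_read(ctrl_reg) & STREAM_STATUS))
			return true;   /* stream has stopped */
		sleep_ms(1);
	}
	return false;                  /* timed out */
}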
index aa939dfed3a36586fc814f7077ef24332ab2b684..b436badf9efa356e00b80adee5cda4f191c5b537 100644 (file)
 
 /* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
 #define EVERGREEN_HDMI_BASE                            0x7030
+/* DIG block */
+#define NI_DIG0_REGISTER_OFFSET                 (0x7000  - 0x7000)
+#define NI_DIG1_REGISTER_OFFSET                 (0x7C00  - 0x7000)
+#define NI_DIG2_REGISTER_OFFSET                 (0x10800 - 0x7000)
+#define NI_DIG3_REGISTER_OFFSET                 (0x11400 - 0x7000)
+#define NI_DIG4_REGISTER_OFFSET                 (0x12000 - 0x7000)
+#define NI_DIG5_REGISTER_OFFSET                 (0x12C00 - 0x7000)
+
+
+#define NI_DIG_FE_CNTL                               0x7000
+#       define NI_DIG_FE_CNTL_SOURCE_SELECT(x)        ((x) & 0x3)
+#       define NI_DIG_FE_CNTL_SYMCLK_FE_ON            (1<<24)
+
+
+#define NI_DIG_BE_CNTL                    0x7140
+#       define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x)     (((x) >> 8) & 0x3F)
+#       define NI_DIG_FE_CNTL_MODE(x)                 (((x) >> 16) & 0x7)
+
+#define NI_DIG_BE_EN_CNTL                              0x7144
+#       define NI_DIG_BE_EN_CNTL_ENABLE               (1 << 0)
+#       define NI_DIG_BE_EN_CNTL_SYMBCLK_ON           (1 << 8)
+#       define NI_DIG_BE_DPSST 0
 
 /* Display Port block */
+#define EVERGREEN_DP0_REGISTER_OFFSET                 (0x730C  - 0x730C)
+#define EVERGREEN_DP1_REGISTER_OFFSET                 (0x7F0C  - 0x730C)
+#define EVERGREEN_DP2_REGISTER_OFFSET                 (0x10B0C - 0x730C)
+#define EVERGREEN_DP3_REGISTER_OFFSET                 (0x1170C - 0x730C)
+#define EVERGREEN_DP4_REGISTER_OFFSET                 (0x1230C - 0x730C)
+#define EVERGREEN_DP5_REGISTER_OFFSET                 (0x12F0C - 0x730C)
+
+
+#define EVERGREEN_DP_VID_STREAM_CNTL                    0x730C
+#       define EVERGREEN_DP_VID_STREAM_CNTL_ENABLE     (1 << 0)
+#       define EVERGREEN_DP_VID_STREAM_STATUS          (1 << 16)
+#define EVERGREEN_DP_STEER_FIFO                         0x7310
+#       define EVERGREEN_DP_STEER_FIFO_RESET           (1 << 0)
 #define EVERGREEN_DP_SEC_CNTL                           0x7280
 #       define EVERGREEN_DP_SEC_STREAM_ENABLE           (1 << 0)
 #       define EVERGREEN_DP_SEC_ASP_ENABLE              (1 << 4)
 #       define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x)      (((x) & 0xf) << 24)
 #       define EVERGREEN_DP_SEC_SS_EN                   (1 << 28)
 
+/* DCIO_UNIPHY block */
+#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1            (0x6600 - 0x6600)
+#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1            (0x6640 - 0x6600)
+#define NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1            (0x6680 - 0x6600)
+#define NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1            (0x66C0 - 0x6600)
+#define NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1            (0x6700 - 0x6600)
+#define NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1            (0x6740 - 0x6600)
+
+#define NI_DCIO_UNIPHY0_PLL_CONTROL1                   0x6618
+#       define NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE     (1 << 0)
+
 #endif
index da310a70c0f01b880f8bac9bf7bb2abe6da5d7a1..827ccc87cbc340bf4132a792f6436005f11111d6 100644 (file)
 #define NI_DP_MSE_SAT2                                 0x7398
 
 #define NI_DP_MSE_SAT_UPDATE                           0x739c
+#       define NI_DP_MSE_SAT_UPDATE_MASK               0x3
+#       define NI_DP_MSE_16_MTP_KEEPOUT                0x100
 
 #define NI_DIG_BE_CNTL                                 0x7140
 #       define NI_DIG_FE_SOURCE_SELECT(x)              (((x) & 0x7f) << 8)
index fd8c4d317e60c12c164b49b4142a6c6e3d7dabf4..95f4fea893021cb8f233244404fed6878742dd3b 100644 (file)
@@ -62,10 +62,6 @@ bool radeon_has_atpx(void) {
        return radeon_atpx_priv.atpx_detected;
 }
 
-bool radeon_has_atpx_dgpu_power_cntl(void) {
-       return radeon_atpx_priv.atpx.functions.power_cntl;
-}
-
 /**
  * radeon_atpx_call - call an ATPX method
  *
@@ -145,6 +141,13 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
  */
 static int radeon_atpx_validate(struct radeon_atpx *atpx)
 {
+       /* make sure required functions are enabled */
+       /* dGPU power control is required */
+       if (atpx->functions.power_cntl == false) {
+               printk("ATPX dGPU power cntl not present, forcing\n");
+               atpx->functions.power_cntl = true;
+       }
+
        if (atpx->functions.px_params) {
                union acpi_object *info;
                struct atpx_px_params output;
index cfcc099c537d0475da7eb6d6ce4576cc7a116b98..81a63d7f5cd9df6e41ef7748d4635a2cddf5b508 100644 (file)
@@ -2002,10 +2002,12 @@ radeon_add_atom_connector(struct drm_device *dev,
                                                   rdev->mode_info.dither_property,
                                                   RADEON_FMT_DITHER_DISABLE);
 
-                       if (radeon_audio != 0)
+                       if (radeon_audio != 0) {
                                drm_object_attach_property(&radeon_connector->base.base,
                                                           rdev->mode_info.audio_property,
                                                           RADEON_AUDIO_AUTO);
+                               radeon_connector->audio = RADEON_AUDIO_AUTO;
+                       }
                        if (ASIC_IS_DCE5(rdev))
                                drm_object_attach_property(&radeon_connector->base.base,
                                                           rdev->mode_info.output_csc_property,
@@ -2130,6 +2132,7 @@ radeon_add_atom_connector(struct drm_device *dev,
                                drm_object_attach_property(&radeon_connector->base.base,
                                                           rdev->mode_info.audio_property,
                                                           RADEON_AUDIO_AUTO);
+                               radeon_connector->audio = RADEON_AUDIO_AUTO;
                        }
                        if (connector_type == DRM_MODE_CONNECTOR_DVII) {
                                radeon_connector->dac_load_detect = true;
@@ -2185,6 +2188,7 @@ radeon_add_atom_connector(struct drm_device *dev,
                                drm_object_attach_property(&radeon_connector->base.base,
                                                           rdev->mode_info.audio_property,
                                                           RADEON_AUDIO_AUTO);
+                               radeon_connector->audio = RADEON_AUDIO_AUTO;
                        }
                        if (ASIC_IS_DCE5(rdev))
                                drm_object_attach_property(&radeon_connector->base.base,
@@ -2237,6 +2241,7 @@ radeon_add_atom_connector(struct drm_device *dev,
                                drm_object_attach_property(&radeon_connector->base.base,
                                                           rdev->mode_info.audio_property,
                                                           RADEON_AUDIO_AUTO);
+                               radeon_connector->audio = RADEON_AUDIO_AUTO;
                        }
                        if (ASIC_IS_DCE5(rdev))
                                drm_object_attach_property(&radeon_connector->base.base,
index 4fd1a961012d8ff314386765edf72d0662660fe8..d0826fb0434c45f35647c5a41288c17634c3c95f 100644 (file)
@@ -103,12 +103,6 @@ static const char radeon_family_name[][16] = {
        "LAST",
 };
 
-#if defined(CONFIG_VGA_SWITCHEROO)
-bool radeon_has_atpx_dgpu_power_cntl(void);
-#else
-static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
-#endif
-
 #define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)
 #define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
 
@@ -1305,9 +1299,9 @@ int radeon_device_init(struct radeon_device *rdev,
        }
        rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);
 
-       DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
-               radeon_family_name[rdev->family], pdev->vendor, pdev->device,
-               pdev->subsystem_vendor, pdev->subsystem_device);
+       DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
+                radeon_family_name[rdev->family], pdev->vendor, pdev->device,
+                pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
 
        /* mutex initialization are all done here so we
         * can recall function without having locking issues */
@@ -1439,7 +1433,7 @@ int radeon_device_init(struct radeon_device *rdev,
         * ignore it */
        vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
 
-       if ((rdev->flags & RADEON_IS_PX) && radeon_has_atpx_dgpu_power_cntl())
+       if (rdev->flags & RADEON_IS_PX)
                runtime = true;
        vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
        if (runtime)
index 43cffb526b0c604adbab9722f9e63501f51412db..de504ea29c06d355758e79c6ca18345f42e55f88 100644 (file)
@@ -89,8 +89,16 @@ static int radeon_dp_mst_set_stream_attrib(struct radeon_encoder *primary,
        WREG32(NI_DP_MSE_SAT_UPDATE + primary->offset, 1);
 
        do {
+               unsigned value1, value2;
+               udelay(10);
                temp = RREG32(NI_DP_MSE_SAT_UPDATE + primary->offset);
-       } while ((temp & 0x1) && retries++ < 10000);
+
+               value1 = temp & NI_DP_MSE_SAT_UPDATE_MASK;
+               value2 = temp & NI_DP_MSE_16_MTP_KEEPOUT;
+
+               if (!value1 && !value2)
+                       break;
+       } while (retries++ < 50);
 
         if (retries > 50)
                 DRM_ERROR("timed out waiting for SAT update %d\n", primary->offset);
@@ -150,7 +158,7 @@ static int radeon_dp_mst_update_stream_attribs(struct radeon_connector *mst_conn
        return 0;
 }
 
-static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, uint32_t x, uint32_t y)
+static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, s64 avg_time_slots_per_mtp)
 {
        struct drm_device *dev = mst->base.dev;
        struct radeon_device *rdev = dev->dev_private;
@@ -158,6 +166,8 @@ static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, uint32_t x, ui
        uint32_t val, temp;
        uint32_t offset = radeon_atom_set_enc_offset(mst_enc->fe);
        int retries = 0;
+       uint32_t x = drm_fixp2int(avg_time_slots_per_mtp);
+       uint32_t y = drm_fixp2int_ceil((avg_time_slots_per_mtp - x) << 26);
 
        val = NI_DP_MSE_RATE_X(x) | NI_DP_MSE_RATE_Y(y);
 
@@ -165,6 +175,7 @@ static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, uint32_t x, ui
 
        do {
                temp = RREG32(NI_DP_MSE_RATE_UPDATE + offset);
+               udelay(10);
        } while ((temp & 0x1) && (retries++ < 10000));
 
        if (retries >= 10000)
@@ -246,14 +257,8 @@ radeon_dp_mst_connector_destroy(struct drm_connector *connector)
        kfree(radeon_connector);
 }
 
-static int radeon_connector_dpms(struct drm_connector *connector, int mode)
-{
-       DRM_DEBUG_KMS("\n");
-       return 0;
-}
-
 static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = {
-       .dpms = radeon_connector_dpms,
+       .dpms = drm_helper_connector_dpms,
        .detect = radeon_dp_mst_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy = radeon_dp_mst_connector_destroy,
@@ -394,7 +399,7 @@ radeon_mst_encoder_dpms(struct drm_encoder *encoder, int mode)
        struct drm_crtc *crtc;
        struct radeon_crtc *radeon_crtc;
        int ret, slots;
-
+       s64 fixed_pbn, fixed_pbn_per_slot, avg_time_slots_per_mtp;
        if (!ASIC_IS_DCE5(rdev)) {
                DRM_ERROR("got mst dpms on non-DCE5\n");
                return;
@@ -456,7 +461,11 @@ radeon_mst_encoder_dpms(struct drm_encoder *encoder, int mode)
 
                mst_enc->enc_active = true;
                radeon_dp_mst_update_stream_attribs(radeon_connector->mst_port, primary);
-               radeon_dp_mst_set_vcp_size(radeon_encoder, slots, 0);
+
+               fixed_pbn = drm_int2fixp(mst_enc->pbn);
+               fixed_pbn_per_slot = drm_int2fixp(radeon_connector->mst_port->mst_mgr.pbn_div);
+               avg_time_slots_per_mtp = drm_fixp_div(fixed_pbn, fixed_pbn_per_slot);
+               radeon_dp_mst_set_vcp_size(radeon_encoder, avg_time_slots_per_mtp);
 
                atombios_dig_encoder_setup2(&primary->base, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0,
                                            mst_enc->fe);
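
The VCP size is now derived from a 32.32 fixed-point average of time slots per MTP, pbn / pbn_per_slot; the register takes the integer part as X and a 26-bit fraction (rounded up) as Y, which is exactly what the drm_fixp2int()/drm_fixp2int_ceil() pair above extracts. A standalone sketch of the same split, using an int64_t with 32 fractional bits as drm_fixed.h does (int2fixp/fixp2int mimic the drm_int2fixp/drm_fixp2int helpers):

#include <stdint.h>

static int64_t int2fixp(int v)      { return (int64_t)v << 32; }
static int     fixp2int(int64_t f)  { return (int)(f >> 32); }

/* Split pbn / pbn_per_slot into the X (integer time slots) and
 * Y (26-bit fractional time slots, rounded up) register fields. */
static void vcp_split(int pbn, int pbn_per_slot, uint32_t *x, uint32_t *y)
{
	int64_t avg = int2fixp(pbn) / pbn_per_slot;   /* fixp / int = fixp */
	int64_t frac = avg - int2fixp(fixp2int(avg)); /* low 32 bits */

	*x = (uint32_t)fixp2int(avg);
	*y = (uint32_t)((frac + 63) >> 6);  /* ceil(frac / 2^6): 32 -> 26 bits */
}

With pbn = 1000 and pbn_per_slot = 300, this yields x = 3 plus a y encoding of the 1/3 remainder, instead of the raw slot count that was passed previously.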
index 979f3bf65f2c474f231a26eb47af963d4436be0f..1e9304d1c88fd4e844ee9c5bd36300ed992e0f29 100644 (file)
@@ -291,6 +291,8 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
        if (r) {
                return r;
        }
+       rdev->ddev->vblank_disable_allowed = true;
+
        /* enable msi */
        rdev->msi_enabled = 0;
 
index 24152dfef19985bccc02b869853dd66f77d12e9e..478d4099b0d0e2b2d995d47bb74322ab46adf73b 100644 (file)
@@ -331,13 +331,15 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
                                                                         RADEON_CRTC_DISP_REQ_EN_B));
                        WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl));
                }
-               drm_vblank_on(dev, radeon_crtc->crtc_id);
+               if (dev->num_crtcs > radeon_crtc->crtc_id)
+                       drm_vblank_on(dev, radeon_crtc->crtc_id);
                radeon_crtc_load_lut(crtc);
                break;
        case DRM_MODE_DPMS_STANDBY:
        case DRM_MODE_DPMS_SUSPEND:
        case DRM_MODE_DPMS_OFF:
-               drm_vblank_off(dev, radeon_crtc->crtc_id);
+               if (dev->num_crtcs > radeon_crtc->crtc_id)
+                       drm_vblank_off(dev, radeon_crtc->crtc_id);
                if (radeon_crtc->crtc_id)
                        WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
                else {
index c008312e1bcdf44fe18285e906f358aa567c8c5a..90f739478a1b5257b488d120245d77d4cf561a70 100644 (file)
@@ -235,6 +235,8 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
        struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
 
+       if (radeon_ttm_tt_has_userptr(bo->ttm))
+               return -EPERM;
        return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
 }
 
@@ -615,7 +617,7 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
                        set_page_dirty(page);
 
                mark_page_accessed(page);
-               page_cache_release(page);
+               put_page(page);
        }
 
        sg_free_table(ttm->sg);
index af4df81c4e0c79c721dca5dc610da235d53651f9..e6abc09b67e3e63e82fea3bbda2b1c91234c525c 100644 (file)
@@ -2931,6 +2931,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
        { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
        { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
        { PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
+       { PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
        { 0, 0, 0, 0 },
 };
 
index 4cbf26555093f3d13ed8c917a34bf7e0302bdb8b..e3daafa1be1322222a038adb450b90bc8b7be695 100644 (file)
@@ -230,22 +230,13 @@ EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
 
 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
 {
-       struct ttm_bo_device *bdev = bo->bdev;
-       struct ttm_mem_type_manager *man;
+       int put_count = 0;
 
        lockdep_assert_held(&bo->resv->lock.base);
 
-       if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
-               list_del_init(&bo->swap);
-               list_del_init(&bo->lru);
-
-       } else {
-               if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
-                       list_move_tail(&bo->swap, &bo->glob->swap_lru);
-
-               man = &bdev->man[bo->mem.mem_type];
-               list_move_tail(&bo->lru, &man->lru);
-       }
+       put_count = ttm_bo_del_from_lru(bo);
+       ttm_bo_list_ref_sub(bo, put_count, true);
+       ttm_bo_add_to_lru(bo);
 }
 EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
 
index 4e19d0f9cc3094ae9585eef447c2439589a4a304..077ae9b2865dcda4bb8d9e774a8b4a5cf498bc84 100644 (file)
@@ -311,7 +311,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
                        goto out_err;
 
                copy_highpage(to_page, from_page);
-               page_cache_release(from_page);
+               put_page(from_page);
        }
 
        if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
@@ -361,7 +361,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
                copy_highpage(to_page, from_page);
                set_page_dirty(to_page);
                mark_page_accessed(to_page);
-               page_cache_release(to_page);
+               put_page(to_page);
        }
 
        ttm_tt_unpopulate(ttm);
index e797dfc07ae3fb3ffe5ec036eb6e2d78f09a7cbb..7e2a12c4fed2a49bb5f35714e8ef42f24cf8f7d1 100644 (file)
@@ -188,7 +188,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
                        if (NULL != (page = vsg->pages[i])) {
                                if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
                                        SetPageDirty(page);
-                               page_cache_release(page);
+                               put_page(page);
                        }
                }
        case dr_via_pages_alloc:
index 4854dac87e246e499002b0a71d8706d65ab1a15d..5fd1fd06effc3567dd39740e837c4fdd1a4f59dc 100644 (file)
@@ -267,11 +267,23 @@ static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
        return 0;
 }
 
+static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
+                                        struct drm_crtc_state *old_state)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&crtc->dev->event_lock, flags);
+       if (crtc->state->event)
+               drm_crtc_send_vblank_event(crtc, crtc->state->event);
+       spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+}
+
 static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
        .enable        = virtio_gpu_crtc_enable,
        .disable       = virtio_gpu_crtc_disable,
        .mode_set_nofb = virtio_gpu_crtc_mode_set_nofb,
        .atomic_check  = virtio_gpu_crtc_atomic_check,
+       .atomic_flush  = virtio_gpu_crtc_atomic_flush,
 };
 
 static void virtio_gpu_enc_mode_set(struct drm_encoder *encoder,
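
virtio-gpu has no real vblank interrupt, so the new atomic_flush hook completes any pending page-flip event itself, under event_lock as the DRM core requires. Reduced to its essentials as a sketch (the hunk above is the real hook; unlike this sketch it leaves crtc->state->event in place, and clearing the pointer after sending is only a common defensive variant):

/* Complete a pending flip event when there is no vblank interrupt to
 * do it for us (kernel-style; assumes the usual DRM declarations). */
static void flush_pending_event(struct drm_crtc *crtc)
{
	unsigned long flags;

	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	if (crtc->state->event) {
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;  /* defensive: avoid double send */
	}
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}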
index 723ba16c60844348f8627231912027fa3b85be9d..1a1a87cbf1097988bc60f91bd6debb872211908d 100644 (file)
@@ -3293,19 +3293,19 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
                    &vmw_cmd_dx_cid_check, true, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
                    true, false, true),
-       VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_ok,
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
                    true, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
                    true, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
-                   &vmw_cmd_ok, true, false, true),
-       VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_ok,
+                   &vmw_cmd_dx_cid_check, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
                    true, false, true),
-       VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_ok,
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
                    true, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
                    true, false, true),
-       VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid,
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
                    true, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
                    true, false, true),
index 3b1faf7862a55f2d400ce7f80444a7388bdb2310..679a4cb98ee306bb47c5c0097f1aa829c17a7fc8 100644 (file)
@@ -573,9 +573,9 @@ static int vmw_fb_set_par(struct fb_info *info)
                mode = old_mode;
                old_mode = NULL;
        } else if (!vmw_kms_validate_mode_vram(vmw_priv,
-                                              mode->hdisplay *
-                                              (var->bits_per_pixel + 7) / 8,
-                                              mode->vdisplay)) {
+                                       mode->hdisplay *
+                                       DIV_ROUND_UP(var->bits_per_pixel, 8),
+                                       mode->vdisplay)) {
                drm_mode_destroy(vmw_priv->dev, mode);
                return -EINVAL;
        }
index e00db3f510dd425c62565d913c937c5638a072d5..abb98c77bad25c6901b910095068a3aec5059311 100644 (file)
@@ -1068,7 +1068,6 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
                        goto err_register;
                }
 
-               pdev->dev.of_node = of_node;
                pdev->dev.parent = dev;
 
                ret = platform_device_add_data(pdev, &reg->pdata,
@@ -1079,6 +1078,12 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
                        platform_device_put(pdev);
                        goto err_register;
                }
+
+               /*
+                * Set of_node only after calling platform_device_add. Otherwise
+                * the platform:imx-ipuv3-crtc modalias won't be used.
+                */
+               pdev->dev.of_node = of_node;
        }
 
        return 0;
index 883a314cd83ae5841dc2fd14ba03bd9228297a09..6494a4d2817149e06e1ab241ccb10552d52dc553 100644 (file)
@@ -395,60 +395,48 @@ void ipu_cpmem_set_yuv_interleaved(struct ipuv3_channel *ch, u32 pixel_format)
 EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_interleaved);
 
 void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
-                                  u32 pixel_format, int stride,
-                                  int u_offset, int v_offset)
+                                  unsigned int uv_stride,
+                                  unsigned int u_offset, unsigned int v_offset)
 {
-       switch (pixel_format) {
-       case V4L2_PIX_FMT_YUV420:
-       case V4L2_PIX_FMT_YUV422P:
-               ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, (stride / 2) - 1);
-               ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
-               ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_offset / 8);
-               break;
-       case V4L2_PIX_FMT_YVU420:
-               ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, (stride / 2) - 1);
-               ipu_ch_param_write_field(ch, IPU_FIELD_UBO, v_offset / 8);
-               ipu_ch_param_write_field(ch, IPU_FIELD_VBO, u_offset / 8);
-               break;
-       case V4L2_PIX_FMT_NV12:
-       case V4L2_PIX_FMT_NV16:
-               ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, stride - 1);
-               ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
-               ipu_ch_param_write_field(ch, IPU_FIELD_VBO, u_offset / 8);
-               break;
-       }
+       ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, uv_stride - 1);
+       ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
+       ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_offset / 8);
 }
 EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar_full);
 
 void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch,
                              u32 pixel_format, int stride, int height)
 {
-       int u_offset, v_offset;
+       int fourcc, u_offset, v_offset;
        int uv_stride = 0;
 
-       switch (pixel_format) {
-       case V4L2_PIX_FMT_YUV420:
-       case V4L2_PIX_FMT_YVU420:
+       fourcc = v4l2_pix_fmt_to_drm_fourcc(pixel_format);
+       switch (fourcc) {
+       case DRM_FORMAT_YUV420:
                uv_stride = stride / 2;
                u_offset = stride * height;
                v_offset = u_offset + (uv_stride * height / 2);
-               ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride,
-                                             u_offset, v_offset);
                break;
-       case V4L2_PIX_FMT_YUV422P:
+       case DRM_FORMAT_YVU420:
+               uv_stride = stride / 2;
+               v_offset = stride * height;
+               u_offset = v_offset + (uv_stride * height / 2);
+               break;
+       case DRM_FORMAT_YUV422:
                uv_stride = stride / 2;
                u_offset = stride * height;
                v_offset = u_offset + (uv_stride * height);
-               ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride,
-                                             u_offset, v_offset);
                break;
-       case V4L2_PIX_FMT_NV12:
-       case V4L2_PIX_FMT_NV16:
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV16:
+               uv_stride = stride;
                u_offset = stride * height;
-               ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride,
-                                             u_offset, 0);
+               v_offset = 0;
                break;
+       default:
+               return;
        }
+       ipu_cpmem_set_yuv_planar_full(ch, uv_stride, u_offset, v_offset);
 }
 EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar);
 
@@ -684,17 +672,25 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
 
        switch (pix->pixelformat) {
        case V4L2_PIX_FMT_YUV420:
-       case V4L2_PIX_FMT_YVU420:
                offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
                u_offset = U_OFFSET(pix, image->rect.left,
                                    image->rect.top) - offset;
                v_offset = V_OFFSET(pix, image->rect.left,
                                    image->rect.top) - offset;
 
-               ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
-                                             pix->bytesperline,
+               ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
                                              u_offset, v_offset);
                break;
+       case V4L2_PIX_FMT_YVU420:
+               offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
+               u_offset = U_OFFSET(pix, image->rect.left,
+                                   image->rect.top) - offset;
+               v_offset = V_OFFSET(pix, image->rect.left,
+                                   image->rect.top) - offset;
+
+               ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
+                                             v_offset, u_offset);
+               break;
        case V4L2_PIX_FMT_YUV422P:
                offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
                u_offset = U2_OFFSET(pix, image->rect.left,
@@ -702,8 +698,7 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
                v_offset = V2_OFFSET(pix, image->rect.left,
                                     image->rect.top) - offset;
 
-               ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
-                                             pix->bytesperline,
+               ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
                                              u_offset, v_offset);
                break;
        case V4L2_PIX_FMT_NV12:
@@ -712,8 +707,7 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
                                     image->rect.top) - offset;
                v_offset = 0;
 
-               ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
-                                             pix->bytesperline,
+               ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline,
                                              u_offset, v_offset);
                break;
        case V4L2_PIX_FMT_NV16:
@@ -722,8 +716,7 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
                                      image->rect.top) - offset;
                v_offset = 0;
 
-               ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
-                                             pix->bytesperline,
+               ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline,
                                              u_offset, v_offset);
                break;
        case V4L2_PIX_FMT_UYVY:
index 042c3958e2a099224b25cafd194e076b7150f67c..837b1ec228005ebf0b36a865d4f992588229dc0a 100644 (file)
@@ -350,11 +350,13 @@ out:
 }
 EXPORT_SYMBOL_GPL(ipu_dmfc_alloc_bandwidth);
 
-int ipu_dmfc_init_channel(struct dmfc_channel *dmfc, int width)
+void ipu_dmfc_config_wait4eot(struct dmfc_channel *dmfc, int width)
 {
        struct ipu_dmfc_priv *priv = dmfc->priv;
        u32 dmfc_gen1;
 
+       mutex_lock(&priv->mutex);
+
        dmfc_gen1 = readl(priv->base + DMFC_GENERAL1);
 
        if ((dmfc->slots * 64 * 4) / width > dmfc->data->max_fifo_lines)
@@ -364,9 +366,9 @@ int ipu_dmfc_init_channel(struct dmfc_channel *dmfc, int width)
 
        writel(dmfc_gen1, priv->base + DMFC_GENERAL1);
 
-       return 0;
+       mutex_unlock(&priv->mutex);
 }
-EXPORT_SYMBOL_GPL(ipu_dmfc_init_channel);
+EXPORT_SYMBOL_GPL(ipu_dmfc_config_wait4eot);
 
 struct dmfc_channel *ipu_dmfc_get(struct ipu_soc *ipu, int ipu_channel)
 {
index bdb8cc89cacc73ea6442c9cf84de4180ded70772..4f9c5c6deaed189f6be5a34145a2ec5cf3fa40b7 100644 (file)
@@ -1979,6 +1979,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
index 5c0e43ed5c53ca28e554b7d833b093f545b8b440..0238f0169e48f5d45b345498c5ba713c8cb9a96d 100644 (file)
 #define USB_DEVICE_ID_CORSAIR_K90      0x1b02
 
 #define USB_VENDOR_ID_CREATIVELABS     0x041e
+#define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51     0x322c
 #define USB_DEVICE_ID_PRODIKEYS_PCMIDI 0x2801
 
 #define USB_VENDOR_ID_CVTOUCH          0x1ff7
 #define USB_DEVICE_ID_SIDEWINDER_GV    0x003b
 #define USB_DEVICE_ID_MS_OFFICE_KB     0x0048
 #define USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0 0x009d
+#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K 0x00b4
 #define USB_DEVICE_ID_MS_NE4K          0x00db
 #define USB_DEVICE_ID_MS_NE4K_JP       0x00dc
 #define USB_DEVICE_ID_MS_LK6K          0x00f9
 #define USB_DEVICE_ID_MS_PRESENTER_8K_USB      0x0713
 #define USB_DEVICE_ID_MS_NE7K          0x071d
 #define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K      0x0730
+#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1 0x0732
+#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_600  0x0750
 #define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500    0x076c
 #define USB_DEVICE_ID_MS_COMFORT_KEYBOARD 0x00e3
 #define USB_DEVICE_ID_MS_SURFACE_PRO_2   0x0799
index 0125e356bd8d29c0ad51b22d604f1df00f0f2cd7..1ac4ff4d57a659fc89c6a2bf36b83ba2d679fdcc 100644 (file)
@@ -184,21 +184,31 @@ static int lenovo_send_cmd_cptkbd(struct hid_device *hdev,
                        unsigned char byte2, unsigned char byte3)
 {
        int ret;
-       unsigned char buf[] = {0x18, byte2, byte3};
+       unsigned char *buf;
+
+       buf = kzalloc(3, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       buf[0] = 0x18;
+       buf[1] = byte2;
+       buf[2] = byte3;
 
        switch (hdev->product) {
        case USB_DEVICE_ID_LENOVO_CUSBKBD:
-               ret = hid_hw_raw_request(hdev, 0x13, buf, sizeof(buf),
+               ret = hid_hw_raw_request(hdev, 0x13, buf, 3,
                                        HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
                break;
        case USB_DEVICE_ID_LENOVO_CBTKBD:
-               ret = hid_hw_output_report(hdev, buf, sizeof(buf));
+               ret = hid_hw_output_report(hdev, buf, 3);
                break;
        default:
                ret = -EINVAL;
                break;
        }
 
+       kfree(buf);
+
        return ret < 0 ? ret : 0; /* BT returns 0, USB returns sizeof(buf) */
 }
 
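The rewrite above swaps a stack buffer for a kzalloc'd one because buffers handed to hid_hw_raw_request() and hid_hw_output_report() can end up in USB transfers, and stack memory is not guaranteed to be DMA-capable. A hedged kernel-context sketch of the rule; the 0x18/0x13 constants mirror the driver, and send_cmd() is a hypothetical wrapper:

#include <linux/hid.h>
#include <linux/slab.h>

static int send_cmd(struct hid_device *hdev, u8 byte2, u8 byte3)
{
	u8 *buf = kzalloc(3, GFP_KERNEL);	/* heap allocation, DMA-safe */
	int ret;

	if (!buf)
		return -ENOMEM;

	buf[0] = 0x18;
	buf[1] = byte2;
	buf[2] = byte3;
	ret = hid_hw_raw_request(hdev, 0x13, buf, 3, HID_FEATURE_REPORT,
				 HID_REQ_SET_REPORT);
	kfree(buf);
	return ret < 0 ? ret : 0;	/* USB returns the byte count */
}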
index 75cd3bc59c549a2be01c8092412dafd24abf8c61..e924d555536cf7fb1b221af4c15e51112df107c8 100644 (file)
@@ -272,6 +272,12 @@ static const struct hid_device_id ms_devices[] = {
                .driver_data = MS_PRESENTER },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K),
                .driver_data = MS_ERGONOMY | MS_RDESC_3K },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K),
+               .driver_data = MS_ERGONOMY },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600),
+               .driver_data = MS_ERGONOMY },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1),
+               .driver_data = MS_ERGONOMY },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0),
                .driver_data = MS_NOGET },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500),
index 25d3c4330bf69bbca32efb3883628f3af6a28875..c741f5e50a6681cd0b6267fc04c5ca11f1b0a1b5 100644 (file)
@@ -1169,6 +1169,7 @@ static void mt_release_contacts(struct hid_device *hid)
                                                           MT_TOOL_FINGER,
                                                           false);
                        }
+                       input_mt_sync_frame(input_dev);
                        input_sync(input_dev);
                }
        }
index 4390eee2ce84977427e7c3759f9e481895037858..c830ed39348f7cdba2b06b9dda01616e9405335a 100644 (file)
@@ -2049,9 +2049,11 @@ static void wiimod_mp_in_mp(struct wiimote_data *wdata, const __u8 *ext)
         *   -----+------------------------------+-----+-----+
         * The single bits Yaw, Roll, Pitch in the lower right corner specify
         * whether the wiimote is rotating fast (0) or slow (1). Speed for slow
-        * rotation is 440 deg/s and for fast rotation 2000 deg/s. To get a
-        * linear scale we multiply by 2000/440 = ~4.5454 which is 18 for fast
-        * and 9 for slow.
+        * rotation is 8192/440 units / deg/s and for fast rotation 8192/2000
+        * units / deg/s. To get a linear scale for fast rotation we multiply
+        * by 2000/440 = ~4.5454 and scale both fast and slow by 9 to match the
+        * previous scale reported by this driver.
+        * This leaves a linear scale with 8192*9/440 (~167.564) units / deg/s.
         * If the wiimote is not rotating the sensor reports 2^13 = 8192.
         * Ext specifies whether an extension is connected to the motionp,
         * which is parsed by wiimote-core.
@@ -2070,15 +2072,15 @@ static void wiimod_mp_in_mp(struct wiimote_data *wdata, const __u8 *ext)
        z -= 8192;
 
        if (!(ext[3] & 0x02))
-               x *= 18;
+               x = (x * 2000 * 9) / 440;
        else
                x *= 9;
        if (!(ext[4] & 0x02))
-               y *= 18;
+               y = (y * 2000 * 9) / 440;
        else
                y *= 9;
        if (!(ext[3] & 0x01))
-               z *= 18;
+               z = (z * 2000 * 9) / 440;
        else
                z *= 9;
 
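A quick standalone check of the scaling arithmetic in the comment above (not driver code; the 100 deg/s test rate is arbitrary): a given angular rate should produce the same scaled value whichever range reported it.

#include <stdio.h>

int main(void)
{
	int rate = 100;				/* deg/s */
	int slow_raw = rate * 8192 / 440;	/* slow range: 8192 units per 440 deg/s */
	int fast_raw = rate * 8192 / 2000;	/* fast range: 8192 units per 2000 deg/s */

	/* Both should print about rate * 8192 * 9 / 440, i.e. ~16756 here,
	 * modulo integer truncation. */
	printf("slow scaled: %d\n", slow_raw * 9);
	printf("fast scaled: %d\n", fast_raw * 2000 * 9 / 440);
	return 0;
}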
index ad71160b9ea4341309030db0aea3aa2213ad71d3..ae83af649a607f67239f1a64bf45dd4b5770cc7d 100644 (file)
@@ -951,14 +951,6 @@ static int usbhid_output_report(struct hid_device *hid, __u8 *buf, size_t count)
        return ret;
 }
 
-static void usbhid_restart_queues(struct usbhid_device *usbhid)
-{
-       if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
-               usbhid_restart_out_queue(usbhid);
-       if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
-               usbhid_restart_ctrl_queue(usbhid);
-}
-
 static void hid_free_buffers(struct usb_device *dev, struct hid_device *hid)
 {
        struct usbhid_device *usbhid = hid->driver_data;
@@ -1404,6 +1396,37 @@ static void hid_cease_io(struct usbhid_device *usbhid)
        usb_kill_urb(usbhid->urbout);
 }
 
+static void hid_restart_io(struct hid_device *hid)
+{
+       struct usbhid_device *usbhid = hid->driver_data;
+       int clear_halt = test_bit(HID_CLEAR_HALT, &usbhid->iofl);
+       int reset_pending = test_bit(HID_RESET_PENDING, &usbhid->iofl);
+
+       spin_lock_irq(&usbhid->lock);
+       clear_bit(HID_SUSPENDED, &usbhid->iofl);
+       usbhid_mark_busy(usbhid);
+
+       if (clear_halt || reset_pending)
+               schedule_work(&usbhid->reset_work);
+       usbhid->retry_delay = 0;
+       spin_unlock_irq(&usbhid->lock);
+
+       if (reset_pending || !test_bit(HID_STARTED, &usbhid->iofl))
+               return;
+
+       if (!clear_halt) {
+               if (hid_start_in(hid) < 0)
+                       hid_io_error(hid);
+       }
+
+       spin_lock_irq(&usbhid->lock);
+       if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
+               usbhid_restart_out_queue(usbhid);
+       if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
+               usbhid_restart_ctrl_queue(usbhid);
+       spin_unlock_irq(&usbhid->lock);
+}
+
 /* Treat USB reset pretty much the same as suspend/resume */
 static int hid_pre_reset(struct usb_interface *intf)
 {
@@ -1453,14 +1476,14 @@ static int hid_post_reset(struct usb_interface *intf)
                return 1;
        }
 
+       /* No need to do another reset or clear a halted endpoint */
        spin_lock_irq(&usbhid->lock);
        clear_bit(HID_RESET_PENDING, &usbhid->iofl);
+       clear_bit(HID_CLEAR_HALT, &usbhid->iofl);
        spin_unlock_irq(&usbhid->lock);
        hid_set_idle(dev, intf->cur_altsetting->desc.bInterfaceNumber, 0, 0);
-       status = hid_start_in(hid);
-       if (status < 0)
-               hid_io_error(hid);
-       usbhid_restart_queues(usbhid);
+
+       hid_restart_io(hid);
 
        return 0;
 }
@@ -1483,25 +1506,9 @@ void usbhid_put_power(struct hid_device *hid)
 #ifdef CONFIG_PM
 static int hid_resume_common(struct hid_device *hid, bool driver_suspended)
 {
-       struct usbhid_device *usbhid = hid->driver_data;
-       int status;
-
-       spin_lock_irq(&usbhid->lock);
-       clear_bit(HID_SUSPENDED, &usbhid->iofl);
-       usbhid_mark_busy(usbhid);
-
-       if (test_bit(HID_CLEAR_HALT, &usbhid->iofl) ||
-                       test_bit(HID_RESET_PENDING, &usbhid->iofl))
-               schedule_work(&usbhid->reset_work);
-       usbhid->retry_delay = 0;
-
-       usbhid_restart_queues(usbhid);
-       spin_unlock_irq(&usbhid->lock);
-
-       status = hid_start_in(hid);
-       if (status < 0)
-               hid_io_error(hid);
+       int status = 0;
 
+       hid_restart_io(hid);
        if (driver_suspended && hid->driver && hid->driver->resume)
                status = hid->driver->resume(hid);
        return status;
@@ -1570,12 +1577,8 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
 static int hid_resume(struct usb_interface *intf)
 {
        struct hid_device *hid = usb_get_intfdata (intf);
-       struct usbhid_device *usbhid = hid->driver_data;
        int status;
 
-       if (!test_bit(HID_STARTED, &usbhid->iofl))
-               return 0;
-
        status = hid_resume_common(hid, true);
        dev_dbg(&intf->dev, "resume status %d\n", status);
        return 0;
@@ -1584,10 +1587,8 @@ static int hid_resume(struct usb_interface *intf)
 static int hid_reset_resume(struct usb_interface *intf)
 {
        struct hid_device *hid = usb_get_intfdata(intf);
-       struct usbhid_device *usbhid = hid->driver_data;
        int status;
 
-       clear_bit(HID_SUSPENDED, &usbhid->iofl);
        status = hid_post_reset(intf);
        if (status >= 0 && hid->driver && hid->driver->reset_resume) {
                int ret = hid->driver->reset_resume(hid);
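The usbhid refactor above folds the duplicated resume and post-reset paths into one hid_restart_io() helper: flag bits are sampled and bookkeeping updated under the spinlock, and the slow restart steps run with the lock dropped. A rough standalone sketch of that shape (the bit numbers here are placeholders, not the driver's real flag values):

#include <linux/bitops.h>
#include <linux/spinlock.h>

#define SK_RESET_PENDING	0	/* placeholder bit indices */
#define SK_SUSPENDED		1

static void restart_io_sketch(spinlock_t *lock, unsigned long *iofl)
{
	int reset_pending;

	spin_lock_irq(lock);
	reset_pending = test_bit(SK_RESET_PENDING, iofl);
	clear_bit(SK_SUSPENDED, iofl);
	spin_unlock_irq(lock);

	if (reset_pending)
		return;		/* the reset worker will restart I/O itself */

	/* ...restart the input URB and the output/ctrl queues here... */
}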
index ed2f68edc8f1c648e30b15178fc550cb9a071f88..53fc856d6867b032e6e2100c1c87c3bf5733702c 100644 (file)
@@ -71,6 +71,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
+       { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
index 68a560957871c5a497e4e3d0229ca03dc359d8da..ccf1883318c37ff401c08a147d365ab2fbb0d5df 100644 (file)
@@ -152,6 +152,25 @@ static void wacom_feature_mapping(struct hid_device *hdev,
                hid_data->inputmode = field->report->id;
                hid_data->inputmode_index = usage->usage_index;
                break;
+
+       case HID_UP_DIGITIZER:
+               if (field->report->id == 0x0B &&
+                   (field->application == WACOM_G9_DIGITIZER ||
+                    field->application == WACOM_G11_DIGITIZER)) {
+                       wacom->wacom_wac.mode_report = field->report->id;
+                       wacom->wacom_wac.mode_value = 0;
+               }
+               break;
+
+       case WACOM_G9_PAGE:
+       case WACOM_G11_PAGE:
+               if (field->report->id == 0x03 &&
+                   (field->application == WACOM_G9_TOUCHSCREEN ||
+                    field->application == WACOM_G11_TOUCHSCREEN)) {
+                       wacom->wacom_wac.mode_report = field->report->id;
+                       wacom->wacom_wac.mode_value = 0;
+               }
+               break;
        }
 }
 
@@ -322,26 +341,41 @@ static int wacom_hid_set_device_mode(struct hid_device *hdev)
        return 0;
 }
 
-static int wacom_set_device_mode(struct hid_device *hdev, int report_id,
-               int length, int mode)
+static int wacom_set_device_mode(struct hid_device *hdev,
+                                struct wacom_wac *wacom_wac)
 {
-       unsigned char *rep_data;
+       u8 *rep_data;
+       struct hid_report *r;
+       struct hid_report_enum *re;
+       int length;
        int error = -ENOMEM, limit = 0;
 
-       rep_data = kzalloc(length, GFP_KERNEL);
+       if (wacom_wac->mode_report < 0)
+               return 0;
+
+       re = &(hdev->report_enum[HID_FEATURE_REPORT]);
+       r = re->report_id_hash[wacom_wac->mode_report];
+       if (!r)
+               return -EINVAL;
+
+       rep_data = hid_alloc_report_buf(r, GFP_KERNEL);
        if (!rep_data)
-               return error;
+               return -ENOMEM;
+
+       length = hid_report_len(r);
 
        do {
-               rep_data[0] = report_id;
-               rep_data[1] = mode;
+               rep_data[0] = wacom_wac->mode_report;
+               rep_data[1] = wacom_wac->mode_value;
 
                error = wacom_set_report(hdev, HID_FEATURE_REPORT, rep_data,
                                         length, 1);
                if (error >= 0)
                        error = wacom_get_report(hdev, HID_FEATURE_REPORT,
                                                 rep_data, length, 1);
-       } while (error >= 0 && rep_data[1] != mode && limit++ < WAC_MSG_RETRIES);
+       } while (error >= 0 &&
+                rep_data[1] != wacom_wac->mode_value &&
+                limit++ < WAC_MSG_RETRIES);
 
        kfree(rep_data);
 
@@ -411,32 +445,41 @@ static int wacom_bt_query_tablet_data(struct hid_device *hdev, u8 speed,
 static int wacom_query_tablet_data(struct hid_device *hdev,
                struct wacom_features *features)
 {
+       struct wacom *wacom = hid_get_drvdata(hdev);
+       struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+
        if (hdev->bus == BUS_BLUETOOTH)
                return wacom_bt_query_tablet_data(hdev, 1, features);
 
-       if (features->type == HID_GENERIC)
-               return wacom_hid_set_device_mode(hdev);
-
-       if (features->device_type & WACOM_DEVICETYPE_TOUCH) {
-               if (features->type > TABLETPC) {
-                       /* MT Tablet PC touch */
-                       return wacom_set_device_mode(hdev, 3, 4, 4);
-               }
-               else if (features->type == WACOM_24HDT) {
-                       return wacom_set_device_mode(hdev, 18, 3, 2);
-               }
-               else if (features->type == WACOM_27QHDT) {
-                       return wacom_set_device_mode(hdev, 131, 3, 2);
-               }
-               else if (features->type == BAMBOO_PAD) {
-                       return wacom_set_device_mode(hdev, 2, 2, 2);
-               }
-       } else if (features->device_type & WACOM_DEVICETYPE_PEN) {
-               if (features->type <= BAMBOO_PT) {
-                       return wacom_set_device_mode(hdev, 2, 2, 2);
+       if (features->type != HID_GENERIC) {
+               if (features->device_type & WACOM_DEVICETYPE_TOUCH) {
+                       if (features->type > TABLETPC) {
+                               /* MT Tablet PC touch */
+                               wacom_wac->mode_report = 3;
+                               wacom_wac->mode_value = 4;
+                       } else if (features->type == WACOM_24HDT) {
+                               wacom_wac->mode_report = 18;
+                               wacom_wac->mode_value = 2;
+                       } else if (features->type == WACOM_27QHDT) {
+                               wacom_wac->mode_report = 131;
+                               wacom_wac->mode_value = 2;
+                       } else if (features->type == BAMBOO_PAD) {
+                               wacom_wac->mode_report = 2;
+                               wacom_wac->mode_value = 2;
+                       }
+               } else if (features->device_type & WACOM_DEVICETYPE_PEN) {
+                       if (features->type <= BAMBOO_PT) {
+                               wacom_wac->mode_report = 2;
+                               wacom_wac->mode_value = 2;
+                       }
                }
        }
 
+       wacom_set_device_mode(hdev, wacom_wac);
+
+       if (features->type == HID_GENERIC)
+               return wacom_hid_set_device_mode(hdev);
+
        return 0;
 }
 
@@ -1817,6 +1860,9 @@ static int wacom_probe(struct hid_device *hdev,
                goto fail_type;
        }
 
+       wacom_wac->hid_data.inputmode = -1;
+       wacom_wac->mode_report = -1;
+
        wacom->usbdev = dev;
        wacom->intf = intf;
        mutex_init(&wacom->lock);
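The loop in wacom_set_device_mode() above is a set-and-verify retry: write the feature report, read it back, and stop once the device echoes the requested mode value or the retry budget runs out. The same idea in a generic standalone sketch, with hypothetical set_value()/get_value() transport callbacks:

#include <stdbool.h>

static bool set_and_verify(int (*set_value)(int), int (*get_value)(void),
			   int want, int max_tries)
{
	int tries;

	for (tries = 0; tries < max_tries; tries++) {
		if (set_value(want) < 0)
			return false;		/* transport error */
		if (get_value() == want)
			return true;		/* device acknowledged */
	}
	return false;				/* never took effect */
}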
index bd198bbd4df0841b5a3596c09ecac822ecda40e5..cf2ba43453fd4ecd9458075a9767676534a354fe 100644 (file)
@@ -684,6 +684,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
 
                wacom->tool[idx] = wacom_intuos_get_tool_type(wacom->id[idx]);
 
+               wacom->shared->stylus_in_proximity = true;
                return 1;
        }
 
@@ -2425,6 +2426,17 @@ void wacom_setup_device_quirks(struct wacom *wacom)
                }
        }
 
+       /*
+        * Hack for the Bamboo One:
+        * the device presents a PAD/Touch interface like most Bamboos and even
+        * sends ghost PAD data on it. However, later, we must disable this
+        * ghost interface, and we cannot detect it unless we set it here
+        * to WACOM_DEVICETYPE_PAD or WACOM_DEVICETYPE_TOUCH.
+        */
+       if (features->type == BAMBOO_PEN &&
+           features->pktlen == WACOM_PKGLEN_BBTOUCH3)
+               features->device_type |= WACOM_DEVICETYPE_PAD;
+
        /*
         * Raw Wacom-mode pen and touch events both come from interface
         * 0, whose HID descriptor has an application usage of 0xFF0D
@@ -3384,6 +3396,10 @@ static const struct wacom_features wacom_features_0x33E =
        { "Wacom Intuos PT M 2", 21600, 13500, 2047, 63,
          INTUOSHT2, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16,
          .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
+static const struct wacom_features wacom_features_0x343 =
+       { "Wacom DTK1651", 34616, 19559, 1023, 0,
+         DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
+         WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
 
 static const struct wacom_features wacom_features_HID_ANY_ID =
        { "Wacom HID", .type = HID_GENERIC };
@@ -3549,6 +3565,7 @@ const struct hid_device_id wacom_ids[] = {
        { USB_DEVICE_WACOM(0x33C) },
        { USB_DEVICE_WACOM(0x33D) },
        { USB_DEVICE_WACOM(0x33E) },
+       { USB_DEVICE_WACOM(0x343) },
        { USB_DEVICE_WACOM(0x4001) },
        { USB_DEVICE_WACOM(0x4004) },
        { USB_DEVICE_WACOM(0x5000) },
index 25baa7f295997f65759d8da1c6dc239c01258156..e2084d914c14aa3902b42e2cb852517c31327256 100644 (file)
 #define WACOM_DEVICETYPE_WL_MONITOR     0x0008
 
 #define WACOM_VENDORDEFINED_PEN                0xff0d0001
+#define WACOM_G9_PAGE                  0xff090000
+#define WACOM_G9_DIGITIZER             (WACOM_G9_PAGE | 0x02)
+#define WACOM_G9_TOUCHSCREEN           (WACOM_G9_PAGE | 0x11)
+#define WACOM_G11_PAGE                 0xff110000
+#define WACOM_G11_DIGITIZER            (WACOM_G11_PAGE | 0x02)
+#define WACOM_G11_TOUCHSCREEN          (WACOM_G11_PAGE | 0x11)
 
 #define WACOM_PEN_FIELD(f)     (((f)->logical == HID_DG_STYLUS) || \
                                 ((f)->physical == HID_DG_STYLUS) || \
@@ -238,6 +244,8 @@ struct wacom_wac {
        int ps_connected;
        u8 bt_features;
        u8 bt_high_speed;
+       int mode_report;
+       int mode_value;
        struct hid_data hid_data;
 };
 
index 5613e2b5cff7759861a1e2d7ad2748f7626d16da..a40a73a7b71da359574bf4061d80340b1caa30a4 100644 (file)
@@ -103,15 +103,29 @@ static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
  *    there is room for the producer to send the pending packet.
  */
 
-static bool hv_need_to_signal_on_read(u32 prev_write_sz,
-                                     struct hv_ring_buffer_info *rbi)
+static bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
 {
        u32 cur_write_sz;
        u32 r_size;
-       u32 write_loc = rbi->ring_buffer->write_index;
+       u32 write_loc;
        u32 read_loc = rbi->ring_buffer->read_index;
-       u32 pending_sz = rbi->ring_buffer->pending_send_sz;
+       u32 pending_sz;
 
+       /*
+        * Issue a full memory barrier before making the signaling decision.
+        * Here is the reason for having this barrier:
+        * If the read of pending_sz (in this function) were reordered and
+        * performed before we commit the new read index (in the calling
+        * function), we could have a problem: if the host sets pending_sz
+        * after we have sampled it and goes to sleep before we commit the
+        * read index, we would miss sending the interrupt. Issue a full
+        * memory barrier to address this.
+        */
+       mb();
+
+       pending_sz = rbi->ring_buffer->pending_send_sz;
+       write_loc = rbi->ring_buffer->write_index;
        /* If the other end is not blocked on write don't bother. */
        if (pending_sz == 0)
                return false;
@@ -120,7 +134,7 @@ static bool hv_need_to_signal_on_read(u32 prev_write_sz,
        cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
                        read_loc - write_loc;
 
-       if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
+       if (cur_write_sz >= pending_sz)
                return true;
 
        return false;
@@ -455,7 +469,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
        /* Update the read index */
        hv_set_next_read_location(inring_info, next_read_location);
 
-       *signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info);
+       *signal = hv_need_to_signal_on_read(inring_info);
 
        return ret;
 }
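The ordering requirement behind the mb() above can be stated compactly: the consumer's store of read_index must be visible before its load of pending_send_sz, otherwise a producer that publishes pending_send_sz and then blocks can be missed. A hedged C11-atomics sketch of just that ordering (field names mirror the ring buffer; this is not the driver code):

#include <stdatomic.h>
#include <stdbool.h>

struct ring {
	_Atomic unsigned int read_index;
	_Atomic unsigned int pending_send_sz;
};

static bool consumer_should_signal(struct ring *r, unsigned int new_read)
{
	atomic_store_explicit(&r->read_index, new_read,
			      memory_order_relaxed);
	/* Full fence, playing the role of the kernel's mb() above. */
	atomic_thread_fence(memory_order_seq_cst);
	return atomic_load_explicit(&r->pending_send_sz,
				    memory_order_relaxed) != 0;
}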
index faa8e6821fea00ba95c7a147cb02894550c459f7..0967e1a5b3a25f1d351f83adb40f7bad0c52c659 100644 (file)
@@ -975,10 +975,10 @@ config I2C_XLR
 
 config I2C_XLP9XX
        tristate "XLP9XX I2C support"
-       depends on CPU_XLP || COMPILE_TEST
+       depends on CPU_XLP || ARCH_VULCAN || COMPILE_TEST
        help
          This driver enables support for the on-chip I2C interface of
-         the Broadcom XLP9xx/XLP5xx MIPS processors.
+         the Broadcom XLP9xx/XLP5xx MIPS and Vulcan ARM64 processors.
 
          This driver can also be built as a module.  If so, the module will
          be called i2c-xlp9xx.
index 714bdc837769fdc74fdf9208c18d0568ecc653ae..b167ab25310a3b53797a8604d0adf360d91fb3e9 100644 (file)
@@ -116,8 +116,8 @@ struct cpm_i2c {
        cbd_t __iomem *rbase;
        u_char *txbuf[CPM_MAXBD];
        u_char *rxbuf[CPM_MAXBD];
-       u32 txdma[CPM_MAXBD];
-       u32 rxdma[CPM_MAXBD];
+       dma_addr_t txdma[CPM_MAXBD];
+       dma_addr_t rxdma[CPM_MAXBD];
 };
 
 static irqreturn_t cpm_i2c_interrupt(int irq, void *dev_id)
index b29c7500461a72c051b29dc2822f28afbf248b35..f54ece8fce781865336caca2e43abb301a69b62c 100644 (file)
@@ -671,7 +671,9 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
                return -EIO;
        }
 
-       clk_prepare_enable(i2c->clk);
+       ret = clk_enable(i2c->clk);
+       if (ret)
+               return ret;
 
        for (i = 0; i < num; i++, msgs++) {
                stop = (i == num - 1);
@@ -695,7 +697,7 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
        }
 
  out:
-       clk_disable_unprepare(i2c->clk);
+       clk_disable(i2c->clk);
        return ret;
 }
 
@@ -747,7 +749,9 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
                return -ENOENT;
        }
 
-       clk_prepare_enable(i2c->clk);
+       ret = clk_prepare_enable(i2c->clk);
+       if (ret)
+               return ret;
 
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        i2c->regs = devm_ioremap_resource(&pdev->dev, mem);
@@ -799,6 +803,10 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, i2c);
 
+       clk_disable(i2c->clk);
+
+       return 0;
+
  err_clk:
        clk_disable_unprepare(i2c->clk);
        return ret;
@@ -810,6 +818,8 @@ static int exynos5_i2c_remove(struct platform_device *pdev)
 
        i2c_del_adapter(&i2c->adap);
 
+       clk_unprepare(i2c->clk);
+
        return 0;
 }
 
@@ -821,6 +831,8 @@ static int exynos5_i2c_suspend_noirq(struct device *dev)
 
        i2c->suspended = 1;
 
+       clk_unprepare(i2c->clk);
+
        return 0;
 }
 
@@ -830,7 +842,9 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
        struct exynos5_i2c *i2c = platform_get_drvdata(pdev);
        int ret = 0;
 
-       clk_prepare_enable(i2c->clk);
+       ret = clk_prepare_enable(i2c->clk);
+       if (ret)
+               return ret;
 
        ret = exynos5_hsi2c_clock_setup(i2c);
        if (ret) {
@@ -839,7 +853,7 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
        }
 
        exynos5_i2c_init(i2c);
-       clk_disable_unprepare(i2c->clk);
+       clk_disable(i2c->clk);
        i2c->suspended = 0;
 
        return 0;
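The exynos5 changes above rely on the prepare/enable split in the common clock framework: clk_prepare() may sleep and so is done once at probe (and undone at remove/suspend), while the non-sleeping clk_enable()/clk_disable() pair gates the clock around each transfer. A minimal kernel-context sketch, assuming the clock was prepared earlier:

#include <linux/clk.h>

static int do_one_xfer(struct clk *clk)
{
	int ret = clk_enable(clk);	/* cheap; no sleeping involved */

	if (ret)
		return ret;

	/* ...perform the bus transfer while the clock is ungated... */

	clk_disable(clk);
	return 0;
}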
index 7ba795b24e75d4d212972ddfa0675d582a9ef671..1c8707710098405ce485cf310d519bee20ff6966 100644 (file)
@@ -75,6 +75,7 @@
 /* PCI DIDs for the Intel SMBus Message Transport (SMT) Devices */
 #define PCI_DEVICE_ID_INTEL_S1200_SMT0 0x0c59
 #define PCI_DEVICE_ID_INTEL_S1200_SMT1 0x0c5a
+#define PCI_DEVICE_ID_INTEL_DNV_SMT    0x19ac
 #define PCI_DEVICE_ID_INTEL_AVOTON_SMT 0x1f15
 
 #define ISMT_DESC_ENTRIES      2       /* number of descriptor entries */
@@ -180,6 +181,7 @@ struct ismt_priv {
 static const struct pci_device_id ismt_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMT) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMT) },
        { 0, }
 };
index f325663c27c532645a1901a385ac6d52543e5ba5..ba14a863b451f943ea862bb94d2f808554a397d0 100644 (file)
@@ -771,11 +771,16 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
        ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
                                   &clk_freq);
        if (ret) {
-               dev_err(&pdev->dev, "clock-frequency not specified in DT");
+               dev_err(&pdev->dev, "clock-frequency not specified in DT\n");
                goto err;
        }
 
        i2c->speed = clk_freq / 1000;
+       if (i2c->speed == 0) {
+               ret = -EINVAL;
+               dev_err(&pdev->dev, "clock-frequency minimum is 1000\n");
+               goto err;
+       }
        jz4780_i2c_set_speed(i2c);
 
        dev_info(&pdev->dev, "Bus frequency is %d KHz\n", i2c->speed);
index 9096d17beb5bb002d914010a77f156a3f3f97f32..3dcc5f3f26cb5fc815210c222f7a5584e8a11e59 100644 (file)
@@ -855,6 +855,7 @@ static struct rk3x_i2c_soc_data soc_data[3] = {
 static const struct of_device_id rk3x_i2c_match[] = {
        { .compatible = "rockchip,rk3066-i2c", .data = (void *)&soc_data[0] },
        { .compatible = "rockchip,rk3188-i2c", .data = (void *)&soc_data[1] },
+       { .compatible = "rockchip,rk3228-i2c", .data = (void *)&soc_data[2] },
        { .compatible = "rockchip,rk3288-i2c", .data = (void *)&soc_data[2] },
        {},
 };
index 0f2f8484e8ec1f51ca8e265bd160d3cbee5c7885..e584d88ee337f66e158644076f7b9a2b0018177e 100644 (file)
@@ -525,22 +525,16 @@ static int i2c_device_match(struct device *dev, struct device_driver *drv)
        return 0;
 }
 
-
-/* uevent helps with hotplug: modprobe -q $(MODALIAS) */
 static int i2c_device_uevent(struct device *dev, struct kobj_uevent_env *env)
 {
-       struct i2c_client       *client = to_i2c_client(dev);
+       struct i2c_client *client = to_i2c_client(dev);
        int rc;
 
        rc = acpi_device_uevent_modalias(dev, env);
        if (rc != -ENODEV)
                return rc;
 
-       if (add_uevent_var(env, "MODALIAS=%s%s",
-                          I2C_MODULE_PREFIX, client->name))
-               return -ENOMEM;
-       dev_dbg(dev, "uevent\n");
-       return 0;
+       return add_uevent_var(env, "MODALIAS=%s%s", I2C_MODULE_PREFIX, client->name);
 }
 
 /* i2c bus recovery routines */
index 7748a0a5ddb9f1cabba4f5318078afdddc614bc9..8de073aed001482461b3ad12398c00fa9417fc95 100644 (file)
@@ -140,22 +140,34 @@ static int i2c_demux_change_master(struct i2c_demux_pinctrl_priv *priv, u32 new_
        return i2c_demux_activate_master(priv, new_chan);
 }
 
-static ssize_t cur_master_show(struct device *dev, struct device_attribute *attr,
-                          char *buf)
+static ssize_t available_masters_show(struct device *dev,
+                                     struct device_attribute *attr,
+                                     char *buf)
 {
        struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev);
        int count = 0, i;
 
        for (i = 0; i < priv->num_chan && count < PAGE_SIZE; i++)
-               count += scnprintf(buf + count, PAGE_SIZE - count, "%c %d - %s\n",
-                                i == priv->cur_chan ? '*' : ' ', i,
-                                priv->chan[i].parent_np->full_name);
+               count += scnprintf(buf + count, PAGE_SIZE - count, "%d:%s%c",
+                                  i, priv->chan[i].parent_np->full_name,
+                                  i == priv->num_chan - 1 ? '\n' : ' ');
 
        return count;
 }
+static DEVICE_ATTR_RO(available_masters);
 
-static ssize_t cur_master_store(struct device *dev, struct device_attribute *attr,
-                           const char *buf, size_t count)
+static ssize_t current_master_show(struct device *dev,
+                                  struct device_attribute *attr,
+                                  char *buf)
+{
+       struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev);
+
+       return sprintf(buf, "%d\n", priv->cur_chan);
+}
+
+static ssize_t current_master_store(struct device *dev,
+                                   struct device_attribute *attr,
+                                   const char *buf, size_t count)
 {
        struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev);
        unsigned int val;
@@ -172,7 +184,7 @@ static ssize_t cur_master_store(struct device *dev, struct device_attribute *att
 
        return ret < 0 ? ret : count;
 }
-static DEVICE_ATTR_RW(cur_master);
+static DEVICE_ATTR_RW(current_master);
 
 static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
 {
@@ -218,12 +230,18 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
        /* switch to first parent as active master */
        i2c_demux_activate_master(priv, 0);
 
-       err = device_create_file(&pdev->dev, &dev_attr_cur_master);
+       err = device_create_file(&pdev->dev, &dev_attr_available_masters);
        if (err)
                goto err_rollback;
 
+       err = device_create_file(&pdev->dev, &dev_attr_current_master);
+       if (err)
+               goto err_rollback_available;
+
        return 0;
 
+err_rollback_available:
+       device_remove_file(&pdev->dev, &dev_attr_available_masters);
 err_rollback:
        for (j = 0; j < i; j++) {
                of_node_put(priv->chan[j].parent_np);
@@ -238,7 +256,8 @@ static int i2c_demux_pinctrl_remove(struct platform_device *pdev)
        struct i2c_demux_pinctrl_priv *priv = platform_get_drvdata(pdev);
        int i;
 
-       device_remove_file(&pdev->dev, &dev_attr_cur_master);
+       device_remove_file(&pdev->dev, &dev_attr_current_master);
+       device_remove_file(&pdev->dev, &dev_attr_available_masters);
 
        i2c_demux_deactivate_master(priv);
 
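The probe/remove changes above keep sysfs creation and teardown symmetric: each device_create_file() has a matching device_remove_file(), and a mid-way failure unwinds only what was already created. A minimal kernel-context sketch reusing the attribute names from this patch:

#include <linux/device.h>

static int create_demux_attrs(struct device *dev)
{
	int err;

	err = device_create_file(dev, &dev_attr_available_masters);
	if (err)
		return err;

	err = device_create_file(dev, &dev_attr_current_master);
	if (err) {
		/* unwind the one file that was already created */
		device_remove_file(dev, &dev_attr_available_masters);
		return err;
	}

	return 0;
}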
index ba947df5a8c7d09af2c36a23d5b6a1d5f4df8bb3..c6935de425fa22ed2b0d08e7d55b5ae02ccde0c6 100644 (file)
@@ -660,6 +660,35 @@ static struct cpuidle_state skl_cstates[] = {
                .enter = NULL }
 };
 
+static struct cpuidle_state skx_cstates[] = {
+       {
+               .name = "C1-SKX",
+               .desc = "MWAIT 0x00",
+               .flags = MWAIT2flg(0x00),
+               .exit_latency = 2,
+               .target_residency = 2,
+               .enter = &intel_idle,
+               .enter_freeze = intel_idle_freeze, },
+       {
+               .name = "C1E-SKX",
+               .desc = "MWAIT 0x01",
+               .flags = MWAIT2flg(0x01),
+               .exit_latency = 10,
+               .target_residency = 20,
+               .enter = &intel_idle,
+               .enter_freeze = intel_idle_freeze, },
+       {
+               .name = "C6-SKX",
+               .desc = "MWAIT 0x20",
+               .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 133,
+               .target_residency = 600,
+               .enter = &intel_idle,
+               .enter_freeze = intel_idle_freeze, },
+       {
+               .enter = NULL }
+};
+
 static struct cpuidle_state atom_cstates[] = {
        {
                .name = "C1E-ATM",
@@ -818,8 +847,11 @@ static int cpu_hotplug_notify(struct notifier_block *n,
                 * driver in this case
                 */
                dev = per_cpu_ptr(intel_idle_cpuidle_devices, hotcpu);
-               if (!dev->registered)
-                       intel_idle_cpu_init(hotcpu);
+               if (dev->registered)
+                       break;
+
+               if (intel_idle_cpu_init(hotcpu))
+                       return NOTIFY_BAD;
 
                break;
        }
@@ -904,6 +936,10 @@ static const struct idle_cpu idle_cpu_skl = {
        .disable_promotion_to_c1e = true,
 };
 
+static const struct idle_cpu idle_cpu_skx = {
+       .state_table = skx_cstates,
+       .disable_promotion_to_c1e = true,
+};
 
 static const struct idle_cpu idle_cpu_avn = {
        .state_table = avn_cstates,
@@ -945,6 +981,9 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
        ICPU(0x56, idle_cpu_bdw),
        ICPU(0x4e, idle_cpu_skl),
        ICPU(0x5e, idle_cpu_skl),
+       ICPU(0x8e, idle_cpu_skl),
+       ICPU(0x9e, idle_cpu_skl),
+       ICPU(0x55, idle_cpu_skx),
        ICPU(0x57, idle_cpu_knl),
        {}
 };
@@ -987,22 +1026,15 @@ static int __init intel_idle_probe(void)
        icpu = (const struct idle_cpu *)id->driver_data;
        cpuidle_state_table = icpu->state_table;
 
-       if (boot_cpu_has(X86_FEATURE_ARAT))     /* Always Reliable APIC Timer */
-               lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
-       else
-               on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
-
        pr_debug(PREFIX "v" INTEL_IDLE_VERSION
                " model 0x%X\n", boot_cpu_data.x86_model);
 
-       pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
-               lapic_timer_reliable_states);
        return 0;
 }
 
 /*
  * intel_idle_cpuidle_devices_uninit()
- * unregister, free cpuidle_devices
+ * Unregisters the cpuidle devices.
  */
 static void intel_idle_cpuidle_devices_uninit(void)
 {
@@ -1013,9 +1045,6 @@ static void intel_idle_cpuidle_devices_uninit(void)
                dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
                cpuidle_unregister_device(dev);
        }
-
-       free_percpu(intel_idle_cpuidle_devices);
-       return;
 }
 
 /*
@@ -1111,7 +1140,7 @@ static void intel_idle_state_table_update(void)
  * intel_idle_cpuidle_driver_init()
  * allocate, initialize cpuidle_states
  */
-static int __init intel_idle_cpuidle_driver_init(void)
+static void __init intel_idle_cpuidle_driver_init(void)
 {
        int cstate;
        struct cpuidle_driver *drv = &intel_idle_driver;
@@ -1163,18 +1192,10 @@ static int __init intel_idle_cpuidle_driver_init(void)
                drv->state_count += 1;
        }
 
-       if (icpu->auto_demotion_disable_flags)
-               on_each_cpu(auto_demotion_disable, NULL, 1);
-
        if (icpu->byt_auto_demotion_disable_flag) {
                wrmsrl(MSR_CC6_DEMOTION_POLICY_CONFIG, 0);
                wrmsrl(MSR_MC6_DEMOTION_POLICY_CONFIG, 0);
        }
-
-       if (icpu->disable_promotion_to_c1e)     /* each-cpu is redundant */
-               on_each_cpu(c1e_promotion_disable, NULL, 1);
-
-       return 0;
 }
 
 
@@ -1193,7 +1214,6 @@ static int intel_idle_cpu_init(int cpu)
 
        if (cpuidle_register_device(dev)) {
                pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu);
-               intel_idle_cpuidle_devices_uninit();
                return -EIO;
        }
 
@@ -1218,40 +1238,51 @@ static int __init intel_idle_init(void)
        if (retval)
                return retval;
 
+       intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
+       if (intel_idle_cpuidle_devices == NULL)
+               return -ENOMEM;
+
        intel_idle_cpuidle_driver_init();
        retval = cpuidle_register_driver(&intel_idle_driver);
        if (retval) {
                struct cpuidle_driver *drv = cpuidle_get_driver();
                printk(KERN_DEBUG PREFIX "intel_idle yielding to %s",
                        drv ? drv->name : "none");
+               free_percpu(intel_idle_cpuidle_devices);
                return retval;
        }
 
-       intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
-       if (intel_idle_cpuidle_devices == NULL)
-               return -ENOMEM;
-
        cpu_notifier_register_begin();
 
        for_each_online_cpu(i) {
                retval = intel_idle_cpu_init(i);
                if (retval) {
+                       intel_idle_cpuidle_devices_uninit();
                        cpu_notifier_register_done();
                        cpuidle_unregister_driver(&intel_idle_driver);
+                       free_percpu(intel_idle_cpuidle_devices);
                        return retval;
                }
        }
        __register_cpu_notifier(&cpu_hotplug_notifier);
 
+       if (boot_cpu_has(X86_FEATURE_ARAT))     /* Always Reliable APIC Timer */
+               lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
+       else
+               on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
+
        cpu_notifier_register_done();
 
+       pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
+               lapic_timer_reliable_states);
+
        return 0;
 }
 
 static void __exit intel_idle_exit(void)
 {
-       intel_idle_cpuidle_devices_uninit();
-       cpuidle_unregister_driver(&intel_idle_driver);
+       struct cpuidle_device *dev;
+       int i;
 
        cpu_notifier_register_begin();
 
@@ -1259,9 +1290,15 @@ static void __exit intel_idle_exit(void)
                on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
        __unregister_cpu_notifier(&cpu_hotplug_notifier);
 
+       for_each_possible_cpu(i) {
+               dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
+               cpuidle_unregister_device(dev);
+       }
+
        cpu_notifier_register_done();
 
-       return;
+       cpuidle_unregister_driver(&intel_idle_driver);
+       free_percpu(intel_idle_cpuidle_devices);
 }
 
 module_init(intel_idle_init);
index c73331f7782b8af4b91c94c26dd29195ddc45d9a..2072a31e813ba01fc0c15274ab60de324201564b 100644 (file)
@@ -547,7 +547,7 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
 {
        int ret;
        int axis = chan->scan_index;
-       unsigned int raw_val;
+       __le16 raw_val;
 
        mutex_lock(&data->mutex);
        ret = bmc150_accel_set_power_state(data, true);
@@ -557,14 +557,14 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
        }
 
        ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_AXIS_TO_REG(axis),
-                              &raw_val, 2);
+                              &raw_val, sizeof(raw_val));
        if (ret < 0) {
                dev_err(data->dev, "Error reading axis %d\n", axis);
                bmc150_accel_set_power_state(data, false);
                mutex_unlock(&data->mutex);
                return ret;
        }
-       *val = sign_extend32(raw_val >> chan->scan_type.shift,
+       *val = sign_extend32(le16_to_cpu(raw_val) >> chan->scan_type.shift,
                             chan->scan_type.realbits - 1);
        ret = bmc150_accel_set_power_state(data, false);
        mutex_unlock(&data->mutex);
@@ -988,6 +988,7 @@ static const struct iio_event_spec bmc150_accel_event = {
                .realbits = (bits),                                     \
                .storagebits = 16,                                      \
                .shift = 16 - (bits),                                   \
+               .endianness = IIO_LE,                                   \
        },                                                              \
        .event_spec = &bmc150_accel_event,                              \
        .num_event_specs = 1                                            \
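The bmc150 hunks above fix a portability bug: the axis register pair is little-endian on the wire, so it is read into a __le16, converted with le16_to_cpu(), shifted past the unused low bits, and sign-extended; .endianness = IIO_LE labels buffered samples the same way. A standalone sketch of that decode, with sign_extend32() reimplemented so it compiles outside the kernel:

#include <stdint.h>
#include <stdio.h>

static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

/* Decode a 2-byte little-endian register dump holding a left-justified
 * sample of `realbits` bits (shift = 16 - realbits, as in the macro above). */
static int decode_axis(const uint8_t raw[2], int realbits)
{
	uint16_t le = raw[0] | (raw[1] << 8);	/* le16_to_cpu() by hand */

	return sign_extend32(le >> (16 - realbits), realbits - 1);
}

int main(void)
{
	const uint8_t raw[2] = { 0xf0, 0xff };	/* -1 in 12 bits, left-justified */

	printf("decoded: %d\n", decode_axis(raw, 12));	/* prints -1 */
	return 0;
}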
index af4aea7b20f950bea5c359247841d00eff0d2a02..82c718c515a01fd1c34eb25e5f9ecfac7e8a1a0a 100644 (file)
@@ -134,6 +134,7 @@ config AT91_ADC
 config AT91_SAMA5D2_ADC
        tristate "Atmel AT91 SAMA5D2 ADC"
        depends on ARCH_AT91 || COMPILE_TEST
+       depends on HAS_IOMEM
        help
          Say yes here to build support for Atmel SAMA5D2 ADC which is
          available on SAMA5D2 SoC family.
index dbee13ad33a3c341777beb60715ec1f7007fd767..2e154cb5168567ce3f09ef095bb65a639802d7e4 100644 (file)
@@ -451,6 +451,8 @@ static int at91_adc_probe(struct platform_device *pdev)
        if (ret)
                goto vref_disable;
 
+       platform_set_drvdata(pdev, indio_dev);
+
        ret = iio_device_register(indio_dev);
        if (ret < 0)
                goto per_clk_disable_unprepare;
index 929508e5266c000864d1095860b7b16f87baf51b..998dc3caad4c15cabe58f7922a594e6976de23ec 100644 (file)
@@ -1386,7 +1386,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
        },
        [max11644] = {
                .bits = 12,
-               .int_vref_mv = 2048,
+               .int_vref_mv = 4096,
                .mode_list = max11644_mode_list,
                .num_modes = ARRAY_SIZE(max11644_mode_list),
                .default_mode = s0to1,
@@ -1396,7 +1396,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
        },
        [max11645] = {
                .bits = 12,
-               .int_vref_mv = 4096,
+               .int_vref_mv = 2048,
                .mode_list = max11644_mode_list,
                .num_modes = ARRAY_SIZE(max11644_mode_list),
                .default_mode = s0to1,
@@ -1406,7 +1406,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
        },
        [max11646] = {
                .bits = 10,
-               .int_vref_mv = 2048,
+               .int_vref_mv = 4096,
                .mode_list = max11644_mode_list,
                .num_modes = ARRAY_SIZE(max11644_mode_list),
                .default_mode = s0to1,
@@ -1416,7 +1416,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
        },
        [max11647] = {
                .bits = 10,
-               .int_vref_mv = 4096,
+               .int_vref_mv = 2048,
                .mode_list = max11644_mode_list,
                .num_modes = ARRAY_SIZE(max11644_mode_list),
                .default_mode = s0to1,
@@ -1680,6 +1680,10 @@ static const struct i2c_device_id max1363_id[] = {
        { "max11615", max11615 },
        { "max11616", max11616 },
        { "max11617", max11617 },
+       { "max11644", max11644 },
+       { "max11645", max11645 },
+       { "max11646", max11646 },
+       { "max11647", max11647 },
        {}
 };
 
index bbce3b09ac45abdbc3491fee51d74bcfbab9eb87..4dac567e75b4f00c61ea11d9c1dbc3b663e56a25 100644 (file)
@@ -452,7 +452,7 @@ static int bmg160_get_temp(struct bmg160_data *data, int *val)
 static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
 {
        int ret;
-       unsigned int raw_val;
+       __le16 raw_val;
 
        mutex_lock(&data->mutex);
        ret = bmg160_set_power_state(data, true);
@@ -462,7 +462,7 @@ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
        }
 
        ret = regmap_bulk_read(data->regmap, BMG160_AXIS_TO_REG(axis), &raw_val,
-                              2);
+                              sizeof(raw_val));
        if (ret < 0) {
                dev_err(data->dev, "Error reading axis %d\n", axis);
                bmg160_set_power_state(data, false);
@@ -470,7 +470,7 @@ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
                return ret;
        }
 
-       *val = sign_extend32(raw_val, 15);
+       *val = sign_extend32(le16_to_cpu(raw_val), 15);
        ret = bmg160_set_power_state(data, false);
        mutex_unlock(&data->mutex);
        if (ret < 0)
@@ -733,6 +733,7 @@ static const struct iio_event_spec bmg160_event = {
                .sign = 's',                                            \
                .realbits = 16,                                 \
                .storagebits = 16,                                      \
+               .endianness = IIO_LE,                                   \
        },                                                              \
        .event_spec = &bmg160_event,                                    \
        .num_event_specs = 1                                            \
@@ -780,7 +781,7 @@ static irqreturn_t bmg160_trigger_handler(int irq, void *p)
                        mutex_unlock(&data->mutex);
                        goto err;
                }
-               data->buffer[i++] = ret;
+               data->buffer[i++] = val;
        }
        mutex_unlock(&data->mutex);
 
index 09db89359544840b5f9ba7b98a8b81a046388459..90ab8a2d2846f8a8591ee6b1615dce2c984020ef 100644 (file)
@@ -238,12 +238,13 @@ static irqreturn_t max30100_interrupt_handler(int irq, void *private)
 
        mutex_lock(&data->lock);
 
-       while (cnt-- || (cnt = max30100_fifo_count(data) > 0)) {
+       while (cnt || (cnt = max30100_fifo_count(data) > 0)) {
                ret = max30100_read_measurement(data);
                if (ret)
                        break;
 
                iio_push_to_buffers(data->indio_dev, data->buffer);
+               cnt--;
        }
 
        mutex_unlock(&data->lock);
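The while-condition fix above (the apds9960 hunk below applies the same fix) removes a post-decrement side effect: in while (cnt-- || ...), cnt is decremented even on the pass where the FIFO count was just assigned, so the body could run once more than the FIFO justified before re-checking the hardware. A standalone sketch demonstrating the extra read:

#include <stdio.h>

static int samples = 1;				/* one sample queued */

static int fifo_count(void) { return samples; }
static void read_measurement(void) { if (samples) samples--; }

int main(void)
{
	int cnt = 0, reads = 0;

	/* Buggy form: right after the FIFO check stores 1 in cnt, the next
	 * pass evaluates cnt-- as true and reads a second, nonexistent
	 * sample before consulting fifo_count() again. */
	while (cnt-- || (cnt = fifo_count() > 0)) {
		read_measurement();
		reads++;
	}

	printf("buggy loop: %d read(s) for 1 queued sample\n", reads);
	return 0;
}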
index a7f557af43895bfcf3b60e1298bee529246f0d35..847455a2d6bb0275bdc9636eb23f1f7e00bdc653 100644 (file)
@@ -9,9 +9,8 @@ config INV_MPU6050_IIO
 
 config INV_MPU6050_I2C
        tristate "Invensense MPU6050 devices (I2C)"
-       depends on I2C
+       depends on I2C_MUX
        select INV_MPU6050_IIO
-       select I2C_MUX
        select REGMAP_I2C
        help
          This driver supports the Invensense MPU6050 devices.
index f581256d9d4c2473eca8979b3c0075c308f844a8..5ee4e0dc093e687445cfb3daab47f8a7f0e182fe 100644 (file)
@@ -104,6 +104,19 @@ static int inv_mpu6050_deselect_bypass(struct i2c_adapter *adap,
        return 0;
 }
 
+static const char *inv_mpu_match_acpi_device(struct device *dev, int *chip_id)
+{
+       const struct acpi_device_id *id;
+
+       id = acpi_match_device(dev->driver->acpi_match_table, dev);
+       if (!id)
+               return NULL;
+
+       *chip_id = (int)id->driver_data;
+
+       return dev_name(dev);
+}
+
 /**
  *  inv_mpu_probe() - probe function.
  *  @client:          i2c client.
@@ -115,14 +128,25 @@ static int inv_mpu_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
 {
        struct inv_mpu6050_state *st;
-       int result;
-       const char *name = id ? id->name : NULL;
+       int result, chip_type;
        struct regmap *regmap;
+       const char *name;
 
        if (!i2c_check_functionality(client->adapter,
                                     I2C_FUNC_SMBUS_I2C_BLOCK))
                return -EOPNOTSUPP;
 
+       if (id) {
+               chip_type = (int)id->driver_data;
+               name = id->name;
+       } else if (ACPI_HANDLE(&client->dev)) {
+               name = inv_mpu_match_acpi_device(&client->dev, &chip_type);
+               if (!name)
+                       return -ENODEV;
+       } else {
+               return -ENOSYS;
+       }
+
        regmap = devm_regmap_init_i2c(client, &inv_mpu_regmap_config);
        if (IS_ERR(regmap)) {
                dev_err(&client->dev, "Failed to register i2c regmap %d\n",
@@ -131,7 +155,7 @@ static int inv_mpu_probe(struct i2c_client *client,
        }
 
        result = inv_mpu_core_probe(regmap, client->irq, name,
-                                   NULL, id->driver_data);
+                                   NULL, chip_type);
        if (result < 0)
                return result;
 
index dea6c4361de013c38441100dc26078fd13393a89..7bcb8d839f0549a1a89c6291a78796841cf1db5c 100644 (file)
@@ -46,6 +46,7 @@ static int inv_mpu_probe(struct spi_device *spi)
        struct regmap *regmap;
        const struct spi_device_id *id = spi_get_device_id(spi);
        const char *name = id ? id->name : NULL;
+       const int chip_type = id ? id->driver_data : 0;
 
        regmap = devm_regmap_init_spi(spi, &inv_mpu_regmap_config);
        if (IS_ERR(regmap)) {
@@ -55,7 +56,7 @@ static int inv_mpu_probe(struct spi_device *spi)
        }
 
        return inv_mpu_core_probe(regmap, spi->irq, name,
-                                 inv_mpu_i2c_disable, id->driver_data);
+                                 inv_mpu_i2c_disable, chip_type);
 }
 
 static int inv_mpu_remove(struct spi_device *spi)
index b976332d45d3469361d7801201fce65dbd93e583..90462fcf543698bab68378f5a0cb4b8d7262ef2c 100644 (file)
@@ -653,6 +653,7 @@ static int iio_verify_update(struct iio_dev *indio_dev,
        unsigned int modes;
 
        memset(config, 0, sizeof(*config));
+       config->watermark = ~0;
 
        /*
         * If there is just one buffer and we are removing it there is nothing
index f6a07dc32ae486a045b97cfb4406783a9aa05ab8..a6af56ad10e1d828668f62471218da3f3875aa0e 100644 (file)
@@ -769,7 +769,7 @@ static void apds9960_read_gesture_fifo(struct apds9960_data *data)
        mutex_lock(&data->lock);
        data->gesture_mode_running = 1;
 
-       while (cnt-- || (cnt = apds9660_fifo_is_empty(data) > 0)) {
+       while (cnt || (cnt = apds9660_fifo_is_empty(data) > 0)) {
                ret = regmap_bulk_read(data->regmap, APDS9960_REG_GFIFO_BASE,
                                      &data->buffer, 4);
 
@@ -777,6 +777,7 @@ static void apds9960_read_gesture_fifo(struct apds9960_data *data)
                        goto err_read;
 
                iio_push_to_buffers(data->indio_dev, data->buffer);
+               cnt--;
        }
 
 err_read:
index 9c5c9ef3f1dad25f37e108aeb9752940f46aa393..0e931a9a1669424278a1daa935a919e32db32e35 100644 (file)
@@ -462,6 +462,8 @@ static int ak8975_setup_irq(struct ak8975_data *data)
        int rc;
        int irq;
 
+       init_waitqueue_head(&data->data_ready_queue);
+       clear_bit(0, &data->flags);
        if (client->irq)
                irq = client->irq;
        else
@@ -477,8 +479,6 @@ static int ak8975_setup_irq(struct ak8975_data *data)
                return rc;
        }
 
-       init_waitqueue_head(&data->data_ready_queue);
-       clear_bit(0, &data->flags);
        data->eoc_irq = irq;
 
        return rc;
@@ -732,7 +732,7 @@ static int ak8975_probe(struct i2c_client *client,
        int eoc_gpio;
        int err;
        const char *name = NULL;
-       enum asahi_compass_chipset chipset;
+       enum asahi_compass_chipset chipset = AK_MAX_TYPE;
 
        /* Grab and set up the supplied GPIO. */
        if (client->dev.platform_data)
index 06a4d9c3558196ec54d1d04200e62b14753c4c62..9daca46819222bedb73e166eda7d1607af6277c8 100644 (file)
@@ -44,6 +44,7 @@ static inline int st_magn_allocate_ring(struct iio_dev *indio_dev)
 static inline void st_magn_deallocate_ring(struct iio_dev *indio_dev)
 {
 }
+#define ST_MAGN_TRIGGER_SET_STATE NULL
 #endif /* CONFIG_IIO_BUFFER */
 
 #endif /* ST_MAGN_H */
index cb00d59da45616af57a03f29454434ec50fa9826..c2e257d97effb9813ef22b58ab580507dbf4134d 100644 (file)
@@ -691,7 +691,8 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
                              NULL);
 
                /* Couldn't find default GID location */
-               WARN_ON(ix < 0);
+               if (WARN_ON(ix < 0))
+                       goto release;
 
                zattr_type.gid_type = gid_type;
 
index 4a9aa0433b07f46b67412b3cf70c9e1bc63c4a45..7713ef089c3ccc5109045c2ed88455799d740033 100644 (file)
@@ -48,6 +48,7 @@
 
 #include <asm/uaccess.h>
 
+#include <rdma/ib.h>
 #include <rdma/ib_cm.h>
 #include <rdma/ib_user_cm.h>
 #include <rdma/ib_marshall.h>
@@ -1103,6 +1104,9 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
        struct ib_ucm_cmd_hdr hdr;
        ssize_t result;
 
+       if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+               return -EACCES;
+
        if (len < sizeof(hdr))
                return -EINVAL;
 
index dd3bcceadfdef2d277109e0ecfeb334052c1efa4..c0f3826abb30aa09d755650d5055610679c846c8 100644 (file)
@@ -1574,6 +1574,9 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
        struct rdma_ucm_cmd_hdr hdr;
        ssize_t ret;
 
+       if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+               return -EACCES;
+
        if (len < sizeof(hdr))
                return -EINVAL;
 
index 28ba2cc815355e332dd9877178f38e76784cbcc6..31f422a70623a5df98f94acc90a6f6a2d598c2bc 100644 (file)
@@ -48,6 +48,8 @@
 
 #include <asm/uaccess.h>
 
+#include <rdma/ib.h>
+
 #include "uverbs.h"
 
 MODULE_AUTHOR("Roland Dreier");
@@ -709,6 +711,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
        int srcu_key;
        ssize_t ret;
 
+       if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+               return -EACCES;
+
        if (count < sizeof hdr)
                return -EINVAL;
 
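The three identical guards added above (ib_ucm, rdma_ucm and ib_uverbs) harden write()-based command parsers that act with the caller's credentials: a descriptor reached through splice(), or written from a kernel context with a widened address limit, must be rejected before the header is parsed. The helper comes from the newly included <rdma/ib.h>; paraphrased (not quoted verbatim), it checks roughly:

#include <linux/cred.h>
#include <linux/fs.h>
#include <asm/uaccess.h>

/* Paraphrase of ib_safe_file_access(): the writer must still be the
 * credential context that opened the file, and the address limit must
 * be the normal user-space one. */
static inline bool ib_safe_file_access_sketch(struct file *filp)
{
	return filp->f_cred == current_cred() &&
	       segment_eq(get_fs(), USER_DS);
}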
index 15b8adbf39c0f46fcf25726a5eefc6cc24f2d046..b65b3541e7329e9d716dd40fb81189db97ec9700 100644 (file)
@@ -1860,6 +1860,7 @@ EXPORT_SYMBOL(ib_drain_rq);
 void ib_drain_qp(struct ib_qp *qp)
 {
        ib_drain_sq(qp);
-       ib_drain_rq(qp);
+       if (!qp->srq)
+               ib_drain_rq(qp);
 }
 EXPORT_SYMBOL(ib_drain_qp);
index 42a7b8952d13241e40b531dc9959f3405f3a62a6..3234a8be16f6c53e4d155920cb766236b0f74306 100644 (file)
@@ -1390,6 +1390,8 @@ int iwch_register_device(struct iwch_dev *dev)
        dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
        dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
        dev->ibdev.iwcm->get_qp = iwch_get_qp;
+       memcpy(dev->ibdev.iwcm->ifname, dev->rdev.t3cdev_p->lldev->name,
+              sizeof(dev->ibdev.iwcm->ifname));
 
        ret = ib_register_device(&dev->ibdev, NULL);
        if (ret)
index b4eeb783573c88e50904ff20668fdcd10ee86e4c..b0b9557244582bf4c87c5a389c6c7c5bfb286717 100644 (file)
@@ -162,7 +162,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
        cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS,
                                      &cq->bar2_qid,
                                      user ? &cq->bar2_pa : NULL);
-       if (user && !cq->bar2_va) {
+       if (user && !cq->bar2_pa) {
                pr_warn(MOD "%s: cqid %u not in BAR2 range.\n",
                        pci_name(rdev->lldi.pdev), cq->cqid);
                ret = -EINVAL;
index 124682dc57094cfb3970167bf2172863e4f7e0f5..7574f394fdac892856f9e7e7f98d3d112dc321b1 100644 (file)
@@ -580,6 +580,8 @@ int c4iw_register_device(struct c4iw_dev *dev)
        dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref;
        dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
        dev->ibdev.iwcm->get_qp = c4iw_get_qp;
+       memcpy(dev->ibdev.iwcm->ifname, dev->rdev.lldi.ports[0]->name,
+              sizeof(dev->ibdev.iwcm->ifname));
 
        ret = ib_register_device(&dev->ibdev, NULL);
        if (ret)
index e17fb5d5e0339ac2e29541e44c145a51d0d19d2f..e8993e49b8b3a6d5a99634c4f7376a761762bda0 100644 (file)
@@ -185,6 +185,10 @@ void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
 
        if (pbar2_pa)
                *pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK;
+
+       if (is_t4(rdev->lldi.adapter_type))
+               return NULL;
+
        return rdev->bar2_kva + bar2_qoffset;
 }
 
@@ -270,7 +274,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
        /*
         * User mode must have bar2 access.
         */
-       if (user && (!wq->sq.bar2_va || !wq->rq.bar2_va)) {
+       if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) {
                pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n",
                        pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
                goto free_dma;
@@ -1895,13 +1899,27 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 void c4iw_drain_sq(struct ib_qp *ibqp)
 {
        struct c4iw_qp *qp = to_c4iw_qp(ibqp);
+       unsigned long flag;
+       bool need_to_wait;
 
-       wait_for_completion(&qp->sq_drained);
+       spin_lock_irqsave(&qp->lock, flag);
+       need_to_wait = !t4_sq_empty(&qp->wq);
+       spin_unlock_irqrestore(&qp->lock, flag);
+
+       if (need_to_wait)
+               wait_for_completion(&qp->sq_drained);
 }
 
 void c4iw_drain_rq(struct ib_qp *ibqp)
 {
        struct c4iw_qp *qp = to_c4iw_qp(ibqp);
+       unsigned long flag;
+       bool need_to_wait;
+
+       spin_lock_irqsave(&qp->lock, flag);
+       need_to_wait = !t4_rq_empty(&qp->wq);
+       spin_unlock_irqrestore(&qp->lock, flag);
 
-       wait_for_completion(&qp->rq_drained);
+       if (need_to_wait)
+               wait_for_completion(&qp->rq_drained);
 }
index 92745d755272df6b097e5cbdf0131ecc5ff046f8..38f917a6c7784101247c6a59af14b21a73d85d0a 100644 (file)
@@ -1992,7 +1992,6 @@ static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
 /**
  * i40iw_get_dst_ipv6
  */
-#if IS_ENABLED(CONFIG_IPV6)
 static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr,
                                            struct sockaddr_in6 *dst_addr)
 {
@@ -2008,7 +2007,6 @@ static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr,
        dst = ip6_route_output(&init_net, NULL, &fl6);
        return dst;
 }
-#endif
 
 /**
  * i40iw_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address
@@ -2016,7 +2014,6 @@ static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr,
  * @dst_ip: remote ip address
  * @arpindex: if there is an arp entry
  */
-#if IS_ENABLED(CONFIG_IPV6)
 static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
                                         u32 *src,
                                         u32 *dest,
@@ -2089,7 +2086,6 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
        dst_release(dst);
        return rc;
 }
-#endif
 
 /**
  * i40iw_ipv4_is_loopback - check if loopback
@@ -2190,13 +2186,13 @@ static struct i40iw_cm_node *i40iw_make_cm_node(
                                                            cm_info->loc_addr[0],
                                                            cm_info->rem_addr[0],
                                                            oldarpindex);
-#if IS_ENABLED(CONFIG_IPV6)
-               else
+               else if (IS_ENABLED(CONFIG_IPV6))
                        arpindex = i40iw_addr_resolve_neigh_ipv6(iwdev,
                                                                 cm_info->loc_addr,
                                                                 cm_info->rem_addr,
                                                                 oldarpindex);
-#endif
+               else
+                       arpindex = -EINVAL;
        }
        if (arpindex < 0) {
                i40iw_pr_err("cm_node arpindex\n");
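
Converting the preprocessor #if IS_ENABLED(CONFIG_IPV6) blocks into an ordinary else if (IS_ENABLED(CONFIG_IPV6)) keeps the IPv6 branch visible to the compiler in every configuration, so it is always parsed and type-checked, while constant folding still removes it from IPV6=n builds. A hedged sketch of the idiom follows; CONFIG_FOO_DEMO and the one-line IS_ENABLED() are made-up stand-ins, not the kernel's definitions.

    /*
     * Sketch of the IS_ENABLED() idiom; CONFIG_FOO_DEMO and this one-line
     * IS_ENABLED() are invented stand-ins, not the kernel's definitions.
     */
    #include <stdio.h>

    #define CONFIG_FOO_DEMO 1           /* pretend Kconfig output: 1 or 0 */
    #define IS_ENABLED(opt) (opt)       /* the real macro is more involved */

    static int handle_v4(void) { return 4; }
    static int handle_v6(void) { return 6; }

    static int dispatch(int ipv6)
    {
            if (!ipv6)
                    return handle_v4();
            else if (IS_ENABLED(CONFIG_FOO_DEMO))
                    return handle_v6(); /* folded away when the option is 0... */
            else
                    return -1;          /* ...yet always parsed and checked */
    }

    int main(void)
    {
            printf("%d\n", dispatch(1));
            return 0;
    }
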
index 90e5af21737e0017902b1bf72f7e77cecc195b54..e41fae2422ab8ce6459c27fcec8384969cdf1ac7 100644 (file)
@@ -1863,7 +1863,7 @@ static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
 }
 
 /* client interface functions */
-static struct i40e_client_ops i40e_ops = {
+static const struct i40e_client_ops i40e_ops = {
        .open = i40iw_open,
        .close = i40iw_close,
        .l2_param_change = i40iw_l2param_change,
index fd97534762b8dc7f83651a52e9398a3aa6e4944e..81b0e1fbec1d96ddce580e4021f2560589cc22b2 100644 (file)
@@ -419,7 +419,8 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 }
 
 static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
-                             enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp)
+                             enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp,
+                             bool shrink_wqe)
 {
        int s;
 
@@ -477,7 +478,7 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
         * We set WQE size to at least 64 bytes, this way stamping
         * invalidates each WQE.
         */
-       if (dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC &&
+       if (shrink_wqe && dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC &&
            qp->sq_signal_bits && BITS_PER_LONG == 64 &&
            type != MLX4_IB_QPT_SMI && type != MLX4_IB_QPT_GSI &&
            !(type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_SMI |
@@ -642,6 +643,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 {
        int qpn;
        int err;
+       struct ib_qp_cap backup_cap;
        struct mlx4_ib_sqp *sqp;
        struct mlx4_ib_qp *qp;
        enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
@@ -775,7 +777,9 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
                                goto err;
                }
 
-               err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp);
+               memcpy(&backup_cap, &init_attr->cap, sizeof(backup_cap));
+               err = set_kernel_sq_size(dev, &init_attr->cap,
+                                        qp_type, qp, true);
                if (err)
                        goto err;
 
@@ -787,9 +791,20 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
                        *qp->db.db = 0;
                }
 
-               if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf, gfp)) {
-                       err = -ENOMEM;
-                       goto err_db;
+               if (mlx4_buf_alloc(dev->dev, qp->buf_size, qp->buf_size,
+                                  &qp->buf, gfp)) {
+                       memcpy(&init_attr->cap, &backup_cap,
+                              sizeof(backup_cap));
+                       err = set_kernel_sq_size(dev, &init_attr->cap, qp_type,
+                                                qp, false);
+                       if (err)
+                               goto err_db;
+
+                       if (mlx4_buf_alloc(dev->dev, qp->buf_size,
+                                          PAGE_SIZE * 2, &qp->buf, gfp)) {
+                               err = -ENOMEM;
+                               goto err_db;
+                       }
                }
 
                err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift,
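
The mlx4 change above is an optimistic-allocation pattern: first request one buffer contiguous and aligned to its own size, and only if that fails recompute a smaller send queue (the new shrink_wqe=false pass) and retry with the old PAGE_SIZE * 2 alignment. A rough userspace sketch, assuming power-of-two sizes and with posix_memalign() standing in for mlx4_buf_alloc(); all names are hypothetical.

    /*
     * Fallback-allocation sketch; sizes are assumed to be powers of two and
     * posix_memalign() stands in for mlx4_buf_alloc(). Hypothetical names.
     */
    #include <stdlib.h>

    static void *try_alloc(size_t size, size_t align)
    {
            void *p = NULL;

            if (posix_memalign(&p, align, size) != 0)
                    return NULL;
            return p;
    }

    void *alloc_queue_buf(size_t preferred, size_t shrunk, size_t page)
    {
            /* First attempt: one block aligned to its own size. */
            void *buf = try_alloc(preferred, preferred);

            if (!buf) {
                    /*
                     * Recompute a smaller queue and retry with the relaxed
                     * two-page alignment, as the driver does after the
                     * first attempt fails.
                     */
                    buf = try_alloc(shrunk, page * 2);
            }
            return buf;
    }
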
index 5acf346e048e3bb45fc33d97eeecd1321bf2ef9d..4cb81f68d85090f43cc694eca6f400b99ab3adfd 100644 (file)
@@ -530,7 +530,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
                     sizeof(struct mlx5_wqe_ctrl_seg)) /
                     sizeof(struct mlx5_wqe_data_seg);
        props->max_sge = min(max_rq_sg, max_sq_sg);
-       props->max_sge_rd = props->max_sge;
+       props->max_sge_rd          = MLX5_MAX_SGE_RD;
        props->max_cq              = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
        props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
        props->max_mr              = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
@@ -671,8 +671,8 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;
        struct mlx5_hca_vport_context *rep;
-       int max_mtu;
-       int oper_mtu;
+       u16 max_mtu;
+       u16 oper_mtu;
        int err;
        u8 ib_link_width_oper;
        u8 vl_hw_cap;
@@ -1438,7 +1438,8 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
        if (!ft) {
                ft = mlx5_create_auto_grouped_flow_table(ns, priority,
                                                         num_entries,
-                                                        num_groups);
+                                                        num_groups,
+                                                        0);
 
                if (!IS_ERR(ft)) {
                        prio->refcount = 0;
index f16c818ad2e62f6cd507b80b096a06d3c0e73164..b46c25542a7c6285d6c5088f2006889d59eaf10f 100644 (file)
@@ -776,15 +776,6 @@ void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp);
 void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
 void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
                              unsigned long end);
-int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
-                         u8 port, struct ifla_vf_info *info);
-int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
-                             u8 port, int state);
-int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
-                        u8 port, struct ifla_vf_stats *stats);
-int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
-                       u64 guid, int type);
-
 #else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
 static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
 {
@@ -801,6 +792,15 @@ static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp)  {}
 
 #endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
 
+int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
+                         u8 port, struct ifla_vf_info *info);
+int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
+                             u8 port, int state);
+int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
+                        u8 port, struct ifla_vf_stats *stats);
+int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
+                       u64 guid, int type);
+
 __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
                               int index);
 
index 3ea9e055fdd37f27be5f8accad68aca74caa3780..2b27d1351cf7df44c6803fe79315bf277b071826 100644 (file)
@@ -356,7 +356,7 @@ static int nes_netdev_stop(struct net_device *netdev)
 /**
  * nes_nic_send
  */
-static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
+static bool nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
 {
        struct nes_vnic *nesvnic = netdev_priv(netdev);
        struct nes_device *nesdev = nesvnic->nesdev;
@@ -413,7 +413,7 @@ static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
                                        netdev->name, skb_shinfo(skb)->nr_frags + 2, skb_headlen(skb));
                        kfree_skb(skb);
                        nesvnic->tx_sw_dropped++;
-                       return NETDEV_TX_LOCKED;
+                       return false;
                }
                set_bit(nesnic->sq_head, nesnic->first_frag_overflow);
                bus_address = pci_map_single(nesdev->pcidev, skb->data + NES_FIRST_FRAG_SIZE,
@@ -454,8 +454,7 @@ static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
        set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_MISC_IDX, wqe_misc);
        nesnic->sq_head++;
        nesnic->sq_head &= nesnic->sq_size - 1;
-
-       return NETDEV_TX_OK;
+       return true;
 }
 
 
@@ -479,7 +478,6 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
        u32 tso_wqe_length;
        u32 curr_tcp_seq;
        u32 wqe_count=1;
-       u32 send_rc;
        struct iphdr *iph;
        __le16 *wqe_fragment_length;
        u32 nr_frags;
@@ -500,9 +498,6 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
         *              skb_shinfo(skb)->nr_frags, skb_is_gso(skb));
         */
 
-       if (!netif_carrier_ok(netdev))
-               return NETDEV_TX_OK;
-
        if (netif_queue_stopped(netdev))
                return NETDEV_TX_BUSY;
 
@@ -673,13 +668,11 @@ tso_sq_no_longer_full:
                        skb_linearize(skb);
                        skb_set_transport_header(skb, hoffset);
                        skb_set_network_header(skb, nhoffset);
-                       send_rc = nes_nic_send(skb, netdev);
-                       if (send_rc != NETDEV_TX_OK)
+                       if (!nes_nic_send(skb, netdev))
                                return NETDEV_TX_OK;
                }
        } else {
-               send_rc = nes_nic_send(skb, netdev);
-               if (send_rc != NETDEV_TX_OK)
+               if (!nes_nic_send(skb, netdev))
                        return NETDEV_TX_OK;
        }
 
@@ -689,7 +682,7 @@ tso_sq_no_longer_full:
                nes_write32(nesdev->regs+NES_WQE_ALLOC,
                                (wqe_count << 24) | (1 << 23) | nesvnic->nic.qp_id);
 
-       netdev->trans_start = jiffies;
+       netif_trans_update(netdev);
 
        return NETDEV_TX_OK;
 }
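
Two independent cleanups meet in this file: the internal send helper now returns a plain bool instead of overloading NETDEV_TX_* codes (the NETDEV_TX_LOCKED return was being retired), and the open-coded dev->trans_start = jiffies stores become netif_trans_update(), hiding the timestamp's representation behind one helper. The wrapping idea in miniature, with invented names and time() merely standing in for jiffies:

    /*
     * Minimal sketch of hiding a raw timestamp store behind a helper, the
     * idea behind netif_trans_update(); struct and names are invented.
     */
    #include <time.h>

    struct demo_netdev {
            time_t trans_start;         /* imagine this later moves per-queue */
    };

    static inline void demo_trans_update(struct demo_netdev *dev)
    {
            dev->trans_start = time(NULL);      /* one place to change later */
    }

Once every driver calls the helper, the field itself can migrate (as trans_start did, into the per-queue structure) without touching any caller again.
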
index e449e394963f00d42cd11ecafbca6081f9011bcd..24f4a782e0f431282bb8f79fc496a0b998a7a55a 100644 (file)
@@ -45,6 +45,8 @@
 #include <linux/export.h>
 #include <linux/uio.h>
 
+#include <rdma/ib.h>
+
 #include "qib.h"
 #include "qib_common.h"
 #include "qib_user_sdma.h"
@@ -2067,6 +2069,9 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
        ssize_t ret = 0;
        void *dest;
 
+       if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
+               return -EACCES;
+
        if (count < sizeof(cmd.type)) {
                ret = -EINVAL;
                goto bail;
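
The qib write() handler now rejects the call before reading any payload unless ib_safe_file_access() confirms the caller's context matches the one the file was opened under. A generic userspace analogue of such an early access guard in a command-style handler; is_safe_caller() and struct ctx are inventions standing in for ib_safe_file_access() and the driver's file context.

    /*
     * Userspace analogue of an early access guard in a command-style write
     * handler; is_safe_caller() and struct ctx are illustrative inventions.
     */
    #include <errno.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <sys/types.h>

    struct ctx {
            pid_t owner;                /* who opened the "file" */
    };

    static bool is_safe_caller(const struct ctx *c, pid_t caller)
    {
            return c->owner == caller;
    }

    ssize_t handle_write(struct ctx *c, pid_t caller,
                         const char *data, size_t count)
    {
            /* Reject before touching the payload at all. */
            if (!is_safe_caller(c, caller))
                    return -EACCES;
            if (count < 4)
                    return -EINVAL;
            /* ...parse and dispatch the command from data... */
            (void)data;
            return (ssize_t)count;
    }
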
index bd82a6948dc89f803333b2abd1841d8067d9c7f5..a9e3bcc522c40167abc3e22caf18a2669b12e73d 100644 (file)
@@ -1637,9 +1637,9 @@ bail:
        spin_unlock_irqrestore(&qp->s_hlock, flags);
        if (nreq) {
                if (call_send)
-                       rdi->driver_f.schedule_send_no_lock(qp);
-               else
                        rdi->driver_f.do_send(qp);
+               else
+                       rdi->driver_f.schedule_send_no_lock(qp);
        }
        return err;
 }
index c8ed53562c9b54cfc3fd21ece14ea6b95f94c872..b2f42835d76d51cfed64266d6f884f46de3a11d1 100644 (file)
@@ -766,7 +766,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
                ipoib_dma_unmap_tx(priv, tx_req);
                dev_kfree_skb_any(skb);
        } else {
-               dev->trans_start = jiffies;
+               netif_trans_update(dev);
                ++tx->tx_head;
 
                if (++priv->tx_outstanding == ipoib_sendq_size) {
index f0e55e47eb540c0c5fbf95e5ee00d38df14b9e97..3643d559ba316960716809f93007d00a2d46fe0e 100644 (file)
@@ -637,7 +637,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
                if (netif_queue_stopped(dev))
                        netif_wake_queue(dev);
        } else {
-               dev->trans_start = jiffies;
+               netif_trans_update(dev);
 
                address->last_send = priv->tx_head;
                ++priv->tx_head;
index 80807d6e5c4cff878f25eaf0861c9c8822843676..b940ef1c19c70e400905322a57ea0d49d2f0a3f2 100644 (file)
@@ -1036,7 +1036,7 @@ static void ipoib_timeout(struct net_device *dev)
        struct ipoib_dev_priv *priv = netdev_priv(dev);
 
        ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
-                  jiffies_to_msecs(jiffies - dev->trans_start));
+                  jiffies_to_msecs(jiffies - dev_trans_start(dev)));
        ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
                   netif_queue_stopped(dev),
                   priv->tx_head, priv->tx_tail);
index 80b6bedc172f32a1d9dfad1e36ee8aa3a4a16240..64b3d11dcf1ed588a95d149f2bbd9ff884562893 100644 (file)
@@ -612,6 +612,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
        struct Scsi_Host *shost;
        struct iser_conn *iser_conn = NULL;
        struct ib_conn *ib_conn;
+       u32 max_fr_sectors;
        u16 max_cmds;
 
        shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0);
@@ -632,7 +633,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
                iser_conn = ep->dd_data;
                max_cmds = iser_conn->max_cmds;
                shost->sg_tablesize = iser_conn->scsi_sg_tablesize;
-               shost->max_sectors = iser_conn->scsi_max_sectors;
 
                mutex_lock(&iser_conn->state_mutex);
                if (iser_conn->state != ISER_CONN_UP) {
@@ -657,8 +657,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
                 */
                shost->sg_tablesize = min_t(unsigned short, shost->sg_tablesize,
                        ib_conn->device->ib_device->attrs.max_fast_reg_page_list_len);
-               shost->max_sectors = min_t(unsigned int,
-                       1024, (shost->sg_tablesize * PAGE_SIZE) >> 9);
 
                if (iscsi_host_add(shost,
                                   ib_conn->device->ib_device->dma_device)) {
@@ -672,6 +670,15 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
                        goto free_host;
        }
 
+       /*
+        * FRs or FMRs can only map up to a (device) page per entry, but if the
+        * first entry is misaligned we'll end up using two entries
+        * (head and tail) for a single page's worth of data, so we have to drop
+        * one segment from the calculation.
+        */
+       max_fr_sectors = ((shost->sg_tablesize - 1) * PAGE_SIZE) >> 9;
+       shost->max_sectors = min(iser_max_sectors, max_fr_sectors);
+
        if (cmds_max > max_cmds) {
                iser_info("cmds_max changed from %u to %u\n",
                          cmds_max, max_cmds);
@@ -989,7 +996,6 @@ static struct scsi_host_template iscsi_iser_sht = {
        .queuecommand           = iscsi_queuecommand,
        .change_queue_depth     = scsi_change_queue_depth,
        .sg_tablesize           = ISCSI_ISER_DEF_SG_TABLESIZE,
-       .max_sectors            = ISER_DEF_MAX_SECTORS,
        .cmd_per_lun            = ISER_DEF_CMD_PER_LUN,
        .eh_abort_handler       = iscsi_eh_abort,
        .eh_device_reset_handler= iscsi_eh_device_reset,
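
The new cap is computed in 512-byte sectors: each fast-registration entry maps at most one page, and one entry is reserved for the misaligned head/tail case described in the comment above. A worked example, assuming 4 KiB pages and sg_tablesize = 128 (sample numbers for illustration, not values taken from this driver):

    /*
     * Worked example of the max_fr_sectors arithmetic; the sample values
     * (128 entries, 4 KiB pages) are assumptions for illustration.
     */
    #include <stdio.h>

    int main(void)
    {
            unsigned int sg_tablesize = 128;    /* device's FR page-list length */
            unsigned int page_size = 4096;
            unsigned int max_fr_sectors =
                    ((sg_tablesize - 1) * page_size) >> 9;  /* 512 B sectors */

            printf("%u\n", max_fr_sectors);     /* prints 1016 */
            return 0;
    }
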
index 60b30d338a813447782449a54b6f7161874d4b22..411e4464ca235bc984d5bc34c723a52e06071f31 100644 (file)
@@ -63,7 +63,6 @@ isert_rdma_accept(struct isert_conn *isert_conn);
 struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
 
 static void isert_release_work(struct work_struct *work);
-static void isert_wait4flush(struct isert_conn *isert_conn);
 static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc);
 static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc);
 static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc);
@@ -141,7 +140,7 @@ isert_create_qp(struct isert_conn *isert_conn,
        attr.qp_context = isert_conn;
        attr.send_cq = comp->cq;
        attr.recv_cq = comp->cq;
-       attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
+       attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1;
        attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
        attr.cap.max_send_sge = device->ib_device->attrs.max_sge;
        isert_conn->max_sge = min(device->ib_device->attrs.max_sge,
@@ -887,7 +886,7 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id,
                break;
        case ISER_CONN_UP:
                isert_conn_terminate(isert_conn);
-               isert_wait4flush(isert_conn);
+               ib_drain_qp(isert_conn->qp);
                isert_handle_unbound_conn(isert_conn);
                break;
        case ISER_CONN_BOUND:
@@ -3213,36 +3212,6 @@ isert_wait4cmds(struct iscsi_conn *conn)
        }
 }
 
-static void
-isert_beacon_done(struct ib_cq *cq, struct ib_wc *wc)
-{
-       struct isert_conn *isert_conn = wc->qp->qp_context;
-
-       isert_print_wc(wc, "beacon");
-
-       isert_info("conn %p completing wait_comp_err\n", isert_conn);
-       complete(&isert_conn->wait_comp_err);
-}
-
-static void
-isert_wait4flush(struct isert_conn *isert_conn)
-{
-       struct ib_recv_wr *bad_wr;
-       static struct ib_cqe cqe = { .done = isert_beacon_done };
-
-       isert_info("conn %p\n", isert_conn);
-
-       init_completion(&isert_conn->wait_comp_err);
-       isert_conn->beacon.wr_cqe = &cqe;
-       /* post an indication that all flush errors were consumed */
-       if (ib_post_recv(isert_conn->qp, &isert_conn->beacon, &bad_wr)) {
-               isert_err("conn %p failed to post beacon", isert_conn);
-               return;
-       }
-
-       wait_for_completion(&isert_conn->wait_comp_err);
-}
-
 /**
  * isert_put_unsol_pending_cmds() - Drop commands waiting for
  *     unsolicited dataout
@@ -3288,7 +3257,7 @@ static void isert_wait_conn(struct iscsi_conn *conn)
        isert_conn_terminate(isert_conn);
        mutex_unlock(&isert_conn->mutex);
 
-       isert_wait4flush(isert_conn);
+       ib_drain_qp(isert_conn->qp);
        isert_put_unsol_pending_cmds(conn);
        isert_wait4cmds(conn);
        isert_wait4logout(isert_conn);
@@ -3300,7 +3269,7 @@ static void isert_free_conn(struct iscsi_conn *conn)
 {
        struct isert_conn *isert_conn = conn->context;
 
-       isert_wait4flush(isert_conn);
+       ib_drain_qp(isert_conn->qp);
        isert_put_conn(isert_conn);
 }
 
index 192788a4820cde54795c313a068e662017f222a0..147900cbb5788209d952f2f394c2991dd1ea8084 100644 (file)
@@ -209,14 +209,12 @@ struct isert_conn {
        struct ib_qp            *qp;
        struct isert_device     *device;
        struct mutex            mutex;
-       struct completion       wait_comp_err;
        struct kref             kref;
        struct list_head        fr_pool;
        int                     fr_pool_size;
        /* lock to protect fastreg pool */
        spinlock_t              pool_lock;
        struct work_struct      release_work;
-       struct ib_recv_wr       beacon;
        bool                    logout_posted;
        bool                    snd_w_inv;
 };
index 0bd3cb2f3c671a21fefd57dbc4851a24daa61e82..8b42401d4795646019f4498ed909b306630f19ec 100644 (file)
@@ -1264,26 +1264,40 @@ free_mem:
  */
 static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
 {
-       struct se_session *se_sess;
        struct srpt_send_ioctx *ioctx;
-       int tag;
+       unsigned long flags;
 
        BUG_ON(!ch);
-       se_sess = ch->sess;
 
-       tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
-       if (tag < 0) {
-               pr_err("Unable to obtain tag for srpt_send_ioctx\n");
-               return NULL;
+       ioctx = NULL;
+       spin_lock_irqsave(&ch->spinlock, flags);
+       if (!list_empty(&ch->free_list)) {
+               ioctx = list_first_entry(&ch->free_list,
+                                        struct srpt_send_ioctx, free_list);
+               list_del(&ioctx->free_list);
        }
-       ioctx = &((struct srpt_send_ioctx *)se_sess->sess_cmd_map)[tag];
-       memset(ioctx, 0, sizeof(struct srpt_send_ioctx));
-       ioctx->ch = ch;
+       spin_unlock_irqrestore(&ch->spinlock, flags);
+
+       if (!ioctx)
+               return ioctx;
+
+       BUG_ON(ioctx->ch != ch);
        spin_lock_init(&ioctx->spinlock);
        ioctx->state = SRPT_STATE_NEW;
+       ioctx->n_rbuf = 0;
+       ioctx->rbufs = NULL;
+       ioctx->n_rdma = 0;
+       ioctx->n_rdma_wrs = 0;
+       ioctx->rdma_wrs = NULL;
+       ioctx->mapped_sg_count = 0;
        init_completion(&ioctx->tx_done);
-
-       ioctx->cmd.map_tag = tag;
+       ioctx->queue_status_only = false;
+       /*
+        * transport_init_se_cmd() does not initialize all fields, so do it
+        * here.
+        */
+       memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
+       memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
 
        return ioctx;
 }
@@ -2021,7 +2035,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
        struct ib_cm_rep_param *rep_param;
        struct srpt_rdma_ch *ch, *tmp_ch;
        u32 it_iu_len;
-       int ret = 0;
+       int i, ret = 0;
        unsigned char *p;
 
        WARN_ON_ONCE(irqs_disabled());
@@ -2143,6 +2157,12 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
        if (!ch->ioctx_ring)
                goto free_ch;
 
+       INIT_LIST_HEAD(&ch->free_list);
+       for (i = 0; i < ch->rq_size; i++) {
+               ch->ioctx_ring[i]->ch = ch;
+               list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
+       }
+
        ret = srpt_create_ch_ib(ch);
        if (ret) {
                rej->reason = cpu_to_be32(
@@ -2173,8 +2193,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
        p = &ch->sess_name[0];
 
 try_again:
-       ch->sess = target_alloc_session(&sport->port_tpg_1, ch->rq_size,
-                                       sizeof(struct srpt_send_ioctx),
+       ch->sess = target_alloc_session(&sport->port_tpg_1, 0, 0,
                                        TARGET_PROT_NORMAL, p, ch, NULL);
        if (IS_ERR(ch->sess)) {
                pr_info("Rejected login because no ACL has been"
@@ -2881,7 +2900,7 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
        struct srpt_send_ioctx *ioctx = container_of(se_cmd,
                                struct srpt_send_ioctx, cmd);
        struct srpt_rdma_ch *ch = ioctx->ch;
-       struct se_session *se_sess = ch->sess;
+       unsigned long flags;
 
        WARN_ON(ioctx->state != SRPT_STATE_DONE);
        WARN_ON(ioctx->mapped_sg_count != 0);
@@ -2892,7 +2911,9 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
                ioctx->n_rbuf = 0;
        }
 
-       percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
+       spin_lock_irqsave(&ch->spinlock, flags);
+       list_add(&ioctx->free_list, &ch->free_list);
+       spin_unlock_irqrestore(&ch->spinlock, flags);
 }
 
 /**
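
srpt replaces its percpu_ida tag pool with per-channel preallocated contexts kept on a lock-protected free list: get pops the head, release pushes it back. A compact userspace analogue, with a pthread mutex standing in for ch->spinlock; every name here is illustrative.

    /*
     * Userspace sketch of the spinlock-protected free list that replaces
     * the percpu_ida tag pool; a pthread mutex stands in for ch->spinlock.
     */
    #include <pthread.h>

    struct demo_ioctx {
            struct demo_ioctx *next;
            /* ...per-command state, reinitialised by the caller on reuse... */
    };

    struct demo_channel {
            pthread_mutex_t lock;
            struct demo_ioctx *free_list;
    };

    struct demo_ioctx *demo_get_ioctx(struct demo_channel *ch)
    {
            struct demo_ioctx *c;

            pthread_mutex_lock(&ch->lock);
            c = ch->free_list;
            if (c)
                    ch->free_list = c->next;    /* pop the head */
            pthread_mutex_unlock(&ch->lock);
            return c;                           /* NULL when the pool is dry */
    }

    void demo_put_ioctx(struct demo_channel *ch, struct demo_ioctx *c)
    {
            pthread_mutex_lock(&ch->lock);
            c->next = ch->free_list;            /* push back onto the head */
            ch->free_list = c;
            pthread_mutex_unlock(&ch->lock);
    }
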
index ca288f019315cda7142169ddbe5269772a7f398c..af9b8b527340c80f4c8af515cc4aa641a5c5b426 100644 (file)
@@ -179,6 +179,7 @@ struct srpt_recv_ioctx {
  * struct srpt_send_ioctx - SRPT send I/O context.
  * @ioctx:       See above.
  * @ch:          Channel pointer.
+ * @free_list:   Node in srpt_rdma_ch.free_list.
  * @n_rbuf:      Number of data buffers in the received SRP command.
  * @rbufs:       Pointer to SRP data buffer array.
  * @single_rbuf: SRP data buffer if the command has only a single buffer.
@@ -201,6 +202,7 @@ struct srpt_send_ioctx {
        struct srp_direct_buf   *rbufs;
        struct srp_direct_buf   single_rbuf;
        struct scatterlist      *sg;
+       struct list_head        free_list;
        spinlock_t              spinlock;
        enum srpt_command_state state;
        struct se_cmd           cmd;
index e8a84d12b7fffe812cd329a88da26f6922c219af..1142a93dd90b0ad8d25b6dbbd15cf8f9b4ee11b1 100644 (file)
@@ -153,6 +153,7 @@ static const struct xpad_device {
        { 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
        { 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
        { 0x0738, 0x4740, "Mad Catz Beat Pad", 0, XTYPE_XBOX360 },
+       { 0x0738, 0x4a01, "Mad Catz FightStick TE 2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
        { 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
        { 0x0738, 0xb726, "Mad Catz Xbox controller - MW2", 0, XTYPE_XBOX360 },
        { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 },
@@ -304,6 +305,7 @@ static struct usb_device_id xpad_table[] = {
        XPAD_XBOX360_VENDOR(0x046d),            /* Logitech X-Box 360 style controllers */
        XPAD_XBOX360_VENDOR(0x0738),            /* Mad Catz X-Box 360 controllers */
        { USB_DEVICE(0x0738, 0x4540) },         /* Mad Catz Beat Pad */
+       XPAD_XBOXONE_VENDOR(0x0738),            /* Mad Catz FightStick TE 2 */
        XPAD_XBOX360_VENDOR(0x0e6f),            /* 0x0e6f X-Box 360 controllers */
        XPAD_XBOX360_VENDOR(0x12ab),            /* X-Box 360 dance pads */
        XPAD_XBOX360_VENDOR(0x1430),            /* RedOctane X-Box 360 controllers */
index d5994a745ffa5b5f4d1b411fec76f0a447a60f94..9829363345372add0e3a5fd6812ff49a6d527aba 100644 (file)
@@ -178,7 +178,6 @@ static int arizona_haptics_probe(struct platform_device *pdev)
        input_set_drvdata(haptics->input_dev, haptics);
 
        haptics->input_dev->name = "arizona:haptics";
-       haptics->input_dev->dev.parent = pdev->dev.parent;
        haptics->input_dev->close = arizona_haptics_close;
        __set_bit(FF_RUMBLE, haptics->input_dev->ffbit);
 
index 3f02e0e03d128afaee7439e509f00d22f23e9e59..67aab86048ad73faa1f1af16c1ae1663cf4500f1 100644 (file)
@@ -353,7 +353,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
        if (of_property_read_u32(pdev->dev.of_node, "debounce", &kpd_delay))
                kpd_delay = 15625;
 
-       if (kpd_delay > 62500 || kpd_delay == 0) {
+       /* Valid range of pwr key trigger delay is 1/64 sec to 2 seconds. */
+       if (kpd_delay > USEC_PER_SEC * 2 || kpd_delay < USEC_PER_SEC / 64) {
                dev_err(&pdev->dev, "invalid power key trigger delay\n");
                return -EINVAL;
        }
@@ -385,8 +386,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
        pwr->name = "pmic8xxx_pwrkey";
        pwr->phys = "pmic8xxx_pwrkey/input0";
 
-       delay = (kpd_delay << 10) / USEC_PER_SEC;
-       delay = 1 + ilog2(delay);
+       delay = (kpd_delay << 6) / USEC_PER_SEC;
+       delay = ilog2(delay);
 
        err = regmap_read(regmap, PON_CNTL_1, &pon_cntl);
        if (err < 0) {
index 10c4e3d462f112f15ec9843093c5f988d44780b9..caa5a62c42fbe0b55e0230931d7d833922fdaab9 100644 (file)
@@ -222,7 +222,6 @@ static int twl4030_vibra_probe(struct platform_device *pdev)
 
        info->input_dev->name = "twl4030:vibrator";
        info->input_dev->id.version = 1;
-       info->input_dev->dev.parent = pdev->dev.parent;
        info->input_dev->close = twl4030_vibra_close;
        __set_bit(FF_RUMBLE, info->input_dev->ffbit);
 
index ea63fad48de643be287648820c52bb7911771957..df3581f606282a2890b8ce78cf13d9e4882c3c0a 100644 (file)
@@ -45,7 +45,6 @@
 struct vibra_info {
        struct device *dev;
        struct input_dev *input_dev;
-       struct workqueue_struct *workqueue;
        struct work_struct play_work;
        struct mutex mutex;
        int irq;
@@ -182,6 +181,14 @@ static void vibra_play_work(struct work_struct *work)
 {
        struct vibra_info *info = container_of(work,
                                struct vibra_info, play_work);
+       int ret;
+
+       /* Do not allow the effect while the routing is set to use audio */
+       ret = twl6040_get_vibralr_status(info->twl6040);
+       if (ret & TWL6040_VIBSEL) {
+               dev_info(info->dev, "Vibra is configured for audio\n");
+               return;
+       }
 
        mutex_lock(&info->mutex);
 
@@ -200,24 +207,12 @@ static int vibra_play(struct input_dev *input, void *data,
                      struct ff_effect *effect)
 {
        struct vibra_info *info = input_get_drvdata(input);
-       int ret;
-
-       /* Do not allow effect, while the routing is set to use audio */
-       ret = twl6040_get_vibralr_status(info->twl6040);
-       if (ret & TWL6040_VIBSEL) {
-               dev_info(&input->dev, "Vibra is configured for audio\n");
-               return -EBUSY;
-       }
 
        info->weak_speed = effect->u.rumble.weak_magnitude;
        info->strong_speed = effect->u.rumble.strong_magnitude;
        info->direction = effect->direction < EFFECT_DIR_180_DEG ? 1 : -1;
 
-       ret = queue_work(info->workqueue, &info->play_work);
-       if (!ret) {
-               dev_info(&input->dev, "work is already on queue\n");
-               return ret;
-       }
+       schedule_work(&info->play_work);
 
        return 0;
 }
@@ -362,7 +357,6 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
 
        info->input_dev->name = "twl6040:vibrator";
        info->input_dev->id.version = 1;
-       info->input_dev->dev.parent = pdev->dev.parent;
        info->input_dev->close = twl6040_vibra_close;
        __set_bit(FF_RUMBLE, info->input_dev->ffbit);
 
index 3a7f3a4a439635064a4e8ca78e1e7860b9072a47..7c18249d6c8e820e3fe86ffa66196164dbef2d3f 100644 (file)
@@ -858,6 +858,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
                goto err_free_buf;
        }
 
+       /* Sanity check that a device has an endpoint */
+       if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
+               dev_err(&usbinterface->dev,
+                       "Invalid number of endpoints\n");
+               error = -EINVAL;
+               goto err_free_urb;
+       }
+
        /*
         * The endpoint is always altsetting 0, we know this since we know
         * this device only has one interrupt endpoint
@@ -879,7 +887,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
         * HID report descriptor
         */
        if (usb_get_extra_descriptor(usbinterface->cur_altsetting,
-                                    HID_DEVICE_TYPE, &hid_desc) != 0){
+                                    HID_DEVICE_TYPE, &hid_desc) != 0) {
                dev_err(&usbinterface->dev,
                        "Can't retrieve exta USB descriptor to get hid report descriptor length\n");
                error = -EIO;
index 2160512e861af57d289fb5873ef80b81c2fbf7c9..5af7907d0af4dd6ab79639ee91287cdf0bc90202 100644 (file)
@@ -1093,6 +1093,19 @@ static int mxt_t6_command(struct mxt_data *data, u16 cmd_offset,
        return 0;
 }
 
+static int mxt_acquire_irq(struct mxt_data *data)
+{
+       int error;
+
+       enable_irq(data->irq);
+
+       error = mxt_process_messages_until_invalid(data);
+       if (error)
+               return error;
+
+       return 0;
+}
+
 static int mxt_soft_reset(struct mxt_data *data)
 {
        struct device *dev = &data->client->dev;
@@ -1111,7 +1124,7 @@ static int mxt_soft_reset(struct mxt_data *data)
        /* Ignore CHG line for 100ms after reset */
        msleep(100);
 
-       enable_irq(data->irq);
+       mxt_acquire_irq(data);
 
        ret = mxt_wait_for_completion(data, &data->reset_completion,
                                      MXT_RESET_TIMEOUT);
@@ -1466,19 +1479,6 @@ release_mem:
        return ret;
 }
 
-static int mxt_acquire_irq(struct mxt_data *data)
-{
-       int error;
-
-       enable_irq(data->irq);
-
-       error = mxt_process_messages_until_invalid(data);
-       if (error)
-               return error;
-
-       return 0;
-}
-
 static int mxt_get_info(struct mxt_data *data)
 {
        struct i2c_client *client = data->client;
index 9bbadaaf6bc3723f044a9c44e8619773d242ec8f..7b3845aa5983ad57892a0022990f3353bcca82b6 100644 (file)
@@ -370,8 +370,8 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
                        point.coord_x = point.coord_y = 0;
                }
 
-               point.state = payload[9 * i + 5] & 0x03;
-               point.id = (payload[9 * i + 5] & 0xfc) >> 2;
+               point.state = payload[9 * i + 5] & 0x0f;
+               point.id = (payload[9 * i + 5] & 0xf0) >> 4;
 
                /* determine touch major, minor and orientation */
                point.area_major = max(payload[9 * i + 6],
index 374c129219ef0c48c8ff3004662c413d78ad2b4c..5efadad4615bf6d68c215897b15adbf9eab80b53 100644 (file)
@@ -92,6 +92,7 @@ struct iommu_dev_data {
        struct list_head dev_data_list;   /* For global dev_data_list */
        struct protection_domain *domain; /* Domain the device is bound to */
        u16 devid;                        /* PCI Device ID */
+       u16 alias;                        /* Alias Device ID */
        bool iommu_v2;                    /* Device can make use of IOMMUv2 */
        bool passthrough;                 /* Device is identity mapped */
        struct {
@@ -166,6 +167,13 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
        return container_of(dom, struct protection_domain, domain);
 }
 
+static inline u16 get_device_id(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+
+       return PCI_DEVID(pdev->bus->number, pdev->devfn);
+}
+
 static struct iommu_dev_data *alloc_dev_data(u16 devid)
 {
        struct iommu_dev_data *dev_data;
@@ -203,6 +211,68 @@ out_unlock:
        return dev_data;
 }
 
+static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
+{
+       *(u16 *)data = alias;
+       return 0;
+}
+
+static u16 get_alias(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       u16 devid, ivrs_alias, pci_alias;
+
+       devid = get_device_id(dev);
+       ivrs_alias = amd_iommu_alias_table[devid];
+       pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
+
+       if (ivrs_alias == pci_alias)
+               return ivrs_alias;
+
+       /*
+        * DMA alias showdown
+        *
+        * The IVRS is fairly reliable in telling us about aliases, but it
+        * can't know about every screwy device.  If we don't have an IVRS
+        * reported alias, use the PCI reported alias.  In that case we may
+        * still need to initialize the rlookup and dev_table entries if the
+        * alias is to a non-existent device.
+        */
+       if (ivrs_alias == devid) {
+               if (!amd_iommu_rlookup_table[pci_alias]) {
+                       amd_iommu_rlookup_table[pci_alias] =
+                               amd_iommu_rlookup_table[devid];
+                       memcpy(amd_iommu_dev_table[pci_alias].data,
+                              amd_iommu_dev_table[devid].data,
+                              sizeof(amd_iommu_dev_table[pci_alias].data));
+               }
+
+               return pci_alias;
+       }
+
+       pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
+               "for device %s[%04x:%04x], kernel reported alias "
+               "%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
+               PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
+               PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
+               PCI_FUNC(pci_alias));
+
+       /*
+        * If we don't have a PCI DMA alias and the IVRS alias is on the same
+        * bus, then the IVRS table may know about a quirk that we don't.
+        */
+       if (pci_alias == devid &&
+           PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
+               pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
+               pdev->dma_alias_devfn = ivrs_alias & 0xff;
+               pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
+                       PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
+                       dev_name(dev));
+       }
+
+       return ivrs_alias;
+}
+
 static struct iommu_dev_data *find_dev_data(u16 devid)
 {
        struct iommu_dev_data *dev_data;
@@ -215,13 +285,6 @@ static struct iommu_dev_data *find_dev_data(u16 devid)
        return dev_data;
 }
 
-static inline u16 get_device_id(struct device *dev)
-{
-       struct pci_dev *pdev = to_pci_dev(dev);
-
-       return PCI_DEVID(pdev->bus->number, pdev->devfn);
-}
-
 static struct iommu_dev_data *get_dev_data(struct device *dev)
 {
        return dev->archdata.iommu;
@@ -349,6 +412,8 @@ static int iommu_init_device(struct device *dev)
        if (!dev_data)
                return -ENOMEM;
 
+       dev_data->alias = get_alias(dev);
+
        if (pci_iommuv2_capable(pdev)) {
                struct amd_iommu *iommu;
 
@@ -369,7 +434,7 @@ static void iommu_ignore_device(struct device *dev)
        u16 devid, alias;
 
        devid = get_device_id(dev);
-       alias = amd_iommu_alias_table[devid];
+       alias = get_alias(dev);
 
        memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
        memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
@@ -1061,7 +1126,7 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
        int ret;
 
        iommu = amd_iommu_rlookup_table[dev_data->devid];
-       alias = amd_iommu_alias_table[dev_data->devid];
+       alias = dev_data->alias;
 
        ret = iommu_flush_dte(iommu, dev_data->devid);
        if (!ret && alias != dev_data->devid)
@@ -2039,7 +2104,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
        bool ats;
 
        iommu = amd_iommu_rlookup_table[dev_data->devid];
-       alias = amd_iommu_alias_table[dev_data->devid];
+       alias = dev_data->alias;
        ats   = dev_data->ats.enabled;
 
        /* Update data structures */
@@ -2073,7 +2138,7 @@ static void do_detach(struct iommu_dev_data *dev_data)
                return;
 
        iommu = amd_iommu_rlookup_table[dev_data->devid];
-       alias = amd_iommu_alias_table[dev_data->devid];
+       alias = dev_data->alias;
 
        /* decrease reference counters */
        dev_data->domain->dev_iommu[iommu->index] -= 1;
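
get_alias() computes the device's DMA alias once at init time and caches it in dev_data->alias, so the flush and attach paths above no longer index amd_iommu_alias_table on every call. The PCI walk itself is a small callback accumulator: pci_for_each_dma_alias() invokes __last_alias() once per alias and the last value seen wins. The same iterator-plus-callback shape in plain C; for_each_alias() is an invented stand-in, not the kernel API.

    /*
     * Walk-with-callback accumulator as in get_alias(): the iterator calls
     * back once per alias and the last value seen wins. for_each_alias()
     * is an invented stand-in for pci_for_each_dma_alias().
     */
    #include <stdio.h>

    typedef int (*alias_fn)(unsigned short alias, void *data);

    static int for_each_alias(const unsigned short *aliases, int n,
                              alias_fn fn, void *data)
    {
            for (int i = 0; i < n; i++) {
                    int ret = fn(aliases[i], data);

                    if (ret)
                            return ret;         /* callback may stop the walk */
            }
            return 0;
    }

    static int last_alias(unsigned short alias, void *data)
    {
            *(unsigned short *)data = alias;    /* remember the last one seen */
            return 0;                           /* keep walking */
    }

    int main(void)
    {
            unsigned short ids[] = { 0x0008, 0x00a0, 0x00a8 };
            unsigned short alias = 0;

            for_each_alias(ids, 3, last_alias, &alias);
            printf("%#06x\n", alias);           /* 0x00a8 */
            return 0;
    }
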
index 2409e3bd3df21e973db1231360b7a50fba66efa1..7c39ac4b9c537df09128a0c106d3bfbd009f7e7f 100644 (file)
@@ -826,6 +826,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
        if (smmu_domain->smmu)
                goto out_unlock;
 
+       /* We're bypassing these SIDs, so don't allocate an actual context */
+       if (domain->type == IOMMU_DOMAIN_DMA) {
+               smmu_domain->smmu = smmu;
+               goto out_unlock;
+       }
+
        /*
         * Mapping the requested stage onto what we support is surprisingly
         * complicated, mainly because the spec allows S1+S2 SMMUs without
@@ -948,7 +954,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
        void __iomem *cb_base;
        int irq;
 
-       if (!smmu)
+       if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
                return;
 
        /*
@@ -1089,18 +1095,20 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
 
-       /* Devices in an IOMMU group may already be configured */
-       ret = arm_smmu_master_configure_smrs(smmu, cfg);
-       if (ret)
-               return ret == -EEXIST ? 0 : ret;
-
        /*
         * FIXME: This won't be needed once we have IOMMU-backed DMA ops
-        * for all devices behind the SMMU.
+        * for all devices behind the SMMU. Note that we need to take
+        * care configuring SMRs for devices that are both a platform_device
+        * and a PCI device (i.e. a PCI host controller).
         */
        if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
                return 0;
 
+       /* Devices in an IOMMU group may already be configured */
+       ret = arm_smmu_master_configure_smrs(smmu, cfg);
+       if (ret)
+               return ret == -EEXIST ? 0 : ret;
+
        for (i = 0; i < cfg->num_streamids; ++i) {
                u32 idx, s2cr;
 
index 72d6182666cbd24ba785fc59572c655b6f0c2c8f..58f2fe687a24ddd29ac6d786862eefc33098d795 100644 (file)
@@ -403,7 +403,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
                unsigned int s_length = sg_dma_len(s);
                unsigned int s_dma_len = s->length;
 
-               s->offset = s_offset;
+               s->offset += s_offset;
                s->length = s_length;
                sg_dma_address(s) = dma_addr + s_offset;
                dma_addr += s_dma_len;
@@ -422,7 +422,7 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
 
        for_each_sg(sg, s, nents, i) {
                if (sg_dma_address(s) != DMA_ERROR_CODE)
-                       s->offset = sg_dma_address(s);
+                       s->offset += sg_dma_address(s);
                if (sg_dma_len(s))
                        s->length = sg_dma_len(s);
                sg_dma_address(s) = DMA_ERROR_CODE;
index a2e1b7f14df29cc78b625ec88455438d0fa1fe07..e1852e845d21f1f8014f7e51789d0e8682c5c8e8 100644 (file)
@@ -2458,7 +2458,7 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
        }
 
        /* register PCI DMA alias device */
-       if (req_id != dma_alias && dev_is_pci(dev)) {
+       if (dev_is_pci(dev) && req_id != dma_alias) {
                tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
                                               dma_alias & 0xff, NULL, domain);
 
index bfd4f7c3b1d8a9da9ce6cd2bce3decd8e27e0fca..b9df1411c8942c18b266b11e1cf572ecf9bcad15 100644 (file)
@@ -848,7 +848,8 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
        if (!group->default_domain) {
                group->default_domain = __iommu_domain_alloc(dev->bus,
                                                             IOMMU_DOMAIN_DMA);
-               group->domain = group->default_domain;
+               if (!group->domain)
+                       group->domain = group->default_domain;
        }
 
        ret = iommu_group_add_device(group, dev);
index a6f593a0a29eda65c1f383dbe1700c13a8e9be27..5710a06c30498e1b40f4c645f56f1c9b2a49eb4e 100644 (file)
@@ -315,8 +315,8 @@ static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
        int i;
 
        for (i = 0; i < iommu->num_mmu; i++)
-               active &= rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
-                                       RK_MMU_STATUS_STALL_ACTIVE;
+               active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
+                                          RK_MMU_STATUS_STALL_ACTIVE);
 
        return active;
 }
@@ -327,8 +327,8 @@ static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
        int i;
 
        for (i = 0; i < iommu->num_mmu; i++)
-               enable &= rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
-                                       RK_MMU_STATUS_PAGING_ENABLED;
+               enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
+                                          RK_MMU_STATUS_PAGING_ENABLED);
 
        return enable;
 }
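
The rockchip fix addresses a classic bool-and-bitmask bug: with active declared bool, active &= status & BIT promotes active to int, ANDs 1 against a mask result such as 4, and yields 0 even though the bit is set; the added !!() normalizes the test to 0/1 first. A minimal reproduction with made-up register values:

    /*
     * Minimal reproduction of the bool &= mask bug fixed above; the
     * register bit value is made up for illustration.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define STALL_ACTIVE (1u << 2)      /* status bit above bit 0 */

    int main(void)
    {
            unsigned int status = STALL_ACTIVE;     /* the bit is set */
            bool buggy = true, fixed = true;

            buggy &= status & STALL_ACTIVE;     /* 1 & 4 == 0: wrongly false */
            fixed &= !!(status & STALL_ACTIVE); /* 1 & 1 == 1: stays true */

            printf("buggy=%d fixed=%d\n", buggy, fixed); /* buggy=0 fixed=1 */
            return 0;
    }
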
index 94a30da0cfacac7d48e7e157b7ae7cd803708b50..4dffccf532a2173ce17f289aeeb0f69bdfac4dc3 100644 (file)
@@ -467,7 +467,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
        gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));
 
        /* Update the pcpu_masks */
-       for (i = 0; i < gic_vpes; i++)
+       for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
                clear_bit(irq, pcpu_masks[i].pcpu_mask);
        set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);
 
@@ -707,7 +707,7 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
        spin_lock_irqsave(&gic_lock, flags);
        gic_map_to_pin(intr, gic_cpu_pin);
        gic_map_to_vpe(intr, vpe);
-       for (i = 0; i < gic_vpes; i++)
+       for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
                clear_bit(intr, pcpu_masks[i].pcpu_mask);
        set_bit(intr, pcpu_masks[vpe].pcpu_mask);
        spin_unlock_irqrestore(&gic_lock, flags);
index d7c286656a25721ec58fa16a5f3fdb277a0747a5..1a1d99704fe694ad2f0c19933fdfc7df89803da9 100644 (file)
@@ -1147,8 +1147,6 @@ static byte test_c_ind_mask_bit(PLCI *plci, word b)
 
 static void dump_c_ind_mask(PLCI *plci)
 {
-       static char hex_digit_table[0x10] =
-               {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
        word i, j, k;
        dword d;
        char *p;
@@ -1165,7 +1163,7 @@ static void dump_c_ind_mask(PLCI *plci)
                                d = plci->c_ind_mask_table[i + j];
                                for (k = 0; k < 8; k++)
                                {
-                                       *(--p) = hex_digit_table[d & 0xf];
+                                       *(--p) = hex_asc_lo(d);
                                        d >>= 4;
                                }
                        }
@@ -10507,7 +10505,6 @@ static void mixer_set_bchannel_id(PLCI *plci, byte *chi)
 
 static void mixer_calculate_coefs(DIVA_CAPI_ADAPTER *a)
 {
-       static char hex_digit_table[0x10] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
        word n, i, j;
        char *p;
        char hex_line[2 * MIXER_MAX_DUMP_CHANNELS + MIXER_MAX_DUMP_CHANNELS / 8 + 4];
@@ -10690,13 +10687,13 @@ static void mixer_calculate_coefs(DIVA_CAPI_ADAPTER *a)
        n = li_total_channels;
        if (n > MIXER_MAX_DUMP_CHANNELS)
                n = MIXER_MAX_DUMP_CHANNELS;
+
        p = hex_line;
        for (j = 0; j < n; j++)
        {
                if ((j & 0x7) == 0)
                        *(p++) = ' ';
-               *(p++) = hex_digit_table[li_config_table[j].curchnl >> 4];
-               *(p++) = hex_digit_table[li_config_table[j].curchnl & 0xf];
+               p = hex_byte_pack(p, li_config_table[j].curchnl);
        }
        *p = '\0';
        dbug(1, dprintf("[%06lx] CURRENT %s",
@@ -10706,8 +10703,7 @@ static void mixer_calculate_coefs(DIVA_CAPI_ADAPTER *a)
        {
                if ((j & 0x7) == 0)
                        *(p++) = ' ';
-               *(p++) = hex_digit_table[li_config_table[j].channel >> 4];
-               *(p++) = hex_digit_table[li_config_table[j].channel & 0xf];
+               p = hex_byte_pack(p, li_config_table[j].channel);
        }
        *p = '\0';
        dbug(1, dprintf("[%06lx] CHANNEL %s",
@@ -10717,8 +10713,7 @@ static void mixer_calculate_coefs(DIVA_CAPI_ADAPTER *a)
        {
                if ((j & 0x7) == 0)
                        *(p++) = ' ';
-               *(p++) = hex_digit_table[li_config_table[j].chflags >> 4];
-               *(p++) = hex_digit_table[li_config_table[j].chflags & 0xf];
+               p = hex_byte_pack(p, li_config_table[j].chflags);
        }
        *p = '\0';
        dbug(1, dprintf("[%06lx] CHFLAG  %s",
@@ -10730,8 +10725,7 @@ static void mixer_calculate_coefs(DIVA_CAPI_ADAPTER *a)
                {
                        if ((j & 0x7) == 0)
                                *(p++) = ' ';
-                       *(p++) = hex_digit_table[li_config_table[i].flag_table[j] >> 4];
-                       *(p++) = hex_digit_table[li_config_table[i].flag_table[j] & 0xf];
+                       p = hex_byte_pack(p, li_config_table[i].flag_table[j]);
                }
                *p = '\0';
                dbug(1, dprintf("[%06lx] FLAG[%02x]%s",
@@ -10744,8 +10738,7 @@ static void mixer_calculate_coefs(DIVA_CAPI_ADAPTER *a)
                {
                        if ((j & 0x7) == 0)
                                *(p++) = ' ';
-                       *(p++) = hex_digit_table[li_config_table[i].coef_table[j] >> 4];
-                       *(p++) = hex_digit_table[li_config_table[i].coef_table[j] & 0xf];
+                       p = hex_byte_pack(p, li_config_table[i].coef_table[j]);
                }
                *p = '\0';
                dbug(1, dprintf("[%06lx] COEF[%02x]%s",
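
The driver-private hex_digit_table copies give way to the kernel's shared hex helpers: hex_asc_lo() emits one lowercase nibble and hex_byte_pack() emits a byte and returns the advanced buffer pointer, which is what makes the p = hex_byte_pack(p, ...) chains above work. A userspace restatement of those helpers; the kernel's definitions in <linux/kernel.h> have essentially this shape.

    /*
     * Userspace restatement of hex_asc_lo()/hex_byte_pack(); the kernel's
     * helpers in <linux/kernel.h> have essentially this shape.
     */
    #include <stdio.h>

    static const char hex_asc[] = "0123456789abcdef";

    #define hex_asc_lo(x) hex_asc[(x) & 0x0f]
    #define hex_asc_hi(x) hex_asc[((x) >> 4) & 0x0f]

    static char *hex_byte_pack(char *buf, unsigned char byte)
    {
            *buf++ = hex_asc_hi(byte);
            *buf++ = hex_asc_lo(byte);
            return buf;                 /* ready for the next byte */
    }

    int main(void)
    {
            char line[8], *p = line;

            p = hex_byte_pack(p, 0xde);
            p = hex_byte_pack(p, 0xad);
            *p = '\0';
            printf("%s\n", line);       /* prints "dead" */
            return 0;
    }
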
index a0efb4cefa1c3ce467b7f9232d27da2dc5fd3565..5609deee7cd3f7891076dd474493bcfa91c7ad87 100644 (file)
@@ -127,7 +127,7 @@ net_send_packet(struct sk_buff *skb, struct net_device *dev)
        if (lp->in_idx >= MAX_SKB_BUFFERS)
                lp->in_idx = 0; /* wrap around */
        lp->sk_count++;         /* adjust counter */
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
 
        /* If we just used up the very last entry in the
         * TX ring on this device, tell the queueing
index aa5dd5668528912ec1f402deea49951fe8e15ff2..c151c6daa67ee8aa2ae600b4462f146d85776335 100644 (file)
@@ -1153,7 +1153,7 @@ static void isdn_net_tx_timeout(struct net_device *ndev)
                 * ever called   --KG
                 */
        }
-       ndev->trans_start = jiffies;
+       netif_trans_update(ndev);
        netif_wake_queue(ndev);
 }
 
@@ -1291,7 +1291,7 @@ isdn_net_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                        }
                } else {
                        /* Device is connected to an ISDN channel */
-                       ndev->trans_start = jiffies;
+                       netif_trans_update(ndev);
                        if (!lp->dialstate) {
                                /* ISDN connection is established, try sending */
                                int ret;
index e2d4e58230f581c2f1ac7a86f89e44e8fe529f05..0c5d8de41b23ab513fd4e03c51f3c528e06e515d 100644 (file)
@@ -278,7 +278,7 @@ static int isdn_x25iface_xmit(struct concap_proto *cprot, struct sk_buff *skb)
        case X25_IFACE_DATA:
                if (*state == WAN_CONNECTED) {
                        skb_pull(skb, 1);
-                       cprot->net_dev->trans_start = jiffies;
+                       netif_trans_update(cprot->net_dev);
                        ret = (cprot->dops->data_req(cprot, skb));
                        /* prepare for future retransmissions */
                        if (ret) skb_push(skb, 1);
index 0d29b5a6356d729a6ceb987bccc8c2dbe6779823..99e5f9751e8b1746835b28c4bd4e2a5d1b53fd14 100644 (file)
@@ -715,6 +715,9 @@ base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
        if (!maddr || maddr->family != AF_ISDN)
                return -EINVAL;
 
+       if (addr_len < sizeof(struct sockaddr_mISDN))
+               return -EINVAL;
+
        lock_sock(sk);
 
        if (_pms(sk)->dev) {
index eb934b0242e0e17652b7fc17c3255b5e70e40d15..67392b6ab845fbdec7d54798b591c24e544347a9 100644 (file)
@@ -331,7 +331,7 @@ void set_interrupt(struct lg_cpu *cpu, unsigned int irq)
  * Actually now I think of it, it's possible that Ron *is* half the Plan 9
  * userbase.  Oh well.
  */
-static bool could_be_syscall(unsigned int num)
+bool could_be_syscall(unsigned int num)
 {
        /* Normal Linux IA32_SYSCALL_VECTOR or reserved vector? */
        return num == IA32_SYSCALL_VECTOR || num == syscall_vector;
@@ -416,6 +416,10 @@ bool deliver_trap(struct lg_cpu *cpu, unsigned int num)
  *
  * This routine indicates if a particular trap number could be delivered
  * directly.
+ *
+ * Unfortunately, Linux 4.6 started using an interrupt gate instead of a
+ * trap gate for syscalls, so this trick is ineffective.  See Mastery for
+ * how we could do this anyway...
  */
 static bool direct_trap(unsigned int num)
 {
index ac8ad0461e809db526bfc1db1ca2290ac612a43d..69b3814afd2f63c7fabe82dde698fae44b2d1862 100644 (file)
@@ -167,6 +167,7 @@ void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta);
 bool send_notify_to_eventfd(struct lg_cpu *cpu);
 void init_clockdev(struct lg_cpu *cpu);
 bool check_syscall_vector(struct lguest *lg);
+bool could_be_syscall(unsigned int num);
 int init_interrupts(void);
 void free_interrupts(void);
 
index 6a4cd771a2be62b4172cc26a178ca85fbf7e6d27..adc162c7040d7ef0a2f8f738e7a21bab1d57ba09 100644 (file)
@@ -429,8 +429,12 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
                        return;
                break;
        case 32 ... 255:
+               /* This might be a syscall. */
+               if (could_be_syscall(cpu->regs->trapnum))
+                       break;
+
                /*
-                * These values mean a real interrupt occurred, in which case
+                * Other values mean a real interrupt occurred, in which case
                 * the Host handler has already been run. We just do a
                 * friendly check if another process should now be run, then
                 * return to run the Guest again.
index dc11bbf27274380ecf60b55491193363d947e5c0..58d04726cdd7dc13f0faad15aeec89214ffce620 100644 (file)
@@ -46,7 +46,6 @@ static ssize_t mbox_test_signal_write(struct file *filp,
                                       size_t count, loff_t *ppos)
 {
        struct mbox_test_device *tdev = filp->private_data;
-       int ret;
 
        if (!tdev->tx_channel) {
                dev_err(tdev->dev, "Channel cannot do Tx\n");
@@ -60,17 +59,20 @@ static ssize_t mbox_test_signal_write(struct file *filp,
                return -EINVAL;
        }
 
-       tdev->signal = kzalloc(MBOX_MAX_SIG_LEN, GFP_KERNEL);
-       if (!tdev->signal)
-               return -ENOMEM;
+       /* Only allocate memory if we need to */
+       if (!tdev->signal) {
+               tdev->signal = kzalloc(MBOX_MAX_SIG_LEN, GFP_KERNEL);
+               if (!tdev->signal)
+                       return -ENOMEM;
+       }
 
-       ret = copy_from_user(tdev->signal, userbuf, count);
-       if (ret) {
+       if (copy_from_user(tdev->signal, userbuf, count)) {
                kfree(tdev->signal);
+               tdev->signal = NULL;
                return -EFAULT;
        }
 
-       return ret < 0 ? ret : count;
+       return count;
 }
 
 static const struct file_operations mbox_test_signal_ops = {
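
The mbox-test fix above has two halves: the signal buffer is allocated only when absent, so repeated writes reuse it instead of leaking the previous allocation, and a failed copy_from_user() now frees the buffer and resets the pointer to NULL so a later write cannot touch freed memory. The free-and-NULL discipline in a standalone sketch, with malloc/free standing in for kzalloc/kfree and a NULL source modelling the failed copy:

    /*
     * Sketch of the allocate-once / free-and-NULL discipline; malloc and
     * free stand in for kzalloc/kfree, and all names are illustrative.
     */
    #include <stdlib.h>
    #include <string.h>

    #define SIG_LEN 16

    struct test_dev { char *signal; };

    int store_signal(struct test_dev *t, const char *src, size_t n)
    {
            if (n > SIG_LEN)
                    return -1;
            if (!t->signal) {           /* only allocate if we need to */
                    t->signal = calloc(1, SIG_LEN);
                    if (!t->signal)
                            return -1;
            }
            if (!src) {                 /* models a failed copy_from_user() */
                    free(t->signal);
                    t->signal = NULL;   /* never leave a dangling pointer */
                    return -1;
            }
            memcpy(t->signal, src, n);
            return (int)n;
    }
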
index bd07f39f06926dad75c7dfc4759c449b86af3ce2..dd2afbca51c9b8d2c155003548d31da28f81c3ce 100644 (file)
@@ -189,8 +189,8 @@ static int slimpro_mbox_probe(struct platform_device *pdev)
        int i;
 
        ctx = devm_kzalloc(&pdev->dev, sizeof(struct slimpro_mbox), GFP_KERNEL);
-       if (IS_ERR(ctx))
-               return PTR_ERR(ctx);
+       if (!ctx)
+               return -ENOMEM;
 
        platform_set_drvdata(pdev, ctx);
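
The slimpro fix is about confusing the kernel's two error conventions: devm_kzalloc() reports failure with a NULL return, not an ERR_PTR-encoded errno, so the old IS_ERR(ctx) test could never trigger and a failed allocation would be dereferenced later. The distinction, with simplified stand-ins for ERR_PTR()/IS_ERR():

    /*
     * The two error conventions side by side, with simplified stand-ins
     * for ERR_PTR()/IS_ERR(): pointer-returning allocators hand back NULL,
     * while ERR_PTR() encodes a negative errno in the pointer itself --
     * IS_ERR() only catches the latter.
     */
    #include <stdio.h>

    #define MAX_ERRNO 4095
    #define ERR_PTR(err) ((void *)(long)(err))
    #define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    int main(void)
    {
            void *from_kzalloc = NULL;          /* kzalloc-style failure */
            void *from_errptr = ERR_PTR(-12);   /* -12 == -ENOMEM */

            /* prints 0 then 1: IS_ERR() never fires for a NULL return */
            printf("%d %d\n", IS_ERR(from_kzalloc), IS_ERR(from_errptr));
            return 0;
    }
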
 
index 6a4811f857056aaa64baff75265902b59be12ac9..4a36632c236f09ad9cdde06ff213961c712c3000 100644 (file)
@@ -375,13 +375,13 @@ struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
 
        if (!np) {
                dev_err(cl->dev, "%s() currently only supports DT\n", __func__);
-               return ERR_PTR(-ENOSYS);
+               return ERR_PTR(-EINVAL);
        }
 
        if (!of_get_property(np, "mbox-names", NULL)) {
                dev_err(cl->dev,
                        "%s() requires an \"mbox-names\" property\n", __func__);
-               return ERR_PTR(-ENOSYS);
+               return ERR_PTR(-EINVAL);
        }
 
        of_property_for_each_string(np, "mbox-names", prop, mbox_name) {
index 0ddf638d60f3645c4a083b37027625dfd850b97f..043828d541f794b331fdaef51e2649a58a472525 100644 (file)
@@ -361,8 +361,6 @@ static int __init acpi_pcc_probe(void)
                struct acpi_generic_address *db_reg;
                struct acpi_pcct_hw_reduced *pcct_ss;
                pcc_mbox_channels[i].con_priv = pcct_entry;
-               pcct_entry = (struct acpi_subtable_header *)
-                       ((unsigned long) pcct_entry + pcct_entry->length);
 
                /* If doorbell is in system memory cache the virt address */
                pcct_ss = (struct acpi_pcct_hw_reduced *)pcct_entry;
@@ -370,6 +368,8 @@ static int __init acpi_pcc_probe(void)
                if (db_reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
                        pcc_doorbell_vaddr[i] = acpi_os_ioremap(db_reg->address,
                                                        db_reg->bit_width/8);
+               pcct_entry = (struct acpi_subtable_header *)
+                       ((unsigned long) pcct_entry + pcct_entry->length);
        }
 
        pcc_mbox_ctrl.num_chans = count;
index 7df6b4f1548a47935c7bafcba1ffa06ed3335ecb..3fe86b54d50be37e88f83504cff5de90820b5fb7 100644 (file)
@@ -322,7 +322,7 @@ __clear_page_buffers(struct page *page)
 {
        ClearPagePrivate(page);
        set_page_private(page, 0);
-       page_cache_release(page);
+       put_page(page);
 }
 static void free_buffers(struct page *page)
 {
@@ -1673,6 +1673,9 @@ static void bitmap_free(struct bitmap *bitmap)
        if (!bitmap) /* there was no bitmap */
                return;
 
+       if (bitmap->sysfs_can_clear)
+               sysfs_put(bitmap->sysfs_can_clear);
+
        if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info &&
                bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev))
                md_cluster_stop(bitmap->mddev);
@@ -1712,15 +1715,13 @@ void bitmap_destroy(struct mddev *mddev)
        if (mddev->thread)
                mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
 
-       if (bitmap->sysfs_can_clear)
-               sysfs_put(bitmap->sysfs_can_clear);
-
        bitmap_free(bitmap);
 }
 
 /*
  * initialize the bitmap structure
  * if this returns an error, bitmap_destroy must be called to do clean up
+ * once mddev->bitmap is set
  */
 struct bitmap *bitmap_create(struct mddev *mddev, int slot)
 {
@@ -1865,8 +1866,10 @@ int bitmap_copy_from_slot(struct mddev *mddev, int slot,
        struct bitmap_counts *counts;
        struct bitmap *bitmap = bitmap_create(mddev, slot);
 
-       if (IS_ERR(bitmap))
+       if (IS_ERR(bitmap)) {
+               bitmap_free(bitmap);
                return PTR_ERR(bitmap);
+       }
 
        rv = bitmap_init_from_disk(bitmap, 0);
        if (rv)
@@ -2170,14 +2173,14 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
                                else {
                                        mddev->bitmap = bitmap;
                                        rv = bitmap_load(mddev);
-                                       if (rv) {
-                                               bitmap_destroy(mddev);
+                                       if (rv)
                                                mddev->bitmap_info.offset = 0;
-                                       }
                                }
                                mddev->pers->quiesce(mddev, 0);
-                               if (rv)
+                               if (rv) {
+                                       bitmap_destroy(mddev);
                                        return rv;
+                               }
                        }
                }
        }
index 27f2ef300f8bb10bac594b094e3fafcc8db407d0..3970cda10080988887bfa07329241d94afd1b883 100644 (file)
@@ -867,39 +867,55 @@ static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
        return 0;
 }
 
-#define WRITE_LOCK(cmd)        \
-       down_write(&cmd->root_lock); \
-       if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
-               up_write(&cmd->root_lock); \
-               return -EINVAL; \
+static bool cmd_write_lock(struct dm_cache_metadata *cmd)
+{
+       down_write(&cmd->root_lock);
+       if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) {
+               up_write(&cmd->root_lock);
+               return false;
        }
+       return true;
+}
 
-#define WRITE_LOCK_VOID(cmd) \
-       down_write(&cmd->root_lock); \
-       if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
-               up_write(&cmd->root_lock); \
-               return; \
-       }
+#define WRITE_LOCK(cmd)                                \
+       do {                                    \
+               if (!cmd_write_lock((cmd)))     \
+                       return -EINVAL;         \
+       } while (0)
+
+#define WRITE_LOCK_VOID(cmd)                   \
+       do {                                    \
+               if (!cmd_write_lock((cmd)))     \
+                       return;                 \
+       } while (0)
 
 #define WRITE_UNLOCK(cmd) \
-       up_write(&cmd->root_lock)
+       up_write(&(cmd)->root_lock)
 
-#define READ_LOCK(cmd) \
-       down_read(&cmd->root_lock); \
-       if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
-               up_read(&cmd->root_lock); \
-               return -EINVAL; \
+static bool cmd_read_lock(struct dm_cache_metadata *cmd)
+{
+       down_read(&cmd->root_lock);
+       if (cmd->fail_io) {
+               up_read(&cmd->root_lock);
+               return false;
        }
+       return true;
+}
 
-#define READ_LOCK_VOID(cmd)    \
-       down_read(&cmd->root_lock); \
-       if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
-               up_read(&cmd->root_lock); \
-               return; \
-       }
+#define READ_LOCK(cmd)                         \
+       do {                                    \
+               if (!cmd_read_lock((cmd)))      \
+                       return -EINVAL;         \
+       } while (0)
+
+#define READ_LOCK_VOID(cmd)                    \
+       do {                                    \
+               if (!cmd_read_lock((cmd)))      \
+                       return;                 \
+       } while (0)
 
 #define READ_UNLOCK(cmd) \
-       up_read(&cmd->root_lock)
+       up_read(&(cmd)->root_lock)
 
 int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
 {
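The dm-cache-metadata rework replaces open-coded multi-statement macros with real cmd_write_lock()/cmd_read_lock() helpers plus thin do { } while (0) macros that keep the early return visible at the call site. A standalone sketch of why the do/while (0) wrapper matters for any multi-statement macro (not the dm-cache code itself):

    #include <stdio.h>

    #define BAD_LOCK()   puts("lock"); puts("check")        /* two statements */
    #define GOOD_LOCK()  do { puts("lock"); puts("check"); } while (0)

    int main(void)
    {
            int contended = 0;

            if (contended)
                    GOOD_LOCK();    /* expands to exactly one statement */
            else
                    puts("fast path");

            /*
             * With BAD_LOCK() in the if above, the second puts() would
             * escape the conditional and run unconditionally, and the
             * dangling ';' would make the if/else pairing fail to compile.
             */
            return 0;
    }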
index be4905769a45637a4882af087544578cdb90efa4..3d3ac13287a4570847ae179a483fe13586e96f66 100644 (file)
@@ -1662,8 +1662,10 @@ static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
                tio = alloc_tio(ci, ti, target_bio_nr);
                tio->len_ptr = len;
                r = clone_bio(tio, bio, sector, *len);
-               if (r < 0)
+               if (r < 0) {
+                       free_tio(ci->md, tio);
                        break;
+               }
                __map_bio(tio);
        }
 
index c068f171b4eb1140ed7956d8f73f6096c14961a1..14d3b37944df031214c2c6951ed15c46da104842 100644 (file)
@@ -284,6 +284,8 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
         * go away inside make_request
         */
        sectors = bio_sectors(bio);
+       /* the bio could become mergeable after passing to the underlying layer */
+       bio->bi_rw &= ~REQ_NOMERGE;
        mddev->pers->make_request(mddev, bio);
 
        cpu = part_stat_lock();
@@ -718,6 +720,7 @@ static void super_written(struct bio *bio)
 
        if (atomic_dec_and_test(&mddev->pending_writes))
                wake_up(&mddev->sb_wait);
+       rdev_dec_pending(rdev, mddev);
        bio_put(bio);
 }
 
@@ -732,6 +735,8 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
         */
        struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
 
+       atomic_inc(&rdev->nr_pending);
+
        bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
        bio->bi_iter.bi_sector = sector;
        bio_add_page(bio, page, size, 0);
@@ -6883,7 +6888,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
 
        case ADD_NEW_DISK:
                /* We can support ADD_NEW_DISK on read-only arrays
-                * on if we are re-adding a preexisting device.
+                * only if we are re-adding a preexisting device.
                 * So require mddev->pers and MD_DISK_SYNC.
                 */
                if (mddev->pers) {
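The super_written()/md_super_write() pair above pins the rdev with atomic_inc(&rdev->nr_pending) before the superblock bio is submitted and drops it with rdev_dec_pending() in the completion handler, so the device cannot be torn down while the write is in flight. A userspace sketch of that take-reference-before-async, drop-on-completion pattern (C11 atomics stand in for the kernel helpers):

    #include <stdatomic.h>
    #include <stdio.h>

    struct rdev { atomic_int nr_pending; };

    static void super_write_done(struct rdev *rdev)         /* completion side */
    {
            /* drop the reference taken at submission */
            if (atomic_fetch_sub(&rdev->nr_pending, 1) == 1)
                    puts("last pending I/O done: device may now be removed");
    }

    static void super_write_submit(struct rdev *rdev)       /* submission side */
    {
            atomic_fetch_add(&rdev->nr_pending, 1);  /* pin before the async op */
            /* ... the bio would be submitted here; completion fires later ... */
            super_write_done(rdev);                  /* simulate the completion */
    }

    int main(void)
    {
            struct rdev r = { .nr_pending = 0 };

            super_write_submit(&r);
            return 0;
    }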
index 2ea12c6bf65997895e26a89fe8977451ae438d97..34783a3c8b3c1af780a092b8d11018f702541481 100644 (file)
@@ -70,7 +70,6 @@ static void dump_zones(struct mddev *mddev)
                        (unsigned long long)zone_size>>1);
                zone_start = conf->strip_zone[j].zone_end;
        }
-       printk(KERN_INFO "\n");
 }
 
 static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
@@ -85,6 +84,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
        struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
        unsigned short blksize = 512;
 
+       *private_conf = ERR_PTR(-ENOMEM);
        if (!conf)
                return -ENOMEM;
        rdev_for_each(rdev1, mddev) {
index 39fb21e048e642dd2bf6d6729a9e8c0cd61310a9..a7f2b9c9f8a06fa84aa5f46d6c44c5760894502b 100644 (file)
@@ -570,7 +570,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
                        if (best_dist_disk < 0) {
                                if (is_badblock(rdev, this_sector, sectors,
                                                &first_bad, &bad_sectors)) {
-                                       if (first_bad < this_sector)
+                                       if (first_bad <= this_sector)
                                                /* Cannot use this */
                                                continue;
                                        best_good_sectors = first_bad - this_sector;
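The one-character raid1 change turns first_bad < this_sector into first_bad <= this_sector: when the first bad sector coincides with the requested start, first_bad - this_sector yields zero usable sectors, so the device must be skipped too. The arithmetic, spelled out:

    #include <stdio.h>

    int main(void)
    {
            long this_sector = 100;
            long first_bad = 100;   /* bad range starts exactly at the request */

            long best_good_sectors = first_bad - this_sector;       /* = 0 */

            printf("usable sectors before first_bad: %ld -> %s\n",
                   best_good_sectors,
                   best_good_sectors > 0 ? "use this device"
                                         : "skip this device");
            return 0;
    }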
index 8ab8b65e17413e4a015aab5e073728fd23e21b47..e48c262ce0322fe504d92d14403c70cb3ef4a608 100644 (file)
@@ -3502,8 +3502,6 @@ returnbi:
                                dev = &sh->dev[i];
                        } else if (test_bit(R5_Discard, &dev->flags))
                                discard_pending = 1;
-                       WARN_ON(test_bit(R5_SkipCopy, &dev->flags));
-                       WARN_ON(dev->page != dev->orig_page);
                }
 
        r5l_stripe_write_finished(sh);
index 6e43c95629ea8e8fc550b6fc63bf56b5c627b1ac..3cfd7af8c5cab06607a94c716951daa6166b60e2 100644 (file)
@@ -846,11 +846,11 @@ struct media_device *media_device_find_devres(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(media_device_find_devres);
 
+#if IS_ENABLED(CONFIG_PCI)
 void media_device_pci_init(struct media_device *mdev,
                           struct pci_dev *pci_dev,
                           const char *name)
 {
-#ifdef CONFIG_PCI
        mdev->dev = &pci_dev->dev;
 
        if (name)
@@ -866,16 +866,16 @@ void media_device_pci_init(struct media_device *mdev,
        mdev->driver_version = LINUX_VERSION_CODE;
 
        media_device_init(mdev);
-#endif
 }
 EXPORT_SYMBOL_GPL(media_device_pci_init);
+#endif
 
+#if IS_ENABLED(CONFIG_USB)
 void __media_device_usb_init(struct media_device *mdev,
                             struct usb_device *udev,
                             const char *board_name,
                             const char *driver_name)
 {
-#ifdef CONFIG_USB
        mdev->dev = &udev->dev;
 
        if (driver_name)
@@ -895,9 +895,9 @@ void __media_device_usb_init(struct media_device *mdev,
        mdev->driver_version = LINUX_VERSION_CODE;
 
        media_device_init(mdev);
-#endif
 }
 EXPORT_SYMBOL_GPL(__media_device_usb_init);
+#endif
 
 
 #endif /* CONFIG_MEDIA_CONTROLLER */
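These media_device hunks hoist the #ifdef out of the function bodies and use IS_ENABLED() instead, so a CONFIG_PCI=n or CONFIG_USB=n build drops the functions entirely rather than exporting empty stubs. A simplified, self-contained version of the preprocessor trick behind IS_ENABLED() (the real kconfig.h also accounts for =m module builds):

    #include <stdio.h>

    #define CONFIG_PCI 1    /* comment this out to compile the code away */

    /* simplified copy of the kernel's kconfig.h machinery */
    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(__ignored, val, ...) val
    #define __is_defined(x) ___is_defined(x)
    #define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
    #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
    #define IS_ENABLED(option) __is_defined(option)

    #if IS_ENABLED(CONFIG_PCI)
    static void media_pci_init(void) { puts("PCI init"); }
    #endif

    int main(void)
    {
    #if IS_ENABLED(CONFIG_PCI)
            media_pci_init();
    #else
            puts("built without PCI support");
    #endif
            return 0;
    }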
index feb521f28e14857b8344d7320769c87183cd17ac..4f494acd8150fd85f8aaedfb3572b85bb22b460d 100644 (file)
@@ -1446,22 +1446,13 @@ static int fimc_md_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, fmd);
 
-       /* Protect the media graph while we're registering entities */
-       mutex_lock(&fmd->media_dev.graph_mutex);
-
        ret = fimc_md_register_platform_entities(fmd, dev->of_node);
-       if (ret) {
-               mutex_unlock(&fmd->media_dev.graph_mutex);
+       if (ret)
                goto err_clk;
-       }
 
        ret = fimc_md_register_sensor_entities(fmd);
-       if (ret) {
-               mutex_unlock(&fmd->media_dev.graph_mutex);
+       if (ret)
                goto err_m_ent;
-       }
-
-       mutex_unlock(&fmd->media_dev.graph_mutex);
 
        ret = device_create_file(&pdev->dev, &dev_attr_subdev_conf_mode);
        if (ret)
index 0b44b9accf50794172ddb2249414883a1b125b43..af237af204e2760fc0efcaac6ac2dc31207ef662 100644 (file)
@@ -493,21 +493,17 @@ static int s3c_camif_probe(struct platform_device *pdev)
        if (ret < 0)
                goto err_sens;
 
-       mutex_lock(&camif->media_dev.graph_mutex);
-
        ret = v4l2_device_register_subdev_nodes(&camif->v4l2_dev);
        if (ret < 0)
-               goto err_unlock;
+               goto err_sens;
 
        ret = camif_register_video_nodes(camif);
        if (ret < 0)
-               goto err_unlock;
+               goto err_sens;
 
        ret = camif_create_media_links(camif);
        if (ret < 0)
-               goto err_unlock;
-
-       mutex_unlock(&camif->media_dev.graph_mutex);
+               goto err_sens;
 
        ret = media_device_register(&camif->media_dev);
        if (ret < 0)
@@ -516,8 +512,6 @@ static int s3c_camif_probe(struct platform_device *pdev)
        pm_runtime_put(dev);
        return 0;
 
-err_unlock:
-       mutex_unlock(&camif->media_dev.graph_mutex);
 err_sens:
        v4l2_device_unregister(&camif->v4l2_dev);
        media_device_unregister(&camif->media_dev);
index ca861aea68a573d37afbdf6569935d4e643485b0..6b469e8c4c6e209d0001c0ff8e9150057fee1c43 100644 (file)
@@ -228,10 +228,6 @@ void au0828_card_analog_fe_setup(struct au0828_dev *dev)
                                "au8522", 0x8e >> 1, NULL);
                if (sd == NULL)
                        pr_err("analog subdev registration failed\n");
-#ifdef CONFIG_MEDIA_CONTROLLER
-               if (sd)
-                       dev->decoder = &sd->entity;
-#endif
        }
 
        /* Setup tuners */
index 5dc82e8c8670f8ea299cf01151bf7293455410b7..cc22b32776ad1c5159bb4d04b22228fca1aab3ed 100644 (file)
@@ -137,8 +137,14 @@ static void au0828_unregister_media_device(struct au0828_dev *dev)
 #ifdef CONFIG_MEDIA_CONTROLLER
        if (dev->media_dev &&
                media_devnode_is_registered(&dev->media_dev->devnode)) {
+               /* clear enable_source, disable_source */
+               dev->media_dev->source_priv = NULL;
+               dev->media_dev->enable_source = NULL;
+               dev->media_dev->disable_source = NULL;
+
                media_device_unregister(dev->media_dev);
                media_device_cleanup(dev->media_dev);
+               kfree(dev->media_dev);
                dev->media_dev = NULL;
        }
 #endif
@@ -166,7 +172,7 @@ static void au0828_usb_disconnect(struct usb_interface *interface)
           Set the status so poll routines can check and avoid
           access after disconnect.
        */
-       dev->dev_state = DEV_DISCONNECTED;
+       set_bit(DEV_DISCONNECTED, &dev->dev_state);
 
        au0828_rc_unregister(dev);
        /* Digital TV */
@@ -192,7 +198,7 @@ static int au0828_media_device_init(struct au0828_dev *dev,
 #ifdef CONFIG_MEDIA_CONTROLLER
        struct media_device *mdev;
 
-       mdev = media_device_get_devres(&udev->dev);
+       mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
        if (!mdev)
                return -ENOMEM;
 
@@ -456,7 +462,8 @@ static int au0828_media_device_register(struct au0828_dev *dev,
 {
 #ifdef CONFIG_MEDIA_CONTROLLER
        int ret;
-       struct media_entity *entity, *demod = NULL, *tuner = NULL;
+       struct media_entity *entity, *demod = NULL;
+       struct media_link *link;
 
        if (!dev->media_dev)
                return 0;
@@ -482,26 +489,37 @@ static int au0828_media_device_register(struct au0828_dev *dev,
        }
 
        /*
-        * Find tuner and demod to disable the link between
-        * the two to avoid disable step when tuner is requested
-        * by video or audio. Note that this step can't be done
-        * until dvb graph is created during dvb register.
+        * Find tuner, decoder and demod.
+        *
+        * The tuner and decoder should be cached, as they'll be used by
+        * au0828_enable_source.
+        *
+        * It also needs to disable the link between the tuner and the
+        * decoder/demod, to avoid the disable step when the tuner is
+        * requested by video or audio. Note that this step can't be done
+        * until the dvb graph is created during dvb register.
        */
        media_device_for_each_entity(entity, dev->media_dev) {
-               if (entity->function == MEDIA_ENT_F_DTV_DEMOD)
+               switch (entity->function) {
+               case MEDIA_ENT_F_TUNER:
+                       dev->tuner = entity;
+                       break;
+               case MEDIA_ENT_F_ATV_DECODER:
+                       dev->decoder = entity;
+                       break;
+               case MEDIA_ENT_F_DTV_DEMOD:
                        demod = entity;
-               else if (entity->function == MEDIA_ENT_F_TUNER)
-                       tuner = entity;
+                       break;
+               }
        }
-       /* Disable link between tuner and demod */
-       if (tuner && demod) {
-               struct media_link *link;
 
-               list_for_each_entry(link, &demod->links, list) {
-                       if (link->sink->entity == demod &&
-                           link->source->entity == tuner) {
+       /* Disable link between tuner->demod and/or tuner->decoder */
+       if (dev->tuner) {
+               list_for_each_entry(link, &dev->tuner->links, list) {
+                       if (demod && link->sink->entity == demod)
+                               media_entity_setup_link(link, 0);
+                       if (dev->decoder && link->sink->entity == dev->decoder)
                                media_entity_setup_link(link, 0);
-                       }
                }
        }
 
index b0f0679719798c4b8212b9dabee3f992b018c17a..3d6687f0407dfe4c301ca2e9f4bca594c9201522 100644 (file)
@@ -130,7 +130,7 @@ static int au0828_get_key_au8522(struct au0828_rc *ir)
        bool first = true;
 
        /* do nothing if device is disconnected */
-       if (ir->dev->dev_state == DEV_DISCONNECTED)
+       if (test_bit(DEV_DISCONNECTED, &ir->dev->dev_state))
                return 0;
 
        /* Check IR int */
@@ -260,7 +260,7 @@ static void au0828_rc_stop(struct rc_dev *rc)
        cancel_delayed_work_sync(&ir->work);
 
        /* do nothing if device is disconnected */
-       if (ir->dev->dev_state != DEV_DISCONNECTED) {
+       if (!test_bit(DEV_DISCONNECTED, &ir->dev->dev_state)) {
                /* Disable IR */
                au8522_rc_clear(ir, 0xe0, 1 << 4);
        }
index 13f6dab9ccc290f0cd698f7a87deeadfe8ee166e..32d7db96479cb2a5d9958a389ded67862a132a89 100644 (file)
@@ -106,14 +106,13 @@ static inline void print_err_status(struct au0828_dev *dev,
 
 static int check_dev(struct au0828_dev *dev)
 {
-       if (dev->dev_state & DEV_DISCONNECTED) {
+       if (test_bit(DEV_DISCONNECTED, &dev->dev_state)) {
                pr_info("v4l2 ioctl: device not present\n");
                return -ENODEV;
        }
 
-       if (dev->dev_state & DEV_MISCONFIGURED) {
-               pr_info("v4l2 ioctl: device is misconfigured; "
-                      "close and open it again\n");
+       if (test_bit(DEV_MISCONFIGURED, &dev->dev_state)) {
+               pr_info("v4l2 ioctl: device is misconfigured; close and open it again\n");
                return -EIO;
        }
        return 0;
@@ -521,8 +520,8 @@ static inline int au0828_isoc_copy(struct au0828_dev *dev, struct urb *urb)
        if (!dev)
                return 0;
 
-       if ((dev->dev_state & DEV_DISCONNECTED) ||
-           (dev->dev_state & DEV_MISCONFIGURED))
+       if (test_bit(DEV_DISCONNECTED, &dev->dev_state) ||
+           test_bit(DEV_MISCONFIGURED, &dev->dev_state))
                return 0;
 
        if (urb->status < 0) {
@@ -824,10 +823,10 @@ static int au0828_stream_interrupt(struct au0828_dev *dev)
        int ret = 0;
 
        dev->stream_state = STREAM_INTERRUPT;
-       if (dev->dev_state == DEV_DISCONNECTED)
+       if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
                return -ENODEV;
        else if (ret) {
-               dev->dev_state = DEV_MISCONFIGURED;
+               set_bit(DEV_MISCONFIGURED, &dev->dev_state);
                dprintk(1, "%s device is misconfigured!\n", __func__);
                return ret;
        }
@@ -1026,7 +1025,7 @@ static int au0828_v4l2_open(struct file *filp)
        int ret;
 
        dprintk(1,
-               "%s called std_set %d dev_state %d stream users %d users %d\n",
+               "%s called std_set %d dev_state %ld stream users %d users %d\n",
                __func__, dev->std_set_in_tuner_core, dev->dev_state,
                dev->streaming_users, dev->users);
 
@@ -1045,7 +1044,7 @@ static int au0828_v4l2_open(struct file *filp)
                au0828_analog_stream_enable(dev);
                au0828_analog_stream_reset(dev);
                dev->stream_state = STREAM_OFF;
-               dev->dev_state |= DEV_INITIALIZED;
+               set_bit(DEV_INITIALIZED, &dev->dev_state);
        }
        dev->users++;
        mutex_unlock(&dev->lock);
@@ -1059,7 +1058,7 @@ static int au0828_v4l2_close(struct file *filp)
        struct video_device *vdev = video_devdata(filp);
 
        dprintk(1,
-               "%s called std_set %d dev_state %d stream users %d users %d\n",
+               "%s called std_set %d dev_state %ld stream users %d users %d\n",
                __func__, dev->std_set_in_tuner_core, dev->dev_state,
                dev->streaming_users, dev->users);
 
@@ -1075,7 +1074,7 @@ static int au0828_v4l2_close(struct file *filp)
                del_timer_sync(&dev->vbi_timeout);
        }
 
-       if (dev->dev_state == DEV_DISCONNECTED)
+       if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
                goto end;
 
        if (dev->users == 1) {
@@ -1135,7 +1134,7 @@ static void au0828_init_tuner(struct au0828_dev *dev)
                .type = V4L2_TUNER_ANALOG_TV,
        };
 
-       dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+       dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
                dev->std_set_in_tuner_core, dev->dev_state);
 
        if (dev->std_set_in_tuner_core)
@@ -1207,7 +1206,7 @@ static int vidioc_querycap(struct file *file, void  *priv,
        struct video_device *vdev = video_devdata(file);
        struct au0828_dev *dev = video_drvdata(file);
 
-       dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+       dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
                dev->std_set_in_tuner_core, dev->dev_state);
 
        strlcpy(cap->driver, "au0828", sizeof(cap->driver));
@@ -1250,7 +1249,7 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
 {
        struct au0828_dev *dev = video_drvdata(file);
 
-       dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+       dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
                dev->std_set_in_tuner_core, dev->dev_state);
 
        f->fmt.pix.width = dev->width;
@@ -1269,7 +1268,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
 {
        struct au0828_dev *dev = video_drvdata(file);
 
-       dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+       dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
                dev->std_set_in_tuner_core, dev->dev_state);
 
        return au0828_set_format(dev, VIDIOC_TRY_FMT, f);
@@ -1281,7 +1280,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
        struct au0828_dev *dev = video_drvdata(file);
        int rc;
 
-       dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+       dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
                dev->std_set_in_tuner_core, dev->dev_state);
 
        rc = check_dev(dev);
@@ -1303,7 +1302,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id norm)
 {
        struct au0828_dev *dev = video_drvdata(file);
 
-       dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+       dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
                dev->std_set_in_tuner_core, dev->dev_state);
 
        if (norm == dev->std)
@@ -1335,7 +1334,7 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
 {
        struct au0828_dev *dev = video_drvdata(file);
 
-       dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+       dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
                dev->std_set_in_tuner_core, dev->dev_state);
 
        *norm = dev->std;
@@ -1357,7 +1356,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
                [AU0828_VMUX_DVB] = "DVB",
        };
 
-       dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+       dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
                dev->std_set_in_tuner_core, dev->dev_state);
 
        tmp = input->index;
@@ -1387,7 +1386,7 @@ static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
 {
        struct au0828_dev *dev = video_drvdata(file);
 
-       dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+       dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
                dev->std_set_in_tuner_core, dev->dev_state);
 
        *i = dev->ctrl_input;
@@ -1398,7 +1397,7 @@ static void au0828_s_input(struct au0828_dev *dev, int index)
 {
        int i;
 
-       dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+       dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
                dev->std_set_in_tuner_core, dev->dev_state);
 
        switch (AUVI_INPUT(index).type) {
@@ -1496,7 +1495,7 @@ static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a)
 {
        struct au0828_dev *dev = video_drvdata(file);
 
-       dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+       dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
                dev->std_set_in_tuner_core, dev->dev_state);
 
        a->index = dev->ctrl_ainput;
@@ -1516,7 +1515,7 @@ static int vidioc_s_audio(struct file *file, void *priv, const struct v4l2_audio
        if (a->index != dev->ctrl_ainput)
                return -EINVAL;
 
-       dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+       dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
                dev->std_set_in_tuner_core, dev->dev_state);
        return 0;
 }
@@ -1534,7 +1533,7 @@ static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
        if (ret)
                return ret;
 
-       dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+       dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
                dev->std_set_in_tuner_core, dev->dev_state);
 
        strcpy(t->name, "Auvitek tuner");
@@ -1554,7 +1553,7 @@ static int vidioc_s_tuner(struct file *file, void *priv,
        if (t->index != 0)
                return -EINVAL;
 
-       dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+       dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
                dev->std_set_in_tuner_core, dev->dev_state);
 
        au0828_init_tuner(dev);
@@ -1576,7 +1575,7 @@ static int vidioc_g_frequency(struct file *file, void *priv,
 
        if (freq->tuner != 0)
                return -EINVAL;
-       dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+       dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
                dev->std_set_in_tuner_core, dev->dev_state);
        freq->frequency = dev->ctrl_freq;
        return 0;
@@ -1591,7 +1590,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
        if (freq->tuner != 0)
                return -EINVAL;
 
-       dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+       dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
                dev->std_set_in_tuner_core, dev->dev_state);
 
        au0828_init_tuner(dev);
@@ -1617,7 +1616,7 @@ static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
 {
        struct au0828_dev *dev = video_drvdata(file);
 
-       dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+       dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
                dev->std_set_in_tuner_core, dev->dev_state);
 
        format->fmt.vbi.samples_per_line = dev->vbi_width;
@@ -1643,7 +1642,7 @@ static int vidioc_cropcap(struct file *file, void *priv,
        if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
                return -EINVAL;
 
-       dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+       dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
                dev->std_set_in_tuner_core, dev->dev_state);
 
        cc->bounds.left = 0;
@@ -1665,7 +1664,7 @@ static int vidioc_g_register(struct file *file, void *priv,
 {
        struct au0828_dev *dev = video_drvdata(file);
 
-       dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+       dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
                dev->std_set_in_tuner_core, dev->dev_state);
 
        reg->val = au0828_read(dev, reg->reg);
@@ -1678,7 +1677,7 @@ static int vidioc_s_register(struct file *file, void *priv,
 {
        struct au0828_dev *dev = video_drvdata(file);
 
-       dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+       dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
                dev->std_set_in_tuner_core, dev->dev_state);
 
        return au0828_writereg(dev, reg->reg, reg->val);
index ff7f8510fb77ad633e97bbb55e946bcffe02fdac..87f32846f1c01fad59c57765ac1c1e6ecfc86942 100644 (file)
@@ -21,6 +21,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/bitops.h>
 #include <linux/usb.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
@@ -121,9 +122,9 @@ enum au0828_stream_state {
 
 /* device state */
 enum au0828_dev_state {
-       DEV_INITIALIZED = 0x01,
-       DEV_DISCONNECTED = 0x02,
-       DEV_MISCONFIGURED = 0x04
+       DEV_INITIALIZED = 0,
+       DEV_DISCONNECTED = 1,
+       DEV_MISCONFIGURED = 2
 };
 
 struct au0828_dev;
@@ -247,7 +248,7 @@ struct au0828_dev {
        int input_type;
        int std_set_in_tuner_core;
        unsigned int ctrl_input;
-       enum au0828_dev_state dev_state;
+       unsigned long dev_state; /* bits defined in enum au0828_dev_state */
        enum au0828_stream_state stream_state;
        wait_queue_head_t open;
 
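The au0828 hunks convert dev_state from mask values (0x01, 0x02, 0x04) manipulated with ==, |= and & into bit numbers (0, 1, 2) used with the atomic set_bit()/test_bit() helpers on an unsigned long; an equality test breaks as soon as two flags are set at once, and the open-coded read-modify-write was not atomic. A userspace sketch with non-atomic stand-ins for the bit helpers:

    #include <stdio.h>

    enum { DEV_INITIALIZED = 0, DEV_DISCONNECTED = 1, DEV_MISCONFIGURED = 2 };

    /* non-atomic stand-ins; the kernel's set_bit()/test_bit() are atomic */
    static void set_bit_(int nr, unsigned long *addr) { *addr |= 1UL << nr; }
    static int test_bit_(int nr, const unsigned long *addr)
    {
            return (*addr >> nr) & 1;
    }

    int main(void)
    {
            unsigned long dev_state = 0;

            set_bit_(DEV_INITIALIZED, &dev_state);
            set_bit_(DEV_DISCONNECTED, &dev_state);

            /* old style: an == test fails once a second flag is set */
            printf("state == DISCONNECTED? %d\n",
                   dev_state == (1UL << DEV_DISCONNECTED));         /* 0 */
            /* new style: the per-bit test still sees the flag */
            printf("test_bit(DISCONNECTED)? %d\n",
                   test_bit_(DEV_DISCONNECTED, &dev_state));        /* 1 */
            return 0;
    }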
index 12f5ebbd0436e770022e7501a80f5dda7849f9a1..ad2f3d27b2662092e0d3fa7f894b8486eb1920ba 100644 (file)
@@ -1452,13 +1452,6 @@ static int usbvision_probe(struct usb_interface *intf,
        printk(KERN_INFO "%s: %s found\n", __func__,
                                usbvision_device_data[model].model_string);
 
-       /*
-        * this is a security check.
-        * an exploit using an incorrect bInterfaceNumber is known
-        */
-       if (ifnum >= USB_MAXINTERFACES || !dev->actconfig->interface[ifnum])
-               return -ENODEV;
-
        if (usbvision_device_data[model].interface >= 0)
                interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0];
        else if (ifnum < dev->actconfig->desc.bNumInterfaces)
index 2a7b79bc90fdbb985a8ffacb84babb27f282aeb2..2228cd3a846e9536295c77faeff5d15bc22d66a3 100644 (file)
@@ -34,7 +34,7 @@ int v4l2_mc_create_media_graph(struct media_device *mdev)
 {
        struct media_entity *entity;
        struct media_entity *if_vid = NULL, *if_aud = NULL;
-       struct media_entity *tuner = NULL, *decoder = NULL, *dtv_demod = NULL;
+       struct media_entity *tuner = NULL, *decoder = NULL;
        struct media_entity *io_v4l = NULL, *io_vbi = NULL, *io_swradio = NULL;
        bool is_webcam = false;
        u32 flags;
index df4c052c6bd6111803ab6dfad5edebcb814f3025..f300f060b3f34cdfeb8b1e12b90d9ab345c6b629 100644 (file)
@@ -349,7 +349,7 @@ int videobuf_dma_free(struct videobuf_dmabuf *dma)
 
        if (dma->pages) {
                for (i = 0; i < dma->nr_pages; i++)
-                       page_cache_release(dma->pages[i]);
+                       put_page(dma->pages[i]);
                kfree(dma->pages);
                dma->pages = NULL;
        }
index 5d016f496e0ed3530bc525cdeceaa2e606693046..9fbcb67a9ee6e0e22ce6e2f81cb13c57858459a9 100644 (file)
@@ -1645,7 +1645,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
  * Will sleep if required for nonblocking == false.
  */
 static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
-                               int nonblocking)
+                            void *pb, int nonblocking)
 {
        unsigned long flags;
        int ret;
@@ -1666,10 +1666,10 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
        /*
         * Only remove the buffer from done_list if v4l2_buffer can handle all
         * the planes.
-        * Verifying planes is NOT necessary since it already has been checked
-        * before the buffer is queued/prepared. So it can never fail.
         */
-       list_del(&(*vb)->done_entry);
+       ret = call_bufop(q, verify_planes_array, *vb, pb);
+       if (!ret)
+               list_del(&(*vb)->done_entry);
        spin_unlock_irqrestore(&q->done_lock, flags);
 
        return ret;
@@ -1748,7 +1748,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
        struct vb2_buffer *vb = NULL;
        int ret;
 
-       ret = __vb2_get_done_vb(q, &vb, nonblocking);
+       ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
        if (ret < 0)
                return ret;
 
@@ -2297,6 +2297,16 @@ unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file,
        if (!vb2_is_streaming(q) || q->error)
                return POLLERR;
 
+       /*
+        * If this quirk is set and QBUF hasn't been called yet then
+        * return POLLERR as well. This only affects capture queues, output
+        * queues will always initialize waiting_for_buffers to false.
+        * This quirk is set by V4L2 for backwards compatibility reasons.
+        */
+       if (q->quirk_poll_must_check_waiting_for_buffers &&
+           q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
+               return POLLERR;
+
        /*
         * For output streams you can call write() as long as there are fewer
         * buffers queued than there are buffers available.
index dbec5923fcf07d85ddde50a70686ddc4c397c5ed..3c3b517f1d1cacb5a7131263cd80c112bd22d619 100644 (file)
@@ -49,7 +49,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
        vec = frame_vector_create(nr);
        if (!vec)
                return ERR_PTR(-ENOMEM);
-       ret = get_vaddr_frames(start, nr, write, 1, vec);
+       ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec);
        if (ret < 0)
                goto out_destroy;
        /* We accept only complete set of PFNs */
index 91f552124050d2b3b91d2190af16fbdf2f9d7243..7f366f1b0377a3557201a1bf64470472a29d5658 100644 (file)
@@ -74,6 +74,11 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer
        return 0;
 }
 
+static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
+{
+       return __verify_planes_array(vb, pb);
+}
+
 /**
  * __verify_length() - Verify that the bytesused value for each plane fits in
  * the plane length and that the data offset doesn't exceed the bytesused value.
@@ -437,6 +442,7 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb,
 }
 
 static const struct vb2_buf_ops v4l2_buf_ops = {
+       .verify_planes_array    = __verify_planes_array_core,
        .fill_user_buffer       = __fill_v4l2_buffer,
        .fill_vb2_buffer        = __fill_vb2_buffer,
        .copy_timestamp         = __copy_timestamp,
@@ -765,6 +771,12 @@ int vb2_queue_init(struct vb2_queue *q)
        q->is_output = V4L2_TYPE_IS_OUTPUT(q->type);
        q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK)
                        == V4L2_BUF_FLAG_TIMESTAMP_COPY;
+       /*
+        * For compatibility with vb1: if QBUF hasn't been called yet, then
+        * return POLLERR as well. This only affects capture queues, output
+        * queues will always initialize waiting_for_buffers to false.
+        */
+       q->quirk_poll_must_check_waiting_for_buffers = true;
 
        return vb2_core_queue_init(q);
 }
@@ -818,14 +830,6 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
                        poll_wait(file, &fh->wait, wait);
        }
 
-       /*
-        * For compatibility with vb1: if QBUF hasn't been called yet, then
-        * return POLLERR as well. This only affects capture queues, output
-        * queues will always initialize waiting_for_buffers to false.
-        */
-       if (q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
-               return POLLERR;
-
        return res | vb2_core_poll(q, file, wait);
 }
 EXPORT_SYMBOL_GPL(vb2_poll);
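The videobuf2 poll change moves a vb1-era behaviour out of the shared core and behind an opt-in quirk flag that only the V4L2 wrapper sets at queue init, so other users of the core never see the legacy POLLERR. A minimal sketch of that opt-in quirk pattern (struct and function names here are illustrative):

    #include <stdio.h>
    #include <stdbool.h>

    struct queue {
            bool quirk_poll_must_check_waiting_for_buffers;
            bool waiting_for_buffers;
    };

    static int core_poll(struct queue *q)
    {
            /* the core only enforces the legacy rule when asked to */
            if (q->quirk_poll_must_check_waiting_for_buffers &&
                q->waiting_for_buffers)
                    return -1;      /* stands in for POLLERR */
            return 0;
    }

    static void v4l2_queue_init(struct queue *q)
    {
            /* only the compatibility wrapper opts in, for vb1 semantics */
            q->quirk_poll_must_check_waiting_for_buffers = true;
    }

    int main(void)
    {
            struct queue generic = { .waiting_for_buffers = true };
            struct queue v4l2 = { .waiting_for_buffers = true };

            v4l2_queue_init(&v4l2);
            printf("generic core user: %d, v4l2 user: %d\n",
                   core_poll(&generic), core_poll(&v4l2));  /* 0, -1 */
            return 0;
    }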
index cbe96072a6cc2619ecd0be52d45a837a7bf69a5c..6955c9e22d571a335864b6c701e40ee32079dfe1 100644 (file)
@@ -791,7 +791,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
                pSimple->Address.High = 0;
 
        mpt_put_msg_frame (LanCtx, mpt_dev, mf);
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
 
        dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
                        IOC_AND_NETDEV_NAMES_s_s(dev),
index 10370f28050075fac6bbc550168e47e5e091b0f7..7edea9c19199a8a4714b2ea67eb8c277019b78a4 100644 (file)
@@ -223,6 +223,13 @@ int __detach_context(struct cxl_context *ctx)
                cxl_ops->link_ok(ctx->afu->adapter, ctx->afu));
        flush_work(&ctx->fault_work); /* Only needed for dedicated process */
 
+       /*
+        * Wait until no further interrupts are presented by the PSL
+        * for this context.
+        */
+       if (cxl_ops->irq_wait)
+               cxl_ops->irq_wait(ctx);
+
        /* release the reference to the group leader and mm handling pid */
        put_pid(ctx->pid);
        put_pid(ctx->glpid);
index 38e21cf7806ed59d6c925edd3e34f5c519d6a510..73dc2a33da7434d6552563407476a9d83f326942 100644 (file)
@@ -274,6 +274,7 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An     = {0x0A0};
 #define CXL_PSL_DSISR_An_PE (1ull << (63-4))  /* PSL Error (implementation specific) */
 #define CXL_PSL_DSISR_An_AE (1ull << (63-5))  /* AFU Error */
 #define CXL_PSL_DSISR_An_OC (1ull << (63-6))  /* OS Context Warning */
+#define CXL_PSL_DSISR_PENDING (CXL_PSL_DSISR_TRANS | CXL_PSL_DSISR_An_PE | CXL_PSL_DSISR_An_AE | CXL_PSL_DSISR_An_OC)
 /* NOTE: Bits 32:63 are undefined if DSISR[DS] = 1 */
 #define CXL_PSL_DSISR_An_M  DSISR_NOHPTE      /* PTE not found */
 #define CXL_PSL_DSISR_An_P  DSISR_PROTFAULT   /* Storage protection violation */
@@ -855,6 +856,7 @@ struct cxl_backend_ops {
                                        u64 dsisr, u64 errstat);
        irqreturn_t (*psl_interrupt)(int irq, void *data);
        int (*ack_irq)(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask);
+       void (*irq_wait)(struct cxl_context *ctx);
        int (*attach_process)(struct cxl_context *ctx, bool kernel,
                        u64 wed, u64 amr);
        int (*detach_process)(struct cxl_context *ctx);
index be646dc41a2c66537e641fbb866f23478e866c39..8def4553acbaabe0307af131f321fb0d42397e91 100644 (file)
@@ -203,7 +203,6 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
 void cxl_unmap_irq(unsigned int virq, void *cookie)
 {
        free_irq(virq, cookie);
-       irq_dispose_mapping(virq);
 }
 
 int cxl_register_one_irq(struct cxl *adapter,
index 387fcbdf97938f28efea56e4f8c808235ab5f213..ecf7557cd657f66caa932fce83c785bc241bda9e 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/mutex.h>
 #include <linux/mm.h>
 #include <linux/uaccess.h>
+#include <linux/delay.h>
 #include <asm/synch.h>
 #include <misc/cxl-base.h>
 
@@ -797,6 +798,35 @@ static irqreturn_t native_irq_multiplexed(int irq, void *data)
        return fail_psl_irq(afu, &irq_info);
 }
 
+static void native_irq_wait(struct cxl_context *ctx)
+{
+       u64 dsisr;
+       int timeout = 1000;
+       int ph;
+
+       /*
+        * Wait until no further interrupts are presented by the PSL
+        * for this context.
+        */
+       while (timeout--) {
+               ph = cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) & 0xffff;
+               if (ph != ctx->pe)
+                       return;
+               dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
+               if ((dsisr & CXL_PSL_DSISR_PENDING) == 0)
+                       return;
+               /*
+                * We are waiting for the workqueue to process our
+                * irq, so need to let that run here.
+                */
+               msleep(1);
+       }
+
+       dev_warn(&ctx->afu->dev,
+                "WARNING: waiting on DSI for PE %i DSISR %016llx!\n", ph, dsisr);
+       return;
+}
+
 static irqreturn_t native_slice_irq_err(int irq, void *data)
 {
        struct cxl_afu *afu = data;
@@ -1076,6 +1106,7 @@ const struct cxl_backend_ops cxl_native_ops = {
        .handle_psl_slice_error = native_handle_psl_slice_error,
        .psl_interrupt = NULL,
        .ack_irq = native_ack_irq,
+       .irq_wait = native_irq_wait,
        .attach_process = native_attach_process,
        .detach_process = native_detach_process,
        .support_attributes = native_support_attributes,
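native_irq_wait() above is a bounded polling loop: re-read the hardware status, bail out as soon as nothing is pending for this context, sleep 1 ms between reads so the workqueue that acks the interrupt can run, and warn instead of hanging if the state never clears. A userspace sketch of the same shape (the fake register read is illustrative):

    #include <stdio.h>
    #include <unistd.h>

    static int busy_reads = 3;      /* pretend the PSL stays busy for 3 reads */

    static int dsisr_pending(void)  /* stand-in for cxl_p2n_read() + mask */
    {
            return busy_reads-- > 0;
    }

    static void irq_wait(void)
    {
            int timeout = 1000;

            while (timeout--) {
                    if (!dsisr_pending())
                            return;         /* nothing pending: safe to detach */
                    usleep(1000);           /* like msleep(1): let the ack run */
            }
            fprintf(stderr, "WARNING: interrupts still pending after ~1s\n");
    }

    int main(void)
    {
            irq_wait();
            return 0;
    }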
index e8b933111e0df78f100ba1ed0459b9d511afec55..9c677f3f3c26023c81f93c948e0388e41e8e1a52 100644 (file)
@@ -116,8 +116,8 @@ static int ibmasmfs_fill_super (struct super_block *sb, void *data, int silent)
 {
        struct inode *root;
 
-       sb->s_blocksize = PAGE_CACHE_SIZE;
-       sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+       sb->s_blocksize = PAGE_SIZE;
+       sb->s_blocksize_bits = PAGE_SHIFT;
        sb->s_magic = IBMASMFS_MAGIC;
        sb->s_op = &ibmasmfs_s_ops;
        sb->s_time_gran = 1;
index 5f1a36b8fbb082af1d0251504e19a33e1dc8c621..0a5cbbe12452da62127af8ced48101bb9fc212b8 100644 (file)
@@ -458,8 +458,10 @@ static void lkdtm_do_action(enum ctype which)
                        break;
 
                val = kmalloc(len, GFP_KERNEL);
-               if (!val)
+               if (!val) {
+                       kfree(base);
                        break;
+               }
 
                *val = 0x12345678;
                base[offset] = *val;
@@ -498,14 +500,17 @@ static void lkdtm_do_action(enum ctype which)
        }
        case CT_READ_BUDDY_AFTER_FREE: {
                unsigned long p = __get_free_page(GFP_KERNEL);
-               int saw, *val = kmalloc(1024, GFP_KERNEL);
+               int saw, *val;
                int *base;
 
                if (!p)
                        break;
 
-               if (!val)
+               val = kmalloc(1024, GFP_KERNEL);
+               if (!val) {
+                       free_page(p);
                        break;
+               }
 
                base = (int *)p;
 
index e94c7fb6712aab35a1a3278241c3bab494f68777..88e45234d527518e0d449d598c5f04a5be5538a1 100644 (file)
@@ -945,6 +945,11 @@ static long vop_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
                        ret = -EFAULT;
                        goto free_ret;
                }
+               /* Ensure desc has not changed between the two reads */
+               if (memcmp(&dd, dd_config, sizeof(dd))) {
+                       ret = -EINVAL;
+                       goto free_ret;
+               }
                mutex_lock(&vdev->vdev_mutex);
                mutex_lock(&vi->vop_mutex);
                ret = vop_virtio_add_device(vdev, dd_config);
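The added memcmp() guards against a "double fetch": the descriptor is read from user memory once to size things and again as part of the full copy, and a hostile thread sharing the user mapping could change it in between. Re-comparing the two reads rejects the race. A userspace sketch of the check (memcpy stands in for copy_from_user, and struct desc is illustrative):

    #include <string.h>
    #include <stdlib.h>
    #include <errno.h>

    struct desc { unsigned int payload_len; };

    /* copy a header-plus-payload blob that another thread may be mutating */
    static int fetch_desc(const void *user, size_t user_len, struct desc **out)
    {
            struct desc hdr;
            struct desc *full;
            size_t total;

            if (user_len < sizeof(hdr))
                    return -EINVAL;
            memcpy(&hdr, user, sizeof(hdr));  /* first fetch: header only */

            total = sizeof(hdr) + hdr.payload_len;
            if (user_len < total)
                    return -EINVAL;

            full = malloc(total);
            if (!full)
                    return -ENOMEM;
            memcpy(full, user, total);        /* second fetch: the whole thing */

            /* reject the race where the header changed between the fetches */
            if (memcmp(&hdr, full, sizeof(hdr))) {
                    free(full);
                    return -EINVAL;
            }
            *out = full;
            return 0;
    }

    int main(void)
    {
            struct { struct desc hdr; unsigned char payload[4]; } blob =
                    { { 4 }, { 0 } };
            struct desc *d;

            if (fetch_desc(&blob, sizeof(blob), &d) == 0)
                    free(d);
            return 0;
    }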
index f42d9c4e45619ae5f15cbf7cc270ad1f57b2c3f7..f84a4275ca294af9fb2ca8b6ac15bcb9b20bf69f 100644 (file)
@@ -728,7 +728,7 @@ static void qp_release_pages(struct page **pages,
                if (dirty)
                        set_page_dirty(pages[i]);
 
-               page_cache_release(pages[i]);
+               put_page(pages[i]);
                pages[i] = NULL;
        }
 }
index 3bdbe50a363f3250958887170b7162f3de4711f7..8a0147dfed27d864d419de730499960c54bc202c 100644 (file)
@@ -86,7 +86,6 @@ static int max_devices;
 
 /* TODO: Replace these with struct ida */
 static DECLARE_BITMAP(dev_use, MAX_DEVICES);
-static DECLARE_BITMAP(name_use, MAX_DEVICES);
 
 /*
  * There is one mmc_blk_data per slot.
@@ -105,7 +104,6 @@ struct mmc_blk_data {
        unsigned int    usage;
        unsigned int    read_only;
        unsigned int    part_type;
-       unsigned int    name_idx;
        unsigned int    reset_done;
 #define MMC_BLK_READ           BIT(0)
 #define MMC_BLK_WRITE          BIT(1)
@@ -2202,19 +2200,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
                goto out;
        }
 
-       /*
-        * !subname implies we are creating main mmc_blk_data that will be
-        * associated with mmc_card with dev_set_drvdata. Due to device
-        * partitions, devidx will not coincide with a per-physical card
-        * index anymore so we keep track of a name index.
-        */
-       if (!subname) {
-               md->name_idx = find_first_zero_bit(name_use, max_devices);
-               __set_bit(md->name_idx, name_use);
-       } else
-               md->name_idx = ((struct mmc_blk_data *)
-                               dev_to_disk(parent)->private_data)->name_idx;
-
        md->area_type = area_type;
 
        /*
@@ -2264,7 +2249,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
         */
 
        snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
-                "mmcblk%u%s", md->name_idx, subname ? subname : "");
+                "mmcblk%u%s", card->host->index, subname ? subname : "");
 
        if (mmc_card_mmc(card))
                blk_queue_logical_block_size(md->queue.queue,
@@ -2418,7 +2403,6 @@ static void mmc_blk_remove_parts(struct mmc_card *card,
        struct list_head *pos, *q;
        struct mmc_blk_data *part_md;
 
-       __clear_bit(md->name_idx, name_use);
        list_for_each_safe(pos, q, &md->part) {
                part_md = list_entry(pos, struct mmc_blk_data, part);
                list_del(pos);
index 1d94607611d888d0e409a152b4f67df6724273df..6e4c55a4aab59998edd0251222cd0fb7451fae30 100644 (file)
@@ -356,11 +356,11 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
         * They have to set these according to their abilities.
         */
        host->max_segs = 1;
-       host->max_seg_size = PAGE_CACHE_SIZE;
+       host->max_seg_size = PAGE_SIZE;
 
-       host->max_req_size = PAGE_CACHE_SIZE;
+       host->max_req_size = PAGE_SIZE;
        host->max_blk_size = 512;
-       host->max_blk_count = PAGE_CACHE_SIZE / 512;
+       host->max_blk_count = PAGE_SIZE / 512;
 
        return host;
 }
index 04feea8354cba706a08aa9706a73902970c900df..e657af0e95fafce5bf65e0127540339dfbbb2ba5 100644 (file)
@@ -97,6 +97,7 @@ config MMC_RICOH_MMC
 config MMC_SDHCI_ACPI
        tristate "SDHCI support for ACPI enumerated SDHCI controllers"
        depends on MMC_SDHCI && ACPI
+       select IOSF_MBI if X86
        help
          This selects support for ACPI enumerated SDHCI controllers,
          identified by ACPI Compatibility ID PNP0D40 or specific
index 6839e41c6d585294bc75ab01a69f73e1f433b9b1..bed6a494f52c933ccfbe8218cfaf6249dd401c88 100644 (file)
 #include <linux/mmc/pm.h>
 #include <linux/mmc/slot-gpio.h>
 
+#ifdef CONFIG_X86
+#include <asm/cpu_device_id.h>
+#include <asm/iosf_mbi.h>
+#endif
+
 #include "sdhci.h"
 
 enum {
@@ -116,6 +121,75 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
        .ops = &sdhci_acpi_ops_int,
 };
 
+#ifdef CONFIG_X86
+
+static bool sdhci_acpi_byt(void)
+{
+       static const struct x86_cpu_id byt[] = {
+               { X86_VENDOR_INTEL, 6, 0x37 },
+               {}
+       };
+
+       return x86_match_cpu(byt);
+}
+
+#define BYT_IOSF_SCCEP                 0x63
+#define BYT_IOSF_OCP_NETCTRL0          0x1078
+#define BYT_IOSF_OCP_TIMEOUT_BASE      GENMASK(10, 8)
+
+static void sdhci_acpi_byt_setting(struct device *dev)
+{
+       u32 val = 0;
+
+       if (!sdhci_acpi_byt())
+               return;
+
+       if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0,
+                         &val)) {
+               dev_err(dev, "%s read error\n", __func__);
+               return;
+       }
+
+       if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE))
+               return;
+
+       val &= ~BYT_IOSF_OCP_TIMEOUT_BASE;
+
+       if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0,
+                          val)) {
+               dev_err(dev, "%s write error\n", __func__);
+               return;
+       }
+
+       dev_dbg(dev, "%s completed\n", __func__);
+}
+
+static bool sdhci_acpi_byt_defer(struct device *dev)
+{
+       if (!sdhci_acpi_byt())
+               return false;
+
+       if (!iosf_mbi_available())
+               return true;
+
+       sdhci_acpi_byt_setting(dev);
+
+       return false;
+}
+
+#else
+
+static inline void sdhci_acpi_byt_setting(struct device *dev)
+{
+}
+
+static inline bool sdhci_acpi_byt_defer(struct device *dev)
+{
+       return false;
+}
+
+#endif
+
 static int bxt_get_cd(struct mmc_host *mmc)
 {
        int gpio_cd = mmc_gpio_get_cd(mmc);
@@ -322,6 +396,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
        if (acpi_bus_get_status(device) || !device->status.present)
                return -ENODEV;
 
+       if (sdhci_acpi_byt_defer(dev))
+               return -EPROBE_DEFER;
+
        hid = acpi_device_hid(device);
        uid = device->pnp.unique_id;
 
@@ -447,6 +524,8 @@ static int sdhci_acpi_resume(struct device *dev)
 {
        struct sdhci_acpi_host *c = dev_get_drvdata(dev);
 
+       sdhci_acpi_byt_setting(&c->pdev->dev);
+
        return sdhci_resume_host(c->host);
 }
 
@@ -470,6 +549,8 @@ static int sdhci_acpi_runtime_resume(struct device *dev)
 {
        struct sdhci_acpi_host *c = dev_get_drvdata(dev);
 
+       sdhci_acpi_byt_setting(&c->pdev->dev);
+
        return sdhci_runtime_resume_host(c->host);
 }
 
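The Baytrail hunks show the standard probe-deferral pattern: sdhci_acpi_byt_defer() returns true while the IOSF sideband is not yet available, and the probe answers -EPROBE_DEFER so the driver core retries once more devices have bound. A toy sketch of the retry contract (EPROBE_DEFER is a kernel-private errno, defined here by hand):

    #include <stdio.h>
    #include <stdbool.h>

    #define EPROBE_DEFER 517        /* kernel-private errno, not in libc */

    static bool iosf_available;     /* flips true once the dependency probes */

    static int sdhci_probe(void)
    {
            if (!iosf_available)
                    return -EPROBE_DEFER;   /* ask the core to retry later */
            puts("sdhci probed");
            return 0;
    }

    int main(void)
    {
            /* the driver core keeps a deferred list and re-runs probes */
            while (sdhci_probe() == -EPROBE_DEFER) {
                    puts("probe deferred, retrying");
                    iosf_available = true;  /* pretend the sideband bound */
            }
            return 0;
    }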
index 62aa5d0efceecc81c79f36806c91386a37502ff7..79e19017343edb0eeffc1e8ac1ddcbe165994bc3 100644 (file)
@@ -390,6 +390,7 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
        slot->cd_idx = 0;
        slot->cd_override_level = true;
        if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
+           slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXTM_SD ||
            slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD)
                slot->host->mmc_host_ops.get_cd = bxt_get_cd;
 
@@ -1171,6 +1172,30 @@ static const struct pci_device_id pci_ids[] = {
                .driver_data    = (kernel_ulong_t)&sdhci_intel_byt_sd,
        },
 
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_BXTM_EMMC,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .driver_data    = (kernel_ulong_t)&sdhci_intel_byt_emmc,
+       },
+
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_BXTM_SDIO,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .driver_data    = (kernel_ulong_t)&sdhci_intel_byt_sdio,
+       },
+
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_BXTM_SD,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .driver_data    = (kernel_ulong_t)&sdhci_intel_byt_sd,
+       },
+
        {
                .vendor         = PCI_VENDOR_ID_INTEL,
                .device         = PCI_DEVICE_ID_INTEL_APL_EMMC,
index d1a0b4db60db41d67ad7d0e34652f2d13d2d3507..89e7151684a1fea26c35f27fee1c5ae2332e4eb8 100644 (file)
@@ -28,6 +28,9 @@
 #define PCI_DEVICE_ID_INTEL_BXT_SD     0x0aca
 #define PCI_DEVICE_ID_INTEL_BXT_EMMC   0x0acc
 #define PCI_DEVICE_ID_INTEL_BXT_SDIO   0x0ad0
+#define PCI_DEVICE_ID_INTEL_BXTM_SD    0x1aca
+#define PCI_DEVICE_ID_INTEL_BXTM_EMMC  0x1acc
+#define PCI_DEVICE_ID_INTEL_BXTM_SDIO  0x1ad0
 #define PCI_DEVICE_ID_INTEL_APL_SD     0x5aca
 #define PCI_DEVICE_ID_INTEL_APL_EMMC   0x5acc
 #define PCI_DEVICE_ID_INTEL_APL_SDIO   0x5ad0
index aca439d3ca83fde1b0e63832c15d4eb5fdf9a971..30132500aa1c83aec0c501354d7b9fb4c1fae1a7 100644 (file)
@@ -309,8 +309,30 @@ static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
                __func__, uhs, ctrl_2);
 }
 
+static void pxav3_set_power(struct sdhci_host *host, unsigned char mode,
+                           unsigned short vdd)
+{
+       struct mmc_host *mmc = host->mmc;
+       u8 pwr = host->pwr;
+
+       sdhci_set_power(host, mode, vdd);
+
+       if (host->pwr == pwr)
+               return;
+
+       if (host->pwr == 0)
+               vdd = 0;
+
+       if (!IS_ERR(mmc->supply.vmmc)) {
+               spin_unlock_irq(&host->lock);
+               mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+               spin_lock_irq(&host->lock);
+       }
+}
+
 static const struct sdhci_ops pxav3_sdhci_ops = {
        .set_clock = sdhci_set_clock,
+       .set_power = pxav3_set_power,
        .platform_send_init_74_clocks = pxav3_gen_init_74_clocks,
        .get_max_clock = sdhci_pltfm_clk_get_max_clock,
        .set_bus_width = sdhci_set_bus_width,
index f8c4762bb48dbff5a2a826e57352f1dfc12d05bc..bcc0de47fe7e1833254c1e8fcb95eed112c42031 100644 (file)
@@ -382,14 +382,6 @@ static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
        .pdata = &sdhci_tegra114_pdata,
 };
 
-static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
-       .pdata = &sdhci_tegra114_pdata,
-       .nvquirks = NVQUIRK_ENABLE_SDR50 |
-                   NVQUIRK_ENABLE_DDR50 |
-                   NVQUIRK_ENABLE_SDR104 |
-                   NVQUIRK_HAS_PADCALIB,
-};
-
 static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
        .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
                  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
@@ -407,7 +399,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
 
 static const struct of_device_id sdhci_tegra_dt_match[] = {
        { .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
-       { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
+       { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra114 },
        { .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
        { .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
        { .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
index 8670f162dec7452153625f589a3d24aec22e889d..6bd3d1794966d13b921751e32a5c670b56799770 100644 (file)
@@ -1210,10 +1210,24 @@ clock_set:
 }
 EXPORT_SYMBOL_GPL(sdhci_set_clock);
 
-static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
-                           unsigned short vdd)
+static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
+                               unsigned short vdd)
 {
        struct mmc_host *mmc = host->mmc;
+
+       spin_unlock_irq(&host->lock);
+       mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+       spin_lock_irq(&host->lock);
+
+       if (mode != MMC_POWER_OFF)
+               sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
+       else
+               sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+}
+
+void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
+                    unsigned short vdd)
+{
        u8 pwr = 0;
 
        if (mode != MMC_POWER_OFF) {
@@ -1245,7 +1259,6 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
                sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
                if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
                        sdhci_runtime_pm_bus_off(host);
-               vdd = 0;
        } else {
                /*
                 * Spec says that we should clear the power reg before setting
@@ -1276,12 +1289,20 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
                if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
                        mdelay(10);
        }
+}
+EXPORT_SYMBOL_GPL(sdhci_set_power);
 
-       if (!IS_ERR(mmc->supply.vmmc)) {
-               spin_unlock_irq(&host->lock);
-               mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
-               spin_lock_irq(&host->lock);
-       }
+static void __sdhci_set_power(struct sdhci_host *host, unsigned char mode,
+                             unsigned short vdd)
+{
+       struct mmc_host *mmc = host->mmc;
+
+       if (host->ops->set_power)
+               host->ops->set_power(host, mode, vdd);
+       else if (!IS_ERR(mmc->supply.vmmc))
+               sdhci_set_power_reg(host, mode, vdd);
+       else
+               sdhci_set_power(host, mode, vdd);
 }
 
 /*****************************************************************************\
@@ -1431,7 +1452,7 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
                }
        }
 
-       sdhci_set_power(host, ios->power_mode, ios->vdd);
+       __sdhci_set_power(host, ios->power_mode, ios->vdd);
 
        if (host->ops->platform_send_init_74_clocks)
                host->ops->platform_send_init_74_clocks(host, ios->power_mode);
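
For context, the refactoring above splits power handling three ways: a driver-supplied
.set_power callback wins, a vmmc regulator falls back to sdhci_set_power_reg(), and
everything else uses the now-exported sdhci_set_power(). A minimal sketch of a host
driver hooking the new callback (the foo_ names are illustrative, not part of this series):

	/* Hypothetical driver callback: sync an external regulator, then
	 * run the generic SDHCI power-register sequence.
	 */
	static void foo_sdhci_set_power(struct sdhci_host *host,
					unsigned char mode, unsigned short vdd)
	{
		struct mmc_host *mmc = host->mmc;

		if (!IS_ERR(mmc->supply.vmmc)) {
			spin_unlock_irq(&host->lock);
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
			spin_lock_irq(&host->lock);
		}

		sdhci_set_power(host, mode, vdd);
	}

	/* wired up via struct sdhci_ops: .set_power = foo_sdhci_set_power, */
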
index 3bd28033dbd94aa021cbea850ffac71d763edd2b..0f39f4f84d10f2f9a49541701cecb9b105e56100 100644 (file)
@@ -529,6 +529,8 @@ struct sdhci_ops {
 #endif
 
        void    (*set_clock)(struct sdhci_host *host, unsigned int clock);
+       void    (*set_power)(struct sdhci_host *host, unsigned char mode,
+                            unsigned short vdd);
 
        int             (*enable_dma)(struct sdhci_host *host);
        unsigned int    (*get_max_clock)(struct sdhci_host *host);
@@ -660,6 +662,8 @@ static inline bool sdhci_sdio_irq_enabled(struct sdhci_host *host)
 }
 
 void sdhci_set_clock(struct sdhci_host *host, unsigned int clock);
+void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
+                    unsigned short vdd);
 void sdhci_set_bus_width(struct sdhci_host *host, int width);
 void sdhci_reset(struct sdhci_host *host, u8 mask);
 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
index 8d870ce9f9442ef9c672e3167f59660337c4cbc9..d9a655f47d4165448e34bf79c5cc2fcb1449e755 100644 (file)
@@ -1513,7 +1513,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
                mmc->caps |= pd->caps;
        mmc->max_segs = 32;
        mmc->max_blk_size = 512;
-       mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
+       mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
        mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
        mmc->max_seg_size = mmc->max_req_size;
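
The PAGE_CACHE_* conversions in this and the following files are purely mechanical:
until this cycle the page-cache granularity macros in include/linux/pagemap.h were
plain aliases for the base-page ones, roughly:

	#define PAGE_CACHE_SHIFT	PAGE_SHIFT
	#define PAGE_CACHE_SIZE		PAGE_SIZE
	#define PAGE_CACHE_MASK		PAGE_MASK
	#define page_cache_release(page)	put_page(page)

so spelling out PAGE_SIZE and put_page() directly changes no behavior.
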
 
index 8372a413848c19f8ed61e8c38e6a3b8ebb0a4ba7..7fc8b7aa83f029eb199f1accd3de9d47ea12b152 100644 (file)
@@ -1129,6 +1129,11 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
                                  MMC_CAP_1_8V_DDR |
                                  MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ;
 
+       /* TODO MMC DDR is not working on A80 */
+       if (of_device_is_compatible(pdev->dev.of_node,
+                                   "allwinner,sun9i-a80-mmc"))
+               mmc->caps &= ~MMC_CAP_1_8V_DDR;
+
        ret = mmc_of_parse(mmc);
        if (ret)
                goto error_free_dma;
index 675435873823126170602156ff99ca385e451d18..7fb0c034dcb654d90691316f21b674cf565fa8e8 100644 (file)
@@ -63,7 +63,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
                }
        }
 
-       if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
+       if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
                          (align & PAGE_MASK))) || !multiple) {
                ret = -EINVAL;
                goto pio;
@@ -133,7 +133,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
                }
        }
 
-       if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
+       if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
                          (align & PAGE_MASK))) || !multiple) {
                ret = -EINVAL;
                goto pio;
index 03f6e74c190691c74e4845bdd94401710baaed5c..0521b466274804d03718f592b7b49a9ee3670037 100644 (file)
@@ -1125,7 +1125,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
        mmc->caps2 |= pdata->capabilities2;
        mmc->max_segs = 32;
        mmc->max_blk_size = 512;
-       mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
+       mmc->max_blk_count = (PAGE_SIZE / mmc->max_blk_size) *
                mmc->max_segs;
        mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
        mmc->max_seg_size = mmc->max_req_size;
index b2752fe711f2956d52e26d8d30c751919fcbde3e..807c06e203c39cc3d672700333e818e7b730c3d7 100644 (file)
@@ -1789,7 +1789,7 @@ static int usdhi6_probe(struct platform_device *pdev)
        /* Set .max_segs to some random number. Feel free to adjust. */
        mmc->max_segs = 32;
        mmc->max_blk_size = 512;
-       mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
+       mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
        mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
        /*
         * Setting .max_seg_size to 1 page would simplify our page-mapping code,
index e2c0057737e67473126f00af3d693ab790c612a9..7c887f111a7d03eaeb41dd4bdb6bce67bf81609b 100644 (file)
@@ -75,7 +75,7 @@ static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
                                break;
                        }
 
-               page_cache_release(page);
+               put_page(page);
                pages--;
                index++;
        }
@@ -124,7 +124,7 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
                        return PTR_ERR(page);
 
                memcpy(buf, page_address(page) + offset, cpylen);
-               page_cache_release(page);
+               put_page(page);
 
                if (retlen)
                        *retlen += cpylen;
@@ -164,7 +164,7 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
                        unlock_page(page);
                        balance_dirty_pages_ratelimited(mapping);
                }
-               page_cache_release(page);
+               put_page(page);
 
                if (retlen)
                        *retlen += cpylen;
index b6facac54fc0f18c30dc7adced01223d1f076c9a..557b8462f55ed6f46961d2201affedd2c2746401 100644 (file)
@@ -4009,7 +4009,6 @@ static int nand_dt_init(struct nand_chip *chip)
  * This is the first phase of the normal nand_scan() function. It reads the
  * flash ID and sets up MTD fields accordingly.
  *
- * The mtd->owner field must be set to the module of the caller.
  */
 int nand_scan_ident(struct mtd_info *mtd, int maxchips,
                    struct nand_flash_dev *table)
@@ -4429,19 +4428,12 @@ EXPORT_SYMBOL(nand_scan_tail);
  *
  * This fills out all the uninitialized function pointers with the defaults.
  * The flash ID is read and the mtd/chip structures are filled with the
- * appropriate values. The mtd->owner field must be set to the module of the
- * caller.
+ * appropriate values.
  */
 int nand_scan(struct mtd_info *mtd, int maxchips)
 {
        int ret;
 
-       /* Many callers got this wrong, so check for it for a while... */
-       if (!mtd->owner && caller_is_module()) {
-               pr_crit("%s called with NULL mtd->owner!\n", __func__);
-               BUG();
-       }
-
        ret = nand_scan_ident(mtd, maxchips, NULL);
        if (!ret)
                ret = nand_scan_tail(mtd);
index 1fd519503bb17b4ea3d76ede948437a265178f1f..a58169a28741e7e0f3db85b77170594de299fd87 100644 (file)
@@ -1339,7 +1339,7 @@ static void put_pages(struct nandsim *ns)
        int i;
 
        for (i = 0; i < ns->held_cnt; i++)
-               page_cache_release(ns->held_pages[i]);
+               put_page(ns->held_pages[i]);
 }
 
 /* Get page cache pages in advance to provide NOFS memory allocation */
@@ -1349,8 +1349,8 @@ static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t
        struct page *page;
        struct address_space *mapping = file->f_mapping;
 
-       start_index = pos >> PAGE_CACHE_SHIFT;
-       end_index = (pos + count - 1) >> PAGE_CACHE_SHIFT;
+       start_index = pos >> PAGE_SHIFT;
+       end_index = (pos + count - 1) >> PAGE_SHIFT;
        if (end_index - start_index + 1 > NS_MAX_HELD_PAGES)
                return -EINVAL;
        ns->held_cnt = 0;
index 2a1ba62b7da20ff921ec36005a8b4d40ec17756b..0c5415b05ea945f39da2482ca8b34fad91ff3c31 100644 (file)
@@ -62,9 +62,8 @@ config DUMMY
          this device is consigned into oblivion) with a configurable IP
          address. It is most commonly used in order to make your currently
          inactive SLIP address seem like a real address for local programs.
-         If you use SLIP or PPP, you might want to say Y here. Since this
-         thing often comes in handy, the default is Y. It won't enlarge your
-         kernel either. What a deal. Read about it in the Network
+         If you use SLIP or PPP, you might want to say Y here. It won't
+         enlarge your kernel. What a deal. Read about it in the Network
          Administrator's Guide, available from
          <http://www.tldp.org/docs.html#guide>.
 
@@ -193,8 +192,26 @@ config GENEVE
          To compile this driver as a module, choose M here: the module
          will be called geneve.
 
+config GTP
+       tristate "GPRS Tunneling Protocol datapath (GTP-U)"
+       depends on INET && NET_UDP_TUNNEL
+       select NET_IP_TUNNEL
+       ---help---
+         This allows one to create gtp virtual interfaces that provide
+         the GPRS Tunneling Protocol datapath (GTP-U). This tunneling protocol
+         is used to prevent subscribers from accessing mobile carrier core
+         network infrastructure. This driver requires userspace software that
+         implements the signaling protocol (GTP-C) to update its PDP context
+         base, such as OpenGGSN <http://git.osmocom.org/openggsn/>. This
+         tunneling protocol is implemented according to the GSM TS 09.60 and
+         3GPP TS 29.060 standards.
+
+         To compile this driver as a module, choose M here: the module
+         will be called gtp.
+
 config MACSEC
        tristate "IEEE 802.1AE MAC-level encryption (MACsec)"
+       select CRYPTO
        select CRYPTO_AES
        select CRYPTO_GCM
        ---help---
index 1aa7cb845663152c272633667d3b1affe619f3dd..7336cbd3ef5d94d4d28a6e2913e0e8439b205bf3 100644 (file)
@@ -25,6 +25,7 @@ obj-$(CONFIG_VETH) += veth.o
 obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
 obj-$(CONFIG_VXLAN) += vxlan.o
 obj-$(CONFIG_GENEVE) += geneve.o
+obj-$(CONFIG_GTP) += gtp.o
 obj-$(CONFIG_NLMON) += nlmon.o
 obj-$(CONFIG_NET_VRF) += vrf.o
 
index 7f2a032c354c2c210af55c23c72f6ec1fcd9b4ea..1b2e9217ec789a4b338690a9e065d06c1d1b07f5 100644 (file)
@@ -861,7 +861,7 @@ static void cops_timeout(struct net_device *dev)
        }
        printk(KERN_WARNING "%s: Transmit timed out.\n", dev->name);
        cops_jumpstart(dev);    /* Restart the card. */
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        netif_wake_queue(dev);
 }
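
The dev->trans_start updates replaced here and in the drivers below go through the
new netif_trans_update() helper. A sketch of its shape in <linux/netdevice.h> this
cycle (trans_start now lives in the per-queue struct, which is what motivated the
helper):

	static inline void netif_trans_update(struct net_device *dev)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

		if (txq->trans_start != jiffies)
			txq->trans_start = jiffies;
	}
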
 
index 0d9b45ff1bb21bc7878c251c63082a5400a7d93b..81f90c4703ae266eb6c894bf636059ee389d4d6e 100644 (file)
@@ -433,7 +433,7 @@ static void __init com90xx_probe(void)
        kfree(iomem);
 }
 
-static int check_mirror(unsigned long addr, size_t size)
+static int __init check_mirror(unsigned long addr, size_t size)
 {
        void __iomem *p;
        int res = -1;
index 141c2a42d7edad6f9e833d7ab56068d7e2dd71bb..910c12e2638e3615084ca9f6a148030c8f2358e1 100644 (file)
@@ -696,11 +696,17 @@ int can_change_mtu(struct net_device *dev, int new_mtu)
        /* allow change of MTU according to the CANFD ability of the device */
        switch (new_mtu) {
        case CAN_MTU:
+               /* 'CANFD-only' controllers can not switch to CAN_MTU */
+               if (priv->ctrlmode_static & CAN_CTRLMODE_FD)
+                       return -EINVAL;
+
                priv->ctrlmode &= ~CAN_CTRLMODE_FD;
                break;
 
        case CANFD_MTU:
-               if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD))
+               /* check for potential CANFD ability */
+               if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) &&
+                   !(priv->ctrlmode_static & CAN_CTRLMODE_FD))
                        return -EINVAL;
 
                priv->ctrlmode |= CAN_CTRLMODE_FD;
@@ -782,6 +788,35 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
                                = { .len = sizeof(struct can_bittiming_const) },
 };
 
+static int can_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+       bool is_can_fd = false;
+
+       /* Make sure that valid CAN FD configurations always consist of
+        * - nominal/arbitration bittiming
+        * - data bittiming
+        * - control mode with CAN_CTRLMODE_FD set
+        */
+
+       if (data[IFLA_CAN_CTRLMODE]) {
+               struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+
+               is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
+       }
+
+       if (is_can_fd) {
+               if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING])
+                       return -EOPNOTSUPP;
+       }
+
+       if (data[IFLA_CAN_DATA_BITTIMING]) {
+               if (!is_can_fd || !data[IFLA_CAN_BITTIMING])
+                       return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
 static int can_changelink(struct net_device *dev,
                          struct nlattr *tb[], struct nlattr *data[])
 {
@@ -813,19 +848,31 @@ static int can_changelink(struct net_device *dev,
 
        if (data[IFLA_CAN_CTRLMODE]) {
                struct can_ctrlmode *cm;
+               u32 ctrlstatic;
+               u32 maskedflags;
 
                /* Do not allow changing controller mode while running */
                if (dev->flags & IFF_UP)
                        return -EBUSY;
                cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+               ctrlstatic = priv->ctrlmode_static;
+               maskedflags = cm->flags & cm->mask;
+
+               /* check whether provided bits are allowed to be passed */
+               if (cm->mask & ~(priv->ctrlmode_supported | ctrlstatic))
+                       return -EOPNOTSUPP;
+
+               /* do not check for static fd-non-iso if 'fd' is disabled */
+               if (!(maskedflags & CAN_CTRLMODE_FD))
+                       ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
 
-               /* check whether changed bits are allowed to be modified */
-               if (cm->mask & ~priv->ctrlmode_supported)
+               /* make sure static options are provided by configuration */
+               if ((maskedflags & ctrlstatic) != ctrlstatic)
                        return -EOPNOTSUPP;
 
                /* clear bits to be modified and copy the flag values */
                priv->ctrlmode &= ~cm->mask;
-               priv->ctrlmode |= (cm->flags & cm->mask);
+               priv->ctrlmode |= maskedflags;
 
                /* CAN_CTRLMODE_FD can only be set when driver supports FD */
                if (priv->ctrlmode & CAN_CTRLMODE_FD)
@@ -966,6 +1013,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
        .maxtype        = IFLA_CAN_MAX,
        .policy         = can_policy,
        .setup          = can_setup,
+       .validate       = can_validate,
        .newlink        = can_newlink,
        .changelink     = can_changelink,
        .get_size       = can_get_size,
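
A worked example of the new static-mode check in can_changelink() (values
illustrative): on hardware that fixes CAN_CTRLMODE_FD_NON_ISO, enabling FD alone
must now fail:

	/* ctrlstatic  = CAN_CTRLMODE_FD_NON_ISO;  fixed by the hardware
	 * maskedflags = CAN_CTRLMODE_FD;          user enabled FD only
	 *
	 * FD is requested, so FD_NON_ISO stays in ctrlstatic, and
	 * (maskedflags & ctrlstatic) != ctrlstatic   => -EOPNOTSUPP
	 *
	 * Requesting CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO succeeds.
	 */
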
index a1bd54ffd31ee5732bb6c12ca6e0f5335a37a324..2d1d22eec750de937a9a2f615064d71f92e136e4 100644 (file)
@@ -34,6 +34,7 @@
 #define IFI_CANFD_STCMD_LOOPBACK               BIT(18)
 #define IFI_CANFD_STCMD_DISABLE_CANFD          BIT(24)
 #define IFI_CANFD_STCMD_ENABLE_ISO             BIT(25)
+#define IFI_CANFD_STCMD_ENABLE_7_9_8_8_TIMING  BIT(26)
 #define IFI_CANFD_STCMD_NORMAL_MODE            ((u32)BIT(31))
 
 #define IFI_CANFD_RXSTCMD                      0x4
@@ -51,7 +52,8 @@
 #define IFI_CANFD_TXSTCMD_OVERFLOW             BIT(13)
 
 #define IFI_CANFD_INTERRUPT                    0xc
-#define IFI_CANFD_INTERRUPT_ERROR_WARNING      ((u32)BIT(1))
+#define IFI_CANFD_INTERRUPT_ERROR_WARNING      BIT(1)
+#define IFI_CANFD_INTERRUPT_ERROR_COUNTER      BIT(10)
 #define IFI_CANFD_INTERRUPT_TXFIFO_EMPTY       BIT(16)
 #define IFI_CANFD_INTERRUPT_TXFIFO_REMOVE      BIT(22)
 #define IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY      BIT(24)
 #define IFI_CANFD_TIME_TIMEB_OFF               0
 #define IFI_CANFD_TIME_TIMEA_OFF               8
 #define IFI_CANFD_TIME_PRESCALE_OFF            16
-#define IFI_CANFD_TIME_SJW_OFF_ISO             25
-#define IFI_CANFD_TIME_SJW_OFF_BOSCH           28
-#define IFI_CANFD_TIME_SET_SJW_BOSCH           BIT(6)
-#define IFI_CANFD_TIME_SET_TIMEB_BOSCH         BIT(7)
-#define IFI_CANFD_TIME_SET_PRESC_BOSCH         BIT(14)
-#define IFI_CANFD_TIME_SET_TIMEA_BOSCH         BIT(15)
+#define IFI_CANFD_TIME_SJW_OFF_7_9_8_8         25
+#define IFI_CANFD_TIME_SJW_OFF_4_12_6_6                28
+#define IFI_CANFD_TIME_SET_SJW_4_12_6_6                BIT(6)
+#define IFI_CANFD_TIME_SET_TIMEB_4_12_6_6      BIT(7)
+#define IFI_CANFD_TIME_SET_PRESC_4_12_6_6      BIT(14)
+#define IFI_CANFD_TIME_SET_TIMEA_4_12_6_6      BIT(15)
 
 #define IFI_CANFD_TDELAY                       0x1c
 
 
 #define IFI_CANFD_RES1                         0x40
 
-#define IFI_CANFD_RES2                         0x44
+#define IFI_CANFD_ERROR_CTR                    0x44
+#define IFI_CANFD_ERROR_CTR_UNLOCK_MAGIC       0x21302899
+#define IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST     BIT(0)
+#define IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST    BIT(1)
+#define IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST   BIT(2)
+#define IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST   BIT(3)
+#define IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST  BIT(4)
+#define IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST    BIT(5)
+#define IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST   BIT(6)
+#define IFI_CANFD_ERROR_CTR_OVERLOAD_ALL       BIT(8)
+#define IFI_CANFD_ERROR_CTR_ACK_ERROR_ALL      BIT(9)
+#define IFI_CANFD_ERROR_CTR_BIT0_ERROR_ALL     BIT(10)
+#define IFI_CANFD_ERROR_CTR_BIT1_ERROR_ALL     BIT(11)
+#define IFI_CANFD_ERROR_CTR_STUFF_ERROR_ALL    BIT(12)
+#define IFI_CANFD_ERROR_CTR_CRC_ERROR_ALL      BIT(13)
+#define IFI_CANFD_ERROR_CTR_FORM_ERROR_ALL     BIT(14)
+#define IFI_CANFD_ERROR_CTR_BITPOSITION_OFFSET 16
+#define IFI_CANFD_ERROR_CTR_BITPOSITION_MASK   0xff
+#define IFI_CANFD_ERROR_CTR_ER_RESET           BIT(30)
+#define IFI_CANFD_ERROR_CTR_ER_ENABLE          ((u32)BIT(31))
 
 #define IFI_CANFD_PAR                          0x48
 
@@ -196,6 +217,8 @@ static void ifi_canfd_irq_enable(struct net_device *ndev, bool enable)
        if (enable) {
                enirq = IFI_CANFD_IRQMASK_TXFIFO_EMPTY |
                        IFI_CANFD_IRQMASK_RXFIFO_NEMPTY;
+               if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+                       enirq |= IFI_CANFD_INTERRUPT_ERROR_COUNTER;
        }
 
        writel(IFI_CANFD_IRQMASK_SET_ERR |
@@ -334,6 +357,68 @@ static int ifi_canfd_handle_lost_msg(struct net_device *ndev)
        return 1;
 }
 
+static int ifi_canfd_handle_lec_err(struct net_device *ndev, const u32 errctr)
+{
+       struct ifi_canfd_priv *priv = netdev_priv(ndev);
+       struct net_device_stats *stats = &ndev->stats;
+       struct can_frame *cf;
+       struct sk_buff *skb;
+       const u32 errmask = IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST |
+                           IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST |
+                           IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST |
+                           IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST |
+                           IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST |
+                           IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST |
+                           IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST;
+
+       if (!(errctr & errmask))        /* No error happened. */
+               return 0;
+
+       priv->can.can_stats.bus_error++;
+       stats->rx_errors++;
+
+       /* Propagate the error condition to the CAN stack. */
+       skb = alloc_can_err_skb(ndev, &cf);
+       if (unlikely(!skb))
+               return 0;
+
+       /* Read the error counter register and check for new errors. */
+       cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+       if (errctr & IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST)
+               cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
+
+       if (errctr & IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST)
+               cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+
+       if (errctr & IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST)
+               cf->data[2] |= CAN_ERR_PROT_BIT0;
+
+       if (errctr & IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST)
+               cf->data[2] |= CAN_ERR_PROT_BIT1;
+
+       if (errctr & IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST)
+               cf->data[2] |= CAN_ERR_PROT_STUFF;
+
+       if (errctr & IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST)
+               cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+
+       if (errctr & IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST)
+               cf->data[2] |= CAN_ERR_PROT_FORM;
+
+       /* Reset the error counter, ack the IRQ and re-enable the counter. */
+       writel(IFI_CANFD_ERROR_CTR_ER_RESET, priv->base + IFI_CANFD_ERROR_CTR);
+       writel(IFI_CANFD_INTERRUPT_ERROR_COUNTER,
+              priv->base + IFI_CANFD_INTERRUPT);
+       writel(IFI_CANFD_ERROR_CTR_ER_ENABLE, priv->base + IFI_CANFD_ERROR_CTR);
+
+       stats->rx_packets++;
+       stats->rx_bytes += cf->can_dlc;
+       netif_receive_skb(skb);
+
+       return 1;
+}
+
 static int ifi_canfd_get_berr_counter(const struct net_device *ndev,
                                      struct can_berr_counter *bec)
 {
@@ -469,6 +554,7 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)
 
        u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
        u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD);
+       u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR);
 
        /* Handle bus state changes */
        if ((stcmd & stcmd_state_mask) ||
@@ -479,6 +565,10 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)
        if (rxstcmd & IFI_CANFD_RXSTCMD_OVERFLOW)
                work_done += ifi_canfd_handle_lost_msg(ndev);
 
+       /* Handle lec errors on the bus */
+       if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+               work_done += ifi_canfd_handle_lec_err(ndev, errctr);
+
        /* Handle normal messages on RX */
        if (!(rxstcmd & IFI_CANFD_RXSTCMD_EMPTY))
                work_done += ifi_canfd_do_rx_poll(ndev, quota - work_done);
@@ -497,11 +587,13 @@ static irqreturn_t ifi_canfd_isr(int irq, void *dev_id)
        struct ifi_canfd_priv *priv = netdev_priv(ndev);
        struct net_device_stats *stats = &ndev->stats;
        const u32 rx_irq_mask = IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY |
-                               IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER;
+                               IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER |
+                               IFI_CANFD_INTERRUPT_ERROR_WARNING |
+                               IFI_CANFD_INTERRUPT_ERROR_COUNTER;
        const u32 tx_irq_mask = IFI_CANFD_INTERRUPT_TXFIFO_EMPTY |
                                IFI_CANFD_INTERRUPT_TXFIFO_REMOVE;
-       const u32 clr_irq_mask = ~(IFI_CANFD_INTERRUPT_SET_IRQ |
-                                  IFI_CANFD_INTERRUPT_ERROR_WARNING);
+       const u32 clr_irq_mask = ~((u32)(IFI_CANFD_INTERRUPT_SET_IRQ |
+                                        IFI_CANFD_INTERRUPT_ERROR_WARNING));
        u32 isr;
 
        isr = readl(priv->base + IFI_CANFD_INTERRUPT);
@@ -513,44 +605,34 @@ static irqreturn_t ifi_canfd_isr(int irq, void *dev_id)
        /* Clear all pending interrupts but ErrWarn */
        writel(clr_irq_mask, priv->base + IFI_CANFD_INTERRUPT);
 
-       /* RX IRQ, start NAPI */
+       /* RX IRQ or bus warning, start NAPI */
        if (isr & rx_irq_mask) {
                ifi_canfd_irq_enable(ndev, 0);
                napi_schedule(&priv->napi);
        }
 
        /* TX IRQ */
-       if (isr & tx_irq_mask) {
+       if (isr & IFI_CANFD_INTERRUPT_TXFIFO_REMOVE) {
                stats->tx_bytes += can_get_echo_skb(ndev, 0);
                stats->tx_packets++;
                can_led_event(ndev, CAN_LED_EVENT_TX);
-               netif_wake_queue(ndev);
        }
 
+       if (isr & tx_irq_mask)
+               netif_wake_queue(ndev);
+
        return IRQ_HANDLED;
 }
 
 static const struct can_bittiming_const ifi_canfd_bittiming_const = {
        .name           = KBUILD_MODNAME,
        .tseg1_min      = 1,    /* Time segment 1 = prop_seg + phase_seg1 */
-       .tseg1_max      = 64,
-       .tseg2_min      = 2,    /* Time segment 2 = phase_seg2 */
-       .tseg2_max      = 64,
-       .sjw_max        = 16,
-       .brp_min        = 2,
-       .brp_max        = 256,
-       .brp_inc        = 1,
-};
-
-static const struct can_bittiming_const ifi_canfd_data_bittiming_const = {
-       .name           = KBUILD_MODNAME,
-       .tseg1_min      = 1,    /* Time segment 1 = prop_seg + phase_seg1 */
-       .tseg1_max      = 64,
+       .tseg1_max      = 256,
        .tseg2_min      = 2,    /* Time segment 2 = phase_seg2 */
-       .tseg2_max      = 64,
-       .sjw_max        = 16,
+       .tseg2_max      = 256,
+       .sjw_max        = 128,
        .brp_min        = 2,
-       .brp_max        = 256,
+       .brp_max        = 512,
        .brp_inc        = 1,
 };
 
@@ -560,19 +642,6 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev)
        const struct can_bittiming *bt = &priv->can.bittiming;
        const struct can_bittiming *dbt = &priv->can.data_bittiming;
        u16 brp, sjw, tseg1, tseg2;
-       u32 noniso_arg = 0;
-       u32 time_off;
-
-       if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) &&
-           !(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)) {
-               time_off = IFI_CANFD_TIME_SJW_OFF_ISO;
-       } else {
-               noniso_arg = IFI_CANFD_TIME_SET_TIMEB_BOSCH |
-                            IFI_CANFD_TIME_SET_TIMEA_BOSCH |
-                            IFI_CANFD_TIME_SET_PRESC_BOSCH |
-                            IFI_CANFD_TIME_SET_SJW_BOSCH;
-               time_off = IFI_CANFD_TIME_SJW_OFF_BOSCH;
-       }
 
        /* Configure bit timing */
        brp = bt->brp - 2;
@@ -582,8 +651,7 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev)
        writel((tseg2 << IFI_CANFD_TIME_TIMEB_OFF) |
               (tseg1 << IFI_CANFD_TIME_TIMEA_OFF) |
               (brp << IFI_CANFD_TIME_PRESCALE_OFF) |
-              (sjw << time_off) |
-              noniso_arg,
+              (sjw << IFI_CANFD_TIME_SJW_OFF_7_9_8_8),
               priv->base + IFI_CANFD_TIME);
 
        /* Configure data bit timing */
@@ -594,8 +662,7 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev)
        writel((tseg2 << IFI_CANFD_TIME_TIMEB_OFF) |
               (tseg1 << IFI_CANFD_TIME_TIMEA_OFF) |
               (brp << IFI_CANFD_TIME_PRESCALE_OFF) |
-              (sjw << time_off) |
-              noniso_arg,
+              (sjw << IFI_CANFD_TIME_SJW_OFF_7_9_8_8),
               priv->base + IFI_CANFD_FTIME);
 }
 
@@ -640,7 +707,8 @@ static void ifi_canfd_start(struct net_device *ndev)
 
        /* Reset the IP */
        writel(IFI_CANFD_STCMD_HARDRESET, priv->base + IFI_CANFD_STCMD);
-       writel(0, priv->base + IFI_CANFD_STCMD);
+       writel(IFI_CANFD_STCMD_ENABLE_7_9_8_8_TIMING,
+              priv->base + IFI_CANFD_STCMD);
 
        ifi_canfd_set_bittiming(ndev);
        ifi_canfd_set_filters(ndev);
@@ -659,7 +727,8 @@ static void ifi_canfd_start(struct net_device *ndev)
        writel((u32)(~IFI_CANFD_INTERRUPT_SET_IRQ),
               priv->base + IFI_CANFD_INTERRUPT);
 
-       stcmd = IFI_CANFD_STCMD_ENABLE | IFI_CANFD_STCMD_NORMAL_MODE;
+       stcmd = IFI_CANFD_STCMD_ENABLE | IFI_CANFD_STCMD_NORMAL_MODE |
+               IFI_CANFD_STCMD_ENABLE_7_9_8_8_TIMING;
 
        if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
                stcmd |= IFI_CANFD_STCMD_BUSMONITOR;
@@ -667,16 +736,23 @@ static void ifi_canfd_start(struct net_device *ndev)
        if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
                stcmd |= IFI_CANFD_STCMD_LOOPBACK;
 
-       if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
+       if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) &&
+           !(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO))
                stcmd |= IFI_CANFD_STCMD_ENABLE_ISO;
 
-       if (!(priv->can.ctrlmode & (CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO)))
+       if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD))
                stcmd |= IFI_CANFD_STCMD_DISABLE_CANFD;
 
        priv->can.state = CAN_STATE_ERROR_ACTIVE;
 
        ifi_canfd_irq_enable(ndev, 1);
 
+       /* Unlock, reset and enable the error counter. */
+       writel(IFI_CANFD_ERROR_CTR_UNLOCK_MAGIC,
+              priv->base + IFI_CANFD_ERROR_CTR);
+       writel(IFI_CANFD_ERROR_CTR_ER_RESET, priv->base + IFI_CANFD_ERROR_CTR);
+       writel(IFI_CANFD_ERROR_CTR_ER_ENABLE, priv->base + IFI_CANFD_ERROR_CTR);
+
        /* Enable controller */
        writel(stcmd, priv->base + IFI_CANFD_STCMD);
 }
@@ -685,6 +761,10 @@ static void ifi_canfd_stop(struct net_device *ndev)
 {
        struct ifi_canfd_priv *priv = netdev_priv(ndev);
 
+       /* Reset and disable the error counter. */
+       writel(IFI_CANFD_ERROR_CTR_ER_RESET, priv->base + IFI_CANFD_ERROR_CTR);
+       writel(0, priv->base + IFI_CANFD_ERROR_CTR);
+
        /* Reset the IP */
        writel(IFI_CANFD_STCMD_HARDRESET, priv->base + IFI_CANFD_STCMD);
 
@@ -877,7 +957,7 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
        priv->can.clock.freq = readl(addr + IFI_CANFD_CANCLOCK);
 
        priv->can.bittiming_const       = &ifi_canfd_bittiming_const;
-       priv->can.data_bittiming_const  = &ifi_canfd_data_bittiming_const;
+       priv->can.data_bittiming_const  = &ifi_canfd_bittiming_const;
        priv->can.do_set_mode           = ifi_canfd_set_mode;
        priv->can.do_get_berr_counter   = ifi_canfd_get_berr_counter;
 
@@ -888,7 +968,8 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
        priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
                                       CAN_CTRLMODE_LISTENONLY |
                                       CAN_CTRLMODE_FD |
-                                      CAN_CTRLMODE_FD_NON_ISO;
+                                      CAN_CTRLMODE_FD_NON_ISO |
+                                      CAN_CTRLMODE_BERR_REPORTING;
 
        platform_set_drvdata(pdev, ndev);
        SET_NETDEV_DEV(ndev, dev);
index 5d04f5464faf29a8b1c99dcc8bae86becbd1c485..f13bb8d9bb8429e46be9d950df27e016ce450d26 100644 (file)
@@ -84,6 +84,7 @@
 #define MSG_COFFREQ            0x42
 #define MSG_CONREQ             0x43
 #define MSG_CCONFREQ           0x47
+#define MSG_NMTS               0xb0
 #define MSG_LMTS               0xb4
 
 /*
 
 #define ICAN3_CAN_DLC_MASK     0x0f
 
+/* Janz ICAN3 NMTS subtypes */
+#define NMTS_CREATE_NODE_REQ   0x0
+#define NMTS_SLAVE_STATE_IND   0x8
+#define NMTS_SLAVE_EVENT_IND   0x9
+
+/* Janz ICAN3 LMTS subtypes */
+#define LMTS_BUSON_REQ         0x0
+#define LMTS_BUSOFF_REQ                0x1
+#define LMTS_CAN_CONF_REQ      0x2
+
+/* Janz ICAN3 NMTS Event indications */
+#define NE_LOCAL_OCCURRED      0x3
+#define NE_LOCAL_RESOLVED      0x2
+#define NE_REMOTE_OCCURRED     0xc
+#define NE_REMOTE_RESOLVED     0x8
+
 /*
  * SJA1000 Status and Error Register Definitions
  *
@@ -800,21 +817,41 @@ static int ican3_set_bus_state(struct ican3_dev *mod, bool on)
                return ican3_send_msg(mod, &msg);
 
        } else if (mod->fwtype == ICAN3_FWTYPE_CAL_CANOPEN) {
+               /* bittiming + can-on/off request */
                memset(&msg, 0, sizeof(msg));
                msg.spec = MSG_LMTS;
                if (on) {
                        msg.len = cpu_to_le16(4);
-                       msg.data[0] = 0;
+                       msg.data[0] = LMTS_BUSON_REQ;
                        msg.data[1] = 0;
                        msg.data[2] = btr0;
                        msg.data[3] = btr1;
                } else {
                        msg.len = cpu_to_le16(2);
-                       msg.data[0] = 1;
+                       msg.data[0] = LMTS_BUSOFF_REQ;
                        msg.data[1] = 0;
                }
+               res = ican3_send_msg(mod, &msg);
+               if (res)
+                       return res;
 
-               return ican3_send_msg(mod, &msg);
+               if (on) {
+                       /* create NMT Slave Node for error processing
+                        *   class 2 (with error capability, see CiA/DS203-1)
+                        *   id    1
+                        *   name  locnod1 (must be exactly 7 bytes)
+                        */
+                       memset(&msg, 0, sizeof(msg));
+                       msg.spec = MSG_NMTS;
+                       msg.len = cpu_to_le16(11);
+                       msg.data[0] = NMTS_CREATE_NODE_REQ;
+                       msg.data[1] = 0;
+                       msg.data[2] = 2;                 /* node class */
+                       msg.data[3] = 1;                 /* node id */
+                       strcpy(msg.data + 4, "locnod1"); /* node name  */
+                       return ican3_send_msg(mod, &msg);
+               }
+               return 0;
        }
        return -ENOTSUPP;
 }
@@ -849,12 +886,23 @@ static int ican3_set_buserror(struct ican3_dev *mod, u8 quota)
 {
        struct ican3_msg msg;
 
-       memset(&msg, 0, sizeof(msg));
-       msg.spec = MSG_CCONFREQ;
-       msg.len = cpu_to_le16(2);
-       msg.data[0] = 0x00;
-       msg.data[1] = quota;
-
+       if (mod->fwtype == ICAN3_FWTYPE_ICANOS) {
+               memset(&msg, 0, sizeof(msg));
+               msg.spec = MSG_CCONFREQ;
+               msg.len = cpu_to_le16(2);
+               msg.data[0] = 0x00;
+               msg.data[1] = quota;
+       } else if (mod->fwtype == ICAN3_FWTYPE_CAL_CANOPEN) {
+               memset(&msg, 0, sizeof(msg));
+               msg.spec = MSG_LMTS;
+               msg.len = cpu_to_le16(4);
+               msg.data[0] = LMTS_CAN_CONF_REQ;
+               msg.data[1] = 0x00;
+               msg.data[2] = 0x00;
+               msg.data[3] = quota;
+       } else {
+               return -ENOTSUPP;
+       }
        return ican3_send_msg(mod, &msg);
 }
 
@@ -1150,6 +1198,41 @@ static void ican3_handle_inquiry(struct ican3_dev *mod, struct ican3_msg *msg)
        }
 }
 
+/* Handle NMTS Slave Event Indication Messages from the firmware */
+static void ican3_handle_nmtsind(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+       u16 subspec;
+
+       subspec = msg->data[0] + msg->data[1] * 0x100;
+       if (subspec == NMTS_SLAVE_EVENT_IND) {
+               switch (msg->data[2]) {
+               case NE_LOCAL_OCCURRED:
+               case NE_LOCAL_RESOLVED:
+                       /* What follows matches a raw ICANOS CEVTIND
+                        * message: shift the data into place and reuse
+                        * that handler.
+                        */
+                       le16_add_cpu(&msg->len, -3);
+                       memmove(msg->data, msg->data + 3, le16_to_cpu(msg->len));
+                       ican3_handle_cevtind(mod, msg);
+                       break;
+               case NE_REMOTE_OCCURRED:
+               case NE_REMOTE_RESOLVED:
+                       /* should not occur, ignore */
+                       break;
+               default:
+                       netdev_warn(mod->ndev, "unknown NMTS event indication %x\n",
+                                   msg->data[2]);
+                       break;
+               }
+       } else if (subspec == NMTS_SLAVE_STATE_IND) {
+               /* ignore state indications */
+       } else {
+               netdev_warn(mod->ndev, "unhandled NMTS indication %x\n",
+                           subspec);
+               return;
+       }
+}
+
 static void ican3_handle_unknown_message(struct ican3_dev *mod,
                                        struct ican3_msg *msg)
 {
@@ -1179,6 +1262,9 @@ static void ican3_handle_message(struct ican3_dev *mod, struct ican3_msg *msg)
        case MSG_INQUIRY:
                ican3_handle_inquiry(mod, msg);
                break;
+       case MSG_NMTS:
+               ican3_handle_nmtsind(mod, msg);
+               break;
        default:
                ican3_handle_unknown_message(mod, msg);
                break;
index 39cf911f7a1e3c364d7818bdc6bcd162a6490951..195f15edb32e3cf03b552d7cdb9a98d76f565591 100644 (file)
@@ -955,7 +955,7 @@ static struct net_device *alloc_m_can_dev(void)
        priv->can.do_get_berr_counter = m_can_get_berr_counter;
 
        /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.1 */
-       priv->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO;
+       can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
 
        /* CAN_CTRLMODE_FD_NON_ISO can not be changed with M_CAN IP v3.0.1 */
        priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
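
can_set_static_ctrlmode() is the helper backing this change; per
include/linux/can/dev.h in this cycle it looks roughly like:

	static inline void can_set_static_ctrlmode(struct net_device *dev,
						   u32 static_mode)
	{
		struct can_priv *priv = netdev_priv(dev);

		/* alloc_candev() succeeded => netdev_priv() is valid */
		priv->ctrlmode = static_mode;
		priv->ctrlmode_static = static_mode;

		/* override the default MTU set in can_setup() */
		if (static_mode & CAN_CTRLMODE_FD)
			dev->mtu = CANFD_MTU;
	}
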
index e36b7400d5cceaf12b416d39a0d546baa52e73a5..acb708fc14636e76959c068ec249682ac64564de 100644 (file)
@@ -276,7 +276,7 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
        out_8(&regs->cantflg, 1 << buf_id);
 
        if (!test_bit(F_TX_PROGRESS, &priv->flags))
-               dev->trans_start = jiffies;
+               netif_trans_update(dev);
 
        list_add_tail(&priv->tx_queue[buf_id].list, &priv->tx_head);
 
@@ -469,7 +469,7 @@ static irqreturn_t mscan_isr(int irq, void *dev_id)
                        clear_bit(F_TX_PROGRESS, &priv->flags);
                        priv->cur_pri = 0;
                } else {
-                       dev->trans_start = jiffies;
+                       netif_trans_update(dev);
                }
 
                if (!test_bit(F_TX_WAIT_ALL, &priv->flags))
index 8836a7485c81c80380f0b68d0cd005704d6c742d..3eb7430dffbf1378df8c4c9c40f92a99e06e879d 100644 (file)
@@ -39,6 +39,7 @@ MODULE_DESCRIPTION("Socket-CAN driver for PLX90xx PCI-bridge cards with "
 MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, "
                        "Adlink PCI-7841/cPCI-7841 SE, "
                        "Marathon CAN-bus-PCI, "
+                       "Marathon CAN-bus-PCIe, "
                        "TEWS TECHNOLOGIES TPMC810, "
                        "esd CAN-PCI/CPCI/PCI104/200, "
                        "esd CAN-PCI/PMC/266, "
@@ -133,6 +134,7 @@ struct plx_pci_card {
 #define IXXAT_PCI_SUB_SYS_ID           0x2540
 
 #define MARATHON_PCI_DEVICE_ID         0x2715
+#define MARATHON_PCIE_DEVICE_ID                0x3432
 
 #define TEWS_PCI_VENDOR_ID             0x1498
 #define TEWS_PCI_DEVICE_ID_TMPC810     0x032A
@@ -141,8 +143,9 @@ struct plx_pci_card {
 #define CTI_PCI_DEVICE_ID_CRG001       0x0900
 
 static void plx_pci_reset_common(struct pci_dev *pdev);
-static void plx_pci_reset_marathon(struct pci_dev *pdev);
 static void plx9056_pci_reset_common(struct pci_dev *pdev);
+static void plx_pci_reset_marathon_pci(struct pci_dev *pdev);
+static void plx_pci_reset_marathon_pcie(struct pci_dev *pdev);
 
 struct plx_pci_channel_map {
        u32 bar;
@@ -215,14 +218,22 @@ static struct plx_pci_card_info plx_pci_card_info_ixxat = {
        /* based on PLX9050 */
 };
 
-static struct plx_pci_card_info plx_pci_card_info_marathon = {
+static struct plx_pci_card_info plx_pci_card_info_marathon_pci = {
        "Marathon CAN-bus-PCI", 2,
        PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
        {0, 0x00, 0x00}, { {2, 0x00, 0x00}, {4, 0x00, 0x00} },
-       &plx_pci_reset_marathon
+       &plx_pci_reset_marathon_pci
        /* based on PLX9052 */
 };
 
+static struct plx_pci_card_info plx_pci_card_info_marathon_pcie = {
+       "Marathon CAN-bus-PCIe", 2,
+       PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+       {0, 0x00, 0x00}, { {2, 0x00, 0x00}, {3, 0x80, 0x00} },
+       &plx_pci_reset_marathon_pcie
+       /* based on PEX8311 */
+};
+
 static struct plx_pci_card_info plx_pci_card_info_tews = {
        "TEWS TECHNOLOGIES TPMC810", 2,
        PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
@@ -316,7 +327,14 @@ static const struct pci_device_id plx_pci_tbl[] = {
                PCI_VENDOR_ID_PLX, MARATHON_PCI_DEVICE_ID,
                PCI_ANY_ID, PCI_ANY_ID,
                0, 0,
-               (kernel_ulong_t)&plx_pci_card_info_marathon
+               (kernel_ulong_t)&plx_pci_card_info_marathon_pci
+       },
+       {
+               /* Marathon CAN-bus-PCIe card */
+               PCI_VENDOR_ID_PLX, MARATHON_PCIE_DEVICE_ID,
+               PCI_ANY_ID, PCI_ANY_ID,
+               0, 0,
+               (kernel_ulong_t)&plx_pci_card_info_marathon_pcie
        },
        {
                /* TEWS TECHNOLOGIES TPMC810 card */
@@ -437,8 +455,8 @@ static void plx9056_pci_reset_common(struct pci_dev *pdev)
        iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
 };
 
-/* Special reset function for Marathon card */
-static void plx_pci_reset_marathon(struct pci_dev *pdev)
+/* Special reset function for Marathon CAN-bus-PCI card */
+static void plx_pci_reset_marathon_pci(struct pci_dev *pdev)
 {
        void __iomem *reset_addr;
        int i;
@@ -460,6 +478,34 @@ static void plx_pci_reset_marathon(struct pci_dev *pdev)
        }
 }
 
+/* Special reset function for Marathon CAN-bus-PCIe card */
+static void plx_pci_reset_marathon_pcie(struct pci_dev *pdev)
+{
+       void __iomem *addr;
+       void __iomem *reset_addr;
+       int i;
+
+       plx9056_pci_reset_common(pdev);
+
+       for (i = 0; i < 2; i++) {
+               struct plx_pci_channel_map *chan_map =
+                       &plx_pci_card_info_marathon_pcie.chan_map_tbl[i];
+               addr = pci_iomap(pdev, chan_map->bar, chan_map->size);
+               if (!addr) {
+                       dev_err(&pdev->dev, "Failed to remap reset "
+                               "space %d (BAR%d)\n", i, chan_map->bar);
+               } else {
+                       /* reset the SJA1000 chip */
+                       #define MARATHON_PCIE_RESET_OFFSET 32
+                       reset_addr = addr + chan_map->offset +
+                                    MARATHON_PCIE_RESET_OFFSET;
+                       iowrite8(0x1, reset_addr);
+                       udelay(100);
+                       pci_iounmap(pdev, addr);
+               }
+       }
+}
+
 static void plx_pci_del_card(struct pci_dev *pdev)
 {
        struct plx_pci_card *card = pci_get_drvdata(pdev);
@@ -486,7 +532,8 @@ static void plx_pci_del_card(struct pci_dev *pdev)
         * Disable interrupts from PCI-card and disable local
         * interrupts
         */
-       if (pdev->device != PCI_DEVICE_ID_PLX_9056)
+       if (pdev->device != PCI_DEVICE_ID_PLX_9056 &&
+           pdev->device != MARATHON_PCIE_DEVICE_ID)
                iowrite32(0x0, card->conf_addr + PLX_INTCSR);
        else
                iowrite32(0x0, card->conf_addr + PLX9056_INTCSR);
@@ -619,7 +666,8 @@ static int plx_pci_add_card(struct pci_dev *pdev,
         * Enable interrupts from PCI-card (PLX90xx) and enable Local_1,
         * Local_2 interrupts from the SJA1000 chips
         */
-       if (pdev->device != PCI_DEVICE_ID_PLX_9056) {
+       if (pdev->device != PCI_DEVICE_ID_PLX_9056 &&
+           pdev->device != MARATHON_PCIE_DEVICE_ID) {
                val = ioread32(card->conf_addr + PLX_INTCSR);
                if (pdev->subsystem_vendor == PCI_VENDOR_ID_ESDGMBH)
                        val |= PLX_LINT1_EN | PLX_PCI_INT_EN;
index 8dda3b703d39a1e82ce7fc6a63707e883d235313..9f107798f904b4e6771930d469b43082419df5d6 100644 (file)
@@ -438,6 +438,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
 
                cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
 
+               /* set error type */
                switch (ecc & ECC_MASK) {
                case ECC_BIT:
                        cf->data[2] |= CAN_ERR_PROT_BIT;
@@ -449,9 +450,12 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
                        cf->data[2] |= CAN_ERR_PROT_STUFF;
                        break;
                default:
-                       cf->data[3] = ecc & ECC_SEG;
                        break;
                }
+
+               /* set error location */
+               cf->data[3] = ecc & ECC_SEG;
+
                /* Error occurred during transmission? */
                if ((ecc & ECC_DIR) == 0)
                        cf->data[2] |= CAN_ERR_PROT_TX;
index 74a7dfecee2783ac609b9fefbd32f7817b48c43c..cf36d26ef002262aa43c37439d17d57069ed8780 100644 (file)
@@ -961,7 +961,8 @@ static int mcp251x_open(struct net_device *net)
                goto open_unlock;
        }
 
-       priv->wq = create_freezable_workqueue("mcp251x_wq");
+       priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
+                                  0);
        INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
        INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
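
For reference, create_freezable_workqueue() is only a convenience wrapper; in
<linux/workqueue.h> it expands approximately to

	alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM,
			1, (name))

so the open-coded call above keeps the freezable and reclaim behavior while
dropping WQ_UNBOUND and the max_active limit of 1.
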
 
index 3400fd1cada7152dbd0ad2c5df4626b52c8bd1a0..71f0e791355b9c6e51218c6829a89cbc17ba607f 100644 (file)
@@ -521,7 +521,7 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
        if (urb->status)
                netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status);
 
-       netdev->trans_start = jiffies;
+       netif_trans_update(netdev);
 
        /* transmission complete interrupt */
        netdev->stats.tx_packets++;
@@ -835,7 +835,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
                        stats->tx_dropped++;
                }
        } else {
-               netdev->trans_start = jiffies;
+               netif_trans_update(netdev);
 
                /* Slow down tx path */
                if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS ||
index 113e64fcd73be9a16635f0029ed3c6d0c2a17306..784a9002fbb9309291b443363967101d54e2a97f 100644 (file)
@@ -480,7 +480,7 @@ static void esd_usb2_write_bulk_callback(struct urb *urb)
        if (urb->status)
                netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status);
 
-       netdev->trans_start = jiffies;
+       netif_trans_update(netdev);
 }
 
 static ssize_t show_firmware(struct device *d,
@@ -820,7 +820,7 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
                goto releasebuf;
        }
 
-       netdev->trans_start = jiffies;
+       netif_trans_update(netdev);
 
        /*
         * Release our reference to this URB, the USB core will eventually free
index cbc99d5649afa3877158a0ac37a50b8865a7e8ac..1556d428623531026ef58fe15260ff2e88a04372 100644 (file)
@@ -950,7 +950,8 @@ static void gs_usb_disconnect(struct usb_interface *intf)
 }
 
 static const struct usb_device_id gs_usb_table[] = {
-       {USB_DEVICE(USB_GSUSB_1_VENDOR_ID, USB_GSUSB_1_PRODUCT_ID)},
+       { USB_DEVICE_INTERFACE_NUMBER(USB_GSUSB_1_VENDOR_ID,
+                                     USB_GSUSB_1_PRODUCT_ID, 0) },
        {} /* Terminating entry */
 };
 
index 5a2e341a6d1ea7b2e2586414fa41592b7ad78099..bfb91d8fa46056b9fc9d7d51dfd0dfc0ad251d89 100644 (file)
@@ -274,7 +274,7 @@ static void peak_usb_write_bulk_callback(struct urb *urb)
                netdev->stats.tx_bytes += context->data_len;
 
                /* prevent tx timeout */
-               netdev->trans_start = jiffies;
+               netif_trans_update(netdev);
                break;
 
        default:
@@ -373,7 +373,7 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb,
                        stats->tx_dropped++;
                }
        } else {
-               netdev->trans_start = jiffies;
+               netif_trans_update(netdev);
 
                /* slow down tx path */
                if (atomic_read(&dev->active_tx_urbs) >= PCAN_USB_MAX_TX_URBS)
index 64c016a99af80b0bf20114b98193e4544d598630..221f5f011ff9e6bb64d07d36aef80bbd749f14ed 100644 (file)
@@ -1106,7 +1106,7 @@ e100_send_packet(struct sk_buff *skb, struct net_device *dev)
 
        myNextTxDesc->skb = skb;
 
-       dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
+       netif_trans_update(dev); /* NETIF_F_LLTX driver :( */
 
        e100_hardware_send_packet(np, buf, skb->len);
 
index 90ba003d8fdf307784a35ee76dc61fda55dc8cf6..200663c43ce90dbea7ad8f74fae41415632cded8 100644 (file)
@@ -1,10 +1,6 @@
 menu "Distributed Switch Architecture drivers"
        depends on HAVE_NET_DSA
 
-config NET_DSA_MV88E6XXX
-       tristate
-       default n
-
 config NET_DSA_MV88E6060
        tristate "Marvell 88E6060 ethernet switch chip support"
        depends on NET_DSA
@@ -13,46 +9,13 @@ config NET_DSA_MV88E6060
          This enables support for the Marvell 88E6060 ethernet switch
          chip.
 
-config NET_DSA_MV88E6XXX_NEED_PPU
-       bool
-       default n
-
-config NET_DSA_MV88E6131
-       tristate "Marvell 88E6085/6095/6095F/6131 ethernet switch chip support"
-       depends on NET_DSA
-       select NET_DSA_MV88E6XXX
-       select NET_DSA_MV88E6XXX_NEED_PPU
-       select NET_DSA_TAG_DSA
-       ---help---
-         This enables support for the Marvell 88E6085/6095/6095F/6131
-         ethernet switch chips.
-
-config NET_DSA_MV88E6123
-       tristate "Marvell 88E6123/6161/6165 ethernet switch chip support"
-       depends on NET_DSA
-       select NET_DSA_MV88E6XXX
-       select NET_DSA_TAG_EDSA
-       ---help---
-         This enables support for the Marvell 88E6123/6161/6165
-         ethernet switch chips.
-
-config NET_DSA_MV88E6171
-       tristate "Marvell 88E6171/6175/6350/6351 ethernet switch chip support"
-       depends on NET_DSA
-       select NET_DSA_MV88E6XXX
-       select NET_DSA_TAG_EDSA
-       ---help---
-         This enables support for the Marvell 88E6171/6175/6350/6351
-         ethernet switches chips.
-
-config NET_DSA_MV88E6352
-       tristate "Marvell 88E6172/6176/6320/6321/6352 ethernet switch chip support"
+config NET_DSA_MV88E6XXX
+       tristate "Marvell 88E6xxx Ethernet switch chip support"
        depends on NET_DSA
-       select NET_DSA_MV88E6XXX
        select NET_DSA_TAG_EDSA
        ---help---
-         This enables support for the Marvell 88E6172, 88E6176, 88E6320,
-         88E6321 and 88E6352 ethernet switch chips.
+         This enables support for most of the Marvell 88E6xxx models of
+         Ethernet switch chips, except 88E6060.
 
 config NET_DSA_BCM_SF2
        tristate "Broadcom Starfighter 2 Ethernet switch support"
index a6e09939be65ae04eb2d8a4aaa689347334b9b74..76b751dd9efd32585d224fc65b353d93d83e8167 100644 (file)
@@ -1,16 +1,3 @@
 obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
-obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx_drv.o
-mv88e6xxx_drv-y += mv88e6xxx.o
-ifdef CONFIG_NET_DSA_MV88E6123
-mv88e6xxx_drv-y += mv88e6123.o
-endif
-ifdef CONFIG_NET_DSA_MV88E6131
-mv88e6xxx_drv-y += mv88e6131.o
-endif
-ifdef CONFIG_NET_DSA_MV88E6352
-mv88e6xxx_drv-y += mv88e6352.o
-endif
-ifdef CONFIG_NET_DSA_MV88E6171
-mv88e6xxx_drv-y += mv88e6171.o
-endif
+obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx.o
 obj-$(CONFIG_NET_DSA_BCM_SF2)  += bcm_sf2.o
index 780f228765388c196398fd5f7aef6bb2ca5cd754..10ddd5a5dfb6ec9144f2ff0de02bea7b9f98fc92 100644 (file)
@@ -135,8 +135,17 @@ static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds)
        return BCM_SF2_STATS_SIZE;
 }
 
-static char *bcm_sf2_sw_probe(struct device *host_dev, int sw_addr)
+static const char *bcm_sf2_sw_drv_probe(struct device *dsa_dev,
+                                       struct device *host_dev, int sw_addr,
+                                       void **_priv)
 {
+       struct bcm_sf2_priv *priv;
+
+       priv = devm_kzalloc(dsa_dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return NULL;
+       *_priv = priv;
+
        return "Broadcom Starfighter 2";
 }
 
@@ -151,7 +160,7 @@ static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
         * the same VLAN.
         */
        for (i = 0; i < priv->hw_params.num_ports; i++) {
-               if (!((1 << i) & ds->phys_port_mask))
+               if (!((1 << i) & ds->enabled_port_mask))
                        continue;
 
                reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
@@ -940,8 +949,8 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
        /* All the interesting properties are at the parent device_node
         * level
         */
-       dn = ds->pd->of_node->parent;
-       bcm_sf2_identify_ports(priv, ds->pd->of_node);
+       dn = ds->cd->of_node->parent;
+       bcm_sf2_identify_ports(priv, ds->cd->of_node);
 
        priv->irq0 = irq_of_parse_and_map(dn, 0);
        priv->irq1 = irq_of_parse_and_map(dn, 1);
@@ -1000,7 +1009,7 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
        /* Enable all valid ports and disable those unused */
        for (port = 0; port < priv->hw_params.num_ports; port++) {
                /* IMP port receives special treatment */
-               if ((1 << port) & ds->phys_port_mask)
+               if ((1 << port) & ds->enabled_port_mask)
                        bcm_sf2_port_setup(ds, port, NULL);
                else if (dsa_is_cpu_port(ds, port))
                        bcm_sf2_imp_setup(ds, port);
@@ -1013,11 +1022,12 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
         * 7445D0, since 7445E0 disconnects the internal switch pseudo-PHY such
         * that we can use the regular SWITCH_MDIO master controller instead.
         *
-        * By default, DSA initializes ds->phys_mii_mask to ds->phys_port_mask
-        * to have a 1:1 mapping between Port address and PHY address in order
-        * to utilize the slave_mii_bus instance to read from Port PHYs. This is
-        * not what we want here, so we initialize phys_mii_mask 0 to always
-        * utilize the "master" MDIO bus backed by the "mdio-unimac" driver.
+        * By default, DSA initializes ds->phys_mii_mask to
+        * ds->enabled_port_mask to have a 1:1 mapping between Port address
+        * and PHY address in order to utilize the slave_mii_bus instance to
+        * read from Port PHYs. This is not what we want here, so we
+        * initialize phys_mii_mask 0 to always utilize the "master" MDIO
+        * bus backed by the "mdio-unimac" driver.
         */
        if (of_machine_is_compatible("brcm,bcm7445d0"))
                ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0));
@@ -1275,7 +1285,7 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
         * bcm_sf2_sw_setup
         */
        for (port = 0; port < DSA_MAX_PORTS; port++) {
-               if ((1 << port) & ds->phys_port_mask ||
+               if ((1 << port) & ds->enabled_port_mask ||
                    dsa_is_cpu_port(ds, port))
                        bcm_sf2_port_disable(ds, port, NULL);
        }
@@ -1299,7 +1309,7 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
                bcm_sf2_gphy_enable_set(ds, true);
 
        for (port = 0; port < DSA_MAX_PORTS; port++) {
-               if ((1 << port) & ds->phys_port_mask)
+               if ((1 << port) & ds->enabled_port_mask)
                        bcm_sf2_port_setup(ds, port, NULL);
                else if (dsa_is_cpu_port(ds, port))
                        bcm_sf2_imp_setup(ds, port);
@@ -1362,8 +1372,7 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
 
 static struct dsa_switch_driver bcm_sf2_switch_driver = {
        .tag_protocol           = DSA_TAG_PROTO_BRCM,
-       .priv_size              = sizeof(struct bcm_sf2_priv),
-       .probe                  = bcm_sf2_sw_probe,
+       .probe                  = bcm_sf2_sw_drv_probe,
        .setup                  = bcm_sf2_sw_setup,
        .set_addr               = bcm_sf2_sw_set_addr,
        .get_phy_flags          = bcm_sf2_sw_get_phy_flags,
index 0527f485c3dc7c4d1cc689a7707820af993bc68f..e36b40886bd810c0c517b8ac3f2e320d9581ed9a 100644 (file)
 
 static int reg_read(struct dsa_switch *ds, int addr, int reg)
 {
-       struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
+       struct mv88e6060_priv *priv = ds_to_priv(ds);
 
-       if (bus == NULL)
-               return -EINVAL;
-
-       return mdiobus_read_nested(bus, ds->pd->sw_addr + addr, reg);
+       return mdiobus_read_nested(priv->bus, priv->sw_addr + addr, reg);
 }
 
 #define REG_READ(addr, reg)                                    \
@@ -40,12 +37,9 @@ static int reg_read(struct dsa_switch *ds, int addr, int reg)
 
 static int reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
 {
-       struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
-
-       if (bus == NULL)
-               return -EINVAL;
+       struct mv88e6060_priv *priv = ds_to_priv(ds);
 
-       return mdiobus_write_nested(bus, ds->pd->sw_addr + addr, reg, val);
+       return mdiobus_write_nested(priv->bus, priv->sw_addr + addr, reg, val);
 }
 
 #define REG_WRITE(addr, reg, val)                              \
@@ -57,14 +51,10 @@ static int reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
                        return __ret;                           \
        })
 
-static char *mv88e6060_probe(struct device *host_dev, int sw_addr)
+static const char *mv88e6060_get_name(struct mii_bus *bus, int sw_addr)
 {
-       struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev);
        int ret;
 
-       if (bus == NULL)
-               return NULL;
-
        ret = mdiobus_read(bus, sw_addr + REG_PORT(0), PORT_SWITCH_ID);
        if (ret >= 0) {
                if (ret == PORT_SWITCH_ID_6060)
@@ -79,6 +69,27 @@ static char *mv88e6060_probe(struct device *host_dev, int sw_addr)
        return NULL;
 }
 
+static const char *mv88e6060_drv_probe(struct device *dsa_dev,
+                                      struct device *host_dev, int sw_addr,
+                                      void **_priv)
+{
+       struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev);
+       struct mv88e6060_priv *priv;
+       const char *name;
+
+       name = mv88e6060_get_name(bus, sw_addr);
+       if (name) {
+               priv = devm_kzalloc(dsa_dev, sizeof(*priv), GFP_KERNEL);
+               if (!priv)
+                       return NULL;
+               *_priv = priv;
+               priv->bus = bus;
+               priv->sw_addr = sw_addr;
+       }
+
+       return name;
+}
+
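The new probe signature returns the chip name on success and hands the DSA core freshly allocated private state through the **_priv out-parameter, replacing the old priv_size-based allocation. A sketch of that contract in plain C (the probe() name and "example-switch" string are invented for illustration):

#include <stdio.h>
#include <stdlib.h>

struct priv {
	int sw_addr;
};

/* On detection: allocate private state, pass ownership back through
 * the out-parameter, return the chip name. On failure: return NULL
 * and leave *_priv untouched.
 */
static const char *probe(int sw_addr, void **_priv)
{
	struct priv *p;

	if (sw_addr < 0)	/* stand-in for "chip not detected" */
		return NULL;

	p = calloc(1, sizeof(*p));
	if (!p)
		return NULL;
	p->sw_addr = sw_addr;
	*_priv = p;
	return "example-switch";
}

int main(void)
{
	void *priv = NULL;
	const char *name = probe(16, &priv);

	if (name)
		printf("found %s, priv at %p\n", name, priv);
	free(priv);
	return 0;
}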
 static int mv88e6060_switch_reset(struct dsa_switch *ds)
 {
        int i;
@@ -159,7 +170,7 @@ static int mv88e6060_setup_port(struct dsa_switch *ds, int p)
        REG_WRITE(addr, PORT_VLAN_MAP,
                  ((p & 0xf) << PORT_VLAN_MAP_DBNUM_SHIFT) |
                   (dsa_is_cpu_port(ds, p) ?
-                       ds->phys_port_mask :
+                       ds->enabled_port_mask :
                        BIT(ds->dst->cpu_port)));
 
        /* Port Association Vector: when learning source addresses
@@ -174,8 +185,8 @@ static int mv88e6060_setup_port(struct dsa_switch *ds, int p)
 
 static int mv88e6060_setup(struct dsa_switch *ds)
 {
-       int i;
        int ret;
+       int i;
 
        ret = mv88e6060_switch_reset(ds);
        if (ret < 0)
@@ -238,7 +249,7 @@ mv88e6060_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
 
 static struct dsa_switch_driver mv88e6060_switch_driver = {
        .tag_protocol   = DSA_TAG_PROTO_TRAILER,
-       .probe          = mv88e6060_probe,
+       .probe          = mv88e6060_drv_probe,
        .setup          = mv88e6060_setup,
        .set_addr       = mv88e6060_set_addr,
        .phy_read       = mv88e6060_phy_read,
index cc9b2ed4aff493e2b2537267420c2506c8646048..10249bd16292aadac4f0dc5a835514b0de0c0fc6 100644 (file)
 #define GLOBAL_ATU_MAC_23      0x0e
 #define GLOBAL_ATU_MAC_45      0x0f
 
+struct mv88e6060_priv {
+       /* MDIO bus and address on bus to use. When in single chip
+        * mode, address is 0, and the switch uses multiple addresses
+        * on the bus.  When in multi-chip mode, the switch uses a
+        * single address which contains two registers used for
+        * indirect access to more registers.
+        */
+       struct mii_bus *bus;
+       int sw_addr;
+};
+
 #endif
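The comment in the new mv88e6060_priv struct describes two addressing modes. A rough userspace illustration of the distinction, under the stated assumption that single-chip mode means sw_addr == 0 and multi-chip mode means one shared address with an indirect command/data register pair (the describe_access() helper is invented):

#include <stdio.h>

static void describe_access(int sw_addr, int block)
{
	if (sw_addr == 0)
		printf("single-chip: read MDIO address %d directly\n", block);
	else
		printf("multi-chip: indirect op via address %d for block %d\n",
		       sw_addr, block);
}

int main(void)
{
	describe_access(0, 8);   /* single-chip mode */
	describe_access(16, 8);  /* multi-chip mode, switch at address 16 */
	return 0;
}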
diff --git a/drivers/net/dsa/mv88e6123.c b/drivers/net/dsa/mv88e6123.c
deleted file mode 100644 (file)
index 69a6f79..0000000
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * net/dsa/mv88e6123_61_65.c - Marvell 88e6123/6161/6165 switch chip support
- * Copyright (c) 2008-2009 Marvell Semiconductor
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/delay.h>
-#include <linux/jiffies.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/phy.h>
-#include <net/dsa.h>
-#include "mv88e6xxx.h"
-
-static const struct mv88e6xxx_switch_id mv88e6123_table[] = {
-       { PORT_SWITCH_ID_6123, "Marvell 88E6123" },
-       { PORT_SWITCH_ID_6123_A1, "Marvell 88E6123 (A1)" },
-       { PORT_SWITCH_ID_6123_A2, "Marvell 88E6123 (A2)" },
-       { PORT_SWITCH_ID_6161, "Marvell 88E6161" },
-       { PORT_SWITCH_ID_6161_A1, "Marvell 88E6161 (A1)" },
-       { PORT_SWITCH_ID_6161_A2, "Marvell 88E6161 (A2)" },
-       { PORT_SWITCH_ID_6165, "Marvell 88E6165" },
-       { PORT_SWITCH_ID_6165_A1, "Marvell 88E6165 (A1)" },
-       { PORT_SWITCH_ID_6165_A2, "Marvell 88e6165 (A2)" },
-};
-
-static char *mv88e6123_probe(struct device *host_dev, int sw_addr)
-{
-       return mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6123_table,
-                                    ARRAY_SIZE(mv88e6123_table));
-}
-
-static int mv88e6123_setup_global(struct dsa_switch *ds)
-{
-       u32 upstream_port = dsa_upstream_port(ds);
-       int ret;
-       u32 reg;
-
-       ret = mv88e6xxx_setup_global(ds);
-       if (ret)
-               return ret;
-
-       /* Disable the PHY polling unit (since there won't be any
-        * external PHYs to poll), don't discard packets with
-        * excessive collisions, and mask all interrupt sources.
-        */
-       REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, 0x0000);
-
-       /* Configure the upstream port, and configure the upstream
-        * port as the port to which ingress and egress monitor frames
-        * are to be sent.
-        */
-       reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
-               upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
-               upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT;
-       REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
-
-       /* Disable remote management for now, and set the switch's
-        * DSA device number.
-        */
-       REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2, ds->index & 0x1f);
-
-       return 0;
-}
-
-static int mv88e6123_setup(struct dsa_switch *ds)
-{
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int ret;
-
-       ret = mv88e6xxx_setup_common(ds);
-       if (ret < 0)
-               return ret;
-
-       switch (ps->id) {
-       case PORT_SWITCH_ID_6123:
-               ps->num_ports = 3;
-               break;
-       case PORT_SWITCH_ID_6161:
-       case PORT_SWITCH_ID_6165:
-               ps->num_ports = 6;
-               break;
-       default:
-               return -ENODEV;
-       }
-
-       ret = mv88e6xxx_switch_reset(ds, false);
-       if (ret < 0)
-               return ret;
-
-       ret = mv88e6123_setup_global(ds);
-       if (ret < 0)
-               return ret;
-
-       return mv88e6xxx_setup_ports(ds);
-}
-
-struct dsa_switch_driver mv88e6123_switch_driver = {
-       .tag_protocol           = DSA_TAG_PROTO_EDSA,
-       .priv_size              = sizeof(struct mv88e6xxx_priv_state),
-       .probe                  = mv88e6123_probe,
-       .setup                  = mv88e6123_setup,
-       .set_addr               = mv88e6xxx_set_addr_indirect,
-       .phy_read               = mv88e6xxx_phy_read,
-       .phy_write              = mv88e6xxx_phy_write,
-       .get_strings            = mv88e6xxx_get_strings,
-       .get_ethtool_stats      = mv88e6xxx_get_ethtool_stats,
-       .get_sset_count         = mv88e6xxx_get_sset_count,
-       .adjust_link            = mv88e6xxx_adjust_link,
-#ifdef CONFIG_NET_DSA_HWMON
-       .get_temp               = mv88e6xxx_get_temp,
-#endif
-       .get_regs_len           = mv88e6xxx_get_regs_len,
-       .get_regs               = mv88e6xxx_get_regs,
-};
-
-MODULE_ALIAS("platform:mv88e6123");
-MODULE_ALIAS("platform:mv88e6161");
-MODULE_ALIAS("platform:mv88e6165");
diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
deleted file mode 100644 (file)
index 2407028..0000000
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * net/dsa/mv88e6131.c - Marvell 88e6095/6095f/6131 switch chip support
- * Copyright (c) 2008-2009 Marvell Semiconductor
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/delay.h>
-#include <linux/jiffies.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/phy.h>
-#include <net/dsa.h>
-#include "mv88e6xxx.h"
-
-static const struct mv88e6xxx_switch_id mv88e6131_table[] = {
-       { PORT_SWITCH_ID_6085, "Marvell 88E6085" },
-       { PORT_SWITCH_ID_6095, "Marvell 88E6095/88E6095F" },
-       { PORT_SWITCH_ID_6131, "Marvell 88E6131" },
-       { PORT_SWITCH_ID_6131_B2, "Marvell 88E6131 (B2)" },
-       { PORT_SWITCH_ID_6185, "Marvell 88E6185" },
-};
-
-static char *mv88e6131_probe(struct device *host_dev, int sw_addr)
-{
-       return mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6131_table,
-                                    ARRAY_SIZE(mv88e6131_table));
-}
-
-static int mv88e6131_setup_global(struct dsa_switch *ds)
-{
-       u32 upstream_port = dsa_upstream_port(ds);
-       int ret;
-       u32 reg;
-
-       ret = mv88e6xxx_setup_global(ds);
-       if (ret)
-               return ret;
-
-       /* Enable the PHY polling unit, don't discard packets with
-        * excessive collisions, use a weighted fair queueing scheme
-        * to arbitrate between packet queues, set the maximum frame
-        * size to 1632, and mask all interrupt sources.
-        */
-       REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
-                 GLOBAL_CONTROL_PPU_ENABLE | GLOBAL_CONTROL_MAX_FRAME_1632);
-
-       /* Set the VLAN ethertype to 0x8100. */
-       REG_WRITE(REG_GLOBAL, GLOBAL_CORE_TAG_TYPE, 0x8100);
-
-       /* Disable ARP mirroring, and configure the upstream port as
-        * the port to which ingress and egress monitor frames are to
-        * be sent.
-        */
-       reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
-               upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
-               GLOBAL_MONITOR_CONTROL_ARP_DISABLED;
-       REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
-
-       /* Disable cascade port functionality unless this device
-        * is used in a cascade configuration, and set the switch's
-        * DSA device number.
-        */
-       if (ds->dst->pd->nr_chips > 1)
-               REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2,
-                         GLOBAL_CONTROL_2_MULTIPLE_CASCADE |
-                         (ds->index & 0x1f));
-       else
-               REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2,
-                         GLOBAL_CONTROL_2_NO_CASCADE |
-                         (ds->index & 0x1f));
-
-       /* Force the priority of IGMP/MLD snoop frames and ARP frames
-        * to the highest setting.
-        */
-       REG_WRITE(REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE,
-                 GLOBAL2_PRIO_OVERRIDE_FORCE_SNOOP |
-                 7 << GLOBAL2_PRIO_OVERRIDE_SNOOP_SHIFT |
-                 GLOBAL2_PRIO_OVERRIDE_FORCE_ARP |
-                 7 << GLOBAL2_PRIO_OVERRIDE_ARP_SHIFT);
-
-       return 0;
-}
-
-static int mv88e6131_setup(struct dsa_switch *ds)
-{
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int ret;
-
-       ret = mv88e6xxx_setup_common(ds);
-       if (ret < 0)
-               return ret;
-
-       mv88e6xxx_ppu_state_init(ds);
-
-       switch (ps->id) {
-       case PORT_SWITCH_ID_6085:
-       case PORT_SWITCH_ID_6185:
-               ps->num_ports = 10;
-               break;
-       case PORT_SWITCH_ID_6095:
-               ps->num_ports = 11;
-               break;
-       case PORT_SWITCH_ID_6131:
-       case PORT_SWITCH_ID_6131_B2:
-               ps->num_ports = 8;
-               break;
-       default:
-               return -ENODEV;
-       }
-
-       ret = mv88e6xxx_switch_reset(ds, false);
-       if (ret < 0)
-               return ret;
-
-       ret = mv88e6131_setup_global(ds);
-       if (ret < 0)
-               return ret;
-
-       return mv88e6xxx_setup_ports(ds);
-}
-
-static int mv88e6131_port_to_phy_addr(struct dsa_switch *ds, int port)
-{
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
-       if (port >= 0 && port < ps->num_ports)
-               return port;
-
-       return -EINVAL;
-}
-
-static int
-mv88e6131_phy_read(struct dsa_switch *ds, int port, int regnum)
-{
-       int addr = mv88e6131_port_to_phy_addr(ds, port);
-
-       if (addr < 0)
-               return addr;
-
-       return mv88e6xxx_phy_read_ppu(ds, addr, regnum);
-}
-
-static int
-mv88e6131_phy_write(struct dsa_switch *ds,
-                             int port, int regnum, u16 val)
-{
-       int addr = mv88e6131_port_to_phy_addr(ds, port);
-
-       if (addr < 0)
-               return addr;
-
-       return mv88e6xxx_phy_write_ppu(ds, addr, regnum, val);
-}
-
-struct dsa_switch_driver mv88e6131_switch_driver = {
-       .tag_protocol           = DSA_TAG_PROTO_DSA,
-       .priv_size              = sizeof(struct mv88e6xxx_priv_state),
-       .probe                  = mv88e6131_probe,
-       .setup                  = mv88e6131_setup,
-       .set_addr               = mv88e6xxx_set_addr_direct,
-       .phy_read               = mv88e6131_phy_read,
-       .phy_write              = mv88e6131_phy_write,
-       .get_strings            = mv88e6xxx_get_strings,
-       .get_ethtool_stats      = mv88e6xxx_get_ethtool_stats,
-       .get_sset_count         = mv88e6xxx_get_sset_count,
-       .adjust_link            = mv88e6xxx_adjust_link,
-       .port_bridge_join       = mv88e6xxx_port_bridge_join,
-       .port_bridge_leave      = mv88e6xxx_port_bridge_leave,
-       .port_vlan_filtering    = mv88e6xxx_port_vlan_filtering,
-       .port_vlan_prepare      = mv88e6xxx_port_vlan_prepare,
-       .port_vlan_add          = mv88e6xxx_port_vlan_add,
-       .port_vlan_del          = mv88e6xxx_port_vlan_del,
-       .port_vlan_dump         = mv88e6xxx_port_vlan_dump,
-       .port_fdb_prepare       = mv88e6xxx_port_fdb_prepare,
-       .port_fdb_add           = mv88e6xxx_port_fdb_add,
-       .port_fdb_del           = mv88e6xxx_port_fdb_del,
-       .port_fdb_dump          = mv88e6xxx_port_fdb_dump,
-};
-
-MODULE_ALIAS("platform:mv88e6085");
-MODULE_ALIAS("platform:mv88e6095");
-MODULE_ALIAS("platform:mv88e6095f");
-MODULE_ALIAS("platform:mv88e6131");
diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c
deleted file mode 100644 (file)
index 0e62f3b..0000000
+++ /dev/null
@@ -1,123 +0,0 @@
-/* net/dsa/mv88e6171.c - Marvell 88e6171 switch chip support
- * Copyright (c) 2008-2009 Marvell Semiconductor
- * Copyright (c) 2014 Claudio Leite <leitec@staticky.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/delay.h>
-#include <linux/jiffies.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/phy.h>
-#include <net/dsa.h>
-#include "mv88e6xxx.h"
-
-static const struct mv88e6xxx_switch_id mv88e6171_table[] = {
-       { PORT_SWITCH_ID_6171, "Marvell 88E6171" },
-       { PORT_SWITCH_ID_6175, "Marvell 88E6175" },
-       { PORT_SWITCH_ID_6350, "Marvell 88E6350" },
-       { PORT_SWITCH_ID_6351, "Marvell 88E6351" },
-};
-
-static char *mv88e6171_probe(struct device *host_dev, int sw_addr)
-{
-       return mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6171_table,
-                                    ARRAY_SIZE(mv88e6171_table));
-}
-
-static int mv88e6171_setup_global(struct dsa_switch *ds)
-{
-       u32 upstream_port = dsa_upstream_port(ds);
-       int ret;
-       u32 reg;
-
-       ret = mv88e6xxx_setup_global(ds);
-       if (ret)
-               return ret;
-
-       /* Discard packets with excessive collisions, mask all
-        * interrupt sources, enable PPU.
-        */
-       REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
-                 GLOBAL_CONTROL_PPU_ENABLE | GLOBAL_CONTROL_DISCARD_EXCESS);
-
-       /* Configure the upstream port, and configure the upstream
-        * port as the port to which ingress and egress monitor frames
-        * are to be sent.
-        */
-       reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
-               upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
-               upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT |
-               upstream_port << GLOBAL_MONITOR_CONTROL_MIRROR_SHIFT;
-       REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
-
-       /* Disable remote management for now, and set the switch's
-        * DSA device number.
-        */
-       REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2, ds->index & 0x1f);
-
-       return 0;
-}
-
-static int mv88e6171_setup(struct dsa_switch *ds)
-{
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int ret;
-
-       ret = mv88e6xxx_setup_common(ds);
-       if (ret < 0)
-               return ret;
-
-       ps->num_ports = 7;
-
-       ret = mv88e6xxx_switch_reset(ds, true);
-       if (ret < 0)
-               return ret;
-
-       ret = mv88e6171_setup_global(ds);
-       if (ret < 0)
-               return ret;
-
-       return mv88e6xxx_setup_ports(ds);
-}
-
-struct dsa_switch_driver mv88e6171_switch_driver = {
-       .tag_protocol           = DSA_TAG_PROTO_EDSA,
-       .priv_size              = sizeof(struct mv88e6xxx_priv_state),
-       .probe                  = mv88e6171_probe,
-       .setup                  = mv88e6171_setup,
-       .set_addr               = mv88e6xxx_set_addr_indirect,
-       .phy_read               = mv88e6xxx_phy_read_indirect,
-       .phy_write              = mv88e6xxx_phy_write_indirect,
-       .get_strings            = mv88e6xxx_get_strings,
-       .get_ethtool_stats      = mv88e6xxx_get_ethtool_stats,
-       .get_sset_count         = mv88e6xxx_get_sset_count,
-       .adjust_link            = mv88e6xxx_adjust_link,
-#ifdef CONFIG_NET_DSA_HWMON
-       .get_temp               = mv88e6xxx_get_temp,
-#endif
-       .get_regs_len           = mv88e6xxx_get_regs_len,
-       .get_regs               = mv88e6xxx_get_regs,
-       .port_bridge_join       = mv88e6xxx_port_bridge_join,
-       .port_bridge_leave      = mv88e6xxx_port_bridge_leave,
-       .port_stp_state_set     = mv88e6xxx_port_stp_state_set,
-       .port_vlan_filtering    = mv88e6xxx_port_vlan_filtering,
-       .port_vlan_prepare      = mv88e6xxx_port_vlan_prepare,
-       .port_vlan_add          = mv88e6xxx_port_vlan_add,
-       .port_vlan_del          = mv88e6xxx_port_vlan_del,
-       .port_vlan_dump         = mv88e6xxx_port_vlan_dump,
-       .port_fdb_prepare       = mv88e6xxx_port_fdb_prepare,
-       .port_fdb_add           = mv88e6xxx_port_fdb_add,
-       .port_fdb_del           = mv88e6xxx_port_fdb_del,
-       .port_fdb_dump          = mv88e6xxx_port_fdb_dump,
-};
-
-MODULE_ALIAS("platform:mv88e6171");
-MODULE_ALIAS("platform:mv88e6175");
-MODULE_ALIAS("platform:mv88e6350");
-MODULE_ALIAS("platform:mv88e6351");
diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c
deleted file mode 100644 (file)
index 7f452e4..0000000
+++ /dev/null
@@ -1,345 +0,0 @@
-/*
- * net/dsa/mv88e6352.c - Marvell 88e6352 switch chip support
- *
- * Copyright (c) 2014 Guenter Roeck
- *
- * Derived from mv88e6123_61_65.c
- * Copyright (c) 2008-2009 Marvell Semiconductor
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/delay.h>
-#include <linux/jiffies.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/platform_device.h>
-#include <linux/phy.h>
-#include <net/dsa.h>
-#include "mv88e6xxx.h"
-
-static const struct mv88e6xxx_switch_id mv88e6352_table[] = {
-       { PORT_SWITCH_ID_6172, "Marvell 88E6172" },
-       { PORT_SWITCH_ID_6176, "Marvell 88E6176" },
-       { PORT_SWITCH_ID_6240, "Marvell 88E6240" },
-       { PORT_SWITCH_ID_6320, "Marvell 88E6320" },
-       { PORT_SWITCH_ID_6320_A1, "Marvell 88E6320 (A1)" },
-       { PORT_SWITCH_ID_6320_A2, "Marvell 88e6320 (A2)" },
-       { PORT_SWITCH_ID_6321, "Marvell 88E6321" },
-       { PORT_SWITCH_ID_6321_A1, "Marvell 88E6321 (A1)" },
-       { PORT_SWITCH_ID_6321_A2, "Marvell 88e6321 (A2)" },
-       { PORT_SWITCH_ID_6352, "Marvell 88E6352" },
-       { PORT_SWITCH_ID_6352_A0, "Marvell 88E6352 (A0)" },
-       { PORT_SWITCH_ID_6352_A1, "Marvell 88E6352 (A1)" },
-};
-
-static char *mv88e6352_probe(struct device *host_dev, int sw_addr)
-{
-       return mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6352_table,
-                                    ARRAY_SIZE(mv88e6352_table));
-}
-
-static int mv88e6352_setup_global(struct dsa_switch *ds)
-{
-       u32 upstream_port = dsa_upstream_port(ds);
-       int ret;
-       u32 reg;
-
-       ret = mv88e6xxx_setup_global(ds);
-       if (ret)
-               return ret;
-
-       /* Discard packets with excessive collisions,
-        * mask all interrupt sources, enable PPU (bit 14, undocumented).
-        */
-       REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
-                 GLOBAL_CONTROL_PPU_ENABLE | GLOBAL_CONTROL_DISCARD_EXCESS);
-
-       /* Configure the upstream port, and configure the upstream
-        * port as the port to which ingress and egress monitor frames
-        * are to be sent.
-        */
-       reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
-               upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
-               upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT;
-       REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
-
-       /* Disable remote management for now, and set the switch's
-        * DSA device number.
-        */
-       REG_WRITE(REG_GLOBAL, 0x1c, ds->index & 0x1f);
-
-       return 0;
-}
-
-static int mv88e6352_setup(struct dsa_switch *ds)
-{
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int ret;
-
-       ret = mv88e6xxx_setup_common(ds);
-       if (ret < 0)
-               return ret;
-
-       ps->num_ports = 7;
-
-       mutex_init(&ps->eeprom_mutex);
-
-       ret = mv88e6xxx_switch_reset(ds, true);
-       if (ret < 0)
-               return ret;
-
-       ret = mv88e6352_setup_global(ds);
-       if (ret < 0)
-               return ret;
-
-       return mv88e6xxx_setup_ports(ds);
-}
-
-static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr)
-{
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int ret;
-
-       mutex_lock(&ps->eeprom_mutex);
-
-       ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
-                                 GLOBAL2_EEPROM_OP_READ |
-                                 (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
-       if (ret < 0)
-               goto error;
-
-       ret = mv88e6xxx_eeprom_busy_wait(ds);
-       if (ret < 0)
-               goto error;
-
-       ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_EEPROM_DATA);
-error:
-       mutex_unlock(&ps->eeprom_mutex);
-       return ret;
-}
-
-static int mv88e6352_get_eeprom(struct dsa_switch *ds,
-                               struct ethtool_eeprom *eeprom, u8 *data)
-{
-       int offset;
-       int len;
-       int ret;
-
-       offset = eeprom->offset;
-       len = eeprom->len;
-       eeprom->len = 0;
-
-       eeprom->magic = 0xc3ec4951;
-
-       ret = mv88e6xxx_eeprom_load_wait(ds);
-       if (ret < 0)
-               return ret;
-
-       if (offset & 1) {
-               int word;
-
-               word = mv88e6352_read_eeprom_word(ds, offset >> 1);
-               if (word < 0)
-                       return word;
-
-               *data++ = (word >> 8) & 0xff;
-
-               offset++;
-               len--;
-               eeprom->len++;
-       }
-
-       while (len >= 2) {
-               int word;
-
-               word = mv88e6352_read_eeprom_word(ds, offset >> 1);
-               if (word < 0)
-                       return word;
-
-               *data++ = word & 0xff;
-               *data++ = (word >> 8) & 0xff;
-
-               offset += 2;
-               len -= 2;
-               eeprom->len += 2;
-       }
-
-       if (len) {
-               int word;
-
-               word = mv88e6352_read_eeprom_word(ds, offset >> 1);
-               if (word < 0)
-                       return word;
-
-               *data++ = word & 0xff;
-
-               offset++;
-               len--;
-               eeprom->len++;
-       }
-
-       return 0;
-}
-
-static int mv88e6352_eeprom_is_readonly(struct dsa_switch *ds)
-{
-       int ret;
-
-       ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP);
-       if (ret < 0)
-               return ret;
-
-       if (!(ret & GLOBAL2_EEPROM_OP_WRITE_EN))
-               return -EROFS;
-
-       return 0;
-}
-
-static int mv88e6352_write_eeprom_word(struct dsa_switch *ds, int addr,
-                                      u16 data)
-{
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int ret;
-
-       mutex_lock(&ps->eeprom_mutex);
-
-       ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data);
-       if (ret < 0)
-               goto error;
-
-       ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
-                                 GLOBAL2_EEPROM_OP_WRITE |
-                                 (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
-       if (ret < 0)
-               goto error;
-
-       ret = mv88e6xxx_eeprom_busy_wait(ds);
-error:
-       mutex_unlock(&ps->eeprom_mutex);
-       return ret;
-}
-
-static int mv88e6352_set_eeprom(struct dsa_switch *ds,
-                               struct ethtool_eeprom *eeprom, u8 *data)
-{
-       int offset;
-       int ret;
-       int len;
-
-       if (eeprom->magic != 0xc3ec4951)
-               return -EINVAL;
-
-       ret = mv88e6352_eeprom_is_readonly(ds);
-       if (ret)
-               return ret;
-
-       offset = eeprom->offset;
-       len = eeprom->len;
-       eeprom->len = 0;
-
-       ret = mv88e6xxx_eeprom_load_wait(ds);
-       if (ret < 0)
-               return ret;
-
-       if (offset & 1) {
-               int word;
-
-               word = mv88e6352_read_eeprom_word(ds, offset >> 1);
-               if (word < 0)
-                       return word;
-
-               word = (*data++ << 8) | (word & 0xff);
-
-               ret = mv88e6352_write_eeprom_word(ds, offset >> 1, word);
-               if (ret < 0)
-                       return ret;
-
-               offset++;
-               len--;
-               eeprom->len++;
-       }
-
-       while (len >= 2) {
-               int word;
-
-               word = *data++;
-               word |= *data++ << 8;
-
-               ret = mv88e6352_write_eeprom_word(ds, offset >> 1, word);
-               if (ret < 0)
-                       return ret;
-
-               offset += 2;
-               len -= 2;
-               eeprom->len += 2;
-       }
-
-       if (len) {
-               int word;
-
-               word = mv88e6352_read_eeprom_word(ds, offset >> 1);
-               if (word < 0)
-                       return word;
-
-               word = (word & 0xff00) | *data++;
-
-               ret = mv88e6352_write_eeprom_word(ds, offset >> 1, word);
-               if (ret < 0)
-                       return ret;
-
-               offset++;
-               len--;
-               eeprom->len++;
-       }
-
-       return 0;
-}
-
-struct dsa_switch_driver mv88e6352_switch_driver = {
-       .tag_protocol           = DSA_TAG_PROTO_EDSA,
-       .priv_size              = sizeof(struct mv88e6xxx_priv_state),
-       .probe                  = mv88e6352_probe,
-       .setup                  = mv88e6352_setup,
-       .set_addr               = mv88e6xxx_set_addr_indirect,
-       .phy_read               = mv88e6xxx_phy_read_indirect,
-       .phy_write              = mv88e6xxx_phy_write_indirect,
-       .get_strings            = mv88e6xxx_get_strings,
-       .get_ethtool_stats      = mv88e6xxx_get_ethtool_stats,
-       .get_sset_count         = mv88e6xxx_get_sset_count,
-       .adjust_link            = mv88e6xxx_adjust_link,
-       .set_eee                = mv88e6xxx_set_eee,
-       .get_eee                = mv88e6xxx_get_eee,
-#ifdef CONFIG_NET_DSA_HWMON
-       .get_temp               = mv88e6xxx_get_temp,
-       .get_temp_limit         = mv88e6xxx_get_temp_limit,
-       .set_temp_limit         = mv88e6xxx_set_temp_limit,
-       .get_temp_alarm         = mv88e6xxx_get_temp_alarm,
-#endif
-       .get_eeprom             = mv88e6352_get_eeprom,
-       .set_eeprom             = mv88e6352_set_eeprom,
-       .get_regs_len           = mv88e6xxx_get_regs_len,
-       .get_regs               = mv88e6xxx_get_regs,
-       .port_bridge_join       = mv88e6xxx_port_bridge_join,
-       .port_bridge_leave      = mv88e6xxx_port_bridge_leave,
-       .port_stp_state_set     = mv88e6xxx_port_stp_state_set,
-       .port_vlan_filtering    = mv88e6xxx_port_vlan_filtering,
-       .port_vlan_prepare      = mv88e6xxx_port_vlan_prepare,
-       .port_vlan_add          = mv88e6xxx_port_vlan_add,
-       .port_vlan_del          = mv88e6xxx_port_vlan_del,
-       .port_vlan_dump         = mv88e6xxx_port_vlan_dump,
-       .port_fdb_prepare       = mv88e6xxx_port_fdb_prepare,
-       .port_fdb_add           = mv88e6xxx_port_fdb_add,
-       .port_fdb_del           = mv88e6xxx_port_fdb_del,
-       .port_fdb_dump          = mv88e6xxx_port_fdb_dump,
-};
-
-MODULE_ALIAS("platform:mv88e6172");
-MODULE_ALIAS("platform:mv88e6176");
-MODULE_ALIAS("platform:mv88e6320");
-MODULE_ALIAS("platform:mv88e6321");
-MODULE_ALIAS("platform:mv88e6352");
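The deleted mv88e6352 EEPROM helpers above expose a byte-granular ethtool interface on top of 16-bit EEPROM words: an odd leading byte and a trailing byte are handled by read-modify-write, the middle runs in whole little-endian words. A self-contained sketch of the same packing, with a toy in-memory word store in place of the real EEPROM ops:

#include <stdint.h>
#include <stdio.h>

static uint16_t eeprom[8]; /* toy 16-bit-word backing store */

static uint16_t read_word(int waddr)          { return eeprom[waddr]; }
static void write_word(int waddr, uint16_t v) { eeprom[waddr] = v; }

/* Mirrors the shape of the deleted mv88e6352_set_eeprom(). */
static void write_bytes(int offset, const uint8_t *data, int len)
{
	if (offset & 1) {	/* odd head byte: keep low byte of the word */
		uint16_t w = read_word(offset >> 1);
		w = (w & 0x00ff) | (*data++ << 8);
		write_word(offset >> 1, w);
		offset++;
		len--;
	}
	while (len >= 2) {	/* whole little-endian words */
		uint16_t w = data[0] | (data[1] << 8);
		write_word(offset >> 1, w);
		data += 2;
		offset += 2;
		len -= 2;
	}
	if (len) {		/* trailing byte: keep high byte of the word */
		uint16_t w = read_word(offset >> 1);
		w = (w & 0xff00) | *data;
		write_word(offset >> 1, w);
	}
}

int main(void)
{
	uint8_t buf[] = { 0x11, 0x22, 0x33, 0x44, 0x55 };

	write_bytes(1, buf, sizeof(buf)); /* deliberately odd offset */
	for (int i = 0; i < 4; i++)
		printf("word %d: 0x%04x\n", i, eeprom[i]);
	return 0;
}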
index 62320fca6712204cdc1828eaf36273a63af224bb..a3f0e7ec406766733df313f0bc1f7c73d24461e3 100644 (file)
@@ -5,6 +5,8 @@
  * Copyright (c) 2015 CMC Electronics, Inc.
  *     Added support for VLAN Table Unit operations
  *
+ * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -17,6 +19,7 @@
 #include <linux/if_bridge.h>
 #include <linux/jiffies.h>
 #include <linux/list.h>
+#include <linux/mdio.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/gpio/consumer.h>
 #include <net/switchdev.h>
 #include "mv88e6xxx.h"
 
-static void assert_smi_lock(struct dsa_switch *ds)
+static void assert_smi_lock(struct mv88e6xxx_priv_state *ps)
 {
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
        if (unlikely(!mutex_is_locked(&ps->smi_mutex))) {
-               dev_err(ds->master_dev, "SMI lock not held!\n");
+               dev_err(ps->dev, "SMI lock not held!\n");
                dump_stack();
        }
 }
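assert_smi_lock() now takes the private state directly instead of deriving it from the dsa_switch. The pattern itself is worth spelling out: helpers that require a lock do not take it themselves, they only check and complain. A userspace analogue using pthread_mutex_trylock() as a stand-in for the kernel's mutex_is_locked() (this only demonstrates the shape of the check, not kernel semantics):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t smi_mutex = PTHREAD_MUTEX_INITIALIZER;

static void assert_smi_lock_held(void)
{
	if (pthread_mutex_trylock(&smi_mutex) == 0) {
		/* We got the lock, so the caller did not hold it. */
		pthread_mutex_unlock(&smi_mutex);
		fprintf(stderr, "SMI lock not held!\n");
	}
}

static void locked_helper(void)
{
	assert_smi_lock_held();
	/* ... register access that relies on the lock ... */
}

int main(void)
{
	locked_helper();		/* triggers the warning */
	pthread_mutex_lock(&smi_mutex);
	locked_helper();		/* silent: lock is held */
	pthread_mutex_unlock(&smi_mutex);
	return 0;
}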
@@ -92,33 +93,29 @@ static int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr,
        return ret & 0xffff;
 }
 
-static int _mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
+static int _mv88e6xxx_reg_read(struct mv88e6xxx_priv_state *ps,
+                              int addr, int reg)
 {
-       struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
        int ret;
 
-       assert_smi_lock(ds);
-
-       if (bus == NULL)
-               return -EINVAL;
+       assert_smi_lock(ps);
 
-       ret = __mv88e6xxx_reg_read(bus, ds->pd->sw_addr, addr, reg);
+       ret = __mv88e6xxx_reg_read(ps->bus, ps->sw_addr, addr, reg);
        if (ret < 0)
                return ret;
 
-       dev_dbg(ds->master_dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
+       dev_dbg(ps->dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
                addr, reg, ret);
 
        return ret;
 }
 
-int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
+int mv88e6xxx_reg_read(struct mv88e6xxx_priv_state *ps, int addr, int reg)
 {
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int ret;
 
        mutex_lock(&ps->smi_mutex);
-       ret = _mv88e6xxx_reg_read(ds, addr, reg);
+       ret = _mv88e6xxx_reg_read(ps, addr, reg);
        mutex_unlock(&ps->smi_mutex);
 
        return ret;
@@ -156,58 +153,71 @@ static int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
        return 0;
 }
 
-static int _mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg,
-                               u16 val)
+static int _mv88e6xxx_reg_write(struct mv88e6xxx_priv_state *ps, int addr,
+                               int reg, u16 val)
 {
-       struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
-
-       assert_smi_lock(ds);
-
-       if (bus == NULL)
-               return -EINVAL;
+       assert_smi_lock(ps);
 
-       dev_dbg(ds->master_dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
+       dev_dbg(ps->dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
                addr, reg, val);
 
-       return __mv88e6xxx_reg_write(bus, ds->pd->sw_addr, addr, reg, val);
+       return __mv88e6xxx_reg_write(ps->bus, ps->sw_addr, addr, reg, val);
 }
 
-int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
+int mv88e6xxx_reg_write(struct mv88e6xxx_priv_state *ps, int addr,
+                       int reg, u16 val)
 {
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int ret;
 
        mutex_lock(&ps->smi_mutex);
-       ret = _mv88e6xxx_reg_write(ds, addr, reg, val);
+       ret = _mv88e6xxx_reg_write(ps, addr, reg, val);
        mutex_unlock(&ps->smi_mutex);
 
        return ret;
 }
 
-int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
+static int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
 {
-       REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 8) | addr[1]);
-       REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
-       REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int err;
 
-       return 0;
+       err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_01,
+                                 (addr[0] << 8) | addr[1]);
+       if (err)
+               return err;
+
+       err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_23,
+                                 (addr[2] << 8) | addr[3]);
+       if (err)
+               return err;
+
+       return mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_45,
+                                  (addr[4] << 8) | addr[5]);
 }
 
-int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
+static int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
 {
-       int i;
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int ret;
+       int i;
 
        for (i = 0; i < 6; i++) {
                int j;
 
                /* Write the MAC address byte. */
-               REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
-                         GLOBAL2_SWITCH_MAC_BUSY | (i << 8) | addr[i]);
+               ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
+                                         GLOBAL2_SWITCH_MAC_BUSY |
+                                         (i << 8) | addr[i]);
+               if (ret)
+                       return ret;
 
                /* Wait for the write to complete. */
                for (j = 0; j < 16; j++) {
-                       ret = REG_READ(REG_GLOBAL2, GLOBAL2_SWITCH_MAC);
+                       ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2,
+                                                GLOBAL2_SWITCH_MAC);
+                       if (ret < 0)
+                               return ret;
+
                        if ((ret & GLOBAL2_SWITCH_MAC_BUSY) == 0)
                                break;
                }
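The reworked set_addr_direct() above now checks every register write instead of going through the error-swallowing REG_WRITE macro. The packing itself is simple: a 6-byte MAC address goes big-endian into three 16-bit registers. A minimal sketch of just that arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint16_t mac_reg[3];	/* GLOBAL_MAC_01, _23, _45 */

	for (int i = 0; i < 3; i++)
		mac_reg[i] = (addr[2 * i] << 8) | addr[2 * i + 1];

	for (int i = 0; i < 3; i++)
		printf("GLOBAL_MAC_%d%d = 0x%04x\n", 2 * i, 2 * i + 1,
		       mac_reg[i]);
	return 0;
}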
@@ -218,34 +228,52 @@ int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
        return 0;
 }
 
-static int _mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
+int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+       if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SWITCH_MAC))
+               return mv88e6xxx_set_addr_indirect(ds, addr);
+       else
+               return mv88e6xxx_set_addr_direct(ds, addr);
+}
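The new mv88e6xxx_set_addr() wrapper dispatches on a per-chip capability flag rather than hard-coding the method per driver. A hedged sketch of flag-based dispatch in the same spirit (the FLAG_SWITCH_MAC name mimics MV88E6XXX_FLAG_SWITCH_MAC above, but the bit value and struct layout are invented):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG_SWITCH_MAC	(1u << 0)

struct chip_info {
	const char *name;
	uint32_t flags;
};

static bool has(const struct chip_info *info, uint32_t flag)
{
	return info->flags & flag;
}

static void set_addr(const struct chip_info *info)
{
	if (has(info, FLAG_SWITCH_MAC))
		printf("%s: indirect MAC write via GLOBAL2\n", info->name);
	else
		printf("%s: direct MAC write via GLOBAL\n", info->name);
}

int main(void)
{
	struct chip_info old = { "6185-like", 0 };
	struct chip_info new = { "6352-like", FLAG_SWITCH_MAC };

	set_addr(&old);
	set_addr(&new);
	return 0;
}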
+
+static int _mv88e6xxx_phy_read(struct mv88e6xxx_priv_state *ps, int addr,
+                              int regnum)
 {
        if (addr >= 0)
-               return _mv88e6xxx_reg_read(ds, addr, regnum);
+               return _mv88e6xxx_reg_read(ps, addr, regnum);
        return 0xffff;
 }
 
-static int _mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum,
-                               u16 val)
+static int _mv88e6xxx_phy_write(struct mv88e6xxx_priv_state *ps, int addr,
+                               int regnum, u16 val)
 {
        if (addr >= 0)
-               return _mv88e6xxx_reg_write(ds, addr, regnum, val);
+               return _mv88e6xxx_reg_write(ps, addr, regnum, val);
        return 0;
 }
 
-#ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
-static int mv88e6xxx_ppu_disable(struct dsa_switch *ds)
+static int mv88e6xxx_ppu_disable(struct mv88e6xxx_priv_state *ps)
 {
        int ret;
        unsigned long timeout;
 
-       ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
-       REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
-                 ret & ~GLOBAL_CONTROL_PPU_ENABLE);
+       ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_CONTROL);
+       if (ret < 0)
+               return ret;
+
+       ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL,
+                                  ret & ~GLOBAL_CONTROL_PPU_ENABLE);
+       if (ret)
+               return ret;
 
        timeout = jiffies + 1 * HZ;
        while (time_before(jiffies, timeout)) {
-               ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
+               ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATUS);
+               if (ret < 0)
+                       return ret;
+
                usleep_range(1000, 2000);
                if ((ret & GLOBAL_STATUS_PPU_MASK) !=
                    GLOBAL_STATUS_PPU_POLLING)
@@ -255,17 +283,26 @@ static int mv88e6xxx_ppu_disable(struct dsa_switch *ds)
        return -ETIMEDOUT;
 }
 
-static int mv88e6xxx_ppu_enable(struct dsa_switch *ds)
+static int mv88e6xxx_ppu_enable(struct mv88e6xxx_priv_state *ps)
 {
-       int ret;
+       int ret, err;
        unsigned long timeout;
 
-       ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
-       REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, ret | GLOBAL_CONTROL_PPU_ENABLE);
+       ret = mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_CONTROL);
+       if (ret < 0)
+               return ret;
+
+       err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL,
+                                 ret | GLOBAL_CONTROL_PPU_ENABLE);
+       if (err)
+               return err;
 
        timeout = jiffies + 1 * HZ;
        while (time_before(jiffies, timeout)) {
-               ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
+               ret = mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATUS);
+               if (ret < 0)
+                       return ret;
+
                usleep_range(1000, 2000);
                if ((ret & GLOBAL_STATUS_PPU_MASK) ==
                    GLOBAL_STATUS_PPU_POLLING)
@@ -281,9 +318,7 @@ static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
 
        ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work);
        if (mutex_trylock(&ps->ppu_mutex)) {
-               struct dsa_switch *ds = ((struct dsa_switch *)ps) - 1;
-
-               if (mv88e6xxx_ppu_enable(ds) == 0)
+               if (mv88e6xxx_ppu_enable(ps) == 0)
                        ps->ppu_disabled = 0;
                mutex_unlock(&ps->ppu_mutex);
        }
@@ -296,9 +331,8 @@ static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
        schedule_work(&ps->ppu_work);
 }
 
-static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
+static int mv88e6xxx_ppu_access_get(struct mv88e6xxx_priv_state *ps)
 {
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int ret;
 
        mutex_lock(&ps->ppu_mutex);
@@ -309,7 +343,7 @@ static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
         * it.
         */
        if (!ps->ppu_disabled) {
-               ret = mv88e6xxx_ppu_disable(ds);
+               ret = mv88e6xxx_ppu_disable(ps);
                if (ret < 0) {
                        mutex_unlock(&ps->ppu_mutex);
                        return ret;
@@ -323,19 +357,15 @@ static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
        return ret;
 }
 
-static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
+static void mv88e6xxx_ppu_access_put(struct mv88e6xxx_priv_state *ps)
 {
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
        /* Schedule a timer to re-enable the PHY polling unit. */
        mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
        mutex_unlock(&ps->ppu_mutex);
 }
 
-void mv88e6xxx_ppu_state_init(struct dsa_switch *ds)
+void mv88e6xxx_ppu_state_init(struct mv88e6xxx_priv_state *ps)
 {
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
        mutex_init(&ps->ppu_mutex);
        INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
        init_timer(&ps->ppu_timer);
@@ -343,184 +373,84 @@ void mv88e6xxx_ppu_state_init(struct dsa_switch *ds)
        ps->ppu_timer.function = mv88e6xxx_ppu_reenable_timer;
 }
 
-int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum)
+static int mv88e6xxx_phy_read_ppu(struct mv88e6xxx_priv_state *ps, int addr,
+                                 int regnum)
 {
        int ret;
 
-       ret = mv88e6xxx_ppu_access_get(ds);
+       ret = mv88e6xxx_ppu_access_get(ps);
        if (ret >= 0) {
-               ret = mv88e6xxx_reg_read(ds, addr, regnum);
-               mv88e6xxx_ppu_access_put(ds);
+               ret = _mv88e6xxx_reg_read(ps, addr, regnum);
+               mv88e6xxx_ppu_access_put(ps);
        }
 
        return ret;
 }
 
-int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
-                           int regnum, u16 val)
+static int mv88e6xxx_phy_write_ppu(struct mv88e6xxx_priv_state *ps, int addr,
+                                  int regnum, u16 val)
 {
        int ret;
 
-       ret = mv88e6xxx_ppu_access_get(ds);
+       ret = mv88e6xxx_ppu_access_get(ps);
        if (ret >= 0) {
-               ret = mv88e6xxx_reg_write(ds, addr, regnum, val);
-               mv88e6xxx_ppu_access_put(ds);
+               ret = _mv88e6xxx_reg_write(ps, addr, regnum, val);
+               mv88e6xxx_ppu_access_put(ps);
        }
 
        return ret;
 }
-#endif
 
-static bool mv88e6xxx_6065_family(struct dsa_switch *ds)
+static bool mv88e6xxx_6065_family(struct mv88e6xxx_priv_state *ps)
 {
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
-       switch (ps->id) {
-       case PORT_SWITCH_ID_6031:
-       case PORT_SWITCH_ID_6061:
-       case PORT_SWITCH_ID_6035:
-       case PORT_SWITCH_ID_6065:
-               return true;
-       }
-       return false;
+       return ps->info->family == MV88E6XXX_FAMILY_6065;
 }
 
-static bool mv88e6xxx_6095_family(struct dsa_switch *ds)
+static bool mv88e6xxx_6095_family(struct mv88e6xxx_priv_state *ps)
 {
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
-       switch (ps->id) {
-       case PORT_SWITCH_ID_6092:
-       case PORT_SWITCH_ID_6095:
-               return true;
-       }
-       return false;
+       return ps->info->family == MV88E6XXX_FAMILY_6095;
 }
 
-static bool mv88e6xxx_6097_family(struct dsa_switch *ds)
+static bool mv88e6xxx_6097_family(struct mv88e6xxx_priv_state *ps)
 {
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
-       switch (ps->id) {
-       case PORT_SWITCH_ID_6046:
-       case PORT_SWITCH_ID_6085:
-       case PORT_SWITCH_ID_6096:
-       case PORT_SWITCH_ID_6097:
-               return true;
-       }
-       return false;
+       return ps->info->family == MV88E6XXX_FAMILY_6097;
 }
 
-static bool mv88e6xxx_6165_family(struct dsa_switch *ds)
+static bool mv88e6xxx_6165_family(struct mv88e6xxx_priv_state *ps)
 {
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
-       switch (ps->id) {
-       case PORT_SWITCH_ID_6123:
-       case PORT_SWITCH_ID_6161:
-       case PORT_SWITCH_ID_6165:
-               return true;
-       }
-       return false;
+       return ps->info->family == MV88E6XXX_FAMILY_6165;
 }
 
-static bool mv88e6xxx_6185_family(struct dsa_switch *ds)
+static bool mv88e6xxx_6185_family(struct mv88e6xxx_priv_state *ps)
 {
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
-       switch (ps->id) {
-       case PORT_SWITCH_ID_6121:
-       case PORT_SWITCH_ID_6122:
-       case PORT_SWITCH_ID_6152:
-       case PORT_SWITCH_ID_6155:
-       case PORT_SWITCH_ID_6182:
-       case PORT_SWITCH_ID_6185:
-       case PORT_SWITCH_ID_6108:
-       case PORT_SWITCH_ID_6131:
-               return true;
-       }
-       return false;
+       return ps->info->family == MV88E6XXX_FAMILY_6185;
 }
 
-static bool mv88e6xxx_6320_family(struct dsa_switch *ds)
+static bool mv88e6xxx_6320_family(struct mv88e6xxx_priv_state *ps)
 {
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
-       switch (ps->id) {
-       case PORT_SWITCH_ID_6320:
-       case PORT_SWITCH_ID_6321:
-               return true;
-       }
-       return false;
+       return ps->info->family == MV88E6XXX_FAMILY_6320;
 }
 
-static bool mv88e6xxx_6351_family(struct dsa_switch *ds)
+static bool mv88e6xxx_6351_family(struct mv88e6xxx_priv_state *ps)
 {
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
-       switch (ps->id) {
-       case PORT_SWITCH_ID_6171:
-       case PORT_SWITCH_ID_6175:
-       case PORT_SWITCH_ID_6350:
-       case PORT_SWITCH_ID_6351:
-               return true;
-       }
-       return false;
+       return ps->info->family == MV88E6XXX_FAMILY_6351;
 }
 
-static bool mv88e6xxx_6352_family(struct dsa_switch *ds)
+static bool mv88e6xxx_6352_family(struct mv88e6xxx_priv_state *ps)
 {
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
-       switch (ps->id) {
-       case PORT_SWITCH_ID_6172:
-       case PORT_SWITCH_ID_6176:
-       case PORT_SWITCH_ID_6240:
-       case PORT_SWITCH_ID_6352:
-               return true;
-       }
-       return false;
+       return ps->info->family == MV88E6XXX_FAMILY_6352;
 }
 
-static unsigned int mv88e6xxx_num_databases(struct dsa_switch *ds)
+static unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_priv_state *ps)
 {
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
-       /* The following devices have 4-bit identifiers for 16 databases */
-       if (ps->id == PORT_SWITCH_ID_6061)
-               return 16;
-
-       /* The following devices have 6-bit identifiers for 64 databases */
-       if (ps->id == PORT_SWITCH_ID_6065)
-               return 64;
-
-       /* The following devices have 8-bit identifiers for 256 databases */
-       if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
-               return 256;
-
-       /* The following devices have 12-bit identifiers for 4096 databases */
-       if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
-           mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds))
-               return 4096;
-
-       return 0;
+       return ps->info->num_databases;
 }
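This whole run of hunks replaces per-function switch statements over product IDs with single reads of a ps->info record matched once at probe time. A self-contained sketch of that table-driven refactor (the families, IDs, and field values below are a small invented subset):

#include <stddef.h>
#include <stdio.h>

enum family { FAMILY_6095, FAMILY_6352 };

struct info {
	unsigned int prod_num;
	enum family family;
	unsigned int num_databases;
	unsigned int num_ports;
};

static const struct info table[] = {
	{ 0x095, FAMILY_6095,  256, 11 },
	{ 0x352, FAMILY_6352, 4096,  7 },
};

/* Match the product ID once; helpers then just read info fields,
 * as mv88e6xxx_num_databases() does above.
 */
static const struct info *lookup(unsigned int prod_num)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].prod_num == prod_num)
			return &table[i];
	return NULL;
}

int main(void)
{
	const struct info *info = lookup(0x352);

	if (info)
		printf("family %d, %u databases, %u ports\n",
		       info->family, info->num_databases, info->num_ports);
	return 0;
}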
 
-static bool mv88e6xxx_has_fid_reg(struct dsa_switch *ds)
+static bool mv88e6xxx_has_fid_reg(struct mv88e6xxx_priv_state *ps)
 {
        /* Does the device have dedicated FID registers for ATU and VTU ops? */
-       if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
-           mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds))
-               return true;
-
-       return false;
-}
-
-static bool mv88e6xxx_has_stu(struct dsa_switch *ds)
-{
-       /* Does the device have STU and dedicated SID registers for VTU ops? */
-       if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
-           mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds))
+       if (mv88e6xxx_6097_family(ps) || mv88e6xxx_6165_family(ps) ||
+           mv88e6xxx_6351_family(ps) || mv88e6xxx_6352_family(ps))
                return true;
 
        return false;
@@ -530,8 +460,8 @@ static bool mv88e6xxx_has_stu(struct dsa_switch *ds)
  * phy. However, in the case of a fixed link phy, we force the port
  * settings from the fixed link settings.
  */
-void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
-                          struct phy_device *phydev)
+static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
+                                 struct phy_device *phydev)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        u32 reg;
@@ -542,7 +472,7 @@ void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
 
        mutex_lock(&ps->smi_mutex);
 
-       ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
+       ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_PCS_CTRL);
        if (ret < 0)
                goto out;
 
@@ -556,7 +486,7 @@ void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
        if (phydev->link)
                        reg |= PORT_PCS_CTRL_LINK_UP;
 
-       if (mv88e6xxx_6065_family(ds) && phydev->speed > SPEED_100)
+       if (mv88e6xxx_6065_family(ps) && phydev->speed > SPEED_100)
                goto out;
 
        switch (phydev->speed) {
@@ -578,8 +508,8 @@ void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
        if (phydev->duplex == DUPLEX_FULL)
                reg |= PORT_PCS_CTRL_DUPLEX_FULL;
 
-       if ((mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds)) &&
-           (port >= ps->num_ports - 2)) {
+       if ((mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps)) &&
+           (port >= ps->info->num_ports - 2)) {
                if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
                        reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK;
                if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
@@ -588,19 +518,19 @@ void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
                        reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK |
                                PORT_PCS_CTRL_RGMII_DELAY_TXCLK);
        }
-       _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_PCS_CTRL, reg);
+       _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_PCS_CTRL, reg);
 
 out:
        mutex_unlock(&ps->smi_mutex);
 }
 
-static int _mv88e6xxx_stats_wait(struct dsa_switch *ds)
+static int _mv88e6xxx_stats_wait(struct mv88e6xxx_priv_state *ps)
 {
        int ret;
        int i;
 
        for (i = 0; i < 10; i++) {
-               ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_OP);
+               ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_OP);
                if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
                        return 0;
        }
@@ -608,52 +538,54 @@ static int _mv88e6xxx_stats_wait(struct dsa_switch *ds)
        return -ETIMEDOUT;
 }
 
-static int _mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
+static int _mv88e6xxx_stats_snapshot(struct mv88e6xxx_priv_state *ps,
+                                    int port)
 {
        int ret;
 
-       if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
+       if (mv88e6xxx_6320_family(ps) || mv88e6xxx_6352_family(ps))
                port = (port + 1) << 5;
 
        /* Snapshot the hardware statistics counters for this port. */
-       ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
+       ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP,
                                   GLOBAL_STATS_OP_CAPTURE_PORT |
                                   GLOBAL_STATS_OP_HIST_RX_TX | port);
        if (ret < 0)
                return ret;
 
        /* Wait for the snapshotting to complete. */
-       ret = _mv88e6xxx_stats_wait(ds);
+       ret = _mv88e6xxx_stats_wait(ps);
        if (ret < 0)
                return ret;
 
        return 0;
 }
 
-static void _mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
+static void _mv88e6xxx_stats_read(struct mv88e6xxx_priv_state *ps,
+                                 int stat, u32 *val)
 {
        u32 _val;
        int ret;
 
        *val = 0;
 
-       ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
+       ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP,
                                   GLOBAL_STATS_OP_READ_CAPTURED |
                                   GLOBAL_STATS_OP_HIST_RX_TX | stat);
        if (ret < 0)
                return;
 
-       ret = _mv88e6xxx_stats_wait(ds);
+       ret = _mv88e6xxx_stats_wait(ps);
        if (ret < 0)
                return;
 
-       ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
+       ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
        if (ret < 0)
                return;
 
        _val = ret << 16;
 
-       ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
+       ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
        if (ret < 0)
                return;
 
@@ -722,26 +654,26 @@ static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
        { "out_management",     4, 0x1f | GLOBAL_STATS_OP_BANK_1, BANK1, },
 };
 
-static bool mv88e6xxx_has_stat(struct dsa_switch *ds,
+static bool mv88e6xxx_has_stat(struct mv88e6xxx_priv_state *ps,
                               struct mv88e6xxx_hw_stat *stat)
 {
        switch (stat->type) {
        case BANK0:
                return true;
        case BANK1:
-               return mv88e6xxx_6320_family(ds);
+               return mv88e6xxx_6320_family(ps);
        case PORT:
-               return mv88e6xxx_6095_family(ds) ||
-                       mv88e6xxx_6185_family(ds) ||
-                       mv88e6xxx_6097_family(ds) ||
-                       mv88e6xxx_6165_family(ds) ||
-                       mv88e6xxx_6351_family(ds) ||
-                       mv88e6xxx_6352_family(ds);
+               return mv88e6xxx_6095_family(ps) ||
+                       mv88e6xxx_6185_family(ps) ||
+                       mv88e6xxx_6097_family(ps) ||
+                       mv88e6xxx_6165_family(ps) ||
+                       mv88e6xxx_6351_family(ps) ||
+                       mv88e6xxx_6352_family(ps);
        }
        return false;
 }
 
-static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds,
+static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_priv_state *ps,
                                            struct mv88e6xxx_hw_stat *s,
                                            int port)
 {
@@ -752,13 +684,13 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds,
 
        switch (s->type) {
        case PORT:
-               ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), s->reg);
+               ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), s->reg);
                if (ret < 0)
                        return UINT64_MAX;
 
                low = ret;
                if (s->sizeof_stat == 4) {
-                       ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
+                       ret = _mv88e6xxx_reg_read(ps, REG_PORT(port),
                                                  s->reg + 1);
                        if (ret < 0)
                                return UINT64_MAX;
@@ -767,22 +699,24 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds,
                break;
        case BANK0:
        case BANK1:
-               _mv88e6xxx_stats_read(ds, s->reg, &low);
+               _mv88e6xxx_stats_read(ps, s->reg, &low);
                if (s->sizeof_stat == 8)
-                       _mv88e6xxx_stats_read(ds, s->reg + 1, &high);
+                       _mv88e6xxx_stats_read(ps, s->reg + 1, &high);
        }
        value = (((u64)high) << 16) | low;
        return value;
 }
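The recombination at the end of _mv88e6xxx_get_ethtool_stat() is the usual split-register idiom: the hardware exposes wide counters as consecutive 16-bit registers, rebuilt as (high << 16) | low. A one-liner sketch of the same arithmetic:

#include <stdint.h>
#include <stdio.h>

static uint32_t counter32(uint16_t high, uint16_t low)
{
	return ((uint32_t)high << 16) | low;
}

int main(void)
{
	/* e.g. a 4-byte port counter read as two 16-bit registers */
	printf("stat = %u\n", (unsigned)counter32(0x0001, 0x86a0)); /* 100000 */
	return 0;
}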
 
-void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port,
+                                 uint8_t *data)
 {
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        struct mv88e6xxx_hw_stat *stat;
        int i, j;
 
        for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
                stat = &mv88e6xxx_hw_stats[i];
-               if (mv88e6xxx_has_stat(ds, stat)) {
+               if (mv88e6xxx_has_stat(ps, stat)) {
                        memcpy(data + j * ETH_GSTRING_LEN, stat->string,
                               ETH_GSTRING_LEN);
                        j++;
@@ -790,22 +724,22 @@ void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
        }
 }
 
-int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
+static int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
 {
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        struct mv88e6xxx_hw_stat *stat;
        int i, j;
 
        for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
                stat = &mv88e6xxx_hw_stats[i];
-               if (mv88e6xxx_has_stat(ds, stat))
+               if (mv88e6xxx_has_stat(ps, stat))
                        j++;
        }
        return j;
 }
 
-void
-mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
-                           int port, uint64_t *data)
+static void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
+                                       uint64_t *data)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        struct mv88e6xxx_hw_stat *stat;
@@ -814,15 +748,15 @@ mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
 
        mutex_lock(&ps->smi_mutex);
 
-       ret = _mv88e6xxx_stats_snapshot(ds, port);
+       ret = _mv88e6xxx_stats_snapshot(ps, port);
        if (ret < 0) {
                mutex_unlock(&ps->smi_mutex);
                return;
        }
        for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
                stat = &mv88e6xxx_hw_stats[i];
-               if (mv88e6xxx_has_stat(ds, stat)) {
-                       data[j] = _mv88e6xxx_get_ethtool_stat(ds, stat, port);
+               if (mv88e6xxx_has_stat(ps, stat)) {
+                       data[j] = _mv88e6xxx_get_ethtool_stat(ps, stat, port);
                        j++;
                }
        }
@@ -830,14 +764,15 @@ mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
        mutex_unlock(&ps->smi_mutex);
 }
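
(As the helper names suggest, the snapshot-then-read sequence above is what
keeps the counters coherent: _mv88e6xxx_stats_snapshot() latches the port's
counters in hardware, and the per-counter reads that follow happen under the
same smi_mutex hold, so one `ethtool -S` pass sees a single point in time.)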
 
-int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
+static int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
 {
        return 32 * sizeof(u16);
 }
 
-void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
-                       struct ethtool_regs *regs, void *_p)
+static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
+                              struct ethtool_regs *regs, void *_p)
 {
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        u16 *p = _p;
        int i;
 
@@ -845,16 +780,20 @@ void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
 
        memset(p, 0xff, 32 * sizeof(u16));
 
+       mutex_lock(&ps->smi_mutex);
+
        for (i = 0; i < 32; i++) {
                int ret;
 
-               ret = mv88e6xxx_reg_read(ds, REG_PORT(port), i);
+               ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), i);
                if (ret >= 0)
                        p[i] = ret;
        }
+
+       mutex_unlock(&ps->smi_mutex);
 }
 
-static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
+static int _mv88e6xxx_wait(struct mv88e6xxx_priv_state *ps, int reg, int offset,
                           u16 mask)
 {
        unsigned long timeout = jiffies + HZ / 10;
@@ -862,7 +801,7 @@ static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
        while (time_before(jiffies, timeout)) {
                int ret;
 
-               ret = _mv88e6xxx_reg_read(ds, reg, offset);
+               ret = _mv88e6xxx_reg_read(ps, reg, offset);
                if (ret < 0)
                        return ret;
                if (!(ret & mask))
@@ -873,91 +812,320 @@ static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
        return -ETIMEDOUT;
 }
 
-static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
+static int mv88e6xxx_wait(struct mv88e6xxx_priv_state *ps, int reg,
+                         int offset, u16 mask)
 {
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int ret;
 
        mutex_lock(&ps->smi_mutex);
-       ret = _mv88e6xxx_wait(ds, reg, offset, mask);
+       ret = _mv88e6xxx_wait(ps, reg, offset, mask);
        mutex_unlock(&ps->smi_mutex);
 
        return ret;
 }
 
-static int _mv88e6xxx_phy_wait(struct dsa_switch *ds)
+static int _mv88e6xxx_phy_wait(struct mv88e6xxx_priv_state *ps)
 {
-       return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
+       return _mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_SMI_OP,
                               GLOBAL2_SMI_OP_BUSY);
 }
 
-int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
+static int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
 {
-       return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+       return mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
                              GLOBAL2_EEPROM_OP_LOAD);
 }
 
-int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
+static int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
 {
-       return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+       return mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
                              GLOBAL2_EEPROM_OP_BUSY);
 }
 
-static int _mv88e6xxx_atu_wait(struct dsa_switch *ds)
+static int mv88e6xxx_read_eeprom_word(struct dsa_switch *ds, int addr)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int ret;
+
+       mutex_lock(&ps->eeprom_mutex);
+
+       ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+                                 GLOBAL2_EEPROM_OP_READ |
+                                 (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
+       if (ret < 0)
+               goto error;
+
+       ret = mv88e6xxx_eeprom_busy_wait(ds);
+       if (ret < 0)
+               goto error;
+
+       ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_EEPROM_DATA);
+error:
+       mutex_unlock(&ps->eeprom_mutex);
+       return ret;
+}
+
+static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+       if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
+               return ps->eeprom_len;
+
+       return 0;
+}
+
+static int mv88e6xxx_get_eeprom(struct dsa_switch *ds,
+                               struct ethtool_eeprom *eeprom, u8 *data)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int offset;
+       int len;
+       int ret;
+
+       if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
+               return -EOPNOTSUPP;
+
+       offset = eeprom->offset;
+       len = eeprom->len;
+       eeprom->len = 0;
+
+       eeprom->magic = 0xc3ec4951;
+
+       ret = mv88e6xxx_eeprom_load_wait(ds);
+       if (ret < 0)
+               return ret;
+
+       if (offset & 1) {
+               int word;
+
+               word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
+               if (word < 0)
+                       return word;
+
+               *data++ = (word >> 8) & 0xff;
+
+               offset++;
+               len--;
+               eeprom->len++;
+       }
+
+       while (len >= 2) {
+               int word;
+
+               word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
+               if (word < 0)
+                       return word;
+
+               *data++ = word & 0xff;
+               *data++ = (word >> 8) & 0xff;
+
+               offset += 2;
+               len -= 2;
+               eeprom->len += 2;
+       }
+
+       if (len) {
+               int word;
+
+               word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
+               if (word < 0)
+                       return word;
+
+               *data++ = word & 0xff;
+
+               offset++;
+               len--;
+               eeprom->len++;
+       }
+
+       return 0;
+}
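
(The new get_eeprom hook bridges ethtool's byte-oriented view onto the chip's
16-bit EEPROM words: an odd starting offset consumes the high byte of a word,
full words are copied low byte first, and an odd trailing byte takes the low
byte of the last word. A compact user-space restatement of that head/bulk/tail
structure, assuming a hypothetical read_word() returning the 16-bit word at a
given word address:)

	#include <stdint.h>

	extern uint16_t read_word(int waddr);	/* hypothetical accessor */

	static void eeprom_read_bytes(uint8_t *buf, int off, int len)
	{
		if (off & 1) {			/* odd head: high byte of word */
			*buf++ = read_word(off >> 1) >> 8;
			off++; len--;
		}
		while (len >= 2) {		/* bulk: low byte first */
			uint16_t w = read_word(off >> 1);

			*buf++ = w & 0xff;
			*buf++ = w >> 8;
			off += 2; len -= 2;
		}
		if (len)			/* odd tail: low byte of word */
			*buf = read_word(off >> 1) & 0xff;
	}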
+
+static int mv88e6xxx_eeprom_is_readonly(struct dsa_switch *ds)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int ret;
+
+       ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP);
+       if (ret < 0)
+               return ret;
+
+       if (!(ret & GLOBAL2_EEPROM_OP_WRITE_EN))
+               return -EROFS;
+
+       return 0;
+}
+
+static int mv88e6xxx_write_eeprom_word(struct dsa_switch *ds, int addr,
+                                      u16 data)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int ret;
+
+       mutex_lock(&ps->eeprom_mutex);
+
+       ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data);
+       if (ret < 0)
+               goto error;
+
+       ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+                                 GLOBAL2_EEPROM_OP_WRITE |
+                                 (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
+       if (ret < 0)
+               goto error;
+
+       ret = mv88e6xxx_eeprom_busy_wait(ds);
+error:
+       mutex_unlock(&ps->eeprom_mutex);
+       return ret;
+}
+
+static int mv88e6xxx_set_eeprom(struct dsa_switch *ds,
+                               struct ethtool_eeprom *eeprom, u8 *data)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int offset;
+       int ret;
+       int len;
+
+       if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
+               return -EOPNOTSUPP;
+
+       if (eeprom->magic != 0xc3ec4951)
+               return -EINVAL;
+
+       ret = mv88e6xxx_eeprom_is_readonly(ds);
+       if (ret)
+               return ret;
+
+       offset = eeprom->offset;
+       len = eeprom->len;
+       eeprom->len = 0;
+
+       ret = mv88e6xxx_eeprom_load_wait(ds);
+       if (ret < 0)
+               return ret;
+
+       if (offset & 1) {
+               int word;
+
+               word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
+               if (word < 0)
+                       return word;
+
+               word = (*data++ << 8) | (word & 0xff);
+
+               ret = mv88e6xxx_write_eeprom_word(ds, offset >> 1, word);
+               if (ret < 0)
+                       return ret;
+
+               offset++;
+               len--;
+               eeprom->len++;
+       }
+
+       while (len >= 2) {
+               int word;
+
+               word = *data++;
+               word |= *data++ << 8;
+
+               ret = mv88e6xxx_write_eeprom_word(ds, offset >> 1, word);
+               if (ret < 0)
+                       return ret;
+
+               offset += 2;
+               len -= 2;
+               eeprom->len += 2;
+       }
+
+       if (len) {
+               int word;
+
+               word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
+               if (word < 0)
+                       return word;
+
+               word = (word & 0xff00) | *data++;
+
+               ret = mv88e6xxx_write_eeprom_word(ds, offset >> 1, word);
+               if (ret < 0)
+                       return ret;
+
+               offset++;
+               len--;
+               eeprom->len++;
+       }
+
+       return 0;
+}
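
(The write path is symmetric, with two guards bolted on: the caller-supplied
magic must match the 0xc3ec4951 checked in get_eeprom, and
mv88e6xxx_eeprom_is_readonly() refuses the whole operation with -EROFS when the
chip reports write-enable clear. Partial words at either edge are handled
read-modify-write, so a byte-granular `ethtool -E` never clobbers the
neighbouring byte.)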
+
+static int _mv88e6xxx_atu_wait(struct mv88e6xxx_priv_state *ps)
 {
-       return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_ATU_OP,
+       return _mv88e6xxx_wait(ps, REG_GLOBAL, GLOBAL_ATU_OP,
                               GLOBAL_ATU_OP_BUSY);
 }
 
-static int _mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr,
-                                       int regnum)
+static int _mv88e6xxx_phy_read_indirect(struct mv88e6xxx_priv_state *ps,
+                                       int addr, int regnum)
 {
        int ret;
 
-       ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
+       ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_OP,
                                   GLOBAL2_SMI_OP_22_READ | (addr << 5) |
                                   regnum);
        if (ret < 0)
                return ret;
 
-       ret = _mv88e6xxx_phy_wait(ds);
+       ret = _mv88e6xxx_phy_wait(ps);
        if (ret < 0)
                return ret;
 
-       return _mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA);
+       ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_SMI_DATA);
+
+       return ret;
 }
 
-static int _mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr,
-                                        int regnum, u16 val)
+static int _mv88e6xxx_phy_write_indirect(struct mv88e6xxx_priv_state *ps,
+                                        int addr, int regnum, u16 val)
 {
        int ret;
 
-       ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
+       ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
        if (ret < 0)
                return ret;
 
-       ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
+       ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_OP,
                                   GLOBAL2_SMI_OP_22_WRITE | (addr << 5) |
                                   regnum);
 
-       return _mv88e6xxx_phy_wait(ds);
+       return _mv88e6xxx_phy_wait(ps);
 }
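
(For the indirect PHY ops above, the target address and register number share
one op word: the address sits in bits 9:5, the register in bits 4:0. A worked
value, purely illustrative:)

	/* addr = 3, regnum = 16 (0x10):
	 *   (3 << 5) | 0x10 = 0x70, OR'd with the opcode bits
	 *   (GLOBAL2_SMI_OP_22_READ or GLOBAL2_SMI_OP_22_WRITE).
	 */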
 
-int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
+static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port,
+                            struct ethtool_eee *e)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int reg;
 
+       if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEE))
+               return -EOPNOTSUPP;
+
        mutex_lock(&ps->smi_mutex);
 
-       reg = _mv88e6xxx_phy_read_indirect(ds, port, 16);
+       reg = _mv88e6xxx_phy_read_indirect(ps, port, 16);
        if (reg < 0)
                goto out;
 
        e->eee_enabled = !!(reg & 0x0200);
        e->tx_lpi_enabled = !!(reg & 0x0100);
 
-       reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
+       reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_STATUS);
        if (reg < 0)
                goto out;
 
@@ -969,16 +1137,19 @@ out:
        return reg;
 }
 
-int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
-                     struct phy_device *phydev, struct ethtool_eee *e)
+static int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
+                            struct phy_device *phydev, struct ethtool_eee *e)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int reg;
        int ret;
 
+       if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEE))
+               return -EOPNOTSUPP;
+
        mutex_lock(&ps->smi_mutex);
 
-       ret = _mv88e6xxx_phy_read_indirect(ds, port, 16);
+       ret = _mv88e6xxx_phy_read_indirect(ps, port, 16);
        if (ret < 0)
                goto out;
 
@@ -988,28 +1159,28 @@ int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
        if (e->tx_lpi_enabled)
                reg |= 0x0100;
 
-       ret = _mv88e6xxx_phy_write_indirect(ds, port, 16, reg);
+       ret = _mv88e6xxx_phy_write_indirect(ps, port, 16, reg);
 out:
        mutex_unlock(&ps->smi_mutex);
 
        return ret;
 }
 
-static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, u16 fid, u16 cmd)
+static int _mv88e6xxx_atu_cmd(struct mv88e6xxx_priv_state *ps, u16 fid, u16 cmd)
 {
        int ret;
 
-       if (mv88e6xxx_has_fid_reg(ds)) {
-               ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid);
+       if (mv88e6xxx_has_fid_reg(ps)) {
+               ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_FID, fid);
                if (ret < 0)
                        return ret;
-       } else if (mv88e6xxx_num_databases(ds) == 256) {
+       } else if (mv88e6xxx_num_databases(ps) == 256) {
                /* ATU DBNum[7:4] are located in ATU Control 15:12 */
-               ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_CONTROL);
+               ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL);
                if (ret < 0)
                        return ret;
 
-               ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_CONTROL,
+               ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL,
                                           (ret & 0xfff) |
                                           ((fid << 8) & 0xf000));
                if (ret < 0)
@@ -1019,14 +1190,14 @@ static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, u16 fid, u16 cmd)
                cmd |= fid & 0xf;
        }
 
-       ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
+       ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
        if (ret < 0)
                return ret;
 
-       return _mv88e6xxx_atu_wait(ds);
+       return _mv88e6xxx_atu_wait(ps);
 }
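
(On the 256-database chips the FID is split rather than written to a dedicated
register: the upper nibble lands in ATU Control and the lower nibble rides in
the op word itself. A worked value, illustrative only:)

	/* fid = 0x96 on a 256-database switch:
	 *   DBNum[7:4] = 0x9 -> GLOBAL_ATU_CONTROL bits 15:12,
	 *                       via (ctrl & 0xfff) | ((0x96 << 8) & 0xf000)
	 *   DBNum[3:0] = 0x6 -> low nibble of the ATU op: cmd |= 0x96 & 0xf
	 */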
 
-static int _mv88e6xxx_atu_data_write(struct dsa_switch *ds,
+static int _mv88e6xxx_atu_data_write(struct mv88e6xxx_priv_state *ps,
                                     struct mv88e6xxx_atu_entry *entry)
 {
        u16 data = entry->state & GLOBAL_ATU_DATA_STATE_MASK;
@@ -1046,21 +1217,21 @@ static int _mv88e6xxx_atu_data_write(struct dsa_switch *ds,
                data |= (entry->portv_trunkid << shift) & mask;
        }
 
-       return _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA, data);
+       return _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_DATA, data);
 }
 
-static int _mv88e6xxx_atu_flush_move(struct dsa_switch *ds,
+static int _mv88e6xxx_atu_flush_move(struct mv88e6xxx_priv_state *ps,
                                     struct mv88e6xxx_atu_entry *entry,
                                     bool static_too)
 {
        int op;
        int err;
 
-       err = _mv88e6xxx_atu_wait(ds);
+       err = _mv88e6xxx_atu_wait(ps);
        if (err)
                return err;
 
-       err = _mv88e6xxx_atu_data_write(ds, entry);
+       err = _mv88e6xxx_atu_data_write(ps, entry);
        if (err)
                return err;
 
@@ -1072,21 +1243,22 @@ static int _mv88e6xxx_atu_flush_move(struct dsa_switch *ds,
                        GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC;
        }
 
-       return _mv88e6xxx_atu_cmd(ds, entry->fid, op);
+       return _mv88e6xxx_atu_cmd(ps, entry->fid, op);
 }
 
-static int _mv88e6xxx_atu_flush(struct dsa_switch *ds, u16 fid, bool static_too)
+static int _mv88e6xxx_atu_flush(struct mv88e6xxx_priv_state *ps,
+                               u16 fid, bool static_too)
 {
        struct mv88e6xxx_atu_entry entry = {
                .fid = fid,
                .state = 0, /* EntryState bits must be 0 */
        };
 
-       return _mv88e6xxx_atu_flush_move(ds, &entry, static_too);
+       return _mv88e6xxx_atu_flush_move(ps, &entry, static_too);
 }
 
-static int _mv88e6xxx_atu_move(struct dsa_switch *ds, u16 fid, int from_port,
-                              int to_port, bool static_too)
+static int _mv88e6xxx_atu_move(struct mv88e6xxx_priv_state *ps, u16 fid,
+                              int from_port, int to_port, bool static_too)
 {
        struct mv88e6xxx_atu_entry entry = {
                .trunk = false,
@@ -1100,14 +1272,14 @@ static int _mv88e6xxx_atu_move(struct dsa_switch *ds, u16 fid, int from_port,
        entry.portv_trunkid = (to_port & 0x0f) << 4;
        entry.portv_trunkid |= from_port & 0x0f;
 
-       return _mv88e6xxx_atu_flush_move(ds, &entry, static_too);
+       return _mv88e6xxx_atu_flush_move(ps, &entry, static_too);
 }
 
-static int _mv88e6xxx_atu_remove(struct dsa_switch *ds, u16 fid, int port,
-                                bool static_too)
+static int _mv88e6xxx_atu_remove(struct mv88e6xxx_priv_state *ps, u16 fid,
+                                int port, bool static_too)
 {
        /* Destination port 0xF means remove the entries */
-       return _mv88e6xxx_atu_move(ds, fid, port, 0x0f, static_too);
+       return _mv88e6xxx_atu_move(ps, fid, port, 0x0f, static_too);
 }
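
(The move/remove helpers pack both 4-bit port numbers into a single field,
destination in the high nibble and source in the low one, with destination 0xF
doubling as "delete". Illustrative values:)

	/* move fid entries from port 2 to port 5:
	 *   portv_trunkid = (5 & 0x0f) << 4 | (2 & 0x0f) = 0x52
	 * remove entries learned on port 2:
	 *   portv_trunkid = (0xf << 4) | (2 & 0x0f) = 0xf2
	 */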
 
 static const char * const mv88e6xxx_port_state_names[] = {
@@ -1117,12 +1289,14 @@ static const char * const mv88e6xxx_port_state_names[] = {
        [PORT_CONTROL_STATE_FORWARDING] = "Forwarding",
 };
 
-static int _mv88e6xxx_port_state(struct dsa_switch *ds, int port, u8 state)
+static int _mv88e6xxx_port_state(struct mv88e6xxx_priv_state *ps, int port,
+                                u8 state)
 {
+       struct dsa_switch *ds = ps->ds;
        int reg, ret = 0;
        u8 oldstate;
 
-       reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL);
+       reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL);
        if (reg < 0)
                return reg;
 
@@ -1137,13 +1311,13 @@ static int _mv88e6xxx_port_state(struct dsa_switch *ds, int port, u8 state)
                     oldstate == PORT_CONTROL_STATE_FORWARDING)
                    && (state == PORT_CONTROL_STATE_DISABLED ||
                        state == PORT_CONTROL_STATE_BLOCKING)) {
-                       ret = _mv88e6xxx_atu_remove(ds, 0, port, false);
+                       ret = _mv88e6xxx_atu_remove(ps, 0, port, false);
                        if (ret)
                                return ret;
                }
 
                reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
-               ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL,
+               ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL,
                                           reg);
                if (ret)
                        return ret;
@@ -1156,11 +1330,12 @@ static int _mv88e6xxx_port_state(struct dsa_switch *ds, int port, u8 state)
        return ret;
 }
 
-static int _mv88e6xxx_port_based_vlan_map(struct dsa_switch *ds, int port)
+static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_priv_state *ps,
+                                         int port)
 {
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        struct net_device *bridge = ps->ports[port].bridge_dev;
-       const u16 mask = (1 << ps->num_ports) - 1;
+       const u16 mask = (1 << ps->info->num_ports) - 1;
+       struct dsa_switch *ds = ps->ds;
        u16 output_ports = 0;
        int reg;
        int i;
@@ -1169,7 +1344,7 @@ static int _mv88e6xxx_port_based_vlan_map(struct dsa_switch *ds, int port)
        if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
                output_ports = mask;
        } else {
-               for (i = 0; i < ps->num_ports; ++i) {
+               for (i = 0; i < ps->info->num_ports; ++i) {
                        /* allow sending frames to every group member */
                        if (bridge && ps->ports[i].bridge_dev == bridge)
                                output_ports |= BIT(i);
@@ -1183,21 +1358,25 @@ static int _mv88e6xxx_port_based_vlan_map(struct dsa_switch *ds, int port)
        /* prevent frames from going back out of the port they came in on */
        output_ports &= ~BIT(port);
 
-       reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_BASE_VLAN);
+       reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_BASE_VLAN);
        if (reg < 0)
                return reg;
 
        reg &= ~mask;
        reg |= output_ports & mask;
 
-       return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN, reg);
+       return _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_BASE_VLAN, reg);
 }
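
(A worked pass through the map above, with made-up numbers: a 7-port switch
gives mask = 0x7f; say port 1 is bridged with ports 0 and 2, and port 5 is the
CPU port. The loop sets bits 0, 1 and 2 for the bridge group and bit 5 for the
CPU port, then the final step clears the port's own bit, so PORT_BASE_VLAN for
port 1 ends up allowing 0x25, i.e. ports 0, 2 and 5.)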
 
-void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port,
+                                        u8 state)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int stp_state;
 
+       if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_PORTSTATE))
+               return;
+
        switch (state) {
        case BR_STATE_DISABLED:
                stp_state = PORT_CONTROL_STATE_DISABLED;
@@ -1223,13 +1402,14 @@ void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
        schedule_work(&ps->bridge_work);
 }
 
-static int _mv88e6xxx_port_pvid(struct dsa_switch *ds, int port, u16 *new,
-                               u16 *old)
+static int _mv88e6xxx_port_pvid(struct mv88e6xxx_priv_state *ps, int port,
+                               u16 *new, u16 *old)
 {
+       struct dsa_switch *ds = ps->ds;
        u16 pvid;
        int ret;
 
-       ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_DEFAULT_VLAN);
+       ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_DEFAULT_VLAN);
        if (ret < 0)
                return ret;
 
@@ -1239,7 +1419,7 @@ static int _mv88e6xxx_port_pvid(struct dsa_switch *ds, int port, u16 *new,
                ret &= ~PORT_DEFAULT_VLAN_MASK;
                ret |= *new & PORT_DEFAULT_VLAN_MASK;
 
-               ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+               ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
                                           PORT_DEFAULT_VLAN, ret);
                if (ret < 0)
                        return ret;
@@ -1254,55 +1434,56 @@ static int _mv88e6xxx_port_pvid(struct dsa_switch *ds, int port, u16 *new,
        return 0;
 }
 
-static int _mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *pvid)
+static int _mv88e6xxx_port_pvid_get(struct mv88e6xxx_priv_state *ps,
+                                   int port, u16 *pvid)
 {
-       return _mv88e6xxx_port_pvid(ds, port, NULL, pvid);
+       return _mv88e6xxx_port_pvid(ps, port, NULL, pvid);
 }
 
-static int _mv88e6xxx_port_pvid_set(struct dsa_switch *ds, int port, u16 pvid)
+static int _mv88e6xxx_port_pvid_set(struct mv88e6xxx_priv_state *ps,
+                                   int port, u16 pvid)
 {
-       return _mv88e6xxx_port_pvid(ds, port, &pvid, NULL);
+       return _mv88e6xxx_port_pvid(ps, port, &pvid, NULL);
 }
 
-static int _mv88e6xxx_vtu_wait(struct dsa_switch *ds)
+static int _mv88e6xxx_vtu_wait(struct mv88e6xxx_priv_state *ps)
 {
-       return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_VTU_OP,
+       return _mv88e6xxx_wait(ps, REG_GLOBAL, GLOBAL_VTU_OP,
                               GLOBAL_VTU_OP_BUSY);
 }
 
-static int _mv88e6xxx_vtu_cmd(struct dsa_switch *ds, u16 op)
+static int _mv88e6xxx_vtu_cmd(struct mv88e6xxx_priv_state *ps, u16 op)
 {
        int ret;
 
-       ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_OP, op);
+       ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_OP, op);
        if (ret < 0)
                return ret;
 
-       return _mv88e6xxx_vtu_wait(ds);
+       return _mv88e6xxx_vtu_wait(ps);
 }
 
-static int _mv88e6xxx_vtu_stu_flush(struct dsa_switch *ds)
+static int _mv88e6xxx_vtu_stu_flush(struct mv88e6xxx_priv_state *ps)
 {
        int ret;
 
-       ret = _mv88e6xxx_vtu_wait(ds);
+       ret = _mv88e6xxx_vtu_wait(ps);
        if (ret < 0)
                return ret;
 
-       return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_FLUSH_ALL);
+       return _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_FLUSH_ALL);
 }
 
-static int _mv88e6xxx_vtu_stu_data_read(struct dsa_switch *ds,
+static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_priv_state *ps,
                                        struct mv88e6xxx_vtu_stu_entry *entry,
                                        unsigned int nibble_offset)
 {
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        u16 regs[3];
        int i;
        int ret;
 
        for (i = 0; i < 3; ++i) {
-               ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
+               ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
                                          GLOBAL_VTU_DATA_0_3 + i);
                if (ret < 0)
                        return ret;
@@ -1310,7 +1491,7 @@ static int _mv88e6xxx_vtu_stu_data_read(struct dsa_switch *ds,
                regs[i] = ret;
        }
 
-       for (i = 0; i < ps->num_ports; ++i) {
+       for (i = 0; i < ps->info->num_ports; ++i) {
                unsigned int shift = (i % 4) * 4 + nibble_offset;
                u16 reg = regs[i / 4];
 
@@ -1320,16 +1501,27 @@ static int _mv88e6xxx_vtu_stu_data_read(struct dsa_switch *ds,
        return 0;
 }
 
-static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch *ds,
+static int mv88e6xxx_vtu_data_read(struct mv88e6xxx_priv_state *ps,
+                                  struct mv88e6xxx_vtu_stu_entry *entry)
+{
+       return _mv88e6xxx_vtu_stu_data_read(ps, entry, 0);
+}
+
+static int mv88e6xxx_stu_data_read(struct mv88e6xxx_priv_state *ps,
+                                  struct mv88e6xxx_vtu_stu_entry *entry)
+{
+       return _mv88e6xxx_vtu_stu_data_read(ps, entry, 2);
+}
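
(The new read wrappers make the shared layout explicit: each port owns a
nibble, four ports per 16-bit data register, with the VTU member tag in the low
two bits of the nibble and the STU port state two bits above it. Port 5, for
example, decodes as:)

	/* regs[5 / 4] = regs[1], shift = (5 % 4) * 4 = 4
	 *   VTU member tag (nibble offset 0) -> regs[1] bits 5:4
	 *   STU port state (nibble offset 2) -> regs[1] bits 7:6
	 */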
+
+static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_priv_state *ps,
                                         struct mv88e6xxx_vtu_stu_entry *entry,
                                         unsigned int nibble_offset)
 {
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        u16 regs[3] = { 0 };
        int i;
        int ret;
 
-       for (i = 0; i < ps->num_ports; ++i) {
+       for (i = 0; i < ps->info->num_ports; ++i) {
                unsigned int shift = (i % 4) * 4 + nibble_offset;
                u8 data = entry->data[i];
 
@@ -1337,7 +1529,7 @@ static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch *ds,
        }
 
        for (i = 0; i < 3; ++i) {
-               ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL,
+               ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL,
                                           GLOBAL_VTU_DATA_0_3 + i, regs[i]);
                if (ret < 0)
                        return ret;
@@ -1346,27 +1538,39 @@ static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch *ds,
        return 0;
 }
 
-static int _mv88e6xxx_vtu_vid_write(struct dsa_switch *ds, u16 vid)
+static int mv88e6xxx_vtu_data_write(struct mv88e6xxx_priv_state *ps,
+                                   struct mv88e6xxx_vtu_stu_entry *entry)
+{
+       return _mv88e6xxx_vtu_stu_data_write(ps, entry, 0);
+}
+
+static int mv88e6xxx_stu_data_write(struct mv88e6xxx_priv_state *ps,
+                                   struct mv88e6xxx_vtu_stu_entry *entry)
+{
+       return _mv88e6xxx_vtu_stu_data_write(ps, entry, 2);
+}
+
+static int _mv88e6xxx_vtu_vid_write(struct mv88e6xxx_priv_state *ps, u16 vid)
 {
-       return _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID,
+       return _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID,
                                    vid & GLOBAL_VTU_VID_MASK);
 }
 
-static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds,
+static int _mv88e6xxx_vtu_getnext(struct mv88e6xxx_priv_state *ps,
                                  struct mv88e6xxx_vtu_stu_entry *entry)
 {
        struct mv88e6xxx_vtu_stu_entry next = { 0 };
        int ret;
 
-       ret = _mv88e6xxx_vtu_wait(ds);
+       ret = _mv88e6xxx_vtu_wait(ps);
        if (ret < 0)
                return ret;
 
-       ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_GET_NEXT);
+       ret = _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_VTU_GET_NEXT);
        if (ret < 0)
                return ret;
 
-       ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
+       ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_VID);
        if (ret < 0)
                return ret;
 
@@ -1374,22 +1578,22 @@ static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds,
        next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
 
        if (next.valid) {
-               ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 0);
+               ret = mv88e6xxx_vtu_data_read(ps, &next);
                if (ret < 0)
                        return ret;
 
-               if (mv88e6xxx_has_fid_reg(ds)) {
-                       ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
+               if (mv88e6xxx_has_fid_reg(ps)) {
+                       ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
                                                  GLOBAL_VTU_FID);
                        if (ret < 0)
                                return ret;
 
                        next.fid = ret & GLOBAL_VTU_FID_MASK;
-               } else if (mv88e6xxx_num_databases(ds) == 256) {
+               } else if (mv88e6xxx_num_databases(ps) == 256) {
                        /* VTU DBNum[7:4] are located in VTU Operation 11:8, and
                         * VTU DBNum[3:0] are located in VTU Operation 3:0
                         */
-                       ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
+                       ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
                                                  GLOBAL_VTU_OP);
                        if (ret < 0)
                                return ret;
@@ -1398,8 +1602,8 @@ static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds,
                        next.fid |= ret & 0xf;
                }
 
-               if (mv88e6xxx_has_stu(ds)) {
-                       ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
+               if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_STU)) {
+                       ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
                                                  GLOBAL_VTU_SID);
                        if (ret < 0)
                                return ret;
@@ -1412,27 +1616,30 @@ static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds,
        return 0;
 }
 
-int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
-                            struct switchdev_obj_port_vlan *vlan,
-                            int (*cb)(struct switchdev_obj *obj))
+static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
+                                   struct switchdev_obj_port_vlan *vlan,
+                                   int (*cb)(struct switchdev_obj *obj))
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        struct mv88e6xxx_vtu_stu_entry next;
        u16 pvid;
        int err;
 
+       if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU))
+               return -EOPNOTSUPP;
+
        mutex_lock(&ps->smi_mutex);
 
-       err = _mv88e6xxx_port_pvid_get(ds, port, &pvid);
+       err = _mv88e6xxx_port_pvid_get(ps, port, &pvid);
        if (err)
                goto unlock;
 
-       err = _mv88e6xxx_vtu_vid_write(ds, GLOBAL_VTU_VID_MASK);
+       err = _mv88e6xxx_vtu_vid_write(ps, GLOBAL_VTU_VID_MASK);
        if (err)
                goto unlock;
 
        do {
-               err = _mv88e6xxx_vtu_getnext(ds, &next);
+               err = _mv88e6xxx_vtu_getnext(ps, &next);
                if (err)
                        break;
 
@@ -1463,14 +1670,14 @@ unlock:
        return err;
 }
 
-static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds,
+static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_priv_state *ps,
                                    struct mv88e6xxx_vtu_stu_entry *entry)
 {
        u16 op = GLOBAL_VTU_OP_VTU_LOAD_PURGE;
        u16 reg = 0;
        int ret;
 
-       ret = _mv88e6xxx_vtu_wait(ds);
+       ret = _mv88e6xxx_vtu_wait(ps);
        if (ret < 0)
                return ret;
 
@@ -1478,23 +1685,23 @@ static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds,
                goto loadpurge;
 
        /* Write port member tags */
-       ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 0);
+       ret = mv88e6xxx_vtu_data_write(ps, entry);
        if (ret < 0)
                return ret;
 
-       if (mv88e6xxx_has_stu(ds)) {
+       if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_STU)) {
                reg = entry->sid & GLOBAL_VTU_SID_MASK;
-               ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
+               ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID, reg);
                if (ret < 0)
                        return ret;
        }
 
-       if (mv88e6xxx_has_fid_reg(ds)) {
+       if (mv88e6xxx_has_fid_reg(ps)) {
                reg = entry->fid & GLOBAL_VTU_FID_MASK;
-               ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_FID, reg);
+               ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_FID, reg);
                if (ret < 0)
                        return ret;
-       } else if (mv88e6xxx_num_databases(ds) == 256) {
+       } else if (mv88e6xxx_num_databases(ps) == 256) {
                /* VTU DBNum[7:4] are located in VTU Operation 11:8, and
                 * VTU DBNum[3:0] are located in VTU Operation 3:0
                 */
@@ -1505,46 +1712,46 @@ static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds,
        reg = GLOBAL_VTU_VID_VALID;
 loadpurge:
        reg |= entry->vid & GLOBAL_VTU_VID_MASK;
-       ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
+       ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID, reg);
        if (ret < 0)
                return ret;
 
-       return _mv88e6xxx_vtu_cmd(ds, op);
+       return _mv88e6xxx_vtu_cmd(ps, op);
 }
 
-static int _mv88e6xxx_stu_getnext(struct dsa_switch *ds, u8 sid,
+static int _mv88e6xxx_stu_getnext(struct mv88e6xxx_priv_state *ps, u8 sid,
                                  struct mv88e6xxx_vtu_stu_entry *entry)
 {
        struct mv88e6xxx_vtu_stu_entry next = { 0 };
        int ret;
 
-       ret = _mv88e6xxx_vtu_wait(ds);
+       ret = _mv88e6xxx_vtu_wait(ps);
        if (ret < 0)
                return ret;
 
-       ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID,
+       ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID,
                                   sid & GLOBAL_VTU_SID_MASK);
        if (ret < 0)
                return ret;
 
-       ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_GET_NEXT);
+       ret = _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_STU_GET_NEXT);
        if (ret < 0)
                return ret;
 
-       ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_SID);
+       ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_SID);
        if (ret < 0)
                return ret;
 
        next.sid = ret & GLOBAL_VTU_SID_MASK;
 
-       ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
+       ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_VID);
        if (ret < 0)
                return ret;
 
        next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
 
        if (next.valid) {
-               ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 2);
+               ret = mv88e6xxx_stu_data_read(ps, &next);
                if (ret < 0)
                        return ret;
        }
@@ -1553,13 +1760,13 @@ static int _mv88e6xxx_stu_getnext(struct dsa_switch *ds, u8 sid,
        return 0;
 }
 
-static int _mv88e6xxx_stu_loadpurge(struct dsa_switch *ds,
+static int _mv88e6xxx_stu_loadpurge(struct mv88e6xxx_priv_state *ps,
                                    struct mv88e6xxx_vtu_stu_entry *entry)
 {
        u16 reg = 0;
        int ret;
 
-       ret = _mv88e6xxx_vtu_wait(ds);
+       ret = _mv88e6xxx_vtu_wait(ps);
        if (ret < 0)
                return ret;
 
@@ -1567,40 +1774,41 @@ static int _mv88e6xxx_stu_loadpurge(struct dsa_switch *ds,
                goto loadpurge;
 
        /* Write port states */
-       ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 2);
+       ret = mv88e6xxx_stu_data_write(ps, entry);
        if (ret < 0)
                return ret;
 
        reg = GLOBAL_VTU_VID_VALID;
 loadpurge:
-       ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
+       ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID, reg);
        if (ret < 0)
                return ret;
 
        reg = entry->sid & GLOBAL_VTU_SID_MASK;
-       ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
+       ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID, reg);
        if (ret < 0)
                return ret;
 
-       return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_LOAD_PURGE);
+       return _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_STU_LOAD_PURGE);
 }
 
-static int _mv88e6xxx_port_fid(struct dsa_switch *ds, int port, u16 *new,
-                              u16 *old)
+static int _mv88e6xxx_port_fid(struct mv88e6xxx_priv_state *ps, int port,
+                              u16 *new, u16 *old)
 {
+       struct dsa_switch *ds = ps->ds;
        u16 upper_mask;
        u16 fid;
        int ret;
 
-       if (mv88e6xxx_num_databases(ds) == 4096)
+       if (mv88e6xxx_num_databases(ps) == 4096)
                upper_mask = 0xff;
-       else if (mv88e6xxx_num_databases(ds) == 256)
+       else if (mv88e6xxx_num_databases(ps) == 256)
                upper_mask = 0xf;
        else
                return -EOPNOTSUPP;
 
        /* Port's default FID bits 3:0 are located in reg 0x06, offset 12 */
-       ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_BASE_VLAN);
+       ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_BASE_VLAN);
        if (ret < 0)
                return ret;
 
@@ -1610,14 +1818,14 @@ static int _mv88e6xxx_port_fid(struct dsa_switch *ds, int port, u16 *new,
                ret &= ~PORT_BASE_VLAN_FID_3_0_MASK;
                ret |= (*new << 12) & PORT_BASE_VLAN_FID_3_0_MASK;
 
-               ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN,
+               ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_BASE_VLAN,
                                           ret);
                if (ret < 0)
                        return ret;
        }
 
        /* Port's default FID bits 11:4 are located in reg 0x05, offset 0 */
-       ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL_1);
+       ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL_1);
        if (ret < 0)
                return ret;
 
@@ -1627,7 +1835,7 @@ static int _mv88e6xxx_port_fid(struct dsa_switch *ds, int port, u16 *new,
                ret &= ~upper_mask;
                ret |= (*new >> 4) & upper_mask;
 
-               ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1,
+               ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_1,
                                           ret);
                if (ret < 0)
                        return ret;
@@ -1641,19 +1849,20 @@ static int _mv88e6xxx_port_fid(struct dsa_switch *ds, int port, u16 *new,
        return 0;
 }
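
(The port's default FID is likewise split across two registers, per the
register/offset comments above; the worked case below is illustrative only:)

	/* fid = 0x5a on a 4096-database switch (upper_mask = 0xff):
	 *   FID[3:0]  = 0xa  -> PORT_BASE_VLAN (0x06) bits 15:12,
	 *                       via (0x5a << 12) & PORT_BASE_VLAN_FID_3_0_MASK
	 *   FID[11:4] = 0x05 -> PORT_CONTROL_1 (0x05) bits 7:0,
	 *                       via (0x5a >> 4) & 0xff
	 */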
 
-static int _mv88e6xxx_port_fid_get(struct dsa_switch *ds, int port, u16 *fid)
+static int _mv88e6xxx_port_fid_get(struct mv88e6xxx_priv_state *ps,
+                                  int port, u16 *fid)
 {
-       return _mv88e6xxx_port_fid(ds, port, NULL, fid);
+       return _mv88e6xxx_port_fid(ps, port, NULL, fid);
 }
 
-static int _mv88e6xxx_port_fid_set(struct dsa_switch *ds, int port, u16 fid)
+static int _mv88e6xxx_port_fid_set(struct mv88e6xxx_priv_state *ps,
+                                  int port, u16 fid)
 {
-       return _mv88e6xxx_port_fid(ds, port, &fid, NULL);
+       return _mv88e6xxx_port_fid(ps, port, &fid, NULL);
 }
 
-static int _mv88e6xxx_fid_new(struct dsa_switch *ds, u16 *fid)
+static int _mv88e6xxx_fid_new(struct mv88e6xxx_priv_state *ps, u16 *fid)
 {
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
        struct mv88e6xxx_vtu_stu_entry vlan;
        int i, err;
@@ -1661,8 +1870,8 @@ static int _mv88e6xxx_fid_new(struct dsa_switch *ds, u16 *fid)
        bitmap_zero(fid_bitmap, MV88E6XXX_N_FID);
 
        /* Set every FID bit used by the (un)bridged ports */
-       for (i = 0; i < ps->num_ports; ++i) {
-               err = _mv88e6xxx_port_fid_get(ds, i, fid);
+       for (i = 0; i < ps->info->num_ports; ++i) {
+               err = _mv88e6xxx_port_fid_get(ps, i, fid);
                if (err)
                        return err;
 
@@ -1670,12 +1879,12 @@ static int _mv88e6xxx_fid_new(struct dsa_switch *ds, u16 *fid)
        }
 
        /* Set every FID bit used by the VLAN entries */
-       err = _mv88e6xxx_vtu_vid_write(ds, GLOBAL_VTU_VID_MASK);
+       err = _mv88e6xxx_vtu_vid_write(ps, GLOBAL_VTU_VID_MASK);
        if (err)
                return err;
 
        do {
-               err = _mv88e6xxx_vtu_getnext(ds, &vlan);
+               err = _mv88e6xxx_vtu_getnext(ps, &vlan);
                if (err)
                        return err;
 
@@ -1689,35 +1898,35 @@ static int _mv88e6xxx_fid_new(struct dsa_switch *ds, u16 *fid)
	 * databases are not needed. Return the next available positive one.
         */
        *fid = find_next_zero_bit(fid_bitmap, MV88E6XXX_N_FID, 1);
-       if (unlikely(*fid >= mv88e6xxx_num_databases(ds)))
+       if (unlikely(*fid >= mv88e6xxx_num_databases(ps)))
                return -ENOSPC;
 
        /* Clear the database */
-       return _mv88e6xxx_atu_flush(ds, *fid, true);
+       return _mv88e6xxx_atu_flush(ps, *fid, true);
 }
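
(Note the allocator starts its search at bit 1: FID 0 is the reset value
meaning multiple databases are not in use, so it is never handed out here, and
a freshly allocated FID gets its address database flushed before first use.)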
 
-static int _mv88e6xxx_vtu_new(struct dsa_switch *ds, u16 vid,
+static int _mv88e6xxx_vtu_new(struct mv88e6xxx_priv_state *ps, u16 vid,
                              struct mv88e6xxx_vtu_stu_entry *entry)
 {
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       struct dsa_switch *ds = ps->ds;
        struct mv88e6xxx_vtu_stu_entry vlan = {
                .valid = true,
                .vid = vid,
        };
        int i, err;
 
-       err = _mv88e6xxx_fid_new(ds, &vlan.fid);
+       err = _mv88e6xxx_fid_new(ps, &vlan.fid);
        if (err)
                return err;
 
        /* exclude all ports except the CPU and DSA ports */
-       for (i = 0; i < ps->num_ports; ++i)
+       for (i = 0; i < ps->info->num_ports; ++i)
                vlan.data[i] = dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)
                        ? GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED
                        : GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
 
-       if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
-           mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
+       if (mv88e6xxx_6097_family(ps) || mv88e6xxx_6165_family(ps) ||
+           mv88e6xxx_6351_family(ps) || mv88e6xxx_6352_family(ps)) {
                struct mv88e6xxx_vtu_stu_entry vstp;
 
                /* Adding a VTU entry requires a valid STU entry. As VSTP is not
@@ -1725,7 +1934,7 @@ static int _mv88e6xxx_vtu_new(struct dsa_switch *ds, u16 vid,
		 * entries. Thus, validate SID 0.
                 */
                vlan.sid = 0;
-               err = _mv88e6xxx_stu_getnext(ds, GLOBAL_VTU_SID_MASK, &vstp);
+               err = _mv88e6xxx_stu_getnext(ps, GLOBAL_VTU_SID_MASK, &vstp);
                if (err)
                        return err;
 
@@ -1734,7 +1943,7 @@ static int _mv88e6xxx_vtu_new(struct dsa_switch *ds, u16 vid,
                        vstp.valid = true;
                        vstp.sid = vlan.sid;
 
-                       err = _mv88e6xxx_stu_loadpurge(ds, &vstp);
+                       err = _mv88e6xxx_stu_loadpurge(ps, &vstp);
                        if (err)
                                return err;
                }
@@ -1744,7 +1953,7 @@ static int _mv88e6xxx_vtu_new(struct dsa_switch *ds, u16 vid,
        return 0;
 }
 
-static int _mv88e6xxx_vtu_get(struct dsa_switch *ds, u16 vid,
+static int _mv88e6xxx_vtu_get(struct mv88e6xxx_priv_state *ps, u16 vid,
                              struct mv88e6xxx_vtu_stu_entry *entry, bool creat)
 {
        int err;
@@ -1752,11 +1961,11 @@ static int _mv88e6xxx_vtu_get(struct dsa_switch *ds, u16 vid,
        if (!vid)
                return -EINVAL;
 
-       err = _mv88e6xxx_vtu_vid_write(ds, vid - 1);
+       err = _mv88e6xxx_vtu_vid_write(ps, vid - 1);
        if (err)
                return err;
 
-       err = _mv88e6xxx_vtu_getnext(ds, entry);
+       err = _mv88e6xxx_vtu_getnext(ps, entry);
        if (err)
                return err;
 
@@ -1767,7 +1976,7 @@ static int _mv88e6xxx_vtu_get(struct dsa_switch *ds, u16 vid,
		 * -EOPNOTSUPP to inform the bridge about a possible software VLAN.
                 */
 
-               err = _mv88e6xxx_vtu_new(ds, vid, entry);
+               err = _mv88e6xxx_vtu_new(ps, vid, entry);
        }
 
        return err;
@@ -1785,12 +1994,12 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
 
        mutex_lock(&ps->smi_mutex);
 
-       err = _mv88e6xxx_vtu_vid_write(ds, vid_begin - 1);
+       err = _mv88e6xxx_vtu_vid_write(ps, vid_begin - 1);
        if (err)
                goto unlock;
 
        do {
-               err = _mv88e6xxx_vtu_getnext(ds, &vlan);
+               err = _mv88e6xxx_vtu_getnext(ps, &vlan);
                if (err)
                        goto unlock;
 
@@ -1800,7 +2009,7 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
                if (vlan.vid > vid_end)
                        break;
 
-               for (i = 0; i < ps->num_ports; ++i) {
+               for (i = 0; i < ps->info->num_ports; ++i) {
                        if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
                                continue;
 
@@ -1834,17 +2043,20 @@ static const char * const mv88e6xxx_port_8021q_mode_names[] = {
        [PORT_CONTROL_2_8021Q_SECURE] = "Secure",
 };
 
-int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
-                                 bool vlan_filtering)
+static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
+                                        bool vlan_filtering)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        u16 old, new = vlan_filtering ? PORT_CONTROL_2_8021Q_SECURE :
                PORT_CONTROL_2_8021Q_DISABLED;
        int ret;
 
+       if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU))
+               return -EOPNOTSUPP;
+
        mutex_lock(&ps->smi_mutex);
 
-       ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL_2);
+       ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL_2);
        if (ret < 0)
                goto unlock;
 
@@ -1854,7 +2066,7 @@ int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
                ret &= ~PORT_CONTROL_2_8021Q_MASK;
                ret |= new & PORT_CONTROL_2_8021Q_MASK;
 
-               ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_2,
+               ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_2,
                                           ret);
                if (ret < 0)
                        goto unlock;
@@ -1871,12 +2083,16 @@ unlock:
        return ret;
 }
 
-int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
-                               const struct switchdev_obj_port_vlan *vlan,
-                               struct switchdev_trans *trans)
+static int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
+                                      const struct switchdev_obj_port_vlan *vlan,
+                                      struct switchdev_trans *trans)
 {
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int err;
 
+       if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU))
+               return -EOPNOTSUPP;
+
        /* If the requested port doesn't belong to the same bridge as the VLAN
	 * members, do not support it (yet) and fall back to software VLAN.
         */
@@ -1891,13 +2107,13 @@ int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
        return 0;
 }
 
-static int _mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid,
-                                   bool untagged)
+static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_priv_state *ps, int port,
+                                   u16 vid, bool untagged)
 {
        struct mv88e6xxx_vtu_stu_entry vlan;
        int err;
 
-       err = _mv88e6xxx_vtu_get(ds, vid, &vlan, true);
+       err = _mv88e6xxx_vtu_get(ps, vid, &vlan, true);
        if (err)
                return err;
 
@@ -1905,39 +2121,43 @@ static int _mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid,
                GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED :
                GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED;
 
-       return _mv88e6xxx_vtu_loadpurge(ds, &vlan);
+       return _mv88e6xxx_vtu_loadpurge(ps, &vlan);
 }
 
-void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
-                            const struct switchdev_obj_port_vlan *vlan,
-                            struct switchdev_trans *trans)
+static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
+                                   const struct switchdev_obj_port_vlan *vlan,
+                                   struct switchdev_trans *trans)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
        bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
        u16 vid;
 
+       if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU))
+               return;
+
        mutex_lock(&ps->smi_mutex);
 
        for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
-               if (_mv88e6xxx_port_vlan_add(ds, port, vid, untagged))
+               if (_mv88e6xxx_port_vlan_add(ps, port, vid, untagged))
                        netdev_err(ds->ports[port], "failed to add VLAN %d%c\n",
                                   vid, untagged ? 'u' : 't');
 
-       if (pvid && _mv88e6xxx_port_pvid_set(ds, port, vlan->vid_end))
+       if (pvid && _mv88e6xxx_port_pvid_set(ps, port, vlan->vid_end))
                netdev_err(ds->ports[port], "failed to set PVID %d\n",
                           vlan->vid_end);
 
        mutex_unlock(&ps->smi_mutex);
 }
 
-static int _mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
+static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_priv_state *ps,
+                                   int port, u16 vid)
 {
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       struct dsa_switch *ds = ps->ds;
        struct mv88e6xxx_vtu_stu_entry vlan;
        int i, err;
 
-       err = _mv88e6xxx_vtu_get(ds, vid, &vlan, false);
+       err = _mv88e6xxx_vtu_get(ps, vid, &vlan, false);
        if (err)
                return err;
 
@@ -1949,7 +2169,7 @@ static int _mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
 
        /* keep the VLAN unless all ports are excluded */
        vlan.valid = false;
-       for (i = 0; i < ps->num_ports; ++i) {
+       for (i = 0; i < ps->info->num_ports; ++i) {
                if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
                        continue;
 
@@ -1959,33 +2179,36 @@ static int _mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
                }
        }
 
-       err = _mv88e6xxx_vtu_loadpurge(ds, &vlan);
+       err = _mv88e6xxx_vtu_loadpurge(ps, &vlan);
        if (err)
                return err;
 
-       return _mv88e6xxx_atu_remove(ds, vlan.fid, port, false);
+       return _mv88e6xxx_atu_remove(ps, vlan.fid, port, false);
 }
 
-int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
-                           const struct switchdev_obj_port_vlan *vlan)
+static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
+                                  const struct switchdev_obj_port_vlan *vlan)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        u16 pvid, vid;
        int err = 0;
 
+       if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU))
+               return -EOPNOTSUPP;
+
        mutex_lock(&ps->smi_mutex);
 
-       err = _mv88e6xxx_port_pvid_get(ds, port, &pvid);
+       err = _mv88e6xxx_port_pvid_get(ps, port, &pvid);
        if (err)
                goto unlock;
 
        for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
-               err = _mv88e6xxx_port_vlan_del(ds, port, vid);
+               err = _mv88e6xxx_port_vlan_del(ps, port, vid);
                if (err)
                        goto unlock;
 
                if (vid == pvid) {
-                       err = _mv88e6xxx_port_pvid_set(ds, port, 0);
+                       err = _mv88e6xxx_port_pvid_set(ps, port, 0);
                        if (err)
                                goto unlock;
                }
@@ -1997,14 +2220,14 @@ unlock:
        return err;
 }
 
-static int _mv88e6xxx_atu_mac_write(struct dsa_switch *ds,
+static int _mv88e6xxx_atu_mac_write(struct mv88e6xxx_priv_state *ps,
                                    const unsigned char *addr)
 {
        int i, ret;
 
        for (i = 0; i < 3; i++) {
                ret = _mv88e6xxx_reg_write(
-                       ds, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
+                       ps, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
                        (addr[i * 2] << 8) | addr[i * 2 + 1]);
                if (ret < 0)
                        return ret;
@@ -2013,12 +2236,13 @@ static int _mv88e6xxx_atu_mac_write(struct dsa_switch *ds,
        return 0;
 }
 
-static int _mv88e6xxx_atu_mac_read(struct dsa_switch *ds, unsigned char *addr)
+static int _mv88e6xxx_atu_mac_read(struct mv88e6xxx_priv_state *ps,
+                                  unsigned char *addr)
 {
        int i, ret;
 
        for (i = 0; i < 3; i++) {
-               ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
+               ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
                                          GLOBAL_ATU_MAC_01 + i);
                if (ret < 0)
                        return ret;
@@ -2029,27 +2253,27 @@ static int _mv88e6xxx_atu_mac_read(struct dsa_switch *ds, unsigned char *addr)
        return 0;
 }
 
-static int _mv88e6xxx_atu_load(struct dsa_switch *ds,
+static int _mv88e6xxx_atu_load(struct mv88e6xxx_priv_state *ps,
                               struct mv88e6xxx_atu_entry *entry)
 {
        int ret;
 
-       ret = _mv88e6xxx_atu_wait(ds);
+       ret = _mv88e6xxx_atu_wait(ps);
        if (ret < 0)
                return ret;
 
-       ret = _mv88e6xxx_atu_mac_write(ds, entry->mac);
+       ret = _mv88e6xxx_atu_mac_write(ps, entry->mac);
        if (ret < 0)
                return ret;
 
-       ret = _mv88e6xxx_atu_data_write(ds, entry);
+       ret = _mv88e6xxx_atu_data_write(ps, entry);
        if (ret < 0)
                return ret;
 
-       return _mv88e6xxx_atu_cmd(ds, entry->fid, GLOBAL_ATU_OP_LOAD_DB);
+       return _mv88e6xxx_atu_cmd(ps, entry->fid, GLOBAL_ATU_OP_LOAD_DB);
 }
 
-static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port,
+static int _mv88e6xxx_port_fdb_load(struct mv88e6xxx_priv_state *ps, int port,
                                    const unsigned char *addr, u16 vid,
                                    u8 state)
 {
@@ -2059,9 +2283,9 @@ static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port,
 
        /* Null VLAN ID corresponds to the port private database */
        if (vid == 0)
-               err = _mv88e6xxx_port_fid_get(ds, port, &vlan.fid);
+               err = _mv88e6xxx_port_fid_get(ps, port, &vlan.fid);
        else
-               err = _mv88e6xxx_vtu_get(ds, vid, &vlan, false);
+               err = _mv88e6xxx_vtu_get(ps, vid, &vlan, false);
        if (err)
                return err;
 
@@ -2073,49 +2297,60 @@ static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port,
                entry.portv_trunkid = BIT(port);
        }
 
-       return _mv88e6xxx_atu_load(ds, &entry);
+       return _mv88e6xxx_atu_load(ps, &entry);
 }
 
-int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
-                              const struct switchdev_obj_port_fdb *fdb,
-                              struct switchdev_trans *trans)
+static int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
+                                     const struct switchdev_obj_port_fdb *fdb,
+                                     struct switchdev_trans *trans)
 {
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+       if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU))
+               return -EOPNOTSUPP;
+
        /* We don't need any dynamic resource from the kernel (yet),
         * so skip the prepare phase.
         */
        return 0;
 }
 
-void mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
-                           const struct switchdev_obj_port_fdb *fdb,
-                           struct switchdev_trans *trans)
+static void mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
+                                  const struct switchdev_obj_port_fdb *fdb,
+                                  struct switchdev_trans *trans)
 {
        int state = is_multicast_ether_addr(fdb->addr) ?
                GLOBAL_ATU_DATA_STATE_MC_STATIC :
                GLOBAL_ATU_DATA_STATE_UC_STATIC;
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 
+       if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU))
+               return;
+
        mutex_lock(&ps->smi_mutex);
-       if (_mv88e6xxx_port_fdb_load(ds, port, fdb->addr, fdb->vid, state))
+       if (_mv88e6xxx_port_fdb_load(ps, port, fdb->addr, fdb->vid, state))
                netdev_err(ds->ports[port], "failed to load MAC address\n");
        mutex_unlock(&ps->smi_mutex);
 }
 
-int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
-                          const struct switchdev_obj_port_fdb *fdb)
+static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
+                                 const struct switchdev_obj_port_fdb *fdb)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int ret;
 
+       if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU))
+               return -EOPNOTSUPP;
+
        mutex_lock(&ps->smi_mutex);
-       ret = _mv88e6xxx_port_fdb_load(ds, port, fdb->addr, fdb->vid,
+       ret = _mv88e6xxx_port_fdb_load(ps, port, fdb->addr, fdb->vid,
                                       GLOBAL_ATU_DATA_STATE_UNUSED);
        mutex_unlock(&ps->smi_mutex);
 
        return ret;
 }
 
-static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
+static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_priv_state *ps, u16 fid,
                                  struct mv88e6xxx_atu_entry *entry)
 {
        struct mv88e6xxx_atu_entry next = { 0 };
@@ -2123,19 +2358,19 @@ static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
 
        next.fid = fid;
 
-       ret = _mv88e6xxx_atu_wait(ds);
+       ret = _mv88e6xxx_atu_wait(ps);
        if (ret < 0)
                return ret;
 
-       ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
+       ret = _mv88e6xxx_atu_cmd(ps, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
        if (ret < 0)
                return ret;
 
-       ret = _mv88e6xxx_atu_mac_read(ds, next.mac);
+       ret = _mv88e6xxx_atu_mac_read(ps, next.mac);
        if (ret < 0)
                return ret;
 
-       ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
+       ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_ATU_DATA);
        if (ret < 0)
                return ret;
 
@@ -2160,8 +2395,8 @@ static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
        return 0;
 }
 
-static int _mv88e6xxx_port_fdb_dump_one(struct dsa_switch *ds, u16 fid, u16 vid,
-                                       int port,
+static int _mv88e6xxx_port_fdb_dump_one(struct mv88e6xxx_priv_state *ps,
+                                       u16 fid, u16 vid, int port,
                                        struct switchdev_obj_port_fdb *fdb,
                                        int (*cb)(struct switchdev_obj *obj))
 {
@@ -2170,12 +2405,12 @@ static int _mv88e6xxx_port_fdb_dump_one(struct dsa_switch *ds, u16 fid, u16 vid,
        };
        int err;
 
-       err = _mv88e6xxx_atu_mac_write(ds, addr.mac);
+       err = _mv88e6xxx_atu_mac_write(ps, addr.mac);
        if (err)
                return err;
 
        do {
-               err = _mv88e6xxx_atu_getnext(ds, fid, &addr);
+               err = _mv88e6xxx_atu_getnext(ps, fid, &addr);
                if (err)
                        break;
 
@@ -2201,9 +2436,9 @@ static int _mv88e6xxx_port_fdb_dump_one(struct dsa_switch *ds, u16 fid, u16 vid,
        return err;
 }
 
-int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
-                           struct switchdev_obj_port_fdb *fdb,
-                           int (*cb)(struct switchdev_obj *obj))
+static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
+                                  struct switchdev_obj_port_fdb *fdb,
+                                  int (*cb)(struct switchdev_obj *obj))
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        struct mv88e6xxx_vtu_stu_entry vlan = {
@@ -2212,31 +2447,34 @@ int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
        u16 fid;
        int err;
 
+       if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU))
+               return -EOPNOTSUPP;
+
        mutex_lock(&ps->smi_mutex);
 
        /* Dump port's default Filtering Information Database (VLAN ID 0) */
-       err = _mv88e6xxx_port_fid_get(ds, port, &fid);
+       err = _mv88e6xxx_port_fid_get(ps, port, &fid);
        if (err)
                goto unlock;
 
-       err = _mv88e6xxx_port_fdb_dump_one(ds, fid, 0, port, fdb, cb);
+       err = _mv88e6xxx_port_fdb_dump_one(ps, fid, 0, port, fdb, cb);
        if (err)
                goto unlock;
 
        /* Dump VLANs' Filtering Information Databases */
-       err = _mv88e6xxx_vtu_vid_write(ds, vlan.vid);
+       err = _mv88e6xxx_vtu_vid_write(ps, vlan.vid);
        if (err)
                goto unlock;
 
        do {
-               err = _mv88e6xxx_vtu_getnext(ds, &vlan);
+               err = _mv88e6xxx_vtu_getnext(ps, &vlan);
                if (err)
                        break;
 
                if (!vlan.valid)
                        break;
 
-               err = _mv88e6xxx_port_fdb_dump_one(ds, vlan.fid, vlan.vid, port,
+               err = _mv88e6xxx_port_fdb_dump_one(ps, vlan.fid, vlan.vid, port,
                                                   fdb, cb);
                if (err)
                        break;
@@ -2248,68 +2486,50 @@ unlock:
        return err;
 }
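
mv88e6xxx_port_fdb_dump() above walks the port's default database first, then the database of every valid VTU entry, invoking the switchdev callback once per ATU entry found. A hypothetical consumer of that walk (name and body invented for the sketch; a nonzero return from cb() presumably aborts the dump, since every error breaks the loops above):

struct switchdev_obj;           /* opaque here; the real type is the kernel's */

static int count_fdb_entries(struct switchdev_obj *obj)
{
        static int n;

        (void)obj;      /* a real callback would copy out the FDB fields */
        n++;
        return 0;       /* nonzero would stop the walk early */
}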
 
-int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
-                              struct net_device *bridge)
+static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
+                                     struct net_device *bridge)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       u16 fid;
-       int i, err;
-
-       mutex_lock(&ps->smi_mutex);
+       int i, err = 0;
 
-       /* Get or create the bridge FID and assign it to the port */
-       for (i = 0; i < ps->num_ports; ++i)
-               if (ps->ports[i].bridge_dev == bridge)
-                       break;
-
-       if (i < ps->num_ports)
-               err = _mv88e6xxx_port_fid_get(ds, i, &fid);
-       else
-               err = _mv88e6xxx_fid_new(ds, &fid);
-       if (err)
-               goto unlock;
+       if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VLANTABLE))
+               return -EOPNOTSUPP;
 
-       err = _mv88e6xxx_port_fid_set(ds, port, fid);
-       if (err)
-               goto unlock;
+       mutex_lock(&ps->smi_mutex);
 
        /* Assign the bridge and remap each port's VLANTable */
        ps->ports[port].bridge_dev = bridge;
 
-       for (i = 0; i < ps->num_ports; ++i) {
+       for (i = 0; i < ps->info->num_ports; ++i) {
                if (ps->ports[i].bridge_dev == bridge) {
-                       err = _mv88e6xxx_port_based_vlan_map(ds, i);
+                       err = _mv88e6xxx_port_based_vlan_map(ps, i);
                        if (err)
                                break;
                }
        }
 
-unlock:
        mutex_unlock(&ps->smi_mutex);
 
        return err;
 }
 
-void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
+static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        struct net_device *bridge = ps->ports[port].bridge_dev;
-       u16 fid;
        int i;
 
-       mutex_lock(&ps->smi_mutex);
+       if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VLANTABLE))
+               return;
 
-       /* Give the port a fresh Filtering Information Database */
-       if (_mv88e6xxx_fid_new(ds, &fid) ||
-           _mv88e6xxx_port_fid_set(ds, port, fid))
-               netdev_warn(ds->ports[port], "failed to assign a new FID\n");
+       mutex_lock(&ps->smi_mutex);
 
        /* Unassign the bridge and remap each port's VLANTable */
        ps->ports[port].bridge_dev = NULL;
 
-       for (i = 0; i < ps->num_ports; ++i)
+       for (i = 0; i < ps->info->num_ports; ++i)
                if (i == port || ps->ports[i].bridge_dev == bridge)
-                       if (_mv88e6xxx_port_based_vlan_map(ds, i))
+                       if (_mv88e6xxx_port_based_vlan_map(ps, i))
                                netdev_warn(ds->ports[i], "failed to remap\n");
 
        mutex_unlock(&ps->smi_mutex);
@@ -2322,63 +2542,126 @@ static void mv88e6xxx_bridge_work(struct work_struct *work)
        int port;
 
        ps = container_of(work, struct mv88e6xxx_priv_state, bridge_work);
-       ds = ((struct dsa_switch *)ps) - 1;
+       ds = ps->ds;
 
        mutex_lock(&ps->smi_mutex);
 
-       for (port = 0; port < ps->num_ports; ++port)
+       for (port = 0; port < ps->info->num_ports; ++port)
                if (test_and_clear_bit(port, ps->port_state_update_mask) &&
-                   _mv88e6xxx_port_state(ds, port, ps->ports[port].state))
-                       netdev_warn(ds->ports[port], "failed to update state to %s\n",
+                   _mv88e6xxx_port_state(ps, port, ps->ports[port].state))
+                       netdev_warn(ds->ports[port],
+                                   "failed to update state to %s\n",
                                    mv88e6xxx_port_state_names[ps->ports[port].state]);
 
        mutex_unlock(&ps->smi_mutex);
 }
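
The ds = ((struct dsa_switch *)ps) - 1 line removed here only worked because DSA allocates the driver's private state directly behind the dsa_switch structure; the patch replaces that fragile pointer arithmetic with an explicit ps->ds back-pointer, set in mv88e6xxx_setup() below. A standalone illustration of why the old trick held, assuming that allocation layout:

#include <stdlib.h>

struct dsa_switch { int index; };       /* reduced stand-in */
struct priv { struct dsa_switch *ds; }; /* new explicit back-pointer */

int main(void)
{
        /* Layout assumption: priv lives immediately after dsa_switch. */
        char *blob = malloc(sizeof(struct dsa_switch) + sizeof(struct priv));
        struct dsa_switch *ds = (struct dsa_switch *)blob;
        struct priv *ps = (struct priv *)(ds + 1);
        int same;

        /* Old hack: step one dsa_switch backwards from the priv pointer. */
        struct dsa_switch *old_way = ((struct dsa_switch *)ps) - 1;

        /* New way: just keep the pointer around. */
        ps->ds = ds;

        same = (old_way == ds) && (ps->ds == ds);
        free(blob);
        return same ? 0 : 1;    /* exits 0: both resolve to the same switch */
}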
 
-static int _mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
-                                    int reg, int val)
+static int _mv88e6xxx_phy_page_write(struct mv88e6xxx_priv_state *ps,
+                                    int port, int page, int reg, int val)
 {
        int ret;
 
-       ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
+       ret = _mv88e6xxx_phy_write_indirect(ps, port, 0x16, page);
        if (ret < 0)
                goto restore_page_0;
 
-       ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
+       ret = _mv88e6xxx_phy_write_indirect(ps, port, reg, val);
 restore_page_0:
-       _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
+       _mv88e6xxx_phy_write_indirect(ps, port, 0x16, 0x0);
 
        return ret;
 }
 
-static int _mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page,
-                                   int reg)
+static int _mv88e6xxx_phy_page_read(struct mv88e6xxx_priv_state *ps,
+                                   int port, int page, int reg)
 {
        int ret;
 
-       ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
+       ret = _mv88e6xxx_phy_write_indirect(ps, port, 0x16, page);
        if (ret < 0)
                goto restore_page_0;
 
-       ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
+       ret = _mv88e6xxx_phy_read_indirect(ps, port, reg);
 restore_page_0:
-       _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
+       _mv88e6xxx_phy_write_indirect(ps, port, 0x16, 0x0);
+
+       return ret;
+}
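
Both page helpers share one discipline: select the target page through PHY register 0x16, do the access, then always restore page 0, even when the access failed, so an error can never leave the PHY parked on the wrong page. The same select/act/restore shape in miniature, against a fake register file (the fake does not model pages; only the control flow matters):

static int phy[32];                     /* fake register file */
#define PAGE_REG 0x16

static int phy_write(int reg, int val) { phy[reg] = val; return 0; }
static int phy_read(int reg) { return phy[reg]; }

static int phy_page_read(int page, int reg)
{
        int ret = phy_write(PAGE_REG, page);    /* select page */

        if (ret == 0)
                ret = phy_read(reg);            /* the actual access */
        phy_write(PAGE_REG, 0);                 /* always restore page 0 */
        return ret;
}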
+
+static int mv88e6xxx_switch_reset(struct mv88e6xxx_priv_state *ps)
+{
+       bool ppu_active = mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU_ACTIVE);
+       u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
+       struct gpio_desc *gpiod = ps->reset;
+       unsigned long timeout;
+       int ret;
+       int i;
+
+       /* Set all ports to the disabled state. */
+       for (i = 0; i < ps->info->num_ports; i++) {
+               ret = _mv88e6xxx_reg_read(ps, REG_PORT(i), PORT_CONTROL);
+               if (ret < 0)
+                       return ret;
+
+               ret = _mv88e6xxx_reg_write(ps, REG_PORT(i), PORT_CONTROL,
+                                          ret & 0xfffc);
+               if (ret)
+                       return ret;
+       }
+
+       /* Wait for transmit queues to drain. */
+       usleep_range(2000, 4000);
+
+       /* If there is a gpio connected to the reset pin, toggle it */
+       if (gpiod) {
+               gpiod_set_value_cansleep(gpiod, 1);
+               usleep_range(10000, 20000);
+               gpiod_set_value_cansleep(gpiod, 0);
+               usleep_range(10000, 20000);
+       }
+
+       /* Reset the switch. Keep the PPU active if requested. The PPU
+        * needs to be active to support indirect phy register access
+        * through global registers 0x18 and 0x19.
+        */
+       if (ppu_active)
+               ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, 0x04, 0xc000);
+       else
+               ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, 0x04, 0xc400);
+       if (ret)
+               return ret;
+
+       /* Wait up to one second for reset to complete. */
+       timeout = jiffies + 1 * HZ;
+       while (time_before(jiffies, timeout)) {
+               ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, 0x00);
+               if (ret < 0)
+                       return ret;
+
+               if ((ret & is_reset) == is_reset)
+                       break;
+               usleep_range(1000, 2000);
+       }
+       if (time_after(jiffies, timeout))
+               ret = -ETIMEDOUT;
+       else
+               ret = 0;
 
        return ret;
 }
 
-static int mv88e6xxx_power_on_serdes(struct dsa_switch *ds)
+static int mv88e6xxx_power_on_serdes(struct mv88e6xxx_priv_state *ps)
 {
        int ret;
 
-       ret = _mv88e6xxx_phy_page_read(ds, REG_FIBER_SERDES, PAGE_FIBER_SERDES,
+       ret = _mv88e6xxx_phy_page_read(ps, REG_FIBER_SERDES, PAGE_FIBER_SERDES,
                                       MII_BMCR);
        if (ret < 0)
                return ret;
 
        if (ret & BMCR_PDOWN) {
                ret &= ~BMCR_PDOWN;
-               ret = _mv88e6xxx_phy_page_write(ds, REG_FIBER_SERDES,
+               ret = _mv88e6xxx_phy_page_write(ps, REG_FIBER_SERDES,
                                                PAGE_FIBER_SERDES, MII_BMCR,
                                                ret);
        }
@@ -2386,32 +2669,30 @@ static int mv88e6xxx_power_on_serdes(struct dsa_switch *ds)
        return ret;
 }
 
-static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
+static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port)
 {
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       struct dsa_switch *ds = ps->ds;
        int ret;
        u16 reg;
 
-       mutex_lock(&ps->smi_mutex);
-
-       if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-           mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-           mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
-           mv88e6xxx_6065_family(ds) || mv88e6xxx_6320_family(ds)) {
+       if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+           mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+           mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) ||
+           mv88e6xxx_6065_family(ps) || mv88e6xxx_6320_family(ps)) {
                /* MAC Forcing register: don't force link, speed,
                 * duplex or flow control state to any particular
                 * values on physical ports, but force the CPU port
                 * and all DSA ports to their maximum bandwidth and
                 * full duplex.
                 */
-               reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
+               reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_PCS_CTRL);
                if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
                        reg &= ~PORT_PCS_CTRL_UNFORCED;
                        reg |= PORT_PCS_CTRL_FORCE_LINK |
                                PORT_PCS_CTRL_LINK_UP |
                                PORT_PCS_CTRL_DUPLEX_FULL |
                                PORT_PCS_CTRL_FORCE_DUPLEX;
-                       if (mv88e6xxx_6065_family(ds))
+                       if (mv88e6xxx_6065_family(ps))
                                reg |= PORT_PCS_CTRL_100;
                        else
                                reg |= PORT_PCS_CTRL_1000;
@@ -2419,10 +2700,10 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
                        reg |= PORT_PCS_CTRL_UNFORCED;
                }
 
-               ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+               ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
                                           PORT_PCS_CTRL, reg);
                if (ret)
-                       goto abort;
+                       return ret;
        }
 
        /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
@@ -2440,19 +2721,19 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
         * forwarding of unknown unicasts and multicasts.
         */
        reg = 0;
-       if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-           mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-           mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
-           mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds))
+       if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+           mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+           mv88e6xxx_6095_family(ps) || mv88e6xxx_6065_family(ps) ||
+           mv88e6xxx_6185_family(ps) || mv88e6xxx_6320_family(ps))
                reg = PORT_CONTROL_IGMP_MLD_SNOOP |
                PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
                PORT_CONTROL_STATE_FORWARDING;
        if (dsa_is_cpu_port(ds, port)) {
-               if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
+               if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps))
                        reg |= PORT_CONTROL_DSA_TAG;
-               if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-                   mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-                   mv88e6xxx_6320_family(ds)) {
+               if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+                   mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+                   mv88e6xxx_6320_family(ps)) {
                        if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
                                reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
                        else
@@ -2461,20 +2742,20 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
                                PORT_CONTROL_FORWARD_UNKNOWN_MC;
                }
 
-               if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-                   mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-                   mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
-                   mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds)) {
+               if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+                   mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+                   mv88e6xxx_6095_family(ps) || mv88e6xxx_6065_family(ps) ||
+                   mv88e6xxx_6185_family(ps) || mv88e6xxx_6320_family(ps)) {
                        if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
                                reg |= PORT_CONTROL_EGRESS_ADD_TAG;
                }
        }
        if (dsa_is_dsa_port(ds, port)) {
-               if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
+               if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps))
                        reg |= PORT_CONTROL_DSA_TAG;
-               if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-                   mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-                   mv88e6xxx_6320_family(ds)) {
+               if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+                   mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+                   mv88e6xxx_6320_family(ps)) {
                        reg |= PORT_CONTROL_FRAME_MODE_DSA;
                }
 
@@ -2483,26 +2764,26 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
                                PORT_CONTROL_FORWARD_UNKNOWN_MC;
        }
        if (reg) {
-               ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+               ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
                                           PORT_CONTROL, reg);
                if (ret)
-                       goto abort;
+                       return ret;
        }
 
        /* If this port is connected to a SerDes, make sure the SerDes is not
         * powered down.
         */
-       if (mv88e6xxx_6352_family(ds)) {
-               ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
+       if (mv88e6xxx_6352_family(ps)) {
+               ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_STATUS);
                if (ret < 0)
-                       goto abort;
+                       return ret;
                ret &= PORT_STATUS_CMODE_MASK;
                if ((ret == PORT_STATUS_CMODE_100BASE_X) ||
                    (ret == PORT_STATUS_CMODE_1000BASE_X) ||
                    (ret == PORT_STATUS_CMODE_SGMII)) {
-                       ret = mv88e6xxx_power_on_serdes(ds);
+                       ret = mv88e6xxx_power_on_serdes(ps);
                        if (ret < 0)
-                               goto abort;
+                               return ret;
                }
        }
 
@@ -2513,17 +2794,17 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
         * copy of all transmitted/received frames on this port to the CPU.
         */
        reg = 0;
-       if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-           mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-           mv88e6xxx_6095_family(ds) || mv88e6xxx_6320_family(ds) ||
-           mv88e6xxx_6185_family(ds))
+       if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+           mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+           mv88e6xxx_6095_family(ps) || mv88e6xxx_6320_family(ps) ||
+           mv88e6xxx_6185_family(ps))
                reg = PORT_CONTROL_2_MAP_DA;
 
-       if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-           mv88e6xxx_6165_family(ds) || mv88e6xxx_6320_family(ds))
+       if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+           mv88e6xxx_6165_family(ps) || mv88e6xxx_6320_family(ps))
                reg |= PORT_CONTROL_2_JUMBO_10240;
 
-       if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) {
+       if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps)) {
                /* Set the upstream port this port should use */
                reg |= dsa_upstream_port(ds);
                /* enable forwarding of unknown multicast addresses to
@@ -2536,10 +2817,10 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
        reg |= PORT_CONTROL_2_8021Q_DISABLED;
 
        if (reg) {
-               ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+               ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
                                           PORT_CONTROL_2, reg);
                if (ret)
-                       goto abort;
+                       return ret;
        }
 
        /* Port Association Vector: when learning source addresses
@@ -2548,304 +2829,350 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
         * the other bits clear.
         */
        reg = 1 << port;
-       /* Disable learning for DSA and CPU ports */
-       if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
-               reg = PORT_ASSOC_VECTOR_LOCKED_PORT;
+       /* Disable learning for CPU port */
+       if (dsa_is_cpu_port(ds, port))
+               reg = 0;
 
-       ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR, reg);
+       ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_ASSOC_VECTOR, reg);
        if (ret)
-               goto abort;
+               return ret;
 
        /* Egress rate control 2: disable egress rate control. */
-       ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_RATE_CONTROL_2,
+       ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_RATE_CONTROL_2,
                                   0x0000);
        if (ret)
-               goto abort;
+               return ret;
 
-       if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-           mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-           mv88e6xxx_6320_family(ds)) {
+       if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+           mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+           mv88e6xxx_6320_family(ps)) {
                /* Do not limit the period of time that this port can
                 * be paused for by the remote end or the period of
                 * time that this port can pause the remote end.
                 */
-               ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+               ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
                                           PORT_PAUSE_CTRL, 0x0000);
                if (ret)
-                       goto abort;
+                       return ret;
 
                /* Port ATU control: disable limiting the number of
                 * address database entries that this port is allowed
                 * to use.
                 */
-               ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+               ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
                                           PORT_ATU_CONTROL, 0x0000);
+               if (ret)
+                       return ret;
+
                /* Priority Override: disable DA, SA and VTU priority
                 * override.
                 */
-               ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+               ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
                                           PORT_PRI_OVERRIDE, 0x0000);
                if (ret)
-                       goto abort;
+                       return ret;
 
                /* Port Ethertype: use the Ethertype DSA Ethertype
                 * value.
                 */
-               ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+               ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
                                           PORT_ETH_TYPE, ETH_P_EDSA);
                if (ret)
-                       goto abort;
+                       return ret;
                /* Tag Remap: use an identity 802.1p prio -> switch
                 * prio mapping.
                 */
-               ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+               ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
                                           PORT_TAG_REGMAP_0123, 0x3210);
                if (ret)
-                       goto abort;
+                       return ret;
 
                /* Tag Remap 2: use an identity 802.1p prio -> switch
                 * prio mapping.
                 */
-               ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+               ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
                                           PORT_TAG_REGMAP_4567, 0x7654);
                if (ret)
-                       goto abort;
+                       return ret;
        }
 
-       if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-           mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-           mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
-           mv88e6xxx_6320_family(ds)) {
+       if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+           mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+           mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) ||
+           mv88e6xxx_6320_family(ps)) {
                /* Rate Control: disable ingress rate limiting. */
-               ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+               ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
                                           PORT_RATE_CONTROL, 0x0001);
                if (ret)
-                       goto abort;
+                       return ret;
        }
 
        /* Port Control 1: disable trunking, disable sending
         * learning messages to this port.
         */
-       ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1, 0x0000);
+       ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_1, 0x0000);
        if (ret)
-               goto abort;
+               return ret;
 
-       /* Port based VLAN map: give each port its own address
+       /* Port based VLAN map: give each port the same default address
         * database, and allow bidirectional communication between the
         * CPU and DSA port(s), and the other ports.
         */
-       ret = _mv88e6xxx_port_fid_set(ds, port, port + 1);
+       ret = _mv88e6xxx_port_fid_set(ps, port, 0);
        if (ret)
-               goto abort;
+               return ret;
 
-       ret = _mv88e6xxx_port_based_vlan_map(ds, port);
+       ret = _mv88e6xxx_port_based_vlan_map(ps, port);
        if (ret)
-               goto abort;
+               return ret;
 
        /* Default VLAN ID and priority: don't set a default VLAN
         * ID, and set the default packet priority to zero.
         */
-       ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
+       ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_DEFAULT_VLAN,
                                   0x0000);
-abort:
-       mutex_unlock(&ps->smi_mutex);
-       return ret;
-}
-
-int mv88e6xxx_setup_ports(struct dsa_switch *ds)
-{
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int ret;
-       int i;
+       if (ret)
+               return ret;
 
-       for (i = 0; i < ps->num_ports; i++) {
-               ret = mv88e6xxx_setup_port(ds, i);
-               if (ret < 0)
-                       return ret;
-       }
        return 0;
 }
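
Note the behavioural change buried in this hunk: ports used to get a private address database each (FID port + 1); now every port starts out in the shared default database 0 with 802.1Q disabled, and isolation comes from the port-based VLAN map alone. A sketch of the idea behind such a map (names and layout invented; the real computation lives in _mv88e6xxx_port_based_vlan_map()): a port may egress to the CPU/DSA ports plus the members of its own bridge, never to itself.

#include <stdint.h>
#include <stdio.h>

static uint16_t port_vlan_map(int port, uint16_t cpu_dsa_mask,
                              uint16_t bridge_mask)
{
        return (cpu_dsa_mask | bridge_mask) & ~(1u << port);
}

int main(void)
{
        /* port 1 bridged with port 2, CPU on port 5 */
        printf("0x%03x\n", (unsigned)port_vlan_map(1, 1 << 5,
                                                   (1 << 1) | (1 << 2)));
        return 0;       /* prints 0x024: may egress to ports 2 and 5 */
}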
 
-int mv88e6xxx_setup_common(struct dsa_switch *ds)
+static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps)
 {
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
-       mutex_init(&ps->smi_mutex);
+       struct dsa_switch *ds = ps->ds;
+       u32 upstream_port = dsa_upstream_port(ds);
+       u16 reg;
+       int err;
+       int i;
 
-       ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0;
+       /* Enable the PHY Polling Unit if present, don't discard any packets,
+        * and mask all interrupt sources.
+        */
+       reg = 0;
+       if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU) ||
+           mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU_ACTIVE))
+               reg |= GLOBAL_CONTROL_PPU_ENABLE;
 
-       INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);
+       err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL, reg);
+       if (err)
+               return err;
 
-       return 0;
-}
+       /* Configure the upstream port as the destination for ingress
+        * monitor, egress monitor and ARP frames.
+        */
+       reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
+               upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
+               upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT;
+       err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
+       if (err)
+               return err;
 
-int mv88e6xxx_setup_global(struct dsa_switch *ds)
-{
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int ret;
-       int i;
+       /* Disable remote management, and set the switch's DSA device number. */
+       err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL_2,
+                                  GLOBAL_CONTROL_2_MULTIPLE_CASCADE |
+                                  (ds->index & 0x1f));
+       if (err)
+               return err;
 
        /* Set the default address aging time to 5 minutes, and
         * enable address learn messages to be sent to all message
         * ports.
         */
-       REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
-                 0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL);
+       err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL,
+                                  0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL);
+       if (err)
+               return err;
 
        /* Configure the IP ToS mapping registers. */
-       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
-       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
-       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
-       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
-       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
-       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
-       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
-       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
+       err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
+       if (err)
+               return err;
+       err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
+       if (err)
+               return err;
+       err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
+       if (err)
+               return err;
+       err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
+       if (err)
+               return err;
+       err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
+       if (err)
+               return err;
+       err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
+       if (err)
+               return err;
+       err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
+       if (err)
+               return err;
+       err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
+       if (err)
+               return err;
 
        /* Configure the IEEE 802.1p priority mapping register. */
-       REG_WRITE(REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
+       err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
+       if (err)
+               return err;
 
        /* Send all frames with destination addresses matching
         * 01:80:c2:00:00:0x to the CPU port.
         */
-       REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff);
+       err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff);
+       if (err)
+               return err;
 
        /* Ignore removed tag data on doubly tagged packets, disable
         * flow control messages, force flow control priority to the
         * highest, and send all special multicast frames to the CPU
         * port at the highest priority.
         */
-       REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MGMT,
-                 0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 |
-                 GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI);
+       err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SWITCH_MGMT,
+                                  0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 |
+                                  GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI);
+       if (err)
+               return err;
 
        /* Program the DSA routing table. */
        for (i = 0; i < 32; i++) {
                int nexthop = 0x1f;
 
-               if (ds->pd->rtable &&
-                   i != ds->index && i < ds->dst->pd->nr_chips)
-                       nexthop = ds->pd->rtable[i] & 0x1f;
+               if (ps->ds->cd->rtable &&
+                   i != ps->ds->index && i < ps->ds->dst->pd->nr_chips)
+                       nexthop = ps->ds->cd->rtable[i] & 0x1f;
 
-               REG_WRITE(REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING,
-                         GLOBAL2_DEVICE_MAPPING_UPDATE |
-                         (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) |
-                         nexthop);
+               err = _mv88e6xxx_reg_write(
+                       ps, REG_GLOBAL2,
+                       GLOBAL2_DEVICE_MAPPING,
+                       GLOBAL2_DEVICE_MAPPING_UPDATE |
+                       (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) | nexthop);
+               if (err)
+                       return err;
        }
 
        /* Clear all trunk masks. */
-       for (i = 0; i < 8; i++)
-               REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MASK,
-                         0x8000 | (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) |
-                         ((1 << ps->num_ports) - 1));
+       for (i = 0; i < 8; i++) {
+               err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_TRUNK_MASK,
+                                          0x8000 |
+                                          (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) |
+                                          ((1 << ps->info->num_ports) - 1));
+               if (err)
+                       return err;
+       }
 
        /* Clear all trunk mappings. */
-       for (i = 0; i < 16; i++)
-               REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MAPPING,
-                         GLOBAL2_TRUNK_MAPPING_UPDATE |
-                         (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
-
-       if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-           mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-           mv88e6xxx_6320_family(ds)) {
+       for (i = 0; i < 16; i++) {
+               err = _mv88e6xxx_reg_write(
+                       ps, REG_GLOBAL2,
+                       GLOBAL2_TRUNK_MAPPING,
+                       GLOBAL2_TRUNK_MAPPING_UPDATE |
+                       (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
+               if (err)
+                       return err;
+       }
+
+       if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+           mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+           mv88e6xxx_6320_family(ps)) {
                /* Send all frames with destination addresses matching
                 * 01:80:c2:00:00:2x to the CPU port.
                 */
-               REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_2X, 0xffff);
+               err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
+                                          GLOBAL2_MGMT_EN_2X, 0xffff);
+               if (err)
+                       return err;
 
                /* Initialise cross-chip port VLAN table to reset
                 * defaults.
                 */
-               REG_WRITE(REG_GLOBAL2, GLOBAL2_PVT_ADDR, 0x9000);
+               err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
+                                          GLOBAL2_PVT_ADDR, 0x9000);
+               if (err)
+                       return err;
 
                /* Clear the priority override table. */
-               for (i = 0; i < 16; i++)
-                       REG_WRITE(REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE,
-                                 0x8000 | (i << 8));
+               for (i = 0; i < 16; i++) {
+                       err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
+                                                  GLOBAL2_PRIO_OVERRIDE,
+                                                  0x8000 | (i << 8));
+                       if (err)
+                               return err;
+               }
        }
 
-       if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-           mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-           mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
-           mv88e6xxx_6320_family(ds)) {
+       if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+           mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+           mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) ||
+           mv88e6xxx_6320_family(ps)) {
                /* Disable ingress rate limiting by resetting all
                 * ingress rate limit registers to their initial
                 * state.
                 */
-               for (i = 0; i < ps->num_ports; i++)
-                       REG_WRITE(REG_GLOBAL2, GLOBAL2_INGRESS_OP,
-                                 0x9000 | (i << 8));
+               for (i = 0; i < ps->info->num_ports; i++) {
+                       err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
+                                                  GLOBAL2_INGRESS_OP,
+                                                  0x9000 | (i << 8));
+                       if (err)
+                               return err;
+               }
        }
 
        /* Clear the statistics counters for all ports */
-       REG_WRITE(REG_GLOBAL, GLOBAL_STATS_OP, GLOBAL_STATS_OP_FLUSH_ALL);
+       err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP,
+                                  GLOBAL_STATS_OP_FLUSH_ALL);
+       if (err)
+               return err;
 
        /* Wait for the flush to complete. */
-       mutex_lock(&ps->smi_mutex);
-       ret = _mv88e6xxx_stats_wait(ds);
-       if (ret < 0)
-               goto unlock;
+       err = _mv88e6xxx_stats_wait(ps);
+       if (err)
+               return err;
 
        /* Clear all ATU entries */
-       ret = _mv88e6xxx_atu_flush(ds, 0, true);
-       if (ret < 0)
-               goto unlock;
+       err = _mv88e6xxx_atu_flush(ps, 0, true);
+       if (err)
+               return err;
 
        /* Clear all the VTU and STU entries */
-       ret = _mv88e6xxx_vtu_stu_flush(ds);
-unlock:
-       mutex_unlock(&ps->smi_mutex);
+       err = _mv88e6xxx_vtu_stu_flush(ps);
 
-       return ret;
+       return err;
 }
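
Most of the churn in mv88e6xxx_setup_global() above comes from dropping the old REG_READ/REG_WRITE macros, which hid the error path (including an early return) inside the macro body; every access is now a plain _mv88e6xxx_reg_write() followed by an explicit check. The trade-off in miniature (the macro below is a reconstruction of the idea, not the original definition):

static int reg_write(int reg, int val)
{
        (void)reg; (void)val;
        return 0;                       /* stand-in for a real SMI write */
}

/* Hidden control flow: the macro returns from its *caller* on failure. */
#define REG_WRITE(reg, val)                             \
        do {                                            \
                int __ret = reg_write(reg, val);        \
                if (__ret < 0)                          \
                        return __ret;                   \
        } while (0)

/* The patch's replacement style: the same check, visible at the call site. */
static int setup_one(void)
{
        int err = reg_write(0x04, 0xc400);

        if (err < 0)
                return err;
        return 0;
}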
 
-int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active)
+static int mv88e6xxx_setup(struct dsa_switch *ds)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
-       struct gpio_desc *gpiod = ds->pd->reset;
-       unsigned long timeout;
-       int ret;
+       int err;
        int i;
 
-       /* Set all ports to the disabled state. */
-       for (i = 0; i < ps->num_ports; i++) {
-               ret = REG_READ(REG_PORT(i), PORT_CONTROL);
-               REG_WRITE(REG_PORT(i), PORT_CONTROL, ret & 0xfffc);
-       }
+       ps->ds = ds;
 
-       /* Wait for transmit queues to drain. */
-       usleep_range(2000, 4000);
+       INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);
 
-       /* If there is a gpio connected to the reset pin, toggle it */
-       if (gpiod) {
-               gpiod_set_value_cansleep(gpiod, 1);
-               usleep_range(10000, 20000);
-               gpiod_set_value_cansleep(gpiod, 0);
-               usleep_range(10000, 20000);
-       }
+       if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
+               mutex_init(&ps->eeprom_mutex);
 
-       /* Reset the switch. Keep the PPU active if requested. The PPU
-        * needs to be active to support indirect phy register access
-        * through global registers 0x18 and 0x19.
-        */
-       if (ppu_active)
-               REG_WRITE(REG_GLOBAL, 0x04, 0xc000);
-       else
-               REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
+       if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU))
+               mv88e6xxx_ppu_state_init(ps);
 
-       /* Wait up to one second for reset to complete. */
-       timeout = jiffies + 1 * HZ;
-       while (time_before(jiffies, timeout)) {
-               ret = REG_READ(REG_GLOBAL, 0x00);
-               if ((ret & is_reset) == is_reset)
-                       break;
-               usleep_range(1000, 2000);
+       mutex_lock(&ps->smi_mutex);
+
+       err = mv88e6xxx_switch_reset(ps);
+       if (err)
+               goto unlock;
+
+       err = mv88e6xxx_setup_global(ps);
+       if (err)
+               goto unlock;
+
+       for (i = 0; i < ps->info->num_ports; i++) {
+               err = mv88e6xxx_setup_port(ps, i);
+               if (err)
+                       goto unlock;
        }
-       if (time_after(jiffies, timeout))
-               return -ETIMEDOUT;
 
-       return 0;
+unlock:
+       mutex_unlock(&ps->smi_mutex);
+
+       return err;
 }
 
 int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
@@ -2854,7 +3181,7 @@ int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
        int ret;
 
        mutex_lock(&ps->smi_mutex);
-       ret = _mv88e6xxx_phy_page_read(ds, port, page, reg);
+       ret = _mv88e6xxx_phy_page_read(ps, port, page, reg);
        mutex_unlock(&ps->smi_mutex);
 
        return ret;
@@ -2867,82 +3194,61 @@ int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
        int ret;
 
        mutex_lock(&ps->smi_mutex);
-       ret = _mv88e6xxx_phy_page_write(ds, port, page, reg, val);
+       ret = _mv88e6xxx_phy_page_write(ps, port, page, reg, val);
        mutex_unlock(&ps->smi_mutex);
 
        return ret;
 }
 
-static int mv88e6xxx_port_to_phy_addr(struct dsa_switch *ds, int port)
+static int mv88e6xxx_port_to_phy_addr(struct mv88e6xxx_priv_state *ps,
+                                     int port)
 {
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
-       if (port >= 0 && port < ps->num_ports)
+       if (port >= 0 && port < ps->info->num_ports)
                return port;
        return -EINVAL;
 }
 
-int
-mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
+static int mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int addr = mv88e6xxx_port_to_phy_addr(ds, port);
+       int addr = mv88e6xxx_port_to_phy_addr(ps, port);
        int ret;
 
        if (addr < 0)
-               return addr;
+               return 0xffff;
 
        mutex_lock(&ps->smi_mutex);
-       ret = _mv88e6xxx_phy_read(ds, addr, regnum);
-       mutex_unlock(&ps->smi_mutex);
-       return ret;
-}
-
-int
-mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
-{
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int addr = mv88e6xxx_port_to_phy_addr(ds, port);
-       int ret;
 
-       if (addr < 0)
-               return addr;
+       if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU))
+               ret = mv88e6xxx_phy_read_ppu(ps, addr, regnum);
+       else if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SMI_PHY))
+               ret = _mv88e6xxx_phy_read_indirect(ps, addr, regnum);
+       else
+               ret = _mv88e6xxx_phy_read(ps, addr, regnum);
 
-       mutex_lock(&ps->smi_mutex);
-       ret = _mv88e6xxx_phy_write(ds, addr, regnum, val);
        mutex_unlock(&ps->smi_mutex);
        return ret;
 }
 
-int
-mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum)
+static int mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum,
+                              u16 val)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int addr = mv88e6xxx_port_to_phy_addr(ds, port);
+       int addr = mv88e6xxx_port_to_phy_addr(ps, port);
        int ret;
 
        if (addr < 0)
-               return addr;
+               return 0xffff;
 
        mutex_lock(&ps->smi_mutex);
-       ret = _mv88e6xxx_phy_read_indirect(ds, addr, regnum);
-       mutex_unlock(&ps->smi_mutex);
-       return ret;
-}
 
-int
-mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
-                            u16 val)
-{
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int addr = mv88e6xxx_port_to_phy_addr(ds, port);
-       int ret;
-
-       if (addr < 0)
-               return addr;
+       if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU))
+               ret = mv88e6xxx_phy_write_ppu(ps, addr, regnum, val);
+       else if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SMI_PHY))
+               ret = _mv88e6xxx_phy_write_indirect(ps, addr, regnum, val);
+       else
+               ret = _mv88e6xxx_phy_write(ps, addr, regnum, val);
 
-       mutex_lock(&ps->smi_mutex);
-       ret = _mv88e6xxx_phy_write_indirect(ds, addr, regnum, val);
        mutex_unlock(&ps->smi_mutex);
        return ret;
 }
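
mv88e6xxx_phy_read() and mv88e6xxx_phy_write() now choose the access method from capability flags instead of per-family subdrivers: models with a PHY Polling Unit go through the PPU helpers, models with indirect SMI PHY access use the indirection, and the remainder hit the PHY registers directly. The selection order, isolated (flag values are placeholders for the driver's real MV88E6XXX_FLAG_* bits):

enum phy_access { PHY_VIA_PPU, PHY_VIA_SMI_INDIRECT, PHY_DIRECT };

static enum phy_access pick_access(unsigned long flags,
                                   unsigned long flag_ppu,
                                   unsigned long flag_smi_phy)
{
        if (flags & flag_ppu)
                return PHY_VIA_PPU;             /* checked first, as above */
        if (flags & flag_smi_phy)
                return PHY_VIA_SMI_INDIRECT;
        return PHY_DIRECT;
}
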
@@ -2959,44 +3265,45 @@ static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
 
        mutex_lock(&ps->smi_mutex);
 
-       ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
+       ret = _mv88e6xxx_phy_write(ps, 0x0, 0x16, 0x6);
        if (ret < 0)
                goto error;
 
        /* Enable temperature sensor */
-       ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
+       ret = _mv88e6xxx_phy_read(ps, 0x0, 0x1a);
        if (ret < 0)
                goto error;
 
-       ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
+       ret = _mv88e6xxx_phy_write(ps, 0x0, 0x1a, ret | (1 << 5));
        if (ret < 0)
                goto error;
 
        /* Wait for temperature to stabilize */
        usleep_range(10000, 12000);
 
-       val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
+       val = _mv88e6xxx_phy_read(ps, 0x0, 0x1a);
        if (val < 0) {
                ret = val;
                goto error;
        }
 
        /* Disable temperature sensor */
-       ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
+       ret = _mv88e6xxx_phy_write(ps, 0x0, 0x1a, ret & ~(1 << 5));
        if (ret < 0)
                goto error;
 
        *temp = ((val & 0x1f) - 5) * 5;
 
 error:
-       _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
+       _mv88e6xxx_phy_write(ps, 0x0, 0x16, 0x0);
        mutex_unlock(&ps->smi_mutex);
        return ret;
 }
 
 static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
 {
-       int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
        int ret;
 
        *temp = 0;
@@ -3010,20 +3317,26 @@ static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
        return 0;
 }
 
-int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
+static int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
 {
-       if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+       if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP))
+               return -EOPNOTSUPP;
+
+       if (mv88e6xxx_6320_family(ps) || mv88e6xxx_6352_family(ps))
                return mv88e63xx_get_temp(ds, temp);
 
        return mv88e61xx_get_temp(ds, temp);
 }
 
-int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
+static int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
 {
-       int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
        int ret;
 
-       if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
+       if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT))
                return -EOPNOTSUPP;
 
        *temp = 0;
@@ -3037,12 +3350,13 @@ int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
        return 0;
 }
 
-int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
+static int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
 {
-       int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
        int ret;
 
-       if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
+       if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT))
                return -EOPNOTSUPP;
 
        ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
@@ -3053,12 +3367,13 @@ int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
                                        (ret & 0xe0ff) | (temp << 8));
 }
 
-int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
+static int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
 {
-       int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
        int ret;
 
-       if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
+       if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT))
                return -EOPNOTSUPP;
 
        *alarm = false;
@@ -3073,70 +3388,354 @@ int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
 }
 #endif /* CONFIG_NET_DSA_HWMON */
 
-char *mv88e6xxx_lookup_name(struct device *host_dev, int sw_addr,
-                           const struct mv88e6xxx_switch_id *table,
-                           unsigned int num)
+static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+       [MV88E6085] = {
+               .prod_num = PORT_SWITCH_ID_PROD_NUM_6085,
+               .family = MV88E6XXX_FAMILY_6097,
+               .name = "Marvell 88E6085",
+               .num_databases = 4096,
+               .num_ports = 10,
+               .flags = MV88E6XXX_FLAGS_FAMILY_6097,
+       },
+
+       [MV88E6095] = {
+               .prod_num = PORT_SWITCH_ID_PROD_NUM_6095,
+               .family = MV88E6XXX_FAMILY_6095,
+               .name = "Marvell 88E6095/88E6095F",
+               .num_databases = 256,
+               .num_ports = 11,
+               .flags = MV88E6XXX_FLAGS_FAMILY_6095,
+       },
+
+       [MV88E6123] = {
+               .prod_num = PORT_SWITCH_ID_PROD_NUM_6123,
+               .family = MV88E6XXX_FAMILY_6165,
+               .name = "Marvell 88E6123",
+               .num_databases = 4096,
+               .num_ports = 3,
+               .flags = MV88E6XXX_FLAGS_FAMILY_6165,
+       },
+
+       [MV88E6131] = {
+               .prod_num = PORT_SWITCH_ID_PROD_NUM_6131,
+               .family = MV88E6XXX_FAMILY_6185,
+               .name = "Marvell 88E6131",
+               .num_databases = 256,
+               .num_ports = 8,
+               .flags = MV88E6XXX_FLAGS_FAMILY_6185,
+       },
+
+       [MV88E6161] = {
+               .prod_num = PORT_SWITCH_ID_PROD_NUM_6161,
+               .family = MV88E6XXX_FAMILY_6165,
+               .name = "Marvell 88E6161",
+               .num_databases = 4096,
+               .num_ports = 6,
+               .flags = MV88E6XXX_FLAGS_FAMILY_6165,
+       },
+
+       [MV88E6165] = {
+               .prod_num = PORT_SWITCH_ID_PROD_NUM_6165,
+               .family = MV88E6XXX_FAMILY_6165,
+               .name = "Marvell 88E6165",
+               .num_databases = 4096,
+               .num_ports = 6,
+               .flags = MV88E6XXX_FLAGS_FAMILY_6165,
+       },
+
+       [MV88E6171] = {
+               .prod_num = PORT_SWITCH_ID_PROD_NUM_6171,
+               .family = MV88E6XXX_FAMILY_6351,
+               .name = "Marvell 88E6171",
+               .num_databases = 4096,
+               .num_ports = 7,
+               .flags = MV88E6XXX_FLAGS_FAMILY_6351,
+       },
+
+       [MV88E6172] = {
+               .prod_num = PORT_SWITCH_ID_PROD_NUM_6172,
+               .family = MV88E6XXX_FAMILY_6352,
+               .name = "Marvell 88E6172",
+               .num_databases = 4096,
+               .num_ports = 7,
+               .flags = MV88E6XXX_FLAGS_FAMILY_6352,
+       },
+
+       [MV88E6175] = {
+               .prod_num = PORT_SWITCH_ID_PROD_NUM_6175,
+               .family = MV88E6XXX_FAMILY_6351,
+               .name = "Marvell 88E6175",
+               .num_databases = 4096,
+               .num_ports = 7,
+               .flags = MV88E6XXX_FLAGS_FAMILY_6351,
+       },
+
+       [MV88E6176] = {
+               .prod_num = PORT_SWITCH_ID_PROD_NUM_6176,
+               .family = MV88E6XXX_FAMILY_6352,
+               .name = "Marvell 88E6176",
+               .num_databases = 4096,
+               .num_ports = 7,
+               .flags = MV88E6XXX_FLAGS_FAMILY_6352,
+       },
+
+       [MV88E6185] = {
+               .prod_num = PORT_SWITCH_ID_PROD_NUM_6185,
+               .family = MV88E6XXX_FAMILY_6185,
+               .name = "Marvell 88E6185",
+               .num_databases = 256,
+               .num_ports = 10,
+               .flags = MV88E6XXX_FLAGS_FAMILY_6185,
+       },
+
+       [MV88E6240] = {
+               .prod_num = PORT_SWITCH_ID_PROD_NUM_6240,
+               .family = MV88E6XXX_FAMILY_6352,
+               .name = "Marvell 88E6240",
+               .num_databases = 4096,
+               .num_ports = 7,
+               .flags = MV88E6XXX_FLAGS_FAMILY_6352,
+       },
+
+       [MV88E6320] = {
+               .prod_num = PORT_SWITCH_ID_PROD_NUM_6320,
+               .family = MV88E6XXX_FAMILY_6320,
+               .name = "Marvell 88E6320",
+               .num_databases = 4096,
+               .num_ports = 7,
+               .flags = MV88E6XXX_FLAGS_FAMILY_6320,
+       },
+
+       [MV88E6321] = {
+               .prod_num = PORT_SWITCH_ID_PROD_NUM_6321,
+               .family = MV88E6XXX_FAMILY_6320,
+               .name = "Marvell 88E6321",
+               .num_databases = 4096,
+               .num_ports = 7,
+               .flags = MV88E6XXX_FLAGS_FAMILY_6320,
+       },
+
+       [MV88E6350] = {
+               .prod_num = PORT_SWITCH_ID_PROD_NUM_6350,
+               .family = MV88E6XXX_FAMILY_6351,
+               .name = "Marvell 88E6350",
+               .num_databases = 4096,
+               .num_ports = 7,
+               .flags = MV88E6XXX_FLAGS_FAMILY_6351,
+       },
+
+       [MV88E6351] = {
+               .prod_num = PORT_SWITCH_ID_PROD_NUM_6351,
+               .family = MV88E6XXX_FAMILY_6351,
+               .name = "Marvell 88E6351",
+               .num_databases = 4096,
+               .num_ports = 7,
+               .flags = MV88E6XXX_FLAGS_FAMILY_6351,
+       },
+
+       [MV88E6352] = {
+               .prod_num = PORT_SWITCH_ID_PROD_NUM_6352,
+               .family = MV88E6XXX_FAMILY_6352,
+               .name = "Marvell 88E6352",
+               .num_databases = 4096,
+               .num_ports = 7,
+               .flags = MV88E6XXX_FLAGS_FAMILY_6352,
+       },
+};
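/* The table above uses C99 designated initializers keyed by enum
 * mv88e6xxx_model, so an entry can be reached either directly by model
 * index (mv88e6xxx_table[MV88E6352]) or, as the probe path below does,
 * by scanning for a matching product number.
 */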
+
+static const struct mv88e6xxx_info *
+mv88e6xxx_lookup_info(unsigned int prod_num, const struct mv88e6xxx_info *table,
+                     unsigned int num)
 {
-       struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev);
-       int i, ret;
+       int i;
+
+       for (i = 0; i < num; ++i)
+               if (table[i].prod_num == prod_num)
+                       return &table[i];
 
+       return NULL;
+}
+
+static const char *mv88e6xxx_drv_probe(struct device *dsa_dev,
+                                      struct device *host_dev, int sw_addr,
+                                      void **priv)
+{
+       const struct mv88e6xxx_info *info;
+       struct mv88e6xxx_priv_state *ps;
+       struct mii_bus *bus;
+       const char *name;
+       int id, prod_num, rev;
+
+       bus = dsa_host_dev_to_mii_bus(host_dev);
        if (!bus)
                return NULL;
 
-       ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
-       if (ret < 0)
+       id = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
+       if (id < 0)
                return NULL;
 
-       /* Look up the exact switch ID */
-       for (i = 0; i < num; ++i)
-               if (table[i].id == ret)
-                       return table[i].name;
-
-       /* Look up only the product number */
-       for (i = 0; i < num; ++i) {
-               if (table[i].id == (ret & PORT_SWITCH_ID_PROD_NUM_MASK)) {
-                       dev_warn(host_dev, "unknown revision %d, using base switch 0x%x\n",
-                                ret & PORT_SWITCH_ID_REV_MASK,
-                                ret & PORT_SWITCH_ID_PROD_NUM_MASK);
-                       return table[i].name;
+       prod_num = (id & 0xfff0) >> 4;
+       rev = id & 0x000f;
+
+       info = mv88e6xxx_lookup_info(prod_num, mv88e6xxx_table,
+                                    ARRAY_SIZE(mv88e6xxx_table));
+       if (!info)
+               return NULL;
+
+       name = info->name;
+
+       ps = devm_kzalloc(dsa_dev, sizeof(*ps), GFP_KERNEL);
+       if (!ps)
+               return NULL;
+
+       ps->bus = bus;
+       ps->sw_addr = sw_addr;
+       ps->info = info;
+       mutex_init(&ps->smi_mutex);
+
+       *priv = ps;
+
+       dev_info(&ps->bus->dev, "switch 0x%x probed: %s, revision %u\n",
+                prod_num, name, rev);
+
+       return name;
+}
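/* A worked example of the decode above: PORT_SWITCH_ID packs a 12-bit
 * product number and a 4-bit revision. For an 88E6352 A1 the register
 * reads 0x3522 (see the old PORT_SWITCH_ID_6352_A1 define removed in the
 * header hunk below), so:
 *
 *	prod_num = (0x3522 & 0xfff0) >> 4;	// 0x352
 *	rev      = 0x3522 & 0x000f;		// 2
 *
 * which matches PORT_SWITCH_ID_PROD_NUM_6352 in mv88e6xxx_table[].
 */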
+
+struct dsa_switch_driver mv88e6xxx_switch_driver = {
+       .tag_protocol           = DSA_TAG_PROTO_EDSA,
+       .probe                  = mv88e6xxx_drv_probe,
+       .setup                  = mv88e6xxx_setup,
+       .set_addr               = mv88e6xxx_set_addr,
+       .phy_read               = mv88e6xxx_phy_read,
+       .phy_write              = mv88e6xxx_phy_write,
+       .adjust_link            = mv88e6xxx_adjust_link,
+       .get_strings            = mv88e6xxx_get_strings,
+       .get_ethtool_stats      = mv88e6xxx_get_ethtool_stats,
+       .get_sset_count         = mv88e6xxx_get_sset_count,
+       .set_eee                = mv88e6xxx_set_eee,
+       .get_eee                = mv88e6xxx_get_eee,
+#ifdef CONFIG_NET_DSA_HWMON
+       .get_temp               = mv88e6xxx_get_temp,
+       .get_temp_limit         = mv88e6xxx_get_temp_limit,
+       .set_temp_limit         = mv88e6xxx_set_temp_limit,
+       .get_temp_alarm         = mv88e6xxx_get_temp_alarm,
+#endif
+       .get_eeprom_len         = mv88e6xxx_get_eeprom_len,
+       .get_eeprom             = mv88e6xxx_get_eeprom,
+       .set_eeprom             = mv88e6xxx_set_eeprom,
+       .get_regs_len           = mv88e6xxx_get_regs_len,
+       .get_regs               = mv88e6xxx_get_regs,
+       .port_bridge_join       = mv88e6xxx_port_bridge_join,
+       .port_bridge_leave      = mv88e6xxx_port_bridge_leave,
+       .port_stp_state_set     = mv88e6xxx_port_stp_state_set,
+       .port_vlan_filtering    = mv88e6xxx_port_vlan_filtering,
+       .port_vlan_prepare      = mv88e6xxx_port_vlan_prepare,
+       .port_vlan_add          = mv88e6xxx_port_vlan_add,
+       .port_vlan_del          = mv88e6xxx_port_vlan_del,
+       .port_vlan_dump         = mv88e6xxx_port_vlan_dump,
+       .port_fdb_prepare       = mv88e6xxx_port_fdb_prepare,
+       .port_fdb_add           = mv88e6xxx_port_fdb_add,
+       .port_fdb_del           = mv88e6xxx_port_fdb_del,
+       .port_fdb_dump          = mv88e6xxx_port_fdb_dump,
+};
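/* A single dsa_switch_driver now covers every supported model; the
 * per-chip differences live in mv88e6xxx_table[] and are checked at run
 * time via mv88e6xxx_has(), replacing the four separately compiled
 * mv88e6131/6123/6352/6171 driver structures unregistered further down.
 */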
+
+int mv88e6xxx_probe(struct mdio_device *mdiodev)
+{
+       struct device *dev = &mdiodev->dev;
+       struct device_node *np = dev->of_node;
+       struct mv88e6xxx_priv_state *ps;
+       int id, prod_num, rev;
+       struct dsa_switch *ds;
+       u32 eeprom_len;
+       int err;
+
+       ds = devm_kzalloc(dev, sizeof(*ds) + sizeof(*ps), GFP_KERNEL);
+       if (!ds)
+               return -ENOMEM;
+
+       ps = (struct mv88e6xxx_priv_state *)(ds + 1);
+       ds->priv = ps;
+       ds->dev = dev;
+       ps->dev = dev;
+       ps->ds = ds;
+       ps->bus = mdiodev->bus;
+       ps->sw_addr = mdiodev->addr;
+       mutex_init(&ps->smi_mutex);
+
+       get_device(&ps->bus->dev);
+
+       ds->drv = &mv88e6xxx_switch_driver;
+
+       id = mv88e6xxx_reg_read(ps, REG_PORT(0), PORT_SWITCH_ID);
+       if (id < 0)
+               return id;
+
+       prod_num = (id & 0xfff0) >> 4;
+       rev = id & 0x000f;
+
+       ps->info = mv88e6xxx_lookup_info(prod_num, mv88e6xxx_table,
+                                        ARRAY_SIZE(mv88e6xxx_table));
+       if (!ps->info)
+               return -ENODEV;
+
+       ps->reset = devm_gpiod_get(&mdiodev->dev, "reset", GPIOD_ASIS);
+       if (IS_ERR(ps->reset)) {
+               err = PTR_ERR(ps->reset);
+               if (err == -ENOENT) {
+                       /* Optional, so not an error */
+                       ps->reset = NULL;
+               } else {
+                       return err;
                }
        }
 
-       return NULL;
+       if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM) &&
+           !of_property_read_u32(np, "eeprom-length", &eeprom_len))
+               ps->eeprom_len = eeprom_len;
+
+       dev_set_drvdata(dev, ds);
+
+       dev_info(dev, "switch 0x%x probed: %s, revision %u\n",
+                prod_num, ps->info->name, rev);
+
+       return 0;
 }
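/* The -ENOENT handling above is the usual idiom for an optional GPIO: a
 * missing "reset" line is not an error, while any other failure aborts
 * the probe. The devm_gpiod_get_optional() helper expresses the same
 * intent; a minimal sketch of the hand-rolled form, using an assumed
 * "wake" line purely as an example:
 *
 *	gpio = devm_gpiod_get(dev, "wake", GPIOD_ASIS);
 *	if (IS_ERR(gpio)) {
 *		if (PTR_ERR(gpio) != -ENOENT)
 *			return PTR_ERR(gpio);
 *		gpio = NULL;	/* optional, carry on without it */
 *	}
 */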
 
+static void mv88e6xxx_remove(struct mdio_device *mdiodev)
+{
+       struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+       put_device(&ps->bus->dev);
+}
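/* The put_device() here balances the get_device() taken on the MDIO bus
 * device in mv88e6xxx_probe() above, so the bus cannot go away while the
 * switch driver still holds a reference to it.
 */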
+
+static const struct of_device_id mv88e6xxx_of_match[] = {
+       { .compatible = "marvell,mv88e6085" },
+       { /* sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, mv88e6xxx_of_match);
+
+static struct mdio_driver mv88e6xxx_driver = {
+       .probe  = mv88e6xxx_probe,
+       .remove = mv88e6xxx_remove,
+       .mdiodrv.driver = {
+               .name = "mv88e6085",
+               .of_match_table = mv88e6xxx_of_match,
+       },
+};
+
 static int __init mv88e6xxx_init(void)
 {
-#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
-       register_switch_driver(&mv88e6131_switch_driver);
-#endif
-#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123)
-       register_switch_driver(&mv88e6123_switch_driver);
-#endif
-#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
-       register_switch_driver(&mv88e6352_switch_driver);
-#endif
-#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
-       register_switch_driver(&mv88e6171_switch_driver);
-#endif
-       return 0;
+       register_switch_driver(&mv88e6xxx_switch_driver);
+       return mdio_driver_register(&mv88e6xxx_driver);
 }
 module_init(mv88e6xxx_init);
 
 static void __exit mv88e6xxx_cleanup(void)
 {
-#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
-       unregister_switch_driver(&mv88e6171_switch_driver);
-#endif
-#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
-       unregister_switch_driver(&mv88e6352_switch_driver);
-#endif
-#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123)
-       unregister_switch_driver(&mv88e6123_switch_driver);
-#endif
-#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
-       unregister_switch_driver(&mv88e6131_switch_driver);
-#endif
+       mdio_driver_unregister(&mv88e6xxx_driver);
+       unregister_switch_driver(&mv88e6xxx_switch_driver);
 }
 module_exit(mv88e6xxx_cleanup);
 
index 236bcaa606e73d2e7dea13295c0570ead096ea8c..40e8721ecfb1d0832697bf653f90d2d84792f011 100644 (file)
@@ -12,6 +12,7 @@
 #define __MV88E6XXX_H
 
 #include <linux/if_vlan.h>
+#include <linux/gpio/consumer.h>
 
 #ifndef UINT64_MAX
 #define UINT64_MAX             (u64)(~((u64)0))
 #define PORT_PCS_CTRL_UNFORCED         0x03
 #define PORT_PAUSE_CTRL                0x02
 #define PORT_SWITCH_ID         0x03
-#define PORT_SWITCH_ID_PROD_NUM_MASK   0xfff0
-#define PORT_SWITCH_ID_REV_MASK                0x000f
-#define PORT_SWITCH_ID_6031    0x0310
-#define PORT_SWITCH_ID_6035    0x0350
-#define PORT_SWITCH_ID_6046    0x0480
-#define PORT_SWITCH_ID_6061    0x0610
-#define PORT_SWITCH_ID_6065    0x0650
-#define PORT_SWITCH_ID_6085    0x04a0
-#define PORT_SWITCH_ID_6092    0x0970
-#define PORT_SWITCH_ID_6095    0x0950
-#define PORT_SWITCH_ID_6096    0x0980
-#define PORT_SWITCH_ID_6097    0x0990
-#define PORT_SWITCH_ID_6108    0x1070
-#define PORT_SWITCH_ID_6121    0x1040
-#define PORT_SWITCH_ID_6122    0x1050
-#define PORT_SWITCH_ID_6123    0x1210
-#define PORT_SWITCH_ID_6123_A1 0x1212
-#define PORT_SWITCH_ID_6123_A2 0x1213
-#define PORT_SWITCH_ID_6131    0x1060
-#define PORT_SWITCH_ID_6131_B2 0x1066
-#define PORT_SWITCH_ID_6152    0x1a40
-#define PORT_SWITCH_ID_6155    0x1a50
-#define PORT_SWITCH_ID_6161    0x1610
-#define PORT_SWITCH_ID_6161_A1 0x1612
-#define PORT_SWITCH_ID_6161_A2 0x1613
-#define PORT_SWITCH_ID_6165    0x1650
-#define PORT_SWITCH_ID_6165_A1 0x1652
-#define PORT_SWITCH_ID_6165_A2 0x1653
-#define PORT_SWITCH_ID_6171    0x1710
-#define PORT_SWITCH_ID_6172    0x1720
-#define PORT_SWITCH_ID_6175    0x1750
-#define PORT_SWITCH_ID_6176    0x1760
-#define PORT_SWITCH_ID_6182    0x1a60
-#define PORT_SWITCH_ID_6185    0x1a70
-#define PORT_SWITCH_ID_6240    0x2400
-#define PORT_SWITCH_ID_6320    0x1150
-#define PORT_SWITCH_ID_6320_A1 0x1151
-#define PORT_SWITCH_ID_6320_A2 0x1152
-#define PORT_SWITCH_ID_6321    0x3100
-#define PORT_SWITCH_ID_6321_A1 0x3101
-#define PORT_SWITCH_ID_6321_A2 0x3102
-#define PORT_SWITCH_ID_6350    0x3710
-#define PORT_SWITCH_ID_6351    0x3750
-#define PORT_SWITCH_ID_6352    0x3520
-#define PORT_SWITCH_ID_6352_A0 0x3521
-#define PORT_SWITCH_ID_6352_A1 0x3522
+#define PORT_SWITCH_ID_PROD_NUM_6085   0x04a
+#define PORT_SWITCH_ID_PROD_NUM_6095   0x095
+#define PORT_SWITCH_ID_PROD_NUM_6131   0x106
+#define PORT_SWITCH_ID_PROD_NUM_6320   0x115
+#define PORT_SWITCH_ID_PROD_NUM_6123   0x121
+#define PORT_SWITCH_ID_PROD_NUM_6161   0x161
+#define PORT_SWITCH_ID_PROD_NUM_6165   0x165
+#define PORT_SWITCH_ID_PROD_NUM_6171   0x171
+#define PORT_SWITCH_ID_PROD_NUM_6172   0x172
+#define PORT_SWITCH_ID_PROD_NUM_6175   0x175
+#define PORT_SWITCH_ID_PROD_NUM_6176   0x176
+#define PORT_SWITCH_ID_PROD_NUM_6185   0x1a7
+#define PORT_SWITCH_ID_PROD_NUM_6240   0x240
+#define PORT_SWITCH_ID_PROD_NUM_6321   0x310
+#define PORT_SWITCH_ID_PROD_NUM_6352   0x352
+#define PORT_SWITCH_ID_PROD_NUM_6350   0x371
+#define PORT_SWITCH_ID_PROD_NUM_6351   0x375
 #define PORT_CONTROL           0x04
 #define PORT_CONTROL_USE_CORE_TAG      BIT(15)
 #define PORT_CONTROL_DROP_ON_LOCK      BIT(14)
 
 #define MV88E6XXX_N_FID                4096
 
-struct mv88e6xxx_switch_id {
-       u16 id;
-       char *name;
+/* List of supported models */
+enum mv88e6xxx_model {
+       MV88E6085,
+       MV88E6095,
+       MV88E6123,
+       MV88E6131,
+       MV88E6161,
+       MV88E6165,
+       MV88E6171,
+       MV88E6172,
+       MV88E6175,
+       MV88E6176,
+       MV88E6185,
+       MV88E6240,
+       MV88E6320,
+       MV88E6321,
+       MV88E6350,
+       MV88E6351,
+       MV88E6352,
+};
+
+enum mv88e6xxx_family {
+       MV88E6XXX_FAMILY_NONE,
+       MV88E6XXX_FAMILY_6065,  /* 6031 6035 6061 6065 */
+       MV88E6XXX_FAMILY_6095,  /* 6092 6095 */
+       MV88E6XXX_FAMILY_6097,  /* 6046 6085 6096 6097 */
+       MV88E6XXX_FAMILY_6165,  /* 6123 6161 6165 */
+       MV88E6XXX_FAMILY_6185,  /* 6108 6121 6122 6131 6152 6155 6182 6185 */
+       MV88E6XXX_FAMILY_6320,  /* 6320 6321 */
+       MV88E6XXX_FAMILY_6351,  /* 6171 6175 6350 6351 */
+       MV88E6XXX_FAMILY_6352,  /* 6172 6176 6240 6352 */
+};
+
+enum mv88e6xxx_cap {
+       /* Address Translation Unit.
+        * The ATU is used to look up and learn MAC addresses. See GLOBAL_ATU_OP.
+        */
+       MV88E6XXX_CAP_ATU,
+
+       /* Energy Efficient Ethernet.
+        */
+       MV88E6XXX_CAP_EEE,
+
+       /* EEPROM Command and Data registers.
+        * See GLOBAL2_EEPROM_OP and GLOBAL2_EEPROM_DATA.
+        */
+       MV88E6XXX_CAP_EEPROM,
+
+       /* Port State Filtering for 802.1D Spanning Tree.
+        * See PORT_CONTROL_STATE_* values in the PORT_CONTROL register.
+        */
+       MV88E6XXX_CAP_PORTSTATE,
+
+       /* PHY Polling Unit.
+        * See GLOBAL_CONTROL_PPU_ENABLE and GLOBAL_STATUS_PPU_POLLING.
+        */
+       MV88E6XXX_CAP_PPU,
+       MV88E6XXX_CAP_PPU_ACTIVE,
+
+       /* SMI PHY Command and Data registers.
+        * If present, PHY registers must be accessed indirectly through
+        * GLOBAL2_SMI_OP; otherwise they are accessed directly.
+        */
+       MV88E6XXX_CAP_SMI_PHY,
+
+       /* Per VLAN Spanning Tree Unit (STU).
+        * The Port State database, if present, is accessed through VTU
+        * operations and dedicated SID registers. See GLOBAL_VTU_SID.
+        */
+       MV88E6XXX_CAP_STU,
+
+       /* Switch MAC/WoL/WoF register.
+        * If present, the switch MAC address must be set indirectly through
+        * GLOBAL2_SWITCH_MAC; otherwise it is written directly to
+        * GLOBAL_MAC_01, GLOBAL_MAC_23, and GLOBAL_MAC_45.
+        */
+       MV88E6XXX_CAP_SWITCH_MAC_WOL_WOF,
+
+       /* Internal temperature sensor.
+        * Available from any enabled port's PHY register 26, page 6.
+        */
+       MV88E6XXX_CAP_TEMP,
+       MV88E6XXX_CAP_TEMP_LIMIT,
+
+       /* In-chip Port Based VLANs.
+        * Each port's VLANTable register (see PORT_BASE_VLAN) restricts the
+        * output (or egress) ports to which that port is allowed to send
+        * frames.
+        */
+       MV88E6XXX_CAP_VLANTABLE,
+
+       /* VLAN Table Unit.
+        * The VTU is used to program 802.1Q VLANs. See GLOBAL_VTU_OP.
+        */
+       MV88E6XXX_CAP_VTU,
+};
+
+/* Bitmask of capabilities */
+#define MV88E6XXX_FLAG_ATU             BIT(MV88E6XXX_CAP_ATU)
+#define MV88E6XXX_FLAG_EEE             BIT(MV88E6XXX_CAP_EEE)
+#define MV88E6XXX_FLAG_EEPROM          BIT(MV88E6XXX_CAP_EEPROM)
+#define MV88E6XXX_FLAG_PORTSTATE       BIT(MV88E6XXX_CAP_PORTSTATE)
+#define MV88E6XXX_FLAG_PPU             BIT(MV88E6XXX_CAP_PPU)
+#define MV88E6XXX_FLAG_PPU_ACTIVE      BIT(MV88E6XXX_CAP_PPU_ACTIVE)
+#define MV88E6XXX_FLAG_SMI_PHY         BIT(MV88E6XXX_CAP_SMI_PHY)
+#define MV88E6XXX_FLAG_STU             BIT(MV88E6XXX_CAP_STU)
+#define MV88E6XXX_FLAG_SWITCH_MAC      BIT(MV88E6XXX_CAP_SWITCH_MAC_WOL_WOF)
+#define MV88E6XXX_FLAG_TEMP            BIT(MV88E6XXX_CAP_TEMP)
+#define MV88E6XXX_FLAG_TEMP_LIMIT      BIT(MV88E6XXX_CAP_TEMP_LIMIT)
+#define MV88E6XXX_FLAG_VLANTABLE       BIT(MV88E6XXX_CAP_VLANTABLE)
+#define MV88E6XXX_FLAG_VTU             BIT(MV88E6XXX_CAP_VTU)
+
+#define MV88E6XXX_FLAGS_FAMILY_6095    \
+       (MV88E6XXX_FLAG_ATU |           \
+        MV88E6XXX_FLAG_PPU |           \
+        MV88E6XXX_FLAG_VLANTABLE |     \
+        MV88E6XXX_FLAG_VTU)
+
+#define MV88E6XXX_FLAGS_FAMILY_6097    \
+       (MV88E6XXX_FLAG_ATU |           \
+        MV88E6XXX_FLAG_PPU |           \
+        MV88E6XXX_FLAG_STU |           \
+        MV88E6XXX_FLAG_VLANTABLE |     \
+        MV88E6XXX_FLAG_VTU)
+
+#define MV88E6XXX_FLAGS_FAMILY_6165    \
+       (MV88E6XXX_FLAG_STU |           \
+        MV88E6XXX_FLAG_SWITCH_MAC |    \
+        MV88E6XXX_FLAG_TEMP |          \
+        MV88E6XXX_FLAG_VTU)
+
+#define MV88E6XXX_FLAGS_FAMILY_6185    \
+       (MV88E6XXX_FLAG_ATU |           \
+        MV88E6XXX_FLAG_PPU |           \
+        MV88E6XXX_FLAG_VLANTABLE |     \
+        MV88E6XXX_FLAG_VTU)
+
+#define MV88E6XXX_FLAGS_FAMILY_6320    \
+       (MV88E6XXX_FLAG_ATU |           \
+        MV88E6XXX_FLAG_EEE |           \
+        MV88E6XXX_FLAG_EEPROM |        \
+        MV88E6XXX_FLAG_PORTSTATE |     \
+        MV88E6XXX_FLAG_PPU_ACTIVE |    \
+        MV88E6XXX_FLAG_SMI_PHY |       \
+        MV88E6XXX_FLAG_SWITCH_MAC |    \
+        MV88E6XXX_FLAG_TEMP |          \
+        MV88E6XXX_FLAG_TEMP_LIMIT |    \
+        MV88E6XXX_FLAG_VLANTABLE |     \
+        MV88E6XXX_FLAG_VTU)
+
+#define MV88E6XXX_FLAGS_FAMILY_6351    \
+       (MV88E6XXX_FLAG_ATU |           \
+        MV88E6XXX_FLAG_PORTSTATE |     \
+        MV88E6XXX_FLAG_PPU_ACTIVE |    \
+        MV88E6XXX_FLAG_SMI_PHY |       \
+        MV88E6XXX_FLAG_STU |           \
+        MV88E6XXX_FLAG_SWITCH_MAC |    \
+        MV88E6XXX_FLAG_TEMP |          \
+        MV88E6XXX_FLAG_VLANTABLE |     \
+        MV88E6XXX_FLAG_VTU)
+
+#define MV88E6XXX_FLAGS_FAMILY_6352    \
+       (MV88E6XXX_FLAG_ATU |           \
+        MV88E6XXX_FLAG_EEE |           \
+        MV88E6XXX_FLAG_EEPROM |        \
+        MV88E6XXX_FLAG_PORTSTATE |     \
+        MV88E6XXX_FLAG_PPU_ACTIVE |    \
+        MV88E6XXX_FLAG_SMI_PHY |       \
+        MV88E6XXX_FLAG_STU |           \
+        MV88E6XXX_FLAG_SWITCH_MAC |    \
+        MV88E6XXX_FLAG_TEMP |          \
+        MV88E6XXX_FLAG_TEMP_LIMIT |    \
+        MV88E6XXX_FLAG_VLANTABLE |     \
+        MV88E6XXX_FLAG_VTU)
+
+struct mv88e6xxx_info {
+       enum mv88e6xxx_family family;
+       u16 prod_num;
+       const char *name;
+       unsigned int num_databases;
+       unsigned int num_ports;
+       unsigned long flags;
 };
 
 struct mv88e6xxx_atu_entry {
@@ -397,13 +547,26 @@ struct mv88e6xxx_priv_port {
 };
 
 struct mv88e6xxx_priv_state {
+       const struct mv88e6xxx_info *info;
+
+       /* The dsa_switch this private structure is related to */
+       struct dsa_switch *ds;
+
+       /* The device this structure is associated to */
+       struct device *dev;
+
        /* When using multi-chip addressing, this mutex protects
         * access to the indirect access registers.  (In single-chip
         * mode, this mutex is effectively useless.)
         */
        struct mutex    smi_mutex;
 
-#ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
+       /* The MII bus and the address on the bus that is used to
+        * communicate with the switch
+        */
+       struct mii_bus *bus;
+       int sw_addr;
+
        /* Handles automatic disabling and re-enabling of the PHY
         * polling unit.
         */
@@ -411,7 +574,6 @@ struct mv88e6xxx_priv_state {
        int                     ppu_disabled;
        struct work_struct      ppu_work;
        struct timer_list       ppu_timer;
-#endif
 
        /* This mutex serialises access to the statistics unit.
         * Hold this mutex over snapshot + dump sequences.
@@ -429,14 +591,20 @@ struct mv88e6xxx_priv_state {
         */
        struct mutex eeprom_mutex;
 
-       int             id; /* switch product id */
-       int             num_ports;      /* number of switch ports */
-
        struct mv88e6xxx_priv_port      ports[DSA_MAX_PORTS];
 
        DECLARE_BITMAP(port_state_update_mask, DSA_MAX_PORTS);
 
        struct work_struct bridge_work;
+
+       /* A switch may have a GPIO line tied to its reset pin. Parse
+        * this from the device tree, and use it before performing a
+        * switch soft reset.
+        */
+       struct gpio_desc *reset;
+
+       /* set to the size of the EEPROM if supported by the switch */
+       int             eeprom_len;
 };
 
 enum stat_type {
@@ -452,104 +620,10 @@ struct mv88e6xxx_hw_stat {
        enum stat_type type;
 };
 
-int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active);
-char *mv88e6xxx_lookup_name(struct device *host_dev, int sw_addr,
-                           const struct mv88e6xxx_switch_id *table,
-                           unsigned int num);
-int mv88e6xxx_setup_ports(struct dsa_switch *ds);
-int mv88e6xxx_setup_common(struct dsa_switch *ds);
-int mv88e6xxx_setup_global(struct dsa_switch *ds);
-int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg);
-int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val);
-int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr);
-int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr);
-int mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum);
-int mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val);
-int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum);
-int mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
-                                u16 val);
-void mv88e6xxx_ppu_state_init(struct dsa_switch *ds);
-int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum);
-int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
-                           int regnum, u16 val);
-void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data);
-void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
-                                uint64_t *data);
-int mv88e6xxx_get_sset_count(struct dsa_switch *ds);
-int mv88e6xxx_get_sset_count_basic(struct dsa_switch *ds);
-void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
-                          struct phy_device *phydev);
-int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port);
-void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
-                       struct ethtool_regs *regs, void *_p);
-int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp);
-int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp);
-int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp);
-int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm);
-int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds);
-int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds);
-int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr, int regnum);
-int mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr, int regnum,
-                                u16 val);
-int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
-int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
-                     struct phy_device *phydev, struct ethtool_eee *e);
-int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
-                              struct net_device *bridge);
-void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port);
-void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port, u8 state);
-int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
-                                 bool vlan_filtering);
-int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
-                               const struct switchdev_obj_port_vlan *vlan,
-                               struct switchdev_trans *trans);
-void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
-                            const struct switchdev_obj_port_vlan *vlan,
-                            struct switchdev_trans *trans);
-int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
-                           const struct switchdev_obj_port_vlan *vlan);
-int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
-                            struct switchdev_obj_port_vlan *vlan,
-                            int (*cb)(struct switchdev_obj *obj));
-int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
-                              const struct switchdev_obj_port_fdb *fdb,
-                              struct switchdev_trans *trans);
-void mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
-                           const struct switchdev_obj_port_fdb *fdb,
-                           struct switchdev_trans *trans);
-int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
-                          const struct switchdev_obj_port_fdb *fdb);
-int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
-                           struct switchdev_obj_port_fdb *fdb,
-                           int (*cb)(struct switchdev_obj *obj));
-int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg);
-int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
-                            int reg, int val);
-
-extern struct dsa_switch_driver mv88e6131_switch_driver;
-extern struct dsa_switch_driver mv88e6123_switch_driver;
-extern struct dsa_switch_driver mv88e6352_switch_driver;
-extern struct dsa_switch_driver mv88e6171_switch_driver;
-
-#define REG_READ(addr, reg)                                            \
-       ({                                                              \
-               int __ret;                                              \
-                                                                       \
-               __ret = mv88e6xxx_reg_read(ds, addr, reg);              \
-               if (__ret < 0)                                          \
-                       return __ret;                                   \
-               __ret;                                                  \
-       })
-
-#define REG_WRITE(addr, reg, val)                                      \
-       ({                                                              \
-               int __ret;                                              \
-                                                                       \
-               __ret = mv88e6xxx_reg_write(ds, addr, reg, val);        \
-               if (__ret < 0)                                          \
-                       return __ret;                                   \
-       })
-
-
+static inline bool mv88e6xxx_has(struct mv88e6xxx_priv_state *ps,
+                                unsigned long flags)
+{
+       return (ps->info->flags & flags) == flags;
+}
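/* Note that mv88e6xxx_has() checks that *all* of the passed flags are
 * set, so it can gate code that needs several capabilities at once. An
 * illustrative (not in-tree) call:
 *
 *	if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEE | MV88E6XXX_FLAG_SMI_PHY))
 *		...;	/* runs only when both capabilities are present */
 */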
 
 #endif
index 7677c745fb30b38bf9371e7ada66c87f14bd4184..91ada52f776b67b84be5c02d1daff13fe85b8e0d 100644 (file)
@@ -699,7 +699,7 @@ el3_tx_timeout (struct net_device *dev)
                dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS),
                inw(ioaddr + TX_FREE));
        dev->stats.tx_errors++;
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        /* Issue TX_RESET and TX_START commands. */
        outw(TxReset, ioaddr + EL3_CMD);
        outw(TxEnable, ioaddr + EL3_CMD);
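/* Background for the many dev->trans_start conversions in this series:
 * netif_trans_update() records the TX watchdog timestamp in the per-queue
 * structure rather than in the legacy net_device field. Paraphrased from
 * this kernel's netdevice.h (details may differ slightly):
 *
 *	static inline void netif_trans_update(struct net_device *dev)
 *	{
 *		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
 *
 *		if (txq->trans_start != jiffies)
 *			txq->trans_start = jiffies;
 *	}
 */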
index 942fb0d5aacebf0b7f565836979b7d61ab3b9cfb..b26e038b4a0e904861262cc6c0bcf570d3a2d6d0 100644 (file)
@@ -992,7 +992,7 @@ static void corkscrew_timeout(struct net_device *dev)
                if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress))
                        break;
        outw(TxEnable, ioaddr + EL3_CMD);
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        dev->stats.tx_errors++;
        dev->stats.tx_dropped++;
        netif_wake_queue(dev);
index b9948f00c5e9d4121099190adb653ac2d3f48183..b88afd7593074d83bd878882d3fc31a4306dd654 100644 (file)
@@ -700,7 +700,7 @@ static void el3_tx_timeout(struct net_device *dev)
        netdev_notice(dev, "Transmit timed out!\n");
        dump_status(dev);
        dev->stats.tx_errors++;
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        /* Issue TX_RESET and TX_START commands. */
        tc574_wait_for_completion(dev, TxReset);
        outw(TxEnable, ioaddr + EL3_CMD);
index c5a320507556a83adf4595ef5a6071e81a056848..71396e4b87e399f757d27828adc97c2a00db8660 100644 (file)
@@ -534,7 +534,7 @@ static void el3_tx_timeout(struct net_device *dev)
        netdev_warn(dev, "Transmit timed out!\n");
        dump_status(dev);
        dev->stats.tx_errors++;
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        /* Issue TX_RESET and TX_START commands. */
        tc589_wait_for_completion(dev, TxReset);
        outw(TxEnable, ioaddr + EL3_CMD);
index d81fceddbe0e86d2f6489c3210051fe7265a6560..25c55ab05c7dd2ba604f25be756130c042449c71 100644 (file)
@@ -1944,7 +1944,7 @@ static void vortex_tx_timeout(struct net_device *dev)
        }
        /* Issue Tx Enable */
        iowrite16(TxEnable, ioaddr + EL3_CMD);
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
 }
 
 /*
index ec6eac1f8c95ab79d33209e272a31f71484b0f62..4ea717d68c957a38704ec403ccc80712274bc0a9 100644 (file)
@@ -1041,7 +1041,7 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
        {
                ei_local->txing = 1;
                NS8390_trigger_send(dev, send_length, output_page);
-               dev->trans_start = jiffies;
+               netif_trans_update(dev);
                if (output_page == ei_local->tx_start_page) 
                {
                        ei_local->tx1 = -1;
@@ -1270,7 +1270,7 @@ static void ei_tx_intr(struct net_device *dev)
                {
                        ei_local->txing = 1;
                        NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
-                       dev->trans_start = jiffies;
+                       netif_trans_update(dev);
                        ei_local->tx2 = -1,
                        ei_local->lasttx = 2;
                }
@@ -1287,7 +1287,7 @@ static void ei_tx_intr(struct net_device *dev)
                {
                        ei_local->txing = 1;
                        NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
-                       dev->trans_start = jiffies;
+                       netif_trans_update(dev);
                        ei_local->tx1 = -1;
                        ei_local->lasttx = 1;
                }
index b96e8852b2d195109b92ff6dd41a2b04a02d5389..60f8e2c8e726664f5b5b58d054473f59e8cd0ffc 100644 (file)
@@ -596,7 +596,7 @@ static void ei_tx_intr(struct net_device *dev)
                if (ei_local->tx2 > 0) {
                        ei_local->txing = 1;
                        NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
-                       dev->trans_start = jiffies;
+                       netif_trans_update(dev);
                        ei_local->tx2 = -1,
                        ei_local->lasttx = 2;
                } else
@@ -609,7 +609,7 @@ static void ei_tx_intr(struct net_device *dev)
                if (ei_local->tx1 > 0) {
                        ei_local->txing = 1;
                        NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
-                       dev->trans_start = jiffies;
+                       netif_trans_update(dev);
                        ei_local->tx1 = -1;
                        ei_local->lasttx = 1;
                } else
index ac7288240d5534cada9015d098cbef3e37139491..1d1069641d81f3a2dbd405a66a3cf5de044fccb4 100644 (file)
@@ -1129,7 +1129,7 @@ static void tx_timeout(struct net_device *dev)
 
        /* Trigger an immediate transmit demand. */
 
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        dev->stats.tx_errors++;
        netif_wake_queue(dev);
 }
index 74139cb7f84920ff2387204b782a08881a3ea6c8..3d2245fdc283edea9660db540f1beec17a63e8bd 100644 (file)
@@ -1430,7 +1430,7 @@ static void bfin_mac_timeout(struct net_device *dev)
        bfin_mac_enable(lp->phydev);
 
        /* We can accept TX packets again */
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
 }
 
 static void bfin_mac_multicast_hash(struct net_device *dev)
index 0907ab6ff309e65ac4ff56250443dc23d6904b1c..30defe6c81f22d6463c4df091a421014392f6ff2 100644 (file)
@@ -3349,7 +3349,7 @@ static void et131x_down(struct net_device *netdev)
        struct et131x_adapter *adapter = netdev_priv(netdev);
 
        /* Save the timestamp for the TX watchdog, prevent a timeout */
-       netdev->trans_start = jiffies;
+       netif_trans_update(netdev);
 
        phy_stop(adapter->phydev);
        et131x_disable_txrx(netdev);
@@ -3816,7 +3816,7 @@ static netdev_tx_t et131x_tx(struct sk_buff *skb, struct net_device *netdev)
                netif_stop_queue(netdev);
 
        /* Save the timestamp for the TX timeout watchdog */
-       netdev->trans_start = jiffies;
+       netif_trans_update(netdev);
 
        /* TCB is not available */
        if (tx_ring->used >= NUM_TCB)
index 8d50314ac3eb1f308d1cc556270058aba05c7b60..de2c4bf5fac461839b929bae5033c877130cc05d 100644 (file)
@@ -428,7 +428,7 @@ static void emac_timeout(struct net_device *dev)
        emac_reset(db);
        emac_init_device(dev);
        /* We can accept TX packets again */
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
        netif_wake_queue(dev);
 
        /* Restore previous register address */
@@ -468,7 +468,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
                       db->membase + EMAC_TX_CTL0_REG);
 
                /* save the time stamp */
-               dev->trans_start = jiffies;
+               netif_trans_update(dev);
        } else if (channel == 1) {
                /* set TX len */
                writel(skb->len, db->membase + EMAC_TX_PL1_REG);
@@ -477,7 +477,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
                       db->membase + EMAC_TX_CTL1_REG);
 
                /* save the time stamp */
-               dev->trans_start = jiffies;
+               netif_trans_update(dev);
        }
 
        if ((db->tx_fifo_stat & 3) == 3) {
index 66d0b73c39c03ba2a050c70e49f6cba4498f47c1..dcf2a1f3643d79aa3bf7971c5dda37c22561a27a 100644 (file)
@@ -260,7 +260,7 @@ static int lance_reset(struct net_device *dev)
 
        load_csrs(lp);
        lance_init_ring(dev);
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        status = init_restart_lance(lp);
 #ifdef DEBUG_DRIVER
        printk("Lance restart=%d\n", status);
@@ -530,7 +530,7 @@ void lance_tx_timeout(struct net_device *dev)
 {
        printk("lance_tx_timeout\n");
        lance_reset(dev);
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        netif_wake_queue(dev);
 }
 EXPORT_SYMBOL_GPL(lance_tx_timeout);
@@ -543,11 +543,13 @@ int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
        static int outs;
        unsigned long flags;
 
-       if (!TX_BUFFS_AVAIL)
-               return NETDEV_TX_LOCKED;
-
        netif_stop_queue(dev);
 
+       if (!TX_BUFFS_AVAIL) {
+               dev_consume_skb_any(skb);
+               return NETDEV_TX_OK;
+       }
+
        skblen = skb->len;
 
 #ifdef DEBUG_DRIVER
index 56139184b8019c132428654a5af6a4bd4018de14..a83cd1c4ce1d051885050c6d5b7b257fce562c62 100644 (file)
@@ -512,7 +512,7 @@ static inline int lance_reset(struct net_device *dev)
        load_csrs(lp);
 
        lance_init_ring(dev);
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        netif_start_queue(dev);
 
        status = init_restart_lance(lp);
@@ -547,10 +547,8 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
 
        local_irq_save(flags);
 
-       if (!lance_tx_buffs_avail(lp)) {
-               local_irq_restore(flags);
-               return NETDEV_TX_LOCKED;
-       }
+       if (!lance_tx_buffs_avail(lp))
+               goto out_free;
 
 #ifdef DEBUG
        /* dump the packet */
@@ -573,6 +571,7 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
 
        /* Kick the lance: transmit now */
        ll->rdp = LE_C0_INEA | LE_C0_TDMD;
+ out_free:
        dev_kfree_skb(skb);
 
        local_irq_restore(flags);
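/* The NETDEV_TX_LOCKED removals in this series all follow one recipe:
 * that return value is being retired along with LLTX, so a "cannot
 * transmit right now" condition must either stop the queue and return
 * NETDEV_TX_BUSY, or consume the skb and return NETDEV_TX_OK, as the two
 * lance drivers above now do. A sketch of the two legal outcomes, with an
 * assumed ring_has_room() helper:
 *
 *	if (!ring_has_room(priv)) {
 *		netif_stop_queue(dev);	/* core will retry later */
 *		return NETDEV_TX_BUSY;
 *	}
 *	...
 *	return NETDEV_TX_OK;		/* skb accepted (or freed) */
 */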
index b10964e8cb5469ce6bc10fa8f9524f06663b78b3..d2bc8e5dcd23b21dab55b377961d358b55a29ede 100644 (file)
@@ -764,7 +764,7 @@ static void lance_tx_timeout (struct net_device *dev)
        /* lance_restart, essentially */
        lance_init_ring(dev);
        REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT;
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        netif_wake_queue(dev);
 }
 
index d3977d032b488e8a571f37e633cd012b04e7e7d4..9af309e017fdadfa67cb7200a1c3d3cd76252c83 100644 (file)
@@ -1074,7 +1074,7 @@ static void au1000_tx_timeout(struct net_device *dev)
        netdev_err(dev, "au1000_tx_timeout: dev=%p\n", dev);
        au1000_reset_mac(dev);
        au1000_init(dev);
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        netif_wake_queue(dev);
 }
 
index b584b78237dfdbf9ae50618c2b992459137b2739..b799c7ac899b984bbeeacfc55f0d0917c7e2a037 100644 (file)
@@ -877,7 +877,7 @@ static inline int lance_reset(struct net_device *dev)
 
        lance_init_ring(dev);
        load_csrs(lp);
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        status = init_restart_lance(lp);
        return status;
 }
index 3a7ebfdda57dee33cbde71cf3440e5eb70aa262a..abb1ba228b26512f020d89f5123faaa1a9a7e3a4 100644 (file)
@@ -943,7 +943,7 @@ static void lance_tx_timeout (struct net_device *dev)
 #endif
        lance_restart (dev, 0x0043, 1);
 
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        netif_wake_queue (dev);
 }
 
index 1cf33addd15e23ee7cbe987c9a82f19588ffa178..cda53db75f17bdc957bcf3dcce20eda52dd04daa 100644 (file)
@@ -782,7 +782,7 @@ static void ni65_stop_start(struct net_device *dev,struct priv *p)
                if(!p->lock)
                        if (p->tmdnum || !p->xmit_queued)
                                netif_wake_queue(dev);
-               dev->trans_start = jiffies; /* prevent tx timeout */
+               netif_trans_update(dev); /* prevent tx timeout */
        }
        else
                writedatareg(CSR0_STRT | csr0);
@@ -1148,7 +1148,7 @@ static void ni65_timeout(struct net_device *dev)
                printk("%02x ",p->tmdhead[i].u.s.status);
        printk("\n");
        ni65_lance_reinit(dev);
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        netif_wake_queue(dev);
 }
 
index 27245efe9f50098eee594cb844e5e1647978bf3d..2807e181647bfde7b81b4e6ecd648be2a9c40ef2 100644 (file)
@@ -851,7 +851,7 @@ static void mace_tx_timeout(struct net_device *dev)
 #else /* #if RESET_ON_TIMEOUT */
   pr_cont("NOT resetting card\n");
 #endif /* #if RESET_ON_TIMEOUT */
-  dev->trans_start = jiffies; /* prevent tx timeout */
+  netif_trans_update(dev); /* prevent tx timeout */
   netif_wake_queue(dev);
 }
 
index 7ccebae9cb48794cd3977a6fb49844a5bc57e75a..c22bf52d3320b4831f944ceef01770264acc3c6b 100644 (file)
@@ -448,7 +448,7 @@ static void pcnet32_netif_stop(struct net_device *dev)
 {
        struct pcnet32_private *lp = netdev_priv(dev);
 
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        napi_disable(&lp->napi);
        netif_tx_disable(dev);
 }
@@ -2426,7 +2426,7 @@ static void pcnet32_tx_timeout(struct net_device *dev)
        }
        pcnet32_restart(dev, CSR0_NORMAL);
 
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        netif_wake_queue(dev);
 
        spin_unlock_irqrestore(&lp->lock, flags);
index 7847638bdd224c55fe1bb2c7d4638628ce5687e9..9b56b40259dccbfe14bffff5ce246cc71bed17bd 100644 (file)
@@ -997,7 +997,7 @@ static int lance_reset(struct net_device *dev)
        }
        lp->init_ring(dev);
        load_csrs(lp);
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        status = init_restart_lance(lp);
        return status;
 }
index b212488606da4877117e7e9f617a169f2d62f282..64792880e9407b38944113a6bcbe42b67fac8405 100644 (file)
@@ -729,6 +729,6 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
        return xgene_cle_setup_ptree(pdata, enet_cle);
 }
 
-struct xgene_cle_ops xgene_cle3in_ops = {
+const struct xgene_cle_ops xgene_cle3in_ops = {
        .cle_init = xgene_enet_cle_init,
 };
index 29a17abdd828174cb3f5e4fad10458c0df26efca..13e829ab9053a5d996596efcdfe3c1bb2f264820 100644 (file)
@@ -290,6 +290,6 @@ struct xgene_enet_cle {
        u32 jump_bytes;
 };
 
-extern struct xgene_cle_ops xgene_cle3in_ops;
+extern const struct xgene_cle_ops xgene_cle3in_ops;
 
 #endif /* __XGENE_ENET_CLE_H__ */
index 39e081a70f5b4f5bb9b674d7fe9c470f6d807b80..457f745002424fc96817721b00b2261ff5bfb926 100644 (file)
@@ -824,7 +824,7 @@ static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
                return -EINVAL;
 
        phy = get_phy_device(mdio, phy_id, false);
-       if (!phy || IS_ERR(phy))
+       if (IS_ERR(phy))
                return -EIO;
 
        ret = phy_device_register(phy);
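/* get_phy_device() now reports failure exclusively through ERR_PTR()
 * values rather than NULL, so the dropped "!phy ||" half of the old
 * check could never trigger; IS_ERR() alone is the complete test.
 */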
index 8d4c1ad2fc6051f66ffc059456927511e9dd2fe3..aa87049c353d264adcd3839b7af1639639222c5d 100644 (file)
@@ -973,6 +973,17 @@ static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
        return owner;
 }
 
+static u8 xgene_start_cpu_bufnum(struct xgene_enet_pdata *pdata)
+{
+       struct device *dev = &pdata->pdev->dev;
+       u32 cpu_bufnum;
+       int ret;
+
+       ret = device_property_read_u32(dev, "channel", &cpu_bufnum);
+
+       return (!ret) ? cpu_bufnum : pdata->cpu_bufnum;
+}
+
 static int xgene_enet_create_desc_rings(struct net_device *ndev)
 {
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
@@ -981,13 +992,15 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
        struct xgene_enet_desc_ring *buf_pool = NULL;
        enum xgene_ring_owner owner;
        dma_addr_t dma_exp_bufs;
-       u8 cpu_bufnum = pdata->cpu_bufnum;
+       u8 cpu_bufnum;
        u8 eth_bufnum = pdata->eth_bufnum;
        u8 bp_bufnum = pdata->bp_bufnum;
        u16 ring_num = pdata->ring_num;
        u16 ring_id;
        int i, ret, size;
 
+       cpu_bufnum = xgene_start_cpu_bufnum(pdata);
+
        for (i = 0; i < pdata->rxq_cnt; i++) {
                /* allocate rx descriptor ring */
                owner = xgene_derive_ring_owner(pdata);
@@ -1595,21 +1608,22 @@ static int xgene_enet_probe(struct platform_device *pdev)
 
        ret = xgene_enet_init_hw(pdata);
        if (ret)
-               goto err;
+               goto err_netdev;
 
        mac_ops = pdata->mac_ops;
        if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
                ret = xgene_enet_mdio_config(pdata);
                if (ret)
-                       goto err;
+                       goto err_netdev;
        } else {
                INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state);
        }
 
        xgene_enet_napi_add(pdata);
        return 0;
-err:
+err_netdev:
        unregister_netdev(ndev);
+err:
        free_netdev(ndev);
        return ret;
 }
index 175d18890c7a7da01895d7970ef653ce0170c01a..0a2887b96a4256d389729908e47a142d07218936 100644 (file)
@@ -191,7 +191,7 @@ struct xgene_enet_pdata {
        const struct xgene_mac_ops *mac_ops;
        const struct xgene_port_ops *port_ops;
        struct xgene_ring_ops *ring_ops;
-       struct xgene_cle_ops *cle_ops;
+       const struct xgene_cle_ops *cle_ops;
        struct delayed_work link_work;
        u32 port_id;
        u8 cpu_bufnum;
index 55b118e876fdece9343fa3c24296eac14b6fa411..9fe8b5e310d15a024aec48456f43444137637138 100644 (file)
@@ -745,7 +745,7 @@ static netdev_features_t alx_fix_features(struct net_device *netdev,
 
 static void alx_netif_stop(struct alx_priv *alx)
 {
-       alx->dev->trans_start = jiffies;
+       netif_trans_update(alx->dev);
        if (netif_carrier_ok(alx->dev)) {
                netif_carrier_off(alx->dev);
                netif_tx_disable(alx->dev);
index b9203d9289381940ca86a309d1cf78bd20ce9657..c46b489ce9b4d7f0fb46baa2497a0e22723b2159 100644 (file)
@@ -488,7 +488,7 @@ struct atl1c_tpd_ring {
        dma_addr_t dma;         /* descriptor ring physical address */
        u16 size;               /* descriptor ring length in bytes */
        u16 count;              /* number of descriptors in the ring */
-       u16 next_to_use;        /* this is protectd by adapter->tx_lock */
+       u16 next_to_use;
        atomic_t next_to_clean;
        struct atl1c_buffer *buffer_info;
 };
@@ -542,7 +542,6 @@ struct atl1c_adapter {
        u16 link_duplex;
 
        spinlock_t mdio_lock;
-       spinlock_t tx_lock;
        atomic_t irq_sem;
 
        struct work_struct common_task;
index d0084d4d1a9b718773ca2e6ecae238d285392999..a3200ea6d7656c58a4dc0fd65f572c8558dc0413 100644 (file)
@@ -821,7 +821,6 @@ static int atl1c_sw_init(struct atl1c_adapter *adapter)
        atl1c_set_rxbufsize(adapter, adapter->netdev);
        atomic_set(&adapter->irq_sem, 1);
        spin_lock_init(&adapter->mdio_lock);
-       spin_lock_init(&adapter->tx_lock);
        set_bit(__AT_DOWN, &adapter->flags);
 
        return 0;
@@ -2206,7 +2205,6 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
                                          struct net_device *netdev)
 {
        struct atl1c_adapter *adapter = netdev_priv(netdev);
-       unsigned long flags;
        u16 tpd_req = 1;
        struct atl1c_tpd_desc *tpd;
        enum atl1c_trans_queue type = atl1c_trans_normal;
@@ -2217,16 +2215,10 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
        }
 
        tpd_req = atl1c_cal_tpd_req(skb);
-       if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) {
-               if (netif_msg_pktdata(adapter))
-                       dev_info(&adapter->pdev->dev, "tx locked\n");
-               return NETDEV_TX_LOCKED;
-       }
 
        if (atl1c_tpd_avail(adapter, type) < tpd_req) {
                /* not enough descriptors, just stop the queue */
                netif_stop_queue(netdev);
-               spin_unlock_irqrestore(&adapter->tx_lock, flags);
                return NETDEV_TX_BUSY;
        }
 
@@ -2234,7 +2226,6 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
 
        /* do TSO and check sum */
        if (atl1c_tso_csum(adapter, skb, &tpd, type) != 0) {
-               spin_unlock_irqrestore(&adapter->tx_lock, flags);
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
@@ -2257,12 +2248,10 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
                           "tx-skb droppted due to dma error\n");
                /* roll back tpd/buffer */
                atl1c_tx_rollback(adapter, tpd, type);
-               spin_unlock_irqrestore(&adapter->tx_lock, flags);
                dev_kfree_skb_any(skb);
        } else {
                netdev_sent_queue(adapter->netdev, skb->len);
                atl1c_tx_queue(adapter, skb, tpd, type);
-               spin_unlock_irqrestore(&adapter->tx_lock, flags);
        }
 
        return NETDEV_TX_OK;
index 0212dac7e23a78eb862162a722eb81ac1aef1361..632bb843aed6a00fc43829dea4a2c813a023c6b1 100644 (file)
@@ -442,7 +442,6 @@ struct atl1e_adapter {
        u16 link_duplex;
 
        spinlock_t mdio_lock;
-       spinlock_t tx_lock;
        atomic_t irq_sem;
 
        struct work_struct reset_task;
index 59a03a193e8354285f838f6f47c3bc081cc18bdf..974713b19ab642d66e4c8c5f114739164cbd4669 100644 (file)
@@ -648,7 +648,6 @@ static int atl1e_sw_init(struct atl1e_adapter *adapter)
 
        atomic_set(&adapter->irq_sem, 1);
        spin_lock_init(&adapter->mdio_lock);
-       spin_lock_init(&adapter->tx_lock);
 
        set_bit(__AT_DOWN, &adapter->flags);
 
@@ -1866,7 +1865,6 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
                                          struct net_device *netdev)
 {
        struct atl1e_adapter *adapter = netdev_priv(netdev);
-       unsigned long flags;
        u16 tpd_req = 1;
        struct atl1e_tpd_desc *tpd;
 
@@ -1880,13 +1878,10 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
                return NETDEV_TX_OK;
        }
        tpd_req = atl1e_cal_tdp_req(skb);
-       if (!spin_trylock_irqsave(&adapter->tx_lock, flags))
-               return NETDEV_TX_LOCKED;
 
        if (atl1e_tpd_avail(adapter) < tpd_req) {
                /* not enough descriptors, just stop the queue */
                netif_stop_queue(netdev);
-               spin_unlock_irqrestore(&adapter->tx_lock, flags);
                return NETDEV_TX_BUSY;
        }
 
@@ -1910,7 +1905,6 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
 
        /* do TSO and check sum */
        if (atl1e_tso_csum(adapter, skb, tpd) != 0) {
-               spin_unlock_irqrestore(&adapter->tx_lock, flags);
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
@@ -1921,10 +1915,7 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
        }
 
        atl1e_tx_queue(adapter, tpd_req, tpd);
-
-       netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
 out:
-       spin_unlock_irqrestore(&adapter->tx_lock, flags);
        return NETDEV_TX_OK;
 }
 
@@ -2285,8 +2276,7 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
 
        netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO |
                              NETIF_F_HW_VLAN_CTAG_RX;
-       netdev->features = netdev->hw_features | NETIF_F_LLTX |
-                          NETIF_F_HW_VLAN_CTAG_TX;
+       netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_TX;
        /* not enabled by default */
        netdev->hw_features |= NETIF_F_RXALL | NETIF_F_RXFCS;
        return 0;
index 8f76f4558a88c15b0a14bb6ec3d9fb769fea9d0a..2ff465848b6553ecc0a646421e30cded0e8fb053 100644 (file)
@@ -1412,7 +1412,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        err = -EIO;
 
-       netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX;
+       netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX;
        netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
 
        /* Init PHY as early as possible due to power saving issue  */
index 993c780bdfab9ec35de3e1b0deca024c2615cca1..543bf38105c9240d9ae374708377755c5e4db9a6 100644 (file)
@@ -831,7 +831,7 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget)
        rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
 
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                /* re-enable RX interrupts */
                intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
        }
@@ -873,7 +873,7 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
                if (likely(napi_schedule_prep(&priv->napi))) {
                        /* disable RX interrupts */
                        intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
-                       __napi_schedule(&priv->napi);
+                       __napi_schedule_irqoff(&priv->napi);
                }
        }
 
@@ -916,7 +916,7 @@ static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
 
                if (likely(napi_schedule_prep(&txr->napi))) {
                        intrl2_1_mask_set(priv, BIT(ring));
-                       __napi_schedule(&txr->napi);
+                       __napi_schedule_irqoff(&txr->napi);
                }
        }
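/* Two idioms appear in this bcm_sysport conversion: napi_complete_done()
 * passes the amount of work actually done so the core can factor it in
 * (e.g. for deferred GRO flushing), and __napi_schedule_irqoff() skips
 * the local_irq_save/restore pair because the ISR already runs with
 * interrupts off. The ISR-side pattern (mask_rx_irq() is an assumed
 * placeholder for the device-specific masking call):
 *
 *	if (likely(napi_schedule_prep(&priv->napi))) {
 *		mask_rx_irq(priv);	/* quiesce the source first */
 *		__napi_schedule_irqoff(&priv->napi);
 *	}
 */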
 
@@ -1117,7 +1117,7 @@ static void bcm_sysport_tx_timeout(struct net_device *dev)
 {
        netdev_warn(dev, "transmit timeout!\n");
 
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
        dev->stats.tx_errors++;
 
        netif_tx_wake_all_queues(dev);
index 99b30a952b38736a825efc5149aa9027f6fae64d..38db2e4d7d540d27748ca05748c412b4a4d0215b 100644 (file)
@@ -1572,6 +1572,11 @@ static int bgmac_probe(struct bcma_device *core)
                dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
        }
 
+       /* This (reset &) enable is not present in the specs or the reference
+        * driver, but Broadcom does it in arch PCI code when enabling the
+        * fake PCI device.
+        */
+       bcma_core_enable(core, 0);
+
        /* Allocation and references */
        net_dev = alloc_etherdev(sizeof(*bgmac));
        if (!net_dev)
index 4fbb093e0d844031dd86cc3d7253eb44d10686d6..9a03c142b742502051b7e98a0a30c600684d8f80 100644 (file)
 #define  BGMAC_CMDCFG_TAI                      0x00000200
 #define  BGMAC_CMDCFG_HD                       0x00000400      /* Set if in half duplex mode */
 #define  BGMAC_CMDCFG_HD_SHIFT                 10
-#define  BGMAC_CMDCFG_SR_REV0                  0x00000800      /* Set to reset mode, for other revs */
-#define  BGMAC_CMDCFG_SR_REV4                  0x00002000      /* Set to reset mode, only for core rev 4 */
-#define  BGMAC_CMDCFG_SR(rev)  ((rev == 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
+#define  BGMAC_CMDCFG_SR_REV0                  0x00000800      /* Set to reset mode, for core rev 0-3 */
+#define  BGMAC_CMDCFG_SR_REV4                  0x00002000      /* Set to reset mode, for core rev >= 4 */
+#define  BGMAC_CMDCFG_SR(rev)  ((rev >= 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
 #define  BGMAC_CMDCFG_ML                       0x00008000      /* Set to activate mac loopback mode */
 #define  BGMAC_CMDCFG_AE                       0x00400000
 #define  BGMAC_CMDCFG_CFE                      0x00800000
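/* The widened test matters for core revisions above 4, which previously
 * fell back to the rev 0-3 bit:
 *
 *	BGMAC_CMDCFG_SR(0) == 0x00000800
 *	BGMAC_CMDCFG_SR(4) == 0x00002000
 *	BGMAC_CMDCFG_SR(5) == 0x00002000	(was 0x00000800 before this fix)
 */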
index 597e4724a474dafb1d3f68847b2d43a8781c1d70..6a5a71710fa9821c00f3697693672f958b262906 100644 (file)
@@ -122,6 +122,7 @@ static const u16 bnxt_async_events_arr[] = {
        HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
        HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
        HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
+       HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
 };
 
 static bool bnxt_vf_pciid(enum board_idx idx)
@@ -587,12 +588,30 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
        struct page *page;
        dma_addr_t mapping;
        u16 sw_prod = rxr->rx_sw_agg_prod;
+       unsigned int offset = 0;
 
-       page = alloc_page(gfp);
-       if (!page)
-               return -ENOMEM;
+       if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+               page = rxr->rx_page;
+               if (!page) {
+                       page = alloc_page(gfp);
+                       if (!page)
+                               return -ENOMEM;
+                       rxr->rx_page = page;
+                       rxr->rx_page_offset = 0;
+               }
+               offset = rxr->rx_page_offset;
+               rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
+               if (rxr->rx_page_offset == PAGE_SIZE)
+                       rxr->rx_page = NULL;
+               else
+                       get_page(page);
+       } else {
+               page = alloc_page(gfp);
+               if (!page)
+                       return -ENOMEM;
+       }
 
-       mapping = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
+       mapping = dma_map_page(&pdev->dev, page, offset, BNXT_RX_PAGE_SIZE,
                               PCI_DMA_FROMDEVICE);
        if (dma_mapping_error(&pdev->dev, mapping)) {
                __free_page(page);
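When PAGE_SIZE exceeds BNXT_RX_PAGE_SIZE (for example 64K pages against the 32K cap defined later in bnxt.h), the hunk above carves one page into several aggregation buffers. The reference counting is the subtle part: every slice except the last takes an extra get_page(), while the last slice inherits the ring's cached reference. A worked example under those assumptions:

    /* 64K page, 32K BNXT_RX_PAGE_SIZE -> two slices per page:
     *   slice 0: offset 0,     get_page()     -> page refcount 2
     *   slice 1: offset 32768, rx_page = NULL -> cached ref handed over
     * Each slice therefore owns exactly one reference, and the page is
     * freed only after both slices complete their unmap/free path.
     */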
@@ -607,6 +626,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
        rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
 
        rx_agg_buf->page = page;
+       rx_agg_buf->offset = offset;
        rx_agg_buf->mapping = mapping;
        rxbd->rx_bd_haddr = cpu_to_le64(mapping);
        rxbd->rx_bd_opaque = sw_prod;
@@ -648,6 +668,7 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
                page = cons_rx_buf->page;
                cons_rx_buf->page = NULL;
                prod_rx_buf->page = page;
+               prod_rx_buf->offset = cons_rx_buf->offset;
 
                prod_rx_buf->mapping = cons_rx_buf->mapping;
 
@@ -715,7 +736,8 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
                            RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
 
                cons_rx_buf = &rxr->rx_agg_ring[cons];
-               skb_fill_page_desc(skb, i, cons_rx_buf->page, 0, frag_len);
+               skb_fill_page_desc(skb, i, cons_rx_buf->page,
+                                  cons_rx_buf->offset, frag_len);
                __clear_bit(cons, rxr->rx_agg_bmap);
 
                /* It is possible for bnxt_alloc_rx_page() to allocate
@@ -746,7 +768,7 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
                        return NULL;
                }
 
-               dma_unmap_page(&pdev->dev, mapping, PAGE_SIZE,
+               dma_unmap_page(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
                               PCI_DMA_FROMDEVICE);
 
                skb->data_len += frag_len;
@@ -1257,6 +1279,21 @@ static int bnxt_async_event_process(struct bnxt *bp,
 
        /* TODO CHIMP_FW: Define event id's for link change, error etc */
        switch (event_id) {
+       case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
+               u32 data1 = le32_to_cpu(cmpl->event_data1);
+               struct bnxt_link_info *link_info = &bp->link_info;
+
+               if (BNXT_VF(bp))
+                       goto async_event_process_exit;
+               if (data1 & 0x20000) {
+                       u16 fw_speed = link_info->force_link_speed;
+                       u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
+
+                       netdev_warn(bp->dev, "Link speed %d no longer supported\n",
+                                   speed);
+               }
+               /* fall thru */
+       }
        case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
                set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
                break;
@@ -1402,6 +1439,10 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
                if (!TX_CMP_VALID(txcmp, raw_cons))
                        break;
 
+               /* The entry's valid bit must be checked before
+                * reading any further fields.
+                */
+               rmb();
                if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
                        tx_pkts++;
                        /* return full budget so NAPI will complete. */
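The rmb() added above enforces the valid-bit protocol for DMA completion rings: the CPU must observe the entry's valid bit before any of its other fields, otherwise a reordered or speculative load can return data from a descriptor the NIC has only partially written. Schematically (placeholder helper names, not the driver's exact ones):

    while (entry_valid(txcmp, raw_cons)) {
            rmb();                  /* valid bit first, payload second */
            consume(txcmp);         /* TX or RX completion handling */
            raw_cons = next_raw_cons(raw_cons);
            txcmp = ring_entry(cpr, raw_cons);
    }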
@@ -1619,13 +1660,17 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
 
                        dma_unmap_page(&pdev->dev,
                                       dma_unmap_addr(rx_agg_buf, mapping),
-                                      PAGE_SIZE, PCI_DMA_FROMDEVICE);
+                                      BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE);
 
                        rx_agg_buf->page = NULL;
                        __clear_bit(j, rxr->rx_agg_bmap);
 
                        __free_page(page);
                }
+               if (rxr->rx_page) {
+                       __free_page(rxr->rx_page);
+                       rxr->rx_page = NULL;
+               }
        }
 }
 
@@ -2008,7 +2053,7 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
        if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
                return 0;
 
-       type = ((u32)PAGE_SIZE << RX_BD_LEN_SHIFT) |
+       type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
                RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
 
        bnxt_init_rxbd_pages(ring, type);
@@ -2199,7 +2244,7 @@ void bnxt_set_ring_params(struct bnxt *bp)
        bp->rx_agg_nr_pages = 0;
 
        if (bp->flags & BNXT_FLAG_TPA)
-               agg_factor = 4;
+               agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
 
        bp->flags &= ~BNXT_FLAG_JUMBO;
        if (rx_space > PAGE_SIZE) {
@@ -3060,12 +3105,12 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
                /* Number of segs is in log2 units, and the first packet is
                 * not included as part of these units.
                 */
-               if (mss <= PAGE_SIZE) {
-                       n = PAGE_SIZE / mss;
+               if (mss <= BNXT_RX_PAGE_SIZE) {
+                       n = BNXT_RX_PAGE_SIZE / mss;
                        nsegs = (MAX_SKB_FRAGS - 1) * n;
                } else {
-                       n = mss / PAGE_SIZE;
-                       if (mss & (PAGE_SIZE - 1))
+                       n = mss / BNXT_RX_PAGE_SIZE;
+                       if (mss & (BNXT_RX_PAGE_SIZE - 1))
                                n++;
                        nsegs = (MAX_SKB_FRAGS - n) / n;
                }
@@ -4055,9 +4100,11 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
 }
 
 static int bnxt_cfg_rx_mode(struct bnxt *);
+static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
 
 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
 {
+       struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
        int rc = 0;
 
        if (irq_re_init) {
@@ -4113,13 +4160,22 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
                netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
                goto err_out;
        }
-       bp->vnic_info[0].uc_filter_count = 1;
+       vnic->uc_filter_count = 1;
 
-       bp->vnic_info[0].rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
+       vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
 
        if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp))
-               bp->vnic_info[0].rx_mask |=
-                               CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+               vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+
+       if (bp->dev->flags & IFF_ALLMULTI) {
+               vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+               vnic->mc_list_count = 0;
+       } else {
+               u32 mask = 0;
+
+               bnxt_mc_list_updated(bp, &mask);
+               vnic->rx_mask |= mask;
+       }
 
        rc = bnxt_cfg_rx_mode(bp);
        if (rc)
@@ -4351,7 +4407,7 @@ static int bnxt_setup_int_mode(struct bnxt *bp)
        if (bp->flags & BNXT_FLAG_MSIX_CAP)
                rc = bnxt_setup_msix(bp);
 
-       if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
+       if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
                /* fallback to INTA */
                rc = bnxt_setup_inta(bp);
        }
@@ -4611,6 +4667,7 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;
        link_info->media_type = resp->media_type;
+       link_info->phy_type = resp->phy_type;
        link_info->transceiver = resp->xcvr_pkg_type;
        link_info->phy_addr = resp->eee_config_phy_addr &
                              PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
@@ -4789,6 +4846,21 @@ int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 }
 
+static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
+{
+       struct hwrm_port_phy_cfg_input req = {0};
+
+       if (BNXT_VF(bp))
+               return 0;
+
+       if (pci_num_vf(bp->pdev))
+               return 0;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
+       req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DOWN);
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
 static bool bnxt_eee_config_ok(struct bnxt *bp)
 {
        struct ethtool_eee *eee = &bp->eee;
@@ -5043,6 +5115,7 @@ static int bnxt_close(struct net_device *dev)
        struct bnxt *bp = netdev_priv(dev);
 
        bnxt_close_nic(bp, true, true);
+       bnxt_hwrm_shutdown_link(bp);
        return 0;
 }
 
@@ -5679,10 +5752,9 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p)
        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
 
-#ifdef CONFIG_BNXT_SRIOV
-       if (BNXT_VF(bp) && is_valid_ether_addr(bp->vf.mac_addr))
-               return -EADDRNOTAVAIL;
-#endif
+       rc = bnxt_approve_mac(bp, addr->sa_data);
+       if (rc)
+               return rc;
 
        if (ether_addr_equal(addr->sa_data, dev->dev_addr))
                return 0;
@@ -6162,14 +6234,19 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                           NETIF_F_TSO | NETIF_F_TSO6 |
                           NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
                           NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT |
-                          NETIF_F_RXHASH |
+                          NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
+                          NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
                           NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO;
 
        dev->hw_enc_features =
                        NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
                        NETIF_F_TSO | NETIF_F_TSO6 |
                        NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
-                       NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
+                       NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
+                       NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT |
+                       NETIF_F_GSO_PARTIAL;
+       dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
+                                   NETIF_F_GSO_GRE_CSUM;
        dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
        dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
                            NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
index cc8e38a9f68435d5293b8b2495a8c758d1864701..62896352b0df0c396aa1c5e4e2d08c6c76d92366 100644 (file)
@@ -407,6 +407,15 @@ struct rx_tpa_end_cmp_ext {
 
 #define BNXT_PAGE_SIZE (1 << BNXT_PAGE_SHIFT)
 
+/* The RXBD length is 16-bit so we can only support page sizes < 64K */
+#if (PAGE_SHIFT > 15)
+#define BNXT_RX_PAGE_SHIFT 15
+#else
+#define BNXT_RX_PAGE_SHIFT PAGE_SHIFT
+#endif
+
+#define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
+
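The clamp exists because a full 64K buffer length (65536) needs 17 bits and would not fit the 16-bit RXBD length field. Concretely:

    /* PAGE_SHIFT == 12 (4K pages):  BNXT_RX_PAGE_SHIFT == 12, size 4096  */
    /* PAGE_SHIFT == 16 (64K pages): BNXT_RX_PAGE_SHIFT == 15, size 32768 */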
 #define BNXT_MIN_PKT_SIZE      45
 
 #define BNXT_NUM_TESTS(bp)     0
@@ -506,6 +515,7 @@ struct bnxt_sw_rx_bd {
 
 struct bnxt_sw_rx_agg_bd {
        struct page             *page;
+       unsigned int            offset;
        dma_addr_t              mapping;
 };
 
@@ -586,6 +596,9 @@ struct bnxt_rx_ring_info {
        unsigned long           *rx_agg_bmap;
        u16                     rx_agg_bmap_size;
 
+       struct page             *rx_page;
+       unsigned int            rx_page_offset;
+
        dma_addr_t              rx_desc_mapping[MAX_RX_PAGES];
        dma_addr_t              rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
 
@@ -759,6 +772,7 @@ struct bnxt_ntuple_filter {
 };
 
 struct bnxt_link_info {
+       u8                      phy_type;
        u8                      media_type;
        u8                      transceiver;
        u8                      phy_addr;
index a2e93241b06bde7be3b87100bba4ff7866dbab8d..d6e41f237f2ca412c53301e10fe50a9595bdcbb9 100644 (file)
@@ -850,7 +850,15 @@ static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                set_pause = true;
        } else {
                u16 fw_speed;
+               u8 phy_type = link_info->phy_type;
 
+               if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET  ||
+                   phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE ||
+                   link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
+                       netdev_err(dev, "10GBase-T devices must autoneg\n");
+                       rc = -EINVAL;
+                       goto set_setting_exit;
+               }
                /* TODO: currently don't support half duplex */
                if (cmd->duplex == DUPLEX_HALF) {
                        netdev_err(dev, "HALF DUPLEX is not supported!\n");
index 8457850b0bdd37807839d1b35159d832316ba7b4..363884dd9e8ad6f0801dfa29c5b0c648fec34a4e 100644 (file)
@@ -865,6 +865,31 @@ update_vf_mac_exit:
        mutex_unlock(&bp->hwrm_cmd_lock);
 }
 
+int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
+{
+       struct hwrm_func_vf_cfg_input req = {0};
+       int rc = 0;
+
+       if (!BNXT_VF(bp))
+               return 0;
+
+       if (bp->hwrm_spec_code < 0x10202) {
+               if (is_valid_ether_addr(bp->vf.mac_addr))
+                       rc = -EADDRNOTAVAIL;
+               goto mac_done;
+       }
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
+       req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
+       memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
+       rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+mac_done:
+       if (rc) {
+               rc = -EADDRNOTAVAIL;
+               netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
+                           mac);
+       }
+       return rc;
+}
 #else
 
 void bnxt_sriov_disable(struct bnxt *bp)
@@ -879,4 +904,9 @@ void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
 void bnxt_update_vf_mac(struct bnxt *bp)
 {
 }
+
+int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
+{
+       return 0;
+}
 #endif
index 3f08354a247e04e0ffa359f5e03ca23cd7951bff..0392670ab49c07e8c9b147b3eb71b8b2183ba371 100644 (file)
@@ -20,4 +20,5 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
 void bnxt_sriov_disable(struct bnxt *);
 void bnxt_hwrm_exec_fwd_req(struct bnxt *);
 void bnxt_update_vf_mac(struct bnxt *);
+int bnxt_approve_mac(struct bnxt *, u8 *);
 #endif
index b69dc58faeab5b74ffc3142341efea084d83706d..b1d2ac818710b36328a6e0d50a77c2436d57a850 100644 (file)
@@ -5350,7 +5350,10 @@ static int cnic_start_hw(struct cnic_dev *dev)
        return 0;
 
 err1:
-       cp->free_resc(dev);
+       if (ethdev->drv_state & CNIC_DRV_STATE_HANDLES_IRQ)
+               cp->stop_hw(dev);
+       else
+               cp->free_resc(dev);
        pci_dev_put(dev->pcidev);
        return err;
 }
index f7b42b9fc9795074c97d26a73e768ec843340efd..541456398dfb1efac4ba455aaa31c8fc723c098a 100644 (file)
@@ -878,7 +878,11 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
                else
                        p = (char *)priv;
                p += s->stat_offset;
-               data[i] = *(u32 *)p;
+               if (sizeof(unsigned long) != sizeof(u32) &&
+                   s->stat_sizeof == sizeof(unsigned long))
+                       data[i] = *(unsigned long *)p;
+               else
+                       data[i] = *(u32 *)p;
        }
 }
 
@@ -1221,8 +1225,10 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
        dev->stats.tx_packets += pkts_compl;
        dev->stats.tx_bytes += bytes_compl;
 
+       txq = netdev_get_tx_queue(dev, ring->queue);
+       netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
+
        if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
-               txq = netdev_get_tx_queue(dev, ring->queue);
                if (netif_tx_queue_stopped(txq))
                        netif_tx_wake_queue(txq);
        }
@@ -1516,6 +1522,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
        ring->prod_index += nr_frags + 1;
        ring->prod_index &= DMA_P_INDEX_MASK;
 
+       netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent);
+
        if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
                netif_tx_stop_queue(txq);
 
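Together with the netdev_tx_reset_queue() calls added to bcmgenet_fini_dma() below, these hunks give the driver Byte Queue Limits accounting. The three calls must stay balanced, since a queue whose sent bytes are never completed will be throttled indefinitely. The general contract, sketched:

    /* xmit path: charge the queue as bytes are posted to hardware */
    netdev_tx_sent_queue(txq, skb->len);

    /* reclaim path: credit back completed work in bulk */
    netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

    /* teardown/reset: clear both counters before the ring is reused */
    netdev_tx_reset_queue(txq);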
@@ -1735,7 +1743,7 @@ static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
        work_done = bcmgenet_desc_rx(ring, budget);
 
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                ring->int_enable(ring);
        }
 
@@ -2364,6 +2372,7 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
 static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
 {
        int i;
+       struct netdev_queue *txq;
 
        bcmgenet_fini_rx_napi(priv);
        bcmgenet_fini_tx_napi(priv);
@@ -2378,6 +2387,14 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
                }
        }
 
+       for (i = 0; i < priv->hw_params->tx_queues; i++) {
+               txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue);
+               netdev_tx_reset_queue(txq);
+       }
+
+       txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue);
+       netdev_tx_reset_queue(txq);
+
        bcmgenet_free_rx_buffers(priv);
        kfree(priv->rx_cbs);
        kfree(priv->tx_cbs);
@@ -2493,7 +2510,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
 
                if (likely(napi_schedule_prep(&rx_ring->napi))) {
                        rx_ring->int_disable(rx_ring);
-                       __napi_schedule(&rx_ring->napi);
+                       __napi_schedule_irqoff(&rx_ring->napi);
                }
        }
 
@@ -2506,7 +2523,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
 
                if (likely(napi_schedule_prep(&tx_ring->napi))) {
                        tx_ring->int_disable(tx_ring);
-                       __napi_schedule(&tx_ring->napi);
+                       __napi_schedule_irqoff(&tx_ring->napi);
                }
        }
 
@@ -2536,7 +2553,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
 
                if (likely(napi_schedule_prep(&rx_ring->napi))) {
                        rx_ring->int_disable(rx_ring);
-                       __napi_schedule(&rx_ring->napi);
+                       __napi_schedule_irqoff(&rx_ring->napi);
                }
        }
 
@@ -2545,7 +2562,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
 
                if (likely(napi_schedule_prep(&tx_ring->napi))) {
                        tx_ring->int_disable(tx_ring);
-                       __napi_schedule(&tx_ring->napi);
+                       __napi_schedule_irqoff(&tx_ring->napi);
                }
        }
 
@@ -3042,7 +3059,7 @@ static void bcmgenet_timeout(struct net_device *dev)
        bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
        bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
 
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
 
        dev->stats.tx_errors++;
 
index eacc559679bf84801c6e4f6211bcb7ae2c4ac1c5..f1b81187a20101eca9d491a61c08bf1e4e7af30c 100644 (file)
@@ -2462,7 +2462,7 @@ static void sbmac_tx_timeout (struct net_device *dev)
        spin_lock_irqsave(&sc->sbm_lock, flags);
 
 
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        dev->stats.tx_errors++;
 
        spin_unlock_irqrestore(&sc->sbm_lock, flags);
index 3010080cfeee350a2e523ce0c26aeb427953f21a..ff300f7cf5295fc0d4f1e1b0692d74e6a6091912 100644 (file)
@@ -7383,7 +7383,7 @@ static void tg3_napi_fini(struct tg3 *tp)
 
 static inline void tg3_netif_stop(struct tg3 *tp)
 {
-       tp->dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(tp->dev);    /* prevent tx timeout */
        tg3_napi_disable(tp);
        netif_carrier_off(tp->dev);
        netif_tx_disable(tp->dev);
index eec3200ade4a5f8fa1a126c812fa01926b648510..cb07d95e3dd9d6fb97fdbb58bcf44e3d2ae256ea 100644 (file)
@@ -440,7 +440,7 @@ static int macb_mii_init(struct macb *bp)
        snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
                 bp->pdev->name, bp->pdev->id);
        bp->mii_bus->priv = bp;
-       bp->mii_bus->parent = &bp->dev->dev;
+       bp->mii_bus->parent = &bp->pdev->dev;
        pdata = dev_get_platdata(&bp->pdev->dev);
 
        dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
@@ -458,7 +458,8 @@ static int macb_mii_init(struct macb *bp)
                                struct phy_device *phydev;
 
                                phydev = mdiobus_scan(bp->mii_bus, i);
-                               if (IS_ERR(phydev)) {
+                               if (IS_ERR(phydev) &&
+                                   PTR_ERR(phydev) != -ENODEV) {
                                        err = PTR_ERR(phydev);
                                        break;
                                }
@@ -3005,29 +3006,36 @@ static int macb_probe(struct platform_device *pdev)
        if (err)
                goto err_out_free_netdev;
 
+       err = macb_mii_init(bp);
+       if (err)
+               goto err_out_free_netdev;
+
+       phydev = bp->phy_dev;
+
+       netif_carrier_off(dev);
+
        err = register_netdev(dev);
        if (err) {
                dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
-               goto err_out_unregister_netdev;
+               goto err_out_unregister_mdio;
        }
 
-       err = macb_mii_init(bp);
-       if (err)
-               goto err_out_unregister_netdev;
-
-       netif_carrier_off(dev);
+       phy_attached_info(phydev);
 
        netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
                    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
                    dev->base_addr, dev->irq, dev->dev_addr);
 
-       phydev = bp->phy_dev;
-       phy_attached_info(phydev);
-
        return 0;
 
-err_out_unregister_netdev:
-       unregister_netdev(dev);
+err_out_unregister_mdio:
+       phy_disconnect(bp->phy_dev);
+       mdiobus_unregister(bp->mii_bus);
+       mdiobus_free(bp->mii_bus);
+
+       /* Shutdown the PHY if there is a GPIO reset */
+       if (bp->reset_gpio)
+               gpiod_set_value(bp->reset_gpio, 0);
 
 err_out_free_netdev:
        free_netdev(dev);
index 34d269cd5579b7e0f1c1afd29bffb04c8711d71d..8de79ae63231b0ad21e47edb9a150e026adf6bd3 100644 (file)
@@ -2899,7 +2899,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
        if (status == IQ_SEND_STOP)
                stop_q(lio->netdev, q_idx);
 
-       netdev->trans_start = jiffies;
+       netif_trans_update(netdev);
 
        stats->tx_done++;
        stats->tx_tot_bytes += skb->len;
@@ -2928,7 +2928,7 @@ static void liquidio_tx_timeout(struct net_device *netdev)
        netif_info(lio, tx_err, lio->netdev,
                   "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
                   netdev->stats.tx_dropped);
-       netdev->trans_start = jiffies;
+       netif_trans_update(netdev);
        txqs_wake(netdev);
 }
 
index c177c7cec13b462b80a2001a9d8a272b606710b1..388cd799d9edc203c88a60066ef98ed3d4958ef9 100644 (file)
@@ -1320,7 +1320,7 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
        /* Ring the bell.  */
        cvmx_write_csr(p->mix + MIX_ORING2, 1);
 
-       netdev->trans_start = jiffies;
+       netif_trans_update(netdev);
        rv = NETDEV_TX_OK;
 out:
        octeon_mgmt_update_tx_stats(netdev);
index bfee298fc02abc96e16cae7854d9de3036172542..a19e73f11d73c84a829d95980688249c511b21c4 100644 (file)
@@ -1442,7 +1442,7 @@ static void nicvf_reset_task(struct work_struct *work)
 
        nicvf_stop(nic->netdev);
        nicvf_open(nic->netdev);
-       nic->netdev->trans_start = jiffies;
+       netif_trans_update(nic->netdev);
 }
 
 static int nicvf_config_loopback(struct nicvf *nic,
index 967951582e033d8a34a3463d5047f15bb7c26219..d20539a6d162bd5f0dbbd9c79d89e6784e37b969 100644 (file)
@@ -1011,10 +1011,11 @@ static int bgx_init_of_phy(struct bgx *bgx)
                }
 
                lmac++;
-               if (lmac == MAX_LMAC_PER_BGX)
+               if (lmac == MAX_LMAC_PER_BGX) {
+                       of_node_put(node);
                        break;
+               }
        }
-       of_node_put(node);
        return 0;
 
 defer:
index 526ea74e82d9590b248530148a4a0776411d608d..86f467a2c4859608cc0337e57534d4a2d5e54582 100644 (file)
@@ -1664,8 +1664,7 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
        struct cmdQ *q = &sge->cmdQ[qid];
        unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
 
-       if (!spin_trylock(&q->lock))
-               return NETDEV_TX_LOCKED;
+       spin_lock(&q->lock);
 
        reclaim_completed_tx(sge, q);
 
index 60908eab3b3adf805d1b6cef462bd24ec5f64fe6..43da891fab97e7f16b572dd67f3fc6aa9020a1fb 100644 (file)
@@ -576,7 +576,7 @@ static void setup_rss(struct adapter *adap)
        unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
        unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
        u8 cpus[SGE_QSETS + 1];
-       u16 rspq_map[RSS_TABLE_SIZE];
+       u16 rspq_map[RSS_TABLE_SIZE + 1];
 
        for (i = 0; i < SGE_QSETS; ++i)
                cpus[i] = i;
@@ -586,6 +586,7 @@ static void setup_rss(struct adapter *adap)
                rspq_map[i] = i % nq0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
        }
+       rspq_map[RSS_TABLE_SIZE] = 0xffff; /* terminator */
 
        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
index 984a3cc26f86ddb7a194b4738a3dd7e018b0bc33..b4fceb92479fa9838db9c78281b7fe31ff7b7e67 100644 (file)
@@ -324,7 +324,9 @@ struct adapter_params {
        unsigned int sf_fw_start;         /* start of FW image in flash */
 
        unsigned int fw_vers;
+       unsigned int bs_vers;           /* bootstrap version */
        unsigned int tp_vers;
+       unsigned int er_vers;           /* expansion ROM version */
        u8 api_vers[7];
 
        unsigned short mtus[NMTUS];
@@ -357,6 +359,34 @@ struct sge_idma_monitor_state {
        unsigned int idma_warn[2];      /* time to warning in HZ */
 };
 
+/* Firmware Mailbox Command/Reply log.  All values are in Host-Endian format.
+ * The access and execute times are signed in order to accommodate negative
+ * error returns.
+ */
+struct mbox_cmd {
+       u64 cmd[MBOX_LEN / 8];          /* a Firmware Mailbox Command/Reply */
+       u64 timestamp;                  /* OS-dependent timestamp */
+       u32 seqno;                      /* sequence number */
+       s16 access;                     /* time (ms) to access mailbox */
+       s16 execute;                    /* time (ms) to execute */
+};
+
+struct mbox_cmd_log {
+       unsigned int size;              /* number of entries in the log */
+       unsigned int cursor;            /* next position in the log to write */
+       u32 seqno;                      /* next sequence number */
+       /* variable length mailbox command log starts here */
+};
+
+/* Given a pointer to a Firmware Mailbox Command Log and a log entry index,
+ * return a pointer to the specified entry.
+ */
+static inline struct mbox_cmd *mbox_cmd_log_entry(struct mbox_cmd_log *log,
+                                                 unsigned int entry_idx)
+{
+       return &((struct mbox_cmd *)&(log)[1])[entry_idx];
+}
+
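mbox_cmd_log deliberately ends at its header: the entries live in the same allocation immediately after it, which is what the &log[1] arithmetic in mbox_cmd_log_entry() steps past. The matching allocation, as done later in this patch in init_one():

    adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
                                (sizeof(struct mbox_cmd) *
                                 T4_OS_LOG_MBOX_CMDS),
                                GFP_KERNEL);
    if (!adapter->mbox_log)
            return -ENOMEM; /* the probe path actually jumps to its cleanup label */
    adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;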
 #include "t4fw_api.h"
 
 #define FW_VERSION(chip) ( \
@@ -394,6 +424,7 @@ struct link_config {
        unsigned char  fc;               /* actual link flow control */
        unsigned char  autoneg;          /* autonegotiating? */
        unsigned char  link_ok;          /* link up? */
+       unsigned char  link_down_rc;     /* link down reason */
 };
 
 #define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16)
@@ -731,6 +762,7 @@ struct adapter {
        u32 t4_bar0;
        struct pci_dev *pdev;
        struct device *pdev_dev;
+       const char *name;
        unsigned int mbox;
        unsigned int pf;
        unsigned int flags;
@@ -776,6 +808,10 @@ struct adapter {
        struct work_struct db_drop_task;
        bool tid_release_task_busy;
 
+       /* support for mailbox command/reply logging */
+#define T4_OS_LOG_MBOX_CMDS 256
+       struct mbox_cmd_log *mbox_log;
+
        struct dentry *debugfs_root;
        bool use_bd;     /* Use SGE Back Door intfc for reading SGE Contexts */
        bool trace_rss; /* 1 implies that different RSS flit per filter is
@@ -1306,6 +1342,7 @@ int t4_fl_pkt_align(struct adapter *adap);
 unsigned int t4_flash_cfg_addr(struct adapter *adapter);
 int t4_check_fw_version(struct adapter *adap);
 int t4_get_fw_version(struct adapter *adapter, u32 *vers);
+int t4_get_bs_version(struct adapter *adapter, u32 *vers);
 int t4_get_tp_version(struct adapter *adapter, u32 *vers);
 int t4_get_exprom_version(struct adapter *adapter, u32 *vers);
 int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
@@ -1329,6 +1366,8 @@ int t4_init_sge_params(struct adapter *adapter);
 int t4_init_tp_params(struct adapter *adap);
 int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
 int t4_init_rss_mode(struct adapter *adap, int mbox);
+int t4_init_portinfo(struct port_info *pi, int mbox,
+                    int port, int pf, int vf, u8 mac[]);
 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
 void t4_fatal_err(struct adapter *adapter);
 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
@@ -1451,6 +1490,9 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
               unsigned int mmd, unsigned int reg, u16 *valp);
 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
               unsigned int mmd, unsigned int reg, u16 val);
+int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
+              unsigned int vf, unsigned int iqtype, unsigned int iqid,
+              unsigned int fl0id, unsigned int fl1id);
 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
               unsigned int vf, unsigned int iqtype, unsigned int iqid,
               unsigned int fl0id, unsigned int fl1id);
@@ -1461,6 +1503,7 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
                    unsigned int vf, unsigned int eqid);
 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox);
+void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl);
 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
 void t4_db_full(struct adapter *adapter);
 void t4_db_dropped(struct adapter *adapter);
index 052c660aca80a8dd06593112c9961a6bdda07309..6ee2ed30626bfdf7c221a2c7846bc0abf90ac9ad 100644 (file)
@@ -253,7 +253,7 @@ void cxgb4_dcb_handle_fw_update(struct adapter *adap,
 {
        const union fw_port_dcb *fwdcb = &pcmd->u.dcb;
        int port = FW_PORT_CMD_PORTID_G(be32_to_cpu(pcmd->op_to_portid));
-       struct net_device *dev = adap->port[port];
+       struct net_device *dev = adap->port[adap->chan_map[port]];
        struct port_info *pi = netdev_priv(dev);
        struct port_dcb_info *dcb = &pi->dcb;
        int dcb_type = pcmd->u.dcb.pgid.type;
index 0bb41e9b9b1ca97e31937e6c19b05c190d6faba2..91fb50850fff6df7557549a5453045ff8be1fc0d 100644 (file)
@@ -1152,6 +1152,104 @@ static const struct file_operations devlog_fops = {
        .release = seq_release_private
 };
 
+/* Show Firmware Mailbox Command/Reply Log
+ *
+ * Note that we don't do any locking when dumping the Firmware Mailbox Log so
+ * it's possible that we can catch things during a log update and therefore
+ * see partially corrupted log entries.  But it's probably Good Enough(tm).
+ * If we ever decide that we want to make sure that we're dumping a coherent
+ * log, we'd need to perform locking in the mailbox logging and in
+ * mboxlog_open() where we'd need to grab the entire mailbox log in one go
+ * like we do for the Firmware Device Log.
+ */
+static int mboxlog_show(struct seq_file *seq, void *v)
+{
+       struct adapter *adapter = seq->private;
+       struct mbox_cmd_log *log = adapter->mbox_log;
+       struct mbox_cmd *entry;
+       int entry_idx, i;
+
+       if (v == SEQ_START_TOKEN) {
+               seq_printf(seq,
+                          "%10s  %15s  %5s  %5s  %s\n",
+                          "Seq#", "Tstamp", "Atime", "Etime",
+                          "Command/Reply");
+               return 0;
+       }
+
+       entry_idx = log->cursor + ((uintptr_t)v - 2);
+       if (entry_idx >= log->size)
+               entry_idx -= log->size;
+       entry = mbox_cmd_log_entry(log, entry_idx);
+
+       /* skip over unused entries */
+       if (entry->timestamp == 0)
+               return 0;
+
+       seq_printf(seq, "%10u  %15llu  %5d  %5d",
+                  entry->seqno, entry->timestamp,
+                  entry->access, entry->execute);
+       for (i = 0; i < MBOX_LEN / 8; i++) {
+               u64 flit = entry->cmd[i];
+               u32 hi = (u32)(flit >> 32);
+               u32 lo = (u32)flit;
+
+               seq_printf(seq, "  %08x %08x", hi, lo);
+       }
+       seq_puts(seq, "\n");
+       return 0;
+}
+
+static inline void *mboxlog_get_idx(struct seq_file *seq, loff_t pos)
+{
+       struct adapter *adapter = seq->private;
+       struct mbox_cmd_log *log = adapter->mbox_log;
+
+       return ((pos <= log->size) ? (void *)(uintptr_t)(pos + 1) : NULL);
+}
+
+static void *mboxlog_start(struct seq_file *seq, loff_t *pos)
+{
+       return *pos ? mboxlog_get_idx(seq, *pos) : SEQ_START_TOKEN;
+}
+
+static void *mboxlog_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+       ++*pos;
+       return mboxlog_get_idx(seq, *pos);
+}
+
+static void mboxlog_stop(struct seq_file *seq, void *v)
+{
+}
+
+static const struct seq_operations mboxlog_seq_ops = {
+       .start = mboxlog_start,
+       .next  = mboxlog_next,
+       .stop  = mboxlog_stop,
+       .show  = mboxlog_show
+};
+
+static int mboxlog_open(struct inode *inode, struct file *file)
+{
+       int res = seq_open(file, &mboxlog_seq_ops);
+
+       if (!res) {
+               struct seq_file *seq = file->private_data;
+
+               seq->private = inode->i_private;
+       }
+       return res;
+}
+
+static const struct file_operations mboxlog_fops = {
+       .owner   = THIS_MODULE,
+       .open    = mboxlog_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = seq_release,
+};
+
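A note on the iterator above: seq_file cursors are pointers and NULL means end-of-sequence, so mboxlog_get_idx() encodes position pos as the non-NULL pointer value pos + 1. SEQ_START_TOKEN occupies position 0 (the column header), which is why mboxlog_show() recovers the record index as (uintptr_t)v - 2 before rotating it by log->cursor so output starts at the oldest entry:

    /* pos 0        -> SEQ_START_TOKEN (prints the header line)
     * pos p (>= 1) -> v = (void *)(uintptr_t)(p + 1), never NULL
     * show():        entry_idx = (log->cursor + ((uintptr_t)v - 2)) % log->size
     */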
 static int mbox_show(struct seq_file *seq, void *v)
 {
        static const char * const owner[] = { "none", "FW", "driver",
@@ -1572,6 +1670,7 @@ static const struct file_operations flash_debugfs_fops = {
        .owner   = THIS_MODULE,
        .open    = mem_open,
        .read    = flash_read,
+       .llseek  = default_llseek,
 };
 
 static inline void tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
@@ -3128,6 +3227,7 @@ int t4_setup_debugfs(struct adapter *adap)
                { "cim_qcfg", &cim_qcfg_fops, S_IRUSR, 0 },
                { "clk", &clk_debugfs_fops, S_IRUSR, 0 },
                { "devlog", &devlog_fops, S_IRUSR, 0 },
+               { "mboxlog", &mboxlog_fops, S_IRUSR, 0 },
                { "mbox0", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 0 },
                { "mbox1", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 1 },
                { "mbox2", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 2 },
index a1e329ec24cd1fd8e6d6d2cf68a8d55d886e69d1..477db477b133691bcbf82ebdcd7ce90913eb1102 100644 (file)
@@ -304,6 +304,22 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
 }
 #endif /* CONFIG_CHELSIO_T4_DCB */
 
+int cxgb4_dcb_enabled(const struct net_device *dev)
+{
+#ifdef CONFIG_CHELSIO_T4_DCB
+       struct port_info *pi = netdev_priv(dev);
+
+       if (!pi->dcb.enabled)
+               return 0;
+
+       return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
+               (pi->dcb.state == CXGB4_DCB_STATE_HOST));
+#else
+       return 0;
+#endif
+}
+EXPORT_SYMBOL(cxgb4_dcb_enabled);
+
 void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
 {
        struct net_device *dev = adapter->port[port_id];
@@ -314,8 +330,10 @@ void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
                        netif_carrier_on(dev);
                else {
 #ifdef CONFIG_CHELSIO_T4_DCB
-                       cxgb4_dcb_state_init(dev);
-                       dcb_tx_queue_prio_enable(dev, false);
+                       if (cxgb4_dcb_enabled(dev)) {
+                               cxgb4_dcb_state_init(dev);
+                               dcb_tx_queue_prio_enable(dev, false);
+                       }
 #endif /* CONFIG_CHELSIO_T4_DCB */
                        netif_carrier_off(dev);
                }
@@ -337,6 +355,17 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id)
                netdev_info(dev, "port module unplugged\n");
        else if (pi->mod_type < ARRAY_SIZE(mod_str))
                netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
+       else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
+               netdev_info(dev, "%s: unsupported port module inserted\n",
+                           dev->name);
+       else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
+               netdev_info(dev, "%s: unknown port module inserted\n",
+                           dev->name);
+       else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
+               netdev_info(dev, "%s: transceiver module error\n", dev->name);
+       else
+               netdev_info(dev, "%s: unknown module type %d inserted\n",
+                           dev->name, pi->mod_type);
 }
 
 int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
@@ -483,28 +512,12 @@ static int link_start(struct net_device *dev)
        return ret;
 }
 
-int cxgb4_dcb_enabled(const struct net_device *dev)
-{
-#ifdef CONFIG_CHELSIO_T4_DCB
-       struct port_info *pi = netdev_priv(dev);
-
-       if (!pi->dcb.enabled)
-               return 0;
-
-       return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
-               (pi->dcb.state == CXGB4_DCB_STATE_HOST));
-#else
-       return 0;
-#endif
-}
-EXPORT_SYMBOL(cxgb4_dcb_enabled);
-
 #ifdef CONFIG_CHELSIO_T4_DCB
 /* Handle a Data Center Bridging update message from the firmware. */
 static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
 {
        int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
-       struct net_device *dev = adap->port[port];
+       struct net_device *dev = adap->port[adap->chan_map[port]];
        int old_dcb_enabled = cxgb4_dcb_enabled(dev);
        int new_dcb_enabled;
 
@@ -634,7 +647,8 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
                    action == FW_PORT_ACTION_GET_PORT_INFO) {
                        int port = FW_PORT_CMD_PORTID_G(
                                        be32_to_cpu(pcmd->op_to_portid));
-                       struct net_device *dev = q->adap->port[port];
+                       struct net_device *dev =
+                               q->adap->port[q->adap->chan_map[port]];
                        int state_input = ((pcmd->u.info.dcbxdis_pkd &
                                            FW_PORT_CMD_DCBXDIS_F)
                                           ? CXGB4_DCB_INPUT_FW_DISABLED
@@ -3738,7 +3752,10 @@ static int adap_init0(struct adapter *adap)
         * is excessively mismatched relative to the driver.)
         */
        t4_get_fw_version(adap, &adap->params.fw_vers);
+       t4_get_bs_version(adap, &adap->params.bs_vers);
        t4_get_tp_version(adap, &adap->params.tp_vers);
+       t4_get_exprom_version(adap, &adap->params.er_vers);
+
        ret = t4_check_fw_version(adap);
        /* If firmware is too old (not supported by driver) force an update. */
        if (ret)
@@ -4652,6 +4669,68 @@ static void cxgb4_check_pcie_caps(struct adapter *adap)
                         "suggested for optimal performance.\n");
 }
 
+/* Dump basic information about the adapter */
+static void print_adapter_info(struct adapter *adapter)
+{
+       /* Device information */
+       dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n",
+                adapter->params.vpd.id,
+                CHELSIO_CHIP_RELEASE(adapter->params.chip));
+       dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n",
+                adapter->params.vpd.sn, adapter->params.vpd.pn);
+
+       /* Firmware Version */
+       if (!adapter->params.fw_vers)
+               dev_warn(adapter->pdev_dev, "No firmware loaded\n");
+       else
+               dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n",
+                        FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
+                        FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
+                        FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
+                        FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers));
+
+       /* Bootstrap Firmware Version. (Some adapters don't have Bootstrap
+        * Firmware, so dev_info() is more appropriate here.)
+        */
+       if (!adapter->params.bs_vers)
+               dev_info(adapter->pdev_dev, "No bootstrap loaded\n");
+       else
+               dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n",
+                        FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers),
+                        FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers),
+                        FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers),
+                        FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers));
+
+       /* TP Microcode Version */
+       if (!adapter->params.tp_vers)
+               dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n");
+       else
+               dev_info(adapter->pdev_dev,
+                        "TP Microcode version: %u.%u.%u.%u\n",
+                        FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
+                        FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
+                        FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
+                        FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
+
+       /* Expansion ROM version */
+       if (!adapter->params.er_vers)
+               dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n");
+       else
+               dev_info(adapter->pdev_dev,
+                        "Expansion ROM version: %u.%u.%u.%u\n",
+                        FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers),
+                        FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers),
+                        FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers),
+                        FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers));
+
+       /* Software/Hardware configuration */
+       dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
+                is_offload(adapter) ? "R" : "",
+                ((adapter->flags & USING_MSIX) ? "MSI-X" :
+                 (adapter->flags & USING_MSI) ? "MSI" : ""),
+                is_offload(adapter) ? "Offload" : "non-Offload");
+}
+
 static void print_port_info(const struct net_device *dev)
 {
        char buf[80];
@@ -4679,14 +4758,8 @@ static void print_port_info(const struct net_device *dev)
                --bufp;
        sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
 
-       netdev_info(dev, "Chelsio %s rev %d %s %sNIC %s\n",
-                   adap->params.vpd.id,
-                   CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
-                   is_offload(adap) ? "R" : "",
-                   (adap->flags & USING_MSIX) ? " MSI-X" :
-                   (adap->flags & USING_MSI) ? " MSI" : "");
-       netdev_info(dev, "S/N: %s, P/N: %s\n",
-                   adap->params.vpd.sn, adap->params.vpd.pn);
+       netdev_info(dev, "%s: Chelsio %s (%s) %s\n",
+                   dev->name, adap->params.vpd.id, adap->name, buf);
 }
 
 static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
@@ -4838,12 +4911,23 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto out_free_adapter;
        }
 
+       adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
+                                   (sizeof(struct mbox_cmd) *
+                                    T4_OS_LOG_MBOX_CMDS),
+                                   GFP_KERNEL);
+       if (!adapter->mbox_log) {
+               err = -ENOMEM;
+               goto out_free_adapter;
+       }
+       adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
+
        /* PCI device has been enabled */
        adapter->flags |= DEV_ENABLED;
 
        adapter->regs = regs;
        adapter->pdev = pdev;
        adapter->pdev_dev = &pdev->dev;
+       adapter->name = pci_name(pdev);
        adapter->mbox = func;
        adapter->pf = func;
        adapter->msg_enable = dflt_msg_enable;
@@ -5074,6 +5158,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (is_offload(adapter))
                attach_ulds(adapter);
 
+       print_adapter_info(adapter);
+
 sriov:
 #ifdef CONFIG_PCI_IOV
        if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
@@ -5093,6 +5179,7 @@ sriov:
        if (adapter->workq)
                destroy_workqueue(adapter->workq);
 
+       kfree(adapter->mbox_log);
        kfree(adapter);
  out_unmap_bar0:
        iounmap(regs);
@@ -5159,6 +5246,7 @@ static void remove_one(struct pci_dev *pdev)
                        adapter->flags &= ~DEV_ENABLED;
                }
                pci_release_regions(pdev);
+               kfree(adapter->mbox_log);
                synchronize_rcu();
                kfree(adapter);
        } else
index 13b144bcf725ec29d42dbdc1b33fc0c39c1fe919..bad253beb8c8671ede3531d7471dd6754651d178 100644 (file)
@@ -2981,18 +2981,34 @@ void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
 void t4_free_sge_resources(struct adapter *adap)
 {
        int i;
-       struct sge_eth_rxq *eq = adap->sge.ethrxq;
-       struct sge_eth_txq *etq = adap->sge.ethtxq;
+       struct sge_eth_rxq *eq;
+       struct sge_eth_txq *etq;
+
+       /* stop all Rx queues in order to start them draining */
+       for (i = 0; i < adap->sge.ethqsets; i++) {
+               eq = &adap->sge.ethrxq[i];
+               if (eq->rspq.desc)
+                       t4_iq_stop(adap, adap->mbox, adap->pf, 0,
+                                  FW_IQ_TYPE_FL_INT_CAP,
+                                  eq->rspq.cntxt_id,
+                                  eq->fl.size ? eq->fl.cntxt_id : 0xffff,
+                                  0xffff);
+       }
 
        /* clean up Ethernet Tx/Rx queues */
-       for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
+       for (i = 0; i < adap->sge.ethqsets; i++) {
+               eq = &adap->sge.ethrxq[i];
                if (eq->rspq.desc)
                        free_rspq_fl(adap, &eq->rspq,
                                     eq->fl.size ? &eq->fl : NULL);
+
+               etq = &adap->sge.ethtxq[i];
                if (etq->q.desc) {
                        t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
                                       etq->q.cntxt_id);
+                       __netif_tx_lock_bh(etq->txq);
                        free_tx_desc(adap, &etq->q, etq->q.in_use, true);
+                       __netif_tx_unlock_bh(etq->txq);
                        kfree(etq->q.sdesc);
                        free_txq(adap, &etq->q);
                }
index cc1736bece0faabe22f04a828f7d1740bf9462de..a63addb4e72c619f16642891fb94631a17ce5d83 100644 (file)
@@ -224,18 +224,34 @@ static void fw_asrt(struct adapter *adap, u32 mbox_addr)
                  be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
 }
 
-static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
+/**
+ *     t4_record_mbox - record a Firmware Mailbox Command/Reply in the log
+ *     @adapter: the adapter
+ *     @cmd: the Firmware Mailbox Command or Reply
+ *     @size: command length in bytes
+ *     @access: the time (ms) needed to access the Firmware Mailbox
+ *     @execute: the time (ms) the command spent being executed
+ */
+static void t4_record_mbox(struct adapter *adapter,
+                          const __be64 *cmd, unsigned int size,
+                          int access, int execute)
 {
-       dev_err(adap->pdev_dev,
-               "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
-               (unsigned long long)t4_read_reg64(adap, data_reg),
-               (unsigned long long)t4_read_reg64(adap, data_reg + 8),
-               (unsigned long long)t4_read_reg64(adap, data_reg + 16),
-               (unsigned long long)t4_read_reg64(adap, data_reg + 24),
-               (unsigned long long)t4_read_reg64(adap, data_reg + 32),
-               (unsigned long long)t4_read_reg64(adap, data_reg + 40),
-               (unsigned long long)t4_read_reg64(adap, data_reg + 48),
-               (unsigned long long)t4_read_reg64(adap, data_reg + 56));
+       struct mbox_cmd_log *log = adapter->mbox_log;
+       struct mbox_cmd *entry;
+       int i;
+
+       entry = mbox_cmd_log_entry(log, log->cursor++);
+       if (log->cursor == log->size)
+               log->cursor = 0;
+
+       for (i = 0; i < size / 8; i++)
+               entry->cmd[i] = be64_to_cpu(cmd[i]);
+       while (i < MBOX_LEN / 8)
+               entry->cmd[i++] = 0;
+       entry->timestamp = jiffies;
+       entry->seqno = log->seqno++;
+       entry->access = access;
+       entry->execute = execute;
 }
 
 /**
@@ -268,12 +284,16 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
                1, 1, 3, 5, 10, 10, 20, 50, 100, 200
        };
 
+       u16 access = 0;
+       u16 execute = 0;
        u32 v;
        u64 res;
-       int i, ms, delay_idx;
+       int i, ms, delay_idx, ret;
        const __be64 *p = cmd;
        u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
        u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);
+       __be64 cmd_rpl[MBOX_LEN / 8];
+       u32 pcie_fw;
 
        if ((size & 15) || size > MBOX_LEN)
                return -EINVAL;
@@ -285,13 +305,24 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
        if (adap->pdev->error_state != pci_channel_io_normal)
                return -EIO;
 
+       /* If we have a negative timeout, that implies that we can't sleep. */
+       if (timeout < 0) {
+               sleep_ok = false;
+               timeout = -timeout;
+       }
+
        v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
        for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
                v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
 
-       if (v != MBOX_OWNER_DRV)
-               return v ? -EBUSY : -ETIMEDOUT;
+       if (v != MBOX_OWNER_DRV) {
+               ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
+               t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
+               return ret;
+       }
 
+       /* Copy in the new mailbox command and send it on its way ... */
+       t4_record_mbox(adap, cmd, MBOX_LEN, access, 0);
        for (i = 0; i < size; i += 8)
                t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
 
@@ -301,7 +332,10 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
        delay_idx = 0;
        ms = delay[0];
 
-       for (i = 0; i < timeout; i += ms) {
+       for (i = 0;
+            !((pcie_fw = t4_read_reg(adap, PCIE_FW_A)) & PCIE_FW_ERR_F) &&
+            i < timeout;
+            i += ms) {
                if (sleep_ok) {
                        ms = delay[delay_idx];  /* last element may repeat */
                        if (delay_idx < ARRAY_SIZE(delay) - 1)
@@ -317,26 +351,31 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
                                continue;
                        }
 
-                       res = t4_read_reg64(adap, data_reg);
+                       get_mbox_rpl(adap, cmd_rpl, MBOX_LEN / 8, data_reg);
+                       res = be64_to_cpu(cmd_rpl[0]);
+
                        if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
                                fw_asrt(adap, data_reg);
                                res = FW_CMD_RETVAL_V(EIO);
                        } else if (rpl) {
-                               get_mbox_rpl(adap, rpl, size / 8, data_reg);
+                               memcpy(rpl, cmd_rpl, size);
                        }
 
-                       if (FW_CMD_RETVAL_G((int)res))
-                               dump_mbox(adap, mbox, data_reg);
                        t4_write_reg(adap, ctl_reg, 0);
+
+                       execute = i + ms;
+                       t4_record_mbox(adap, cmd_rpl,
+                                      MBOX_LEN, access, execute);
                        return -FW_CMD_RETVAL_G((int)res);
                }
        }
 
-       dump_mbox(adap, mbox, data_reg);
+       ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT;
+       t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
        dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
                *(const u8 *)cmd, mbox);
        t4_report_fw_error(adap);
-       return -ETIMEDOUT;
+       return ret;
 }
 
 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
@@ -2557,6 +2596,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
 }
 
 #define EEPROM_STAT_ADDR   0x7bfc
+#define VPD_SIZE           0x800
 #define VPD_BASE           0x400
 #define VPD_BASE_OLD       0
 #define VPD_LEN            1024
@@ -2594,6 +2634,15 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
        if (!vpd)
                return -ENOMEM;
 
+       /* We have two VPD data structures stored in the adapter VPD area.
+        * By default, Linux calculates the size of the VPD area by traversing
+        * the first VPD area at offset 0x0, so we need to tell the OS what
+        * our real VPD size is.
+        */
+       ret = pci_set_vpd_size(adapter->pdev, VPD_SIZE);
+       if (ret < 0)
+               goto out;
+
        /* Card information normally starts at VPD_BASE but early cards had
         * it at 0.
         */
@@ -2926,6 +2975,20 @@ int t4_get_fw_version(struct adapter *adapter, u32 *vers)
                             vers, 0);
 }
 
+/**
+ *     t4_get_bs_version - read the firmware bootstrap version
+ *     @adapter: the adapter
+ *     @vers: where to place the version
+ *
+ *     Reads the FW Bootstrap version from flash.
+ */
+int t4_get_bs_version(struct adapter *adapter, u32 *vers)
+{
+       return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
+                            offsetof(struct fw_hdr, fw_ver), 1,
+                            vers, 0);
+}
+
 /**
  *     t4_get_tp_version - read the TP microcode version
  *     @adapter: the adapter
@@ -6939,6 +7002,39 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
+/**
+ *     t4_iq_stop - stop an ingress queue and its FLs
+ *     @adap: the adapter
+ *     @mbox: mailbox to use for the FW command
+ *     @pf: the PF owning the queues
+ *     @vf: the VF owning the queues
+ *     @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
+ *     @iqid: ingress queue id
+ *     @fl0id: FL0 queue id or 0xffff if no attached FL0
+ *     @fl1id: FL1 queue id or 0xffff if no attached FL1
+ *
+ *     Stops an ingress queue and its associated FLs, if any.  This causes
+ *     any current or future data/messages destined for these queues to be
+ *     tossed.
+ */
+int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
+              unsigned int vf, unsigned int iqtype, unsigned int iqid,
+              unsigned int fl0id, unsigned int fl1id)
+{
+       struct fw_iq_cmd c;
+
+       memset(&c, 0, sizeof(c));
+       c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
+                                 FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
+                                 FW_IQ_CMD_VFN_V(vf));
+       c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_IQSTOP_F | FW_LEN16(c));
+       c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
+       c.iqid = cpu_to_be16(iqid);
+       c.fl0id = cpu_to_be16(fl0id);
+       c.fl1id = cpu_to_be16(fl1id);
+       return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
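A hypothetical caller (not part of this patch) would quiesce the RX path with t4_iq_stop() before freeing the queue, passing 0xffff for any free list that was never attached:

        /* Illustration only: the rspq/fl names and surrounding context are
         * assumed, not taken from this patch.
         */
        ret = t4_iq_stop(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
                         rspq->cntxt_id, fl ? fl->cntxt_id : 0xffff, 0xffff);
        if (ret)
                dev_err(adap->pdev_dev, "failed to stop ingress queue\n");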
 /**
  *     t4_iq_free - free an ingress queue and its FLs
  *     @adap: the adapter
@@ -7046,52 +7142,122 @@ int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
 }
 
 /**
- *     t4_handle_fw_rpl - process a FW reply message
+ *     t4_link_down_rc_str - return a string for a Link Down Reason Code
  *     @adap: the adapter
+ *     @link_down_rc: Link Down Reason Code
+ *
+ *     Returns a string representation of the Link Down Reason Code.
+ */
+static const char *t4_link_down_rc_str(unsigned char link_down_rc)
+{
+       static const char * const reason[] = {
+               "Link Down",
+               "Remote Fault",
+               "Auto-negotiation Failure",
+               "Reserved",
+               "Insufficient Airflow",
+               "Unable To Determine Reason",
+               "No RX Signal Detected",
+               "Reserved",
+       };
+
+       if (link_down_rc >= ARRAY_SIZE(reason))
+               return "Bad Reason Code";
+
+       return reason[link_down_rc];
+}
+
+/**
+ *     t4_handle_get_port_info - process a FW reply message
+ *     @pi: the port info
  *     @rpl: start of the FW message
  *
- *     Processes a FW message, such as link state change messages.
+ *     Processes a GET_PORT_INFO FW reply message.
+ */
+void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
+{
+       const struct fw_port_cmd *p = (const void *)rpl;
+       struct adapter *adap = pi->adapter;
+
+       /* link/module state change message */
+       int speed = 0, fc = 0;
+       struct link_config *lc;
+       u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
+       int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
+       u32 mod = FW_PORT_CMD_MODTYPE_G(stat);
+
+       if (stat & FW_PORT_CMD_RXPAUSE_F)
+               fc |= PAUSE_RX;
+       if (stat & FW_PORT_CMD_TXPAUSE_F)
+               fc |= PAUSE_TX;
+       if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
+               speed = 100;
+       else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
+               speed = 1000;
+       else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
+               speed = 10000;
+       else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
+               speed = 40000;
+
+       lc = &pi->link_cfg;
+
+       if (mod != pi->mod_type) {
+               pi->mod_type = mod;
+               t4_os_portmod_changed(adap, pi->port_id);
+       }
+       if (link_ok != lc->link_ok || speed != lc->speed ||
+           fc != lc->fc) {     /* something changed */
+               if (!link_ok && lc->link_ok) {
+                       unsigned char rc = FW_PORT_CMD_LINKDNRC_G(stat);
+
+                       lc->link_down_rc = rc;
+                       dev_warn(adap->pdev_dev,
+                                "Port %d link down, reason: %s\n",
+                                pi->port_id, t4_link_down_rc_str(rc));
+               }
+               lc->link_ok = link_ok;
+               lc->speed = speed;
+               lc->fc = fc;
+               lc->supported = be16_to_cpu(p->u.info.pcap);
+               t4_os_link_changed(adap, pi->port_id, link_ok);
+       }
+}
+
+/**
+ *      t4_handle_fw_rpl - process a FW reply message
+ *      @adap: the adapter
+ *      @rpl: start of the FW message
+ *
+ *      Processes a FW message, such as link state change messages.
  */
 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
 {
        u8 opcode = *(const u8 *)rpl;
 
-       if (opcode == FW_PORT_CMD) {    /* link/module state change message */
-               int speed = 0, fc = 0;
-               const struct fw_port_cmd *p = (void *)rpl;
+       /* This might be a port command ... this simplifies the following
+        * conditionals ...  We can get away with pre-dereferencing
+        * action_to_len16 because it's in the first 16 bytes and all messages
+        * will be at least that long.
+        */
+       const struct fw_port_cmd *p = (const void *)rpl;
+       unsigned int action =
+               FW_PORT_CMD_ACTION_G(be32_to_cpu(p->action_to_len16));
+
+       if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
+               int i;
                int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid));
-               int port = adap->chan_map[chan];
-               struct port_info *pi = adap2pinfo(adap, port);
-               struct link_config *lc = &pi->link_cfg;
-               u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
-               int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
-               u32 mod = FW_PORT_CMD_MODTYPE_G(stat);
-
-               if (stat & FW_PORT_CMD_RXPAUSE_F)
-                       fc |= PAUSE_RX;
-               if (stat & FW_PORT_CMD_TXPAUSE_F)
-                       fc |= PAUSE_TX;
-               if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
-                       speed = 100;
-               else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
-                       speed = 1000;
-               else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
-                       speed = 10000;
-               else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
-                       speed = 40000;
-
-               if (link_ok != lc->link_ok || speed != lc->speed ||
-                   fc != lc->fc) {                    /* something changed */
-                       lc->link_ok = link_ok;
-                       lc->speed = speed;
-                       lc->fc = fc;
-                       lc->supported = be16_to_cpu(p->u.info.pcap);
-                       t4_os_link_changed(adap, port, link_ok);
-               }
-               if (mod != pi->mod_type) {
-                       pi->mod_type = mod;
-                       t4_os_portmod_changed(adap, port);
+               struct port_info *pi = NULL;
+
+               for_each_port(adap, i) {
+                       pi = adap2pinfo(adap, i);
+                       if (pi->tx_chan == chan)
+                               break;
                }
+
+               t4_handle_get_port_info(pi, rpl);
+       } else {
+               dev_warn(adap->pdev_dev, "Unknown firmware reply %d\n", opcode);
+               return -EINVAL;
        }
        return 0;
 }
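The pre-dereference of action_to_len16 above is safe because every firmware command begins with the same pair of 32-bit words; a trimmed view of struct fw_port_cmd (payload union elided, per t4fw_api.h) shows why the action field always sits within the first 16 bytes:

        /* Trimmed sketch; only the leading fields matter here. */
        struct fw_port_cmd {
                __be32 op_to_portid;            /* opcode, request flag, port */
                __be32 action_to_len16;         /* action + len16 */
                union fw_port u;                /* action-specific payload */
        };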
@@ -7611,61 +7777,74 @@ int t4_init_rss_mode(struct adapter *adap, int mbox)
        return 0;
 }
 
-int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
+/**
+ *     t4_init_portinfo - allocate a virtual interface and initialize port_info
+ *     @pi: the port_info
+ *     @mbox: mailbox to use for the FW command
+ *     @port: physical port associated with the VI
+ *     @pf: the PF owning the VI
+ *     @vf: the VF owning the VI
+ *     @mac: the MAC address of the VI
+ *
+ *     Allocates a virtual interface for the given physical port.  If @mac is
+ *     not %NULL it contains the MAC address of the VI as assigned by FW.
+ *     @mac should be large enough to hold an Ethernet address.
+ *     Returns < 0 on error.
+ */
+int t4_init_portinfo(struct port_info *pi, int mbox,
+                    int port, int pf, int vf, u8 mac[])
 {
-       u8 addr[6];
-       int ret, i, j = 0;
+       int ret;
        struct fw_port_cmd c;
-       struct fw_rss_vi_config_cmd rvc;
+       unsigned int rss_size;
 
        memset(&c, 0, sizeof(c));
-       memset(&rvc, 0, sizeof(rvc));
+       c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
+                                    FW_CMD_REQUEST_F | FW_CMD_READ_F |
+                                    FW_PORT_CMD_PORTID_V(port));
+       c.action_to_len16 = cpu_to_be32(
+               FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
+               FW_LEN16(c));
+       ret = t4_wr_mbox(pi->adapter, mbox, &c, sizeof(c), &c);
+       if (ret)
+               return ret;
+
+       ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size);
+       if (ret < 0)
+               return ret;
+
+       pi->viid = ret;
+       pi->tx_chan = port;
+       pi->lport = port;
+       pi->rss_size = rss_size;
+
+       ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
+       pi->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ?
+               FW_PORT_CMD_MDIOADDR_G(ret) : -1;
+       pi->port_type = FW_PORT_CMD_PTYPE_G(ret);
+       pi->mod_type = FW_PORT_MOD_TYPE_NA;
+
+       init_link_config(&pi->link_cfg, be16_to_cpu(c.u.info.pcap));
+       return 0;
+}
+
+int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
+{
+       u8 addr[6];
+       int ret, i, j = 0;
 
        for_each_port(adap, i) {
-               unsigned int rss_size;
-               struct port_info *p = adap2pinfo(adap, i);
+               struct port_info *pi = adap2pinfo(adap, i);
 
                while ((adap->params.portvec & (1 << j)) == 0)
                        j++;
 
-               c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
-                                            FW_CMD_REQUEST_F | FW_CMD_READ_F |
-                                            FW_PORT_CMD_PORTID_V(j));
-               c.action_to_len16 = cpu_to_be32(
-                       FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
-                       FW_LEN16(c));
-               ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+               ret = t4_init_portinfo(pi, mbox, j, pf, vf, addr);
                if (ret)
                        return ret;
 
-               ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
-               if (ret < 0)
-                       return ret;
-
-               p->viid = ret;
-               p->tx_chan = j;
-               p->lport = j;
-               p->rss_size = rss_size;
                memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
                adap->port[i]->dev_port = j;
-
-               ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
-               p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ?
-                       FW_PORT_CMD_MDIOADDR_G(ret) : -1;
-               p->port_type = FW_PORT_CMD_PTYPE_G(ret);
-               p->mod_type = FW_PORT_MOD_TYPE_NA;
-
-               rvc.op_to_viid =
-                       cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
-                                   FW_CMD_REQUEST_F | FW_CMD_READ_F |
-                                   FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
-               rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
-               ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
-               if (ret)
-                       return ret;
-               p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
-
-               init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap));
                j++;
        }
        return 0;
index 2fc60e83a7a154c7e12751684a9300cd3a17a6c1..7f59ca458431d1c25c4f680a01c713e2858c730a 100644 (file)
@@ -220,6 +220,13 @@ enum {
        FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
        FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
 
+       /* Location of bootstrap firmware image in FLASH.
+        */
+       FLASH_FWBOOTSTRAP_START_SEC = 27,
+       FLASH_FWBOOTSTRAP_NSECS = 1,
+       FLASH_FWBOOTSTRAP_START = FLASH_START(FLASH_FWBOOTSTRAP_START_SEC),
+       FLASH_FWBOOTSTRAP_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FWBOOTSTRAP_NSECS),
+
        /*
         * iSCSI persistent/crash information.
         */
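Assuming the 64 KB serial-flash sector size this family uses, the FLASH_START()/FLASH_MAX_SIZE() helpers are plain multiplies, so the new constants pin the bootstrap image to a fixed window:

        /* Sketch of the arithmetic; SF_SEC_SIZE == 64 KB is an assumption
         * taken from this driver generation's t4_hw.h.
         *
         *   FLASH_FWBOOTSTRAP_START    = 27 * 0x10000 = 0x1b0000
         *   FLASH_FWBOOTSTRAP_MAX_SIZE =  1 * 0x10000 = 0x10000 (64 KB)
         */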
index 06bc2d2e7a7315ceb0f544aac1fcaec1af51fbc6..a2cdfc1261dc77092049575f5519d7160a1c8f29 100644 (file)
@@ -166,6 +166,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
        CH_PCI_ID_TABLE_FENTRY(0x5099), /* Custom 2x40G QSFP */
        CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */
        CH_PCI_ID_TABLE_FENTRY(0x509b), /* Custom T540-CR LOM */
+       CH_PCI_ID_TABLE_FENTRY(0x509c), /* Custom T520-CR */
 
        /* T6 adapters:
         */
index 7ad6d4e75b2aa73eae5e8bfe29dda0f61d272a2a..392d6644fdd8d2e633e775654d8affc00ee3446e 100644 (file)
@@ -2510,6 +2510,11 @@ struct fw_port_cmd {
 #define FW_PORT_CMD_PTYPE_G(x) \
        (((x) >> FW_PORT_CMD_PTYPE_S) & FW_PORT_CMD_PTYPE_M)
 
+#define FW_PORT_CMD_LINKDNRC_S         5
+#define FW_PORT_CMD_LINKDNRC_M         0x7
+#define FW_PORT_CMD_LINKDNRC_G(x)      \
+       (((x) >> FW_PORT_CMD_LINKDNRC_S) & FW_PORT_CMD_LINKDNRC_M)
+
 #define FW_PORT_CMD_MODTYPE_S          0
 #define FW_PORT_CMD_MODTYPE_M          0x1f
 #define FW_PORT_CMD_MODTYPE_V(x)       ((x) << FW_PORT_CMD_MODTYPE_S)
index 4a707c32d76f523c84b7225cef9b6399b0ceddd3..734dd776c22fe196045a2da68b20e46caa01dfa4 100644 (file)
@@ -387,6 +387,10 @@ struct adapter {
        /* various locks */
        spinlock_t stats_lock;
 
+       /* support for mailbox command/reply logging */
+#define T4VF_OS_LOG_MBOX_CMDS 256
+       struct mbox_cmd_log *mbox_log;
+
        /* list of MAC addresses in MPS Hash */
        struct list_head mac_hlist;
 };
index 730fec73d5a6f4f79736555c18792cf9341e7281..04fc6f6d1e254adc5c2681546fd748800a2280de 100644 (file)
@@ -1703,6 +1703,105 @@ static const struct ethtool_ops cxgb4vf_ethtool_ops = {
  * ================================================
  */
 
+/*
+ * Show Firmware Mailbox Command/Reply Log
+ *
+ * Note that we don't do any locking when dumping the Firmware Mailbox Log so
+ * it's possible that we can catch things during a log update and therefore
+ * see partially corrupted log entries.  But it's probably Good Enough(tm).
+ * If we ever decide that we want to make sure that we're dumping a coherent
+ * log, we'd need to perform locking in the mailbox logging and in
+ * mboxlog_open() where we'd need to grab the entire mailbox log in one go
+ * like we do for the Firmware Device Log.  But as stated above, meh ...
+ */
+static int mboxlog_show(struct seq_file *seq, void *v)
+{
+       struct adapter *adapter = seq->private;
+       struct mbox_cmd_log *log = adapter->mbox_log;
+       struct mbox_cmd *entry;
+       int entry_idx, i;
+
+       if (v == SEQ_START_TOKEN) {
+               seq_printf(seq,
+                          "%10s  %15s  %5s  %5s  %s\n",
+                          "Seq#", "Tstamp", "Atime", "Etime",
+                          "Command/Reply");
+               return 0;
+       }
+
+       entry_idx = log->cursor + ((uintptr_t)v - 2);
+       if (entry_idx >= log->size)
+               entry_idx -= log->size;
+       entry = mbox_cmd_log_entry(log, entry_idx);
+
+       /* skip over unused entries */
+       if (entry->timestamp == 0)
+               return 0;
+
+       seq_printf(seq, "%10u  %15llu  %5d  %5d",
+                  entry->seqno, entry->timestamp,
+                  entry->access, entry->execute);
+       for (i = 0; i < MBOX_LEN / 8; i++) {
+               u64 flit = entry->cmd[i];
+               u32 hi = (u32)(flit >> 32);
+               u32 lo = (u32)flit;
+
+               seq_printf(seq, "  %08x %08x", hi, lo);
+       }
+       seq_puts(seq, "\n");
+       return 0;
+}
+
+static inline void *mboxlog_get_idx(struct seq_file *seq, loff_t pos)
+{
+       struct adapter *adapter = seq->private;
+       struct mbox_cmd_log *log = adapter->mbox_log;
+
+       return ((pos <= log->size) ? (void *)(uintptr_t)(pos + 1) : NULL);
+}
+
+static void *mboxlog_start(struct seq_file *seq, loff_t *pos)
+{
+       return *pos ? mboxlog_get_idx(seq, *pos) : SEQ_START_TOKEN;
+}
+
+static void *mboxlog_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+       ++*pos;
+       return mboxlog_get_idx(seq, *pos);
+}
+
+static void mboxlog_stop(struct seq_file *seq, void *v)
+{
+}
+
+static const struct seq_operations mboxlog_seq_ops = {
+       .start = mboxlog_start,
+       .next  = mboxlog_next,
+       .stop  = mboxlog_stop,
+       .show  = mboxlog_show
+};
+
+static int mboxlog_open(struct inode *inode, struct file *file)
+{
+       int res = seq_open(file, &mboxlog_seq_ops);
+
+       if (!res) {
+               struct seq_file *seq = file->private_data;
+
+               seq->private = inode->i_private;
+       }
+       return res;
+}
+
+static const struct file_operations mboxlog_fops = {
+       .owner   = THIS_MODULE,
+       .open    = mboxlog_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = seq_release,
+};
+
 /*
  * Show SGE Queue Set information.  We display QPL Queue Sets per line.
  */
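The seq_file cursor math above walks the ring oldest-first: position 0 produces the header via SEQ_START_TOKEN, and for pos >= 1 show() receives (void *)(pos + 1), so entry_idx = cursor + pos - 1, wrapped at log->size. With assumed numbers:

        /* Illustration only: log->size = 256, log->cursor = 10 (the next
         * slot to be overwritten, i.e. the oldest entry).
         *
         *   pos =   1  ->  entry_idx = 10   (oldest entry)
         *   pos = 247  ->  entry_idx = 256 -> wraps to 0
         *   pos = 256  ->  entry_idx = 265 -> wraps to 9  (newest entry)
         */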
@@ -2122,6 +2221,7 @@ struct cxgb4vf_debugfs_entry {
 };
 
 static struct cxgb4vf_debugfs_entry debugfs_files[] = {
+       { "mboxlog",    S_IRUGO, &mboxlog_fops },
        { "sge_qinfo",  S_IRUGO, &sge_qinfo_debugfs_fops },
        { "sge_qstats", S_IRUGO, &sge_qstats_proc_fops },
        { "resources",  S_IRUGO, &resources_proc_fops },
@@ -2664,6 +2764,16 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
        adapter->pdev = pdev;
        adapter->pdev_dev = &pdev->dev;
 
+       adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
+                                   (sizeof(struct mbox_cmd) *
+                                    T4VF_OS_LOG_MBOX_CMDS),
+                                   GFP_KERNEL);
+       if (!adapter->mbox_log) {
+               err = -ENOMEM;
+               goto err_free_adapter;
+       }
+       adapter->mbox_log->size = T4VF_OS_LOG_MBOX_CMDS;
+
        /*
         * Initialize SMP data synchronization resources.
         */
@@ -2913,6 +3023,7 @@ err_unmap_bar0:
        iounmap(adapter->regs);
 
 err_free_adapter:
+       kfree(adapter->mbox_log);
        kfree(adapter);
 
 err_release_regions:
@@ -2982,6 +3093,7 @@ static void cxgb4vf_pci_remove(struct pci_dev *pdev)
                iounmap(adapter->regs);
                if (!is_t4(adapter->params.chip))
                        iounmap(adapter->bar2);
+               kfree(adapter->mbox_log);
                kfree(adapter);
        }
 
index 1ccd282949a5496ea15122857ba2bc293ed99668..1bb57d3fbbe8783e32187509081ea3a63991641e 100644 (file)
@@ -1448,7 +1448,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
         * the new TX descriptors and return success.
         */
        txq_advance(&txq->q, ndesc);
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
        ring_tx_db(adapter, &txq->q, ndesc);
        return NETDEV_TX_OK;
 
index 9b40a85cc1e4feae33df1b95ebccb92129b4c84d..438374a05791759fc762122de15c77819df62016 100644 (file)
@@ -36,6 +36,7 @@
 #ifndef __T4VF_COMMON_H__
 #define __T4VF_COMMON_H__
 
+#include "../cxgb4/t4_hw.h"
 #include "../cxgb4/t4fw_api.h"
 
 #define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
@@ -227,6 +228,34 @@ struct adapter_params {
        u8 nports;                      /* # of Ethernet "ports" */
 };
 
+/* Firmware Mailbox Command/Reply log.  All values are in Host-Endian format.
+ * The access and execute times are signed in order to accommodate negative
+ * error returns.
+ */
+struct mbox_cmd {
+       u64 cmd[MBOX_LEN / 8];          /* a Firmware Mailbox Command/Reply */
+       u64 timestamp;                  /* OS-dependent timestamp */
+       u32 seqno;                      /* sequence number */
+       s16 access;                     /* time (ms) to access mailbox */
+       s16 execute;                    /* time (ms) to execute */
+};
+
+struct mbox_cmd_log {
+       unsigned int size;              /* number of entries in the log */
+       unsigned int cursor;            /* next position in the log to write */
+       u32 seqno;                      /* next sequence number */
+       /* variable length mailbox command log starts here */
+};
+
+/* Given a pointer to a Firmware Mailbox Command Log and a log entry index,
+ * return a pointer to the specified entry.
+ */
+static inline struct mbox_cmd *mbox_cmd_log_entry(struct mbox_cmd_log *log,
+                                                 unsigned int entry_idx)
+{
+       return &((struct mbox_cmd *)&(log)[1])[entry_idx];
+}
+
 #include "adapter.h"
 
 #ifndef PCI_VENDOR_ID_CHELSIO
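The log is one allocation, header first with the entries packed immediately behind it, which is all mbox_cmd_log_entry() relies on; pictorially (diagram is ours, not from the patch):

        /* One kzalloc'd block of sizeof(*log) + size * sizeof(struct mbox_cmd):
         *
         *   log     -> +--------------------------+
         *              | struct mbox_cmd_log      |  size, cursor, seqno
         *   &log[1] -> +--------------------------+
         *              | struct mbox_cmd [0]      |
         *              | ...                      |
         *              | struct mbox_cmd [size-1] |
         *              +--------------------------+
         */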
index fed83d88fc4ef09082078e3aad2f7f45beaece94..955ff7c61f1b8b03497052499724227448f7b624 100644 (file)
@@ -76,21 +76,33 @@ static void get_mbox_rpl(struct adapter *adapter, __be64 *rpl, int size,
                *rpl++ = cpu_to_be64(t4_read_reg64(adapter, mbox_data));
 }
 
-/*
- * Dump contents of mailbox with a leading tag.
+/**
+ *     t4vf_record_mbox - record a Firmware Mailbox Command/Reply in the log
+ *     @adapter: the adapter
+ *     @cmd: the Firmware Mailbox Command or Reply
+ *     @size: command length in bytes
+ *     @access: the time (ms) needed to access the Firmware Mailbox
+ *     @execute: the time (ms) the command spent being executed
  */
-static void dump_mbox(struct adapter *adapter, const char *tag, u32 mbox_data)
+static void t4vf_record_mbox(struct adapter *adapter, const __be64 *cmd,
+                            int size, int access, int execute)
 {
-       dev_err(adapter->pdev_dev,
-               "mbox %s: %llx %llx %llx %llx %llx %llx %llx %llx\n", tag,
-               (unsigned long long)t4_read_reg64(adapter, mbox_data +  0),
-               (unsigned long long)t4_read_reg64(adapter, mbox_data +  8),
-               (unsigned long long)t4_read_reg64(adapter, mbox_data + 16),
-               (unsigned long long)t4_read_reg64(adapter, mbox_data + 24),
-               (unsigned long long)t4_read_reg64(adapter, mbox_data + 32),
-               (unsigned long long)t4_read_reg64(adapter, mbox_data + 40),
-               (unsigned long long)t4_read_reg64(adapter, mbox_data + 48),
-               (unsigned long long)t4_read_reg64(adapter, mbox_data + 56));
+       struct mbox_cmd_log *log = adapter->mbox_log;
+       struct mbox_cmd *entry;
+       int i;
+
+       entry = mbox_cmd_log_entry(log, log->cursor++);
+       if (log->cursor == log->size)
+               log->cursor = 0;
+
+       for (i = 0; i < size / 8; i++)
+               entry->cmd[i] = be64_to_cpu(cmd[i]);
+       while (i < MBOX_LEN / 8)
+               entry->cmd[i++] = 0;
+       entry->timestamp = jiffies;
+       entry->seqno = log->seqno++;
+       entry->access = access;
+       entry->execute = execute;
 }
 
 /**
@@ -120,10 +132,13 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
                1, 1, 3, 5, 10, 10, 20, 50, 100
        };
 
+       u16 access = 0, execute = 0;
        u32 v, mbox_data;
-       int i, ms, delay_idx;
+       int i, ms, delay_idx, ret;
        const __be64 *p;
        u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL;
+       u32 cmd_op = FW_CMD_OP_G(be32_to_cpu(((struct fw_cmd_hdr *)cmd)->hi));
+       __be64 cmd_rpl[MBOX_LEN / 8];
 
        /* In T6, mailbox size is changed to 128 bytes to avoid
         * invalidating the entire prefetch buffer.
@@ -148,8 +163,11 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
        v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
        for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
                v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
-       if (v != MBOX_OWNER_DRV)
-               return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT;
+       if (v != MBOX_OWNER_DRV) {
+               ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
+               t4vf_record_mbox(adapter, cmd, size, access, ret);
+               return ret;
+       }
 
        /*
         * Write the command array into the Mailbox Data register array and
@@ -164,6 +182,8 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
         * Data registers before doing the write to the VF Mailbox Control
         * register.
         */
+       if (cmd_op != FW_VI_STATS_CMD)
+               t4vf_record_mbox(adapter, cmd, size, access, 0);
        for (i = 0, p = cmd; i < size; i += 8)
                t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
        t4_read_reg(adapter, mbox_data);         /* flush write */
@@ -209,31 +229,33 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
                         * We return the (negated) firmware command return
                         * code (this depends on FW_SUCCESS == 0).
                         */
+                       get_mbox_rpl(adapter, cmd_rpl, size, mbox_data);
 
                        /* return value in low-order little-endian word */
-                       v = t4_read_reg(adapter, mbox_data);
-                       if (FW_CMD_RETVAL_G(v))
-                               dump_mbox(adapter, "FW Error", mbox_data);
+                       v = be64_to_cpu(cmd_rpl[0]);
 
                        if (rpl) {
                                /* request bit in high-order BE word */
                                WARN_ON((be32_to_cpu(*(const __be32 *)cmd)
                                         & FW_CMD_REQUEST_F) == 0);
-                               get_mbox_rpl(adapter, rpl, size, mbox_data);
+                               memcpy(rpl, cmd_rpl, size);
                                WARN_ON((be32_to_cpu(*(__be32 *)rpl)
                                         & FW_CMD_REQUEST_F) != 0);
                        }
                        t4_write_reg(adapter, mbox_ctl,
                                     MBOWNER_V(MBOX_OWNER_NONE));
+                       execute = i + ms;
+                       if (cmd_op != FW_VI_STATS_CMD)
+                               t4vf_record_mbox(adapter, cmd_rpl, size, access,
+                                                execute);
                        return -FW_CMD_RETVAL_G(v);
                }
        }
 
-       /*
-        * We timed out.  Return the error ...
-        */
-       dump_mbox(adapter, "FW Timeout", mbox_data);
-       return -ETIMEDOUT;
+       /* We timed out.  Return the error ... */
+       ret = -ETIMEDOUT;
+       t4vf_record_mbox(adapter, cmd, size, access, ret);
+       return ret;
 }
 
 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
index b2182d3ba3cc464d1504856e7d3d36712e9c65d5..f15560a0671804df24c7ac2ae61cb6b9b3fd5f01 100644 (file)
@@ -2740,6 +2740,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                netdev->hw_features |= NETIF_F_RXCSUM;
 
        netdev->features |= netdev->hw_features;
+       netdev->vlan_features |= netdev->features;
 
 #ifdef CONFIG_RFS_ACCEL
        netdev->hw_features |= NETIF_F_NTUPLE;
index 48d91941408d8fe3aa148c632ff33c1ea4201335..9e061307975f3f61c1689df3c2a1b5c612e94fe9 100644 (file)
@@ -966,7 +966,7 @@ dm9000_init_dm9000(struct net_device *dev)
        /* Init Driver variable */
        db->tx_pkt_cnt = 0;
        db->queue_pkt_len = 0;
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
 }
 
 /* Our watchdog timed out. Called by the networking layer */
@@ -985,7 +985,7 @@ static void dm9000_timeout(struct net_device *dev)
        dm9000_init_dm9000(dev);
        dm9000_unmask_interrupts(db);
        /* We can accept TX packets again */
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        netif_wake_queue(dev);
 
        /* Restore previous register address */
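The dev->trans_start = jiffies conversions scattered through this series all rely on the new netif_trans_update() helper, which in this kernel is roughly the following (sketch of include/linux/netdevice.h; it stamps TX queue 0 and skips redundant stores to avoid dirtying the cache line):

        /* Approximate sketch of the 4.7-era helper. */
        static inline void netif_trans_update(struct net_device *dev)
        {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

                if (txq->trans_start != jiffies)
                        txq->trans_start = jiffies;
        }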
index 3acde3b9b767c4c025b57130c0e497e62b6cd46c..cbe84972ff7a644fb31f84b30a6cf09e12cae737 100644 (file)
@@ -1336,7 +1336,7 @@ de4x5_open(struct net_device *dev)
     }
 
     lp->interrupt = UNMASK_INTERRUPTS;
-    dev->trans_start = jiffies; /* prevent tx timeout */
+    netif_trans_update(dev); /* prevent tx timeout */
 
     START_DE4X5;
 
@@ -1465,7 +1465,7 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
 
     netif_stop_queue(dev);
     if (!lp->tx_enable)                   /* Cannot send for now */
-       return NETDEV_TX_LOCKED;
+               goto tx_err;
 
     /*
     ** Clean out the TX ring asynchronously to interrupts - sometimes the
@@ -1478,7 +1478,7 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
 
     /* Test if cache is already locked - requeue skb if so */
     if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt)
-       return NETDEV_TX_LOCKED;
+               goto tx_err;
 
     /* Transmit descriptor ring full or stale skb */
     if (netif_queue_stopped(dev) || (u_long) lp->tx_skb[lp->tx_new] > 1) {
@@ -1519,6 +1519,9 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
     lp->cache.lock = 0;
 
     return NETDEV_TX_OK;
+tx_err:
+       dev_kfree_skb_any(skb);
+       return NETDEV_TX_OK;
 }
 
 /*
@@ -1932,7 +1935,7 @@ set_multicast_list(struct net_device *dev)
 
            lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
            outl(POLL_DEMAND, DE4X5_TPD);       /* Start the TX */
-           dev->trans_start = jiffies; /* prevent tx timeout */
+           netif_trans_update(dev); /* prevent tx timeout */
        }
     }
 }
index afd8e78e024e3d2cb6015202d3678210334358ff..8ed0fd8b1dda8301a57e4eb9607eeb9d7ccf9102 100644 (file)
        (__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, \
        (pci_dev)->revision))
 
-/* Sten Check */
-#define DEVICE net_device
-
 /* Structure/enum declaration ------------------------------- */
 struct tx_desc {
         __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
@@ -313,10 +310,10 @@ static u8 SF_mode;                /* Special Function: 1:VLAN, 2:RX Flow Control
 
 
 /* function declaration ------------------------------------- */
-static int dmfe_open(struct DEVICE *);
-static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct DEVICE *);
-static int dmfe_stop(struct DEVICE *);
-static void dmfe_set_filter_mode(struct DEVICE *);
+static int dmfe_open(struct net_device *);
+static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct net_device *);
+static int dmfe_stop(struct net_device *);
+static void dmfe_set_filter_mode(struct net_device *);
 static const struct ethtool_ops netdev_ethtool_ops;
 static u16 read_srom_word(void __iomem *, int);
 static irqreturn_t dmfe_interrupt(int , void *);
@@ -326,8 +323,8 @@ static void poll_dmfe (struct net_device *dev);
 static void dmfe_descriptor_init(struct net_device *);
 static void allocate_rx_buffer(struct net_device *);
 static void update_cr6(u32, void __iomem *);
-static void send_filter_frame(struct DEVICE *);
-static void dm9132_id_table(struct DEVICE *);
+static void send_filter_frame(struct net_device *);
+static void dm9132_id_table(struct net_device *);
 static u16 dmfe_phy_read(void __iomem *, u8, u8, u32);
 static void dmfe_phy_write(void __iomem *, u8, u8, u16, u32);
 static void dmfe_phy_write_1bit(void __iomem *, u32);
@@ -336,12 +333,12 @@ static u8 dmfe_sense_speed(struct dmfe_board_info *);
 static void dmfe_process_mode(struct dmfe_board_info *);
 static void dmfe_timer(unsigned long);
 static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
-static void dmfe_rx_packet(struct DEVICE *, struct dmfe_board_info *);
-static void dmfe_free_tx_pkt(struct DEVICE *, struct dmfe_board_info *);
+static void dmfe_rx_packet(struct net_device *, struct dmfe_board_info *);
+static void dmfe_free_tx_pkt(struct net_device *, struct dmfe_board_info *);
 static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
-static void dmfe_dynamic_reset(struct DEVICE *);
+static void dmfe_dynamic_reset(struct net_device *);
 static void dmfe_free_rxbuffer(struct dmfe_board_info *);
-static void dmfe_init_dm910x(struct DEVICE *);
+static void dmfe_init_dm910x(struct net_device *);
 static void dmfe_parse_srom(struct dmfe_board_info *);
 static void dmfe_program_DM9801(struct dmfe_board_info *, int);
 static void dmfe_program_DM9802(struct dmfe_board_info *);
@@ -558,7 +555,7 @@ static void dmfe_remove_one(struct pci_dev *pdev)
  *     The interface is opened whenever "ifconfig" activates it.
  */
 
-static int dmfe_open(struct DEVICE *dev)
+static int dmfe_open(struct net_device *dev)
 {
        struct dmfe_board_info *db = netdev_priv(dev);
        const int irq = db->pdev->irq;
@@ -617,7 +614,7 @@ static int dmfe_open(struct DEVICE *dev)
  *     Enable Tx/Rx machine
  */
 
-static void dmfe_init_dm910x(struct DEVICE *dev)
+static void dmfe_init_dm910x(struct net_device *dev)
 {
        struct dmfe_board_info *db = netdev_priv(dev);
        void __iomem *ioaddr = db->ioaddr;
@@ -684,7 +681,7 @@ static void dmfe_init_dm910x(struct DEVICE *dev)
  */
 
 static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
-                                        struct DEVICE *dev)
+                                        struct net_device *dev)
 {
        struct dmfe_board_info *db = netdev_priv(dev);
        void __iomem *ioaddr = db->ioaddr;
@@ -728,7 +725,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
                txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
                db->tx_packet_cnt++;                    /* Ready to send */
                dw32(DCR1, 0x1);                        /* Issue Tx polling */
-               dev->trans_start = jiffies;             /* saved time stamp */
+               netif_trans_update(dev);                /* saved time stamp */
        } else {
                db->tx_queue_cnt++;                     /* queue TX packet */
                dw32(DCR1, 0x1);                        /* Issue Tx polling */
@@ -754,7 +751,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
  *     The interface is stopped when it is brought down.
  */
 
-static int dmfe_stop(struct DEVICE *dev)
+static int dmfe_stop(struct net_device *dev)
 {
        struct dmfe_board_info *db = netdev_priv(dev);
        void __iomem *ioaddr = db->ioaddr;
@@ -798,7 +795,7 @@ static int dmfe_stop(struct DEVICE *dev)
 
 static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
 {
-       struct DEVICE *dev = dev_id;
+       struct net_device *dev = dev_id;
        struct dmfe_board_info *db = netdev_priv(dev);
        void __iomem *ioaddr = db->ioaddr;
        unsigned long flags;
@@ -879,7 +876,7 @@ static void poll_dmfe (struct net_device *dev)
  *     Free TX resource after TX complete
  */
 
-static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
+static void dmfe_free_tx_pkt(struct net_device *dev, struct dmfe_board_info *db)
 {
        struct tx_desc *txptr;
        void __iomem *ioaddr = db->ioaddr;
@@ -934,7 +931,7 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
                db->tx_packet_cnt++;                    /* Ready to send */
                db->tx_queue_cnt--;
                dw32(DCR1, 0x1);                        /* Issue Tx polling */
-               dev->trans_start = jiffies;             /* saved time stamp */
+               netif_trans_update(dev);                /* saved time stamp */
        }
 
        /* Resource available check */
@@ -961,7 +958,7 @@ static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
  *     Receive the come packet and pass to upper layer
  */
 
-static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
+static void dmfe_rx_packet(struct net_device *dev, struct dmfe_board_info *db)
 {
        struct rx_desc *rxptr;
        struct sk_buff *skb, *newskb;
@@ -1052,7 +1049,7 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
  * Set DM910X multicast address
  */
 
-static void dmfe_set_filter_mode(struct DEVICE * dev)
+static void dmfe_set_filter_mode(struct net_device *dev)
 {
        struct dmfe_board_info *db = netdev_priv(dev);
        unsigned long flags;
@@ -1545,7 +1542,7 @@ static void send_filter_frame(struct net_device *dev)
                update_cr6(db->cr6_data | 0x2000, ioaddr);
                dw32(DCR1, 0x1);        /* Issue Tx polling */
                update_cr6(db->cr6_data, ioaddr);
-               dev->trans_start = jiffies;
+               netif_trans_update(dev);
        } else
                db->tx_queue_cnt++;     /* Put in TX queue */
 }
index 5364563c4378a921ca41dc3c53c3cab07e339dd4..7bcccf5cac7acbfc37e8b7e82c10c3d06ed38213 100644 (file)
@@ -44,7 +44,7 @@ void pnic_do_nway(struct net_device *dev)
                        tp->csr6 = new_csr6;
                        /* Restart Tx */
                        tulip_restart_rxtx(tp);
-                       dev->trans_start = jiffies;
+                       netif_trans_update(dev);
                }
        }
 }
@@ -70,7 +70,7 @@ void pnic_lnk_change(struct net_device *dev, int csr5)
                        iowrite32(tp->csr6, ioaddr + CSR6);
                        iowrite32(0x30, ioaddr + CSR12);
                        iowrite32(0x0201F078, ioaddr + 0xB8); /* Turn on autonegotiation. */
-                       dev->trans_start = jiffies;
+                       netif_trans_update(dev);
                }
        } else if (ioread32(ioaddr + CSR5) & TPLnkPass) {
                if (tulip_media_cap[dev->if_port] & MediaIsMII) {
@@ -147,7 +147,7 @@ void pnic_timer(unsigned long data)
                                tp->csr6 = new_csr6;
                                /* Restart Tx */
                                tulip_restart_rxtx(tp);
-                               dev->trans_start = jiffies;
+                               netif_trans_update(dev);
                                if (tulip_debug > 1)
                                        dev_info(&dev->dev,
                                                 "Changing PNIC configuration to %s %s-duplex, CSR6 %08x\n",
index 94d0eebef129fa2b5c27622d223e34c3d999c96f..bbde90bc74fe244f60f925099a77ea50d94084e5 100644 (file)
@@ -605,7 +605,7 @@ static void tulip_tx_timeout(struct net_device *dev)
 
 out_unlock:
        spin_unlock_irqrestore (&tp->lock, flags);
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        netif_wake_queue (dev);
 }
 
index 447d09272ab7ab6d3485073ffcee608dbbf622eb..e750b5ddc0fbb34436f9df49e9cd62ebc5eb0700 100644 (file)
@@ -636,7 +636,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
                txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
                db->tx_packet_cnt++;                    /* Ready to send */
                uw32(DCR1, 0x1);                        /* Issue Tx polling */
-               dev->trans_start = jiffies;             /* saved time stamp */
+               netif_trans_update(dev);                /* saved time stamp */
        }
 
        /* Tx resource check */
@@ -1431,7 +1431,7 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
                update_cr6(db->cr6_data | 0x2000, ioaddr);
                uw32(DCR1, 0x1);        /* Issue Tx polling */
                update_cr6(db->cr6_data, ioaddr);
-               dev->trans_start = jiffies;
+               netif_trans_update(dev);
        } else
                netdev_err(dev, "No Tx resource - Send_filter_frame!\n");
 }
index 3c0e4d5c5fef41a8fe1fe99c0a163d30cd74fc31..1f62b942385101bc475b7d6219b572aebc267cfb 100644 (file)
@@ -966,7 +966,7 @@ static void tx_timeout(struct net_device *dev)
        enable_irq(irq);
 
        netif_wake_queue(dev);
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        np->stats.tx_errors++;
 }
 
index f92b6d9483982bbed33fa8f1dfee73ac67a915e6..78f144696d6b335f90e642709864f1683388d44a 100644 (file)
@@ -706,7 +706,7 @@ rio_tx_timeout (struct net_device *dev)
                dev->name, dr32(TxStatus));
        rio_free_tx(dev, 0);
        dev->if_port = 0;
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
 }
 
 static netdev_tx_t
index a28a2e583f0fe48ffa3eb1194dc93a16984f1d0b..58c6338a839e194319146ebe69e282db69db5465 100644 (file)
@@ -1011,7 +1011,7 @@ static void tx_timeout(struct net_device *dev)
 
        dev->if_port = 0;
 
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        dev->stats.tx_errors++;
        if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
                netif_wake_queue(dev);
index 536686476369bfb242ceca0e28a436d150a1ed15..ed98ef1ecac38d77edf7bb66c206d0099c0ddfef 100644 (file)
@@ -4890,11 +4890,13 @@ static int be_resume(struct be_adapter *adapter)
        if (status)
                return status;
 
-       if (netif_running(netdev)) {
+       rtnl_lock();
+       if (netif_running(netdev))
                status = be_open(netdev);
-               if (status)
-                       return status;
-       }
+       rtnl_unlock();
+
+       if (status)
+               return status;
 
        netif_device_attach(netdev);
 
index b1b9ebafb354d28676f85b5b9522655e0daeaff1..c08bd763172a76da6ae68a00d55e06992d6cacdd 100644 (file)
@@ -1227,7 +1227,7 @@ static void fealnx_tx_timeout(struct net_device *dev)
 
        spin_unlock_irqrestore(&np->lock, flags);
 
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        dev->stats.tx_errors++;
        netif_wake_queue(dev); /* or .._start_.. ?? */
 }
index 195122e11f1060de30b2fb34de72f7db31ef659f..f58f9ea51639ca9ab60018a34a1df2ffe3fd3d40 100644 (file)
@@ -517,7 +517,6 @@ struct fec_enet_private {
 
        /* Phylib and MDIO interface */
        struct  mii_bus *mii_bus;
-       struct  phy_device *phy_dev;
        int     mii_timeout;
        uint    phy_speed;
        phy_interface_t phy_interface;
index 08243c2ff4b4ae3d201c95c0bbc9caa5a7bd776c..ca2cccc594fdc240d4951cd8deb0b6232aa80c2f 100644 (file)
@@ -967,10 +967,10 @@ fec_restart(struct net_device *ndev)
                        rcntl &= ~(1 << 8);
 
                /* 1G, 100M or 10M */
-               if (fep->phy_dev) {
-                       if (fep->phy_dev->speed == SPEED_1000)
+               if (ndev->phydev) {
+                       if (ndev->phydev->speed == SPEED_1000)
                                ecntl |= (1 << 5);
-                       else if (fep->phy_dev->speed == SPEED_100)
+                       else if (ndev->phydev->speed == SPEED_100)
                                rcntl &= ~(1 << 9);
                        else
                                rcntl |= (1 << 9);
@@ -991,7 +991,7 @@ fec_restart(struct net_device *ndev)
                         */
                        cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
                                ? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
-                       if (fep->phy_dev && fep->phy_dev->speed == SPEED_10)
+                       if (ndev->phydev && ndev->phydev->speed == SPEED_10)
                                cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
                        writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);
 
@@ -1005,7 +1005,7 @@ fec_restart(struct net_device *ndev)
        /* enable pause frame*/
        if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
            ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
-            fep->phy_dev && fep->phy_dev->pause)) {
+            ndev->phydev && ndev->phydev->pause)) {
                rcntl |= FEC_ENET_FCE;
 
                /* set FIFO threshold parameter to reduce overrun */
@@ -1521,9 +1521,15 @@ fec_enet_rx(struct net_device *ndev, int budget)
        struct fec_enet_private *fep = netdev_priv(ndev);
 
        for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
-               clear_bit(queue_id, &fep->work_rx);
-               pkt_received += fec_enet_rx_queue(ndev,
+               int ret;
+
+               ret = fec_enet_rx_queue(ndev,
                                        budget - pkt_received, queue_id);
+
+               if (ret < budget - pkt_received)
+                       clear_bit(queue_id, &fep->work_rx);
+
+               pkt_received += ret;
        }
        return pkt_received;
 }
@@ -1679,7 +1685,7 @@ static void fec_get_mac(struct net_device *ndev)
 static void fec_enet_adjust_link(struct net_device *ndev)
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
-       struct phy_device *phy_dev = fep->phy_dev;
+       struct phy_device *phy_dev = ndev->phydev;
        int status_change = 0;
 
        /* Prevent a state halted on mii error */
@@ -1879,8 +1885,6 @@ static int fec_enet_mii_probe(struct net_device *ndev)
        int phy_id;
        int dev_id = fep->dev_id;
 
-       fep->phy_dev = NULL;
-
        if (fep->phy_node) {
                phy_dev = of_phy_connect(ndev, fep->phy_node,
                                         &fec_enet_adjust_link, 0,
@@ -1928,7 +1932,6 @@ static int fec_enet_mii_probe(struct net_device *ndev)
 
        phy_dev->advertising = phy_dev->supported;
 
-       fep->phy_dev = phy_dev;
        fep->link = 0;
        fep->full_duplex = 0;
 
@@ -2058,30 +2061,6 @@ static void fec_enet_mii_remove(struct fec_enet_private *fep)
        }
 }
 
-static int fec_enet_get_settings(struct net_device *ndev,
-                                 struct ethtool_cmd *cmd)
-{
-       struct fec_enet_private *fep = netdev_priv(ndev);
-       struct phy_device *phydev = fep->phy_dev;
-
-       if (!phydev)
-               return -ENODEV;
-
-       return phy_ethtool_gset(phydev, cmd);
-}
-
-static int fec_enet_set_settings(struct net_device *ndev,
-                                struct ethtool_cmd *cmd)
-{
-       struct fec_enet_private *fep = netdev_priv(ndev);
-       struct phy_device *phydev = fep->phy_dev;
-
-       if (!phydev)
-               return -ENODEV;
-
-       return phy_ethtool_sset(phydev, cmd);
-}
-
 static void fec_enet_get_drvinfo(struct net_device *ndev,
                                 struct ethtool_drvinfo *info)
 {
@@ -2214,7 +2193,7 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
 
-       if (!fep->phy_dev)
+       if (!ndev->phydev)
                return -ENODEV;
 
        if (pause->tx_pause != pause->rx_pause) {
@@ -2230,17 +2209,17 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
        fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
 
        if (pause->rx_pause || pause->autoneg) {
-               fep->phy_dev->supported |= ADVERTISED_Pause;
-               fep->phy_dev->advertising |= ADVERTISED_Pause;
+               ndev->phydev->supported |= ADVERTISED_Pause;
+               ndev->phydev->advertising |= ADVERTISED_Pause;
        } else {
-               fep->phy_dev->supported &= ~ADVERTISED_Pause;
-               fep->phy_dev->advertising &= ~ADVERTISED_Pause;
+               ndev->phydev->supported &= ~ADVERTISED_Pause;
+               ndev->phydev->advertising &= ~ADVERTISED_Pause;
        }
 
        if (pause->autoneg) {
                if (netif_running(ndev))
                        fec_stop(ndev);
-               phy_start_aneg(fep->phy_dev);
+               phy_start_aneg(ndev->phydev);
        }
        if (netif_running(ndev)) {
                napi_disable(&fep->napi);
@@ -2356,8 +2335,7 @@ static int fec_enet_get_sset_count(struct net_device *dev, int sset)
 
 static int fec_enet_nway_reset(struct net_device *dev)
 {
-       struct fec_enet_private *fep = netdev_priv(dev);
-       struct phy_device *phydev = fep->phy_dev;
+       struct phy_device *phydev = dev->phydev;
 
        if (!phydev)
                return -ENODEV;
@@ -2562,8 +2540,6 @@ fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
 }
 
 static const struct ethtool_ops fec_enet_ethtool_ops = {
-       .get_settings           = fec_enet_get_settings,
-       .set_settings           = fec_enet_set_settings,
        .get_drvinfo            = fec_enet_get_drvinfo,
        .get_regs_len           = fec_enet_get_regs_len,
        .get_regs               = fec_enet_get_regs,
@@ -2583,12 +2559,14 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
        .set_tunable            = fec_enet_set_tunable,
        .get_wol                = fec_enet_get_wol,
        .set_wol                = fec_enet_set_wol,
+       .get_link_ksettings     = phy_ethtool_get_link_ksettings,
+       .set_link_ksettings     = phy_ethtool_set_link_ksettings,
 };
 
 static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
-       struct phy_device *phydev = fep->phy_dev;
+       struct phy_device *phydev = ndev->phydev;
 
        if (!netif_running(ndev))
                return -EINVAL;
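fec can point its ethtool ops straight at the generic phy_ethtool_get_link_ksettings()/phy_ethtool_set_link_ksettings() helpers because the PHY now hangs off ndev->phydev; the drivers converted below (mpc52xx, fs_enet, gianfar, ucc_geth) still cache their own phydev pointer, so each wraps the ksettings accessors by hand. A minimal sketch of that wrapper pattern, with placeholder names:

        /* "foo" and priv->phydev are placeholders, not from this patch. */
        static int foo_get_ksettings(struct net_device *dev,
                                     struct ethtool_link_ksettings *cmd)
        {
                struct foo_priv *priv = netdev_priv(dev);

                if (!priv->phydev)
                        return -ENODEV;

                return phy_ethtool_ksettings_get(priv->phydev, cmd);
        }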
@@ -2843,7 +2821,7 @@ fec_enet_open(struct net_device *ndev)
                goto err_enet_mii_probe;
 
        napi_enable(&fep->napi);
-       phy_start(fep->phy_dev);
+       phy_start(ndev->phydev);
        netif_tx_start_all_queues(ndev);
 
        device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
@@ -2867,7 +2845,7 @@ fec_enet_close(struct net_device *ndev)
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
 
-       phy_stop(fep->phy_dev);
+       phy_stop(ndev->phydev);
 
        if (netif_device_present(ndev)) {
                napi_disable(&fep->napi);
@@ -2875,8 +2853,7 @@ fec_enet_close(struct net_device *ndev)
                fec_stop(ndev);
        }
 
-       phy_disconnect(fep->phy_dev);
-       fep->phy_dev = NULL;
+       phy_disconnect(ndev->phydev);
 
        fec_enet_clk_enable(ndev, false);
        pinctrl_pm_select_sleep_state(&fep->pdev->dev);
@@ -3504,7 +3481,7 @@ static int __maybe_unused fec_suspend(struct device *dev)
        if (netif_running(ndev)) {
                if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
                        fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
-               phy_stop(fep->phy_dev);
+               phy_stop(ndev->phydev);
                napi_disable(&fep->napi);
                netif_tx_lock_bh(ndev);
                netif_device_detach(ndev);
@@ -3564,7 +3541,7 @@ static int __maybe_unused fec_resume(struct device *dev)
                netif_device_attach(ndev);
                netif_tx_unlock_bh(ndev);
                napi_enable(&fep->napi);
-               phy_start(fep->phy_dev);
+               phy_start(ndev->phydev);
        }
        rtnl_unlock();
 
index 25553ee857b4671e08904a015dd8daf854b9a90d..f44471485d001c45925926d1679ea5084a8070a5 100644 (file)
@@ -763,24 +763,28 @@ static void mpc52xx_fec_reset(struct net_device *dev)
 
 /* ethtool interface */
 
-static int mpc52xx_fec_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int mpc52xx_fec_get_ksettings(struct net_device *dev,
+                                    struct ethtool_link_ksettings *cmd)
 {
        struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+       struct phy_device *phydev = priv->phydev;
 
        if (!priv->phydev)
                return -ENODEV;
 
-       return phy_ethtool_gset(priv->phydev, cmd);
+       return phy_ethtool_ksettings_get(phydev, cmd);
 }
 
-static int mpc52xx_fec_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int mpc52xx_fec_set_ksettings(struct net_device *dev,
+                                    const struct ethtool_link_ksettings *cmd)
 {
        struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+       struct phy_device *phydev = priv->phydev;
 
        if (!priv->phydev)
                return -ENODEV;
 
-       return phy_ethtool_sset(priv->phydev, cmd);
+       return phy_ethtool_ksettings_set(phydev, cmd);
 }
 
 static u32 mpc52xx_fec_get_msglevel(struct net_device *dev)
@@ -796,12 +800,12 @@ static void mpc52xx_fec_set_msglevel(struct net_device *dev, u32 level)
 }
 
 static const struct ethtool_ops mpc52xx_fec_ethtool_ops = {
-       .get_settings = mpc52xx_fec_get_settings,
-       .set_settings = mpc52xx_fec_set_settings,
        .get_link = ethtool_op_get_link,
        .get_msglevel = mpc52xx_fec_get_msglevel,
        .set_msglevel = mpc52xx_fec_set_msglevel,
        .get_ts_info = ethtool_op_get_ts_info,
+       .get_link_ksettings = mpc52xx_fec_get_ksettings,
+       .set_link_ksettings = mpc52xx_fec_set_ksettings,
 };
 
 
index 48a9c176e0d1597bb13603aebc9776dbd1020442..da90b5ad6e36a55ab5a1df239b69a5f5e26f55a9 100644 (file)
@@ -847,24 +847,28 @@ static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
                regs->version = 0;
 }
 
-static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int fs_get_ksettings(struct net_device *dev,
+                           struct ethtool_link_ksettings *cmd)
 {
        struct fs_enet_private *fep = netdev_priv(dev);
+       struct phy_device *phydev = fep->phydev;
 
        if (!fep->phydev)
                return -ENODEV;
 
-       return phy_ethtool_gset(fep->phydev, cmd);
+       return phy_ethtool_ksettings_get(phydev, cmd);
 }
 
-static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int fs_set_ksettings(struct net_device *dev,
+                           const struct ethtool_link_ksettings *cmd)
 {
        struct fs_enet_private *fep = netdev_priv(dev);
+       struct phy_device *phydev = fep->phydev;
 
        if (!fep->phydev)
                return -ENODEV;
 
-       return phy_ethtool_sset(fep->phydev, cmd);
+       return phy_ethtool_ksettings_set(phydev, cmd);
 }
 
 static int fs_nway_reset(struct net_device *dev)
@@ -887,14 +891,14 @@ static void fs_set_msglevel(struct net_device *dev, u32 value)
 static const struct ethtool_ops fs_ethtool_ops = {
        .get_drvinfo = fs_get_drvinfo,
        .get_regs_len = fs_get_regs_len,
-       .get_settings = fs_get_settings,
-       .set_settings = fs_set_settings,
        .nway_reset = fs_nway_reset,
        .get_link = ethtool_op_get_link,
        .get_msglevel = fs_get_msglevel,
        .set_msglevel = fs_set_msglevel,
        .get_regs = fs_get_regs,
        .get_ts_info = ethtool_op_get_ts_info,
+       .get_link_ksettings = fs_get_ksettings,
+       .set_link_ksettings = fs_set_ksettings,
 };
 
 static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
index d2f917af539f35b8f994507c5c3758944b1d9d0d..a5800413f9178f8d0e9f9fd3969f0f23fcd72cae 100644 (file)
@@ -2076,7 +2076,7 @@ void gfar_start(struct gfar_private *priv)
 
        gfar_ints_enable(priv);
 
-       priv->ndev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(priv->ndev); /* prevent tx timeout */
 }
 
 static void free_grp_irqs(struct gfar_priv_grp *grp)
index 4b0ee855edd72fad9b748c2d7e41e30fb2aae9b1..2c45c80d9b03e28c16c56529bc054ab0b10f2449 100644 (file)
@@ -185,7 +185,8 @@ static void gfar_gdrvinfo(struct net_device *dev,
 }
 
 
-static int gfar_ssettings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int gfar_set_ksettings(struct net_device *dev,
+                             const struct ethtool_link_ksettings *cmd)
 {
        struct gfar_private *priv = netdev_priv(dev);
        struct phy_device *phydev = priv->phydev;
@@ -193,29 +194,19 @@ static int gfar_ssettings(struct net_device *dev, struct ethtool_cmd *cmd)
        if (NULL == phydev)
                return -ENODEV;
 
-       return phy_ethtool_sset(phydev, cmd);
+       return phy_ethtool_ksettings_set(phydev, cmd);
 }
 
-
-/* Return the current settings in the ethtool_cmd structure */
-static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int gfar_get_ksettings(struct net_device *dev,
+                             struct ethtool_link_ksettings *cmd)
 {
        struct gfar_private *priv = netdev_priv(dev);
        struct phy_device *phydev = priv->phydev;
-       struct gfar_priv_rx_q *rx_queue = NULL;
-       struct gfar_priv_tx_q *tx_queue = NULL;
 
        if (NULL == phydev)
                return -ENODEV;
-       tx_queue = priv->tx_queue[0];
-       rx_queue = priv->rx_queue[0];
-
-       /* etsec-1.7 and older versions have only one txic
-        * and rxic regs although they support multiple queues */
-       cmd->maxtxpkt = get_icft_value(tx_queue->txic);
-       cmd->maxrxpkt = get_icft_value(rx_queue->rxic);
 
-       return phy_ethtool_gset(phydev, cmd);
+       return phy_ethtool_ksettings_get(phydev, cmd);
 }
 
 /* Return the length of the register structure */
@@ -1565,8 +1556,6 @@ static int gfar_get_ts_info(struct net_device *dev,
 }
 
 const struct ethtool_ops gfar_ethtool_ops = {
-       .get_settings = gfar_gsettings,
-       .set_settings = gfar_ssettings,
        .get_drvinfo = gfar_gdrvinfo,
        .get_regs_len = gfar_reglen,
        .get_regs = gfar_get_regs,
@@ -1589,4 +1578,6 @@ const struct ethtool_ops gfar_ethtool_ops = {
        .set_rxnfc = gfar_set_nfc,
        .get_rxnfc = gfar_get_nfc,
        .get_ts_info = gfar_get_ts_info,
+       .get_link_ksettings = gfar_get_ksettings,
+       .set_link_ksettings = gfar_set_ksettings,
 };
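
The maxtxpkt/maxrxpkt hints dropped from gfar_gsettings() above have no
equivalent in struct ethtool_link_ksettings; interrupt-coalescing parameters
belong in the dedicated {get,set}_coalesce hooks instead. A sketch under that
assumption (foo_priv and its icft fields are hypothetical):

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>

    struct foo_priv {
            u32 tx_icft;    /* tx interrupt-coalescing frame threshold */
            u32 rx_icft;    /* rx interrupt-coalescing frame threshold */
    };

    static int foo_get_coalesce(struct net_device *dev,
                                struct ethtool_coalesce *ec)
    {
            struct foo_priv *priv = netdev_priv(dev);

            /* report the thresholds through the proper ethtool channel */
            ec->tx_max_coalesced_frames = priv->tx_icft;
            ec->rx_max_coalesced_frames = priv->rx_icft;

            return 0;
    }
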
index 89714f5e0dfc57b2246affedf002e19da0d17c3c..812a968a78e9808c37fa9bef1d308b42fead4d95 100644 (file)
@@ -105,23 +105,20 @@ static const char rx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
 #define UEC_RX_FW_STATS_LEN ARRAY_SIZE(rx_fw_stat_gstrings)
 
 static int
-uec_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+uec_get_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd)
 {
        struct ucc_geth_private *ugeth = netdev_priv(netdev);
        struct phy_device *phydev = ugeth->phydev;
-       struct ucc_geth_info *ug_info = ugeth->ug_info;
 
        if (!phydev)
                return -ENODEV;
 
-       ecmd->maxtxpkt = 1;
-       ecmd->maxrxpkt = ug_info->interruptcoalescingmaxvalue[0];
-
-       return phy_ethtool_gset(phydev, ecmd);
+       return phy_ethtool_ksettings_get(phydev, cmd);
 }
 
 static int
-uec_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+uec_set_ksettings(struct net_device *netdev,
+                 const struct ethtool_link_ksettings *cmd)
 {
        struct ucc_geth_private *ugeth = netdev_priv(netdev);
        struct phy_device *phydev = ugeth->phydev;
@@ -129,7 +126,7 @@ uec_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
        if (!phydev)
                return -ENODEV;
 
-       return phy_ethtool_sset(phydev, ecmd);
+       return phy_ethtool_ksettings_set(phydev, cmd);
 }
 
 static void
@@ -392,8 +389,6 @@ static int uec_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 #endif /* CONFIG_PM */
 
 static const struct ethtool_ops uec_ethtool_ops = {
-       .get_settings           = uec_get_settings,
-       .set_settings           = uec_set_settings,
        .get_drvinfo            = uec_get_drvinfo,
        .get_regs_len           = uec_get_regs_len,
        .get_regs               = uec_get_regs,
@@ -411,6 +406,8 @@ static const struct ethtool_ops uec_ethtool_ops = {
        .get_wol                = uec_get_wol,
        .set_wol                = uec_set_wol,
        .get_ts_info            = ethtool_op_get_ts_info,
+       .get_link_ksettings     = uec_get_ksettings,
+       .set_link_ksettings     = uec_set_ksettings,
 };
 
 void uec_set_ethtool_ops(struct net_device *netdev)
index 678f5018d0be1c410c809fd8031a9f6e6601210e..399cfd217288d9e32b0fd9ba188e974a4663a87e 100644 (file)
@@ -746,7 +746,7 @@ static irqreturn_t fjn_interrupt(int dummy, void *dev_id)
            lp->sent = lp->tx_queue ;
            lp->tx_queue = 0;
            lp->tx_queue_len = 0;
-           dev->trans_start = jiffies;
+           netif_trans_update(dev);
        } else {
            lp->tx_started = 0;
        }
index e51892d518ff3ccca8aaed43838d7343b4844dc3..b9f2ea59308a6869eeca58cc1e2b6d9b7d56ea49 100644 (file)
@@ -636,7 +636,7 @@ static int hix5hd2_net_xmit(struct sk_buff *skb, struct net_device *dev)
        pos = dma_ring_incr(pos, TX_DESC_NUM);
        writel_relaxed(dma_byte(pos), priv->base + TX_BQ_WR_ADDR);
 
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += skb->len;
        netdev_sent_queue(dev, skb->len);
index 159142272afbd07df97ae5b090e3a6428a2d127d..7a757e88c89a78c0847a5abd0d83827c89bc3f67 100644 (file)
@@ -29,25 +29,6 @@ static struct hns_mac_cb *hns_get_mac_cb(struct hnae_handle *handle)
        return vf_cb->mac_cb;
 }
 
-/**
- * hns_ae_map_eport_to_dport - translate enet port id to dsaf port id
- * @port_id: enet port id
- *: debug port 0-1, service port 2 -7 (dsaf mode only 2)
- * return: dsaf port id
- *: service ports 0 - 5, debug port 6-7
- **/
-static int hns_ae_map_eport_to_dport(u32 port_id)
-{
-       int port_index;
-
-       if (port_id < DSAF_DEBUG_NW_NUM)
-               port_index = port_id + DSAF_SERVICE_PORT_NUM_PER_DSAF;
-       else
-               port_index = port_id - DSAF_DEBUG_NW_NUM;
-
-       return port_index;
-}
-
 static struct dsaf_device *hns_ae_get_dsaf_dev(struct hnae_ae_dev *dev)
 {
        return container_of(dev, struct dsaf_device, ae_dev);
@@ -56,50 +37,35 @@ static struct dsaf_device *hns_ae_get_dsaf_dev(struct hnae_ae_dev *dev)
 static struct hns_ppe_cb *hns_get_ppe_cb(struct hnae_handle *handle)
 {
        int ppe_index;
-       int ppe_common_index;
        struct ppe_common_cb *ppe_comm;
        struct  hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
 
-       if (vf_cb->port_index < DSAF_SERVICE_PORT_NUM_PER_DSAF) {
-               ppe_index = vf_cb->port_index;
-               ppe_common_index = 0;
-       } else {
-               ppe_index = 0;
-               ppe_common_index =
-                       vf_cb->port_index - DSAF_SERVICE_PORT_NUM_PER_DSAF + 1;
-       }
-       ppe_comm = vf_cb->dsaf_dev->ppe_common[ppe_common_index];
+       ppe_comm = vf_cb->dsaf_dev->ppe_common[0];
+       ppe_index = vf_cb->port_index;
+
        return &ppe_comm->ppe_cb[ppe_index];
 }
 
 static int hns_ae_get_q_num_per_vf(
        struct dsaf_device *dsaf_dev, int port)
 {
-       int common_idx = hns_dsaf_get_comm_idx_by_port(port);
-
-       return dsaf_dev->rcb_common[common_idx]->max_q_per_vf;
+       return dsaf_dev->rcb_common[0]->max_q_per_vf;
 }
 
 static int hns_ae_get_vf_num_per_port(
        struct dsaf_device *dsaf_dev, int port)
 {
-       int common_idx = hns_dsaf_get_comm_idx_by_port(port);
-
-       return dsaf_dev->rcb_common[common_idx]->max_vfn;
+       return dsaf_dev->rcb_common[0]->max_vfn;
 }
 
 static struct ring_pair_cb *hns_ae_get_base_ring_pair(
        struct dsaf_device *dsaf_dev, int port)
 {
-       int common_idx = hns_dsaf_get_comm_idx_by_port(port);
-       struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[common_idx];
+       struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[0];
        int q_num = rcb_comm->max_q_per_vf;
        int vf_num = rcb_comm->max_vfn;
 
-       if (common_idx == HNS_DSAF_COMM_SERVICE_NW_IDX)
-               return &rcb_comm->ring_pair_cb[port * q_num * vf_num];
-       else
-               return &rcb_comm->ring_pair_cb[0];
+       return &rcb_comm->ring_pair_cb[port * q_num * vf_num];
 }
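
With the debug/service remapping gone, the base ring pair is plain arithmetic:
in 6port-16rss mode, for instance (max_vfn = 1 and max_q_per_vf = 16 per the
hns_rcb_get_queue_mode() table later in this series), port 2 starts at
ring_pair_cb[2 * 16 * 1] = ring_pair_cb[32].
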
 
 static struct ring_pair_cb *hns_ae_get_ring_pair(struct hnae_queue *q)
@@ -110,7 +76,6 @@ static struct ring_pair_cb *hns_ae_get_ring_pair(struct hnae_queue *q)
 struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
                                      u32 port_id)
 {
-       int port_idx;
        int vfnum_per_port;
        int qnum_per_vf;
        int i;
@@ -120,11 +85,10 @@ struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
        struct hnae_vf_cb *vf_cb;
 
        dsaf_dev = hns_ae_get_dsaf_dev(dev);
-       port_idx = hns_ae_map_eport_to_dport(port_id);
 
-       ring_pair_cb = hns_ae_get_base_ring_pair(dsaf_dev, port_idx);
-       vfnum_per_port = hns_ae_get_vf_num_per_port(dsaf_dev, port_idx);
-       qnum_per_vf = hns_ae_get_q_num_per_vf(dsaf_dev, port_idx);
+       ring_pair_cb = hns_ae_get_base_ring_pair(dsaf_dev, port_id);
+       vfnum_per_port = hns_ae_get_vf_num_per_port(dsaf_dev, port_id);
+       qnum_per_vf = hns_ae_get_q_num_per_vf(dsaf_dev, port_id);
 
        vf_cb = kzalloc(sizeof(*vf_cb) +
                        qnum_per_vf * sizeof(struct hnae_queue *), GFP_KERNEL);
@@ -163,14 +127,14 @@ struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
        }
 
        vf_cb->dsaf_dev = dsaf_dev;
-       vf_cb->port_index = port_idx;
-       vf_cb->mac_cb = &dsaf_dev->mac_cb[port_idx];
+       vf_cb->port_index = port_id;
+       vf_cb->mac_cb = dsaf_dev->mac_cb[port_id];
 
        ae_handle->phy_if = vf_cb->mac_cb->phy_if;
        ae_handle->phy_node = vf_cb->mac_cb->phy_node;
        ae_handle->if_support = vf_cb->mac_cb->if_support;
        ae_handle->port_type = vf_cb->mac_cb->mac_type;
-       ae_handle->dport_id = port_idx;
+       ae_handle->dport_id = port_id;
 
        return ae_handle;
 vf_id_err:
@@ -320,11 +284,8 @@ static void hns_ae_reset(struct hnae_handle *handle)
        struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
 
        if (vf_cb->mac_cb->mac_type == HNAE_PORT_DEBUG) {
-               u8 ppe_common_index =
-                       vf_cb->port_index - DSAF_SERVICE_PORT_NUM_PER_DSAF + 1;
-
                hns_mac_reset(vf_cb->mac_cb);
-               hns_ppe_reset_common(vf_cb->dsaf_dev, ppe_common_index);
+               hns_ppe_reset_common(vf_cb->dsaf_dev, 0);
        }
 }
 
@@ -703,7 +664,7 @@ void hns_ae_update_led_status(struct hnae_handle *handle)
 
        assert(handle);
        mac_cb = hns_get_mac_cb(handle);
-       if (!mac_cb->cpld_vaddr)
+       if (!mac_cb->cpld_ctrl)
                return;
        hns_set_led_opt(mac_cb);
 }
@@ -723,7 +684,6 @@ int hns_ae_cpld_set_led_id(struct hnae_handle *handle,
 void hns_ae_get_regs(struct hnae_handle *handle, void *data)
 {
        u32 *p = data;
-       u32 rcb_com_idx;
        int i;
        struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
        struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle);
@@ -731,8 +691,7 @@ void hns_ae_get_regs(struct hnae_handle *handle, void *data)
        hns_ppe_get_regs(ppe_cb, p);
        p += hns_ppe_get_regs_count();
 
-       rcb_com_idx = hns_dsaf_get_comm_idx_by_port(vf_cb->port_index);
-       hns_rcb_get_common_regs(vf_cb->dsaf_dev->rcb_common[rcb_com_idx], p);
+       hns_rcb_get_common_regs(vf_cb->dsaf_dev->rcb_common[0], p);
        p += hns_rcb_get_common_regs_count();
 
        for (i = 0; i < handle->q_num; i++) {
index 10c367d209556bbe7a3c602acdd6338c9b5e6e63..611581fccf2a4eb1d569514d4c6613ebf54fee3e 100644 (file)
@@ -7,18 +7,19 @@
  * (at your option) any later version.
  */
 
-#include <linux/module.h>
-#include <linux/kernel.h>
 #include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/phy_fixed.h>
 #include <linux/interrupt.h>
-#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/phy_fixed.h>
+#include <linux/platform_device.h>
 
-#include "hns_dsaf_misc.h"
 #include "hns_dsaf_main.h"
+#include "hns_dsaf_misc.h"
 #include "hns_dsaf_rcb.h"
 
 #define MAC_EN_FLAG_V          0xada0328
@@ -81,17 +82,6 @@ static enum mac_mode hns_get_enet_interface(const struct hns_mac_cb *mac_cb)
        }
 }
 
-int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
-{
-       if (!mac_cb->cpld_vaddr)
-               return -ENODEV;
-
-       *sfp_prsnt = !dsaf_read_b((u8 *)mac_cb->cpld_vaddr
-                                       + MAC_SFP_PORT_OFFSET);
-
-       return 0;
-}
-
 void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status)
 {
        struct mac_driver *mac_ctrl_drv;
@@ -168,10 +158,9 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
                                      u8 vmid, u8 *port_num)
 {
        u8 tmp_port;
-       u32 comm_idx;
 
        if (mac_cb->dsaf_dev->dsaf_mode <= DSAF_MODE_ENABLE) {
-               if (mac_cb->mac_id != DSAF_MAX_PORT_NUM_PER_CHIP) {
+               if (mac_cb->mac_id != DSAF_MAX_PORT_NUM) {
                        dev_err(mac_cb->dev,
                                "input invalid,%s mac%d vmid%d !\n",
                                mac_cb->dsaf_dev->ae_dev.name,
@@ -179,7 +168,7 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
                        return -EINVAL;
                }
        } else if (mac_cb->dsaf_dev->dsaf_mode < DSAF_MODE_MAX) {
-               if (mac_cb->mac_id >= DSAF_MAX_PORT_NUM_PER_CHIP) {
+               if (mac_cb->mac_id >= DSAF_MAX_PORT_NUM) {
                        dev_err(mac_cb->dev,
                                "input invalid,%s mac%d vmid%d!\n",
                                mac_cb->dsaf_dev->ae_dev.name,
@@ -192,9 +181,7 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
                return -EINVAL;
        }
 
-       comm_idx = hns_dsaf_get_comm_idx_by_port(mac_cb->mac_id);
-
-       if (vmid >= mac_cb->dsaf_dev->rcb_common[comm_idx]->max_vfn) {
+       if (vmid >= mac_cb->dsaf_dev->rcb_common[0]->max_vfn) {
                dev_err(mac_cb->dev, "input invalid,%s mac%d vmid%d !\n",
                        mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id, vmid);
                return -EINVAL;
@@ -234,7 +221,7 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
 }
 
 /**
- *hns_mac_get_inner_port_num - change vf mac address
+ *hns_mac_change_vf_addr - change vf mac address
  *@mac_cb: mac device
  *@vmid: vmid
  *@addr:mac address
@@ -249,7 +236,7 @@ int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb,
        struct mac_entry_idx *old_entry;
 
        old_entry = &mac_cb->addr_entry_idx[vmid];
-       if (dsaf_dev) {
+       if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
                memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
                mac_entry.in_vlan_id = old_entry->vlan_id;
                mac_entry.in_port_num = mac_cb->mac_id;
@@ -289,7 +276,7 @@ int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
        struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
        struct dsaf_drv_mac_single_dest_entry mac_entry;
 
-       if (dsaf_dev && addr) {
+       if (!HNS_DSAF_IS_DEBUG(dsaf_dev) && addr) {
                memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
                mac_entry.in_vlan_id = 0;/*vlan_id;*/
                mac_entry.in_port_num = mac_cb->mac_id;
@@ -380,7 +367,7 @@ static int hns_mac_port_config_bc_en(struct hns_mac_cb *mac_cb,
        if (mac_cb->mac_type == HNAE_PORT_DEBUG)
                return 0;
 
-       if (dsaf_dev) {
+       if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
                memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
                mac_entry.in_vlan_id = vlan_id;
                mac_entry.in_port_num = mac_cb->mac_id;
@@ -418,7 +405,7 @@ int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable)
 
        uc_mac_entry = &mac_cb->addr_entry_idx[vmid];
 
-       if (dsaf_dev)  {
+       if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
                memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
                mac_entry.in_vlan_id = uc_mac_entry->vlan_id;
                mac_entry.in_port_num = mac_cb->mac_id;
@@ -651,14 +638,18 @@ free_mac_drv:
 }
 
 /**
- *mac_free_dev  - get mac information from device node
+ *hns_mac_get_info  - get mac information from device node
  *@mac_cb: mac device
  *@np:device node
- *@mac_mode_idx:mac mode index
+ * return: 0 - success, negative - fail
  */
-static void hns_mac_get_info(struct hns_mac_cb *mac_cb,
-                            struct device_node *np, u32 mac_mode_idx)
+static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
 {
+       struct device_node *np = mac_cb->dev->of_node;
+       struct regmap *syscon;
+       struct of_phandle_args cpld_args;
+       u32 ret;
+
        mac_cb->link = false;
        mac_cb->half_duplex = false;
        mac_cb->speed = mac_phy_to_speed[mac_cb->phy_if];
@@ -674,12 +665,73 @@ static void hns_mac_get_info(struct hns_mac_cb *mac_cb,
 
        mac_cb->max_frm = MAC_DEFAULT_MTU;
        mac_cb->tx_pause_frm_time = MAC_DEFAULT_PAUSE_TIME;
-
-       /* Get the rest of the PHY information */
-       mac_cb->phy_node = of_parse_phandle(np, "phy-handle", mac_cb->mac_id);
+       mac_cb->port_rst_off = mac_cb->mac_id;
+       mac_cb->port_mode_off = 0;
+
+       /* if the dsaf node doesn't contain a port subnode, get phy-handle
+        * from dsaf node
+        */
+       if (!mac_cb->fw_port) {
+               mac_cb->phy_node = of_parse_phandle(np, "phy-handle",
+                                                   mac_cb->mac_id);
+               if (mac_cb->phy_node)
+                       dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n",
+                               mac_cb->mac_id, mac_cb->phy_node->name);
+               return 0;
+       }
+       if (!is_of_node(mac_cb->fw_port))
+               return -EINVAL;
+       /* parse property from port subnode in dsaf */
+       mac_cb->phy_node = of_parse_phandle(to_of_node(mac_cb->fw_port),
+                                           "phy-handle", 0);
        if (mac_cb->phy_node)
                dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n",
                        mac_cb->mac_id, mac_cb->phy_node->name);
+       syscon = syscon_node_to_regmap(
+                       of_parse_phandle(to_of_node(mac_cb->fw_port),
+                                        "serdes-syscon", 0));
+       if (IS_ERR_OR_NULL(syscon)) {
+               dev_err(mac_cb->dev, "serdes-syscon is needed!\n");
+               return -EINVAL;
+       }
+       mac_cb->serdes_ctrl = syscon;
+
+       ret = fwnode_property_read_u32(mac_cb->fw_port,
+                                      "port-rst-offset",
+                                      &mac_cb->port_rst_off);
+       if (ret) {
+               dev_dbg(mac_cb->dev,
+                       "mac%d port-rst-offset not found, use default value.\n",
+                       mac_cb->mac_id);
+       }
+
+       ret = fwnode_property_read_u32(mac_cb->fw_port,
+                                      "port-mode-offset",
+                                      &mac_cb->port_mode_off);
+       if (ret) {
+               dev_dbg(mac_cb->dev,
+                       "mac%d port-mode-offset not found, use default value.\n",
+                       mac_cb->mac_id);
+       }
+
+       ret = of_parse_phandle_with_fixed_args(to_of_node(mac_cb->fw_port),
+                                              "cpld-syscon", 1, 0, &cpld_args);
+       if (ret) {
+               dev_dbg(mac_cb->dev, "mac%d no cpld-syscon found.\n",
+                       mac_cb->mac_id);
+               mac_cb->cpld_ctrl = NULL;
+       } else {
+               syscon = syscon_node_to_regmap(cpld_args.np);
+               if (IS_ERR_OR_NULL(syscon)) {
+                       dev_dbg(mac_cb->dev, "no cpld-syscon found!\n");
+                       mac_cb->cpld_ctrl = NULL;
+               } else {
+                       mac_cb->cpld_ctrl = syscon;
+                       mac_cb->cpld_ctrl_reg = cpld_args.args[0];
+               }
+       }
+
+       return 0;
 }
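
The cpld-syscon lookup above expects a phandle plus one cell, so a port
subnode carrying, say, cpld-syscon = <&cpld_ctrl 0x00> (a hypothetical
example) resolves with cpld_args.np naming the syscon node and
cpld_args.args[0] supplying the register offset that lands in cpld_ctrl_reg.
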
 
 /**
@@ -709,40 +761,31 @@ u8 __iomem *hns_mac_get_vaddr(struct dsaf_device *dsaf_dev,
                return base + 0x40000 + mac_id * 0x4000 -
                                mac_mode_idx * 0x20000;
        else
-               return mac_cb->serdes_vaddr + 0x1000
-                       + (mac_id - DSAF_SERVICE_PORT_NUM_PER_DSAF) * 0x100000;
+               return dsaf_dev->ppe_base + 0x1000;
 }
 
 /**
  * hns_mac_get_cfg - get mac cfg from dtb or acpi table
  * @dsaf_dev: dsa fabric device struct pointer
- * @mac_idx: mac index
- * retuen 0 - success , negative --fail
+ * @mac_cb: mac control block
+ * return: 0 - success, negative - fail
  */
-int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, int mac_idx)
+int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb)
 {
        int ret;
        u32 mac_mode_idx;
-       struct hns_mac_cb *mac_cb = &dsaf_dev->mac_cb[mac_idx];
 
        mac_cb->dsaf_dev = dsaf_dev;
        mac_cb->dev = dsaf_dev->dev;
-       mac_cb->mac_id = mac_idx;
 
        mac_cb->sys_ctl_vaddr = dsaf_dev->sc_base;
        mac_cb->serdes_vaddr = dsaf_dev->sds_base;
 
-       if (dsaf_dev->cpld_base &&
-           mac_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF) {
-               mac_cb->cpld_vaddr = dsaf_dev->cpld_base +
-                       mac_cb->mac_id * CPLD_ADDR_PORT_OFFSET;
-               cpld_led_reset(mac_cb);
-       }
        mac_cb->sfp_prsnt = 0;
        mac_cb->txpkt_for_led = 0;
        mac_cb->rxpkt_for_led = 0;
 
-       if (mac_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF)
+       if (!HNS_DSAF_IS_DEBUG(dsaf_dev))
                mac_cb->mac_type = HNAE_PORT_SERVICE;
        else
                mac_cb->mac_type = HNAE_PORT_DEBUG;
@@ -758,53 +801,100 @@ int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, int mac_idx)
        }
        mac_mode_idx = (u32)ret;
 
-       hns_mac_get_info(mac_cb, mac_cb->dev->of_node, mac_mode_idx);
+       ret = hns_mac_get_info(mac_cb);
+       if (ret)
+               return ret;
 
+       cpld_led_reset(mac_cb);
        mac_cb->vaddr = hns_mac_get_vaddr(dsaf_dev, mac_cb, mac_mode_idx);
 
        return 0;
 }
 
+static int hns_mac_get_max_port_num(struct dsaf_device *dsaf_dev)
+{
+       if (HNS_DSAF_IS_DEBUG(dsaf_dev))
+               return 1;
+       else
+               return DSAF_MAX_PORT_NUM;
+}
+
 /**
  * hns_mac_init - init mac
  * @dsaf_dev: dsa fabric device struct pointer
- * retuen 0 - success , negative --fail
+ * return: 0 - success, negative - fail
  */
 int hns_mac_init(struct dsaf_device *dsaf_dev)
 {
-       int i;
+       bool found = false;
        int ret;
-       size_t size;
+       u32 port_id;
+       int max_port_num = hns_mac_get_max_port_num(dsaf_dev);
        struct hns_mac_cb *mac_cb;
+       struct fwnode_handle *child;
 
-       size = sizeof(struct hns_mac_cb) * DSAF_MAX_PORT_NUM_PER_CHIP;
-       dsaf_dev->mac_cb = devm_kzalloc(dsaf_dev->dev, size, GFP_KERNEL);
-       if (!dsaf_dev->mac_cb)
-               return -ENOMEM;
+       device_for_each_child_node(dsaf_dev->dev, child) {
+               ret = fwnode_property_read_u32(child, "reg", &port_id);
+               if (ret) {
+                       dev_err(dsaf_dev->dev,
+                               "get reg fail, ret=%d!\n", ret);
+                       return ret;
+               }
+               if (port_id >= max_port_num) {
+                       dev_err(dsaf_dev->dev,
+                               "reg(%u) out of range!\n", port_id);
+                       return -EINVAL;
+               }
+               mac_cb = devm_kzalloc(dsaf_dev->dev, sizeof(*mac_cb),
+                                     GFP_KERNEL);
+               if (!mac_cb)
+                       return -ENOMEM;
+               mac_cb->fw_port = child;
+               mac_cb->mac_id = (u8)port_id;
+               dsaf_dev->mac_cb[port_id] = mac_cb;
+               found = true;
+       }
 
-       for (i = 0; i < DSAF_MAX_PORT_NUM_PER_CHIP; i++) {
-               ret = hns_mac_get_cfg(dsaf_dev, i);
-               if (ret)
-                       goto free_mac_cb;
+       /* if no port subnode was found under the dsaf node, init all
+        * ports instead; this keeps compatibility with the old dts
+        */
+       if (!found) {
+               for (port_id = 0; port_id < max_port_num; port_id++) {
+                       mac_cb = devm_kzalloc(dsaf_dev->dev, sizeof(*mac_cb),
+                                             GFP_KERNEL);
+                       if (!mac_cb)
+                               return -ENOMEM;
+
+                       mac_cb->mac_id = port_id;
+                       dsaf_dev->mac_cb[port_id] = mac_cb;
+               }
+       }
+       /* init mac_cb for all ports */
+       for (port_id = 0; port_id < max_port_num; port_id++) {
+               mac_cb = dsaf_dev->mac_cb[port_id];
+               if (!mac_cb)
+                       continue;
 
-               mac_cb = &dsaf_dev->mac_cb[i];
+               ret = hns_mac_get_cfg(dsaf_dev, mac_cb);
+               if (ret)
+                       return ret;
                ret = hns_mac_init_ex(mac_cb);
                if (ret)
-                       goto free_mac_cb;
+                       return ret;
        }
 
        return 0;
-
-free_mac_cb:
-       dsaf_dev->mac_cb = NULL;
-
-       return ret;
 }
 
 void hns_mac_uninit(struct dsaf_device *dsaf_dev)
 {
-       cpld_led_reset(dsaf_dev->mac_cb);
-       dsaf_dev->mac_cb = NULL;
+       int i;
+       int max_port_num = hns_mac_get_max_port_num(dsaf_dev);
+
+       for (i = 0; i < max_port_num; i++) {
+               cpld_led_reset(dsaf_dev->mac_cb[i]);
+               dsaf_dev->mac_cb[i] = NULL;
+       }
 }
 
 int hns_mac_config_mac_loopback(struct hns_mac_cb *mac_cb,
@@ -892,7 +982,7 @@ void hns_set_led_opt(struct hns_mac_cb *mac_cb)
 int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
                        enum hnae_led_state status)
 {
-       if (!mac_cb || !mac_cb->cpld_vaddr)
+       if (!mac_cb || !mac_cb->cpld_ctrl)
                return 0;
 
        return cpld_set_led_id(mac_cb, status);
index 823b6e78c8aadc36b239fcde643ad34790130274..97ce9a750aaf47d39b5b0d604eb7845e42628676 100644 (file)
 #ifndef _HNS_DSAF_MAC_H
 #define _HNS_DSAF_MAC_H
 
-#include <linux/phy.h>
-#include <linux/kernel.h>
 #include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
 #include "hns_dsaf_main.h"
 
 struct dsaf_device;
@@ -310,10 +311,15 @@ struct hns_mac_cb {
        struct device *dev;
        struct dsaf_device *dsaf_dev;
        struct mac_priv priv;
+       struct fwnode_handle *fw_port;
        u8 __iomem *vaddr;
-       u8 __iomem *cpld_vaddr;
        u8 __iomem *sys_ctl_vaddr;
        u8 __iomem *serdes_vaddr;
+       struct regmap *serdes_ctrl;
+       struct regmap *cpld_ctrl;
+       u32 cpld_ctrl_reg;
+       u32 port_rst_off;
+       u32 port_mode_off;
        struct mac_entry_idx addr_entry_idx[DSAF_MAX_VM_NUM];
        u8 sfp_prsnt;
        u8 cpld_led_value;
index 8439f6d8e360acfd05a4534f53f192556138966a..1c2ddb25e77688cf6617d48ff73ad37db9f86c96 100644 (file)
@@ -7,27 +7,29 @@
  * (at your option) any later version.
  */
 
-#include <linux/module.h>
-#include <linux/kernel.h>
+#include <linux/device.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/netdevice.h>
-#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
-#include <linux/device.h>
+#include <linux/platform_device.h>
 #include <linux/vmalloc.h>
 
+#include "hns_dsaf_mac.h"
 #include "hns_dsaf_main.h"
-#include "hns_dsaf_rcb.h"
 #include "hns_dsaf_ppe.h"
-#include "hns_dsaf_mac.h"
+#include "hns_dsaf_rcb.h"
 
 const char *g_dsaf_mode_match[DSAF_MODE_MAX] = {
        [DSAF_MODE_DISABLE_2PORT_64VM] = "2port-64vf",
        [DSAF_MODE_DISABLE_6PORT_0VM] = "6port-16rss",
        [DSAF_MODE_DISABLE_6PORT_16VM] = "6port-16vf",
+       [DSAF_MODE_DISABLE_SP] = "single-port",
 };
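
With this entry in place, a dsaf node whose mode string reads "single-port"
parses to DSAF_MODE_DISABLE_SP, which in turn makes the HNS_DSAF_IS_DEBUG()
checks introduced throughout this series evaluate true for the whole device.
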
 
 int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
@@ -35,8 +37,13 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
        int ret, i;
        u32 desc_num;
        u32 buf_size;
+       u32 reset_offset = 0;
+       u32 res_idx = 0;
        const char *mode_str;
+       struct regmap *syscon;
+       struct resource *res;
        struct device_node *np = dsaf_dev->dev->of_node;
+       struct platform_device *pdev = to_platform_device(dsaf_dev->dev);
 
        if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v1"))
                dsaf_dev->dsaf_ver = AE_VERSION_1;
@@ -73,42 +80,68 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
        else
                dsaf_dev->dsaf_tc_mode = HRD_DSAF_4TC_MODE;
 
-       dsaf_dev->sc_base = of_iomap(np, 0);
-       if (!dsaf_dev->sc_base) {
-               dev_err(dsaf_dev->dev,
-                       "%s of_iomap 0 fail!\n", dsaf_dev->ae_dev.name);
-               ret = -ENOMEM;
-               goto unmap_base_addr;
-       }
+       syscon = syscon_node_to_regmap(
+                       of_parse_phandle(np, "subctrl-syscon", 0));
+       if (IS_ERR_OR_NULL(syscon)) {
+               res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++);
+               if (!res) {
+                       dev_err(dsaf_dev->dev, "subctrl info is needed!\n");
+                       return -ENOMEM;
+               }
+               dsaf_dev->sc_base = devm_ioremap_resource(&pdev->dev, res);
+               if (!dsaf_dev->sc_base) {
+                       dev_err(dsaf_dev->dev, "subctrl can not map!\n");
+                       return -ENOMEM;
+               }
 
-       dsaf_dev->sds_base = of_iomap(np, 1);
-       if (!dsaf_dev->sds_base) {
-               dev_err(dsaf_dev->dev,
-                       "%s of_iomap 1 fail!\n", dsaf_dev->ae_dev.name);
-               ret = -ENOMEM;
-               goto unmap_base_addr;
+               res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++);
+               if (!res) {
+                       dev_err(dsaf_dev->dev, "serdes-ctrl info is needed!\n");
+                       return -ENOMEM;
+               }
+               dsaf_dev->sds_base = devm_ioremap_resource(&pdev->dev, res);
+               if (!dsaf_dev->sds_base) {
+                       dev_err(dsaf_dev->dev, "serdes-ctrl can not map!\n");
+                       return -ENOMEM;
+               }
+       } else {
+               dsaf_dev->sub_ctrl = syscon;
        }
 
-       dsaf_dev->ppe_base = of_iomap(np, 2);
-       if (!dsaf_dev->ppe_base) {
-               dev_err(dsaf_dev->dev,
-                       "%s of_iomap 2 fail!\n", dsaf_dev->ae_dev.name);
-               ret = -ENOMEM;
-               goto unmap_base_addr;
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ppe-base");
+       if (!res) {
+               res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++);
+               if (!res) {
+                       dev_err(dsaf_dev->dev, "ppe-base info is needed!\n");
+                       return -ENOMEM;
+               }
        }
-
-       dsaf_dev->io_base = of_iomap(np, 3);
-       if (!dsaf_dev->io_base) {
-               dev_err(dsaf_dev->dev,
-                       "%s of_iomap 3 fail!\n", dsaf_dev->ae_dev.name);
-               ret = -ENOMEM;
-               goto unmap_base_addr;
+       dsaf_dev->ppe_base = devm_ioremap_resource(&pdev->dev, res);
+       if (!dsaf_dev->ppe_base) {
+               dev_err(dsaf_dev->dev, "ppe-base resource can not map!\n");
+               return -ENOMEM;
+       }
+       dsaf_dev->ppe_paddr = res->start;
+
+       if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
+               res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+                                                  "dsaf-base");
+               if (!res) {
+                       res = platform_get_resource(pdev, IORESOURCE_MEM,
+                                                   res_idx);
+                       if (!res) {
+                               dev_err(dsaf_dev->dev,
+                                       "dsaf-base info is needed!\n");
+                               return -ENOMEM;
+                       }
+               }
+               dsaf_dev->io_base = devm_ioremap_resource(&pdev->dev, res);
+               if (!dsaf_dev->io_base) {
+                       dev_err(dsaf_dev->dev, "dsaf-base resource can not map!\n");
+                       return -ENOMEM;
+               }
        }
 
-       dsaf_dev->cpld_base = of_iomap(np, 4);
-       if (!dsaf_dev->cpld_base)
-               dev_dbg(dsaf_dev->dev, "NO CPLD ADDR");
-
        ret = of_property_read_u32(np, "desc-num", &desc_num);
        if (ret < 0 || desc_num < HNS_DSAF_MIN_DESC_CNT ||
            desc_num > HNS_DSAF_MAX_DESC_CNT) {
@@ -118,6 +151,13 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
        }
        dsaf_dev->desc_num = desc_num;
 
+       ret = of_property_read_u32(np, "reset-field-offset", &reset_offset);
+       if (ret < 0) {
+               dev_dbg(dsaf_dev->dev,
+                       "get reset-field-offset fail, ret=%d!\r\n", ret);
+       }
+       dsaf_dev->reset_offset = reset_offset;
+
        ret = of_property_read_u32(np, "buf-size", &buf_size);
        if (ret < 0) {
                dev_err(dsaf_dev->dev,
@@ -149,8 +189,6 @@ unmap_base_addr:
                iounmap(dsaf_dev->sds_base);
        if (dsaf_dev->sc_base)
                iounmap(dsaf_dev->sc_base);
-       if (dsaf_dev->cpld_base)
-               iounmap(dsaf_dev->cpld_base);
        return ret;
 }
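
One caveat for readers reusing the mapping pattern added above:
devm_ioremap_resource() reports failure with an ERR_PTR(), never NULL, so a
defensive variant of those checks inside hns_dsaf_get_cfg() would read
(sketch):

    dsaf_dev->sc_base = devm_ioremap_resource(&pdev->dev, res);
    if (IS_ERR(dsaf_dev->sc_base))
            return PTR_ERR(dsaf_dev->sc_base);
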
 
@@ -167,9 +205,6 @@ static void hns_dsaf_free_cfg(struct dsaf_device *dsaf_dev)
 
        if (dsaf_dev->sc_base)
                iounmap(dsaf_dev->sc_base);
-
-       if (dsaf_dev->cpld_base)
-               iounmap(dsaf_dev->cpld_base);
 }
 
 /**
@@ -217,9 +252,7 @@ static void hns_dsaf_mix_def_qid_cfg(struct dsaf_device *dsaf_dev)
        u32 q_id, q_num_per_port;
        u32 i;
 
-       hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode,
-                              HNS_DSAF_COMM_SERVICE_NW_IDX,
-                              &max_vfn, &max_q_per_vf);
+       hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode, &max_vfn, &max_q_per_vf);
        q_num_per_port = max_vfn * max_q_per_vf;
 
        for (i = 0, q_id = 0; i < DSAF_SERVICE_NW_NUM; i++) {
@@ -239,9 +272,7 @@ static void hns_dsaf_inner_qid_cfg(struct dsaf_device *dsaf_dev)
        if (AE_IS_VER1(dsaf_dev->dsaf_ver))
                return;
 
-       hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode,
-                              HNS_DSAF_COMM_SERVICE_NW_IDX,
-                              &max_vfn, &max_q_per_vf);
+       hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode, &max_vfn, &max_q_per_vf);
        q_num_per_port = max_vfn * max_q_per_vf;
 
        for (mac_id = 0, q_id = 0; mac_id < DSAF_SERVICE_NW_NUM; mac_id++) {
@@ -712,13 +743,15 @@ static void hns_dsaf_tbl_tcam_data_ucast_pul(
 
 void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en)
 {
-       dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG, DSAF_CFG_MIX_MODE_S, !!en);
+       if (!HNS_DSAF_IS_DEBUG(dsaf_dev))
+               dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG,
+                                DSAF_CFG_MIX_MODE_S, !!en);
 }
 
 void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en)
 {
        if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
-           dsaf_dev->mac_cb[mac_id].mac_type == HNAE_PORT_DEBUG)
+           dsaf_dev->mac_cb[mac_id]->mac_type == HNAE_PORT_DEBUG)
                return;
 
        dsaf_set_dev_bit(dsaf_dev, DSAFV2_SERDES_LBK_0_REG + 4 * mac_id,
@@ -1307,6 +1340,9 @@ static int hns_dsaf_init(struct dsaf_device *dsaf_dev)
        u32 i;
        int ret;
 
+       if (HNS_DSAF_IS_DEBUG(dsaf_dev))
+               return 0;
+
        ret = hns_dsaf_init_hw(dsaf_dev);
        if (ret)
                return ret;
index e8eedc57129644d27c83e1eaf5f3e77cbbc11fbf..f0502ba0a677674493d36957d269967d72bd9826 100644 (file)
@@ -41,6 +41,7 @@ struct hns_mac_cb;
 #define DSAF_STATIC_NUM 28
 
 #define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
+#define HNS_DSAF_IS_DEBUG(dev) ((dev)->dsaf_mode == DSAF_MODE_DISABLE_SP)
 
 enum hal_dsaf_mode {
        HRD_DSAF_NO_DSAF_MODE   = 0x0,
@@ -117,6 +118,7 @@ enum dsaf_mode {
        DSAF_MODE_ENABLE_32VM,  /**< en DSAF-mode, support 32 VM */
        DSAF_MODE_ENABLE_128VM, /**< en DSAF-mode, support 128 VM */
        DSAF_MODE_ENABLE,               /**< before is enable DSAF mode*/
+       DSAF_MODE_DISABLE_SP,   /**< non-dsaf, single-port mode */
        DSAF_MODE_DISABLE_FIX,  /**< non-dasf, fixed to queue*/
        DSAF_MODE_DISABLE_2PORT_8VM,    /**< non-dasf, 2port 8VM */
        DSAF_MODE_DISABLE_2PORT_16VM,   /**< non-dasf, 2port 16VM */
@@ -275,10 +277,12 @@ struct dsaf_device {
        u8 __iomem *sds_base;
        u8 __iomem *ppe_base;
        u8 __iomem *io_base;
-       u8 __iomem *cpld_base;
+       struct regmap *sub_ctrl;
+       phys_addr_t ppe_paddr;
 
        u32 desc_num; /*  desc num per queue*/
        u32 buf_size; /*  ring buffer size */
+       u32 reset_offset; /* reset field offset in sub sysctrl */
        int buf_size_type; /* ring buffer size-type */
        enum dsaf_mode dsaf_mode;        /* dsaf mode  */
        enum hal_dsaf_mode dsaf_en;
@@ -287,7 +291,7 @@ struct dsaf_device {
 
        struct ppe_common_cb *ppe_common[DSAF_COMM_DEV_NUM];
        struct rcb_common_cb *rcb_common[DSAF_COMM_DEV_NUM];
-       struct hns_mac_cb *mac_cb;
+       struct hns_mac_cb *mac_cb[DSAF_MAX_PORT_NUM];
 
        struct dsaf_hw_stats hw_stats[DSAF_NODE_NUM];
        struct dsaf_int_stat int_stat;
@@ -359,14 +363,6 @@ static inline void hns_dsaf_tbl_line_addr_cfg(struct dsaf_device *dsaf_dev,
                           tab_line_addr);
 }
 
-static inline int hns_dsaf_get_comm_idx_by_port(int port)
-{
-       if ((port < DSAF_COMM_CHN) || (port == DSAF_MAX_PORT_NUM_PER_CHIP))
-               return 0;
-       else
-               return (port - DSAF_COMM_CHN + 1);
-}
-
 static inline struct hnae_vf_cb *hns_ae_get_vf_cb(
        struct hnae_handle *handle)
 {
index e69b02287c44a78fcdd960948f7f29bc96152bb3..a837bb9e3839fa3bb1c05410531e94d96f201a1e 100644 (file)
@@ -7,10 +7,30 @@
  * (at your option) any later version.
  */
 
-#include "hns_dsaf_misc.h"
 #include "hns_dsaf_mac.h"
-#include "hns_dsaf_reg.h"
+#include "hns_dsaf_misc.h"
 #include "hns_dsaf_ppe.h"
+#include "hns_dsaf_reg.h"
+
+static void dsaf_write_sub(struct dsaf_device *dsaf_dev, u32 reg, u32 val)
+{
+       if (dsaf_dev->sub_ctrl)
+               dsaf_write_syscon(dsaf_dev->sub_ctrl, reg, val);
+       else
+               dsaf_write_reg(dsaf_dev->sc_base, reg, val);
+}
+
+static u32 dsaf_read_sub(struct dsaf_device *dsaf_dev, u32 reg)
+{
+       u32 ret;
+
+       if (dsaf_dev->sub_ctrl)
+               ret = dsaf_read_syscon(dsaf_dev->sub_ctrl, reg);
+       else
+               ret = dsaf_read_reg(dsaf_dev->sc_base, reg);
+
+       return ret;
+}
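
dsaf_write_sub()/dsaf_read_sub() let the same code drive either the new
syscon regmap or the legacy memory-mapped sub-controller. The syscon
accessors are presumably thin regmap wrappers along these lines (a sketch,
not part of this hunk):

    #include <linux/regmap.h>

    static inline void dsaf_write_syscon(struct regmap *base, u32 reg, u32 val)
    {
            regmap_write(base, reg, val);
    }

    static inline u32 dsaf_read_syscon(struct regmap *base, u32 reg)
    {
            unsigned int val;

            regmap_read(base, reg, &val);
            return val;
    }
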
 
 void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
                      u16 speed, int data)
@@ -22,8 +42,8 @@ void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
                pr_err("sfp_led_opt mac_dev is null!\n");
                return;
        }
-       if (!mac_cb->cpld_vaddr) {
-               dev_err(mac_cb->dev, "mac_id=%d, cpld_vaddr is null !\n",
+       if (!mac_cb->cpld_ctrl) {
+               dev_err(mac_cb->dev, "mac_id=%d, cpld syscon is null !\n",
                        mac_cb->mac_id);
                return;
        }
@@ -40,21 +60,24 @@ void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
                dsaf_set_bit(value, DSAF_LED_DATA_B, data);
 
                if (value != mac_cb->cpld_led_value) {
-                       dsaf_write_b(mac_cb->cpld_vaddr, value);
+                       dsaf_write_syscon(mac_cb->cpld_ctrl,
+                                         mac_cb->cpld_ctrl_reg, value);
                        mac_cb->cpld_led_value = value;
                }
        } else {
-               dsaf_write_b(mac_cb->cpld_vaddr, CPLD_LED_DEFAULT_VALUE);
+               dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
+                                 CPLD_LED_DEFAULT_VALUE);
                mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE;
        }
 }
 
 void cpld_led_reset(struct hns_mac_cb *mac_cb)
 {
-       if (!mac_cb || !mac_cb->cpld_vaddr)
+       if (!mac_cb || !mac_cb->cpld_ctrl)
                return;
 
-       dsaf_write_b(mac_cb->cpld_vaddr, CPLD_LED_DEFAULT_VALUE);
+       dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
+                         CPLD_LED_DEFAULT_VALUE);
        mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE;
 }
 
@@ -63,15 +86,19 @@ int cpld_set_led_id(struct hns_mac_cb *mac_cb,
 {
        switch (status) {
        case HNAE_LED_ACTIVE:
-               mac_cb->cpld_led_value = dsaf_read_b(mac_cb->cpld_vaddr);
+               mac_cb->cpld_led_value =
+                       dsaf_read_syscon(mac_cb->cpld_ctrl,
+                                        mac_cb->cpld_ctrl_reg);
                dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
                             CPLD_LED_ON_VALUE);
-               dsaf_write_b(mac_cb->cpld_vaddr, mac_cb->cpld_led_value);
+               dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
+                                 mac_cb->cpld_led_value);
                return 2;
        case HNAE_LED_INACTIVE:
                dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
                             CPLD_LED_DEFAULT_VALUE);
-               dsaf_write_b(mac_cb->cpld_vaddr, mac_cb->cpld_led_value);
+               dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
+                                 mac_cb->cpld_led_value);
                break;
        default:
                break;
@@ -95,10 +122,8 @@ void hns_dsaf_rst(struct dsaf_device *dsaf_dev, u32 val)
                nt_reg_addr = DSAF_SUB_SC_NT_RESET_DREQ_REG;
        }
 
-       dsaf_write_reg(dsaf_dev->sc_base, xbar_reg_addr,
-                      RESET_REQ_OR_DREQ);
-       dsaf_write_reg(dsaf_dev->sc_base, nt_reg_addr,
-                      RESET_REQ_OR_DREQ);
+       dsaf_write_sub(dsaf_dev, xbar_reg_addr, RESET_REQ_OR_DREQ);
+       dsaf_write_sub(dsaf_dev, nt_reg_addr, RESET_REQ_OR_DREQ);
 }
 
 void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
@@ -110,14 +135,14 @@ void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
                return;
 
        reg_val |= RESET_REQ_OR_DREQ;
-       reg_val |= 0x2082082 << port;
+       reg_val |= 0x2082082 << dsaf_dev->mac_cb[port]->port_rst_off;
 
        if (val == 0)
                reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG;
        else
                reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG;
 
-       dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+       dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
 }
 
 void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
@@ -129,68 +154,63 @@ void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
        if (port >= DSAF_XGE_NUM)
                return;
 
-       reg_val |= XGMAC_TRX_CORE_SRST_M << port;
+       reg_val |= XGMAC_TRX_CORE_SRST_M
+               << dsaf_dev->mac_cb[port]->port_rst_off;
 
        if (val == 0)
                reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG;
        else
                reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG;
 
-       dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+       dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
 }
 
 void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
 {
        u32 reg_val_1;
        u32 reg_val_2;
+       u32 port_rst_off;
 
        if (port >= DSAF_GE_NUM)
                return;
 
-       if (port < DSAF_SERVICE_NW_NUM) {
+       if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
                reg_val_1  = 0x1 << port;
+               port_rst_off = dsaf_dev->mac_cb[port]->port_rst_off;
                /* there is difference between V1 and V2 in register.*/
                if (AE_IS_VER1(dsaf_dev->dsaf_ver))
-                       reg_val_2  = 0x1041041 << port;
+                       reg_val_2  = 0x1041041 << port_rst_off;
                else
-                       reg_val_2  = 0x2082082 << port;
+                       reg_val_2  = 0x2082082 << port_rst_off;
 
                if (val == 0) {
-                       dsaf_write_reg(dsaf_dev->sc_base,
-                                      DSAF_SUB_SC_GE_RESET_REQ1_REG,
+                       dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG,
                                       reg_val_1);
 
-                       dsaf_write_reg(dsaf_dev->sc_base,
-                                      DSAF_SUB_SC_GE_RESET_REQ0_REG,
+                       dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ0_REG,
                                       reg_val_2);
                } else {
-                       dsaf_write_reg(dsaf_dev->sc_base,
-                                      DSAF_SUB_SC_GE_RESET_DREQ0_REG,
+                       dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_DREQ0_REG,
                                       reg_val_2);
 
-                       dsaf_write_reg(dsaf_dev->sc_base,
-                                      DSAF_SUB_SC_GE_RESET_DREQ1_REG,
+                       dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_DREQ1_REG,
                                       reg_val_1);
                }
        } else {
-               reg_val_1 = 0x15540 << (port - 6);
-               reg_val_2 = 0x100 << (port - 6);
+               reg_val_1 = 0x15540 << dsaf_dev->reset_offset;
+               reg_val_2 = 0x100 << dsaf_dev->reset_offset;
 
                if (val == 0) {
-                       dsaf_write_reg(dsaf_dev->sc_base,
-                                      DSAF_SUB_SC_GE_RESET_REQ1_REG,
+                       dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG,
                                       reg_val_1);
 
-                       dsaf_write_reg(dsaf_dev->sc_base,
-                                      DSAF_SUB_SC_PPE_RESET_REQ_REG,
+                       dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_PPE_RESET_REQ_REG,
                                       reg_val_2);
                } else {
-                       dsaf_write_reg(dsaf_dev->sc_base,
-                                      DSAF_SUB_SC_GE_RESET_DREQ1_REG,
+                       dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_DREQ1_REG,
                                       reg_val_1);
 
-                       dsaf_write_reg(dsaf_dev->sc_base,
-                                      DSAF_SUB_SC_PPE_RESET_DREQ_REG,
+                       dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_PPE_RESET_DREQ_REG,
                                       reg_val_2);
                }
        }
@@ -201,24 +221,23 @@ void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
        u32 reg_val = 0;
        u32 reg_addr;
 
-       reg_val |= RESET_REQ_OR_DREQ << port;
+       reg_val |= RESET_REQ_OR_DREQ << dsaf_dev->mac_cb[port]->port_rst_off;
 
        if (val == 0)
                reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG;
        else
                reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG;
 
-       dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+       dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
 }
 
 void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val)
 {
-       int comm_index = ppe_common->comm_index;
        struct dsaf_device *dsaf_dev = ppe_common->dsaf_dev;
        u32 reg_val;
        u32 reg_addr;
 
-       if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+       if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
                reg_val = RESET_REQ_OR_DREQ;
                if (val == 0)
                        reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG;
@@ -226,7 +245,7 @@ void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val)
                        reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG;
 
        } else {
-               reg_val = 0x100 << (comm_index - 1);
+               reg_val = 0x100 << dsaf_dev->reset_offset;
 
                if (val == 0)
                        reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG;
@@ -234,7 +253,7 @@ void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val)
                        reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG;
        }
 
-       dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+       dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
 }
 
 /**
@@ -246,36 +265,45 @@ phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb)
 {
        u32 mode;
        u32 reg;
-       u32 shift;
        bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver);
-       void __iomem *sys_ctl_vaddr = mac_cb->sys_ctl_vaddr;
        int mac_id = mac_cb->mac_id;
-       phy_interface_t phy_if = PHY_INTERFACE_MODE_NA;
+       phy_interface_t phy_if;
 
-       if (is_ver1 && (mac_id >= 6 && mac_id <= 7)) {
-               phy_if = PHY_INTERFACE_MODE_SGMII;
-       } else if (mac_id >= 0 && mac_id <= 3) {
-               reg = is_ver1 ? HNS_MAC_HILINK4_REG : HNS_MAC_HILINK4V2_REG;
-               mode = dsaf_read_reg(sys_ctl_vaddr, reg);
-               /* mac_id 0, 1, 2, 3 ---> hilink4 lane 0, 1, 2, 3 */
-               shift = is_ver1 ? 0 : mac_id;
-               if (dsaf_get_bit(mode, shift))
-                       phy_if = PHY_INTERFACE_MODE_XGMII;
+       if (is_ver1) {
+               if (HNS_DSAF_IS_DEBUG(mac_cb->dsaf_dev))
+                       return PHY_INTERFACE_MODE_SGMII;
+
+               if (mac_id >= 0 && mac_id <= 3)
+                       reg = HNS_MAC_HILINK4_REG;
                else
-                       phy_if = PHY_INTERFACE_MODE_SGMII;
-       } else if (mac_id >= 4 && mac_id <= 7) {
-               reg = is_ver1 ? HNS_MAC_HILINK3_REG : HNS_MAC_HILINK3V2_REG;
-               mode = dsaf_read_reg(sys_ctl_vaddr, reg);
-               /* mac_id 4, 5, 6, 7 ---> hilink3 lane 2, 3, 0, 1 */
-               shift = is_ver1 ? 0 : mac_id <= 5 ? mac_id - 2 : mac_id - 6;
-               if (dsaf_get_bit(mode, shift))
-                       phy_if = PHY_INTERFACE_MODE_XGMII;
+                       reg = HNS_MAC_HILINK3_REG;
+       } else {
+               if (!HNS_DSAF_IS_DEBUG(mac_cb->dsaf_dev) && mac_id <= 3)
+                       reg = HNS_MAC_HILINK4V2_REG;
                else
-                       phy_if = PHY_INTERFACE_MODE_SGMII;
+                       reg = HNS_MAC_HILINK3V2_REG;
        }
+
+       mode = dsaf_read_sub(mac_cb->dsaf_dev, reg);
+       if (dsaf_get_bit(mode, mac_cb->port_mode_off))
+               phy_if = PHY_INTERFACE_MODE_XGMII;
+       else
+               phy_if = PHY_INTERFACE_MODE_SGMII;
+
        return phy_if;
 }
 
+int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
+{
+       if (!mac_cb->cpld_ctrl)
+               return -ENODEV;
+
+       *sfp_prsnt = !dsaf_read_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg
+                                       + MAC_SFP_PORT_OFFSET);
+
+       return 0;
+}
+
 /**
  * hns_mac_config_sds_loopback - set loop back for serdes
  * @mac_cb: mac control block
@@ -312,7 +340,14 @@ int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, u8 en)
                                pr_info("no sfp in this eth\n");
        }
 
-       dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, !!en);
+       if (mac_cb->serdes_ctrl) {
+               u32 origin = dsaf_read_syscon(mac_cb->serdes_ctrl, reg_offset);
+
+               dsaf_set_field(origin, 1ull << 10, 10, !!en);
+               dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin);
+       } else {
+               dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, !!en);
+       }
 
        return 0;
 }
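
dsaf_set_field() is the read-modify-write helper these drivers use for
register fields; assuming its customary definition, the serdes loopback
toggle above simply flips bit 10 of the value read back from the syscon:

    #define dsaf_set_field(origin, mask, shift, val) \
            do { \
                    (origin) &= ~(mask); \
                    (origin) |= (((val) << (shift)) & (mask)); \
            } while (0)
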
index ab27b3b14ca3f7b761875f982aa511de89f15ef7..8cd151a5245ea1fba71cd2f5e1f14c3dfb272c1f 100644 (file)
@@ -61,22 +61,10 @@ void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb,
        }
 }
 
-static void __iomem *hns_ppe_common_get_ioaddr(
-       struct ppe_common_cb *ppe_common)
+static void __iomem *
+hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common)
 {
-       void __iomem *base_addr;
-
-       int idx = ppe_common->comm_index;
-
-       if (idx == HNS_DSAF_COMM_SERVICE_NW_IDX)
-               base_addr = ppe_common->dsaf_dev->ppe_base
-                       + PPE_COMMON_REG_OFFSET;
-       else
-               base_addr = ppe_common->dsaf_dev->sds_base
-                       + (idx - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET
-                       + PPE_COMMON_REG_OFFSET;
-
-       return base_addr;
+       return ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET;
 }
 
 /**
@@ -90,7 +78,7 @@ int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index)
        struct ppe_common_cb *ppe_common;
        int ppe_num;
 
-       if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+       if (!HNS_DSAF_IS_DEBUG(dsaf_dev))
                ppe_num = HNS_PPE_SERVICE_NW_ENGINE_NUM;
        else
                ppe_num = HNS_PPE_DEBUG_NW_ENGINE_NUM;
@@ -103,7 +91,7 @@ int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index)
        ppe_common->ppe_num = ppe_num;
        ppe_common->dsaf_dev = dsaf_dev;
        ppe_common->comm_index = comm_index;
-       if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+       if (!HNS_DSAF_IS_DEBUG(dsaf_dev))
                ppe_common->ppe_mode = PPE_COMMON_MODE_SERVICE;
        else
                ppe_common->ppe_mode = PPE_COMMON_MODE_DEBUG;
@@ -124,32 +112,8 @@ void hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index)
 static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
                                        int ppe_idx)
 {
-       void __iomem *base_addr;
-       int common_idx = ppe_common->comm_index;
-
-       if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE) {
-               base_addr = ppe_common->dsaf_dev->ppe_base +
-                       ppe_idx * PPE_REG_OFFSET;
-
-       } else {
-               base_addr = ppe_common->dsaf_dev->sds_base +
-                       (common_idx - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET;
-       }
 
-       return base_addr;
-}
-
-static int hns_ppe_get_port(struct ppe_common_cb *ppe_common, int idx)
-{
-       int port;
-
-       if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE)
-               port = idx;
-       else
-               port = HNS_PPE_SERVICE_NW_ENGINE_NUM
-                       + ppe_common->comm_index - 1;
-
-       return port;
+       return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET;
 }
 
 static void hns_ppe_get_cfg(struct ppe_common_cb *ppe_common)
@@ -164,7 +128,6 @@ static void hns_ppe_get_cfg(struct ppe_common_cb *ppe_common)
                ppe_cb->next = NULL;
                ppe_cb->ppe_common_cb = ppe_common;
                ppe_cb->index = i;
-               ppe_cb->port = hns_ppe_get_port(ppe_common, i);
                ppe_cb->io_base = hns_ppe_get_iobase(ppe_common, i);
                ppe_cb->virq = 0;
        }
@@ -318,7 +281,7 @@ static void hns_ppe_exc_irq_en(struct hns_ppe_cb *ppe_cb, int en)
 static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb)
 {
        struct ppe_common_cb *ppe_common_cb = ppe_cb->ppe_common_cb;
-       u32 port = ppe_cb->port;
+       u32 port = ppe_cb->index;
        struct dsaf_device *dsaf_dev = ppe_common_cb->dsaf_dev;
        int i;
 
@@ -377,7 +340,8 @@ void hns_ppe_uninit_ex(struct ppe_common_cb *ppe_common)
        u32 i;
 
        for (i = 0; i < ppe_common->ppe_num; i++) {
-               hns_ppe_uninit_hw(&ppe_common->ppe_cb[i]);
+               if (ppe_common->dsaf_dev->mac_cb[i])
+                       hns_ppe_uninit_hw(&ppe_common->ppe_cb[i]);
                memset(&ppe_common->ppe_cb[i], 0, sizeof(struct hns_ppe_cb));
        }
 }
@@ -410,8 +374,11 @@ void hns_ppe_reset_common(struct dsaf_device *dsaf_dev, u8 ppe_common_index)
        if (ret)
                return;
 
-       for (i = 0; i < ppe_common->ppe_num; i++)
-               hns_ppe_init_hw(&ppe_common->ppe_cb[i]);
+       for (i = 0; i < ppe_common->ppe_num; i++) {
+               /* We only need to initiate ppe when the port exists */
+               if (dsaf_dev->mac_cb[i])
+                       hns_ppe_init_hw(&ppe_common->ppe_cb[i]);
+       }
 
        ret = hns_rcb_common_init_hw(dsaf_dev->rcb_common[ppe_common_index]);
        if (ret)
index e9c0ec2fa0ddaea6519e0b3baf59144923f71610..9d8e643e8aa6ff518ab68d4af4c42d292a37ae47 100644 (file)
@@ -80,7 +80,6 @@ struct hns_ppe_cb {
        struct hns_ppe_hw_stats hw_stats;
 
        u8 index;       /* index in a ppe common device */
-       u8 port;                         /* port id in dsaf  */
        void __iomem *io_base;
        int virq;
        u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */
index 28ee26e5c47829c6baa8a2da65db6e05d6f3eba0..4ef6d23d998e36d17c9331f20822d694f988be20 100644 (file)
@@ -270,7 +270,7 @@ static void hns_rcb_set_port_timeout(
 
 static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common)
 {
-       if (rcb_common->comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+       if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
                return HNS_RCB_SERVICE_NW_ENGINE_NUM;
        else
                return HNS_RCB_DEBUG_NW_ENGINE_NUM;
@@ -430,36 +430,20 @@ static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb)
 static int hns_rcb_get_port_in_comm(
        struct rcb_common_cb *rcb_common, int ring_idx)
 {
-       int comm_index = rcb_common->comm_index;
-       int port;
-       int q_num;
 
-       if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
-               q_num = (int)rcb_common->max_q_per_vf * rcb_common->max_vfn;
-               port = ring_idx / q_num;
-       } else {
-               port = 0; /* config debug-ports port_id_in_comm to 0*/
-       }
-
-       return port;
+       return ring_idx / (rcb_common->max_q_per_vf * rcb_common->max_vfn);
 }
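
Editorial note: with the debug/service split gone, the ring-to-port mapping is plain integer division. A standalone sanity check (illustrative only, not driver code), assuming the 6-port service numbers from hns_rcb_get_queue_mode() below, max_vfn = 1 and max_q_per_vf = 16:

#include <assert.h>

/* mirrors ring_idx / (max_q_per_vf * max_vfn) for the 96-ring case */
static int example_port_of_ring(int ring_idx)
{
        return ring_idx / (16 * 1);
}

int main(void)
{
        assert(example_port_of_ring(0)  == 0);
        assert(example_port_of_ring(35) == 2);
        assert(example_port_of_ring(95) == 5); /* last ring of 96 */
        return 0;
}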
 
 #define SERVICE_RING_IRQ_IDX(v1) \
        ((v1) ? HNS_SERVICE_RING_IRQ_IDX : HNSV2_SERVICE_RING_IRQ_IDX)
-#define DEBUG_RING_IRQ_IDX(v1) \
-       ((v1) ? HNS_DEBUG_RING_IRQ_IDX : HNSV2_DEBUG_RING_IRQ_IDX)
-#define DEBUG_RING_IRQ_OFFSET(v1) \
-       ((v1) ? HNS_DEBUG_RING_IRQ_OFFSET : HNSV2_DEBUG_RING_IRQ_OFFSET)
 static int hns_rcb_get_base_irq_idx(struct rcb_common_cb *rcb_common)
 {
-       int comm_index = rcb_common->comm_index;
        bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);
 
-       if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+       if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
                return SERVICE_RING_IRQ_IDX(is_ver1);
        else
-               return  DEBUG_RING_IRQ_IDX(is_ver1) +
-                       (comm_index - 1) * DEBUG_RING_IRQ_OFFSET(is_ver1);
+               return  HNS_DEBUG_RING_IRQ_IDX;
 }
 
 #define RCB_COMM_BASE_TO_RING_BASE(base, ringid)\
@@ -549,7 +533,7 @@ int hns_rcb_set_coalesce_usecs(
                return 0;
 
        if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
-               if (rcb_common->comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+               if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) {
                        dev_err(rcb_common->dsaf_dev->dev,
                                "error: not support coalesce_usecs setting!\n");
                        return -EINVAL;
@@ -601,113 +585,82 @@ int hns_rcb_set_coalesced_frames(
  *@max_vfn: max vfn number
  *@max_q_per_vf: max ring number per VM
  */
-void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index,
-                           u16 *max_vfn, u16 *max_q_per_vf)
+void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, u16 *max_vfn,
+                           u16 *max_q_per_vf)
 {
-       if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
-               switch (dsaf_mode) {
-               case DSAF_MODE_DISABLE_6PORT_0VM:
-                       *max_vfn = 1;
-                       *max_q_per_vf = 16;
-                       break;
-               case DSAF_MODE_DISABLE_FIX:
-                       *max_vfn = 1;
-                       *max_q_per_vf = 1;
-                       break;
-               case DSAF_MODE_DISABLE_2PORT_64VM:
-                       *max_vfn = 64;
-                       *max_q_per_vf = 1;
-                       break;
-               case DSAF_MODE_DISABLE_6PORT_16VM:
-                       *max_vfn = 16;
-                       *max_q_per_vf = 1;
-                       break;
-               default:
-                       *max_vfn = 1;
-                       *max_q_per_vf = 16;
-                       break;
-               }
-       } else {
+       switch (dsaf_mode) {
+       case DSAF_MODE_DISABLE_6PORT_0VM:
+               *max_vfn = 1;
+               *max_q_per_vf = 16;
+               break;
+       case DSAF_MODE_DISABLE_FIX:
+       case DSAF_MODE_DISABLE_SP:
                *max_vfn = 1;
                *max_q_per_vf = 1;
+               break;
+       case DSAF_MODE_DISABLE_2PORT_64VM:
+               *max_vfn = 64;
+               *max_q_per_vf = 1;
+               break;
+       case DSAF_MODE_DISABLE_6PORT_16VM:
+               *max_vfn = 16;
+               *max_q_per_vf = 1;
+               break;
+       default:
+               *max_vfn = 1;
+               *max_q_per_vf = 16;
+               break;
        }
 }
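
Editorial note: a minimal caller sketch for the simplified helper; the surrounding dsaf_dev variable is assumed, not from this commit:

        u16 max_vfn, max_q_per_vf;

        /* e.g. DSAF_MODE_DISABLE_2PORT_64VM -> max_vfn = 64, max_q_per_vf = 1;
         *      DSAF_MODE_DISABLE_6PORT_0VM  -> max_vfn = 1,  max_q_per_vf = 16
         */
        hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode, &max_vfn, &max_q_per_vf);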
 
-int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev, int comm_index)
+int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
 {
-       if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
-               switch (dsaf_dev->dsaf_mode) {
-               case DSAF_MODE_ENABLE_FIX:
-                       return 1;
-
-               case DSAF_MODE_DISABLE_FIX:
-                       return 6;
-
-               case DSAF_MODE_ENABLE_0VM:
-                       return 32;
-
-               case DSAF_MODE_DISABLE_6PORT_0VM:
-               case DSAF_MODE_ENABLE_16VM:
-               case DSAF_MODE_DISABLE_6PORT_2VM:
-               case DSAF_MODE_DISABLE_6PORT_16VM:
-               case DSAF_MODE_DISABLE_6PORT_4VM:
-               case DSAF_MODE_ENABLE_8VM:
-                       return 96;
-
-               case DSAF_MODE_DISABLE_2PORT_16VM:
-               case DSAF_MODE_DISABLE_2PORT_8VM:
-               case DSAF_MODE_ENABLE_32VM:
-               case DSAF_MODE_DISABLE_2PORT_64VM:
-               case DSAF_MODE_ENABLE_128VM:
-                       return 128;
-
-               default:
-                       dev_warn(dsaf_dev->dev,
-                                "get ring num fail,use default!dsaf_mode=%d\n",
-                                dsaf_dev->dsaf_mode);
-                       return 128;
-               }
-       } else {
+       switch (dsaf_dev->dsaf_mode) {
+       case DSAF_MODE_ENABLE_FIX:
+       case DSAF_MODE_DISABLE_SP:
                return 1;
+
+       case DSAF_MODE_DISABLE_FIX:
+               return 6;
+
+       case DSAF_MODE_ENABLE_0VM:
+               return 32;
+
+       case DSAF_MODE_DISABLE_6PORT_0VM:
+       case DSAF_MODE_ENABLE_16VM:
+       case DSAF_MODE_DISABLE_6PORT_2VM:
+       case DSAF_MODE_DISABLE_6PORT_16VM:
+       case DSAF_MODE_DISABLE_6PORT_4VM:
+       case DSAF_MODE_ENABLE_8VM:
+               return 96;
+
+       case DSAF_MODE_DISABLE_2PORT_16VM:
+       case DSAF_MODE_DISABLE_2PORT_8VM:
+       case DSAF_MODE_ENABLE_32VM:
+       case DSAF_MODE_DISABLE_2PORT_64VM:
+       case DSAF_MODE_ENABLE_128VM:
+               return 128;
+
+       default:
+               dev_warn(dsaf_dev->dev,
+                        "get ring num fail,use default!dsaf_mode=%d\n",
+                        dsaf_dev->dsaf_mode);
+               return 128;
        }
 }
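
Editorial note: the queue-mode and ring-count tables above stay mutually consistent; a quick cross-check (ports * max_vfn * max_q_per_vf):

        /* DSAF_MODE_DISABLE_FIX:        6 ports * 1  * 1  ==   6 rings
         * DSAF_MODE_DISABLE_6PORT_0VM:  6 ports * 1  * 16 ==  96 rings
         * DSAF_MODE_DISABLE_2PORT_64VM: 2 ports * 64 * 1  == 128 rings
         * DSAF_MODE_DISABLE_SP:         1 port  * 1  * 1  ==   1 ring
         */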
 
-void __iomem *hns_rcb_common_get_vaddr(struct dsaf_device *dsaf_dev,
-                                      int comm_index)
+void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
 {
-       void __iomem *base_addr;
-
-       if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
-               base_addr = dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET;
-       else
-               base_addr = dsaf_dev->sds_base
-                       + (comm_index - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET
-                       + RCB_COMMON_REG_OFFSET;
+       struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;
 
-       return base_addr;
+       return dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET;
 }
 
-static phys_addr_t hns_rcb_common_get_paddr(struct dsaf_device *dsaf_dev,
-                                           int comm_index)
+static phys_addr_t hns_rcb_common_get_paddr(struct rcb_common_cb *rcb_common)
 {
-       struct device_node *np = dsaf_dev->dev->of_node;
-       phys_addr_t phy_addr;
-       const __be32 *tmp_addr;
-       u64 addr_offset = 0;
-       u64 size = 0;
-       int index = 0;
-
-       if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
-               index    = 2;
-               addr_offset = RCB_COMMON_REG_OFFSET;
-       } else {
-               index    = 1;
-               addr_offset = (comm_index - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET +
-                               RCB_COMMON_REG_OFFSET;
-       }
-       tmp_addr  = of_get_address(np, index, &size, NULL);
-       phy_addr  = of_translate_address(np, tmp_addr);
-       return phy_addr + addr_offset;
+       struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;
+
+       return dsaf_dev->ppe_paddr + RCB_COMMON_REG_OFFSET;
 }
 
 int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
@@ -717,7 +670,7 @@ int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
        enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode;
        u16 max_vfn;
        u16 max_q_per_vf;
-       int ring_num = hns_rcb_get_ring_num(dsaf_dev, comm_index);
+       int ring_num = hns_rcb_get_ring_num(dsaf_dev);
 
        rcb_common =
                devm_kzalloc(dsaf_dev->dev, sizeof(*rcb_common) +
@@ -732,12 +685,12 @@ int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
 
        rcb_common->desc_num = dsaf_dev->desc_num;
 
-       hns_rcb_get_queue_mode(dsaf_mode, comm_index, &max_vfn, &max_q_per_vf);
+       hns_rcb_get_queue_mode(dsaf_mode, &max_vfn, &max_q_per_vf);
        rcb_common->max_vfn = max_vfn;
        rcb_common->max_q_per_vf = max_q_per_vf;
 
-       rcb_common->io_base = hns_rcb_common_get_vaddr(dsaf_dev, comm_index);
-       rcb_common->phy_base = hns_rcb_common_get_paddr(dsaf_dev, comm_index);
+       rcb_common->io_base = hns_rcb_common_get_vaddr(rcb_common);
+       rcb_common->phy_base = hns_rcb_common_get_paddr(rcb_common);
 
        dsaf_dev->rcb_common[comm_index] = rcb_common;
        return 0;
@@ -932,7 +885,7 @@ void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
 {
        u32 *regs = data;
        bool is_ver1 = AE_IS_VER1(rcb_com->dsaf_dev->dsaf_ver);
-       bool is_dbg = (rcb_com->comm_index != HNS_DSAF_COMM_SERVICE_NW_IDX);
+       bool is_dbg = HNS_DSAF_IS_DEBUG(rcb_com->dsaf_dev);
        u32 reg_tmp;
        u32 reg_num_tmp;
        u32 i = 0;
index eb61014ad615425c112bc3b22c24e022aa8a4bd4..bd54dac82ee0dcaea38750a13e61cdb855006346 100644 (file)
@@ -111,7 +111,7 @@ void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index);
 int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common);
 void hns_rcb_start(struct hnae_queue *q, u32 val);
 void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common);
-void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index,
+void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode,
                            u16 *max_vfn, u16 *max_q_per_vf);
 
 void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common);
index 7ff195e60b0288d84753132af3aa84012db83859..7c3b5103d1513ec217e50c2594721736f664e35c 100644 (file)
 #ifndef _DSAF_REG_H_
 #define _DSAF_REG_H_
 
-#define HNS_DEBUG_RING_IRQ_IDX 55
-#define HNS_SERVICE_RING_IRQ_IDX 59
-#define HNS_DEBUG_RING_IRQ_OFFSET 2
-#define HNSV2_DEBUG_RING_IRQ_IDX 409
-#define HNSV2_SERVICE_RING_IRQ_IDX 25
-#define HNSV2_DEBUG_RING_IRQ_OFFSET 9
-
-#define DSAF_MAX_PORT_NUM_PER_CHIP 8
-#define DSAF_SERVICE_PORT_NUM_PER_DSAF 6
-#define DSAF_MAX_VM_NUM 128
-
-#define DSAF_COMM_DEV_NUM 3
-#define DSAF_PPE_INODE_BASE 6
-#define HNS_DSAF_COMM_SERVICE_NW_IDX 0
+#include <linux/regmap.h>
+#define HNS_DEBUG_RING_IRQ_IDX         0
+#define HNS_SERVICE_RING_IRQ_IDX       59
+#define HNSV2_SERVICE_RING_IRQ_IDX     25
+
+#define DSAF_MAX_PORT_NUM      6
+#define DSAF_MAX_VM_NUM                128
+
+#define DSAF_COMM_DEV_NUM      1
+#define DSAF_PPE_INODE_BASE    6
 #define DSAF_DEBUG_NW_NUM      2
 #define DSAF_SERVICE_NW_NUM    6
 #define DSAF_COMM_CHN          DSAF_SERVICE_NW_NUM
 #define DSAF_GE_NUM            ((DSAF_SERVICE_NW_NUM) + (DSAF_DEBUG_NW_NUM))
-#define DSAF_PORT_NUM          ((DSAF_SERVICE_NW_NUM) + (DSAF_DEBUG_NW_NUM))
 #define DSAF_XGE_NUM           DSAF_SERVICE_NW_NUM
 #define DSAF_PORT_TYPE_NUM 3
 #define DSAF_NODE_NUM          18
@@ -994,6 +989,19 @@ static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg)
        return readl(reg_addr + reg);
 }
 
+static inline void dsaf_write_syscon(struct regmap *base, u32 reg, u32 value)
+{
+       regmap_write(base, reg, value);
+}
+
+static inline u32 dsaf_read_syscon(struct regmap *base, u32 reg)
+{
+       unsigned int val;
+
+       regmap_read(base, reg, &val);
+       return val;
+}
+
 #define dsaf_read_dev(a, reg) \
        dsaf_read_reg((a)->io_base, (reg))
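
Editorial note: a minimal sketch of using the new syscon wrappers with a regmap looked up from a device-tree phandle; the "subctrl-syscon" property name is hypothetical:

#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

static int example_syscon_set_bit0(struct device_node *np, u32 reg)
{
        struct regmap *base;

        base = syscon_regmap_lookup_by_phandle(np, "subctrl-syscon");
        if (IS_ERR(base))
                return PTR_ERR(base);

        /* read-modify-write through the new wrappers */
        dsaf_write_syscon(base, reg, dsaf_read_syscon(base, reg) | BIT(0));
        return 0;
}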
 
index 687204b780b02143486031971e7fe8b6af165f29..e621636e69b9e6b1f45a682e67027f1fb0003d21 100644 (file)
@@ -1275,7 +1275,7 @@ void hns_nic_net_reinit(struct net_device *netdev)
 {
        struct hns_nic_priv *priv = netdev_priv(netdev);
 
-       priv->netdev->trans_start = jiffies;
+       netif_trans_update(priv->netdev);
        while (test_and_set_bit(NIC_STATE_REINITING, &priv->state))
                usleep_range(1000, 2000);
 
@@ -1376,7 +1376,7 @@ static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
        ret = hns_nic_net_xmit_hw(ndev, skb,
                                  &tx_ring_data(priv, skb->queue_mapping));
        if (ret == NETDEV_TX_OK) {
-               ndev->trans_start = jiffies;
+               netif_trans_update(ndev);
                ndev->stats.tx_bytes += skb->len;
                ndev->stats.tx_packets++;
        }
@@ -1648,7 +1648,7 @@ static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
 
        rtnl_lock();
        /* put off any impending NetWatchDogTimeout */
-       priv->netdev->trans_start = jiffies;
+       netif_trans_update(priv->netdev);
 
        if (type == HNAE_PORT_DEBUG) {
                hns_nic_net_reinit(priv->netdev);
@@ -1873,6 +1873,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
        struct net_device *ndev;
        struct hns_nic_priv *priv;
        struct device_node *node = dev->of_node;
+       u32 port_id;
        int ret;
 
        ndev = alloc_etherdev_mq(sizeof(struct hns_nic_priv), NIC_MAX_Q_PER_VF);
@@ -1896,10 +1897,18 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
                dev_err(dev, "not find ae-handle\n");
                goto out_read_prop_fail;
        }
-
-       ret = of_property_read_u32(node, "port-id", &priv->port_id);
-       if (ret)
-               goto out_read_prop_fail;
+       /* try to find port-idx-in-ae first */
+       ret = of_property_read_u32(node, "port-idx-in-ae", &port_id);
+       if (ret) {
+               /* only for old code compatible */
+               ret = of_property_read_u32(node, "port-id", &port_id);
+               if (ret)
+                       goto out_read_prop_fail;
+               /* for old dts, we need to calculate the port offset */
+               port_id = port_id < HNS_SRV_OFFSET ? port_id + HNS_DEBUG_OFFSET
+                       : port_id - HNS_SRV_OFFSET;
+       }
+       priv->port_id = port_id;
 
        hns_init_mac_addr(ndev);
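
Editorial note: worked through with the constants added below (HNS_DEBUG_OFFSET = 6, HNS_SRV_OFFSET = 2), the compatibility remap moves the two old debug ports behind the six service ports:

        /* old "port-id"  ->  new port_id
         *   0 (debug)    ->  0 + 6 = 6
         *   1 (debug)    ->  1 + 6 = 7
         *   2 (service)  ->  2 - 2 = 0
         *   ...
         *   7 (service)  ->  7 - 2 = 5
         */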
 
index c68ab3d34fc224ef00f43532f929fbd533b2ee80..337efa582bacebe4eb91e26593a4435a6604b2cb 100644 (file)
@@ -18,6 +18,9 @@
 
 #include "hnae.h"
 
+#define HNS_DEBUG_OFFSET       6
+#define HNS_SRV_OFFSET         2
+
 enum hns_nic_state {
        NIC_STATE_TESTING = 0,
        NIC_STATE_RESETTING,
index 3daf2d4a7ca057a66e9ad797baa581c0d92caeba..631dbc7b4dbb9b95985a8b4a2431cc648ef74644 100644 (file)
@@ -1102,7 +1102,7 @@ static int hp100_open(struct net_device *dev)
                return -EAGAIN;
        }
 
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        netif_start_queue(dev);
 
        lp->lan_type = hp100_sense_lan(dev);
index 7ce6379fd1a362cfbad846f7881cdae0d03e6982..befb4ac3e2b0eaa09eaf2909cbcdcf57fd39c3a4 100644 (file)
@@ -1042,7 +1042,7 @@ static void i596_tx_timeout (struct net_device *dev)
                lp->last_restart = dev->stats.tx_packets;
        }
 
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        netif_wake_queue (dev);
 }
 
index c984998b34a02dfe4095be9479a71eb0326a1fbb..3dbc53c21baa4b02a6f4e16fbaab50d9ea09ba17 100644 (file)
@@ -960,7 +960,7 @@ static void i596_tx_timeout (struct net_device *dev)
                lp->last_restart = dev->stats.tx_packets;
        }
 
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        netif_wake_queue (dev);
 }
 
index 353f57f675d0211b378bc15c7924446fd52d974c..21c84cc9c871d5fd66d05b104426cf7252a1000a 100644 (file)
@@ -983,7 +983,7 @@ static void sun3_82586_timeout(struct net_device *dev)
                p->scb->cmd_cuc = CUC_START;
                sun3_attn586();
                WAIT_4_SCB_CMD();
-               dev->trans_start = jiffies; /* prevent tx timeout */
+               netif_trans_update(dev); /* prevent tx timeout */
                return 0;
        }
 #endif
@@ -996,7 +996,7 @@ static void sun3_82586_timeout(struct net_device *dev)
                sun3_82586_close(dev);
                sun3_82586_open(dev);
        }
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
 }
 
 /******************************************************
index 5d7db6c01c46c04adcf0b12b49e67610c0d67ca7..4c9771d57d6e7fce3a77e093e467fe2d0342243e 100644 (file)
@@ -301,7 +301,7 @@ static inline void emac_netif_stop(struct emac_instance *dev)
        dev->no_mcast = 1;
        netif_addr_unlock(dev->ndev);
        netif_tx_unlock_bh(dev->ndev);
-       dev->ndev->trans_start = jiffies;       /* prevent tx timeout */
+       netif_trans_update(dev->ndev);  /* prevent tx timeout */
        mal_poll_disable(dev->mal, &dev->commac);
        netif_tx_disable(dev->ndev);
 }
@@ -1377,7 +1377,7 @@ static inline int emac_xmit_finish(struct emac_instance *dev, int len)
                DBG2(dev, "stopped TX queue" NL);
        }
 
-       ndev->trans_start = jiffies;
+       netif_trans_update(ndev);
        ++dev->stats.tx_packets;
        dev->stats.tx_bytes += len;
 
index d3b9d103353ed12fd6f05caabc8a517687d4dce8..5b88cc690c22c1ce8e123cdb3c0d23908414f8df 100644 (file)
@@ -470,12 +470,38 @@ static struct mii_phy_def m88e1112_phy_def = {
        .ops            = &m88e1112_phy_ops,
 };
 
+static int ar8035_init(struct mii_phy *phy)
+{
+       phy_write(phy, 0x1d, 0x5); /* Address debug register 5 */
+       phy_write(phy, 0x1e, 0x2d47); /* Value copied from u-boot */
+       phy_write(phy, 0x1d, 0xb);    /* Address hib ctrl */
+       phy_write(phy, 0x1e, 0xbc20); /* Value copied from u-boot */
+
+       return 0;
+}
+
+static struct mii_phy_ops ar8035_phy_ops = {
+       .init           = ar8035_init,
+       .setup_aneg     = genmii_setup_aneg,
+       .setup_forced   = genmii_setup_forced,
+       .poll_link      = genmii_poll_link,
+       .read_link      = genmii_read_link,
+};
+
+static struct mii_phy_def ar8035_phy_def = {
+       .phy_id         = 0x004dd070,
+       .phy_id_mask    = 0xfffffff0,
+       .name           = "Atheros 8035 Gigabit Ethernet",
+       .ops            = &ar8035_phy_ops,
+};
+
 static struct mii_phy_def *mii_phy_table[] = {
        &et1011c_phy_def,
        &cis8201_phy_def,
        &bcm5248_phy_def,
        &m88e1111_phy_def,
        &m88e1112_phy_def,
+       &ar8035_phy_def,
        &genmii_phy_def,
        NULL
 };
index 6de0c7df56fad32f931472de277fb22a0964853c..f42129d09e2c23ba9fdb5cde890d50ecb7166a42 100644 (file)
@@ -3106,7 +3106,7 @@ static int e1000_maybe_stop_tx(struct net_device *netdev,
        return __e1000_maybe_stop_tx(netdev, size);
 }
 
-#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
+#define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X))
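
Editorial note: the old macro, (((S) >> (X)) + 1), reserves one descriptor too many whenever S is an exact multiple of 1 << X; the new form is exact ceiling division. A standalone check, assuming a 4 KiB (X = 12) per-descriptor limit:

#include <assert.h>

#define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X))

int main(void)
{
        assert(TXD_USE_COUNT(1,    12) == 1);
        assert(TXD_USE_COUNT(4096, 12) == 1); /* old macro returned 2 */
        assert(TXD_USE_COUNT(4097, 12) == 2);
        return 0;
}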
 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                                    struct net_device *netdev)
 {
@@ -3256,12 +3256,29 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                             nr_frags, mss);
 
        if (count) {
+               /* The number of descriptors needed is higher than in other
+                * Intel drivers due to a number of workarounds.  The
+                * breakdown is below:
+                * Data descriptors: MAX_SKB_FRAGS + 1
+                * Context Descriptor: 1
+                * Keep head from touching tail: 2
+                * Workarounds: 3
+                */
+               int desc_needed = MAX_SKB_FRAGS + 7;
+
                netdev_sent_queue(netdev, skb->len);
                skb_tx_timestamp(skb);
 
                e1000_tx_queue(adapter, tx_ring, tx_flags, count);
+
+               /* 82544 potentially requires twice as many data descriptors
+                * in order to guarantee buffers don't end on evenly-aligned
+                * dwords
+                */
+               if (adapter->pcix_82544)
+                       desc_needed += MAX_SKB_FRAGS + 1;
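                /* [Editor's note, not in the commit] With the common
                 * MAX_SKB_FRAGS value of 17 this reserves 17 + 7 = 24
                 * descriptors, or 24 + 17 + 1 = 42 on an 82544 in
                 * PCI-X mode.
                 */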
+
                /* Make sure there is space in the ring for the next send. */
-               e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
+               e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
 
                if (!skb->xmit_more ||
                    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
index a7f16c35ebcd26ef1197b15513491334865cc4af..269087cb7b963ae81cb1bafec5ed98a11f6c83c4 100644 (file)
@@ -242,7 +242,7 @@ static void e1000e_dump(struct e1000_adapter *adapter)
                dev_info(&adapter->pdev->dev, "Net device Info\n");
                pr_info("Device Name     state            trans_start      last_rx\n");
                pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
-                       netdev->state, netdev->trans_start, netdev->last_rx);
+                       netdev->state, dev_trans_start(netdev), netdev->last_rx);
        }
 
        /* Print Registers */
index b006ff66d028644243393229b5e231ae47301127..cac645329cea19a6bd25c665f6f0327d7ec3b85f 100644 (file)
@@ -1,7 +1,7 @@
 ################################################################################
 #
-# Intel Ethernet Switch Host Interface Driver
-# Copyright(c) 2013 - 2015 Intel Corporation.
+# Intel(R) Ethernet Switch Host Interface Driver
+# Copyright(c) 2013 - 2016 Intel Corporation.
 #
 # This program is free software; you can redistribute it and/or modify it
 # under the terms and conditions of the GNU General Public License,
@@ -22,7 +22,7 @@
 ################################################################################
 
 #
-# Makefile for the Intel(R) FM10000 Ethernet Switch Host Interface driver
+# Makefile for the Intel(R) Ethernet Switch Host Interface Driver
 #
 
 obj-$(CONFIG_FM10K) += fm10k.o
@@ -30,7 +30,6 @@ obj-$(CONFIG_FM10K) += fm10k.o
 fm10k-y := fm10k_main.o \
           fm10k_common.o \
           fm10k_pci.o \
-          fm10k_ptp.o \
           fm10k_netdev.o \
           fm10k_ethtool.o \
           fm10k_pf.o \
index 9c7fafef7cf6bd49953b311bb98127ffd29258bf..fcf106e545c518d9be64a875c3b2356c8ac32d78 100644 (file)
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -27,9 +27,6 @@
 #include <linux/rtnetlink.h>
 #include <linux/if_vlan.h>
 #include <linux/pci.h>
-#include <linux/net_tstamp.h>
-#include <linux/clocksource.h>
-#include <linux/ptp_clock_kernel.h>
 
 #include "fm10k_pf.h"
 #include "fm10k_vf.h"
@@ -333,6 +330,7 @@ struct fm10k_intfc {
        unsigned long last_reset;
        unsigned long link_down_event;
        bool host_ready;
+       bool lport_map_failed;
 
        u32 reta[FM10K_RETA_SIZE];
        u32 rssrk[FM10K_RSSRK_SIZE];
@@ -342,22 +340,8 @@ struct fm10k_intfc {
 
 #ifdef CONFIG_DEBUG_FS
        struct dentry *dbg_intfc;
-
 #endif /* CONFIG_DEBUG_FS */
-       struct ptp_clock_info ptp_caps;
-       struct ptp_clock *ptp_clock;
-
-       struct sk_buff_head ts_tx_skb_queue;
-       u32 tx_hwtstamp_timeouts;
 
-       struct hwtstamp_config ts_config;
-       /* We are unable to actually adjust the clock beyond the frequency
-        * value.  Once the clock is started there is no resetting it.  As
-        * such we maintain a separate offset from the actual hardware clock
-        * to allow for offset adjustment.
-        */
-       s64 ptp_adjust;
-       rwlock_t systime_lock;
 #ifdef CONFIG_DCB
        u8 pfc_en;
 #endif
@@ -546,21 +530,6 @@ static inline void fm10k_dbg_init(void) {}
 static inline void fm10k_dbg_exit(void) {}
 #endif /* CONFIG_DEBUG_FS */
 
-/* Time Stamping */
-void fm10k_systime_to_hwtstamp(struct fm10k_intfc *interface,
-                              struct skb_shared_hwtstamps *hwtstamp,
-                              u64 systime);
-void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb);
-void fm10k_ts_tx_hwtstamp(struct fm10k_intfc *interface, __le16 dglort,
-                         u64 systime);
-void fm10k_ts_reset(struct fm10k_intfc *interface);
-void fm10k_ts_init(struct fm10k_intfc *interface);
-void fm10k_ts_tx_subtask(struct fm10k_intfc *interface);
-void fm10k_ptp_register(struct fm10k_intfc *interface);
-void fm10k_ptp_unregister(struct fm10k_intfc *interface);
-int fm10k_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
-int fm10k_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
-
 /* DCB */
 #ifdef CONFIG_DCB
 void fm10k_dcbnl_set_ops(struct net_device *dev);
index 6cfae6ac04eac0984037e1620394f14bc85d3fce..5bbf19cfe29b64a971746ca5fa573366f4f80720 100644 (file)
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
index 45e4e5b1f20a83e809560e8b868657df064f3f48..50f71e997448e7104780c51adea3f633edc039f6 100644 (file)
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
index 2be4361839db2d270e13601b039953293bd96755..db4bd8bf9722420eb8979108add209d75c9fadd3 100644 (file)
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
index 5d6137faf7d17e31504d15eda87a814d1e9a132b..5116fd043630999bd9de724666d2b0d60077d5ba 100644 (file)
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
index a23748777b1b9a42cf6406b463735a4bd8c18f9f..9c0d87503977fa577b457ac48981a489527f2f72 100644 (file)
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -77,19 +77,6 @@ static const struct fm10k_stats fm10k_gstrings_global_stats[] = {
        FM10K_STAT("mac_rules_avail", hw.swapi.mac.avail),
 
        FM10K_STAT("tx_hang_count", tx_timeout_count),
-
-       FM10K_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
-};
-
-static const struct fm10k_stats fm10k_gstrings_debug_stats[] = {
-       FM10K_STAT("hw_sm_mbx_full", hw_sm_mbx_full),
-       FM10K_STAT("hw_csum_tx_good", hw_csum_tx_good),
-       FM10K_STAT("hw_csum_rx_good", hw_csum_rx_good),
-       FM10K_STAT("rx_switch_errors", rx_switch_errors),
-       FM10K_STAT("rx_drops", rx_drops),
-       FM10K_STAT("rx_pp_errors", rx_pp_errors),
-       FM10K_STAT("rx_link_errors", rx_link_errors),
-       FM10K_STAT("rx_length_errors", rx_length_errors),
 };
 
 static const struct fm10k_stats fm10k_gstrings_pf_stats[] = {
@@ -121,13 +108,21 @@ static const struct fm10k_stats fm10k_gstrings_mbx_stats[] = {
        FM10K_MBX_STAT("mbx_rx_mbmem_pushed", rx_mbmem_pushed),
 };
 
+#define FM10K_QUEUE_STAT(_name, _stat) { \
+       .stat_string = _name, \
+       .sizeof_stat = FIELD_SIZEOF(struct fm10k_ring, _stat), \
+       .stat_offset = offsetof(struct fm10k_ring, _stat) \
+}
+
+static const struct fm10k_stats fm10k_gstrings_queue_stats[] = {
+       FM10K_QUEUE_STAT("packets", stats.packets),
+       FM10K_QUEUE_STAT("bytes", stats.bytes),
+};
+
 #define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_global_stats)
-#define FM10K_DEBUG_STATS_LEN ARRAY_SIZE(fm10k_gstrings_debug_stats)
 #define FM10K_PF_STATS_LEN ARRAY_SIZE(fm10k_gstrings_pf_stats)
 #define FM10K_MBX_STATS_LEN ARRAY_SIZE(fm10k_gstrings_mbx_stats)
-
-#define FM10K_QUEUE_STATS_LEN(_n) \
-       ((_n) * 2 * (sizeof(struct fm10k_queue_stats) / sizeof(u64)))
+#define FM10K_QUEUE_STATS_LEN ARRAY_SIZE(fm10k_gstrings_queue_stats)
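
Editorial note: each FM10K_QUEUE_STAT entry locates a u64 by offset at runtime. A self-contained sketch of the same table-driven pattern, using stand-in types rather than the driver's structures:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct example_ring_stats { uint64_t packets, bytes; };
struct example_ring      { struct example_ring_stats stats; };

struct example_stat {
        const char *name;
        size_t      offset;
};

static const struct example_stat example_queue_stats[] = {
        { "packets", offsetof(struct example_ring, stats.packets) },
        { "bytes",   offsetof(struct example_ring, stats.bytes)   },
};

/* read one stat out of a ring, much as the ethtool path walks them */
static uint64_t example_read_stat(const struct example_ring *ring,
                                  const struct example_stat *stat)
{
        return *(const uint64_t *)((const char *)ring + stat->offset);
}

int main(void)
{
        struct example_ring ring = { .stats = { .packets = 7, .bytes = 512 } };

        assert(example_read_stat(&ring, &example_queue_stats[0]) == 7);
        assert(example_read_stat(&ring, &example_queue_stats[1]) == 512);
        return 0;
}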
 
 #define FM10K_STATIC_STATS_LEN (FM10K_GLOBAL_STATS_LEN + \
                                FM10K_NETDEV_STATS_LEN + \
@@ -145,12 +140,10 @@ enum fm10k_self_test_types {
 };
 
 enum {
-       FM10K_PRV_FLAG_DEBUG_STATS,
        FM10K_PRV_FLAG_LEN,
 };
 
 static const char fm10k_prv_flags[FM10K_PRV_FLAG_LEN][ETH_GSTRING_LEN] = {
-       "debug-statistics",
 };
 
 static void fm10k_add_stat_strings(char **p, const char *prefix,
@@ -169,7 +162,6 @@ static void fm10k_add_stat_strings(char **p, const char *prefix,
 static void fm10k_get_stat_strings(struct net_device *dev, u8 *data)
 {
        struct fm10k_intfc *interface = netdev_priv(dev);
-       struct fm10k_iov_data *iov_data = interface->iov_data;
        char *p = (char *)data;
        unsigned int i;
 
@@ -179,10 +171,6 @@ static void fm10k_get_stat_strings(struct net_device *dev, u8 *data)
        fm10k_add_stat_strings(&p, "", fm10k_gstrings_global_stats,
                               FM10K_GLOBAL_STATS_LEN);
 
-       if (interface->flags & FM10K_FLAG_DEBUG_STATS)
-               fm10k_add_stat_strings(&p, "", fm10k_gstrings_debug_stats,
-                                      FM10K_DEBUG_STATS_LEN);
-
        fm10k_add_stat_strings(&p, "", fm10k_gstrings_mbx_stats,
                               FM10K_MBX_STATS_LEN);
 
@@ -190,26 +178,18 @@ static void fm10k_get_stat_strings(struct net_device *dev, u8 *data)
                fm10k_add_stat_strings(&p, "", fm10k_gstrings_pf_stats,
                                       FM10K_PF_STATS_LEN);
 
-       if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) {
-               for (i = 0; i < iov_data->num_vfs; i++) {
-                       char prefix[ETH_GSTRING_LEN];
+       for (i = 0; i < interface->hw.mac.max_queues; i++) {
+               char prefix[ETH_GSTRING_LEN];
 
-                       snprintf(prefix, ETH_GSTRING_LEN, "vf_%u_", i);
-                       fm10k_add_stat_strings(&p, prefix,
-                                              fm10k_gstrings_mbx_stats,
-                                              FM10K_MBX_STATS_LEN);
-               }
-       }
+               snprintf(prefix, ETH_GSTRING_LEN, "tx_queue_%u_", i);
+               fm10k_add_stat_strings(&p, prefix,
+                                      fm10k_gstrings_queue_stats,
+                                      FM10K_QUEUE_STATS_LEN);
 
-       for (i = 0; i < interface->hw.mac.max_queues; i++) {
-               snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_packets", i);
-               p += ETH_GSTRING_LEN;
-               snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_bytes", i);
-               p += ETH_GSTRING_LEN;
-               snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_packets", i);
-               p += ETH_GSTRING_LEN;
-               snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_bytes", i);
-               p += ETH_GSTRING_LEN;
+               snprintf(prefix, ETH_GSTRING_LEN, "rx_queue_%u_", i);
+               fm10k_add_stat_strings(&p, prefix,
+                                      fm10k_gstrings_queue_stats,
+                                      FM10K_QUEUE_STATS_LEN);
        }
 }
 
@@ -236,7 +216,6 @@ static void fm10k_get_strings(struct net_device *dev,
 static int fm10k_get_sset_count(struct net_device *dev, int sset)
 {
        struct fm10k_intfc *interface = netdev_priv(dev);
-       struct fm10k_iov_data *iov_data = interface->iov_data;
        struct fm10k_hw *hw = &interface->hw;
        int stats_len = FM10K_STATIC_STATS_LEN;
 
@@ -244,19 +223,11 @@ static int fm10k_get_sset_count(struct net_device *dev, int sset)
        case ETH_SS_TEST:
                return FM10K_TEST_LEN;
        case ETH_SS_STATS:
-               stats_len += FM10K_QUEUE_STATS_LEN(hw->mac.max_queues);
+               stats_len += hw->mac.max_queues * 2 * FM10K_QUEUE_STATS_LEN;
 
                if (hw->mac.type != fm10k_mac_vf)
                        stats_len += FM10K_PF_STATS_LEN;
 
-               if (interface->flags & FM10K_FLAG_DEBUG_STATS) {
-                       stats_len += FM10K_DEBUG_STATS_LEN;
-
-                       if (iov_data)
-                               stats_len += FM10K_MBX_STATS_LEN *
-                                       iov_data->num_vfs;
-               }
-
                return stats_len;
        case ETH_SS_PRIV_FLAGS:
                return FM10K_PRV_FLAG_LEN;
@@ -272,9 +243,10 @@ static void fm10k_add_ethtool_stats(u64 **data, void *pointer,
        unsigned int i;
        char *p;
 
-       /* simply skip forward if we were not given a valid pointer */
        if (!pointer) {
-               *data += size;
+               /* memory is not zeroed on allocation, so we have to clear it */
+               for (i = 0; i < size; i++)
+                       *((*data)++) = 0;
                return;
        }
 
@@ -304,11 +276,9 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev,
                                    struct ethtool_stats __always_unused *stats,
                                    u64 *data)
 {
-       const int stat_count = sizeof(struct fm10k_queue_stats) / sizeof(u64);
        struct fm10k_intfc *interface = netdev_priv(netdev);
-       struct fm10k_iov_data *iov_data = interface->iov_data;
        struct net_device_stats *net_stats = &netdev->stats;
-       int i, j;
+       int i;
 
        fm10k_update_stats(interface);
 
@@ -318,11 +288,6 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev,
        fm10k_add_ethtool_stats(&data, interface, fm10k_gstrings_global_stats,
                                FM10K_GLOBAL_STATS_LEN);
 
-       if (interface->flags & FM10K_FLAG_DEBUG_STATS)
-               fm10k_add_ethtool_stats(&data, interface,
-                                       fm10k_gstrings_debug_stats,
-                                       FM10K_DEBUG_STATS_LEN);
-
        fm10k_add_ethtool_stats(&data, &interface->hw.mbx,
                                fm10k_gstrings_mbx_stats,
                                FM10K_MBX_STATS_LEN);
@@ -333,33 +298,18 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev,
                                        FM10K_PF_STATS_LEN);
        }
 
-       if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) {
-               for (i = 0; i < iov_data->num_vfs; i++) {
-                       struct fm10k_vf_info *vf_info;
-
-                       vf_info = &iov_data->vf_info[i];
-
-                       fm10k_add_ethtool_stats(&data, &vf_info->mbx,
-                                               fm10k_gstrings_mbx_stats,
-                                               FM10K_MBX_STATS_LEN);
-               }
-       }
-
        for (i = 0; i < interface->hw.mac.max_queues; i++) {
                struct fm10k_ring *ring;
-               u64 *queue_stat;
 
                ring = interface->tx_ring[i];
-               if (ring)
-                       queue_stat = (u64 *)&ring->stats;
-               for (j = 0; j < stat_count; j++)
-                       *(data++) = ring ? queue_stat[j] : 0;
+               fm10k_add_ethtool_stats(&data, ring,
+                                       fm10k_gstrings_queue_stats,
+                                       FM10K_QUEUE_STATS_LEN);
 
                ring = interface->rx_ring[i];
-               if (ring)
-                       queue_stat = (u64 *)&ring->stats;
-               for (j = 0; j < stat_count; j++)
-                       *(data++) = ring ? queue_stat[j] : 0;
+               fm10k_add_ethtool_stats(&data, ring,
+                                       fm10k_gstrings_queue_stats,
+                                       FM10K_QUEUE_STATS_LEN);
        }
 }
 
@@ -1003,27 +953,14 @@ static void fm10k_self_test(struct net_device *dev,
 
 static u32 fm10k_get_priv_flags(struct net_device *netdev)
 {
-       struct fm10k_intfc *interface = netdev_priv(netdev);
-       u32 priv_flags = 0;
-
-       if (interface->flags & FM10K_FLAG_DEBUG_STATS)
-               priv_flags |= BIT(FM10K_PRV_FLAG_DEBUG_STATS);
-
-       return priv_flags;
+       return 0;
 }
 
 static int fm10k_set_priv_flags(struct net_device *netdev, u32 priv_flags)
 {
-       struct fm10k_intfc *interface = netdev_priv(netdev);
-
        if (priv_flags >= BIT(FM10K_PRV_FLAG_LEN))
                return -EINVAL;
 
-       if (priv_flags & BIT(FM10K_PRV_FLAG_DEBUG_STATS))
-               interface->flags |= FM10K_FLAG_DEBUG_STATS;
-       else
-               interface->flags &= ~FM10K_FLAG_DEBUG_STATS;
-
        return 0;
 }
 
@@ -1034,15 +971,29 @@ u32 fm10k_get_reta_size(struct net_device __always_unused *netdev)
 
 void fm10k_write_reta(struct fm10k_intfc *interface, const u32 *indir)
 {
+       u16 rss_i = interface->ring_feature[RING_F_RSS].indices;
        struct fm10k_hw *hw = &interface->hw;
-       int i;
+       u32 table[4];
+       int i, j;
 
        /* record entries to reta table */
-       for (i = 0; i < FM10K_RETA_SIZE; i++, indir += 4) {
-               u32 reta = indir[0] |
-                          (indir[1] << 8) |
-                          (indir[2] << 16) |
-                          (indir[3] << 24);
+       for (i = 0; i < FM10K_RETA_SIZE; i++) {
+               u32 reta, n;
+
+               /* generate a new table if we weren't given one */
+               for (j = 0; j < 4; j++) {
+                       if (indir)
+                               n = indir[i + j];
+                       else
+                               n = ethtool_rxfh_indir_default(i + j, rss_i);
+
+                       table[j] = n;
+               }
+
+               reta = table[0] |
+                       (table[1] << 8) |
+                       (table[2] << 16) |
+                       (table[3] << 24);
 
                if (interface->reta[i] == reta)
                        continue;
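
Editorial note: each 32-bit RETA register packs four 8-bit queue indices, low byte first; a standalone check of the packing used above:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t table[4] = { 1, 2, 3, 4 };
        uint32_t reta = table[0] | (table[1] << 8) |
                        (table[2] << 16) | (table[3] << 24);

        assert(reta == 0x04030201); /* queue 1 in the low byte */
        return 0;
}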
@@ -1201,31 +1152,6 @@ static int fm10k_set_channels(struct net_device *dev,
        return fm10k_setup_tc(dev, netdev_get_num_tc(dev));
 }
 
-static int fm10k_get_ts_info(struct net_device *dev,
-                            struct ethtool_ts_info *info)
-{
-       struct fm10k_intfc *interface = netdev_priv(dev);
-
-       info->so_timestamping =
-               SOF_TIMESTAMPING_TX_SOFTWARE |
-               SOF_TIMESTAMPING_RX_SOFTWARE |
-               SOF_TIMESTAMPING_SOFTWARE |
-               SOF_TIMESTAMPING_TX_HARDWARE |
-               SOF_TIMESTAMPING_RX_HARDWARE |
-               SOF_TIMESTAMPING_RAW_HARDWARE;
-
-       if (interface->ptp_clock)
-               info->phc_index = ptp_clock_index(interface->ptp_clock);
-       else
-               info->phc_index = -1;
-
-       info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
-
-       info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
-
-       return 0;
-}
-
 static const struct ethtool_ops fm10k_ethtool_ops = {
        .get_strings            = fm10k_get_strings,
        .get_sset_count         = fm10k_get_sset_count,
@@ -1253,7 +1179,6 @@ static const struct ethtool_ops fm10k_ethtool_ops = {
        .set_rxfh               = fm10k_set_rssh,
        .get_channels           = fm10k_get_channels,
        .set_channels           = fm10k_set_channels,
-       .get_ts_info            = fm10k_get_ts_info,
 };
 
 void fm10k_set_ethtool_ops(struct net_device *dev)
index bbf7c4bac3038405332c5c9b9a6a38fdde875d6b..47f0743ec03b8f38f9cd3012a0cbc14bb8e3eb2a 100644 (file)
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
index 0b465394f88aff832bc632f8d2106de6c9a82c9f..0e166e9c90c8657a90ca8aed40615ad090ae1d8d 100644 (file)
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
 #include "fm10k.h"
 
 #define DRV_VERSION    "0.19.3-k"
+#define DRV_SUMMARY    "Intel(R) Ethernet Switch Host Interface Driver"
 const char fm10k_driver_version[] = DRV_VERSION;
 char fm10k_driver_name[] = "fm10k";
-static const char fm10k_driver_string[] =
-       "Intel(R) Ethernet Switch Host Interface Driver";
+static const char fm10k_driver_string[] = DRV_SUMMARY;
 static const char fm10k_copyright[] =
-       "Copyright (c) 2013 Intel Corporation.";
+       "Copyright (c) 2013 - 2016 Intel Corporation.";
 
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
-MODULE_DESCRIPTION("Intel(R) Ethernet Switch Host Interface Driver");
+MODULE_DESCRIPTION(DRV_SUMMARY);
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
@@ -424,19 +424,6 @@ static inline void fm10k_rx_hash(struct fm10k_ring *ring,
                     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
 }
 
-static void fm10k_rx_hwtstamp(struct fm10k_ring *rx_ring,
-                             union fm10k_rx_desc *rx_desc,
-                             struct sk_buff *skb)
-{
-       struct fm10k_intfc *interface = rx_ring->q_vector->interface;
-
-       FM10K_CB(skb)->tstamp = rx_desc->q.timestamp;
-
-       if (unlikely(interface->flags & FM10K_FLAG_RX_TS_ENABLED))
-               fm10k_systime_to_hwtstamp(interface, skb_hwtstamps(skb),
-                                         le64_to_cpu(rx_desc->q.timestamp));
-}
-
 static void fm10k_type_trans(struct fm10k_ring *rx_ring,
                             union fm10k_rx_desc __maybe_unused *rx_desc,
                             struct sk_buff *skb)
@@ -486,8 +473,6 @@ static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
 
        fm10k_rx_checksum(rx_ring, rx_desc, skb);
 
-       fm10k_rx_hwtstamp(rx_ring, rx_desc, skb);
-
        FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan;
 
        skb_record_rx_queue(skb, rx_ring->queue_index);
@@ -835,6 +820,8 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
                struct ipv6hdr *ipv6;
                u8 *raw;
        } network_hdr;
+       u8 *transport_hdr;
+       __be16 frag_off;
        __be16 protocol;
        u8 l4_hdr = 0;
 
@@ -852,9 +839,11 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
                        goto no_csum;
                }
                network_hdr.raw = skb_inner_network_header(skb);
+               transport_hdr = skb_inner_transport_header(skb);
        } else {
                protocol = vlan_get_protocol(skb);
                network_hdr.raw = skb_network_header(skb);
+               transport_hdr = skb_transport_header(skb);
        }
 
        switch (protocol) {
@@ -863,15 +852,17 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
                break;
        case htons(ETH_P_IPV6):
                l4_hdr = network_hdr.ipv6->nexthdr;
+               if (likely((transport_hdr - network_hdr.raw) ==
+                          sizeof(struct ipv6hdr)))
+                       break;
+               ipv6_skip_exthdr(skb, network_hdr.raw - skb->data +
+                                     sizeof(struct ipv6hdr),
+                                &l4_hdr, &frag_off);
+               if (unlikely(frag_off))
+                       l4_hdr = NEXTHDR_FRAGMENT;
                break;
        default:
-               if (unlikely(net_ratelimit())) {
-                       dev_warn(tx_ring->dev,
-                                "partial checksum but ip version=%x!\n",
-                                protocol);
-               }
-               tx_ring->tx_stats.csum_err++;
-               goto no_csum;
+               break;
        }
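
Editorial note: the new branch walks IPv6 extension headers only when the transport header is not directly behind the fixed 40-byte IPv6 header. A sketch of resolving the final L4 protocol with ipv6_skip_exthdr() (kernel context assumed; the function below is illustrative, not from this commit):

#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <net/ipv6.h>

static int example_ipv6_l4_proto(struct sk_buff *skb, u8 *l4_proto)
{
        __be16 frag_off;
        int offset;

        *l4_proto = ipv6_hdr(skb)->nexthdr;
        offset = ipv6_skip_exthdr(skb, skb_network_offset(skb) +
                                        sizeof(struct ipv6hdr),
                                  l4_proto, &frag_off);
        if (offset < 0)
                return offset;
        if (frag_off) /* fragmented: mirror the driver's fallback */
                *l4_proto = NEXTHDR_FRAGMENT;
        return 0;
}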
 
        switch (l4_hdr) {
@@ -884,9 +875,10 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
        default:
                if (unlikely(net_ratelimit())) {
                        dev_warn(tx_ring->dev,
-                                "partial checksum but l4 proto=%x!\n",
-                                l4_hdr);
+                                "partial checksum, version=%d l4 proto=%x\n",
+                                protocol, l4_hdr);
                }
+               skb_checksum_help(skb);
                tx_ring->tx_stats.csum_err++;
                goto no_csum;
        }
@@ -912,11 +904,6 @@ static u8 fm10k_tx_desc_flags(struct sk_buff *skb, u32 tx_flags)
        /* set type for advanced descriptor with frame checksum insertion */
        u32 desc_flags = 0;
 
-       /* set timestamping bits */
-       if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
-           likely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
-               desc_flags |= FM10K_TXD_FLAG_TIME;
-
        /* set checksum offload bits */
        desc_flags |= FM10K_SET_FLAG(tx_flags, FM10K_TX_FLAGS_CSUM,
                                     FM10K_TXD_FLAG_CSUM);
@@ -1198,9 +1185,10 @@ void fm10k_tx_timeout_reset(struct fm10k_intfc *interface)
  * fm10k_clean_tx_irq - Reclaim resources after transmit completes
  * @q_vector: structure containing interrupt and ring information
  * @tx_ring: tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
  **/
 static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
-                              struct fm10k_ring *tx_ring)
+                              struct fm10k_ring *tx_ring, int napi_budget)
 {
        struct fm10k_intfc *interface = q_vector->interface;
        struct fm10k_tx_buffer *tx_buffer;
@@ -1238,7 +1226,7 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
                total_packets += tx_buffer->gso_segs;
 
                /* free the skb */
-               dev_consume_skb_any(tx_buffer->skb);
+               napi_consume_skb(tx_buffer->skb, napi_budget);
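                /* [Editor's note, not in the commit] napi_consume_skb()
                 * batches frees through the per-cpu NAPI cache when
                 * budget > 0; with budget == 0 (the netpoll case) it falls
                 * back to dev_consume_skb_any(), so both callers stay safe.
                 */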
 
                /* unmap skb header data */
                dma_unmap_single(tx_ring->dev,
@@ -1449,8 +1437,10 @@ static int fm10k_poll(struct napi_struct *napi, int budget)
        int per_ring_budget, work_done = 0;
        bool clean_complete = true;
 
-       fm10k_for_each_ring(ring, q_vector->tx)
-               clean_complete &= fm10k_clean_tx_irq(q_vector, ring);
+       fm10k_for_each_ring(ring, q_vector->tx) {
+               if (!fm10k_clean_tx_irq(q_vector, ring, budget))
+                       clean_complete = false;
+       }
 
        /* Handle case where we are called by netpoll with a budget of 0 */
        if (budget <= 0)
@@ -1468,7 +1458,8 @@ static int fm10k_poll(struct napi_struct *napi, int budget)
                int work = fm10k_clean_rx_irq(q_vector, ring, per_ring_budget);
 
                work_done += work;
-               clean_complete &= !!(work < per_ring_budget);
+               if (work >= per_ring_budget)
+                       clean_complete = false;
        }
 
        /* If all work not completed, return budget and keep polling */
@@ -1943,8 +1934,7 @@ static void fm10k_assign_rings(struct fm10k_intfc *interface)
 static void fm10k_init_reta(struct fm10k_intfc *interface)
 {
        u16 i, rss_i = interface->ring_feature[RING_F_RSS].indices;
-       struct net_device *netdev = interface->netdev;
-       u32 reta, *indir;
+       u32 reta;
 
        /* If the Rx flow indirection table has been configured manually, we
         * need to maintain it when possible.
@@ -1969,16 +1959,7 @@ static void fm10k_init_reta(struct fm10k_intfc *interface)
        }
 
 repopulate_reta:
-       indir = kcalloc(fm10k_get_reta_size(netdev),
-                       sizeof(indir[0]), GFP_KERNEL);
-
-       /* generate redirection table using the default kernel policy */
-       for (i = 0; i < fm10k_get_reta_size(netdev); i++)
-               indir[i] = ethtool_rxfh_indir_default(i, rss_i);
-
-       fm10k_write_reta(interface, indir);
-
-       kfree(indir);
+       fm10k_write_reta(interface, NULL);
 }
 
 /**
index 98202c3d591c83bc27c202c09432c702cc0b1f06..c9dfa6564fcf54eb8a3315cec39343590f64121c 100644 (file)
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
index 245a0a3dc32e061fbc93f41cd37e0a82cd204b1b..b7dbc8a84c059e4efd94618816a4bacf202c6d05 100644 (file)
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
index 1d0f0583222cefb638e314c50130b345a0d00146..2a08d3f5b6dfbc4b567a2c8e114220bd688d3823 100644 (file)
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -243,9 +243,6 @@ void fm10k_clean_all_tx_rings(struct fm10k_intfc *interface)
 
        for (i = 0; i < interface->num_tx_queues; i++)
                fm10k_clean_tx_ring(interface->tx_ring[i]);
-
-       /* remove any stale timestamp buffers and free them */
-       skb_queue_purge(&interface->ts_tx_skb_queue);
 }
 
 /**
@@ -660,10 +657,6 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
                __skb_put(skb, pad_len);
        }
 
-       /* prepare packet for hardware time stamping */
-       if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
-               fm10k_ts_tx_enqueue(interface, skb);
-
        if (r_idx >= interface->num_tx_queues)
                r_idx %= interface->num_tx_queues;
 
@@ -884,7 +877,7 @@ static int __fm10k_uc_sync(struct net_device *dev,
                return -EADDRNOTAVAIL;
 
        /* update table with current entries */
-       for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0;
+       for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
             vid < VLAN_N_VID;
             vid = fm10k_find_next_vlan(interface, vid)) {
                err = hw->mac.ops.update_uc_addr(hw, glort, addr,
@@ -947,7 +940,7 @@ static int __fm10k_mc_sync(struct net_device *dev,
        u16 vid, glort = interface->glort;
 
        /* update table with current entries */
-       for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0;
+       for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
             vid < VLAN_N_VID;
             vid = fm10k_find_next_vlan(interface, vid)) {
                hw->mac.ops.update_mc_addr(hw, glort, addr, vid, sync);
@@ -1002,11 +995,8 @@ static void fm10k_set_rx_mode(struct net_device *dev)
        }
 
        /* synchronize all of the addresses */
-       if (xcast_mode != FM10K_XCAST_MODE_PROMISC) {
-               __dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync);
-               if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI)
-                       __dev_mc_sync(dev, fm10k_mc_sync, fm10k_mc_unsync);
-       }
+       __dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync);
+       __dev_mc_sync(dev, fm10k_mc_sync, fm10k_mc_unsync);
 
        fm10k_mbx_unlock(interface);
 }
@@ -1044,7 +1034,7 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
        hw->mac.ops.update_vlan(hw, 0, 0, true);
 
        /* update table with current entries */
-       for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0;
+       for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
             vid < VLAN_N_VID;
             vid = fm10k_find_next_vlan(interface, vid)) {
                hw->mac.ops.update_vlan(hw, vid, 0, true);
@@ -1056,11 +1046,8 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
        hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode);
 
        /* synchronize all of the addresses */
-       if (xcast_mode != FM10K_XCAST_MODE_PROMISC) {
-               __dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync);
-               if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI)
-                       __dev_mc_sync(netdev, fm10k_mc_sync, fm10k_mc_unsync);
-       }
+       __dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync);
+       __dev_mc_sync(netdev, fm10k_mc_sync, fm10k_mc_unsync);
 
        fm10k_mbx_unlock(interface);
 
@@ -1213,18 +1200,6 @@ static int __fm10k_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
        return fm10k_setup_tc(dev, tc->tc);
 }
 
-static int fm10k_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
-       switch (cmd) {
-       case SIOCGHWTSTAMP:
-               return fm10k_get_ts_config(netdev, ifr);
-       case SIOCSHWTSTAMP:
-               return fm10k_set_ts_config(netdev, ifr);
-       default:
-               return -EOPNOTSUPP;
-       }
-}
-
 static void fm10k_assign_l2_accel(struct fm10k_intfc *interface,
                                  struct fm10k_l2_accel *l2_accel)
 {
@@ -1402,7 +1377,6 @@ static const struct net_device_ops fm10k_netdev_ops = {
        .ndo_get_vf_config      = fm10k_ndo_get_vf_config,
        .ndo_add_vxlan_port     = fm10k_add_vxlan_port,
        .ndo_del_vxlan_port     = fm10k_del_vxlan_port,
-       .ndo_do_ioctl           = fm10k_ioctl,
        .ndo_dfwd_add_station   = fm10k_dfwd_add_station,
        .ndo_dfwd_del_station   = fm10k_dfwd_del_station,
 #ifdef CONFIG_NET_POLL_CONTROLLER
index f0992950e228eb3aef31b62d30454c0cd6cd294f..e05aca9bef0e635bd33e5a882e022ade06732aa0 100644 (file)
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -145,7 +145,7 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
        WARN_ON(in_interrupt());
 
        /* put off any impending NetWatchDogTimeout */
-       netdev->trans_start = jiffies;
+       netif_trans_update(netdev);
 
        while (test_and_set_bit(__FM10K_RESETTING, &interface->state))
                usleep_range(1000, 2000);
@@ -209,9 +209,6 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
                        netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
        }
 
-       /* reset clock */
-       fm10k_ts_reset(interface);
-
        err = netif_running(netdev) ? fm10k_open(netdev) : 0;
        if (err)
                goto err_open;
@@ -559,7 +556,6 @@ static void fm10k_service_task(struct work_struct *work)
        /* tasks only run when interface is up */
        fm10k_watchdog_subtask(interface);
        fm10k_check_hang_subtask(interface);
-       fm10k_ts_tx_subtask(interface);
 
        /* release lock on service events to allow scheduling next event */
        fm10k_service_event_complete(interface);
@@ -1204,25 +1200,6 @@ static s32 fm10k_mbx_mac_addr(struct fm10k_hw *hw, u32 **results,
        return 0;
 }
 
-static s32 fm10k_1588_msg_vf(struct fm10k_hw *hw, u32 **results,
-                            struct fm10k_mbx_info __always_unused *mbx)
-{
-       struct fm10k_intfc *interface;
-       u64 timestamp;
-       s32 err;
-
-       err = fm10k_tlv_attr_get_u64(results[FM10K_1588_MSG_TIMESTAMP],
-                                    &timestamp);
-       if (err)
-               return err;
-
-       interface = container_of(hw, struct fm10k_intfc, hw);
-
-       fm10k_ts_tx_hwtstamp(interface, 0, timestamp);
-
-       return 0;
-}
-
 /* generic error handler for mailbox issues */
 static s32 fm10k_mbx_error(struct fm10k_hw *hw, u32 **results,
                           struct fm10k_mbx_info __always_unused *mbx)
@@ -1243,7 +1220,6 @@ static const struct fm10k_msg_data vf_mbx_data[] = {
        FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
        FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_mbx_mac_addr),
        FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
-       FM10K_VF_MSG_1588_HANDLER(fm10k_1588_msg_vf),
        FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error),
 };
 
@@ -1287,11 +1263,40 @@ static s32 fm10k_lport_map(struct fm10k_hw *hw, u32 **results,
        u32 dglort_map = hw->mac.dglort_map;
        s32 err;
 
+       interface = container_of(hw, struct fm10k_intfc, hw);
+
+       err = fm10k_msg_err_pf(hw, results, mbx);
+       if (!err && hw->swapi.status) {
+               /* force link down for a reasonable delay */
+               interface->link_down_event = jiffies + (2 * HZ);
+               set_bit(__FM10K_LINK_DOWN, &interface->state);
+
+               /* reset dglort_map back to no config */
+               hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;
+
+               fm10k_service_event_schedule(interface);
+
+               /* prevent overloading kernel message buffer */
+               if (interface->lport_map_failed)
+                       return 0;
+
+               interface->lport_map_failed = true;
+
+               if (hw->swapi.status == FM10K_MSG_ERR_PEP_NOT_SCHEDULED)
+                       dev_warn(&interface->pdev->dev,
+                                "cannot obtain link because the host interface is configured for a PCIe host interface bandwidth of zero\n");
+               dev_warn(&interface->pdev->dev,
+                        "request logical port map failed: %d\n",
+                        hw->swapi.status);
+
+               return 0;
+       }
+
        err = fm10k_msg_lport_map_pf(hw, results, mbx);
        if (err)
                return err;
 
-       interface = container_of(hw, struct fm10k_intfc, hw);
+       interface->lport_map_failed = false;
 
        /* we need to reset if port count was just updated */
        if (dglort_map != hw->mac.dglort_map)
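(Assumption, for orientation: the two-second link_down_event set above is presumably consumed by the watchdog path with a jiffies comparison, along the lines of the sketch below; the exact consumer is outside this hunk.)

	/* illustrative only — clear the forced-down state once the delay elapses */
	if (time_is_before_jiffies(interface->link_down_event))
		clear_bit(__FM10K_LINK_DOWN, &interface->state);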
@@ -1341,68 +1346,6 @@ static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results,
        return 0;
 }
 
-static s32 fm10k_1588_msg_pf(struct fm10k_hw *hw, u32 **results,
-                            struct fm10k_mbx_info __always_unused *mbx)
-{
-       struct fm10k_swapi_1588_timestamp timestamp;
-       struct fm10k_iov_data *iov_data;
-       struct fm10k_intfc *interface;
-       u16 sglort, vf_idx;
-       s32 err;
-
-       err = fm10k_tlv_attr_get_le_struct(
-                               results[FM10K_PF_ATTR_ID_1588_TIMESTAMP],
-                               &timestamp, sizeof(timestamp));
-       if (err)
-               return err;
-
-       interface = container_of(hw, struct fm10k_intfc, hw);
-
-       if (timestamp.dglort) {
-               fm10k_ts_tx_hwtstamp(interface, timestamp.dglort,
-                                    le64_to_cpu(timestamp.egress));
-               return 0;
-       }
-
-       /* either dglort or sglort must be set */
-       if (!timestamp.sglort)
-               return FM10K_ERR_PARAM;
-
-       /* verify GLORT is at least one of the ones we own */
-       sglort = le16_to_cpu(timestamp.sglort);
-       if (!fm10k_glort_valid_pf(hw, sglort))
-               return FM10K_ERR_PARAM;
-
-       if (sglort == interface->glort) {
-               fm10k_ts_tx_hwtstamp(interface, 0,
-                                    le64_to_cpu(timestamp.ingress));
-               return 0;
-       }
-
-       /* if there is no iov_data then there is no mailbox to process */
-       if (!ACCESS_ONCE(interface->iov_data))
-               return FM10K_ERR_PARAM;
-
-       rcu_read_lock();
-
-       /* notify VF if this timestamp belongs to it */
-       iov_data = interface->iov_data;
-       vf_idx = (hw->mac.dglort_map & FM10K_DGLORTMAP_NONE) - sglort;
-
-       if (!iov_data || vf_idx >= iov_data->num_vfs) {
-               err = FM10K_ERR_PARAM;
-               goto err_unlock;
-       }
-
-       err = hw->iov.ops.report_timestamp(hw, &iov_data->vf_info[vf_idx],
-                                          le64_to_cpu(timestamp.ingress));
-
-err_unlock:
-       rcu_read_unlock();
-
-       return err;
-}
-
 static const struct fm10k_msg_data pf_mbx_data[] = {
        FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
        FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
@@ -1410,7 +1353,6 @@ static const struct fm10k_msg_data pf_mbx_data[] = {
        FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
        FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
        FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_update_pvid),
-       FM10K_PF_MSG_1588_TIMESTAMP_HANDLER(fm10k_1588_msg_pf),
        FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error),
 };
 
@@ -1789,18 +1731,9 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
                return -EIO;
        }
 
-       /* assign BAR 4 resources for use with PTP */
-       if (fm10k_read_reg(hw, FM10K_CTRL) & FM10K_CTRL_BAR4_ALLOWED)
-               interface->sw_addr = ioremap(pci_resource_start(pdev, 4),
-                                            pci_resource_len(pdev, 4));
-       hw->sw_addr = interface->sw_addr;
-
        /* initialize DCBNL interface */
        fm10k_dcbnl_set_ops(netdev);
 
-       /* Initialize timestamp data */
-       fm10k_ts_init(interface);
-
        /* set default ring sizes */
        interface->tx_ring_count = FM10K_DEFAULT_TXD;
        interface->rx_ring_count = FM10K_DEFAULT_RXD;
@@ -2018,9 +1951,6 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* kick off service timer now, even when interface is down */
        mod_timer(&interface->service_timer, (HZ * 2) + jiffies);
 
-       /* Register PTP interface */
-       fm10k_ptp_register(interface);
-
        /* print warning for non-optimal configurations */
        fm10k_slot_warn(interface);
 
@@ -2077,9 +2007,6 @@ static void fm10k_remove(struct pci_dev *pdev)
        if (netdev->reg_state == NETREG_REGISTERED)
                unregister_netdev(netdev);
 
-       /* cleanup timestamp handling */
-       fm10k_ptp_unregister(interface);
-
        /* release VFs */
        fm10k_iov_disable(pdev);
 
@@ -2152,9 +2079,6 @@ static int fm10k_resume(struct pci_dev *pdev)
        /* reset statistics starting values */
        hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
 
-       /* reset clock */
-       fm10k_ts_reset(interface);
-
        rtnl_lock();
 
        err = fm10k_init_queueing_scheme(interface);
@@ -2271,6 +2195,8 @@ static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev,
        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;
 
+       rtnl_lock();
+
        if (netif_running(netdev))
                fm10k_close(netdev);
 
@@ -2279,7 +2205,7 @@ static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev,
        /* free interrupts */
        fm10k_clear_queueing_scheme(interface);
 
-       pci_disable_device(pdev);
+       rtnl_unlock();
 
        /* Request a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
@@ -2349,27 +2275,29 @@ static void fm10k_io_resume(struct pci_dev *pdev)
        /* reset statistics starting values */
        hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
 
+       rtnl_lock();
+
        err = fm10k_init_queueing_scheme(interface);
        if (err) {
                dev_err(&interface->pdev->dev,
                        "init_queueing_scheme failed: %d\n", err);
-               return;
+               goto unlock;
        }
 
        /* reassociate interrupts */
        fm10k_mbx_request_irq(interface);
 
-       /* reset clock */
-       fm10k_ts_reset(interface);
-
        if (netif_running(netdev))
                err = fm10k_open(netdev);
 
        /* final check of hardware state before registering the interface */
        err = err ? : fm10k_hw_ready(interface);
 
        if (!err)
                netif_device_attach(netdev);
+
+unlock:
+       rtnl_unlock();
 }
 
 static const struct pci_error_handlers fm10k_err_handler = {
index ecc99f9d2cce9604068e424e743b56404afda431..dc75507c99265300f9104d67222df32a8c36fb92 100644 (file)
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -219,8 +219,8 @@ static s32 fm10k_update_vlan_pf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
 
        /* VLAN multi-bit write:
         * The multi-bit write has several parts to it.
-        *    3                   2                   1                   0
-        *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+        *               24              16               8               0
+        *  7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0
         * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
         * | RSVD0 |         Length        |C|RSVD0|        VLAN ID        |
         * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
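Reading the corrected ruler: the low 12 bits carry the VLAN ID, bit 15 is the clear/set flag, and the length field sits above bit 16. A sketch of composing the word from the defines in fm10k_type.h (length, clear, and vid are illustrative locals):

	u32 vlan_update = ((u32)length << FM10K_VLAN_LENGTH_SHIFT) |
			  (clear ? FM10K_VLAN_CLEAR : 0) |
			  (vid & (FM10K_VLAN_TABLE_VID_MAX - 1));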
@@ -488,6 +488,10 @@ static s32 fm10k_update_lport_state_pf(struct fm10k_hw *hw, u16 glort,
        if (!fm10k_glort_valid_pf(hw, glort))
                return FM10K_ERR_PARAM;
 
+       /* reset multicast mode if deleting lport */
+       if (!enable)
+               fm10k_update_xcast_mode_pf(hw, glort, FM10K_XCAST_MODE_NONE);
+
        /* construct the lport message from the 2 pieces of data we have */
        lport_msg = ((u32)count << 16) | glort;
 
@@ -864,9 +868,13 @@ static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
        fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), 0);
        fm10k_write_reg(hw, FM10K_TXDCTL(vf_q_idx), 0);
 
-       /* determine correct default VLAN ID */
+       /* Determine correct default VLAN ID. The FM10K_VLAN_OVERRIDE bit is
+        * used here to indicate to the VF that it will not have privilege to
+        * write VLAN_TABLE. All policy is enforced on the PF but this allows
+        * the VF to correctly report errors for userspace requests.
+        */
        if (vf_info->pf_vid)
-               vf_vid = vf_info->pf_vid | FM10K_VLAN_CLEAR;
+               vf_vid = vf_info->pf_vid | FM10K_VLAN_OVERRIDE;
        else
                vf_vid = vf_info->sw_vid;
 
@@ -1140,19 +1148,6 @@ static void fm10k_iov_update_stats_pf(struct fm10k_hw *hw,
        fm10k_update_hw_stats_q(hw, q, idx, qpp);
 }
 
-static s32 fm10k_iov_report_timestamp_pf(struct fm10k_hw *hw,
-                                        struct fm10k_vf_info *vf_info,
-                                        u64 timestamp)
-{
-       u32 msg[4];
-
-       /* generate port state response to notify VF it is not ready */
-       fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_1588);
-       fm10k_tlv_attr_put_u64(msg, FM10K_1588_MSG_TIMESTAMP, timestamp);
-
-       return vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
-}
-
 /**
  *  fm10k_iov_msg_msix_pf - Message handler for MSI-X request from VF
  *  @hw: Pointer to hardware structure
@@ -1223,18 +1218,32 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
                if (err)
                        return err;
 
-               /* verify upper 16 bits are zero */
-               if (vid >> 16)
-                       return FM10K_ERR_PARAM;
-
                set = !(vid & FM10K_VLAN_CLEAR);
                vid &= ~FM10K_VLAN_CLEAR;
 
-               err = fm10k_iov_select_vid(vf_info, (u16)vid);
-               if (err < 0)
-                       return err;
+               /* if the length field has been set, this is a multi-bit
+                * update request. For multi-bit requests, simply disallow
+                * them when the pf_vid has been set. In this case, the PF
+                * should have already cleared the VLAN_TABLE, and allowing
+                * them could let a rogue VF receive traffic on a VLAN it was
+                * not assigned. In the single-bit case, we
+                * need to modify requests for VLAN 0 to use the default PF or
+                * SW vid when assigned.
+                */
 
-               vid = err;
+               if (vid >> 16) {
+                       /* prevent multi-bit requests when PF has
+                        * administratively set the VLAN for this VF
+                        */
+                       if (vf_info->pf_vid)
+                               return FM10K_ERR_PARAM;
+               } else {
+                       err = fm10k_iov_select_vid(vf_info, (u16)vid);
+                       if (err < 0)
+                               return err;
+
+                       vid = err;
+               }
 
                /* update VSI info for VF in regards to VLAN table */
                err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
@@ -1633,6 +1642,8 @@ out:
 
 /* This structure defines the attributes to be parsed below */
 const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[] = {
+       FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR,
+                                sizeof(struct fm10k_swapi_error)),
        FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_LPORT_MAP),
        FM10K_TLV_ATTR_LAST
 };
@@ -1773,89 +1784,6 @@ s32 fm10k_msg_err_pf(struct fm10k_hw *hw, u32 **results,
        return 0;
 }
 
-const struct fm10k_tlv_attr fm10k_1588_timestamp_msg_attr[] = {
-       FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_1588_TIMESTAMP,
-                                sizeof(struct fm10k_swapi_1588_timestamp)),
-       FM10K_TLV_ATTR_LAST
-};
-
-/* currently there is no shared 1588 timestamp handler */
-
-/**
- *  fm10k_adjust_systime_pf - Adjust systime frequency
- *  @hw: pointer to hardware structure
- *  @ppb: adjustment rate in parts per billion
- *
- *  This function will adjust the SYSTIME_CFG register contained in BAR 4
- *  if this function is supported for BAR 4 access.  The adjustment amount
- *  is based on the parts per billion value provided and adjusted to a
- *  value based on parts per 2^48 clock cycles.
- *
- *  If adjustment is not supported or the requested value is too large
- *  we will return an error.
- **/
-static s32 fm10k_adjust_systime_pf(struct fm10k_hw *hw, s32 ppb)
-{
-       u64 systime_adjust;
-
-       /* if sw_addr is not set we don't have switch register access */
-       if (!hw->sw_addr)
-               return ppb ? FM10K_ERR_PARAM : 0;
-
-       /* we must convert the value from parts per billion to parts per
-        * 2^48 cycles.  In addition I have opted to only use the 30 most
-        * significant bits of the adjustment value as the 8 least
-        * significant bits are located in another register and represent
-        * a value significantly less than a part per billion. The result
-        * of dropping the 8 least significant bits is that the adjustment
-        * value is effectively multiplied by 2^8 when we write it.
-        *
-        * As a result of all this the math for this breaks down as follows:
-        *      ppb / 10^9 == adjust * 2^8 / 2^48
-        * If we solve this for adjust, and simplify it comes out as:
-        *      ppb * 2^31 / 5^9 == adjust
-        */
-       systime_adjust = (ppb < 0) ? -ppb : ppb;
-       systime_adjust <<= 31;
-       do_div(systime_adjust, 1953125);
-
-       /* verify the requested adjustment value is in range */
-       if (systime_adjust > FM10K_SW_SYSTIME_ADJUST_MASK)
-               return FM10K_ERR_PARAM;
-
-       if (ppb > 0)
-               systime_adjust |= FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE;
-
-       fm10k_write_sw_reg(hw, FM10K_SW_SYSTIME_ADJUST, (u32)systime_adjust);
-
-       return 0;
-}
-
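For reference, the arithmetic in the helper being removed checks out. The hardware wants parts per 2^48 cycles, and dropping the 8 least significant register bits scales the written value by 2^8, so:

	ppb / 10^9 == adjust * 2^8 / 2^48
	adjust == ppb * 2^48 / (10^9 * 2^8)
	       == ppb * 2^48 / (2^17 * 5^9)
	       == ppb * 2^31 / 5^9
	       == (ppb << 31) / 1953125

which is exactly the shift plus do_div() above; running it backwards at the register maximum of 2^30 - 1 yields the 976562 ppb cap quoted in fm10k_ptp_register() further down.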
-/**
- *  fm10k_read_systime_pf - Reads value of systime registers
- *  @hw: pointer to the hardware structure
- *
- *  Function reads the content of 2 registers, combined to represent a 64 bit
- *  value measured in nanoseconds.  In order to guarantee the value is accurate
- *  we check the 32 most significant bits both before and after reading the
- *  32 least significant bits to verify they didn't change as we were reading
- *  the registers.
- **/
-static u64 fm10k_read_systime_pf(struct fm10k_hw *hw)
-{
-       u32 systime_l, systime_h, systime_tmp;
-
-       systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1);
-
-       do {
-               systime_tmp = systime_h;
-               systime_l = fm10k_read_reg(hw, FM10K_SYSTIME);
-               systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1);
-       } while (systime_tmp != systime_h);
-
-       return ((u64)systime_h << 32) | systime_l;
-}
-
 static const struct fm10k_msg_data fm10k_msg_data_pf[] = {
        FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
        FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
@@ -1885,8 +1813,6 @@ static const struct fm10k_mac_ops mac_ops_pf = {
        .set_dma_mask           = fm10k_set_dma_mask_pf,
        .get_fault              = fm10k_get_fault_pf,
        .get_host_state         = fm10k_get_host_state_pf,
-       .adjust_systime         = fm10k_adjust_systime_pf,
-       .read_systime           = fm10k_read_systime_pf,
 };
 
 static const struct fm10k_iov_ops iov_ops_pf = {
@@ -1898,7 +1824,6 @@ static const struct fm10k_iov_ops iov_ops_pf = {
        .set_lport                      = fm10k_iov_set_lport_pf,
        .reset_lport                    = fm10k_iov_reset_lport_pf,
        .update_stats                   = fm10k_iov_update_stats_pf,
-       .report_timestamp               = fm10k_iov_report_timestamp_pf,
 };
 
 static s32 fm10k_get_invariants_pf(struct fm10k_hw *hw)
index b2d96b45ca3ccadc936170557b4a32f337f7396c..3336d3c10760a0fa89ac35f184539e002ba9bd67 100644 (file)
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -42,8 +42,6 @@ enum fm10k_pf_tlv_msg_id_v1 {
        FM10K_PF_MSG_ID_UPDATE_FLOW             = 0x503,
        FM10K_PF_MSG_ID_DELETE_FLOW             = 0x504,
        FM10K_PF_MSG_ID_SET_FLOW_STATE          = 0x505,
-       FM10K_PF_MSG_ID_GET_1588_INFO           = 0x506,
-       FM10K_PF_MSG_ID_1588_TIMESTAMP          = 0x701,
 };
 
 enum fm10k_pf_tlv_attr_id_v1 {
@@ -61,7 +59,6 @@ enum fm10k_pf_tlv_attr_id_v1 {
        FM10K_PF_ATTR_ID_DELETE_FLOW            = 0x0B,
        FM10K_PF_ATTR_ID_PORT                   = 0x0C,
        FM10K_PF_ATTR_ID_UPDATE_PVID            = 0x0D,
-       FM10K_PF_ATTR_ID_1588_TIMESTAMP         = 0x10,
 };
 
 #define FM10K_MSG_LPORT_MAP_GLORT_SHIFT        0
@@ -74,6 +71,8 @@ enum fm10k_pf_tlv_attr_id_v1 {
 #define FM10K_MSG_UPDATE_PVID_PVID_SHIFT       16
 #define FM10K_MSG_UPDATE_PVID_PVID_SIZE                16
 
+#define FM10K_MSG_ERR_PEP_NOT_SCHEDULED        280
+
 /* The following data structures are overlayed directly onto TLV mailbox
  * messages, and must not break 4 byte alignment. Ensure the structures line
  * up correctly as per their TLV definition.
@@ -100,13 +99,6 @@ struct fm10k_swapi_error {
        struct fm10k_global_table_data  ffu;
 } __aligned(4) __packed;
 
-struct fm10k_swapi_1588_timestamp {
-       __le64 egress;
-       __le64 ingress;
-       __le16 dglort;
-       __le16 sglort;
-} __aligned(4) __packed;
-
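Given the overlay rule stated above these structs, a build-time guard is one cheap way to keep them honest; a purely illustrative sketch, not code from the driver:

	BUILD_BUG_ON(sizeof(struct fm10k_swapi_error) % 4);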
 s32 fm10k_msg_lport_map_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
 extern const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[];
 #define FM10K_PF_MSG_LPORT_MAP_HANDLER(func) \
@@ -122,11 +114,6 @@ extern const struct fm10k_tlv_attr fm10k_err_msg_attr[];
 #define FM10K_PF_MSG_ERR_HANDLER(msg, func) \
        FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_##msg, fm10k_err_msg_attr, func)
 
-extern const struct fm10k_tlv_attr fm10k_1588_timestamp_msg_attr[];
-#define FM10K_PF_MSG_1588_TIMESTAMP_HANDLER(func) \
-       FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_1588_TIMESTAMP, \
-                         fm10k_1588_timestamp_msg_attr, func)
-
 s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
 s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *, u32 **,
                              struct fm10k_mbx_info *);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
deleted file mode 100644 (file)
index 1c1ccad..0000000
+++ /dev/null
@@ -1,462 +0,0 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
-
-#include <linux/ptp_classify.h>
-#include <linux/ptp_clock_kernel.h>
-
-#include "fm10k.h"
-
-#define FM10K_TS_TX_TIMEOUT            (HZ * 15)
-
-void fm10k_systime_to_hwtstamp(struct fm10k_intfc *interface,
-                              struct skb_shared_hwtstamps *hwtstamp,
-                              u64 systime)
-{
-       unsigned long flags;
-
-       read_lock_irqsave(&interface->systime_lock, flags);
-       systime += interface->ptp_adjust;
-       read_unlock_irqrestore(&interface->systime_lock, flags);
-
-       hwtstamp->hwtstamp = ns_to_ktime(systime);
-}
-
-static struct sk_buff *fm10k_ts_tx_skb(struct fm10k_intfc *interface,
-                                      __le16 dglort)
-{
-       struct sk_buff_head *list = &interface->ts_tx_skb_queue;
-       struct sk_buff *skb;
-
-       skb_queue_walk(list, skb) {
-               if (FM10K_CB(skb)->fi.w.dglort == dglort)
-                       return skb;
-       }
-
-       return NULL;
-}
-
-void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb)
-{
-       struct sk_buff_head *list = &interface->ts_tx_skb_queue;
-       struct sk_buff *clone;
-       unsigned long flags;
-
-       /* create clone for us to return on the Tx path */
-       clone = skb_clone_sk(skb);
-       if (!clone)
-               return;
-
-       FM10K_CB(clone)->ts_tx_timeout = jiffies + FM10K_TS_TX_TIMEOUT;
-       spin_lock_irqsave(&list->lock, flags);
-
-       /* attempt to locate any buffers with the same dglort,
-        * if none are present then insert skb in tail of list
-        */
-       skb = fm10k_ts_tx_skb(interface, FM10K_CB(clone)->fi.w.dglort);
-       if (!skb) {
-               skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
-               __skb_queue_tail(list, clone);
-       }
-
-       spin_unlock_irqrestore(&list->lock, flags);
-
-       /* if the list already has one then we just free the clone */
-       if (skb)
-               dev_kfree_skb(clone);
-}
-
-void fm10k_ts_tx_hwtstamp(struct fm10k_intfc *interface, __le16 dglort,
-                         u64 systime)
-{
-       struct skb_shared_hwtstamps shhwtstamps;
-       struct sk_buff_head *list = &interface->ts_tx_skb_queue;
-       struct sk_buff *skb;
-       unsigned long flags;
-
-       spin_lock_irqsave(&list->lock, flags);
-
-       /* attempt to locate and pull the sk_buff out of the list */
-       skb = fm10k_ts_tx_skb(interface, dglort);
-       if (skb)
-               __skb_unlink(skb, list);
-
-       spin_unlock_irqrestore(&list->lock, flags);
-
-       /* if not found do nothing */
-       if (!skb)
-               return;
-
-       /* timestamp the sk_buff and free our copy */
-       fm10k_systime_to_hwtstamp(interface, &shhwtstamps, systime);
-       skb_tstamp_tx(skb, &shhwtstamps);
-       dev_kfree_skb_any(skb);
-}
-
-void fm10k_ts_tx_subtask(struct fm10k_intfc *interface)
-{
-       struct sk_buff_head *list = &interface->ts_tx_skb_queue;
-       struct sk_buff *skb, *tmp;
-       unsigned long flags;
-
-       /* If we're down or resetting, just bail */
-       if (test_bit(__FM10K_DOWN, &interface->state) ||
-           test_bit(__FM10K_RESETTING, &interface->state))
-               return;
-
-       spin_lock_irqsave(&list->lock, flags);
-
-       /* walk through the list and flush any expired timestamp packets */
-       skb_queue_walk_safe(list, skb, tmp) {
-               if (!time_is_after_jiffies(FM10K_CB(skb)->ts_tx_timeout))
-                       continue;
-               __skb_unlink(skb, list);
-               kfree_skb(skb);
-               interface->tx_hwtstamp_timeouts++;
-       }
-
-       spin_unlock_irqrestore(&list->lock, flags);
-}
-
-static u64 fm10k_systime_read(struct fm10k_intfc *interface)
-{
-       struct fm10k_hw *hw = &interface->hw;
-
-       return hw->mac.ops.read_systime(hw);
-}
-
-void fm10k_ts_reset(struct fm10k_intfc *interface)
-{
-       s64 ns = ktime_to_ns(ktime_get_real());
-       unsigned long flags;
-
-       /* reinitialize the clock */
-       write_lock_irqsave(&interface->systime_lock, flags);
-       interface->ptp_adjust = fm10k_systime_read(interface) - ns;
-       write_unlock_irqrestore(&interface->systime_lock, flags);
-}
-
-void fm10k_ts_init(struct fm10k_intfc *interface)
-{
-       /* Initialize lock protecting systime access */
-       rwlock_init(&interface->systime_lock);
-
-       /* Initialize skb queue for pending timestamp requests */
-       skb_queue_head_init(&interface->ts_tx_skb_queue);
-
-       /* reset the clock to current kernel time */
-       fm10k_ts_reset(interface);
-}
-
-/**
- * fm10k_get_ts_config - get current hardware timestamping configuration
- * @netdev: network interface device structure
- * @ifreq: ioctl data
- *
- * This function returns the current timestamping settings. Rather than
- * attempt to deconstruct registers to fill in the values, simply keep a copy
- * of the old settings around, and return a copy when requested.
- */
-int fm10k_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
-{
-       struct fm10k_intfc *interface = netdev_priv(netdev);
-       struct hwtstamp_config *config = &interface->ts_config;
-
-       return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
-               -EFAULT : 0;
-}
-
-/**
- * fm10k_set_ts_config - control hardware time stamping
- * @netdev: network interface device structure
- * @ifreq: ioctl data
- *
- * Outgoing time stamping can be enabled and disabled. Play nice and
- * disable it when requested, although it shouldn't cause any overhead
- * when no packet needs it. At most one packet in the queue may be
- * marked for time stamping, otherwise it would be impossible to tell
- * for sure to which packet the hardware time stamp belongs.
- *
- * Incoming time stamping has to be configured via the hardware
- * filters. Not all combinations are supported, in particular event
- * type has to be specified. Matching the kind of event packet is
- * not supported, with the exception of "all V2 events regardless of
- * level 2 or 4".
- *
- * Since hardware always timestamps Path delay packets when timestamping V2
- * packets, regardless of the type specified in the register, only use V2
- * Event mode. This more accurately tells the user what the hardware is going
- * to do anyways.
- */
-int fm10k_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
-{
-       struct fm10k_intfc *interface = netdev_priv(netdev);
-       struct hwtstamp_config ts_config;
-
-       if (copy_from_user(&ts_config, ifr->ifr_data, sizeof(ts_config)))
-               return -EFAULT;
-
-       /* reserved for future extensions */
-       if (ts_config.flags)
-               return -EINVAL;
-
-       switch (ts_config.tx_type) {
-       case HWTSTAMP_TX_OFF:
-               break;
-       case HWTSTAMP_TX_ON:
-               /* we likely need some check here to see if this is supported */
-               break;
-       default:
-               return -ERANGE;
-       }
-
-       switch (ts_config.rx_filter) {
-       case HWTSTAMP_FILTER_NONE:
-               interface->flags &= ~FM10K_FLAG_RX_TS_ENABLED;
-               break;
-       case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
-       case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
-       case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
-       case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
-       case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
-       case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
-       case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
-       case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
-       case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
-       case HWTSTAMP_FILTER_PTP_V2_EVENT:
-       case HWTSTAMP_FILTER_PTP_V2_SYNC:
-       case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
-       case HWTSTAMP_FILTER_ALL:
-               interface->flags |= FM10K_FLAG_RX_TS_ENABLED;
-               ts_config.rx_filter = HWTSTAMP_FILTER_ALL;
-               break;
-       default:
-               return -ERANGE;
-       }
-
-       /* save these settings for future reference */
-       interface->ts_config = ts_config;
-
-       return copy_to_user(ifr->ifr_data, &ts_config, sizeof(ts_config)) ?
-               -EFAULT : 0;
-}
-
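The two handlers being deleted here served the standard SIOCGHWTSTAMP/SIOCSHWTSTAMP ioctls, which remain a generic kernel API. A self-contained userspace sketch of the set side (nothing fm10k-specific; fd is any socket, e.g. from socket(AF_INET, SOCK_DGRAM, 0)):

#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>

/* ask the NIC for Tx timestamps and timestamping of all Rx packets */
static int enable_hwtstamp(int fd, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}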
-static int fm10k_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
-{
-       struct fm10k_intfc *interface;
-       struct fm10k_hw *hw;
-       int err;
-
-       interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
-       hw = &interface->hw;
-
-       err = hw->mac.ops.adjust_systime(hw, ppb);
-
-       /* the only error we should see is if the value is out of range */
-       return (err == FM10K_ERR_PARAM) ? -ERANGE : err;
-}
-
-static int fm10k_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
-{
-       struct fm10k_intfc *interface;
-       unsigned long flags;
-
-       interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
-
-       write_lock_irqsave(&interface->systime_lock, flags);
-       interface->ptp_adjust += delta;
-       write_unlock_irqrestore(&interface->systime_lock, flags);
-
-       return 0;
-}
-
-static int fm10k_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
-{
-       struct fm10k_intfc *interface;
-       unsigned long flags;
-       u64 now;
-
-       interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
-
-       read_lock_irqsave(&interface->systime_lock, flags);
-       now = fm10k_systime_read(interface) + interface->ptp_adjust;
-       read_unlock_irqrestore(&interface->systime_lock, flags);
-
-       *ts = ns_to_timespec64(now);
-
-       return 0;
-}
-
-static int fm10k_ptp_settime(struct ptp_clock_info *ptp,
-                            const struct timespec64 *ts)
-{
-       struct fm10k_intfc *interface;
-       unsigned long flags;
-       u64 ns = timespec64_to_ns(ts);
-
-       interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
-
-       write_lock_irqsave(&interface->systime_lock, flags);
-       interface->ptp_adjust = fm10k_systime_read(interface) - ns;
-       write_unlock_irqrestore(&interface->systime_lock, flags);
-
-       return 0;
-}
-
-static int fm10k_ptp_enable(struct ptp_clock_info *ptp,
-                           struct ptp_clock_request *rq,
-                           int __always_unused on)
-{
-       struct ptp_clock_time *t = &rq->perout.period;
-       struct fm10k_intfc *interface;
-       struct fm10k_hw *hw;
-       u64 period;
-       u32 step;
-
-       /* we can only support periodic output */
-       if (rq->type != PTP_CLK_REQ_PEROUT)
-               return -EINVAL;
-
-       /* verify the requested channel is there */
-       if (rq->perout.index >= ptp->n_per_out)
-               return -EINVAL;
-
-       /* we cannot enforce start time as there is no
-        * mechanism for that in the hardware; we can only control
-        * the period.
-        */
-
-       /* we cannot support periods greater than 4 seconds due to reg limit */
-       if (t->sec > 4 || t->sec < 0)
-               return -ERANGE;
-
-       interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
-       hw = &interface->hw;
-
-       /* we simply cannot support the operation if we don't have BAR4 */
-       if (!hw->sw_addr)
-               return -ENOTSUPP;
-
-       /* convert to unsigned 64b ns, verify we can put it in a 32b register */
-       period = t->sec * 1000000000LL + t->nsec;
-
-       /* determine the minimum size for period */
-       step = 2 * (fm10k_read_reg(hw, FM10K_SYSTIME_CFG) &
-                   FM10K_SYSTIME_CFG_STEP_MASK);
-
-       /* verify the value is in range supported by hardware */
-       if ((period && (period < step)) || (period > U32_MAX))
-               return -ERANGE;
-
-       /* notify hardware of the request to begin sending pulses */
-       fm10k_write_sw_reg(hw, FM10K_SW_SYSTIME_PULSE(rq->perout.index),
-                          (u32)period);
-
-       return 0;
-}
-
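The 4 second bound in the function above follows from the register width: the period is written in nanoseconds to a 32-bit register, and U32_MAX ns == 4294967295 ns, roughly 4.29 s, so any whole-second request above 4 could never pass the (period > U32_MAX) check anyway.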
-static struct ptp_pin_desc fm10k_ptp_pd[2] = {
-       {
-               .name = "IEEE1588_PULSE0",
-               .index = 0,
-               .func = PTP_PF_PEROUT,
-               .chan = 0
-       },
-       {
-               .name = "IEEE1588_PULSE1",
-               .index = 1,
-               .func = PTP_PF_PEROUT,
-               .chan = 1
-       }
-};
-
-static int fm10k_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
-                           enum ptp_pin_function func, unsigned int chan)
-{
-       /* verify the requested pin is there */
-       if (pin >= ptp->n_pins || !ptp->pin_config)
-               return -EINVAL;
-
-       /* enforce locked channels, no changing them */
-       if (chan != ptp->pin_config[pin].chan)
-               return -EINVAL;
-
-       /* we want to keep the functions locked as well */
-       if (func != ptp->pin_config[pin].func)
-               return -EINVAL;
-
-       return 0;
-}
-
-void fm10k_ptp_register(struct fm10k_intfc *interface)
-{
-       struct ptp_clock_info *ptp_caps = &interface->ptp_caps;
-       struct device *dev = &interface->pdev->dev;
-       struct ptp_clock *ptp_clock;
-
-       snprintf(ptp_caps->name, sizeof(ptp_caps->name),
-                "%s", interface->netdev->name);
-       ptp_caps->owner         = THIS_MODULE;
-       /* This math is simply the inverse of the math in
-        * fm10k_adjust_systime_pf applied to an adjustment value
-        * of 2^30 - 1 which is the maximum value of the register:
-        *      max_ppb == ((2^30 - 1) * 5^9) / 2^31
-        */
-       ptp_caps->max_adj       = 976562;
-       ptp_caps->adjfreq       = fm10k_ptp_adjfreq;
-       ptp_caps->adjtime       = fm10k_ptp_adjtime;
-       ptp_caps->gettime64     = fm10k_ptp_gettime;
-       ptp_caps->settime64     = fm10k_ptp_settime;
-
-       /* provide pins if BAR4 is accessible */
-       if (interface->sw_addr) {
-               /* enable periodic outputs */
-               ptp_caps->n_per_out = 2;
-               ptp_caps->enable        = fm10k_ptp_enable;
-
-               /* enable clock pins */
-               ptp_caps->verify        = fm10k_ptp_verify;
-               ptp_caps->n_pins = 2;
-               ptp_caps->pin_config = fm10k_ptp_pd;
-       }
-
-       ptp_clock = ptp_clock_register(ptp_caps, dev);
-       if (IS_ERR(ptp_clock)) {
-               ptp_clock = NULL;
-               dev_err(dev, "ptp_clock_register failed\n");
-       } else {
-               dev_info(dev, "registered PHC device %s\n", ptp_caps->name);
-       }
-
-       interface->ptp_clock = ptp_clock;
-}
-
-void fm10k_ptp_unregister(struct fm10k_intfc *interface)
-{
-       struct ptp_clock *ptp_clock = interface->ptp_clock;
-       struct device *dev = &interface->pdev->dev;
-
-       if (!ptp_clock)
-               return;
-
-       interface->ptp_clock = NULL;
-
-       ptp_clock_unregister(ptp_clock);
-       dev_info(dev, "removed PHC %s\n", interface->ptp_caps.name);
-}
index b999897e50d82f13db320151f09e05c7f9e0e543..f8e87bf086b938fea1ca776cd4162dfe62587402 100644 (file)
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -481,7 +481,8 @@ static s32 fm10k_tlv_attr_validate(u32 *attr,
  *  up into an array of pointers stored in results.  The function will
  *  return FM10K_ERR_PARAM on any input or message error,
  *  FM10K_NOT_IMPLEMENTED for any attribute that is outside of the array
- *  and 0 on success.
+ *  and 0 on success. Any attributes not found in tlv_attr will be silently
+ *  ignored.
  **/
 static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results,
                                const struct fm10k_tlv_attr *tlv_attr)
@@ -518,14 +519,15 @@ static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results,
        while (offset < len) {
                attr_id = *attr & FM10K_TLV_ID_MASK;
 
-               if (attr_id < FM10K_TLV_RESULTS_MAX)
-                       err = fm10k_tlv_attr_validate(attr, tlv_attr);
-               else
-                       err = FM10K_NOT_IMPLEMENTED;
+               if (attr_id >= FM10K_TLV_RESULTS_MAX)
+                       return FM10K_NOT_IMPLEMENTED;
 
-               if (err < 0)
+               err = fm10k_tlv_attr_validate(attr, tlv_attr);
+               if (err == FM10K_NOT_IMPLEMENTED)
+                       ; /* silently ignore non-implemented attributes */
+               else if (err)
                        return err;
-               if (!err)
+               else
                        results[attr_id] = attr;
 
                /* update offset */
index e1845e0a17d81c1112f4bb343f59e8e08a1c65b4..a1f1027fe18402acffc05c507a3252968807e2c6 100644 (file)
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
index 5c0533054c5fb31c4edc2281c8445f0f4a4a4542..b8bc0618372052e780d2f1cb9250e72a06ea51fc 100644 (file)
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -225,11 +225,6 @@ struct fm10k_hw;
 #define FM10K_STATS_LOOPBACK_DROP      0x3806
 #define FM10K_STATS_NODESC_DROP                0x3807
 
-/* Timesync registers */
-#define FM10K_SYSTIME          0x3814
-#define FM10K_SYSTIME_CFG      0x3818
-#define FM10K_SYSTIME_CFG_STEP_MASK            0x0000000F
-
 /* PCIe state registers */
 #define FM10K_PHYADDR          0x381C
 
@@ -355,6 +350,7 @@ struct fm10k_hw;
 #define FM10K_VLAN_TABLE_VSI_MAX               64
 #define FM10K_VLAN_LENGTH_SHIFT                        16
 #define FM10K_VLAN_CLEAR                       BIT(15)
+#define FM10K_VLAN_OVERRIDE                    FM10K_VLAN_CLEAR
 #define FM10K_VLAN_ALL \
        ((FM10K_VLAN_TABLE_VID_MAX - 1) << FM10K_VLAN_LENGTH_SHIFT)
 
@@ -381,12 +377,6 @@ struct fm10k_hw;
 #define FM10K_VFSYSTIME                0x00040
 #define FM10K_VFITR(_n)                ((_n) + 0x00060)
 
-/* Registers contained in BAR 4 for Switch management */
-#define FM10K_SW_SYSTIME_ADJUST        0x0224D
-#define FM10K_SW_SYSTIME_ADJUST_MASK           0x3FFFFFFF
-#define FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE   0x80000000
-#define FM10K_SW_SYSTIME_PULSE(_n)     ((_n) + 0x02252)
-
 enum fm10k_int_source {
        fm10k_int_mailbox               = 0,
        fm10k_int_pcie_fault            = 1,
@@ -550,8 +540,6 @@ struct fm10k_mac_ops {
                                    struct fm10k_dglort_cfg *);
        void (*set_dma_mask)(struct fm10k_hw *, u64);
        s32 (*get_fault)(struct fm10k_hw *, int, struct fm10k_fault *);
-       s32 (*adjust_systime)(struct fm10k_hw *, s32 ppb);
-       u64 (*read_systime)(struct fm10k_hw *);
 };
 
 enum fm10k_mac_type {
@@ -643,7 +631,6 @@ struct fm10k_iov_ops {
        s32 (*set_lport)(struct fm10k_hw *, struct fm10k_vf_info *, u16, u8);
        void (*reset_lport)(struct fm10k_hw *, struct fm10k_vf_info *);
        void (*update_stats)(struct fm10k_hw *, struct fm10k_hw_stats_q *, u16);
-       s32 (*report_timestamp)(struct fm10k_hw *, struct fm10k_vf_info *, u64);
 };
 
 struct fm10k_iov_info {
@@ -667,7 +654,6 @@ struct fm10k_info {
 
 struct fm10k_hw {
        u32 __iomem *hw_addr;
-       u32 __iomem *sw_addr;
        void *back;
        struct fm10k_mac_info mac;
        struct fm10k_bus_info bus;
index 91f8d7311f3bfc08dd91ad7edb8a3641cdd58d21..3b06685ea63bd81b7d12406b4096f990b77bb0b0 100644 (file)
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -188,7 +188,7 @@ static s32 fm10k_update_vlan_vf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
        if (vsi)
                return FM10K_ERR_PARAM;
 
-       /* verify upper 4 bits of vid and length are 0 */
+       /* clever trick to verify reserved bits in both vid and length */
        if ((vid << 16 | vid) >> 28)
                return FM10K_ERR_PARAM;
 
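How the trick works: vid packs the length field in its upper 16 bits and the VLAN ID in its lower 16 bits, and both fields are 12 bits wide, so bits 12-15 of each half are required to be zero here. (vid << 16) lifts the VLAN ID's bits 12-15 into bits 28-31, right on top of the length field's reserved bits, so ((vid << 16 | vid) >> 28) is nonzero exactly when either half has a reserved bit set. Worked example: vid = 0x00020FFF (length 2, VLAN 4095) gives (0x0FFF0000 | 0x00020FFF) >> 28 == 0 and is accepted, while vid = 0x00001000 (VLAN 4096) gives 0x10001000 >> 28 == 1 and is rejected.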
@@ -228,7 +228,7 @@ s32 fm10k_msg_mac_vlan_vf(struct fm10k_hw *hw, u32 **results,
 
        ether_addr_copy(hw->mac.perm_addr, perm_addr);
        hw->mac.default_vid = vid & (FM10K_VLAN_TABLE_VID_MAX - 1);
-       hw->mac.vlan_override = !!(vid & FM10K_VLAN_CLEAR);
+       hw->mac.vlan_override = !!(vid & FM10K_VLAN_OVERRIDE);
 
        return 0;
 }
@@ -451,13 +451,6 @@ static s32 fm10k_update_xcast_mode_vf(struct fm10k_hw *hw, u16 glort, u8 mode)
        return mbx->ops.enqueue_tx(hw, mbx, msg);
 }
 
-const struct fm10k_tlv_attr fm10k_1588_msg_attr[] = {
-       FM10K_TLV_ATTR_U64(FM10K_1588_MSG_TIMESTAMP),
-       FM10K_TLV_ATTR_LAST
-};
-
-/* currently there is no shared 1588 timestamp handler */
-
 /**
  *  fm10k_update_hw_stats_vf - Updates hardware related statistics of VF
  *  @hw: pointer to hardware structure
@@ -509,52 +502,6 @@ static s32 fm10k_configure_dglort_map_vf(struct fm10k_hw *hw,
        return 0;
 }
 
-/**
- *  fm10k_adjust_systime_vf - Adjust systime frequency
- *  @hw: pointer to hardware structure
- *  @ppb: adjustment rate in parts per billion
- *
- *  This function takes an adjustment rate in parts per billion and will
- *  verify that this value is 0 as the VF cannot support adjusting the
- *  systime clock.
- *
- *  If the ppb value is non-zero the return is ERR_PARAM else success
- **/
-static s32 fm10k_adjust_systime_vf(struct fm10k_hw *hw, s32 ppb)
-{
-       /* The VF cannot adjust the clock frequency, however it should
-        * already have a syntonic clock with whichever host interface is
-        * running as the master for the host interface clock domain so
-        * there should be no frequency adjustment necessary.
-        */
-       return ppb ? FM10K_ERR_PARAM : 0;
-}
-
-/**
- *  fm10k_read_systime_vf - Reads value of systime registers
- *  @hw: pointer to the hardware structure
- *
- *  Function reads the content of 2 registers, combined to represent a 64 bit
- *  value measured in nanoseconds.  In order to guarantee the value is accurate
- *  we check the 32 most significant bits both before and after reading the
- *  32 least significant bits to verify they didn't change as we were reading
- *  the registers.
- **/
-static u64 fm10k_read_systime_vf(struct fm10k_hw *hw)
-{
-       u32 systime_l, systime_h, systime_tmp;
-
-       systime_h = fm10k_read_reg(hw, FM10K_VFSYSTIME + 1);
-
-       do {
-               systime_tmp = systime_h;
-               systime_l = fm10k_read_reg(hw, FM10K_VFSYSTIME);
-               systime_h = fm10k_read_reg(hw, FM10K_VFSYSTIME + 1);
-       } while (systime_tmp != systime_h);
-
-       return ((u64)systime_h << 32) | systime_l;
-}
-
 static const struct fm10k_msg_data fm10k_msg_data_vf[] = {
        FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
        FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
@@ -579,8 +526,6 @@ static const struct fm10k_mac_ops mac_ops_vf = {
        .rebind_hw_stats        = fm10k_rebind_hw_stats_vf,
        .configure_dglort_map   = fm10k_configure_dglort_map_vf,
        .get_host_state         = fm10k_get_host_state_generic,
-       .adjust_systime         = fm10k_adjust_systime_vf,
-       .read_systime           = fm10k_read_systime_vf,
 };
 
 static s32 fm10k_get_invariants_vf(struct fm10k_hw *hw)
index c4439f1313a016cf42486fc90f17d665f23f9368..2662f33c0c714071da9d654fb391966e4d4f8223 100644 (file)
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -29,7 +29,6 @@ enum fm10k_vf_tlv_msg_id {
        FM10K_VF_MSG_ID_MSIX,
        FM10K_VF_MSG_ID_MAC_VLAN,
        FM10K_VF_MSG_ID_LPORT_STATE,
-       FM10K_VF_MSG_ID_1588,
        FM10K_VF_MSG_ID_MAX,
 };
 
@@ -49,11 +48,6 @@ enum fm10k_tlv_lport_state_attr_id {
        FM10K_LPORT_STATE_MSG_MAX
 };
 
-enum fm10k_tlv_1588_attr_id {
-       FM10K_1588_MSG_TIMESTAMP,
-       FM10K_1588_MSG_MAX
-};
-
 #define FM10K_VF_MSG_MSIX_HANDLER(func) \
         FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_MSIX, NULL, func)
 
@@ -70,9 +64,5 @@ extern const struct fm10k_tlv_attr fm10k_lport_state_msg_attr[];
        FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_LPORT_STATE, \
                          fm10k_lport_state_msg_attr, func)
 
-extern const struct fm10k_tlv_attr fm10k_1588_msg_attr[];
-#define FM10K_VF_MSG_1588_HANDLER(func) \
-       FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_1588, fm10k_1588_msg_attr, func)
-
 extern const struct fm10k_info fm10k_vf_info;
 #endif /* _FM10K_VF_H */
index d25b3be5ba89ac90e9ee6791d5709090e4318580..2a6a5d3dd874e713e451f6347cdb2e34fcae98ef 100644 (file)
 #define I40E_PRIV_FLAGS_LINKPOLL_FLAG  BIT(1)
 #define I40E_PRIV_FLAGS_FD_ATR         BIT(2)
 #define I40E_PRIV_FLAGS_VEB_STATS      BIT(3)
-#define I40E_PRIV_FLAGS_PS             BIT(4)
 #define I40E_PRIV_FLAGS_HW_ATR_EVICT   BIT(5)
 
 #define I40E_NVM_VERSION_LO_SHIFT  0
 #define XSTRINGIFY(bar) STRINGIFY(bar)
 
 #define I40E_RX_DESC(R, i)                     \
-       ((ring_is_16byte_desc_enabled(R))       \
-               ? (union i40e_32byte_rx_desc *) \
-                       (&(((union i40e_16byte_rx_desc *)((R)->desc))[i])) \
-               : (&(((union i40e_32byte_rx_desc *)((R)->desc))[i])))
+       (&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))
 #define I40E_TX_DESC(R, i)                     \
        (&(((struct i40e_tx_desc *)((R)->desc))[i]))
 #define I40E_TX_CTXTDESC(R, i)                 \
@@ -202,6 +198,7 @@ struct i40e_lump_tracking {
 
 #define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4)
 #define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4)
+#define I40E_VF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT1_MAX_INDEX + 1) * 4)
 
 enum i40e_fd_stat_idx {
        I40E_FD_STAT_ATR,
@@ -319,8 +316,6 @@ struct i40e_pf {
 #define I40E_FLAG_RX_CSUM_ENABLED              BIT_ULL(1)
 #define I40E_FLAG_MSI_ENABLED                  BIT_ULL(2)
 #define I40E_FLAG_MSIX_ENABLED                 BIT_ULL(3)
-#define I40E_FLAG_RX_1BUF_ENABLED              BIT_ULL(4)
-#define I40E_FLAG_RX_PS_ENABLED                        BIT_ULL(5)
 #define I40E_FLAG_RSS_ENABLED                  BIT_ULL(6)
 #define I40E_FLAG_VMDQ_ENABLED                 BIT_ULL(7)
 #define I40E_FLAG_FDIR_REQUIRES_REINIT         BIT_ULL(8)
@@ -329,7 +324,6 @@ struct i40e_pf {
 #ifdef I40E_FCOE
 #define I40E_FLAG_FCOE_ENABLED                 BIT_ULL(11)
 #endif /* I40E_FCOE */
-#define I40E_FLAG_16BYTE_RX_DESC_ENABLED       BIT_ULL(13)
 #define I40E_FLAG_CLEAN_ADMINQ                 BIT_ULL(14)
 #define I40E_FLAG_FILTER_SYNC                  BIT_ULL(15)
 #define I40E_FLAG_SERVICE_CLIENT_REQUESTED     BIT_ULL(16)
@@ -533,9 +527,7 @@ struct i40e_vsi {
        u8  *rss_lut_user;  /* User configured lookup table entries */
 
        u16 max_frame;
-       u16 rx_hdr_len;
        u16 rx_buf_len;
-       u8  dtype;
 
        /* List of q_vectors allocated to this VSI */
        struct i40e_q_vector **q_vectors;
@@ -553,7 +545,7 @@ struct i40e_vsi {
        u16 num_queue_pairs; /* Used tx and rx pairs */
        u16 num_desc;
        enum i40e_vsi_type type;  /* VSI type, e.g., LAN, FCoE, etc */
-       u16 vf_id;              /* Virtual function ID for SRIOV VSIs */
+       s16 vf_id;              /* Virtual function ID for SRIOV VSIs */
 
        struct i40e_tc_configuration tc_config;
        struct i40e_aqc_vsi_properties_data info;
index 43bb4139d8963d0e8499652fcee420d5d6033644..738b42a44f20b420eccdb4851baca5215a7100c8 100644 (file)
@@ -617,10 +617,6 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
        hw->nvm_release_on_done = false;
        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
 
-       ret_code = i40e_aq_set_hmc_resource_profile(hw,
-                                                   I40E_HMC_PROFILE_DEFAULT,
-                                                   0,
-                                                   NULL);
        ret_code = 0;
 
        /* success! */
index 8d5c65ab6267054feeddaa1b06c5a5ee97077a5e..eacbe7430b4833279aa81c969d9f581c842deffb 100644 (file)
@@ -78,17 +78,17 @@ struct i40e_aq_desc {
 #define I40E_AQ_FLAG_EI_SHIFT  14
 #define I40E_AQ_FLAG_FE_SHIFT  15
 
-#define I40E_AQ_FLAG_DD                (1 << I40E_AQ_FLAG_DD_SHIFT)  /* 0x1    */
-#define I40E_AQ_FLAG_CMP       (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2    */
-#define I40E_AQ_FLAG_ERR       (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4    */
-#define I40E_AQ_FLAG_VFE       (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8    */
-#define I40E_AQ_FLAG_LB                (1 << I40E_AQ_FLAG_LB_SHIFT)  /* 0x200  */
-#define I40E_AQ_FLAG_RD                (1 << I40E_AQ_FLAG_RD_SHIFT)  /* 0x400  */
-#define I40E_AQ_FLAG_VFC       (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800  */
-#define I40E_AQ_FLAG_BUF       (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
-#define I40E_AQ_FLAG_SI                (1 << I40E_AQ_FLAG_SI_SHIFT)  /* 0x2000 */
-#define I40E_AQ_FLAG_EI                (1 << I40E_AQ_FLAG_EI_SHIFT)  /* 0x4000 */
-#define I40E_AQ_FLAG_FE                (1 << I40E_AQ_FLAG_FE_SHIFT)  /* 0x8000 */
+#define I40E_AQ_FLAG_DD                BIT(I40E_AQ_FLAG_DD_SHIFT)  /* 0x1    */
+#define I40E_AQ_FLAG_CMP       BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2    */
+#define I40E_AQ_FLAG_ERR       BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4    */
+#define I40E_AQ_FLAG_VFE       BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8    */
+#define I40E_AQ_FLAG_LB                BIT(I40E_AQ_FLAG_LB_SHIFT)  /* 0x200  */
+#define I40E_AQ_FLAG_RD                BIT(I40E_AQ_FLAG_RD_SHIFT)  /* 0x400  */
+#define I40E_AQ_FLAG_VFC       BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800  */
+#define I40E_AQ_FLAG_BUF       BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI                BIT(I40E_AQ_FLAG_SI_SHIFT)  /* 0x2000 */
+#define I40E_AQ_FLAG_EI                BIT(I40E_AQ_FLAG_EI_SHIFT)  /* 0x4000 */
+#define I40E_AQ_FLAG_FE                BIT(I40E_AQ_FLAG_FE_SHIFT)  /* 0x8000 */
 
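These conversions are value-preserving: BIT(nr) is the kernel's

	#define BIT(nr)			(1UL << (nr))

from include/linux/bitops.h, so each flag keeps its numeric value (the 0x1 ... 0x8000 comments still hold) and merely gains unsigned long width; the link-speed and RSS defines converted later in this file follow the same pattern.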
 /* error codes */
 enum i40e_admin_queue_err {
@@ -205,10 +205,6 @@ enum i40e_admin_queue_opc {
        i40e_aqc_opc_resume_port_tx                             = 0x041C,
        i40e_aqc_opc_configure_partition_bw                     = 0x041D,
 
-       /* hmc */
-       i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
-       i40e_aqc_opc_set_hmc_resource_profile   = 0x0501,
-
        /* phy commands*/
        i40e_aqc_opc_get_phy_abilities          = 0x0600,
        i40e_aqc_opc_set_phy_config             = 0x0601,
@@ -429,6 +425,7 @@ struct i40e_aqc_list_capabilities_element_resp {
 #define I40E_AQ_CAP_ID_SDP             0x0062
 #define I40E_AQ_CAP_ID_MDIO            0x0063
 #define I40E_AQ_CAP_ID_WSR_PROT                0x0064
+#define I40E_AQ_CAP_ID_NVM_MGMT                0x0080
 #define I40E_AQ_CAP_ID_FLEX10          0x00F1
 #define I40E_AQ_CAP_ID_CEM             0x00F2
 
@@ -1585,27 +1582,6 @@ struct i40e_aqc_configure_partition_bw_data {
 
 I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
 
-/* Get and set the active HMC resource profile and status.
- * (direct 0x0500) and (direct 0x0501)
- */
-struct i40e_aq_get_set_hmc_resource_profile {
-       u8      pm_profile;
-       u8      pe_vf_enabled;
-       u8      reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
-
-enum i40e_aq_hmc_profile {
-       /* I40E_HMC_PROFILE_NO_CHANGE    = 0, reserved */
-       I40E_HMC_PROFILE_DEFAULT        = 1,
-       I40E_HMC_PROFILE_FAVOR_VF       = 2,
-       I40E_HMC_PROFILE_EQUAL          = 3,
-};
-
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK       0xF
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK    0x3F
-
 /* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
 
 /* set in param0 for get phy abilities to report qualified modules */
@@ -1652,11 +1628,11 @@ enum i40e_aq_phy_type {
 
 enum i40e_aq_link_speed {
        I40E_LINK_SPEED_UNKNOWN = 0,
-       I40E_LINK_SPEED_100MB   = (1 << I40E_LINK_SPEED_100MB_SHIFT),
-       I40E_LINK_SPEED_1GB     = (1 << I40E_LINK_SPEED_1000MB_SHIFT),
-       I40E_LINK_SPEED_10GB    = (1 << I40E_LINK_SPEED_10GB_SHIFT),
-       I40E_LINK_SPEED_40GB    = (1 << I40E_LINK_SPEED_40GB_SHIFT),
-       I40E_LINK_SPEED_20GB    = (1 << I40E_LINK_SPEED_20GB_SHIFT)
+       I40E_LINK_SPEED_100MB   = BIT(I40E_LINK_SPEED_100MB_SHIFT),
+       I40E_LINK_SPEED_1GB     = BIT(I40E_LINK_SPEED_1000MB_SHIFT),
+       I40E_LINK_SPEED_10GB    = BIT(I40E_LINK_SPEED_10GB_SHIFT),
+       I40E_LINK_SPEED_40GB    = BIT(I40E_LINK_SPEED_40GB_SHIFT),
+       I40E_LINK_SPEED_20GB    = BIT(I40E_LINK_SPEED_20GB_SHIFT)
 };
 
 struct i40e_aqc_module_desc {
@@ -1927,9 +1903,9 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
 /* Used for 0x0704 as well as for 0x0705 commands */
 #define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT                1
 #define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
-                               (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+                               BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
 #define I40E_AQ_ANVM_FEATURE           0
-#define I40E_AQ_ANVM_IMMEDIATE_FIELD   (1 << FEATURE_OR_IMMEDIATE_SHIFT)
+#define I40E_AQ_ANVM_IMMEDIATE_FIELD   BIT(FEATURE_OR_IMMEDIATE_SHIFT)
 struct i40e_aqc_nvm_config_data_feature {
        __le16 feature_id;
 #define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY           0x01
@@ -2226,13 +2202,11 @@ I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp);
  */
 struct i40e_aqc_lldp_set_local_mib {
 #define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT       0
-#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK        (1 << SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
-#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK        (1 << \
-                                       SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK        BIT(SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
 #define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB        0x0
 #define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT   (1)
-#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK    (1 << \
-                               SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK \
+                       BIT(SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
 #define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS         0x1
        u8      type;
        u8      reserved0;
@@ -2250,7 +2224,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_set_local_mib);
 struct i40e_aqc_lldp_stop_start_specific_agent {
 #define I40E_AQC_START_SPECIFIC_AGENT_SHIFT    0
 #define I40E_AQC_START_SPECIFIC_AGENT_MASK \
-                               (1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
+                               BIT(I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
        u8      command;
        u8      reserved[15];
 };
@@ -2303,7 +2277,7 @@ struct i40e_aqc_del_udp_tunnel_completion {
 I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
 
 struct i40e_aqc_get_set_rss_key {
-#define I40E_AQC_SET_RSS_KEY_VSI_VALID         (0x1 << 15)
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID         BIT(15)
 #define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT      0
 #define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK       (0x3FF << \
                                        I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
@@ -2323,14 +2297,13 @@ struct i40e_aqc_get_set_rss_key_data {
 I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
 
 struct  i40e_aqc_get_set_rss_lut {
-#define I40E_AQC_SET_RSS_LUT_VSI_VALID         (0x1 << 15)
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID         BIT(15)
 #define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT      0
 #define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK       (0x3FF << \
                                        I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
        __le16  vsi_id;
 #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT  0
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK   (0x1 << \
-                                       I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK   BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
 
 #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI    0
 #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF     1
index bf6b453d93a1a2fbfacb738f02fef36cb6929503..a4601d97fb24155f978ea9bb8de76276a14efc8d 100644 (file)
@@ -217,7 +217,7 @@ struct i40e_client {
 #define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE      BIT(0)
 #define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS      BIT(2)
        enum i40e_client_type type;
-       struct i40e_client_ops *ops;    /* client ops provided by the client */
+       const struct i40e_client_ops *ops; /* client ops provided by the client */
 };
 
 static inline bool i40e_client_is_registered(struct i40e_client *client)
index f3c1d8890cbbbbcfea82a614d6030affdc15ba9c..4a934e14574d4de7786dd06052f847b87a6c9ee8 100644 (file)
@@ -61,6 +61,7 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
                case I40E_DEV_ID_1G_BASE_T_X722:
                case I40E_DEV_ID_10G_BASE_T_X722:
                case I40E_DEV_ID_SFP_I_X722:
+               case I40E_DEV_ID_QSFP_I_X722:
                        hw->mac.type = I40E_MAC_X722;
                        break;
                default:
@@ -2037,6 +2038,76 @@ i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
        return status;
 }
 
+/**
+ * i40e_aq_set_vsi_mc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+                                                        u16 seid, bool enable,
+                                                        u16 vid,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+               (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+       enum i40e_status_code status;
+       u16 flags = 0;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+       if (enable)
+               flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
+
+       cmd->promiscuous_flags = cpu_to_le16(flags);
+       cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
+       cmd->seid = cpu_to_le16(seid);
+       cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_aq_set_vsi_uc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+                                                        u16 seid, bool enable,
+                                                        u16 vid,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+               (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+       enum i40e_status_code status;
+       u16 flags = 0;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+       if (enable)
+               flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+
+       cmd->promiscuous_flags = cpu_to_le16(flags);
+       cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+       cmd->seid = cpu_to_le16(seid);
+       cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
+
 /**
  * i40e_aq_set_vsi_broadcast
  * @hw: pointer to the hw struct
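
The two helpers added above follow the driver's usual admin-queue pattern: fill a direct command descriptor, set the unicast or multicast promiscuous flag plus the VLAN-valid bit in the tag field, and send the command. A hypothetical caller, assuming an initialized hw handle and a valid VSI seid (the wrapper below is illustrative, not part of this patch):

    /* Illustrative wrapper; maps the AQ status to a generic errno. */
    static int example_uc_promisc_on_vlan(struct i40e_hw *hw, u16 seid,
                                          u16 vid, bool enable)
    {
            enum i40e_status_code status;

            status = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid, enable,
                                                        vid, NULL);
            return status ? -EIO : 0;
    }
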
@@ -2667,10 +2738,7 @@ i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
                        u16 *rules_used, u16 *rules_free)
 {
        /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */
-       if (rule_type != I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
-               if (!rule_id)
-                       return I40E_ERR_PARAM;
-       } else {
+       if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
                /* count and mr_list shall be valid for rule_type INGRESS VLAN
                 * mirroring. For other rule_type, count and rule_type should
                 * not matter.
@@ -2786,36 +2854,6 @@ i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
        return status;
 }
 
-/**
- * i40e_aq_set_hmc_resource_profile
- * @hw: pointer to the hw struct
- * @profile: type of profile the HMC is to be set as
- * @pe_vf_enabled_count: the number of PE enabled VFs the system has
- * @cmd_details: pointer to command details structure or NULL
- *
- * set the HMC profile of the device.
- **/
-i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
-                               enum i40e_aq_hmc_profile profile,
-                               u8 pe_vf_enabled_count,
-                               struct i40e_asq_cmd_details *cmd_details)
-{
-       struct i40e_aq_desc desc;
-       struct i40e_aq_get_set_hmc_resource_profile *cmd =
-               (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw;
-       i40e_status status;
-
-       i40e_fill_default_direct_cmd_desc(&desc,
-                                       i40e_aqc_opc_set_hmc_resource_profile);
-
-       cmd->pm_profile = (u8)profile;
-       cmd->pe_vf_enabled = pe_vf_enabled_count;
-
-       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-       return status;
-}
-
 /**
  * i40e_aq_request_resource
  * @hw: pointer to the hw struct
@@ -3138,6 +3176,12 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
                        p->wr_csr_prot = (u64)number;
                        p->wr_csr_prot |= (u64)logical_id << 32;
                        break;
+               case I40E_AQ_CAP_ID_NVM_MGMT:
+                       if (number & I40E_NVM_MGMT_SEC_REV_DISABLED)
+                               p->sec_rev_disabled = true;
+                       if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
+                               p->update_disabled = true;
+                       break;
                default:
                        break;
                }
index 83dccf1792e742fb6d37846d0af45af4e74865fe..e6af8c8d701972d40660d3391c90adab59b9d14f 100644 (file)
@@ -268,13 +268,11 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
                         rx_ring->queue_index,
                         rx_ring->reg_idx);
                dev_info(&pf->pdev->dev,
-                        "    rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
-                        i, rx_ring->rx_hdr_len,
-                        rx_ring->rx_buf_len,
-                        rx_ring->dtype);
+                        "    rx_rings[%i]: rx_buf_len = %d\n",
+                        i, rx_ring->rx_buf_len);
                dev_info(&pf->pdev->dev,
-                        "    rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
-                        i, ring_is_ps_enabled(rx_ring),
+                        "    rx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+                        i,
                         rx_ring->next_to_use,
                         rx_ring->next_to_clean,
                         rx_ring->ring_active);
@@ -325,9 +323,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
                         i, tx_ring->state,
                         tx_ring->queue_index,
                         tx_ring->reg_idx);
-               dev_info(&pf->pdev->dev,
-                        "    tx_rings[%i]: dtype = %d\n",
-                        i, tx_ring->dtype);
                dev_info(&pf->pdev->dev,
                         "    tx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
                         i,
@@ -365,8 +360,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
                 "    work_limit = %d\n",
                 vsi->work_limit);
        dev_info(&pf->pdev->dev,
-                "    max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n",
-                vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype);
+                "    max_frame = %d, rx_buf_len = %d dtype = %d\n",
+                vsi->max_frame, vsi->rx_buf_len, 0);
        dev_info(&pf->pdev->dev,
                 "    num_q_vectors = %i, base_vector = %i\n",
                 vsi->num_q_vectors, vsi->base_vector);
@@ -591,13 +586,6 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
                                         "   d[%03x] = 0x%016llx 0x%016llx\n",
                                         i, txd->buffer_addr,
                                         txd->cmd_type_offset_bsz);
-                       } else if (sizeof(union i40e_rx_desc) ==
-                                  sizeof(union i40e_16byte_rx_desc)) {
-                               rxd = I40E_RX_DESC(ring, i);
-                               dev_info(&pf->pdev->dev,
-                                        "   d[%03x] = 0x%016llx 0x%016llx\n",
-                                        i, rxd->read.pkt_addr,
-                                        rxd->read.hdr_addr);
                        } else {
                                rxd = I40E_RX_DESC(ring, i);
                                dev_info(&pf->pdev->dev,
@@ -619,13 +607,6 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
                                 "vsi = %02i tx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
                                 vsi_seid, ring_id, desc_n,
                                 txd->buffer_addr, txd->cmd_type_offset_bsz);
-               } else if (sizeof(union i40e_rx_desc) ==
-                          sizeof(union i40e_16byte_rx_desc)) {
-                       rxd = I40E_RX_DESC(ring, desc_n);
-                       dev_info(&pf->pdev->dev,
-                                "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
-                                vsi_seid, ring_id, desc_n,
-                                rxd->read.pkt_addr, rxd->read.hdr_addr);
                } else {
                        rxd = I40E_RX_DESC(ring, desc_n);
                        dev_info(&pf->pdev->dev,
index dd4457d29e9828e09baaa4f66c112cfa646d1288..d701861c6e1eb38d55c84c886c8afbb54d304382 100644 (file)
@@ -45,6 +45,7 @@
 #define I40E_DEV_ID_1G_BASE_T_X722     0x37D1
 #define I40E_DEV_ID_10G_BASE_T_X722    0x37D2
 #define I40E_DEV_ID_SFP_I_X722         0x37D3
+#define I40E_DEV_ID_QSFP_I_X722                0x37D4
 
 #define i40e_is_40G_device(d)          ((d) == I40E_DEV_ID_QSFP_A  || \
                                         (d) == I40E_DEV_ID_QSFP_B  || \
index 8a83d45148124d12db9ada6bf0164300d338dd2e..51a994d858708d455a2f3062c822588e51427957 100644 (file)
@@ -235,7 +235,6 @@ static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = {
        "LinkPolling",
        "flow-director-atr",
        "veb-stats",
-       "packet-split",
        "hw-atr-eviction",
 };
 
@@ -1275,6 +1274,13 @@ static int i40e_set_ringparam(struct net_device *netdev,
                }
 
                for (i = 0; i < vsi->num_queue_pairs; i++) {
+                       /* this is to allow wr32 to have something to write to
+                        * during early allocation of Rx buffers
+                        */
+                       u32 __iomem faketail = 0;
+                       struct i40e_ring *ring;
+                       u16 unused;
+
                        /* clone ring and setup updated count */
                        rx_rings[i] = *vsi->rx_rings[i];
                        rx_rings[i].count = new_rx_count;
@@ -1283,12 +1289,22 @@ static int i40e_set_ringparam(struct net_device *netdev,
                         */
                        rx_rings[i].desc = NULL;
                        rx_rings[i].rx_bi = NULL;
+                       rx_rings[i].tail = (u8 __iomem *)&faketail;
                        err = i40e_setup_rx_descriptors(&rx_rings[i]);
+                       if (err)
+                               goto rx_unwind;
+
+                       /* now allocate the Rx buffers to make sure the OS
+                        * has enough memory, any failure here means abort
+                        */
+                       ring = &rx_rings[i];
+                       unused = I40E_DESC_UNUSED(ring);
+                       err = i40e_alloc_rx_buffers(ring, unused);
+rx_unwind:
                        if (err) {
-                               while (i) {
-                                       i--;
+                               do {
                                        i40e_free_rx_resources(&rx_rings[i]);
-                               }
+                               } while (i--);
                                kfree(rx_rings);
                                rx_rings = NULL;
 
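
The error path above switches from a "while (i) { i--; free(i); }" loop to "do { free(i); } while (i--);" because failure can now occur after ring i's descriptors were already set up (during the new early buffer fill), so entry i itself must be released along with entries 0..i-1. A standalone sketch of the difference, with a hypothetical release():

    #include <stdio.h>

    static void release(int idx)
    {
            printf("free %d\n", idx);
    }

    int main(void)
    {
            int i = 2;      /* entry 2 failed after partial setup */

            /* the old idiom, while (i) { i--; release(i); },
             * would free only entries 1 and 0; the new one
             * frees 2, 1, 0:
             */
            do {
                    release(i);
            } while (i--);

            return 0;
    }
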
@@ -1314,6 +1330,17 @@ static int i40e_set_ringparam(struct net_device *netdev,
        if (rx_rings) {
                for (i = 0; i < vsi->num_queue_pairs; i++) {
                        i40e_free_rx_resources(vsi->rx_rings[i]);
+                       /* get the real tail offset */
+                       rx_rings[i].tail = vsi->rx_rings[i]->tail;
+                       /* this is to fake out the allocation routine
+                        * into thinking it has to realloc everything
+                        * but the recycling logic will let us re-use
+                        * the buffers allocated above
+                        */
+                       rx_rings[i].next_to_use = 0;
+                       rx_rings[i].next_to_clean = 0;
+                       rx_rings[i].next_to_alloc = 0;
+                       /* do a struct copy */
                        *vsi->rx_rings[i] = rx_rings[i];
                }
                kfree(rx_rings);
@@ -2506,7 +2533,6 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
 
        if (!vsi)
                return -EINVAL;
-
        pf = vsi->back;
 
        if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
@@ -2564,15 +2590,18 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
        input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
 
        if (ntohl(fsp->m_ext.data[1])) {
-               if (ntohl(fsp->h_ext.data[1]) >= pf->num_alloc_vfs) {
-                       netif_info(pf, drv, vsi->netdev, "Invalid VF id\n");
+               vf_id = ntohl(fsp->h_ext.data[1]);
+               if (vf_id >= pf->num_alloc_vfs) {
+                       netif_info(pf, drv, vsi->netdev,
+                                  "Invalid VF id %d\n", vf_id);
                        goto free_input;
                }
-               vf_id = ntohl(fsp->h_ext.data[1]);
                /* Find vsi id from vf id and override dest vsi */
                input->dest_vsi = pf->vf[vf_id].lan_vsi_id;
                if (input->q_index >= pf->vf[vf_id].num_queue_pairs) {
-                       netif_info(pf, drv, vsi->netdev, "Invalid queue id\n");
+                       netif_info(pf, drv, vsi->netdev,
+                                  "Invalid queue id %d for VF %d\n",
+                                  input->q_index, vf_id);
                        goto free_input;
                }
        }
@@ -2827,8 +2856,6 @@ static u32 i40e_get_priv_flags(struct net_device *dev)
                I40E_PRIV_FLAGS_FD_ATR : 0;
        ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ?
                I40E_PRIV_FLAGS_VEB_STATS : 0;
-       ret_flags |= pf->flags & I40E_FLAG_RX_PS_ENABLED ?
-               I40E_PRIV_FLAGS_PS : 0;
        ret_flags |= pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE ?
                0 : I40E_PRIV_FLAGS_HW_ATR_EVICT;
 
@@ -2849,23 +2876,6 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
 
        /* NOTE: MFP is not settable */
 
-       /* allow the user to control the method of receive
-        * buffer DMA, whether the packet is split at header
-        * boundaries into two separate buffers.  In some cases
-        * one routine or the other will perform better.
-        */
-       if ((flags & I40E_PRIV_FLAGS_PS) &&
-           !(pf->flags & I40E_FLAG_RX_PS_ENABLED)) {
-               pf->flags |= I40E_FLAG_RX_PS_ENABLED;
-               pf->flags &= ~I40E_FLAG_RX_1BUF_ENABLED;
-               reset_required = true;
-       } else if (!(flags & I40E_PRIV_FLAGS_PS) &&
-                  (pf->flags & I40E_FLAG_RX_PS_ENABLED)) {
-               pf->flags &= ~I40E_FLAG_RX_PS_ENABLED;
-               pf->flags |= I40E_FLAG_RX_1BUF_ENABLED;
-               reset_required = true;
-       }
-
        if (flags & I40E_PRIV_FLAGS_LINKPOLL_FLAG)
                pf->flags |= I40E_FLAG_LINK_POLLING_ENABLED;
        else
index dc3b3939dd0a03b02b55919335ba7c1bf47133e3..46a3a674c635b98ba4aaf1dc038279401df08f0d 100644 (file)
@@ -46,7 +46,7 @@ static const char i40e_driver_string[] =
 
 #define DRV_VERSION_MAJOR 1
 #define DRV_VERSION_MINOR 5
-#define DRV_VERSION_BUILD 5
+#define DRV_VERSION_BUILD 10
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -91,6 +91,7 @@ static const struct pci_device_id i40e_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
+       {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_I_X722), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
        /* required last entry */
@@ -327,7 +328,7 @@ static void i40e_tx_timeout(struct net_device *netdev)
                unsigned long trans_start;
 
                q = netdev_get_tx_queue(netdev, i);
-               trans_start = q->trans_start ? : netdev->trans_start;
+               trans_start = q->trans_start;
                if (netif_xmit_stopped(q) &&
                    time_after(jiffies,
                               (trans_start + netdev->watchdog_timeo))) {
@@ -396,24 +397,6 @@ static void i40e_tx_timeout(struct net_device *netdev)
        pf->tx_timeout_recovery_level++;
 }
 
-/**
- * i40e_release_rx_desc - Store the new tail and head values
- * @rx_ring: ring to bump
- * @val: new head index
- **/
-static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
-{
-       rx_ring->next_to_use = val;
-
-       /* Force memory writes to complete before letting h/w
-        * know there are new descriptors to fetch.  (Only
-        * applicable for weak-ordered memory model archs,
-        * such as IA-64).
-        */
-       wmb();
-       writel(val, rx_ring->tail);
-}
-
 /**
  * i40e_get_vsi_stats_struct - Get System Network Statistics
  * @vsi: the VSI we care about
@@ -2098,6 +2081,12 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
                }
        }
 
+       /* if the VF is not trusted do not do promisc */
+       if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
+               clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
+               goto out;
+       }
+
        /* check for changes in promiscuous modes */
        if (changed_flags & IFF_ALLMULTI) {
                bool cur_multipromisc;
@@ -2866,34 +2855,21 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
        memset(&rx_ctx, 0, sizeof(rx_ctx));
 
        ring->rx_buf_len = vsi->rx_buf_len;
-       ring->rx_hdr_len = vsi->rx_hdr_len;
 
        rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
-       rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
 
        rx_ctx.base = (ring->dma / 128);
        rx_ctx.qlen = ring->count;
 
-       if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
-               set_ring_16byte_desc_enabled(ring);
-               rx_ctx.dsize = 0;
-       } else {
-               rx_ctx.dsize = 1;
-       }
+       /* use 32 byte descriptors */
+       rx_ctx.dsize = 1;
 
-       rx_ctx.dtype = vsi->dtype;
-       if (vsi->dtype) {
-               set_ring_ps_enabled(ring);
-               rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
-                                 I40E_RX_SPLIT_IP      |
-                                 I40E_RX_SPLIT_TCP_UDP |
-                                 I40E_RX_SPLIT_SCTP;
-       } else {
-               rx_ctx.hsplit_0 = 0;
-       }
+       /* descriptor type is always zero
+        * rx_ctx.dtype = 0;
+        */
+       rx_ctx.hsplit_0 = 0;
 
-       rx_ctx.rxmax = min_t(u16, vsi->max_frame,
-                                 (chain_len * ring->rx_buf_len));
+       rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
        if (hw->revision_id == 0)
                rx_ctx.lrxqthresh = 0;
        else
@@ -2930,12 +2906,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
        ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
        writel(0, ring->tail);
 
-       if (ring_is_ps_enabled(ring)) {
-               i40e_alloc_rx_headers(ring);
-               i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring));
-       } else {
-               i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring));
-       }
+       i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
 
        return 0;
 }
@@ -2974,40 +2945,18 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
        else
                vsi->max_frame = I40E_RXBUFFER_2048;
 
-       /* figure out correct receive buffer length */
-       switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
-                                   I40E_FLAG_RX_PS_ENABLED)) {
-       case I40E_FLAG_RX_1BUF_ENABLED:
-               vsi->rx_hdr_len = 0;
-               vsi->rx_buf_len = vsi->max_frame;
-               vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
-               break;
-       case I40E_FLAG_RX_PS_ENABLED:
-               vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
-               vsi->rx_buf_len = I40E_RXBUFFER_2048;
-               vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
-               break;
-       default:
-               vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
-               vsi->rx_buf_len = I40E_RXBUFFER_2048;
-               vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
-               break;
-       }
+       vsi->rx_buf_len = I40E_RXBUFFER_2048;
 
 #ifdef I40E_FCOE
        /* setup rx buffer for FCoE */
        if ((vsi->type == I40E_VSI_FCOE) &&
            (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
-               vsi->rx_hdr_len = 0;
                vsi->rx_buf_len = I40E_RXBUFFER_3072;
                vsi->max_frame = I40E_RXBUFFER_3072;
-               vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
        }
 
 #endif /* I40E_FCOE */
        /* round up for the chip's needs */
-       vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
-                               BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT));
        vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
                                BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
 
@@ -7523,10 +7472,6 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
                rx_ring->count = vsi->num_desc;
                rx_ring->size = 0;
                rx_ring->dcb_tc = 0;
-               if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
-                       set_ring_16byte_desc_enabled(rx_ring);
-               else
-                       clear_ring_16byte_desc_enabled(rx_ring);
                rx_ring->rx_itr_setting = pf->rx_itr_default;
                vsi->rx_rings[i] = rx_ring;
        }
@@ -8082,24 +8027,45 @@ static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
 {
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
+       u16 vf_id = vsi->vf_id;
        u8 i;
 
        /* Fill out hash function seed */
        if (seed) {
                u32 *seed_dw = (u32 *)seed;
 
-               for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
-                       i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
+               if (vsi->type == I40E_VSI_MAIN) {
+                       for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+                               i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i),
+                                                 seed_dw[i]);
+               } else if (vsi->type == I40E_VSI_SRIOV) {
+                       for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
+                               i40e_write_rx_ctl(hw,
+                                                 I40E_VFQF_HKEY1(i, vf_id),
+                                                 seed_dw[i]);
+               } else {
+                       dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
+               }
        }
 
        if (lut) {
                u32 *lut_dw = (u32 *)lut;
 
-               if (lut_size != I40E_HLUT_ARRAY_SIZE)
-                       return -EINVAL;
-
-               for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
-                       wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
+               if (vsi->type == I40E_VSI_MAIN) {
+                       if (lut_size != I40E_HLUT_ARRAY_SIZE)
+                               return -EINVAL;
+                       for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
+                               wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
+               } else if (vsi->type == I40E_VSI_SRIOV) {
+                       if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
+                               return -EINVAL;
+                       for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
+                               i40e_write_rx_ctl(hw,
+                                                 I40E_VFQF_HLUT1(i, vf_id),
+                                                 lut_dw[i]);
+               } else {
+                       dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
+               }
        }
        i40e_flush(hw);
 
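
i40e_config_rss_reg() now dispatches on the VSI type: the main VSI's hash seed and lookup table go to the PF-wide HKEY/HLUT registers, while an SR-IOV VSI's go to the per-VF HKEY1/HLUT1 banks indexed by vf_id, after checking the LUT size against the matching array size. A condensed sketch of the seed path, using the register macros from the hunk above (the helper name is hypothetical):

    /* Hypothetical condensation of the seed dispatch above. */
    static void example_write_rss_seed(struct i40e_hw *hw, int vsi_type,
                                       u16 vf_id, const u32 *seed_dw)
    {
            u8 i;

            if (vsi_type == I40E_VSI_MAIN) {
                    for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
                            i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i),
                                              seed_dw[i]);
            } else if (vsi_type == I40E_VSI_SRIOV) {
                    for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
                            i40e_write_rx_ctl(hw,
                                              I40E_VFQF_HKEY1(i, vf_id),
                                              seed_dw[i]);
            }
    }
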
@@ -8450,11 +8416,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
                    I40E_FLAG_MSI_ENABLED     |
                    I40E_FLAG_MSIX_ENABLED;
 
-       if (iommu_present(&pci_bus_type))
-               pf->flags |= I40E_FLAG_RX_PS_ENABLED;
-       else
-               pf->flags |= I40E_FLAG_RX_1BUF_ENABLED;
-
        /* Set default ITR */
        pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
        pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
@@ -8555,6 +8516,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
                             I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
                             I40E_FLAG_WB_ON_ITR_CAPABLE |
                             I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE |
+                            I40E_FLAG_NO_PCI_LINK_CHECK |
                             I40E_FLAG_100M_SGMII_CAPABLE |
                             I40E_FLAG_USE_SET_LLDP_MIB |
                             I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
@@ -9110,40 +9072,44 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
        np = netdev_priv(netdev);
        np->vsi = vsi;
 
-       netdev->hw_enc_features |= NETIF_F_IP_CSUM             |
-                                  NETIF_F_IPV6_CSUM           |
-                                  NETIF_F_TSO                 |
-                                  NETIF_F_TSO6                |
-                                  NETIF_F_TSO_ECN             |
-                                  NETIF_F_GSO_GRE             |
-                                  NETIF_F_GSO_UDP_TUNNEL      |
-                                  NETIF_F_GSO_UDP_TUNNEL_CSUM |
+       netdev->hw_enc_features |= NETIF_F_SG                   |
+                                  NETIF_F_IP_CSUM              |
+                                  NETIF_F_IPV6_CSUM            |
+                                  NETIF_F_HIGHDMA              |
+                                  NETIF_F_SOFT_FEATURES        |
+                                  NETIF_F_TSO                  |
+                                  NETIF_F_TSO_ECN              |
+                                  NETIF_F_TSO6                 |
+                                  NETIF_F_GSO_GRE              |
+                                  NETIF_F_GSO_GRE_CSUM         |
+                                  NETIF_F_GSO_IPIP             |
+                                  NETIF_F_GSO_SIT              |
+                                  NETIF_F_GSO_UDP_TUNNEL       |
+                                  NETIF_F_GSO_UDP_TUNNEL_CSUM  |
+                                  NETIF_F_GSO_PARTIAL          |
+                                  NETIF_F_SCTP_CRC             |
+                                  NETIF_F_RXHASH               |
+                                  NETIF_F_RXCSUM               |
                                   0;
 
-       netdev->features = NETIF_F_SG                  |
-                          NETIF_F_IP_CSUM             |
-                          NETIF_F_SCTP_CRC            |
-                          NETIF_F_HIGHDMA             |
-                          NETIF_F_GSO_UDP_TUNNEL      |
-                          NETIF_F_GSO_GRE             |
-                          NETIF_F_HW_VLAN_CTAG_TX     |
-                          NETIF_F_HW_VLAN_CTAG_RX     |
-                          NETIF_F_HW_VLAN_CTAG_FILTER |
-                          NETIF_F_IPV6_CSUM           |
-                          NETIF_F_TSO                 |
-                          NETIF_F_TSO_ECN             |
-                          NETIF_F_TSO6                |
-                          NETIF_F_RXCSUM              |
-                          NETIF_F_RXHASH              |
-                          0;
+       if (!(pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE))
+               netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+       netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+
+       /* record features VLANs can make use of */
+       netdev->vlan_features |= netdev->hw_enc_features |
+                                NETIF_F_TSO_MANGLEID;
 
        if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
-               netdev->features |= NETIF_F_NTUPLE;
-       if (pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE)
-               netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+               netdev->hw_features |= NETIF_F_NTUPLE;
+
+       netdev->hw_features |= netdev->hw_enc_features  |
+                              NETIF_F_HW_VLAN_CTAG_TX  |
+                              NETIF_F_HW_VLAN_CTAG_RX;
 
-       /* copy netdev features into list of user selectable features */
-       netdev->hw_features |= netdev->features;
+       netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
+       netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
 
        if (vsi->type == I40E_VSI_MAIN) {
                SET_NETDEV_DEV(netdev, &pf->pdev->dev);
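
The feature-set rework above is hierarchical: hw_enc_features is the base set, hw_features layers the user-toggleable VLAN offloads (and NTUPLE, when not in MFP mode) on top of it, and features is hw_features plus VLAN filtering. Since NETIF_F_HW_VLAN_CTAG_FILTER is set in features but not in hw_features, it is enabled yet not user-clearable through ethtool. A toy illustration of that containment (bit values arbitrary, not the real NETIF_F_* masks):

    #include <stdio.h>

    #define F_SG          (1u << 0)
    #define F_VLAN_TX     (1u << 1)
    #define F_VLAN_FILTER (1u << 2)

    int main(void)
    {
            unsigned int hw_enc_features = F_SG;
            unsigned int hw_features = hw_enc_features | F_VLAN_TX;
            unsigned int features = hw_features | F_VLAN_FILTER;

            /* bits in features but not hw_features are fixed on */
            printf("toggleable %#x enabled %#x forced-on %#x\n",
                   hw_features, features, features & ~hw_features);
            return 0;
    }
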
@@ -9182,12 +9148,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
 
        ether_addr_copy(netdev->dev_addr, mac_addr);
        ether_addr_copy(netdev->perm_addr, mac_addr);
-       /* vlan gets same features (except vlan offload)
-        * after any tweaks for specific VSI types
-        */
-       netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
-                                                    NETIF_F_HW_VLAN_CTAG_RX |
-                                                  NETIF_F_HW_VLAN_CTAG_FILTER);
+
        netdev->priv_flags |= IFF_UNICAST_FLT;
        netdev->priv_flags |= IFF_SUPP_NOFCS;
        /* Setup netdev TC information */
@@ -10686,11 +10647,9 @@ static void i40e_print_features(struct i40e_pf *pf)
 #ifdef CONFIG_PCI_IOV
        i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
 #endif
-       i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d RX: %s",
+       i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
                      pf->hw.func_caps.num_vsis,
-                     pf->vsi[pf->lan_vsi]->num_queue_pairs,
-                     pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF");
-
+                     pf->vsi[pf->lan_vsi]->num_queue_pairs);
        if (pf->flags & I40E_FLAG_RSS_ENABLED)
                i += snprintf(&buf[i], REMAIN(i), " RSS");
        if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
index f2cea3d25de31bf06899bc5a11a02d0d6f8ed192..954efe3118dbb15fcf436e5a56f0a98499d165a4 100644 (file)
@@ -693,10 +693,10 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
        /* early check for status command and debug msgs */
        upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
 
-       i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
+       i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
                   i40e_nvm_update_state_str[upd_cmd],
                   hw->nvmupd_state,
-                  hw->nvm_release_on_done,
+                  hw->nvm_release_on_done, hw->nvm_wait_opcode,
                   cmd->command, cmd->config, cmd->offset, cmd->data_size);
 
        if (upd_cmd == I40E_NVMUPD_INVALID) {
@@ -710,7 +710,18 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
         * going into the state machine
         */
        if (upd_cmd == I40E_NVMUPD_STATUS) {
+               if (!cmd->data_size) {
+                       *perrno = -EFAULT;
+                       return I40E_ERR_BUF_TOO_SHORT;
+               }
+
                bytes[0] = hw->nvmupd_state;
+
+               if (cmd->data_size >= 4) {
+                       bytes[1] = 0;
+                       *((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
+               }
+
                return 0;
        }
 
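
With the added length checks, the STATUS pseudo-command reports more than the bare state: byte 0 carries nvmupd_state and, when the caller's buffer holds at least 4 bytes, bytes 2..3 carry the host-endian opcode the state machine is waiting on. A hypothetical decode of that buffer from the tool side (names illustrative):

    #include <stdint.h>
    #include <string.h>

    struct nvmupd_status {
            uint8_t  state;
            uint16_t wait_opcode;   /* 0 when nothing is pending */
    };

    /* Hypothetical parse of the status bytes filled in above. */
    static int example_parse_status(const uint8_t *bytes, size_t len,
                                    struct nvmupd_status *out)
    {
            if (len < 1)
                    return -1;
            out->state = bytes[0];
            out->wait_opcode = 0;
            if (len >= 4)
                    memcpy(&out->wait_opcode, &bytes[2],
                           sizeof(out->wait_opcode));
            return 0;
    }
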
@@ -729,6 +740,14 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
 
        case I40E_NVMUPD_STATE_INIT_WAIT:
        case I40E_NVMUPD_STATE_WRITE_WAIT:
+               /* if we need to stop waiting for an event, clear
+                * the wait info and return before doing anything else
+                */
+               if (cmd->offset == 0xffff) {
+                       i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
+                       return 0;
+               }
+
                status = I40E_ERR_NOT_READY;
                *perrno = -EBUSY;
                break;
@@ -800,6 +819,7 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
                                i40e_release_nvm(hw);
                        } else {
                                hw->nvm_release_on_done = true;
+                               hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
                                hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
                        }
                }
@@ -816,6 +836,7 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
                                i40e_release_nvm(hw);
                        } else {
                                hw->nvm_release_on_done = true;
+                               hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
                                hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
                        }
                }
@@ -828,10 +849,12 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
                                                     hw->aq.asq_last_status);
                } else {
                        status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
-                       if (status)
+                       if (status) {
                                i40e_release_nvm(hw);
-                       else
+                       } else {
+                               hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
                                hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
+                       }
                }
                break;
 
@@ -850,6 +873,7 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
                                i40e_release_nvm(hw);
                        } else {
                                hw->nvm_release_on_done = true;
+                               hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
                                hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
                        }
                }
@@ -940,8 +964,10 @@ retry:
        switch (upd_cmd) {
        case I40E_NVMUPD_WRITE_CON:
                status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
-               if (!status)
+               if (!status) {
+                       hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
                        hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
+               }
                break;
 
        case I40E_NVMUPD_WRITE_LCB:
@@ -954,6 +980,7 @@ retry:
                        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
                } else {
                        hw->nvm_release_on_done = true;
+                       hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
                        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
                }
                break;
@@ -967,6 +994,7 @@ retry:
                                   -EIO;
                        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
                } else {
+                       hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
                        hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
                }
                break;
@@ -981,6 +1009,7 @@ retry:
                        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
                } else {
                        hw->nvm_release_on_done = true;
+                       hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
                        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
                }
                break;
@@ -1036,14 +1065,14 @@ retry:
  **/
 void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
 {
-       if (opcode == i40e_aqc_opc_nvm_erase ||
-           opcode == i40e_aqc_opc_nvm_update) {
+       if (opcode == hw->nvm_wait_opcode) {
                i40e_debug(hw, I40E_DEBUG_NVM,
                           "NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
                if (hw->nvm_release_on_done) {
                        i40e_release_nvm(hw);
                        hw->nvm_release_on_done = false;
                }
+               hw->nvm_wait_opcode = 0;
 
                switch (hw->nvmupd_state) {
                case I40E_NVMUPD_STATE_INIT_WAIT:
@@ -1220,6 +1249,12 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
                *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
        }
 
+       /* should we wait for a followup event? */
+       if (cmd->offset) {
+               hw->nvm_wait_opcode = cmd->offset;
+               hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+       }
+
        return status;
 }
 
index 134035f53f2c2a9dc414cc3ba27a49c966205636..4c8977c805dfa6af54e14e09877b7d7708be63d0 100644 (file)
@@ -133,6 +133,14 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
                u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
                u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+                                                        u16 seid, bool enable,
+                                                        u16 vid,
+                               struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+                                                        u16 seid, bool enable,
+                                                        u16 vid,
+                               struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
                                u16 seid, bool enable,
                                struct i40e_asq_cmd_details *cmd_details);
@@ -228,10 +236,6 @@ i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
                                struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
                                struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
-                               enum i40e_aq_hmc_profile profile,
-                               u8 pe_vf_enabled_count,
-                               struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
                                u16 seid, u16 credit, u8 max_bw,
                                struct i40e_asq_cmd_details *cmd_details);
index 565ca7c835bc3989459065e129254e0a16eadba6..a1b878abd5b067489f431e0356b120b405e723bf 100644 (file)
@@ -158,9 +158,10 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
 static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 {
        struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
-       struct timespec64 now, then = ns_to_timespec64(delta);
+       struct timespec64 now, then;
        unsigned long flags;
 
+       then = ns_to_timespec64(delta);
        spin_lock_irqsave(&pf->tmreg_lock, flags);
 
        i40e_ptp_read(pf, &now);
index 29ffed27e5a9f21decd19851a92c44615153d1c3..b0edffe88492d1e6372bf7187032081abc4acdb0 100644 (file)
@@ -1024,7 +1024,6 @@ err:
 void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
 {
        struct device *dev = rx_ring->dev;
-       struct i40e_rx_buffer *rx_bi;
        unsigned long bi_size;
        u16 i;
 
@@ -1032,48 +1031,22 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
        if (!rx_ring->rx_bi)
                return;
 
-       if (ring_is_ps_enabled(rx_ring)) {
-               int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;
-
-               rx_bi = &rx_ring->rx_bi[0];
-               if (rx_bi->hdr_buf) {
-                       dma_free_coherent(dev,
-                                         bufsz,
-                                         rx_bi->hdr_buf,
-                                         rx_bi->dma);
-                       for (i = 0; i < rx_ring->count; i++) {
-                               rx_bi = &rx_ring->rx_bi[i];
-                               rx_bi->dma = 0;
-                               rx_bi->hdr_buf = NULL;
-                       }
-               }
-       }
        /* Free all the Rx ring sk_buffs */
        for (i = 0; i < rx_ring->count; i++) {
-               rx_bi = &rx_ring->rx_bi[i];
-               if (rx_bi->dma) {
-                       dma_unmap_single(dev,
-                                        rx_bi->dma,
-                                        rx_ring->rx_buf_len,
-                                        DMA_FROM_DEVICE);
-                       rx_bi->dma = 0;
-               }
+               struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
+
                if (rx_bi->skb) {
                        dev_kfree_skb(rx_bi->skb);
                        rx_bi->skb = NULL;
                }
-               if (rx_bi->page) {
-                       if (rx_bi->page_dma) {
-                               dma_unmap_page(dev,
-                                              rx_bi->page_dma,
-                                              PAGE_SIZE,
-                                              DMA_FROM_DEVICE);
-                               rx_bi->page_dma = 0;
-                       }
-                       __free_page(rx_bi->page);
-                       rx_bi->page = NULL;
-                       rx_bi->page_offset = 0;
-               }
+               if (!rx_bi->page)
+                       continue;
+
+               dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE);
+               __free_pages(rx_bi->page, 0);
+
+               rx_bi->page = NULL;
+               rx_bi->page_offset = 0;
        }
 
        bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
@@ -1082,6 +1055,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
        /* Zero out the descriptor ring */
        memset(rx_ring->desc, 0, rx_ring->size);
 
+       rx_ring->next_to_alloc = 0;
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
 }
@@ -1105,37 +1079,6 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
        }
 }
 
-/**
- * i40e_alloc_rx_headers - allocate rx header buffers
- * @rx_ring: ring to alloc buffers
- *
- * Allocate rx header buffers for the entire ring. As these are static,
- * this is only called when setting up a new ring.
- **/
-void i40e_alloc_rx_headers(struct i40e_ring *rx_ring)
-{
-       struct device *dev = rx_ring->dev;
-       struct i40e_rx_buffer *rx_bi;
-       dma_addr_t dma;
-       void *buffer;
-       int buf_size;
-       int i;
-
-       if (rx_ring->rx_bi[0].hdr_buf)
-               return;
-       /* Make sure the buffers don't cross cache line boundaries. */
-       buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
-       buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
-                                   &dma, GFP_KERNEL);
-       if (!buffer)
-               return;
-       for (i = 0; i < rx_ring->count; i++) {
-               rx_bi = &rx_ring->rx_bi[i];
-               rx_bi->dma = dma + (i * buf_size);
-               rx_bi->hdr_buf = buffer + (i * buf_size);
-       }
-}
-
 /**
  * i40e_setup_rx_descriptors - Allocate Rx descriptors
  * @rx_ring: Rx descriptor ring (for a specific queue) to setup
@@ -1157,9 +1100,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
        u64_stats_init(&rx_ring->syncp);
 
        /* Round up to nearest 4K */
-       rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
-               ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
-               : rx_ring->count * sizeof(union i40e_32byte_rx_desc);
+       rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
        rx_ring->size = ALIGN(rx_ring->size, 4096);
        rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
                                           &rx_ring->dma, GFP_KERNEL);
@@ -1170,6 +1111,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
                goto err;
        }
 
+       rx_ring->next_to_alloc = 0;
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
 
@@ -1188,6 +1130,10 @@ err:
 static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
 {
        rx_ring->next_to_use = val;
+
+       /* update next to alloc since we have filled the ring */
+       rx_ring->next_to_alloc = val;
+
        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
@@ -1198,160 +1144,122 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
 }
 
 /**
- * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split
- * @rx_ring: ring to place buffers on
- * @cleaned_count: number of buffers to replace
+ * i40e_alloc_mapped_page - recycle or make a new page
+ * @rx_ring: ring to use
+ * @bi: rx_buffer struct to modify
  *
- * Returns true if any errors on allocation
+ * Returns true if the page was successfully allocated or
+ * reused.
  **/
-bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
+static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
+                                  struct i40e_rx_buffer *bi)
 {
-       u16 i = rx_ring->next_to_use;
-       union i40e_rx_desc *rx_desc;
-       struct i40e_rx_buffer *bi;
-       const int current_node = numa_node_id();
+       struct page *page = bi->page;
+       dma_addr_t dma;
 
-       /* do nothing if no valid netdev defined */
-       if (!rx_ring->netdev || !cleaned_count)
-               return false;
+       /* since we are recycling buffers we should seldom need to alloc */
+       if (likely(page)) {
+               rx_ring->rx_stats.page_reuse_count++;
+               return true;
+       }
 
-       while (cleaned_count--) {
-               rx_desc = I40E_RX_DESC(rx_ring, i);
-               bi = &rx_ring->rx_bi[i];
+       /* alloc new page for storage */
+       page = dev_alloc_page();
+       if (unlikely(!page)) {
+               rx_ring->rx_stats.alloc_page_failed++;
+               return false;
+       }
 
-               if (bi->skb) /* desc is in use */
-                       goto no_buffers;
+       /* map page for use */
+       dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
 
-       /* If we've been moved to a different NUMA node, release the
-        * page so we can get a new one on the current node.
+       /* if mapping failed free memory back to system since
+        * there isn't much point in holding memory we can't use
         */
-               if (bi->page &&  page_to_nid(bi->page) != current_node) {
-                       dma_unmap_page(rx_ring->dev,
-                                      bi->page_dma,
-                                      PAGE_SIZE,
-                                      DMA_FROM_DEVICE);
-                       __free_page(bi->page);
-                       bi->page = NULL;
-                       bi->page_dma = 0;
-                       rx_ring->rx_stats.realloc_count++;
-               } else if (bi->page) {
-                       rx_ring->rx_stats.page_reuse_count++;
-               }
-
-               if (!bi->page) {
-                       bi->page = alloc_page(GFP_ATOMIC);
-                       if (!bi->page) {
-                               rx_ring->rx_stats.alloc_page_failed++;
-                               goto no_buffers;
-                       }
-                       bi->page_dma = dma_map_page(rx_ring->dev,
-                                                   bi->page,
-                                                   0,
-                                                   PAGE_SIZE,
-                                                   DMA_FROM_DEVICE);
-                       if (dma_mapping_error(rx_ring->dev, bi->page_dma)) {
-                               rx_ring->rx_stats.alloc_page_failed++;
-                               __free_page(bi->page);
-                               bi->page = NULL;
-                               bi->page_dma = 0;
-                               bi->page_offset = 0;
-                               goto no_buffers;
-                       }
-                       bi->page_offset = 0;
-               }
-
-               /* Refresh the desc even if buffer_addrs didn't change
-                * because each write-back erases this info.
-                */
-               rx_desc->read.pkt_addr =
-                               cpu_to_le64(bi->page_dma + bi->page_offset);
-               rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
-               i++;
-               if (i == rx_ring->count)
-                       i = 0;
+       if (dma_mapping_error(rx_ring->dev, dma)) {
+               __free_pages(page, 0);
+               rx_ring->rx_stats.alloc_page_failed++;
+               return false;
        }
 
-       if (rx_ring->next_to_use != i)
-               i40e_release_rx_desc(rx_ring, i);
+       bi->dma = dma;
+       bi->page = page;
+       bi->page_offset = 0;
 
-       return false;
+       return true;
+}
 
-no_buffers:
-       if (rx_ring->next_to_use != i)
-               i40e_release_rx_desc(rx_ring, i);
+/**
+ * i40e_receive_skb - Send a completed packet up the stack
+ * @rx_ring:  rx ring in play
+ * @skb: packet to send up
+ * @vlan_tag: vlan tag for packet
+ **/
+static void i40e_receive_skb(struct i40e_ring *rx_ring,
+                            struct sk_buff *skb, u16 vlan_tag)
+{
+       struct i40e_q_vector *q_vector = rx_ring->q_vector;
 
-       /* make sure to come back via polling to try again after
-        * allocation failure
-        */
-       return true;
+       if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+           (vlan_tag & VLAN_VID_MASK))
+               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+
+       napi_gro_receive(&q_vector->napi, skb);
 }
 
 /**
- * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
+ * i40e_alloc_rx_buffers - Replace used receive buffers
  * @rx_ring: ring to place buffers on
  * @cleaned_count: number of buffers to replace
  *
- * Returns true if any errors on allocation
+ * Returns false if all allocations were successful, true if any fail
  **/
-bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
+bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
 {
-       u16 i = rx_ring->next_to_use;
+       u16 ntu = rx_ring->next_to_use;
        union i40e_rx_desc *rx_desc;
        struct i40e_rx_buffer *bi;
-       struct sk_buff *skb;
 
        /* do nothing if no valid netdev defined */
        if (!rx_ring->netdev || !cleaned_count)
                return false;
 
-       while (cleaned_count--) {
-               rx_desc = I40E_RX_DESC(rx_ring, i);
-               bi = &rx_ring->rx_bi[i];
-               skb = bi->skb;
-
-               if (!skb) {
-                       skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
-                                                         rx_ring->rx_buf_len,
-                                                         GFP_ATOMIC |
-                                                         __GFP_NOWARN);
-                       if (!skb) {
-                               rx_ring->rx_stats.alloc_buff_failed++;
-                               goto no_buffers;
-                       }
-                       /* initialize queue mapping */
-                       skb_record_rx_queue(skb, rx_ring->queue_index);
-                       bi->skb = skb;
-               }
+       rx_desc = I40E_RX_DESC(rx_ring, ntu);
+       bi = &rx_ring->rx_bi[ntu];
 
-               if (!bi->dma) {
-                       bi->dma = dma_map_single(rx_ring->dev,
-                                                skb->data,
-                                                rx_ring->rx_buf_len,
-                                                DMA_FROM_DEVICE);
-                       if (dma_mapping_error(rx_ring->dev, bi->dma)) {
-                               rx_ring->rx_stats.alloc_buff_failed++;
-                               bi->dma = 0;
-                               dev_kfree_skb(bi->skb);
-                               bi->skb = NULL;
-                               goto no_buffers;
-                       }
-               }
+       do {
+               if (!i40e_alloc_mapped_page(rx_ring, bi))
+                       goto no_buffers;
 
-               rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+               /* Refresh the desc even if buffer_addrs didn't change
+                * because each write-back erases this info.
+                */
+               rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
                rx_desc->read.hdr_addr = 0;
-               i++;
-               if (i == rx_ring->count)
-                       i = 0;
-       }
 
-       if (rx_ring->next_to_use != i)
-               i40e_release_rx_desc(rx_ring, i);
+               rx_desc++;
+               bi++;
+               ntu++;
+               if (unlikely(ntu == rx_ring->count)) {
+                       rx_desc = I40E_RX_DESC(rx_ring, 0);
+                       bi = rx_ring->rx_bi;
+                       ntu = 0;
+               }
+
+               /* clear the status bits for the next_to_use descriptor */
+               rx_desc->wb.qword1.status_error_len = 0;
+
+               cleaned_count--;
+       } while (cleaned_count);
+
+       if (rx_ring->next_to_use != ntu)
+               i40e_release_rx_desc(rx_ring, ntu);
 
        return false;
 
 no_buffers:
-       if (rx_ring->next_to_use != i)
-               i40e_release_rx_desc(rx_ring, i);
+       if (rx_ring->next_to_use != ntu)
+               i40e_release_rx_desc(rx_ring, ntu);
 
        /* make sure to come back via polling to try again after
         * allocation failure
@@ -1359,42 +1267,36 @@ no_buffers:
        return true;
 }
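
The refill loop above walks the descriptor and buffer arrays with a
local cursor and publishes the new tail only once, after the loop. A
toy userspace model of just the index handling, with invented names and
no DMA:

    #include <stdint.h>

    struct ring { uint16_t count, next_to_use; };

    static void refill(struct ring *r, uint16_t cleaned_count)
    {
            uint16_t ntu = r->next_to_use;

            if (!cleaned_count)
                    return;

            do {
                    /* ... program the descriptor at slot ntu here ... */
                    ntu++;
                    if (ntu == r->count)  /* wrap without a modulo */
                            ntu = 0;
            } while (--cleaned_count);

            if (r->next_to_use != ntu)
                    r->next_to_use = ntu;  /* the driver also bumps the HW tail */
    }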
 
-/**
- * i40e_receive_skb - Send a completed packet up the stack
- * @rx_ring:  rx ring in play
- * @skb: packet to send up
- * @vlan_tag: vlan tag for packet
- **/
-static void i40e_receive_skb(struct i40e_ring *rx_ring,
-                            struct sk_buff *skb, u16 vlan_tag)
-{
-       struct i40e_q_vector *q_vector = rx_ring->q_vector;
-
-       if (vlan_tag & VLAN_VID_MASK)
-               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
-
-       napi_gro_receive(&q_vector->napi, skb);
-}
-
 /**
  * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
  * @vsi: the VSI we care about
  * @skb: skb currently being received and modified
- * @rx_status: status value of last descriptor in packet
- * @rx_error: error value of last descriptor in packet
- * @rx_ptype: ptype value of last descriptor in packet
+ * @rx_desc: the receive descriptor
+ *
+ * skb->protocol must be set before this function is called
  **/
 static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
                                    struct sk_buff *skb,
-                                   u32 rx_status,
-                                   u32 rx_error,
-                                   u16 rx_ptype)
+                                   union i40e_rx_desc *rx_desc)
 {
-       struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
-       bool ipv4, ipv6, ipv4_tunnel, ipv6_tunnel;
+       struct i40e_rx_ptype_decoded decoded;
+       bool ipv4, ipv6, tunnel = false;
+       u32 rx_error, rx_status;
+       u8 ptype;
+       u64 qword;
+
+       qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+       ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
+       rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
+                  I40E_RXD_QW1_ERROR_SHIFT;
+       rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+                   I40E_RXD_QW1_STATUS_SHIFT;
+       decoded = decode_rx_desc_ptype(ptype);
 
        skb->ip_summed = CHECKSUM_NONE;
 
+       skb_checksum_none_assert(skb);
+
        /* Rx csum enabled and ip headers found? */
        if (!(vsi->netdev->features & NETIF_F_RXCSUM))
                return;
@@ -1440,14 +1342,13 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
         * doesn't make it a hard requirement so if we have validated the
         * inner checksum report CHECKSUM_UNNECESSARY.
         */
-
-       ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
-                    (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
-       ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
-                    (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
+       if (decoded.inner_prot & (I40E_RX_PTYPE_INNER_PROT_TCP |
+                                 I40E_RX_PTYPE_INNER_PROT_UDP |
+                                 I40E_RX_PTYPE_INNER_PROT_SCTP))
+               tunnel = true;
 
        skb->ip_summed = CHECKSUM_UNNECESSARY;
-       skb->csum_level = ipv4_tunnel || ipv6_tunnel;
+       skb->csum_level = tunnel ? 1 : 0;
 
        return;
 
@@ -1461,7 +1362,7 @@ checksum_fail:
  *
  * Returns a hash type to be used by skb_set_hash
  **/
-static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
+static inline int i40e_ptype_to_htype(u8 ptype)
 {
        struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
 
@@ -1489,7 +1390,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
                                u8 rx_ptype)
 {
        u32 hash;
-       const __le64 rss_mask  =
+       const __le64 rss_mask =
                cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
                            I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
 
@@ -1503,338 +1404,419 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
 }
 
 /**
- * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
- * @rx_ring:  rx ring to clean
- * @budget:   how many cleans we're allowed
+ * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ * @rx_ptype: the packet type decoded by hardware
  *
- * Returns true if there's any budget left (e.g. the clean is finished)
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, protocol, and
+ * other fields within the skb.
  **/
-static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget)
+static inline
+void i40e_process_skb_fields(struct i40e_ring *rx_ring,
+                            union i40e_rx_desc *rx_desc, struct sk_buff *skb,
+                            u8 rx_ptype)
 {
-       unsigned int total_rx_bytes = 0, total_rx_packets = 0;
-       u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
-       u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
-       struct i40e_vsi *vsi = rx_ring->vsi;
-       u16 i = rx_ring->next_to_clean;
-       union i40e_rx_desc *rx_desc;
-       u32 rx_error, rx_status;
-       bool failure = false;
-       u8 rx_ptype;
-       u64 qword;
-       u32 copysize;
+       u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+       u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+                       I40E_RXD_QW1_STATUS_SHIFT;
+       u32 rsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
+                  I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
 
-       if (budget <= 0)
-               return 0;
+       if (unlikely(rsyn)) {
+               i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, rsyn);
+               rx_ring->last_rx_timestamp = jiffies;
+       }
 
-       do {
-               struct i40e_rx_buffer *rx_bi;
-               struct sk_buff *skb;
-               u16 vlan_tag;
-               /* return some buffers to hardware, one at a time is too slow */
-               if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
-                       failure = failure ||
-                                 i40e_alloc_rx_buffers_ps(rx_ring,
-                                                          cleaned_count);
-                       cleaned_count = 0;
-               }
+       i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
 
-               i = rx_ring->next_to_clean;
-               rx_desc = I40E_RX_DESC(rx_ring, i);
-               qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-               rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
-                       I40E_RXD_QW1_STATUS_SHIFT;
+       /* modifies the skb - consumes the enet header */
+       skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 
-               if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
-                       break;
+       i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
 
-               /* This memory barrier is needed to keep us from reading
-                * any other fields out of the rx_desc until we know the
-                * DD bit is set.
-                */
-               dma_rmb();
-               /* sync header buffer for reading */
-               dma_sync_single_range_for_cpu(rx_ring->dev,
-                                             rx_ring->rx_bi[0].dma,
-                                             i * rx_ring->rx_hdr_len,
-                                             rx_ring->rx_hdr_len,
-                                             DMA_FROM_DEVICE);
-               if (i40e_rx_is_programming_status(qword)) {
-                       i40e_clean_programming_status(rx_ring, rx_desc);
-                       I40E_RX_INCREMENT(rx_ring, i);
-                       continue;
-               }
-               rx_bi = &rx_ring->rx_bi[i];
-               skb = rx_bi->skb;
-               if (likely(!skb)) {
-                       skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
-                                                         rx_ring->rx_hdr_len,
-                                                         GFP_ATOMIC |
-                                                         __GFP_NOWARN);
-                       if (!skb) {
-                               rx_ring->rx_stats.alloc_buff_failed++;
-                               failure = true;
-                               break;
-                       }
+       skb_record_rx_queue(skb, rx_ring->queue_index);
+}
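
i40e_process_skb_fields, like most of this file, decodes fields out of
the 64-bit write-back qword with shift-and-mask arithmetic. A
self-contained sketch of the pattern; treat the shift/mask constants
and the sample value as illustrative, not the exact hardware layout:

    #include <stdint.h>
    #include <stdio.h>

    #define PTYPE_SHIFT  30
    #define PTYPE_MASK   (0xffULL << PTYPE_SHIFT)
    #define STATUS_SHIFT 0
    #define STATUS_MASK  (0x7ffffULL << STATUS_SHIFT)

    int main(void)
    {
            uint64_t qword = 0x0000004100000001ULL;  /* made-up write-back */
            uint8_t ptype = (qword & PTYPE_MASK) >> PTYPE_SHIFT;
            uint32_t status = (qword & STATUS_MASK) >> STATUS_SHIFT;

            printf("ptype=%u status=0x%x\n", ptype, (unsigned)status);
            return 0;
    }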
 
-                       /* initialize queue mapping */
-                       skb_record_rx_queue(skb, rx_ring->queue_index);
-                       /* we are reusing so sync this buffer for CPU use */
-                       dma_sync_single_range_for_cpu(rx_ring->dev,
-                                                     rx_ring->rx_bi[0].dma,
-                                                     i * rx_ring->rx_hdr_len,
-                                                     rx_ring->rx_hdr_len,
-                                                     DMA_FROM_DEVICE);
-               }
-               rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
-                               I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
-               rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
-                               I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
-               rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
-                        I40E_RXD_QW1_LENGTH_SPH_SHIFT;
-
-               rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
-                          I40E_RXD_QW1_ERROR_SHIFT;
-               rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
-               rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+/**
+ * i40e_pull_tail - i40e specific version of skb_pull_tail
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being adjusted
+ *
+ * This function is an i40e specific version of __pskb_pull_tail.  The
+ * main difference between this version and the original function is that
+ * this function can make several assumptions about the state of things
+ * that allow for significant optimizations versus the standard function.
+ * As a result we can do things like drop a frag and maintain an accurate
+ * truesize for the skb.
+ */
+static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)
+{
+       struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+       unsigned char *va;
+       unsigned int pull_len;
 
-               rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
-                          I40E_RXD_QW1_PTYPE_SHIFT;
-               /* sync half-page for reading */
-               dma_sync_single_range_for_cpu(rx_ring->dev,
-                                             rx_bi->page_dma,
-                                             rx_bi->page_offset,
-                                             PAGE_SIZE / 2,
-                                             DMA_FROM_DEVICE);
-               prefetch(page_address(rx_bi->page) + rx_bi->page_offset);
-               rx_bi->skb = NULL;
-               cleaned_count++;
-               copysize = 0;
-               if (rx_hbo || rx_sph) {
-                       int len;
+       /* it is valid to use page_address instead of kmap since we are
+        * working with pages allocated out of the lomem pool per
+        * alloc_page(GFP_ATOMIC)
+        */
+       va = skb_frag_address(frag);
 
-                       if (rx_hbo)
-                               len = I40E_RX_HDR_SIZE;
-                       else
-                               len = rx_header_len;
-                       memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
-               } else if (skb->len == 0) {
-                       int len;
-                       unsigned char *va = page_address(rx_bi->page) +
-                                           rx_bi->page_offset;
-
-                       len = min(rx_packet_len, rx_ring->rx_hdr_len);
-                       memcpy(__skb_put(skb, len), va, len);
-                       copysize = len;
-                       rx_packet_len -= len;
-               }
-               /* Get the rest of the data if this was a header split */
-               if (rx_packet_len) {
-                       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-                                       rx_bi->page,
-                                       rx_bi->page_offset + copysize,
-                                       rx_packet_len, I40E_RXBUFFER_2048);
-
-                       /* If the page count is more than 2, then both halves
-                        * of the page are used and we need to free it. Do it
-                        * here instead of in the alloc code. Otherwise one
-                        * of the half-pages might be released between now and
-                        * then, and we wouldn't know which one to use.
-                        * Don't call get_page and free_page since those are
-                        * both expensive atomic operations that just change
-                        * the refcount in opposite directions. Just give the
-                        * page to the stack; he can have our refcount.
-                        */
-                       if (page_count(rx_bi->page) > 2) {
-                               dma_unmap_page(rx_ring->dev,
-                                              rx_bi->page_dma,
-                                              PAGE_SIZE,
-                                              DMA_FROM_DEVICE);
-                               rx_bi->page = NULL;
-                               rx_bi->page_dma = 0;
-                               rx_ring->rx_stats.realloc_count++;
-                       } else {
-                               get_page(rx_bi->page);
-                               /* switch to the other half-page here; the
-                                * allocation code programs the right addr
-                                * into HW. If we haven't used this half-page,
-                                * the address won't be changed, and HW can
-                                * just use it next time through.
-                                */
-                               rx_bi->page_offset ^= PAGE_SIZE / 2;
-                       }
+       /* we need the header to contain the greater of either ETH_HLEN or
+        * 60 bytes if the skb->len is less than 60 for skb_pad.
+        */
+       pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
 
-               }
-               I40E_RX_INCREMENT(rx_ring, i);
+       /* align pull length to size of long to optimize memcpy performance */
+       skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
 
-               if (unlikely(
-                   !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
-                       struct i40e_rx_buffer *next_buffer;
+       /* update all of the pointers */
+       skb_frag_size_sub(frag, pull_len);
+       frag->page_offset += pull_len;
+       skb->data_len -= pull_len;
+       skb->tail += pull_len;
+}
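
The copy in i40e_pull_tail is rounded up to a multiple of sizeof(long)
because a word-aligned memcpy of a few extra bytes (already present in
the source buffer) beats a byte-granular copy. The kernel's ALIGN macro
spelled out for userspace (helper name invented):

    #include <stddef.h>
    #include <string.h>

    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

    /* dst must have room for the rounded-up length */
    static void copy_header(char *dst, const char *src, size_t pull_len)
    {
            memcpy(dst, src, ALIGN_UP(pull_len, sizeof(long)));
    }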
 
-                       next_buffer = &rx_ring->rx_bi[i];
-                       next_buffer->skb = skb;
-                       rx_ring->rx_stats.non_eop_descs++;
-                       continue;
-               }
+/**
+ * i40e_cleanup_headers - Correct empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being fixed
+ *
+ * Also address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ **/
+static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
+{
+       /* place header in linear portion of buffer */
+       if (skb_is_nonlinear(skb))
+               i40e_pull_tail(rx_ring, skb);
 
-               /* ERR_MASK will only have valid bits if EOP set */
-               if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
-                       dev_kfree_skb_any(skb);
-                       continue;
-               }
+       /* if eth_skb_pad returns an error the skb was freed */
+       if (eth_skb_pad(skb))
+               return true;
 
-               i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
+       return false;
+}
 
-               if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
-                       i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
-                                          I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
-                                          I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
-                       rx_ring->last_rx_timestamp = jiffies;
-               }
+/**
+ * i40e_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
+                              struct i40e_rx_buffer *old_buff)
+{
+       struct i40e_rx_buffer *new_buff;
+       u16 nta = rx_ring->next_to_alloc;
 
-               /* probably a little skewed due to removing CRC */
-               total_rx_bytes += skb->len;
-               total_rx_packets++;
+       new_buff = &rx_ring->rx_bi[nta];
 
-               skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+       /* update, and store next to alloc */
+       nta++;
+       rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
 
-               i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+       /* transfer page from old buffer to new buffer */
+       *new_buff = *old_buff;
+}
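
Recycling works by copying the donor buffer's bookkeeping into the slot
at next_to_alloc, which then advances with the same wrap-at-count
pattern as the other cursors. A userspace model (types invented, no DMA
sync):

    #include <stdint.h>

    struct rx_buf { void *page; uint32_t offset; };
    struct rx_ring { struct rx_buf *bi; uint16_t count, next_to_alloc; };

    static void reuse_page(struct rx_ring *r, const struct rx_buf *old)
    {
            uint16_t nta = r->next_to_alloc;

            r->bi[nta] = *old;  /* hand the half-used page to the next slot */
            nta++;
            r->next_to_alloc = (nta < r->count) ? nta : 0;
    }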
 
-               vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
-                        ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
-                        : 0;
-#ifdef I40E_FCOE
-               if (unlikely(
-                   i40e_rx_is_fcoe(rx_ptype) &&
-                   !i40e_fcoe_handle_offload(rx_ring, rx_desc, skb))) {
-                       dev_kfree_skb_any(skb);
-                       continue;
-               }
+/**
+ * i40e_page_is_reserved - check if reuse is possible
+ * @page: page struct to check
+ */
+static inline bool i40e_page_is_reserved(struct page *page)
+{
+       return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+}
+
+/**
+ * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @rx_desc: descriptor containing length of buffer written by hardware
+ * @skb: sk_buff to place the data into
+ *
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
+ *
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
+ **/
+static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
+                            struct i40e_rx_buffer *rx_buffer,
+                            union i40e_rx_desc *rx_desc,
+                            struct sk_buff *skb)
+{
+       struct page *page = rx_buffer->page;
+       u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+       unsigned int size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+                           I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+#if (PAGE_SIZE < 8192)
+       unsigned int truesize = I40E_RXBUFFER_2048;
+#else
+       unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+       unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
 #endif
-               i40e_receive_skb(rx_ring, skb, vlan_tag);
 
-               rx_desc->wb.qword1.status_error_len = 0;
+       /* will the data fit in the skb we allocated? if so, just
+        * copy it as it is pretty small anyway
+        */
+       if ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
+               unsigned char *va = page_address(page) + rx_buffer->page_offset;
 
-       } while (likely(total_rx_packets < budget));
+               memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
-       u64_stats_update_begin(&rx_ring->syncp);
-       rx_ring->stats.packets += total_rx_packets;
-       rx_ring->stats.bytes += total_rx_bytes;
-       u64_stats_update_end(&rx_ring->syncp);
-       rx_ring->q_vector->rx.total_packets += total_rx_packets;
-       rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+               /* page is not reserved, we can reuse buffer as-is */
+               if (likely(!i40e_page_is_reserved(page)))
+                       return true;
 
-       return failure ? budget : total_rx_packets;
+               /* this page cannot be reused so discard it */
+               __free_pages(page, 0);
+               return false;
+       }
+
+       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+                       rx_buffer->page_offset, size, truesize);
+
+       /* avoid re-using remote pages */
+       if (unlikely(i40e_page_is_reserved(page)))
+               return false;
+
+#if (PAGE_SIZE < 8192)
+       /* if we are only owner of page we can reuse it */
+       if (unlikely(page_count(page) != 1))
+               return false;
+
+       /* flip page offset to other buffer */
+       rx_buffer->page_offset ^= truesize;
+#else
+       /* move offset up to the next cache line */
+       rx_buffer->page_offset += truesize;
+
+       if (rx_buffer->page_offset > last_offset)
+               return false;
+#endif
+
+       /* Even if we own the page, we are not allowed to use atomic_set()
+        * This would break get_page_unless_zero() users.
+        */
+       get_page(rx_buffer->page);
+
+       return true;
 }
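
On 4K-page systems the buffer offset is always exactly 0 or 2048, so
the "flip page offset to other buffer" step is a single XOR. A tiny
standalone check of that arithmetic:

    #include <assert.h>

    #define BUF_SIZE 2048u  /* mirrors I40E_RXBUFFER_2048 on 4K pages */

    int main(void)
    {
            unsigned int off = 0;

            off ^= BUF_SIZE;  /* first flip: second half of the page */
            assert(off == 2048);
            off ^= BUF_SIZE;  /* second flip: back to the first half */
            assert(off == 0);
            return 0;
    }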
 
 /**
- * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
- * @rx_ring:  rx ring to clean
- * @budget:   how many cleans we're allowed
+ * i40e_fetch_rx_buffer - Allocate skb and populate it
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_desc: descriptor containing info written by hardware
  *
- * Returns number of packets cleaned
+ * This function allocates an skb on the fly, and populates it with the page
+ * data from the current receive descriptor, taking care to set up the skb
+ * correctly, as well as handling calling the page recycle function if
+ * necessary.
+ */
+static inline
+struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,
+                                    union i40e_rx_desc *rx_desc)
+{
+       struct i40e_rx_buffer *rx_buffer;
+       struct sk_buff *skb;
+       struct page *page;
+
+       rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
+       page = rx_buffer->page;
+       prefetchw(page);
+
+       skb = rx_buffer->skb;
+
+       if (likely(!skb)) {
+               void *page_addr = page_address(page) + rx_buffer->page_offset;
+
+               /* prefetch first cache line of first page */
+               prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+               prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+               /* allocate a skb to store the frags */
+               skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+                                      I40E_RX_HDR_SIZE,
+                                      GFP_ATOMIC | __GFP_NOWARN);
+               if (unlikely(!skb)) {
+                       rx_ring->rx_stats.alloc_buff_failed++;
+                       return NULL;
+               }
+
+               /* we will be copying header into skb->data in
+                * pskb_may_pull so it is in our interest to prefetch
+                * it now to avoid a possible cache miss
+                */
+               prefetchw(skb->data);
+       } else {
+               rx_buffer->skb = NULL;
+       }
+
+       /* we are reusing so sync this buffer for CPU use */
+       dma_sync_single_range_for_cpu(rx_ring->dev,
+                                     rx_buffer->dma,
+                                     rx_buffer->page_offset,
+                                     I40E_RXBUFFER_2048,
+                                     DMA_FROM_DEVICE);
+
+       /* pull page into skb */
+       if (i40e_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+               /* hand second half of page back to the ring */
+               i40e_reuse_rx_page(rx_ring, rx_buffer);
+               rx_ring->rx_stats.page_reuse_count++;
+       } else {
+               /* we are not reusing the buffer so unmap it */
+               dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
+                              DMA_FROM_DEVICE);
+       }
+
+       /* clear contents of buffer_info */
+       rx_buffer->page = NULL;
+
+       return skb;
+}
+
+/**
+ * i40e_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ * @skb: Current socket buffer containing buffer in progress
+ *
+ * This function updates next to clean.  If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it will place the
+ * sk_buff in the next buffer to be chained and return true indicating
+ * that this is in fact a non-EOP buffer.
+ **/
+static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
+                           union i40e_rx_desc *rx_desc,
+                           struct sk_buff *skb)
+{
+       u32 ntc = rx_ring->next_to_clean + 1;
+
+       /* fetch, update, and store next to clean */
+       ntc = (ntc < rx_ring->count) ? ntc : 0;
+       rx_ring->next_to_clean = ntc;
+
+       prefetch(I40E_RX_DESC(rx_ring, ntc));
+
+#define staterrlen rx_desc->wb.qword1.status_error_len
+       if (unlikely(i40e_rx_is_programming_status(le64_to_cpu(staterrlen)))) {
+               i40e_clean_programming_status(rx_ring, rx_desc);
+               rx_ring->rx_bi[ntc].skb = skb;
+               return true;
+       }
+       /* if we are the last buffer then there is nothing else to do */
+#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
+       if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
+               return false;
+
+       /* place skb in next buffer to be received */
+       rx_ring->rx_bi[ntc].skb = skb;
+       rx_ring->rx_stats.non_eop_descs++;
+
+       return true;
+}
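
The non-EOP helper advances and wraps next_to_clean first, and only
then decides whether the frame is complete. A userspace model of that
control flow (descriptor parsing reduced to a boolean):

    #include <stdbool.h>
    #include <stdint.h>

    struct ring { uint16_t count, next_to_clean; };

    static bool is_non_eop(struct ring *r, bool eop_bit_set)
    {
            uint16_t ntc = r->next_to_clean + 1;

            ntc = (ntc < r->count) ? ntc : 0;  /* fetch, update, wrap */
            r->next_to_clean = ntc;

            if (eop_bit_set)
                    return false;  /* frame complete, caller processes it */

            /* caller parks the in-progress skb at slot ntc and keeps going */
            return true;
    }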
+
+/**
+ * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a "bounce buffer" approach to Rx interrupt
+ * processing.  The advantage to this is that on systems that have
+ * expensive overhead for IOMMU access this provides a means of avoiding
+ * it by maintaining the mapping of the page to the system.
+ *
+ * Returns amount of work completed
  **/
-static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
+static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 {
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
        u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
-       struct i40e_vsi *vsi = rx_ring->vsi;
-       union i40e_rx_desc *rx_desc;
-       u32 rx_error, rx_status;
-       u16 rx_packet_len;
        bool failure = false;
-       u8 rx_ptype;
-       u64 qword;
-       u16 i;
 
-       do {
-               struct i40e_rx_buffer *rx_bi;
+       while (likely(total_rx_packets < budget)) {
+               union i40e_rx_desc *rx_desc;
                struct sk_buff *skb;
+               u32 rx_status;
                u16 vlan_tag;
+               u8 rx_ptype;
+               u64 qword;
+
                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
                        failure = failure ||
-                                 i40e_alloc_rx_buffers_1buf(rx_ring,
-                                                            cleaned_count);
+                                 i40e_alloc_rx_buffers(rx_ring, cleaned_count);
                        cleaned_count = 0;
                }
 
-               i = rx_ring->next_to_clean;
-               rx_desc = I40E_RX_DESC(rx_ring, i);
+               rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
                qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+               rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+                          I40E_RXD_QW1_PTYPE_SHIFT;
                rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
-                       I40E_RXD_QW1_STATUS_SHIFT;
+                           I40E_RXD_QW1_STATUS_SHIFT;
 
                if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
                        break;
 
+               /* status_error_len will always be zero for unused descriptors
+                * because it's cleared in cleanup and overlaps with hdr_addr,
+                * which is always zero because packet split isn't used; if the
+                * hardware wrote DD then it will be non-zero
+                */
+               if (!rx_desc->wb.qword1.status_error_len)
+                       break;
+
                /* This memory barrier is needed to keep us from reading
                 * any other fields out of the rx_desc until we know the
                 * DD bit is set.
                 */
                dma_rmb();
 
-               if (i40e_rx_is_programming_status(qword)) {
-                       i40e_clean_programming_status(rx_ring, rx_desc);
-                       I40E_RX_INCREMENT(rx_ring, i);
-                       continue;
-               }
-               rx_bi = &rx_ring->rx_bi[i];
-               skb = rx_bi->skb;
-               prefetch(skb->data);
-
-               rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
-                               I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
-
-               rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
-                          I40E_RXD_QW1_ERROR_SHIFT;
-               rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+               skb = i40e_fetch_rx_buffer(rx_ring, rx_desc);
+               if (!skb)
+                       break;
 
-               rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
-                          I40E_RXD_QW1_PTYPE_SHIFT;
-               rx_bi->skb = NULL;
                cleaned_count++;
 
-               /* Get the header and possibly the whole packet
-                * If this is an skb from previous receive dma will be 0
-                */
-               skb_put(skb, rx_packet_len);
-               dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
-                                DMA_FROM_DEVICE);
-               rx_bi->dma = 0;
-
-               I40E_RX_INCREMENT(rx_ring, i);
-
-               if (unlikely(
-                   !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
-                       rx_ring->rx_stats.non_eop_descs++;
+               if (i40e_is_non_eop(rx_ring, rx_desc, skb))
                        continue;
-               }
 
-               /* ERR_MASK will only have valid bits if EOP set */
-               if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+               /* ERR_MASK will only have valid bits if EOP set, and
+                * what we are doing here is actually checking
+                * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
+                * the error field
+                */
+               if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
                        dev_kfree_skb_any(skb);
                        continue;
                }
 
-               i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
-               if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
-                       i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
-                                          I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
-                                          I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
-                       rx_ring->last_rx_timestamp = jiffies;
-               }
+               if (i40e_cleanup_headers(rx_ring, skb))
+                       continue;
 
                /* probably a little skewed due to removing CRC */
                total_rx_bytes += skb->len;
-               total_rx_packets++;
-
-               skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 
-               i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+               /* populate checksum, VLAN, and protocol */
+               i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
 
-               vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
-                        ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
-                        : 0;
 #ifdef I40E_FCOE
                if (unlikely(
                    i40e_rx_is_fcoe(rx_ptype) &&
@@ -1843,10 +1825,15 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
                        continue;
                }
 #endif
+
+               vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
+                          le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
+
                i40e_receive_skb(rx_ring, skb, vlan_tag);
 
-               rx_desc->wb.qword1.status_error_len = 0;
-       } while (likely(total_rx_packets < budget));
+               /* update budget accounting */
+               total_rx_packets++;
+       }
 
        u64_stats_update_begin(&rx_ring->syncp);
        rx_ring->stats.packets += total_rx_packets;
@@ -1855,6 +1842,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
        rx_ring->q_vector->rx.total_packets += total_rx_packets;
        rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
 
+       /* guarantee a trip back through this routine if there was a failure */
        return failure ? budget : total_rx_packets;
 }
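
The return convention matters here: on an allocation failure the
routine reports the full budget even if little was cleaned, so NAPI
treats the ring as busy and polls again. A toy model of just that
accounting (all names are stand-ins):

    #include <stdbool.h>

    static bool refill(void) { return true; }  /* pretend buffer refill */

    static int poll_rx(int budget, int *pending)
    {
            int total_rx_packets = 0;
            bool failure = false;

            while (total_rx_packets < budget && *pending) {
                    if (!refill())
                            failure = true;
                    (*pending)--;  /* consume one completed frame */
                    total_rx_packets++;
            }

            /* guarantee a trip back through this routine on failure */
            return failure ? budget : total_rx_packets;
    }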
 
@@ -1999,12 +1987,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
        budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
 
        i40e_for_each_ring(ring, q_vector->rx) {
-               int cleaned;
-
-               if (ring_is_ps_enabled(ring))
-                       cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
-               else
-                       cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
+               int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
 
                work_done += cleaned;
                /* if we clean as many as budgeted, we must not be done */
@@ -2299,9 +2282,16 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
                ip.v6->payload_len = 0;
        }
 
-       if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE |
+       if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
+                                        SKB_GSO_GRE_CSUM |
+                                        SKB_GSO_IPIP |
+                                        SKB_GSO_SIT |
+                                        SKB_GSO_UDP_TUNNEL |
                                         SKB_GSO_UDP_TUNNEL_CSUM)) {
-               if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
+               if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
+                   (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
+                       l4.udp->len = 0;
+
                        /* determine offset of outer transport header */
                        l4_offset = l4.hdr - skb->data;
 
@@ -2442,13 +2432,6 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
                                                 &l4_proto, &frag_off);
                }
 
-               /* compute outer L3 header size */
-               tunnel |= ((l4.hdr - ip.hdr) / 4) <<
-                         I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
-
-               /* switch IP header pointer from outer to inner header */
-               ip.hdr = skb_inner_network_header(skb);
-
                /* define outer transport */
                switch (l4_proto) {
                case IPPROTO_UDP:
@@ -2459,6 +2442,11 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
                        tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
                        *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
                        break;
+               case IPPROTO_IPIP:
+               case IPPROTO_IPV6:
+                       *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
+                       l4.hdr = skb_inner_network_header(skb);
+                       break;
                default:
                        if (*tx_flags & I40E_TX_FLAGS_TSO)
                                return -1;
@@ -2467,12 +2455,20 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
                        return 0;
                }
 
+               /* compute outer L3 header size */
+               tunnel |= ((l4.hdr - ip.hdr) / 4) <<
+                         I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
+
+               /* switch IP header pointer from outer to inner header */
+               ip.hdr = skb_inner_network_header(skb);
+
                /* compute tunnel header size */
                tunnel |= ((ip.hdr - l4.hdr) / 2) <<
                          I40E_TXD_CTX_QW0_NATLEN_SHIFT;
 
                /* indicate if we need to offload outer UDP header */
                if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
+                   !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
                    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
                        tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
 
@@ -2600,35 +2596,34 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 }
 
 /**
- * __i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
  * @skb:      send buffer
  *
- * Note: Our HW can't scatter-gather more than 8 fragments to build
- * a packet on the wire and so we need to figure out the cases where we
- * need to linearize the skb.
+ * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
+ * and so we need to figure out the cases where we need to linearize the skb.
+ *
+ * For TSO we need to count the TSO header and segment payload separately.
+ * As such we need to check cases where we have 7 fragments or more as we
+ * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
+ * the segment payload in the first descriptor, and another 7 for the
+ * fragments.
  **/
 bool __i40e_chk_linearize(struct sk_buff *skb)
 {
        const struct skb_frag_struct *frag, *stale;
-       int gso_size, nr_frags, sum;
-
-       /* check to see if TSO is enabled, if so we may get a repreive */
-       gso_size = skb_shinfo(skb)->gso_size;
-       if (unlikely(!gso_size))
-               return true;
+       int nr_frags, sum;
 
-       /* no need to check if number of frags is less than 8 */
+       /* no need to check if number of frags is less than 7 */
        nr_frags = skb_shinfo(skb)->nr_frags;
-       if (nr_frags < I40E_MAX_BUFFER_TXD)
+       if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
                return false;
 
        /* We need to walk through the list and validate that each group
         * of 6 fragments totals at least gso_size.  However we don't need
-        * to perform such validation on the first or last 6 since the first
-        * 6 cannot inherit any data from a descriptor before them, and the
-        * last 6 cannot inherit any data from a descriptor after them.
+        * to perform such validation on the last 6 since the last 6 cannot
+        * inherit any data from a descriptor after them.
         */
-       nr_frags -= I40E_MAX_BUFFER_TXD - 1;
+       nr_frags -= I40E_MAX_BUFFER_TXD - 2;
        frag = &skb_shinfo(skb)->frags[0];
 
        /* Initialize size to the negative value of gso_size minus 1.  We
@@ -2637,21 +2632,21 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
         * descriptors for a single transmit as the header and previous
         * fragment are already consuming 2 descriptors.
         */
-       sum = 1 - gso_size;
+       sum = 1 - skb_shinfo(skb)->gso_size;
 
-       /* Add size of frags 1 through 5 to create our initial sum */
-       sum += skb_frag_size(++frag);
-       sum += skb_frag_size(++frag);
-       sum += skb_frag_size(++frag);
-       sum += skb_frag_size(++frag);
-       sum += skb_frag_size(++frag);
+       /* Add size of frags 0 through 4 to create our initial sum */
+       sum += skb_frag_size(frag++);
+       sum += skb_frag_size(frag++);
+       sum += skb_frag_size(frag++);
+       sum += skb_frag_size(frag++);
+       sum += skb_frag_size(frag++);
 
        /* Walk through fragments adding latest fragment, testing it, and
         * then removing stale fragments from the sum.
         */
        stale = &skb_shinfo(skb)->frags[0];
        for (;;) {
-               sum += skb_frag_size(++frag);
+               sum += skb_frag_size(frag++);
 
                /* if sum is negative we failed to make sufficient progress */
                if (sum < 0)
@@ -2661,7 +2656,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
                if (!--nr_frags)
                        break;
 
-               sum -= skb_frag_size(++stale);
+               sum -= skb_frag_size(stale++);
        }
 
        return false;
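
The fragment walk above is a sliding-window check: keep a running sum
of the newest six fragment sizes minus gso_size, and if the sum ever
goes negative some window of fragments cannot make enough payload
progress and the skb must be linearized. A standalone model with the
same structure (plain ints instead of skb_frag_struct):

    #include <stdbool.h>

    static bool needs_linearize(const int *frag_sz, int nr_frags,
                                int gso_size)
    {
            const int *frag = frag_sz, *stale = frag_sz;
            int sum = 1 - gso_size;
            int togo;

            if (nr_frags < 7)  /* mirrors I40E_MAX_BUFFER_TXD - 1 */
                    return false;
            togo = nr_frags - 6;

            sum += *frag++; sum += *frag++; sum += *frag++;
            sum += *frag++; sum += *frag++;

            for (;;) {
                    sum += *frag++;
                    if (sum < 0)
                            return true;  /* insufficient progress */
                    if (!--togo)
                            break;
                    sum -= *stale++;
            }
            return false;
    }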
index 77ccdde56c0c4b1c08672aafdd8ee6f96841e3c9..b78c810d18358b5dd9628ae089564e0c6a3ec4df 100644 (file)
@@ -102,8 +102,8 @@ enum i40e_dyn_idx_t {
        (((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
          I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
 
-/* Supported Rx Buffer Sizes */
-#define I40E_RXBUFFER_512   512    /* Used for packet split */
+/* Supported Rx Buffer Sizes (a multiple of 128) */
+#define I40E_RXBUFFER_256   256
 #define I40E_RXBUFFER_2048  2048
 #define I40E_RXBUFFER_3072  3072   /* For FCoE MTU of 2158 */
 #define I40E_RXBUFFER_4096  4096
@@ -114,9 +114,28 @@ enum i40e_dyn_idx_t {
  * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
  * this adds up to 512 bytes of extra data meaning the smallest allocation
  * we could have is 1K.
- * i.e. RXBUFFER_512 --> size-1024 slab
+ * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
+ * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
  */
-#define I40E_RX_HDR_SIZE  I40E_RXBUFFER_512
+#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
+#define i40e_rx_desc i40e_32byte_rx_desc
+
+/**
+ * i40e_test_staterr - tests bits in Rx descriptor status and error fields
+ * @rx_desc: pointer to receive descriptor (in le64 format)
+ * @stat_err_bits: value to mask
+ *
+ * This function does some fast chicanery in order to return the
+ * value of the mask, which is really only used for boolean tests.
+ * The status_error_len doesn't need to be shifted because it begins
+ * at offset zero.
+ */
+static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
+                                    const u64 stat_err_bits)
+{
+       return !!(rx_desc->wb.qword1.status_error_len &
+                 cpu_to_le64(stat_err_bits));
+}
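
Because the status/error field starts at bit 0 of the little-endian
qword, the constant mask can be converted to LE once (at compile time)
and tested directly, with no per-descriptor byte swap. The same trick
in userspace, using glibc's htole64 (function name invented):

    #include <endian.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool test_staterr(uint64_t staterr_le, uint64_t cpu_mask)
    {
            /* swap the constant, not the descriptor word */
            return !!(staterr_le & htole64(cpu_mask));
    }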
 
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
 #define I40E_RX_BUFFER_WRITE   16      /* Must be power of 2 */
@@ -142,8 +161,6 @@ enum i40e_dyn_idx_t {
                prefetch((n));                          \
        } while (0)
 
-#define i40e_rx_desc i40e_32byte_rx_desc
-
 #define I40E_MAX_BUFFER_TXD    8
 #define I40E_MIN_TX_LEN                17
 
@@ -213,10 +230,8 @@ struct i40e_tx_buffer {
 
 struct i40e_rx_buffer {
        struct sk_buff *skb;
-       void *hdr_buf;
        dma_addr_t dma;
        struct page *page;
-       dma_addr_t page_dma;
        unsigned int page_offset;
 };
 
@@ -245,22 +260,18 @@ struct i40e_rx_queue_stats {
 enum i40e_ring_state_t {
        __I40E_TX_FDIR_INIT_DONE,
        __I40E_TX_XPS_INIT_DONE,
-       __I40E_RX_PS_ENABLED,
-       __I40E_RX_16BYTE_DESC_ENABLED,
 };
 
-#define ring_is_ps_enabled(ring) \
-       test_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define set_ring_ps_enabled(ring) \
-       set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define clear_ring_ps_enabled(ring) \
-       clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define ring_is_16byte_desc_enabled(ring) \
-       test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
-#define set_ring_16byte_desc_enabled(ring) \
-       set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
-#define clear_ring_16byte_desc_enabled(ring) \
-       clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+/* some useful defines for virtchannel interface, which
+ * is the only remaining user of header split
+ */
+#define I40E_RX_DTYPE_NO_SPLIT      0
+#define I40E_RX_DTYPE_HEADER_SPLIT  1
+#define I40E_RX_DTYPE_SPLIT_ALWAYS  2
+#define I40E_RX_SPLIT_L2      0x1
+#define I40E_RX_SPLIT_IP      0x2
+#define I40E_RX_SPLIT_TCP_UDP 0x4
+#define I40E_RX_SPLIT_SCTP    0x8
 
 /* struct that defines a descriptor ring, associated with a VSI */
 struct i40e_ring {
@@ -287,16 +298,7 @@ struct i40e_ring {
 
        u16 count;                      /* Number of descriptors */
        u16 reg_idx;                    /* HW register index of the ring */
-       u16 rx_hdr_len;
        u16 rx_buf_len;
-       u8  dtype;
-#define I40E_RX_DTYPE_NO_SPLIT      0
-#define I40E_RX_DTYPE_HEADER_SPLIT  1
-#define I40E_RX_DTYPE_SPLIT_ALWAYS  2
-#define I40E_RX_SPLIT_L2      0x1
-#define I40E_RX_SPLIT_IP      0x2
-#define I40E_RX_SPLIT_TCP_UDP 0x4
-#define I40E_RX_SPLIT_SCTP    0x8
 
        /* used in interrupt processing */
        u16 next_to_use;
@@ -330,6 +332,7 @@ struct i40e_ring {
        struct i40e_q_vector *q_vector; /* Backreference to associated vector */
 
        struct rcu_head rcu;            /* to avoid race on free */
+       u16 next_to_alloc;
 } ____cacheline_internodealigned_in_smp;
 
 enum i40e_latency_range {
@@ -353,9 +356,7 @@ struct i40e_ring_container {
 #define i40e_for_each_ring(pos, head) \
        for (pos = (head).ring; pos != NULL; pos = pos->next)
 
-bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
-bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
-void i40e_alloc_rx_headers(struct i40e_ring *rxr);
+bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
 void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
 void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
@@ -442,11 +443,15 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
  **/
 static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
 {
-       /* we can only support up to 8 data buffers for a single send */
-       if (likely(count <= I40E_MAX_BUFFER_TXD))
+       /* Both TSO and single send will work if count is less than 8 */
+       if (likely(count < I40E_MAX_BUFFER_TXD))
                return false;
 
-       return __i40e_chk_linearize(skb);
+       if (skb_is_gso(skb))
+               return __i40e_chk_linearize(skb);
+
+       /* we can support up to 8 data buffers for a single send */
+       return count != I40E_MAX_BUFFER_TXD;
 }
 
 /**
index 793036b259e5106676ad27f4ed80c7ea6ad49045..bd5f13bef83c794bb03435308b282831b9300c95 100644 (file)
@@ -36,7 +36,7 @@
 #include "i40e_devids.h"
 
 /* I40E_MASK is a macro used on 32 bit registers */
-#define I40E_MASK(mask, shift) (mask << shift)
+#define I40E_MASK(mask, shift) ((u32)(mask) << (shift))
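
The added cast is not cosmetic: with a plain int mask, shifting into
bit 31 is undefined behaviour, and the (usually negative) int result
sign-extends when the macro expands into a 64-bit context. A userspace
illustration of the difference (macro names invented):

    #include <stdint.h>
    #include <stdio.h>

    #define MASK_BAD(m, s)  ((m) << (s))
    #define MASK_GOOD(m, s) ((uint32_t)(m) << (s))

    int main(void)
    {
            uint64_t bad = MASK_BAD(0x3, 31);   /* UB; commonly 0xffffffff80000000 */
            uint64_t good = MASK_GOOD(0x3, 31); /* well-defined: 0x80000000 */

            printf("bad=%llx good=%llx\n",
                   (unsigned long long)bad, (unsigned long long)good);
            return 0;
    }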
 
 #define I40E_MAX_VSI_QP                        16
 #define I40E_MAX_VF_VSI                        3
@@ -275,6 +275,11 @@ struct i40e_hw_capabilities {
 #define I40E_FLEX10_STATUS_DCC_ERROR   0x1
 #define I40E_FLEX10_STATUS_VC_MODE     0x2
 
+       bool sec_rev_disabled;
+       bool update_disabled;
+#define I40E_NVM_MGMT_SEC_REV_DISABLED 0x1
+#define I40E_NVM_MGMT_UPDATE_DISABLED  0x2
+
        bool mgmt_cem;
        bool ieee_1588;
        bool iwarp;
@@ -550,6 +555,7 @@ struct i40e_hw {
        struct i40e_aq_desc nvm_wb_desc;
        struct i40e_virt_mem nvm_buff;
        bool nvm_release_on_done;
+       u16 nvm_wait_opcode;
 
        /* HMC info */
        struct i40e_hmc_info hmc; /* HMC info struct */
index 30f8cbe6b54b00cf1d67b70fb3affccd5a213f92..a9b04e72df82142138a606970fcf6c5a9e1f7d93 100644 (file)
@@ -48,7 +48,7 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
        int i;
 
        for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
-               int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+               int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
                /* Not all vfs are enabled so skip the ones that are not */
                if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
                    !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
@@ -74,7 +74,7 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_link_status *ls = &pf->hw.phy.link_info;
-       int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+       int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
 
        pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
        pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
@@ -141,7 +141,7 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
            !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
                return;
 
-       abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id;
+       abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;
 
        pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
        pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
@@ -590,7 +590,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
                }
                rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
 
-               /* set splitalways mode 10b */
+               /* set split mode 10b */
                rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
        }
 
@@ -860,7 +860,11 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf)
        if (ret)
                goto error_alloc;
        total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
-       set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+
+       if (vf->trusted)
+               set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+       else
+               clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
 
        /* store the total qps number for the runtime
         * VF req validation
@@ -1348,12 +1352,16 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
                set_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states);
        }
 
-       if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
-               if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
-                       vfres->vf_offload_flags |=
-                               I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ;
+       if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+               vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF;
        } else {
-               vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
+               if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
+                   (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ))
+                       vfres->vf_offload_flags |=
+                                       I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ;
+               else
+                       vfres->vf_offload_flags |=
+                                       I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
        }
 
        if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
@@ -1382,6 +1390,9 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
        vfres->num_vsis = num_vsis;
        vfres->num_queue_pairs = vf->num_queue_pairs;
        vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
+       vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
+       vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
+
        if (vf->lan_vsi_idx) {
                vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
                vfres->vsi_res[0].vsi_type = I40E_VSI_SRIOV;
@@ -1419,6 +1430,25 @@ static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
                i40e_reset_vf(vf, false);
 }
 
+/**
+ * i40e_getnum_vf_vsi_vlan_filters
+ * @vsi: pointer to the vsi
+ *
+ * called to get the number of VLANs offloaded on this VF
+ **/
+static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
+{
+       struct i40e_mac_filter *f;
+       int num_vlans = 0;
+
+       list_for_each_entry(f, &vsi->mac_filter_list, list) {
+               if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
+                       num_vlans++;
+       }
+
+       return num_vlans;
+}
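
The helper just counts list entries whose VLAN ID falls in the valid
0..4095 range; negative VLAN values are skipped. The same logic over a
plain array (types invented):

    #define MAX_VLANID 4095

    struct mac_filter { int vlan; };  /* negative vlan: no VLAN attached */

    static int count_vlan_filters(const struct mac_filter *f, int n)
    {
            int i, num_vlans = 0;

            for (i = 0; i < n; i++)
                    if (f[i].vlan >= 0 && f[i].vlan <= MAX_VLANID)
                            num_vlans++;

            return num_vlans;
    }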
+
 /**
  * i40e_vc_config_promiscuous_mode_msg
  * @vf: pointer to the VF info
@@ -1435,22 +1465,123 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
            (struct i40e_virtchnl_promisc_info *)msg;
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
-       struct i40e_vsi *vsi;
+       struct i40e_mac_filter *f;
+       i40e_status aq_ret = 0;
        bool allmulti = false;
-       i40e_status aq_ret;
+       struct i40e_vsi *vsi;
+       bool alluni = false;
+       int aq_err = 0;
 
        vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
            !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
-           !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
-           (vsi->type != I40E_VSI_FCOE)) {
+           !i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
+               dev_err(&pf->pdev->dev,
+                       "VF %d doesn't meet requirements to enter promiscuous mode\n",
+                       vf->vf_id);
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }
+       /* Multicast promiscuous handling */
        if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
                allmulti = true;
-       aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
-                                                      allmulti, NULL);
+
+       if (vf->port_vlan_id) {
+               aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
+                                                           allmulti,
+                                                           vf->port_vlan_id,
+                                                           NULL);
+       } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
+               list_for_each_entry(f, &vsi->mac_filter_list, list) {
+                       if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
+                               continue;
+                       aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
+                                                                   vsi->seid,
+                                                                   allmulti,
+                                                                   f->vlan,
+                                                                   NULL);
+                       aq_err = pf->hw.aq.asq_last_status;
+                       if (aq_ret) {
+                               dev_err(&pf->pdev->dev,
+                                       "Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
+                                       f->vlan,
+                                       i40e_stat_str(&pf->hw, aq_ret),
+                                       i40e_aq_str(&pf->hw, aq_err));
+                               break;
+                       }
+               }
+       } else {
+               aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
+                                                              allmulti, NULL);
+               aq_err = pf->hw.aq.asq_last_status;
+               if (aq_ret) {
+                       dev_err(&pf->pdev->dev,
+                               "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
+                               vf->vf_id,
+                               i40e_stat_str(&pf->hw, aq_ret),
+                               i40e_aq_str(&pf->hw, aq_err));
+                       goto error_param_int;
+               }
+       }
+
+       if (!aq_ret) {
+               dev_info(&pf->pdev->dev,
+                        "VF %d successfully set multicast promiscuous mode\n",
+                        vf->vf_id);
+               if (allmulti)
+                       set_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states);
+               else
+                       clear_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states);
+       }
+
+       if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC)
+               alluni = true;
+       if (vf->port_vlan_id) {
+               aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
+                                                           alluni,
+                                                           vf->port_vlan_id,
+                                                           NULL);
+       } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
+               list_for_each_entry(f, &vsi->mac_filter_list, list) {
+                       aq_ret = 0;
+                       if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) {
+                               aq_ret =
+                               i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
+                                                                  vsi->seid,
+                                                                  alluni,
+                                                                  f->vlan,
+                                                                  NULL);
+                               aq_err = pf->hw.aq.asq_last_status;
+                       }
+                       if (aq_ret)
+                               dev_err(&pf->pdev->dev,
+                                       "Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
+                                       f->vlan,
+                                       i40e_stat_str(&pf->hw, aq_ret),
+                                       i40e_aq_str(&pf->hw, aq_err));
+               }
+       } else {
+               aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
+                                                            alluni, NULL);
+               aq_err = pf->hw.aq.asq_last_status;
+               if (aq_ret)
+                       dev_err(&pf->pdev->dev,
+                               "VF %d failed to set unicast promiscuous mode %8.8x err %s aq_err %s\n",
+                               vf->vf_id, info->flags,
+                               i40e_stat_str(&pf->hw, aq_ret),
+                               i40e_aq_str(&pf->hw, aq_err));
+       }
+
+error_param_int:
+       if (!aq_ret) {
+               dev_info(&pf->pdev->dev,
+                        "VF %d successfully set unicast promiscuous mode\n",
+                        vf->vf_id);
+               if (alluni)
+                       set_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states);
+               else
+                       clear_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states);
+       }
 
 error_param:
        /* send the response to the VF */
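For reference, this handler is driven by a single i40e_virtchnl_promisc_info message from the VF, with both promiscuous modes carried as flags. A VF-side sketch (the struct, field and flag names are the real ones used above; the surrounding code is hypothetical):

    struct i40e_virtchnl_promisc_info info = {
            .vsi_id = vsi_id,  /* must be a VSI this VF owns */
            .flags  = I40E_FLAG_VF_UNICAST_PROMISC |
                      I40E_FLAG_VF_MULTICAST_PROMISC,
    };
    /* sending a later message with a flag cleared disables that
     * mode again, per the set_bit()/clear_bit() handling above
     */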
@@ -1701,6 +1832,10 @@ error_param:
                                      (u8 *)&stats, sizeof(stats));
 }
 
+/* If the VF is not trusted, restrict the MAC/VLAN filters it can program */
+#define I40E_VC_MAX_MAC_ADDR_PER_VF 8
+#define I40E_VC_MAX_VLAN_PER_VF 8
+
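Both caps apply only while I40E_VIRTCHNL_VF_CAP_PRIVILEGE is clear; a VF the host administrator has marked trusted bypasses them, as the test_bit() checks in the handlers below show. The effective MAC-side condition, condensed into a hypothetical helper:

    static bool i40e_vf_over_mac_limit(struct i40e_vf *vf)
    {
            return vf->num_mac >= I40E_VC_MAX_MAC_ADDR_PER_VF &&
                   !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE,
                             &vf->vf_caps);
    }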
 /**
  * i40e_check_vf_permission
  * @vf: pointer to the VF info
@@ -1721,15 +1856,22 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
                dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr);
                ret = I40E_ERR_INVALID_MAC_ADDR;
        } else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) &&
+                  !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
                   !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) {
                /* If the host VMM administrator has set the VF MAC address
                 * administratively via the ndo_set_vf_mac command then deny
                 * permission to the VF to add or delete unicast MAC addresses.
+                * The exception is a privileged (trusted) VF, which is
+                * allowed to change its MAC address.
                 * The VF may request to set the MAC address filter already
                 * assigned to it so do not return an error in that case.
                 */
                dev_err(&pf->pdev->dev,
-                       "VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n");
+                       "VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n");
+               ret = -EPERM;
+       } else if ((vf->num_mac >= I40E_VC_MAX_MAC_ADDR_PER_VF) &&
+                  !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+               dev_err(&pf->pdev->dev,
+                       "VF is not trusted, switch the VF to trusted to add more functionality\n");
                ret = -EPERM;
        }
        return ret;
@@ -1754,7 +1896,6 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
        int i;
 
        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
-           !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
            !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
                ret = I40E_ERR_PARAM;
                goto error_param;
@@ -1793,6 +1934,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
                        ret = I40E_ERR_PARAM;
                        spin_unlock_bh(&vsi->mac_filter_list_lock);
                        goto error_param;
+               } else {
+                       vf->num_mac++;
                }
        }
        spin_unlock_bh(&vsi->mac_filter_list_lock);
@@ -1828,7 +1971,6 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
        int i;
 
        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
-           !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
            !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
                ret = I40E_ERR_PARAM;
                goto error_param;
@@ -1852,6 +1994,8 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
                        ret = I40E_ERR_INVALID_MAC_ADDR;
                        spin_unlock_bh(&vsi->mac_filter_list_lock);
                        goto error_param;
+               } else {
+                       vf->num_mac--;
                }
 
        spin_unlock_bh(&vsi->mac_filter_list_lock);
@@ -1886,8 +2030,13 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
        i40e_status aq_ret = 0;
        int i;
 
+       if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
+           !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+               dev_err(&pf->pdev->dev,
+                       "VF is not trusted, switch the VF to trusted to add more VLANs\n");
+               aq_ret = I40E_ERR_PARAM;
+               goto error_param;
+       }
        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
-           !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
            !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
@@ -1911,6 +2060,19 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
        for (i = 0; i < vfl->num_elements; i++) {
                /* add new VLAN filter */
                int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
+
+               if (!ret)
+                       vf->num_vlan++;
+
+               if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states))
+                       i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
+                                                          true,
+                                                          vfl->vlan_id[i],
+                                                          NULL);
+               if (test_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states))
+                       i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
+                                                          true,
+                                                          vfl->vlan_id[i],
+                                                          NULL);
 
                if (ret)
                        dev_err(&pf->pdev->dev,
@@ -1942,7 +2104,6 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
        int i;
 
        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
-           !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
            !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
@@ -1963,6 +2124,19 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 
        for (i = 0; i < vfl->num_elements; i++) {
                int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
+
+               if (!ret)
+                       vf->num_vlan--;
+
+               if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states))
+                       i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
+                                                          false,
+                                                          vfl->vlan_id[i],
+                                                          NULL);
+               if (test_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states))
+                       i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
+                                                          false,
+                                                          vfl->vlan_id[i],
+                                                          NULL);
 
                if (ret)
                        dev_err(&pf->pdev->dev,
@@ -2041,6 +2215,135 @@ error_param:
                               aq_ret);
 }
 
+/**
+ * i40e_vc_config_rss_key
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * Configure the VF's RSS key
+ **/
+static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+       struct i40e_virtchnl_rss_key *vrk =
+               (struct i40e_virtchnl_rss_key *)msg;
+       struct i40e_pf *pf = vf->pf;
+       struct i40e_vsi *vsi = NULL;
+       u16 vsi_id = vrk->vsi_id;
+       i40e_status aq_ret = 0;
+
+       if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+           !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
+           (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
+               aq_ret = I40E_ERR_PARAM;
+               goto err;
+       }
+
+       vsi = pf->vsi[vf->lan_vsi_idx];
+       aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
+err:
+       /* send the response to the VF */
+       return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
+                                      aq_ret);
+}
+
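struct i40e_virtchnl_rss_key ends in a one-byte flexible key[] member, so one payload byte is already inside sizeof(); a VF sizing the CONFIG_RSS_KEY message therefore adds key_len - 1, the same arithmetic i40e_vc_validate_vf_msg() applies below. A VF-side sketch (hypothetical fragment, allocation error handling omitted):

    int len = sizeof(struct i40e_virtchnl_rss_key) +
              I40E_HKEY_ARRAY_SIZE - 1;
    struct i40e_virtchnl_rss_key *vrk = kzalloc(len, GFP_KERNEL);

    vrk->vsi_id  = vsi_id;                /* the VF's LAN VSI */
    vrk->key_len = I40E_HKEY_ARRAY_SIZE;  /* any other length is rejected */
    /* fill vrk->key with I40E_HKEY_ARRAY_SIZE key bytes, then send */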
+/**
+ * i40e_vc_config_rss_lut
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * Configure the VF's RSS LUT
+ **/
+static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+       struct i40e_virtchnl_rss_lut *vrl =
+               (struct i40e_virtchnl_rss_lut *)msg;
+       struct i40e_pf *pf = vf->pf;
+       struct i40e_vsi *vsi = NULL;
+       u16 vsi_id = vrl->vsi_id;
+       i40e_status aq_ret = 0;
+
+       if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+           !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
+           (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
+               aq_ret = I40E_ERR_PARAM;
+               goto err;
+       }
+
+       vsi = pf->vsi[vf->lan_vsi_idx];
+       aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
+err:
+       /* send the response to the VF */
+       return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
+                                      aq_ret);
+}
+
+/**
+ * i40e_vc_get_rss_hena
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * Return the RSS HENA bits allowed by the hardware
+ **/
+static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+       struct i40e_virtchnl_rss_hena *vrh = NULL;
+       struct i40e_pf *pf = vf->pf;
+       i40e_status aq_ret = 0;
+       int len = 0;
+
+       if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+               aq_ret = I40E_ERR_PARAM;
+               goto err;
+       }
+       len = sizeof(struct i40e_virtchnl_rss_hena);
+
+       vrh = kzalloc(len, GFP_KERNEL);
+       if (!vrh) {
+               aq_ret = I40E_ERR_NO_MEMORY;
+               len = 0;
+               goto err;
+       }
+       vrh->hena = i40e_pf_get_default_rss_hena(pf);
+err:
+       /* send the response back to the VF */
+       aq_ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS,
+                                       aq_ret, (u8 *)vrh, len);
+       return aq_ret;
+}
+
+/**
+ * i40e_vc_set_rss_hena
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * Set the RSS HENA bits for the VF
+ **/
+static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+       struct i40e_virtchnl_rss_hena *vrh =
+               (struct i40e_virtchnl_rss_hena *)msg;
+       struct i40e_pf *pf = vf->pf;
+       struct i40e_hw *hw = &pf->hw;
+       i40e_status aq_ret = 0;
+
+       if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+               aq_ret = I40E_ERR_PARAM;
+               goto err;
+       }
+       i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
+       i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
+                         (u32)(vrh->hena >> 32));
+
+       /* send the response to the VF */
+err:
+       return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_SET_RSS_HENA,
+                                      aq_ret);
+}
+
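Two details worth noting here: GET_RSS_HENA_CAPS replies with the PF's default hash-enable set from i40e_pf_get_default_rss_hena(), not with the VF's current value, and SET_RSS_HENA bypasses the admin queue entirely, splitting the 64-bit hena word across the two 32-bit VFQF_HENA1 registers via i40e_write_rx_ctl().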
 /**
  * i40e_vc_validate_vf_msg
  * @vf: pointer to the VF info
@@ -2054,7 +2357,7 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
                                   u32 v_retval, u8 *msg, u16 msglen)
 {
        bool err_msg_format = false;
-       int valid_len;
+       int valid_len = 0;
 
        /* Check if VF is disabled. */
        if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states))
@@ -2066,13 +2369,10 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
                valid_len = sizeof(struct i40e_virtchnl_version_info);
                break;
        case I40E_VIRTCHNL_OP_RESET_VF:
-               valid_len = 0;
                break;
        case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
                if (VF_IS_V11(vf))
                        valid_len = sizeof(u32);
-               else
-                       valid_len = 0;
                break;
        case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
                valid_len = sizeof(struct i40e_virtchnl_txq_info);
@@ -2162,6 +2462,35 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
                                sizeof(struct i40e_virtchnl_iwarp_qv_info));
                }
                break;
+       case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
+               valid_len = sizeof(struct i40e_virtchnl_rss_key);
+               if (msglen >= valid_len) {
+                       struct i40e_virtchnl_rss_key *vrk =
+                               (struct i40e_virtchnl_rss_key *)msg;
+                       if (vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
+                               err_msg_format = true;
+                               break;
+                       }
+                       valid_len += vrk->key_len - 1;
+               }
+               break;
+       case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
+               valid_len = sizeof(struct i40e_virtchnl_rss_lut);
+               if (msglen >= valid_len) {
+                       struct i40e_virtchnl_rss_lut *vrl =
+                               (struct i40e_virtchnl_rss_lut *)msg;
+                       if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
+                               err_msg_format = true;
+                               break;
+                       }
+                       valid_len += vrl->lut_entries - 1;
+               }
+               break;
+       case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+               break;
+       case I40E_VIRTCHNL_OP_SET_RSS_HENA:
+               valid_len = sizeof(struct i40e_virtchnl_rss_hena);
+               break;
        /* These are always errors coming from the VF. */
        case I40E_VIRTCHNL_OP_EVENT:
        case I40E_VIRTCHNL_OP_UNKNOWN:
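The "- 1" in both RSS cases exists because the virtchnl structs end in a one-element array (key[1] and lut[1]), so sizeof() already includes one payload byte. A CONFIG_RSS_KEY message carrying key_len key bytes must therefore total sizeof(struct i40e_virtchnl_rss_key) + key_len - 1 bytes; presumably the common length check at the end of this function rejects anything that does not match valid_len exactly.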
@@ -2188,11 +2517,11 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
  * called from the common aeq/arq handler to
  * process request from VF
  **/
-int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
+int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
                           u32 v_retval, u8 *msg, u16 msglen)
 {
        struct i40e_hw *hw = &pf->hw;
-       unsigned int local_vf_id = vf_id - hw->func_caps.vf_base_id;
+       int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
        struct i40e_vf *vf;
        int ret;
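Switching vf_id and local_vf_id to signed types makes an out-of-range ID show up as an unambiguous negative value instead of relying on unsigned wrap-around. For example, with a hypothetical vf_base_id of 64, a stray message carrying vf_id = 63 now yields local_vf_id = -1, which a simple range check rejects; the old unsigned subtraction wrapped it to a huge positive index instead. The matching prototype and struct changes are in the header hunks below.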
 
@@ -2260,6 +2589,19 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
        case I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
                ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false);
                break;
+       case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
+               ret = i40e_vc_config_rss_key(vf, msg, msglen);
+               break;
+       case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
+               ret = i40e_vc_config_rss_lut(vf, msg, msglen);
+               break;
+       case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+               ret = i40e_vc_get_rss_hena(vf, msg, msglen);
+               break;
+       case I40E_VIRTCHNL_OP_SET_RSS_HENA:
+               ret = i40e_vc_set_rss_hena(vf, msg, msglen);
+               break;
+
        case I40E_VIRTCHNL_OP_UNKNOWN:
        default:
                dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
@@ -2281,9 +2623,10 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
  **/
 int i40e_vc_process_vflr_event(struct i40e_pf *pf)
 {
-       u32 reg, reg_idx, bit_idx, vf_id;
        struct i40e_hw *hw = &pf->hw;
+       u32 reg, reg_idx, bit_idx;
        struct i40e_vf *vf;
+       int vf_id;
 
        if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
                return 0;
index 838cbd2299a427c7fce8d954811ab83f282417df..8751741414517e3ba37213347fc2c976c145c10f 100644 (file)
@@ -61,6 +61,8 @@ enum i40e_vf_states {
        I40E_VF_STAT_IWARPENA,
        I40E_VF_STAT_FCOEENA,
        I40E_VF_STAT_DISABLED,
+       I40E_VF_STAT_MC_PROMISC,
+       I40E_VF_STAT_UC_PROMISC,
 };
 
 /* VF capabilities */
@@ -75,7 +77,7 @@ struct i40e_vf {
        struct i40e_pf *pf;
 
        /* VF id in the PF space */
-       u16 vf_id;
+       s16 vf_id;
        /* all VF vsis connect to the same parent */
        enum i40e_switch_element_types parent_type;
        struct i40e_virtchnl_version_info vf_ver;
@@ -109,6 +111,9 @@ struct i40e_vf {
        bool link_forced;
        bool link_up;           /* only valid if VF link is forced */
        bool spoofchk;
+       u16 num_mac;
+       u16 num_vlan;
+
        /* RDMA Client */
        struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info;
 };
@@ -116,7 +121,7 @@ struct i40e_vf {
 void i40e_free_vfs(struct i40e_pf *pf);
 int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
 int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs);
-int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
+int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
                           u32 v_retval, u8 *msg, u16 msglen);
 int i40e_vc_process_vflr_event(struct i40e_pf *pf);
 void i40e_reset_vf(struct i40e_vf *vf, bool flr);
index aad8d62771102e090fcdf22fae68722306c4f2d7..3114dcfa172458a9ea0a48d9f847fa86e435140f 100644 (file)
@@ -78,17 +78,17 @@ struct i40e_aq_desc {
 #define I40E_AQ_FLAG_EI_SHIFT  14
 #define I40E_AQ_FLAG_FE_SHIFT  15
 
-#define I40E_AQ_FLAG_DD                (1 << I40E_AQ_FLAG_DD_SHIFT)  /* 0x1    */
-#define I40E_AQ_FLAG_CMP       (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2    */
-#define I40E_AQ_FLAG_ERR       (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4    */
-#define I40E_AQ_FLAG_VFE       (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8    */
-#define I40E_AQ_FLAG_LB                (1 << I40E_AQ_FLAG_LB_SHIFT)  /* 0x200  */
-#define I40E_AQ_FLAG_RD                (1 << I40E_AQ_FLAG_RD_SHIFT)  /* 0x400  */
-#define I40E_AQ_FLAG_VFC       (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800  */
-#define I40E_AQ_FLAG_BUF       (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
-#define I40E_AQ_FLAG_SI                (1 << I40E_AQ_FLAG_SI_SHIFT)  /* 0x2000 */
-#define I40E_AQ_FLAG_EI                (1 << I40E_AQ_FLAG_EI_SHIFT)  /* 0x4000 */
-#define I40E_AQ_FLAG_FE                (1 << I40E_AQ_FLAG_FE_SHIFT)  /* 0x8000 */
+#define I40E_AQ_FLAG_DD                BIT(I40E_AQ_FLAG_DD_SHIFT)  /* 0x1    */
+#define I40E_AQ_FLAG_CMP       BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2    */
+#define I40E_AQ_FLAG_ERR       BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4    */
+#define I40E_AQ_FLAG_VFE       BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8    */
+#define I40E_AQ_FLAG_LB                BIT(I40E_AQ_FLAG_LB_SHIFT)  /* 0x200  */
+#define I40E_AQ_FLAG_RD                BIT(I40E_AQ_FLAG_RD_SHIFT)  /* 0x400  */
+#define I40E_AQ_FLAG_VFC       BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800  */
+#define I40E_AQ_FLAG_BUF       BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI                BIT(I40E_AQ_FLAG_SI_SHIFT)  /* 0x2000 */
+#define I40E_AQ_FLAG_EI                BIT(I40E_AQ_FLAG_EI_SHIFT)  /* 0x4000 */
+#define I40E_AQ_FLAG_FE                BIT(I40E_AQ_FLAG_FE_SHIFT)  /* 0x8000 */
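BIT() is the kernel's standard helper, defined (in this kernel generation) in include/linux/bitops.h as:

    #define BIT(nr) (1UL << (nr))

so the conversion is purely cosmetic for these flags: every value stays the same, and checkpatch's "prefer BIT macro" warning goes away.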
 
 /* error codes */
 enum i40e_admin_queue_err {
@@ -205,10 +205,6 @@ enum i40e_admin_queue_opc {
        i40e_aqc_opc_resume_port_tx                             = 0x041C,
        i40e_aqc_opc_configure_partition_bw                     = 0x041D,
 
-       /* hmc */
-       i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
-       i40e_aqc_opc_set_hmc_resource_profile   = 0x0501,
-
        /* phy commands*/
        i40e_aqc_opc_get_phy_abilities          = 0x0600,
        i40e_aqc_opc_set_phy_config             = 0x0601,
@@ -426,6 +422,7 @@ struct i40e_aqc_list_capabilities_element_resp {
 #define I40E_AQ_CAP_ID_SDP             0x0062
 #define I40E_AQ_CAP_ID_MDIO            0x0063
 #define I40E_AQ_CAP_ID_WSR_PROT                0x0064
+#define I40E_AQ_CAP_ID_NVM_MGMT                0x0080
 #define I40E_AQ_CAP_ID_FLEX10          0x00F1
 #define I40E_AQ_CAP_ID_CEM             0x00F2
 
@@ -1582,27 +1579,6 @@ struct i40e_aqc_configure_partition_bw_data {
 
 I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
 
-/* Get and set the active HMC resource profile and status.
- * (direct 0x0500) and (direct 0x0501)
- */
-struct i40e_aq_get_set_hmc_resource_profile {
-       u8      pm_profile;
-       u8      pe_vf_enabled;
-       u8      reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
-
-enum i40e_aq_hmc_profile {
-       /* I40E_HMC_PROFILE_NO_CHANGE    = 0, reserved */
-       I40E_HMC_PROFILE_DEFAULT        = 1,
-       I40E_HMC_PROFILE_FAVOR_VF       = 2,
-       I40E_HMC_PROFILE_EQUAL          = 3,
-};
-
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK       0xF
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK    0x3F
-
 /* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
 
 /* set in param0 for get phy abilities to report qualified modules */
@@ -1649,11 +1625,11 @@ enum i40e_aq_phy_type {
 
 enum i40e_aq_link_speed {
        I40E_LINK_SPEED_UNKNOWN = 0,
-       I40E_LINK_SPEED_100MB   = (1 << I40E_LINK_SPEED_100MB_SHIFT),
-       I40E_LINK_SPEED_1GB     = (1 << I40E_LINK_SPEED_1000MB_SHIFT),
-       I40E_LINK_SPEED_10GB    = (1 << I40E_LINK_SPEED_10GB_SHIFT),
-       I40E_LINK_SPEED_40GB    = (1 << I40E_LINK_SPEED_40GB_SHIFT),
-       I40E_LINK_SPEED_20GB    = (1 << I40E_LINK_SPEED_20GB_SHIFT)
+       I40E_LINK_SPEED_100MB   = BIT(I40E_LINK_SPEED_100MB_SHIFT),
+       I40E_LINK_SPEED_1GB     = BIT(I40E_LINK_SPEED_1000MB_SHIFT),
+       I40E_LINK_SPEED_10GB    = BIT(I40E_LINK_SPEED_10GB_SHIFT),
+       I40E_LINK_SPEED_40GB    = BIT(I40E_LINK_SPEED_40GB_SHIFT),
+       I40E_LINK_SPEED_20GB    = BIT(I40E_LINK_SPEED_20GB_SHIFT)
 };
 
 struct i40e_aqc_module_desc {
@@ -1924,9 +1900,9 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
 /* Used for 0x0704 as well as for 0x0705 commands */
 #define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT                1
 #define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
-                               (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+                               BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
 #define I40E_AQ_ANVM_FEATURE           0
-#define I40E_AQ_ANVM_IMMEDIATE_FIELD   (1 << FEATURE_OR_IMMEDIATE_SHIFT)
+#define I40E_AQ_ANVM_IMMEDIATE_FIELD   BIT(FEATURE_OR_IMMEDIATE_SHIFT)
 struct i40e_aqc_nvm_config_data_feature {
        __le16 feature_id;
 #define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY           0x01
@@ -2195,7 +2171,7 @@ struct i40e_aqc_del_udp_tunnel_completion {
 I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
 
 struct i40e_aqc_get_set_rss_key {
-#define I40E_AQC_SET_RSS_KEY_VSI_VALID         (0x1 << 15)
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID         BIT(15)
 #define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT      0
 #define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK       (0x3FF << \
                                        I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
@@ -2215,14 +2191,14 @@ struct i40e_aqc_get_set_rss_key_data {
 I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
 
 struct  i40e_aqc_get_set_rss_lut {
-#define I40E_AQC_SET_RSS_LUT_VSI_VALID         (0x1 << 15)
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID         BIT(15)
 #define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT      0
 #define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK       (0x3FF << \
                                        I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
        __le16  vsi_id;
 #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT  0
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK   (0x1 << \
-                                       I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK \
+                               BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
 
 #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI    0
 #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF     1
index 4db0c03261857b2fc81aa2431ad506d09b89076c..8f64204000fbb5849146316df8b8d8cce2850a6a 100644 (file)
@@ -59,6 +59,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
                case I40E_DEV_ID_1G_BASE_T_X722:
                case I40E_DEV_ID_10G_BASE_T_X722:
                case I40E_DEV_ID_SFP_I_X722:
+               case I40E_DEV_ID_QSFP_I_X722:
                        hw->mac.type = I40E_MAC_X722;
                        break;
                case I40E_DEV_ID_X722_VF:
index 70235706915ee8c7be6ea35cd4e4ca31be9e6f9a..d34972bab09c0b023ca783382164539ef28bef09 100644 (file)
@@ -45,6 +45,7 @@
 #define I40E_DEV_ID_1G_BASE_T_X722     0x37D1
 #define I40E_DEV_ID_10G_BASE_T_X722    0x37D2
 #define I40E_DEV_ID_SFP_I_X722         0x37D3
+#define I40E_DEV_ID_QSFP_I_X722                0x37D4
 #define I40E_DEV_ID_X722_VF            0x37CD
 #define I40E_DEV_ID_X722_VF_HV         0x37D9
 
index 0c912a4999dbc2665664330fc248b8c471d2d01e..fd7dae46c5d819cb4c04585d3ba631b3fe1a3554 100644 (file)
@@ -496,7 +496,6 @@ err:
 void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
 {
        struct device *dev = rx_ring->dev;
-       struct i40e_rx_buffer *rx_bi;
        unsigned long bi_size;
        u16 i;
 
@@ -504,48 +503,22 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
        if (!rx_ring->rx_bi)
                return;
 
-       if (ring_is_ps_enabled(rx_ring)) {
-               int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;
-
-               rx_bi = &rx_ring->rx_bi[0];
-               if (rx_bi->hdr_buf) {
-                       dma_free_coherent(dev,
-                                         bufsz,
-                                         rx_bi->hdr_buf,
-                                         rx_bi->dma);
-                       for (i = 0; i < rx_ring->count; i++) {
-                               rx_bi = &rx_ring->rx_bi[i];
-                               rx_bi->dma = 0;
-                               rx_bi->hdr_buf = NULL;
-                       }
-               }
-       }
        /* Free all the Rx ring sk_buffs */
        for (i = 0; i < rx_ring->count; i++) {
-               rx_bi = &rx_ring->rx_bi[i];
-               if (rx_bi->dma) {
-                       dma_unmap_single(dev,
-                                        rx_bi->dma,
-                                        rx_ring->rx_buf_len,
-                                        DMA_FROM_DEVICE);
-                       rx_bi->dma = 0;
-               }
+               struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
+
                if (rx_bi->skb) {
                        dev_kfree_skb(rx_bi->skb);
                        rx_bi->skb = NULL;
                }
-               if (rx_bi->page) {
-                       if (rx_bi->page_dma) {
-                               dma_unmap_page(dev,
-                                              rx_bi->page_dma,
-                                              PAGE_SIZE,
-                                              DMA_FROM_DEVICE);
-                               rx_bi->page_dma = 0;
-                       }
-                       __free_page(rx_bi->page);
-                       rx_bi->page = NULL;
-                       rx_bi->page_offset = 0;
-               }
+               if (!rx_bi->page)
+                       continue;
+
+               dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE);
+               __free_pages(rx_bi->page, 0);
+
+               rx_bi->page = NULL;
+               rx_bi->page_offset = 0;
        }
 
        bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
@@ -554,6 +527,7 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
        /* Zero out the descriptor ring */
        memset(rx_ring->desc, 0, rx_ring->size);
 
+       rx_ring->next_to_alloc = 0;
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
 }
@@ -577,37 +551,6 @@ void i40evf_free_rx_resources(struct i40e_ring *rx_ring)
        }
 }
 
-/**
- * i40evf_alloc_rx_headers - allocate rx header buffers
- * @rx_ring: ring to alloc buffers
- *
- * Allocate rx header buffers for the entire ring. As these are static,
- * this is only called when setting up a new ring.
- **/
-void i40evf_alloc_rx_headers(struct i40e_ring *rx_ring)
-{
-       struct device *dev = rx_ring->dev;
-       struct i40e_rx_buffer *rx_bi;
-       dma_addr_t dma;
-       void *buffer;
-       int buf_size;
-       int i;
-
-       if (rx_ring->rx_bi[0].hdr_buf)
-               return;
-       /* Make sure the buffers don't cross cache line boundaries. */
-       buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
-       buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
-                                   &dma, GFP_KERNEL);
-       if (!buffer)
-               return;
-       for (i = 0; i < rx_ring->count; i++) {
-               rx_bi = &rx_ring->rx_bi[i];
-               rx_bi->dma = dma + (i * buf_size);
-               rx_bi->hdr_buf = buffer + (i * buf_size);
-       }
-}
-
 /**
  * i40evf_setup_rx_descriptors - Allocate Rx descriptors
  * @rx_ring: Rx descriptor ring (for a specific queue) to setup
@@ -629,9 +572,7 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring)
        u64_stats_init(&rx_ring->syncp);
 
        /* Round up to nearest 4K */
-       rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
-               ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
-               : rx_ring->count * sizeof(union i40e_32byte_rx_desc);
+       rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
        rx_ring->size = ALIGN(rx_ring->size, 4096);
        rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
                                           &rx_ring->dma, GFP_KERNEL);
@@ -642,6 +583,7 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring)
                goto err;
        }
 
+       rx_ring->next_to_alloc = 0;
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
 
@@ -660,6 +602,10 @@ err:
 static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
 {
        rx_ring->next_to_use = val;
+
+       /* update next to alloc since we have filled the ring */
+       rx_ring->next_to_alloc = val;
+
        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
@@ -670,160 +616,122 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
 }
 
 /**
- * i40evf_alloc_rx_buffers_ps - Replace used receive buffers; packet split
- * @rx_ring: ring to place buffers on
- * @cleaned_count: number of buffers to replace
+ * i40e_alloc_mapped_page - recycle or make a new page
+ * @rx_ring: ring to use
+ * @bi: rx_buffer struct to modify
  *
- * Returns true if any errors on allocation
+ * Returns true if the page was successfully allocated or
+ * reused.
  **/
-bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
+static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
+                                  struct i40e_rx_buffer *bi)
 {
-       u16 i = rx_ring->next_to_use;
-       union i40e_rx_desc *rx_desc;
-       struct i40e_rx_buffer *bi;
-       const int current_node = numa_node_id();
+       struct page *page = bi->page;
+       dma_addr_t dma;
 
-       /* do nothing if no valid netdev defined */
-       if (!rx_ring->netdev || !cleaned_count)
-               return false;
+       /* since we are recycling buffers we should seldom need to alloc */
+       if (likely(page)) {
+               rx_ring->rx_stats.page_reuse_count++;
+               return true;
+       }
 
-       while (cleaned_count--) {
-               rx_desc = I40E_RX_DESC(rx_ring, i);
-               bi = &rx_ring->rx_bi[i];
+       /* alloc new page for storage */
+       page = dev_alloc_page();
+       if (unlikely(!page)) {
+               rx_ring->rx_stats.alloc_page_failed++;
+               return false;
+       }
 
-               if (bi->skb) /* desc is in use */
-                       goto no_buffers;
+       /* map page for use */
+       dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
 
-       /* If we've been moved to a different NUMA node, release the
-        * page so we can get a new one on the current node.
+       /* if mapping failed free memory back to system since
+        * there isn't much point in holding memory we can't use
         */
-               if (bi->page &&  page_to_nid(bi->page) != current_node) {
-                       dma_unmap_page(rx_ring->dev,
-                                      bi->page_dma,
-                                      PAGE_SIZE,
-                                      DMA_FROM_DEVICE);
-                       __free_page(bi->page);
-                       bi->page = NULL;
-                       bi->page_dma = 0;
-                       rx_ring->rx_stats.realloc_count++;
-               } else if (bi->page) {
-                       rx_ring->rx_stats.page_reuse_count++;
-               }
-
-               if (!bi->page) {
-                       bi->page = alloc_page(GFP_ATOMIC);
-                       if (!bi->page) {
-                               rx_ring->rx_stats.alloc_page_failed++;
-                               goto no_buffers;
-                       }
-                       bi->page_dma = dma_map_page(rx_ring->dev,
-                                                   bi->page,
-                                                   0,
-                                                   PAGE_SIZE,
-                                                   DMA_FROM_DEVICE);
-                       if (dma_mapping_error(rx_ring->dev, bi->page_dma)) {
-                               rx_ring->rx_stats.alloc_page_failed++;
-                               __free_page(bi->page);
-                               bi->page = NULL;
-                               bi->page_dma = 0;
-                               bi->page_offset = 0;
-                               goto no_buffers;
-                       }
-                       bi->page_offset = 0;
-               }
-
-               /* Refresh the desc even if buffer_addrs didn't change
-                * because each write-back erases this info.
-                */
-               rx_desc->read.pkt_addr =
-                               cpu_to_le64(bi->page_dma + bi->page_offset);
-               rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
-               i++;
-               if (i == rx_ring->count)
-                       i = 0;
+       if (dma_mapping_error(rx_ring->dev, dma)) {
+               __free_pages(page, 0);
+               rx_ring->rx_stats.alloc_page_failed++;
+               return false;
        }
 
-       if (rx_ring->next_to_use != i)
-               i40e_release_rx_desc(rx_ring, i);
+       bi->dma = dma;
+       bi->page = page;
+       bi->page_offset = 0;
 
-       return false;
+       return true;
+}
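The recycling contract is visible in the first test: a non-NULL bi->page means a still-mapped page was left behind, presumably by page-reuse logic in the clean path (not shown in this hunk), so the refill just bumps page_reuse_count and returns. Only slots whose page went to the stack pay for dev_alloc_page() plus dma_map_page(), and a failed mapping immediately frees the page rather than holding memory the hardware cannot use.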
 
-no_buffers:
-       if (rx_ring->next_to_use != i)
-               i40e_release_rx_desc(rx_ring, i);
+/**
+ * i40e_receive_skb - Send a completed packet up the stack
+ * @rx_ring:  rx ring in play
+ * @skb: packet to send up
+ * @vlan_tag: vlan tag for packet
+ **/
+static void i40e_receive_skb(struct i40e_ring *rx_ring,
+                            struct sk_buff *skb, u16 vlan_tag)
+{
+       struct i40e_q_vector *q_vector = rx_ring->q_vector;
 
-       /* make sure to come back via polling to try again after
-        * allocation failure
-        */
-       return true;
+       if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+           (vlan_tag & VLAN_VID_MASK))
+               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+
+       napi_gro_receive(&q_vector->napi, skb);
 }
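Compared with the copy removed further down in this diff, the rewritten i40e_receive_skb() only inserts the VLAN tag when the netdev actually has NETIF_F_HW_VLAN_CTAG_RX enabled, so the accelerated tag is reported to the stack only while hardware VLAN stripping is turned on.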
 
 /**
- * i40evf_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
+ * i40evf_alloc_rx_buffers - Replace used receive buffers
  * @rx_ring: ring to place buffers on
  * @cleaned_count: number of buffers to replace
  *
- * Returns true if any errors on allocation
+ * Returns false if all allocations were successful, true if any fail
  **/
-bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
+bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
 {
-       u16 i = rx_ring->next_to_use;
+       u16 ntu = rx_ring->next_to_use;
        union i40e_rx_desc *rx_desc;
        struct i40e_rx_buffer *bi;
-       struct sk_buff *skb;
 
        /* do nothing if no valid netdev defined */
        if (!rx_ring->netdev || !cleaned_count)
                return false;
 
-       while (cleaned_count--) {
-               rx_desc = I40E_RX_DESC(rx_ring, i);
-               bi = &rx_ring->rx_bi[i];
-               skb = bi->skb;
-
-               if (!skb) {
-                       skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
-                                                         rx_ring->rx_buf_len,
-                                                         GFP_ATOMIC |
-                                                         __GFP_NOWARN);
-                       if (!skb) {
-                               rx_ring->rx_stats.alloc_buff_failed++;
-                               goto no_buffers;
-                       }
-                       /* initialize queue mapping */
-                       skb_record_rx_queue(skb, rx_ring->queue_index);
-                       bi->skb = skb;
-               }
+       rx_desc = I40E_RX_DESC(rx_ring, ntu);
+       bi = &rx_ring->rx_bi[ntu];
 
-               if (!bi->dma) {
-                       bi->dma = dma_map_single(rx_ring->dev,
-                                                skb->data,
-                                                rx_ring->rx_buf_len,
-                                                DMA_FROM_DEVICE);
-                       if (dma_mapping_error(rx_ring->dev, bi->dma)) {
-                               rx_ring->rx_stats.alloc_buff_failed++;
-                               bi->dma = 0;
-                               dev_kfree_skb(bi->skb);
-                               bi->skb = NULL;
-                               goto no_buffers;
-                       }
-               }
+       do {
+               if (!i40e_alloc_mapped_page(rx_ring, bi))
+                       goto no_buffers;
 
-               rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+               /* Refresh the desc even if buffer_addrs didn't change
+                * because each write-back erases this info.
+                */
+               rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
                rx_desc->read.hdr_addr = 0;
-               i++;
-               if (i == rx_ring->count)
-                       i = 0;
-       }
 
-       if (rx_ring->next_to_use != i)
-               i40e_release_rx_desc(rx_ring, i);
+               rx_desc++;
+               bi++;
+               ntu++;
+               if (unlikely(ntu == rx_ring->count)) {
+                       rx_desc = I40E_RX_DESC(rx_ring, 0);
+                       bi = rx_ring->rx_bi;
+                       ntu = 0;
+               }
+
+               /* clear the status bits for the next_to_use descriptor */
+               rx_desc->wb.qword1.status_error_len = 0;
+
+               cleaned_count--;
+       } while (cleaned_count);
+
+       if (rx_ring->next_to_use != ntu)
+               i40e_release_rx_desc(rx_ring, ntu);
 
        return false;
 
 no_buffers:
-       if (rx_ring->next_to_use != i)
-               i40e_release_rx_desc(rx_ring, i);
+       if (rx_ring->next_to_use != ntu)
+               i40e_release_rx_desc(rx_ring, ntu);
 
        /* make sure to come back via polling to try again after
         * allocation failure
@@ -831,42 +739,36 @@ no_buffers:
        return true;
 }
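Callers are expected to refill in batches rather than per descriptor. The pattern the removed packet-split path used, and which the rewritten clean routine presumably keeps with the new function name, looks like:

    /* return some buffers to hardware, one at a time is too slow */
    if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
            failure |= i40evf_alloc_rx_buffers(rx_ring, cleaned_count);
            cleaned_count = 0;
    }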
 
-/**
- * i40e_receive_skb - Send a completed packet up the stack
- * @rx_ring:  rx ring in play
- * @skb: packet to send up
- * @vlan_tag: vlan tag for packet
- **/
-static void i40e_receive_skb(struct i40e_ring *rx_ring,
-                            struct sk_buff *skb, u16 vlan_tag)
-{
-       struct i40e_q_vector *q_vector = rx_ring->q_vector;
-
-       if (vlan_tag & VLAN_VID_MASK)
-               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
-
-       napi_gro_receive(&q_vector->napi, skb);
-}
-
 /**
  * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
  * @vsi: the VSI we care about
  * @skb: skb currently being received and modified
- * @rx_status: status value of last descriptor in packet
- * @rx_error: error value of last descriptor in packet
- * @rx_ptype: ptype value of last descriptor in packet
+ * @rx_desc: the receive descriptor
+ *
+ * skb->protocol must be set before this function is called
  **/
 static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
                                    struct sk_buff *skb,
-                                   u32 rx_status,
-                                   u32 rx_error,
-                                   u16 rx_ptype)
+                                   union i40e_rx_desc *rx_desc)
 {
-       struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
-       bool ipv4, ipv6, ipv4_tunnel, ipv6_tunnel;
+       struct i40e_rx_ptype_decoded decoded;
+       bool ipv4, ipv6, tunnel = false;
+       u32 rx_error, rx_status;
+       u8 ptype;
+       u64 qword;
+
+       qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+       ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
+       rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
+                  I40E_RXD_QW1_ERROR_SHIFT;
+       rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+                   I40E_RXD_QW1_STATUS_SHIFT;
+       decoded = decode_rx_desc_ptype(ptype);
 
        skb->ip_summed = CHECKSUM_NONE;
 
+       skb_checksum_none_assert(skb);
+
        /* Rx csum enabled and ip headers found? */
        if (!(vsi->netdev->features & NETIF_F_RXCSUM))
                return;
@@ -912,14 +814,13 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
         * doesn't make it a hard requirement so if we have validated the
         * inner checksum report CHECKSUM_UNNECESSARY.
         */
-
-       ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
-                    (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
-       ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
-                    (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
+       if (decoded.inner_prot & (I40E_RX_PTYPE_INNER_PROT_TCP |
+                                 I40E_RX_PTYPE_INNER_PROT_UDP |
+                                 I40E_RX_PTYPE_INNER_PROT_SCTP))
+               tunnel = true;
 
        skb->ip_summed = CHECKSUM_UNNECESSARY;
-       skb->csum_level = ipv4_tunnel || ipv6_tunnel;
+       skb->csum_level = tunnel ? 1 : 0;
 
        return;
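The tunnel test is also much simpler: instead of matching the GRENAT4/GRENAT6 ptype ranges, any decoded ptype reporting an inner TCP, UDP or SCTP protocol is treated as tunneled, and csum_level then tells the stack whether the validated checksum is the inner or the outer one.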
 
@@ -933,7 +834,7 @@ checksum_fail:
  *
  * Returns a hash type to be used by skb_set_hash
  **/
-static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
+static inline int i40e_ptype_to_htype(u8 ptype)
 {
        struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
 
@@ -961,7 +862,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
                                u8 rx_ptype)
 {
        u32 hash;
-       const __le64 rss_mask  =
+       const __le64 rss_mask =
                cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
                            I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
 
@@ -975,315 +876,411 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
 }
 
 /**
- * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
- * @rx_ring:  rx ring to clean
- * @budget:   how many cleans we're allowed
+ * i40evf_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ * @rx_ptype: the packet type decoded by hardware
  *
- * Returns true if there's any budget left (e.g. the clean is finished)
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, protocol, and
+ * other fields within the skb.
  **/
-static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget)
+static inline
+void i40evf_process_skb_fields(struct i40e_ring *rx_ring,
+                              union i40e_rx_desc *rx_desc, struct sk_buff *skb,
+                              u8 rx_ptype)
 {
-       unsigned int total_rx_bytes = 0, total_rx_packets = 0;
-       u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
-       u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
-       struct i40e_vsi *vsi = rx_ring->vsi;
-       u16 i = rx_ring->next_to_clean;
-       union i40e_rx_desc *rx_desc;
-       u32 rx_error, rx_status;
-       bool failure = false;
-       u8 rx_ptype;
-       u64 qword;
-       u32 copysize;
+       i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
 
-       do {
-               struct i40e_rx_buffer *rx_bi;
-               struct sk_buff *skb;
-               u16 vlan_tag;
-               /* return some buffers to hardware, one at a time is too slow */
-               if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
-                       failure = failure ||
-                                 i40evf_alloc_rx_buffers_ps(rx_ring,
-                                                            cleaned_count);
-                       cleaned_count = 0;
-               }
+       /* modifies the skb - consumes the enet header */
+       skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 
-               i = rx_ring->next_to_clean;
-               rx_desc = I40E_RX_DESC(rx_ring, i);
-               qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-               rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
-                       I40E_RXD_QW1_STATUS_SHIFT;
+       i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
 
-               if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
-                       break;
+       skb_record_rx_queue(skb, rx_ring->queue_index);
+}
 
-               /* This memory barrier is needed to keep us from reading
-                * any other fields out of the rx_desc until we know the
-                * DD bit is set.
-                */
-               dma_rmb();
-               /* sync header buffer for reading */
-               dma_sync_single_range_for_cpu(rx_ring->dev,
-                                             rx_ring->rx_bi[0].dma,
-                                             i * rx_ring->rx_hdr_len,
-                                             rx_ring->rx_hdr_len,
-                                             DMA_FROM_DEVICE);
-               rx_bi = &rx_ring->rx_bi[i];
-               skb = rx_bi->skb;
-               if (likely(!skb)) {
-                       skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
-                                                         rx_ring->rx_hdr_len,
-                                                         GFP_ATOMIC |
-                                                         __GFP_NOWARN);
-                       if (!skb) {
-                               rx_ring->rx_stats.alloc_buff_failed++;
-                               failure = true;
-                               break;
-                       }
+/**
+ * i40e_pull_tail - i40e specific version of skb_pull_tail
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being adjusted
+ *
+ * This function is an i40e specific version of __pskb_pull_tail.  The
+ * main difference between this version and the original function is that
+ * this function can make several assumptions about the state of things
+ * that allow for significant optimizations versus the standard function.
+ * As a result we can do things like drop a frag and maintain an accurate
+ * truesize for the skb.
+ */
+static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)
+{
+       struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+       unsigned char *va;
+       unsigned int pull_len;
 
-                       /* initialize queue mapping */
-                       skb_record_rx_queue(skb, rx_ring->queue_index);
-                       /* we are reusing so sync this buffer for CPU use */
-                       dma_sync_single_range_for_cpu(rx_ring->dev,
-                                                     rx_ring->rx_bi[0].dma,
-                                                     i * rx_ring->rx_hdr_len,
-                                                     rx_ring->rx_hdr_len,
-                                                     DMA_FROM_DEVICE);
-               }
-               rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
-                               I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
-               rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
-                               I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
-               rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
-                        I40E_RXD_QW1_LENGTH_SPH_SHIFT;
-
-               rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
-                          I40E_RXD_QW1_ERROR_SHIFT;
-               rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
-               rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+       /* it is valid to use page_address instead of kmap since we are
+        * working with pages allocated out of the lowmem pool via
+        * dev_alloc_page()
+        */
+       va = skb_frag_address(frag);
 
-               rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
-                          I40E_RXD_QW1_PTYPE_SHIFT;
-               /* sync half-page for reading */
-               dma_sync_single_range_for_cpu(rx_ring->dev,
-                                             rx_bi->page_dma,
-                                             rx_bi->page_offset,
-                                             PAGE_SIZE / 2,
-                                             DMA_FROM_DEVICE);
-               prefetch(page_address(rx_bi->page) + rx_bi->page_offset);
-               rx_bi->skb = NULL;
-               cleaned_count++;
-               copysize = 0;
-               if (rx_hbo || rx_sph) {
-                       int len;
-
-                       if (rx_hbo)
-                               len = I40E_RX_HDR_SIZE;
-                       else
-                               len = rx_header_len;
-                       memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
-               } else if (skb->len == 0) {
-                       int len;
-                       unsigned char *va = page_address(rx_bi->page) +
-                                           rx_bi->page_offset;
-
-                       len = min(rx_packet_len, rx_ring->rx_hdr_len);
-                       memcpy(__skb_put(skb, len), va, len);
-                       copysize = len;
-                       rx_packet_len -= len;
-               }
-               /* Get the rest of the data if this was a header split */
-               if (rx_packet_len) {
-                       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-                                       rx_bi->page,
-                                       rx_bi->page_offset + copysize,
-                                       rx_packet_len, I40E_RXBUFFER_2048);
-
-                       /* If the page count is more than 2, then both halves
-                        * of the page are used and we need to free it. Do it
-                        * here instead of in the alloc code. Otherwise one
-                        * of the half-pages might be released between now and
-                        * then, and we wouldn't know which one to use.
-                        * Don't call get_page and free_page since those are
-                        * both expensive atomic operations that just change
-                        * the refcount in opposite directions. Just give the
-                        * page to the stack; he can have our refcount.
-                        */
-                       if (page_count(rx_bi->page) > 2) {
-                               dma_unmap_page(rx_ring->dev,
-                                              rx_bi->page_dma,
-                                              PAGE_SIZE,
-                                              DMA_FROM_DEVICE);
-                               rx_bi->page = NULL;
-                               rx_bi->page_dma = 0;
-                               rx_ring->rx_stats.realloc_count++;
-                       } else {
-                               get_page(rx_bi->page);
-                               /* switch to the other half-page here; the
-                                * allocation code programs the right addr
-                                * into HW. If we haven't used this half-page,
-                                * the address won't be changed, and HW can
-                                * just use it next time through.
-                                */
-                               rx_bi->page_offset ^= PAGE_SIZE / 2;
-                       }
+       /* we need the header to contain the greater of either ETH_HLEN or
+        * 60 bytes; if skb->len is less than 60, skb_pad needs that much
+        * linear data to pad the frame.
+        */
+       pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
 
-               }
-               I40E_RX_INCREMENT(rx_ring, i);
+       /* align pull length to size of long to optimize memcpy performance */
+       skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
 
-               if (unlikely(
-                   !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
-                       struct i40e_rx_buffer *next_buffer;
+       /* update all of the pointers */
+       skb_frag_size_sub(frag, pull_len);
+       frag->page_offset += pull_len;
+       skb->data_len -= pull_len;
+       skb->tail += pull_len;
+}
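The ALIGN() above is worth a worked example: if eth_get_headlen() returns pull_len = 42 (an Ethernet + IPv4 + UDP header), the driver copies ALIGN(42, 8) = 48 bytes on a 64-bit host. Copying a few bytes past pull_len is safe because the linear area was allocated with I40E_RX_HDR_SIZE (256) bytes, a multiple of sizeof(long), and it lets memcpy run in whole word-sized chunks; the frag pointers are then advanced by the unaligned pull_len, so no payload is skipped.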
 
-                       next_buffer = &rx_ring->rx_bi[i];
-                       next_buffer->skb = skb;
-                       rx_ring->rx_stats.non_eop_descs++;
-                       continue;
-               }
+/**
+ * i40e_cleanup_headers - Correct empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being fixed
+ *
+ * Also addresses the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition, if the skb is not at least 60 bytes we need to pad it so
+ * that it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ **/
+static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
+{
+       /* place header in linear portion of buffer */
+       if (skb_is_nonlinear(skb))
+               i40e_pull_tail(rx_ring, skb);
 
-               /* ERR_MASK will only have valid bits if EOP set */
-               if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
-                       dev_kfree_skb_any(skb);
-                       continue;
-               }
+       /* if eth_skb_pad returns an error the skb was freed */
+       if (eth_skb_pad(skb))
+               return true;
 
-               i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
+       return false;
+}
 
-               /* probably a little skewed due to removing CRC */
-               total_rx_bytes += skb->len;
-               total_rx_packets++;
+/**
+ * i40e_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
+                              struct i40e_rx_buffer *old_buff)
+{
+       struct i40e_rx_buffer *new_buff;
+       u16 nta = rx_ring->next_to_alloc;
 
-               skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+       new_buff = &rx_ring->rx_bi[nta];
 
-               i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+       /* update, and store next to alloc */
+       nta++;
+       rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
 
-               vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
-                        ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
-                        : 0;
-#ifdef I40E_FCOE
-               if (unlikely(
-                   i40e_rx_is_fcoe(rx_ptype) &&
-                   !i40e_fcoe_handle_offload(rx_ring, rx_desc, skb))) {
-                       dev_kfree_skb_any(skb);
-                       continue;
-               }
+       /* transfer page from old buffer to new buffer */
+       *new_buff = *old_buff;
+}
+
+/**
+ * i40e_page_is_reserved - check if reuse is possible
+ * @page: page struct to check
+ */
+static inline bool i40e_page_is_reserved(struct page *page)
+{
+       return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+}
+
+/**
+ * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @rx_desc: descriptor containing length of buffer written by hardware
+ * @skb: sk_buff to place the data into
+ *
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy, if the data in the buffer is
+ * less than the skb header size, or by attaching the page as a frag to
+ * the skb.
+ *
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
+ **/
+static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
+                            struct i40e_rx_buffer *rx_buffer,
+                            union i40e_rx_desc *rx_desc,
+                            struct sk_buff *skb)
+{
+       struct page *page = rx_buffer->page;
+       u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+       unsigned int size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+                           I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+#if (PAGE_SIZE < 8192)
+       unsigned int truesize = I40E_RXBUFFER_2048;
+#else
+       unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+       unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
 #endif
-               i40e_receive_skb(rx_ring, skb, vlan_tag);
 
-               rx_desc->wb.qword1.status_error_len = 0;
+       /* will the data fit in the skb we allocated? if so, just
+        * copy it as it is pretty small anyway
+        */
+       if ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
+               unsigned char *va = page_address(page) + rx_buffer->page_offset;
 
-       } while (likely(total_rx_packets < budget));
+               memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
-       u64_stats_update_begin(&rx_ring->syncp);
-       rx_ring->stats.packets += total_rx_packets;
-       rx_ring->stats.bytes += total_rx_bytes;
-       u64_stats_update_end(&rx_ring->syncp);
-       rx_ring->q_vector->rx.total_packets += total_rx_packets;
-       rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+               /* page is not reserved, we can reuse buffer as-is */
+               if (likely(!i40e_page_is_reserved(page)))
+                       return true;
 
-       return failure ? budget : total_rx_packets;
+               /* this page cannot be reused so discard it */
+               __free_pages(page, 0);
+               return false;
+       }
+
+       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+                       rx_buffer->page_offset, size, truesize);
+
+       /* avoid re-using remote pages */
+       if (unlikely(i40e_page_is_reserved(page)))
+               return false;
+
+#if (PAGE_SIZE < 8192)
+       /* if we are the only owner of the page we can reuse it */
+       if (unlikely(page_count(page) != 1))
+               return false;
+
+       /* flip page offset to other buffer */
+       rx_buffer->page_offset ^= truesize;
+#else
+       /* move offset up to the next cache line */
+       rx_buffer->page_offset += truesize;
+
+       if (rx_buffer->page_offset > last_offset)
+               return false;
+#endif
+
+       /* Even if we own the page, we are not allowed to use atomic_set();
+        * that would break get_page_unless_zero() users.
+        */
+       get_page(rx_buffer->page);
+
+       return true;
 }
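The buffer-reuse scheme is easiest to see with concrete numbers. Below is a standalone sketch of the offset arithmetic (illustration only, not driver code), assuming 4 KiB pages and the 2 KiB I40E_RXBUFFER_2048 buffers used above:

	unsigned int truesize = 2048;	/* I40E_RXBUFFER_2048 */
	unsigned int page_offset = 0;	/* first half of the page in use */

	page_offset ^= truesize;	/* 2048: hand the second half to HW */
	page_offset ^= truesize;	/* 0: flip back to the first half */

Each XOR flips which half of the page the hardware writes next, while the half just received stays attached to the skb; the get_page() above keeps the page alive until the stack releases its reference.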
 
 /**
- * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
- * @rx_ring:  rx ring to clean
- * @budget:   how many cleans we're allowed
+ * i40evf_fetch_rx_buffer - Allocate skb and populate it
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_desc: descriptor containing info written by hardware
  *
- * Returns number of packets cleaned
+ * This function allocates an skb on the fly, and populates it with the page
+ * data from the current receive descriptor, taking care to set up the skb
+ * correctly, as well as calling the page recycle function if necessary.
+ */
+static inline
+struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring,
+                                      union i40e_rx_desc *rx_desc)
+{
+       struct i40e_rx_buffer *rx_buffer;
+       struct sk_buff *skb;
+       struct page *page;
+
+       rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
+       page = rx_buffer->page;
+       prefetchw(page);
+
+       skb = rx_buffer->skb;
+
+       if (likely(!skb)) {
+               void *page_addr = page_address(page) + rx_buffer->page_offset;
+
+               /* prefetch first cache line of first page */
+               prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+               prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+               /* allocate a skb to store the frags */
+               skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+                                      I40E_RX_HDR_SIZE,
+                                      GFP_ATOMIC | __GFP_NOWARN);
+               if (unlikely(!skb)) {
+                       rx_ring->rx_stats.alloc_buff_failed++;
+                       return NULL;
+               }
+
+               /* we will be copying the header into skb->data in
+                * pskb_may_pull(), so it is in our interest to prefetch
+                * it now to avoid a possible cache miss
+                */
+               prefetchw(skb->data);
+       } else {
+               rx_buffer->skb = NULL;
+       }
+
+       /* we are reusing so sync this buffer for CPU use */
+       dma_sync_single_range_for_cpu(rx_ring->dev,
+                                     rx_buffer->dma,
+                                     rx_buffer->page_offset,
+                                     I40E_RXBUFFER_2048,
+                                     DMA_FROM_DEVICE);
+
+       /* pull page into skb */
+       if (i40e_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+               /* hand second half of page back to the ring */
+               i40e_reuse_rx_page(rx_ring, rx_buffer);
+               rx_ring->rx_stats.page_reuse_count++;
+       } else {
+               /* we are not reusing the buffer so unmap it */
+               dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
+                              DMA_FROM_DEVICE);
+       }
+
+       /* clear contents of buffer_info */
+       rx_buffer->page = NULL;
+
+       return skb;
+}
+
+/**
+ * i40e_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ * @skb: Current socket buffer containing buffer in progress
+ *
+ * This function updates next to clean.  If the buffer is an EOP buffer
+ * this function exits returning false; otherwise it will place the
+ * sk_buff in the next buffer to be chained and return true, indicating
+ * that this is in fact a non-EOP buffer.
  **/
-static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
+static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
+                           union i40e_rx_desc *rx_desc,
+                           struct sk_buff *skb)
+{
+       u32 ntc = rx_ring->next_to_clean + 1;
+
+       /* fetch, update, and store next to clean */
+       ntc = (ntc < rx_ring->count) ? ntc : 0;
+       rx_ring->next_to_clean = ntc;
+
+       prefetch(I40E_RX_DESC(rx_ring, ntc));
+
+       /* if we are the last buffer then there is nothing else to do */
+#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
+       if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
+               return false;
+
+       /* place skb in next buffer to be received */
+       rx_ring->rx_bi[ntc].skb = skb;
+       rx_ring->rx_stats.non_eop_descs++;
+
+       return true;
+}
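As a concrete example, a 5000-byte frame landing in 2048-byte buffers spans three descriptors: the first two are written without EOP, so i40e_is_non_eop() parks the in-progress skb in rx_bi[ntc].skb and returns true twice, making the receive loop continue; only the third descriptor, which carries EOP, lets the packet complete.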
+
+/**
+ * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a "bounce buffer" approach to Rx interrupt
+ * processing.  The advantage is that on systems where IOMMU access is
+ * expensive, keeping the page mapped and recycling it avoids repeated
+ * map/unmap overhead.
+ *
+ * Returns amount of work completed
+ **/
+static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 {
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
        u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
-       struct i40e_vsi *vsi = rx_ring->vsi;
-       union i40e_rx_desc *rx_desc;
-       u32 rx_error, rx_status;
-       u16 rx_packet_len;
        bool failure = false;
-       u8 rx_ptype;
-       u64 qword;
-       u16 i;
 
-       do {
-               struct i40e_rx_buffer *rx_bi;
+       while (likely(total_rx_packets < budget)) {
+               union i40e_rx_desc *rx_desc;
                struct sk_buff *skb;
+               u32 rx_status;
                u16 vlan_tag;
+               u8 rx_ptype;
+               u64 qword;
+
                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
                        failure = failure ||
-                                 i40evf_alloc_rx_buffers_1buf(rx_ring,
-                                                              cleaned_count);
+                                 i40evf_alloc_rx_buffers(rx_ring, cleaned_count);
                        cleaned_count = 0;
                }
 
-               i = rx_ring->next_to_clean;
-               rx_desc = I40E_RX_DESC(rx_ring, i);
+               rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
                qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+               rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+                          I40E_RXD_QW1_PTYPE_SHIFT;
                rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
-                       I40E_RXD_QW1_STATUS_SHIFT;
+                           I40E_RXD_QW1_STATUS_SHIFT;
 
                if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
                        break;
 
+               /* status_error_len will always be zero for unused descriptors
+                * because it's cleared in cleanup, and overlaps with hdr_addr,
+                * which is always zero because packet split isn't used.  If
+                * the hardware wrote DD then it will be non-zero.
+                */
+               if (!rx_desc->wb.qword1.status_error_len)
+                       break;
+
                /* This memory barrier is needed to keep us from reading
                 * any other fields out of the rx_desc until we know the
                 * DD bit is set.
                 */
                dma_rmb();
 
-               rx_bi = &rx_ring->rx_bi[i];
-               skb = rx_bi->skb;
-               prefetch(skb->data);
-
-               rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
-                               I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
-
-               rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
-                          I40E_RXD_QW1_ERROR_SHIFT;
-               rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+               skb = i40evf_fetch_rx_buffer(rx_ring, rx_desc);
+               if (!skb)
+                       break;
 
-               rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
-                          I40E_RXD_QW1_PTYPE_SHIFT;
-               rx_bi->skb = NULL;
                cleaned_count++;
 
-               /* Get the header and possibly the whole packet
-                * If this is an skb from previous receive dma will be 0
-                */
-               skb_put(skb, rx_packet_len);
-               dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
-                                DMA_FROM_DEVICE);
-               rx_bi->dma = 0;
-
-               I40E_RX_INCREMENT(rx_ring, i);
-
-               if (unlikely(
-                   !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
-                       rx_ring->rx_stats.non_eop_descs++;
+               if (i40e_is_non_eop(rx_ring, rx_desc, skb))
                        continue;
-               }
 
-               /* ERR_MASK will only have valid bits if EOP set */
-               if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+               /* ERR_MASK will only have valid bits if EOP is set, and
+                * what we are doing here is actually checking
+                * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit
+                * in the error field.
+                */
+               if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
                        dev_kfree_skb_any(skb);
                        continue;
                }
 
-               i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
+               if (i40e_cleanup_headers(rx_ring, skb))
+                       continue;
+
                /* probably a little skewed due to removing CRC */
                total_rx_bytes += skb->len;
-               total_rx_packets++;
 
-               skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+               /* populate checksum, VLAN, and protocol */
+               i40evf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
 
-               i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+               vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
+                          le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
 
-               vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
-                        ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
-                        : 0;
                i40e_receive_skb(rx_ring, skb, vlan_tag);
 
-               rx_desc->wb.qword1.status_error_len = 0;
-       } while (likely(total_rx_packets < budget));
+               /* update budget accounting */
+               total_rx_packets++;
+       }
 
        u64_stats_update_begin(&rx_ring->syncp);
        rx_ring->stats.packets += total_rx_packets;
@@ -1292,6 +1289,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
        rx_ring->q_vector->rx.total_packets += total_rx_packets;
        rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
 
+       /* guarantee a trip back through this routine if there was a failure */
        return failure ? budget : total_rx_packets;
 }
 
@@ -1433,12 +1431,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
        budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
 
        i40e_for_each_ring(ring, q_vector->rx) {
-               int cleaned;
-
-               if (ring_is_ps_enabled(ring))
-                       cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
-               else
-                       cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
+               int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
 
                work_done += cleaned;
                /* if we clean as many as budgeted, we must not be done */
@@ -1564,9 +1557,16 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
                ip.v6->payload_len = 0;
        }
 
-       if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE |
+       if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
+                                        SKB_GSO_GRE_CSUM |
+                                        SKB_GSO_IPIP |
+                                        SKB_GSO_SIT |
+                                        SKB_GSO_UDP_TUNNEL |
                                         SKB_GSO_UDP_TUNNEL_CSUM)) {
-               if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
+               if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
+                   (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
+                       l4.udp->len = 0;
+
                        /* determine offset of outer transport header */
                        l4_offset = l4.hdr - skb->data;
 
@@ -1665,13 +1665,6 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
                                                 &l4_proto, &frag_off);
                }
 
-               /* compute outer L3 header size */
-               tunnel |= ((l4.hdr - ip.hdr) / 4) <<
-                         I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
-
-               /* switch IP header pointer from outer to inner header */
-               ip.hdr = skb_inner_network_header(skb);
-
                /* define outer transport */
                switch (l4_proto) {
                case IPPROTO_UDP:
@@ -1682,6 +1675,11 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
                        tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
                        *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
                        break;
+               case IPPROTO_IPIP:
+               case IPPROTO_IPV6:
+                       *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
+                       l4.hdr = skb_inner_network_header(skb);
+                       break;
                default:
                        if (*tx_flags & I40E_TX_FLAGS_TSO)
                                return -1;
@@ -1690,12 +1688,20 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
                        return 0;
                }
 
+               /* compute outer L3 header size */
+               tunnel |= ((l4.hdr - ip.hdr) / 4) <<
+                         I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
+
+               /* switch IP header pointer from outer to inner header */
+               ip.hdr = skb_inner_network_header(skb);
+
                /* compute tunnel header size */
                tunnel |= ((ip.hdr - l4.hdr) / 2) <<
                          I40E_TXD_CTX_QW0_NATLEN_SHIFT;
 
                /* indicate if we need to offload outer UDP header */
                if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
+                   !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
                    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
                        tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
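The divisors above encode the units of the context fields: EXT_IPLEN is counted in 4-byte words, so a bare 20-byte outer IPv4 header contributes (l4.hdr - ip.hdr) / 4 = 5, while NATLEN is counted in 2-byte words, so a typical VXLAN encapsulation (8-byte outer UDP header plus 8-byte VXLAN header between l4.hdr and the inner IP header) contributes (ip.hdr - l4.hdr) / 2 = 8.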
 
@@ -1800,35 +1806,34 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
 }
 
 /**
- * __i40evf_chk_linearize - Check if there are more than 8 fragments per packet
+ * __i40evf_chk_linearize - Check if there are more than 8 buffers per packet
  * @skb:      send buffer
  *
- * Note: Our HW can't scatter-gather more than 8 fragments to build
- * a packet on the wire and so we need to figure out the cases where we
- * need to linearize the skb.
+ * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
+ * and so we need to figure out the cases where we need to linearize the skb.
+ *
+ * For TSO we need to count the TSO header and segment payload separately.
+ * As such we need to check cases where we have 7 fragments or more, as we
+ * can potentially require 9 DMA transactions: 1 for the TSO header, 1 for
+ * the segment payload in the first descriptor, and another 7 for the
+ * fragments.
  **/
 bool __i40evf_chk_linearize(struct sk_buff *skb)
 {
        const struct skb_frag_struct *frag, *stale;
-       int gso_size, nr_frags, sum;
-
-       /* check to see if TSO is enabled, if so we may get a repreive */
-       gso_size = skb_shinfo(skb)->gso_size;
-       if (unlikely(!gso_size))
-               return true;
+       int nr_frags, sum;
 
-       /* no need to check if number of frags is less than 8 */
+       /* no need to check if number of frags is less than 7 */
        nr_frags = skb_shinfo(skb)->nr_frags;
-       if (nr_frags < I40E_MAX_BUFFER_TXD)
+       if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
                return false;
 
        /* We need to walk through the list and validate that each group
         * of 6 fragments totals at least gso_size.  However we don't need
-        * to perform such validation on the first or last 6 since the first
-        * 6 cannot inherit any data from a descriptor before them, and the
-        * last 6 cannot inherit any data from a descriptor after them.
+        * to perform such validation on the last 6 since the last 6 cannot
+        * inherit any data from a descriptor after them.
         */
-       nr_frags -= I40E_MAX_BUFFER_TXD - 1;
+       nr_frags -= I40E_MAX_BUFFER_TXD - 2;
        frag = &skb_shinfo(skb)->frags[0];
 
        /* Initialize size to the negative value of gso_size minus 1.  We
@@ -1837,21 +1842,21 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
         * descriptors for a single transmit as the header and previous
         * fragment are already consuming 2 descriptors.
         */
-       sum = 1 - gso_size;
+       sum = 1 - skb_shinfo(skb)->gso_size;
 
-       /* Add size of frags 1 through 5 to create our initial sum */
-       sum += skb_frag_size(++frag);
-       sum += skb_frag_size(++frag);
-       sum += skb_frag_size(++frag);
-       sum += skb_frag_size(++frag);
-       sum += skb_frag_size(++frag);
+       /* Add size of frags 0 through 4 to create our initial sum */
+       sum += skb_frag_size(frag++);
+       sum += skb_frag_size(frag++);
+       sum += skb_frag_size(frag++);
+       sum += skb_frag_size(frag++);
+       sum += skb_frag_size(frag++);
 
        /* Walk through fragments adding latest fragment, testing it, and
         * then removing stale fragments from the sum.
         */
        stale = &skb_shinfo(skb)->frags[0];
        for (;;) {
-               sum += skb_frag_size(++frag);
+               sum += skb_frag_size(frag++);
 
                /* if sum is negative we failed to make sufficient progress */
                if (sum < 0)
@@ -1861,7 +1866,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
                if (!--nr_frags)
                        break;
 
-               sum -= skb_frag_size(++stale);
+               sum -= skb_frag_size(stale++);
        }
 
        return false;
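To make the window test concrete (assuming I40E_MAX_BUFFER_TXD is 8, as defined in the header below): a TSO skb with gso_size = 256 and seven 32-byte fragments starts at sum = 1 - 256 = -255, reaches -95 after fragments 0 through 4, and is still negative (-63) after adding fragment 5, so the function returns true and the skb is linearized. Every six consecutive fragments must together cover at least one gso_size of payload, or a single segment could need more than the 8 buffers the hardware can chain.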
index 84c28aa64fdf9fd8ea20e276a15c6297cfd237c0..0112277e5882a995cc2d176a220a1391f74c77f8 100644 (file)
@@ -102,8 +102,8 @@ enum i40e_dyn_idx_t {
        (((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
          I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
 
-/* Supported Rx Buffer Sizes */
-#define I40E_RXBUFFER_512   512    /* Used for packet split */
+/* Supported Rx Buffer Sizes (a multiple of 128) */
+#define I40E_RXBUFFER_256   256
 #define I40E_RXBUFFER_2048  2048
 #define I40E_RXBUFFER_3072  3072   /* For FCoE MTU of 2158 */
 #define I40E_RXBUFFER_4096  4096
@@ -114,9 +114,28 @@ enum i40e_dyn_idx_t {
  * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
  * this adds up to 512 bytes of extra data meaning the smallest allocation
  * we could have is 1K.
- * i.e. RXBUFFER_512 --> size-1024 slab
+ * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
+ * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
  */
-#define I40E_RX_HDR_SIZE  I40E_RXBUFFER_512
+#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
+#define i40e_rx_desc i40e_32byte_rx_desc
+
+/**
+ * i40e_test_staterr - tests bits in Rx descriptor status and error fields
+ * @rx_desc: pointer to receive descriptor (in le64 format)
+ * @stat_err_bits: value to mask
+ *
+ * This function does some fast chicanery in order to return the
+ * value of the mask which is really only used for boolean tests.
+ * The status_error_len doesn't need to be shifted because it begins
+ * at offset zero.
+ */
+static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
+                                    const u64 stat_err_bits)
+{
+       return !!(rx_desc->wb.qword1.status_error_len &
+                 cpu_to_le64(stat_err_bits));
+}
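Callers pass pre-shifted masks, as in the EOP check of the new receive path (usage mirrored from the code above):

	if (i40e_test_staterr(rx_desc, BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))
		return false;	/* last buffer of the packet */

Because the mask rather than the descriptor goes through cpu_to_le64(), the test works on big-endian hosts without byte-swapping every descriptor.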
 
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
 #define I40E_RX_BUFFER_WRITE   16      /* Must be power of 2 */
@@ -142,8 +161,6 @@ enum i40e_dyn_idx_t {
                prefetch((n));                          \
        } while (0)
 
-#define i40e_rx_desc i40e_32byte_rx_desc
-
 #define I40E_MAX_BUFFER_TXD    8
 #define I40E_MIN_TX_LEN                17
 
@@ -212,10 +229,8 @@ struct i40e_tx_buffer {
 
 struct i40e_rx_buffer {
        struct sk_buff *skb;
-       void *hdr_buf;
        dma_addr_t dma;
        struct page *page;
-       dma_addr_t page_dma;
        unsigned int page_offset;
 };
 
@@ -244,22 +259,18 @@ struct i40e_rx_queue_stats {
 enum i40e_ring_state_t {
        __I40E_TX_FDIR_INIT_DONE,
        __I40E_TX_XPS_INIT_DONE,
-       __I40E_RX_PS_ENABLED,
-       __I40E_RX_16BYTE_DESC_ENABLED,
 };
 
-#define ring_is_ps_enabled(ring) \
-       test_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define set_ring_ps_enabled(ring) \
-       set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define clear_ring_ps_enabled(ring) \
-       clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define ring_is_16byte_desc_enabled(ring) \
-       test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
-#define set_ring_16byte_desc_enabled(ring) \
-       set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
-#define clear_ring_16byte_desc_enabled(ring) \
-       clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+/* some useful defines for the virtchannel interface, which
+ * is the only remaining user of header split
+ */
+#define I40E_RX_DTYPE_NO_SPLIT      0
+#define I40E_RX_DTYPE_HEADER_SPLIT  1
+#define I40E_RX_DTYPE_SPLIT_ALWAYS  2
+#define I40E_RX_SPLIT_L2      0x1
+#define I40E_RX_SPLIT_IP      0x2
+#define I40E_RX_SPLIT_TCP_UDP 0x4
+#define I40E_RX_SPLIT_SCTP    0x8
 
 /* struct that defines a descriptor ring, associated with a VSI */
 struct i40e_ring {
@@ -278,16 +289,7 @@ struct i40e_ring {
 
        u16 count;                      /* Number of descriptors */
        u16 reg_idx;                    /* HW register index of the ring */
-       u16 rx_hdr_len;
        u16 rx_buf_len;
-       u8  dtype;
-#define I40E_RX_DTYPE_NO_SPLIT      0
-#define I40E_RX_DTYPE_HEADER_SPLIT  1
-#define I40E_RX_DTYPE_SPLIT_ALWAYS  2
-#define I40E_RX_SPLIT_L2      0x1
-#define I40E_RX_SPLIT_IP      0x2
-#define I40E_RX_SPLIT_TCP_UDP 0x4
-#define I40E_RX_SPLIT_SCTP    0x8
 
        /* used in interrupt processing */
        u16 next_to_use;
@@ -319,6 +321,7 @@ struct i40e_ring {
        struct i40e_q_vector *q_vector; /* Backreference to associated vector */
 
        struct rcu_head rcu;            /* to avoid race on free */
+       u16 next_to_alloc;
 } ____cacheline_internodealigned_in_smp;
 
 enum i40e_latency_range {
@@ -342,9 +345,7 @@ struct i40e_ring_container {
 #define i40e_for_each_ring(pos, head) \
        for (pos = (head).ring; pos != NULL; pos = pos->next)
 
-bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
-bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
-void i40evf_alloc_rx_headers(struct i40e_ring *rxr);
+bool i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
 netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
 void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);
 void i40evf_clean_rx_ring(struct i40e_ring *rx_ring);
@@ -424,11 +425,15 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
  **/
 static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
 {
-       /* we can only support up to 8 data buffers for a single send */
-       if (likely(count <= I40E_MAX_BUFFER_TXD))
+       /* Both TSO and single send will work if count is less than 8 */
+       if (likely(count < I40E_MAX_BUFFER_TXD))
                return false;
 
-       return __i40evf_chk_linearize(skb);
+       if (skb_is_gso(skb))
+               return __i40evf_chk_linearize(skb);
+
+       /* we can support up to 8 data buffers for a single send */
+       return count != I40E_MAX_BUFFER_TXD;
 }
 
 /**
index 4a78c18e0b7b19058968af7ba3be3a717371cf57..97f96e0d9c4c4000cc8d003129f09c1fb93f3219 100644 (file)
@@ -36,7 +36,7 @@
 #include "i40e_devids.h"
 
 /* I40E_MASK is a macro used on 32 bit registers */
-#define I40E_MASK(mask, shift) (mask << shift)
+#define I40E_MASK(mask, shift) ((u32)(mask) << (shift))
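The new casts matter whenever a mask reaches the sign bit: with the old macro, a hypothetical I40E_MASK(0x3, 30) shifts a signed int into bit 31, which is undefined behaviour in C, and a negative intermediate result would sign-extend when mixed into a u64 expression. With the (u32) cast the same invocation is the well-defined 0xC0000000. (The 0x3/30 operands are illustrative, not taken from the register definitions.)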
 
 #define I40E_MAX_VSI_QP                        16
 #define I40E_MAX_VF_VSI                        3
@@ -258,6 +258,11 @@ struct i40e_hw_capabilities {
 #define I40E_FLEX10_STATUS_DCC_ERROR   0x1
 #define I40E_FLEX10_STATUS_VC_MODE     0x2
 
+       bool sec_rev_disabled;
+       bool update_disabled;
+#define I40E_NVM_MGMT_SEC_REV_DISABLED 0x1
+#define I40E_NVM_MGMT_UPDATE_DISABLED  0x2
+
        bool mgmt_cem;
        bool ieee_1588;
        bool iwarp;
@@ -523,6 +528,7 @@ struct i40e_hw {
        struct i40e_aq_desc nvm_wb_desc;
        struct i40e_virt_mem nvm_buff;
        bool nvm_release_on_done;
+       u16 nvm_wait_opcode;
 
        /* HMC info */
        struct i40e_hmc_info hmc; /* HMC info struct */
index e657eccd232c6ef54f86a73056857257e549101f..fa044a904208ce74a176fac36c9792b67504006a 100644 (file)
@@ -67,8 +67,6 @@ struct i40e_vsi {
        u16 rx_itr_setting;
        u16 tx_itr_setting;
        u16 qs_handle;
-       u8 *rss_hkey_user; /* User configured hash keys */
-       u8 *rss_lut_user;  /* User configured lookup table entries */
 };
 
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
@@ -82,9 +80,6 @@ struct i40e_vsi {
 #define I40EVF_REQ_DESCRIPTOR_MULTIPLE  32
 
 /* Supported Rx Buffer Sizes */
-#define I40EVF_RXBUFFER_64    64     /* Used for packet split */
-#define I40EVF_RXBUFFER_128   128    /* Used for packet split */
-#define I40EVF_RXBUFFER_256   256    /* Used for packet split */
 #define I40EVF_RXBUFFER_2048  2048
 #define I40EVF_MAX_RXBUFFER   16384  /* largest size for single descriptor */
 #define I40EVF_MAX_AQ_BUF_SIZE    4096
@@ -210,9 +205,6 @@ struct i40evf_adapter {
 
        u32 flags;
 #define I40EVF_FLAG_RX_CSUM_ENABLED              BIT(0)
-#define I40EVF_FLAG_RX_1BUF_CAPABLE              BIT(1)
-#define I40EVF_FLAG_RX_PS_CAPABLE                BIT(2)
-#define I40EVF_FLAG_RX_PS_ENABLED                BIT(3)
 #define I40EVF_FLAG_IMIR_ENABLED                 BIT(5)
 #define I40EVF_FLAG_MQ_CAPABLE                   BIT(6)
 #define I40EVF_FLAG_NEED_LINK_UPDATE             BIT(7)
@@ -222,6 +214,7 @@ struct i40evf_adapter {
 #define I40EVF_FLAG_WB_ON_ITR_CAPABLE          BIT(11)
 #define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE     BIT(12)
 #define I40EVF_FLAG_ADDR_SET_BY_PF             BIT(13)
+#define I40EVF_FLAG_PROMISC_ON                 BIT(15)
 /* duplicates for common code */
 #define I40E_FLAG_FDIR_ATR_ENABLED              0
 #define I40E_FLAG_DCB_ENABLED                   0
@@ -239,8 +232,15 @@ struct i40evf_adapter {
 #define I40EVF_FLAG_AQ_CONFIGURE_QUEUES                BIT(6)
 #define I40EVF_FLAG_AQ_MAP_VECTORS             BIT(7)
 #define I40EVF_FLAG_AQ_HANDLE_RESET            BIT(8)
-#define I40EVF_FLAG_AQ_CONFIGURE_RSS           BIT(9)
+#define I40EVF_FLAG_AQ_CONFIGURE_RSS           BIT(9)  /* direct AQ config */
 #define I40EVF_FLAG_AQ_GET_CONFIG              BIT(10)
+/* Newer style, RSS done by the PF so we can ignore hardware vagaries. */
+#define I40EVF_FLAG_AQ_GET_HENA                        BIT(11)
+#define I40EVF_FLAG_AQ_SET_HENA                        BIT(12)
+#define I40EVF_FLAG_AQ_SET_RSS_KEY             BIT(13)
+#define I40EVF_FLAG_AQ_SET_RSS_LUT             BIT(14)
+#define I40EVF_FLAG_AQ_REQUEST_PROMISC         BIT(15)
+#define I40EVF_FLAG_AQ_RELEASE_PROMISC         BIT(16)
 
        /* OS defined structs */
        struct net_device *netdev;
@@ -256,10 +256,18 @@ struct i40evf_adapter {
        bool netdev_registered;
        bool link_up;
        enum i40e_virtchnl_ops current_op;
-#define CLIENT_ENABLED(_a) ((_a)->vf_res->vf_offload_flags & \
-                           I40E_VIRTCHNL_VF_OFFLOAD_IWARP)
+#define CLIENT_ENABLED(_a) ((_a)->vf_res ? \
+                           (_a)->vf_res->vf_offload_flags & \
+                               I40E_VIRTCHNL_VF_OFFLOAD_IWARP : \
+                           0)
+/* RSS by the PF should be preferred over RSS via other methods. */
+#define RSS_PF(_a) ((_a)->vf_res->vf_offload_flags & \
+                   I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
 #define RSS_AQ(_a) ((_a)->vf_res->vf_offload_flags & \
                    I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
+#define RSS_REG(_a) (!((_a)->vf_res->vf_offload_flags & \
+                      (I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ | \
+                       I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)))
 #define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_offload_flags & \
                          I40E_VIRTCHNL_VF_OFFLOAD_VLAN)
        struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */
@@ -271,11 +279,16 @@ struct i40evf_adapter {
        struct i40e_eth_stats current_stats;
        struct i40e_vsi vsi;
        u32 aq_wait_count;
+       /* RSS stuff */
+       u64 hena;
+       u16 rss_key_size;
+       u16 rss_lut_size;
+       u8 *rss_key;
+       u8 *rss_lut;
 };
 
 
 /* Ethtool Private Flags */
-#define I40EVF_PRIV_FLAGS_PS           BIT(0)
 
 /* needed by i40evf_ethtool.c */
 extern char i40evf_driver_name[];
@@ -314,11 +327,12 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter);
 void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags);
 void i40evf_request_stats(struct i40evf_adapter *adapter);
 void i40evf_request_reset(struct i40evf_adapter *adapter);
+void i40evf_get_hena(struct i40evf_adapter *adapter);
+void i40evf_set_hena(struct i40evf_adapter *adapter);
+void i40evf_set_rss_key(struct i40evf_adapter *adapter);
+void i40evf_set_rss_lut(struct i40evf_adapter *adapter);
 void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
                                enum i40e_virtchnl_ops v_opcode,
                                i40e_status v_retval, u8 *msg, u16 msglen);
-int i40evf_config_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut,
-                     u16 lut_size);
-int i40evf_get_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut,
-                  u16 lut_size);
+int i40evf_config_rss(struct i40evf_adapter *adapter);
 #endif /* _I40EVF_H_ */
index dd4430aae7fa4aa5f09b03c5bb86d96395c7ebf9..c9c202f6c52172a12dd1fe94d8b6c2acc0ebbcfa 100644 (file)
@@ -63,12 +63,6 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = {
 #define I40EVF_STATS_LEN(_dev) \
        (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev))
 
-static const char i40evf_priv_flags_strings[][ETH_GSTRING_LEN] = {
-       "packet-split",
-};
-
-#define I40EVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40evf_priv_flags_strings)
-
 /**
  * i40evf_get_settings - Get Link Speed and Duplex settings
  * @netdev: network interface device structure
@@ -103,8 +97,6 @@ static int i40evf_get_sset_count(struct net_device *netdev, int sset)
 {
        if (sset == ETH_SS_STATS)
                return I40EVF_STATS_LEN(netdev);
-       else if (sset == ETH_SS_PRIV_FLAGS)
-               return I40EVF_PRIV_FLAGS_STR_LEN;
        else
                return -EINVAL;
 }
@@ -170,12 +162,6 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
                        snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i);
                        p += ETH_GSTRING_LEN;
                }
-       } else if (sset == ETH_SS_PRIV_FLAGS) {
-               for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
-                       memcpy(data, i40evf_priv_flags_strings[i],
-                              ETH_GSTRING_LEN);
-                       data += ETH_GSTRING_LEN;
-               }
        }
 }
 
@@ -225,7 +211,6 @@ static void i40evf_get_drvinfo(struct net_device *netdev,
        strlcpy(drvinfo->version, i40evf_driver_version, 32);
        strlcpy(drvinfo->fw_version, "N/A", 4);
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
-       drvinfo->n_priv_flags = I40EVF_PRIV_FLAGS_STR_LEN;
 }
 
 /**
@@ -377,63 +362,6 @@ static int i40evf_set_coalesce(struct net_device *netdev,
        return 0;
 }
 
-/**
- * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
- * @adapter: board private structure
- * @cmd: ethtool rxnfc command
- *
- * Returns Success if the flow is supported, else Invalid Input.
- **/
-static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter,
-                                   struct ethtool_rxnfc *cmd)
-{
-       struct i40e_hw *hw = &adapter->hw;
-       u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
-                  ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
-
-       /* We always hash on IP src and dest addresses */
-       cmd->data = RXH_IP_SRC | RXH_IP_DST;
-
-       switch (cmd->flow_type) {
-       case TCP_V4_FLOW:
-               if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
-                       cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
-               break;
-       case UDP_V4_FLOW:
-               if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
-                       cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
-               break;
-
-       case SCTP_V4_FLOW:
-       case AH_ESP_V4_FLOW:
-       case AH_V4_FLOW:
-       case ESP_V4_FLOW:
-       case IPV4_FLOW:
-               break;
-
-       case TCP_V6_FLOW:
-               if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
-                       cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
-               break;
-       case UDP_V6_FLOW:
-               if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
-                       cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
-               break;
-
-       case SCTP_V6_FLOW:
-       case AH_ESP_V6_FLOW:
-       case AH_V6_FLOW:
-       case ESP_V6_FLOW:
-       case IPV6_FLOW:
-               break;
-       default:
-               cmd->data = 0;
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
 /**
  * i40evf_get_rxnfc - command to get RX flow classification rules
  * @netdev: network interface device structure
@@ -454,145 +382,8 @@ static int i40evf_get_rxnfc(struct net_device *netdev,
                ret = 0;
                break;
        case ETHTOOL_GRXFH:
-               ret = i40evf_get_rss_hash_opts(adapter, cmd);
-               break;
-       default:
-               break;
-       }
-
-       return ret;
-}
-
-/**
- * i40evf_set_rss_hash_opt - Enable/Disable flow types for RSS hash
- * @adapter: board private structure
- * @cmd: ethtool rxnfc command
- *
- * Returns Success if the flow input set is supported.
- **/
-static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
-                                  struct ethtool_rxnfc *nfc)
-{
-       struct i40e_hw *hw = &adapter->hw;
-       u32 flags = adapter->vf_res->vf_offload_flags;
-
-       u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
-                  ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
-
-       /* RSS does not support anything other than hashing
-        * to queues on src and dst IPs and ports
-        */
-       if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
-                         RXH_L4_B_0_1 | RXH_L4_B_2_3))
-               return -EINVAL;
-
-       /* We need at least the IP SRC and DEST fields for hashing */
-       if (!(nfc->data & RXH_IP_SRC) ||
-           !(nfc->data & RXH_IP_DST))
-               return -EINVAL;
-
-       switch (nfc->flow_type) {
-       case TCP_V4_FLOW:
-               if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
-                       if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
-                               hena |=
-                          BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
-
-                       hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
-               } else {
-                       return -EINVAL;
-               }
-               break;
-       case TCP_V6_FLOW:
-               if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
-                       if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
-                               hena |=
-                          BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
-
-                       hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
-               } else {
-                       return -EINVAL;
-               }
-               break;
-       case UDP_V4_FLOW:
-               if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
-                       if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
-                               hena |=
-                           BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
-                           BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
-
-                       hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
-                                BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
-               } else {
-                       return -EINVAL;
-               }
-               break;
-       case UDP_V6_FLOW:
-               if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
-                       if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
-                               hena |=
-                           BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
-                           BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
-
-                       hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
-                                BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
-               } else {
-                       return -EINVAL;
-               }
-               break;
-       case AH_ESP_V4_FLOW:
-       case AH_V4_FLOW:
-       case ESP_V4_FLOW:
-       case SCTP_V4_FLOW:
-               if ((nfc->data & RXH_L4_B_0_1) ||
-                   (nfc->data & RXH_L4_B_2_3))
-                       return -EINVAL;
-               hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
-               break;
-       case AH_ESP_V6_FLOW:
-       case AH_V6_FLOW:
-       case ESP_V6_FLOW:
-       case SCTP_V6_FLOW:
-               if ((nfc->data & RXH_L4_B_0_1) ||
-                   (nfc->data & RXH_L4_B_2_3))
-                       return -EINVAL;
-               hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
-               break;
-       case IPV4_FLOW:
-               hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
-                        BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
-               break;
-       case IPV6_FLOW:
-               hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
-                        BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
-       wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
-       i40e_flush(hw);
-
-       return 0;
-}
-
-/**
- * i40evf_set_rxnfc - command to set RX flow classification rules
- * @netdev: network interface device structure
- * @cmd: ethtool rxnfc command
- *
- * Returns Success if the command is supported.
- **/
-static int i40evf_set_rxnfc(struct net_device *netdev,
-                           struct ethtool_rxnfc *cmd)
-{
-       struct i40evf_adapter *adapter = netdev_priv(netdev);
-       int ret = -EOPNOTSUPP;
-
-       switch (cmd->cmd) {
-       case ETHTOOL_SRXFH:
-               ret = i40evf_set_rss_hash_opt(adapter, cmd);
+               netdev_info(netdev,
+                           "RSS hash info is not available to VF, use PF.\n");
                break;
        default:
                break;
@@ -600,7 +391,6 @@ static int i40evf_set_rxnfc(struct net_device *netdev,
 
        return ret;
 }
-
 /**
  * i40evf_get_channels: get the number of channels supported by the device
  * @netdev: network interface device structure
@@ -623,6 +413,19 @@ static void i40evf_get_channels(struct net_device *netdev,
        ch->combined_count = adapter->num_active_queues;
 }
 
+/**
+ * i40evf_get_rxfh_key_size - get the RSS hash key size
+ * @netdev: network interface device structure
+ *
+ * Returns the table size.
+ **/
+static u32 i40evf_get_rxfh_key_size(struct net_device *netdev)
+{
+       struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+       return adapter->rss_key_size;
+}
+
 /**
  * i40evf_get_rxfh_indir_size - get the rx flow hash indirection table size
  * @netdev: network interface device structure
@@ -631,7 +434,9 @@ static void i40evf_get_channels(struct net_device *netdev,
  **/
 static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev)
 {
-       return (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
+       struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+       return adapter->rss_lut_size;
 }
 
 /**
@@ -646,9 +451,6 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
                           u8 *hfunc)
 {
        struct i40evf_adapter *adapter = netdev_priv(netdev);
-       struct i40e_vsi *vsi = &adapter->vsi;
-       u8 *seed = NULL, *lut;
-       int ret;
        u16 i;
 
        if (hfunc)
@@ -656,24 +458,13 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
        if (!indir)
                return 0;
 
-       seed = key;
-
-       lut = kzalloc(I40EVF_HLUT_ARRAY_SIZE, GFP_KERNEL);
-       if (!lut)
-               return -ENOMEM;
-
-       ret = i40evf_get_rss(vsi, seed, lut, I40EVF_HLUT_ARRAY_SIZE);
-       if (ret)
-               goto out;
+       memcpy(key, adapter->rss_key, adapter->rss_key_size);
 
        /* Each 32 bits pointed by 'indir' is stored with a lut entry */
-       for (i = 0; i < I40EVF_HLUT_ARRAY_SIZE; i++)
-               indir[i] = (u32)lut[i];
+       for (i = 0; i < adapter->rss_lut_size; i++)
+               indir[i] = (u32)adapter->rss_lut[i];
 
-out:
-       kfree(lut);
-
-       return ret;
+       return 0;
 }
 
 /**
@@ -689,8 +480,6 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
                           const u8 *key, const u8 hfunc)
 {
        struct i40evf_adapter *adapter = netdev_priv(netdev);
-       struct i40e_vsi *vsi = &adapter->vsi;
-       u8 *seed = NULL;
        u16 i;
 
        /* We do not allow change in unsupported parameters */
@@ -701,76 +490,14 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
                return 0;
 
        if (key) {
-               if (!vsi->rss_hkey_user) {
-                       vsi->rss_hkey_user = kzalloc(I40EVF_HKEY_ARRAY_SIZE,
-                                                    GFP_KERNEL);
-                       if (!vsi->rss_hkey_user)
-                               return -ENOMEM;
-               }
-               memcpy(vsi->rss_hkey_user, key, I40EVF_HKEY_ARRAY_SIZE);
-               seed = vsi->rss_hkey_user;
-       }
-       if (!vsi->rss_lut_user) {
-               vsi->rss_lut_user = kzalloc(I40EVF_HLUT_ARRAY_SIZE,
-                                           GFP_KERNEL);
-               if (!vsi->rss_lut_user)
-                       return -ENOMEM;
+               memcpy(adapter->rss_key, key, adapter->rss_key_size);
        }
 
        /* Each 32 bits pointed by 'indir' is stored with a lut entry */
-       for (i = 0; i < I40EVF_HLUT_ARRAY_SIZE; i++)
-               vsi->rss_lut_user[i] = (u8)(indir[i]);
-
-       return i40evf_config_rss(vsi, seed, vsi->rss_lut_user,
-                                I40EVF_HLUT_ARRAY_SIZE);
-}
-
-/**
- * i40evf_get_priv_flags - report device private flags
- * @dev: network interface device structure
- *
- * The get string set count and the string set should be matched for each
- * flag returned.  Add new strings for each flag to the i40e_priv_flags_strings
- * array.
- *
- * Returns a u32 bitmap of flags.
- **/
-static u32 i40evf_get_priv_flags(struct net_device *dev)
-{
-       struct i40evf_adapter *adapter = netdev_priv(dev);
-       u32 ret_flags = 0;
-
-       ret_flags |= adapter->flags & I40EVF_FLAG_RX_PS_ENABLED ?
-               I40EVF_PRIV_FLAGS_PS : 0;
-
-       return ret_flags;
-}
+       for (i = 0; i < adapter->rss_lut_size; i++)
+               adapter->rss_lut[i] = (u8)(indir[i]);
 
-/**
- * i40evf_set_priv_flags - set private flags
- * @dev: network interface device structure
- * @flags: bit flags to be set
- **/
-static int i40evf_set_priv_flags(struct net_device *dev, u32 flags)
-{
-       struct i40evf_adapter *adapter = netdev_priv(dev);
-       bool reset_required = false;
-
-       if ((flags & I40EVF_PRIV_FLAGS_PS) &&
-           !(adapter->flags & I40EVF_FLAG_RX_PS_ENABLED)) {
-               adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED;
-               reset_required = true;
-       } else if (!(flags & I40EVF_PRIV_FLAGS_PS) &&
-                  (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED)) {
-               adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED;
-               reset_required = true;
-       }
-
-       /* if needed, issue reset to cause things to take effect */
-       if (reset_required)
-               i40evf_schedule_reset(adapter);
-
-       return 0;
+       return i40evf_config_rss(adapter);
 }
 
 static const struct ethtool_ops i40evf_ethtool_ops = {
@@ -782,18 +509,16 @@ static const struct ethtool_ops i40evf_ethtool_ops = {
        .get_strings            = i40evf_get_strings,
        .get_ethtool_stats      = i40evf_get_ethtool_stats,
        .get_sset_count         = i40evf_get_sset_count,
-       .get_priv_flags         = i40evf_get_priv_flags,
-       .set_priv_flags         = i40evf_set_priv_flags,
        .get_msglevel           = i40evf_get_msglevel,
        .set_msglevel           = i40evf_set_msglevel,
        .get_coalesce           = i40evf_get_coalesce,
        .set_coalesce           = i40evf_set_coalesce,
        .get_rxnfc              = i40evf_get_rxnfc,
-       .set_rxnfc              = i40evf_set_rxnfc,
        .get_rxfh_indir_size    = i40evf_get_rxfh_indir_size,
        .get_rxfh               = i40evf_get_rxfh,
        .set_rxfh               = i40evf_set_rxfh,
        .get_channels           = i40evf_get_channels,
+       .get_rxfh_key_size      = i40evf_get_rxfh_key_size,
 };
 
 /**
index 9110319a8f00a9a61c3ad6485967afb1cd31eaf6..b548dbe78cd3439bc0293c27283ee8a072536e2c 100644 (file)
@@ -38,7 +38,7 @@ static const char i40evf_driver_string[] =
 
 #define DRV_VERSION_MAJOR 1
 #define DRV_VERSION_MINOR 5
-#define DRV_VERSION_BUILD 5
+#define DRV_VERSION_BUILD 10
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD) \
@@ -641,28 +641,11 @@ static void i40evf_configure_tx(struct i40evf_adapter *adapter)
 static void i40evf_configure_rx(struct i40evf_adapter *adapter)
 {
        struct i40e_hw *hw = &adapter->hw;
-       struct net_device *netdev = adapter->netdev;
-       int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
        int i;
-       int rx_buf_len;
-
-
-       /* Set the RX buffer length according to the mode */
-       if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED ||
-           netdev->mtu <= ETH_DATA_LEN)
-               rx_buf_len = I40EVF_RXBUFFER_2048;
-       else
-               rx_buf_len = ALIGN(max_frame, 1024);
 
        for (i = 0; i < adapter->num_active_queues; i++) {
                adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i);
-               adapter->rx_rings[i].rx_buf_len = rx_buf_len;
-               if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) {
-                       set_ring_ps_enabled(&adapter->rx_rings[i]);
-                       adapter->rx_rings[i].rx_hdr_len = I40E_RX_HDR_SIZE;
-               } else {
-                       clear_ring_ps_enabled(&adapter->rx_rings[i]);
-               }
+               adapter->rx_rings[i].rx_buf_len = I40EVF_RXBUFFER_2048;
        }
 }
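
For context, the sizing rule removed above picked 2048 bytes for standard
MTUs (or whenever packet split was on) and otherwise rounded the worst-case
frame up to the next 1 KB boundary. A standalone sketch of that arithmetic,
with ALIGN re-derived and an illustrative MTU:

    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))  /* round up to 'a' */

    int main(void)
    {
            int mtu = 3000;                 /* illustrative jumbo MTU */
            int max_frame = mtu + 14 + 4;   /* + ETH_HLEN + ETH_FCS_LEN */

            /* legacy rule: next 1 KB multiple; 3018 -> 3072 */
            printf("%d -> %d\n", max_frame, ALIGN(max_frame, 1024));
            return 0;
    }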
 
@@ -943,6 +926,14 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
 bottom_of_search_loop:
                continue;
        }
+
+       if (netdev->flags & IFF_PROMISC &&
+           !(adapter->flags & I40EVF_FLAG_PROMISC_ON))
+               adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_PROMISC;
+       else if (!(netdev->flags & IFF_PROMISC) &&
+                adapter->flags & I40EVF_FLAG_PROMISC_ON)
+               adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_PROMISC;
+
        clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
 }
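
The promiscuous handling added above never touches hardware from
.ndo_set_rx_mode, which runs in a context where it cannot sleep; it only
records a request bit that the watchdog later turns into an admin-queue
command. A minimal sketch of that pattern, reusing the flag names from the
hunk (not the driver's literal code):

    /* fast path: just note what changed */
    static void note_promisc_change(struct i40evf_adapter *adapter, bool want)
    {
            bool have = adapter->flags & I40EVF_FLAG_PROMISC_ON;

            if (want && !have)
                    adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_PROMISC;
            else if (!want && have)
                    adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_PROMISC;
    }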
 
@@ -999,14 +990,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter)
        for (i = 0; i < adapter->num_active_queues; i++) {
                struct i40e_ring *ring = &adapter->rx_rings[i];
 
-       if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) {
-               i40evf_alloc_rx_headers(ring);
-               i40evf_alloc_rx_buffers_ps(ring, ring->count);
-       } else {
-               i40evf_alloc_rx_buffers_1buf(ring, ring->count);
-       }
-               ring->next_to_use = ring->count - 1;
-               writel(ring->next_to_use, ring->tail);
+               i40evf_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
        }
 }
 
@@ -1224,24 +1208,18 @@ out:
 }
 
 /**
- * i40e_config_rss_aq - Prepare for RSS using AQ commands
- * @vsi: vsi structure
- * @seed: RSS hash seed
- * @lut: Lookup table
- * @lut_size: Lookup table size
+ * i40e_config_rss_aq - Configure RSS keys and lut by using AQ commands
+ * @adapter: board private structure
  *
  * Return 0 on success, negative on failure
  **/
-static int i40evf_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
-                               u8 *lut, u16 lut_size)
+static int i40evf_config_rss_aq(struct i40evf_adapter *adapter)
 {
-       struct i40evf_adapter *adapter = vsi->back;
+       struct i40e_aqc_get_set_rss_key_data *rss_key =
+               (struct i40e_aqc_get_set_rss_key_data *)adapter->rss_key;
        struct i40e_hw *hw = &adapter->hw;
        int ret = 0;
 
-       if (!vsi->id)
-               return -EINVAL;
-
        if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
                /* bail because we already have a command pending */
                dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
@@ -1249,198 +1227,82 @@ static int i40evf_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
                return -EBUSY;
        }
 
-       if (seed) {
-               struct i40e_aqc_get_set_rss_key_data *rss_key =
-                       (struct i40e_aqc_get_set_rss_key_data *)seed;
-               ret = i40evf_aq_set_rss_key(hw, vsi->id, rss_key);
-               if (ret) {
-                       dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
-                               i40evf_stat_str(hw, ret),
-                               i40evf_aq_str(hw, hw->aq.asq_last_status));
-                       return ret;
-               }
+       ret = i40evf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
+       if (ret) {
+               dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
+                       i40evf_stat_str(hw, ret),
+                       i40evf_aq_str(hw, hw->aq.asq_last_status));
+               return ret;
        }
 
-       if (lut) {
-               ret = i40evf_aq_set_rss_lut(hw, vsi->id, false, lut, lut_size);
-               if (ret) {
-                       dev_err(&adapter->pdev->dev,
-                               "Cannot set RSS lut, err %s aq_err %s\n",
-                               i40evf_stat_str(hw, ret),
-                               i40evf_aq_str(hw, hw->aq.asq_last_status));
-                       return ret;
-               }
+       ret = i40evf_aq_set_rss_lut(hw, adapter->vsi.id, false,
+                                   adapter->rss_lut, adapter->rss_lut_size);
+       if (ret) {
+               dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
+                       i40evf_stat_str(hw, ret),
+                       i40evf_aq_str(hw, hw->aq.asq_last_status));
        }
 
        return ret;
 }
 
 /**
  * i40evf_config_rss_reg - Configure RSS keys and lut by writing registers
- * @vsi: Pointer to vsi structure
- * @seed: RSS hash seed
- * @lut: Lookup table
- * @lut_size: Lookup table size
+ * @adapter: board private structure
  *
  * Returns 0 on success, negative on failure
  **/
-static int i40evf_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
-                                const u8 *lut, u16 lut_size)
+static int i40evf_config_rss_reg(struct i40evf_adapter *adapter)
 {
-       struct i40evf_adapter *adapter = vsi->back;
        struct i40e_hw *hw = &adapter->hw;
+       u32 *dw;
        u16 i;
 
-       if (seed) {
-               u32 *seed_dw = (u32 *)seed;
-
-               for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
-                       wr32(hw, I40E_VFQF_HKEY(i), seed_dw[i]);
-       }
-
-       if (lut) {
-               u32 *lut_dw = (u32 *)lut;
+       dw = (u32 *)adapter->rss_key;
+       for (i = 0; i < adapter->rss_key_size / 4; i++)
+               wr32(hw, I40E_VFQF_HKEY(i), dw[i]);
 
-               if (lut_size != I40EVF_HLUT_ARRAY_SIZE)
-                       return -EINVAL;
+       dw = (u32 *)adapter->rss_lut;
+       for (i = 0; i < adapter->rss_lut_size / 4; i++)
+               wr32(hw, I40E_VFQF_HLUT(i), dw[i]);
 
-               for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
-                       wr32(hw, I40E_VFQF_HLUT(i), lut_dw[i]);
-       }
        i40e_flush(hw);
 
        return 0;
 }
 
-/**
- *  * i40evf_get_rss_aq - Get RSS keys and lut by using AQ commands
- *  @vsi: Pointer to vsi structure
- *  @seed: RSS hash seed
- *  @lut: Lookup table
- *  @lut_size: Lookup table size
- *
- *  Return 0 on success, negative on failure
- **/
-static int i40evf_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
-                            u8 *lut, u16 lut_size)
-{
-       struct i40evf_adapter *adapter = vsi->back;
-       struct i40e_hw *hw = &adapter->hw;
-       int ret = 0;
-
-       if (seed) {
-               ret = i40evf_aq_get_rss_key(hw, vsi->id,
-                       (struct i40e_aqc_get_set_rss_key_data *)seed);
-               if (ret) {
-                       dev_err(&adapter->pdev->dev,
-                               "Cannot get RSS key, err %s aq_err %s\n",
-                               i40evf_stat_str(hw, ret),
-                               i40evf_aq_str(hw, hw->aq.asq_last_status));
-                       return ret;
-               }
-       }
-
-       if (lut) {
-               ret = i40evf_aq_get_rss_lut(hw, vsi->id, false, lut, lut_size);
-               if (ret) {
-                       dev_err(&adapter->pdev->dev,
-                               "Cannot get RSS lut, err %s aq_err %s\n",
-                               i40evf_stat_str(hw, ret),
-                               i40evf_aq_str(hw, hw->aq.asq_last_status));
-                       return ret;
-               }
-       }
-
-       return ret;
-}
-
-/**
- *  * i40evf_get_rss_reg - Get RSS keys and lut by reading registers
- *  @vsi: Pointer to vsi structure
- *  @seed: RSS hash seed
- *  @lut: Lookup table
- *  @lut_size: Lookup table size
- *
- *  Returns 0 on success, negative on failure
- **/
-static int i40evf_get_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
-                             const u8 *lut, u16 lut_size)
-{
-       struct i40evf_adapter *adapter = vsi->back;
-       struct i40e_hw *hw = &adapter->hw;
-       u16 i;
-
-       if (seed) {
-               u32 *seed_dw = (u32 *)seed;
-
-               for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
-                       seed_dw[i] = rd32(hw, I40E_VFQF_HKEY(i));
-       }
-
-       if (lut) {
-               u32 *lut_dw = (u32 *)lut;
-
-               if (lut_size != I40EVF_HLUT_ARRAY_SIZE)
-                       return -EINVAL;
-
-               for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
-                       lut_dw[i] = rd32(hw, I40E_VFQF_HLUT(i));
-       }
-
-       return 0;
-}
-
 /**
  * i40evf_config_rss - Configure RSS keys and lut
- * @vsi: Pointer to vsi structure
- * @seed: RSS hash seed
- * @lut: Lookup table
- * @lut_size: Lookup table size
- *
- * Returns 0 on success, negative on failure
- **/
-int i40evf_config_rss(struct i40e_vsi *vsi, const u8 *seed,
-                     u8 *lut, u16 lut_size)
-{
-       struct i40evf_adapter *adapter = vsi->back;
-
-       if (RSS_AQ(adapter))
-               return i40evf_config_rss_aq(vsi, seed, lut, lut_size);
-       else
-               return i40evf_config_rss_reg(vsi, seed, lut, lut_size);
-}
-
-/**
- * i40evf_get_rss - Get RSS keys and lut
- * @vsi: Pointer to vsi structure
- * @seed: RSS hash seed
- * @lut: Lookup table
- * @lut_size: Lookup table size
+ * @adapter: board private structure
  *
  * Returns 0 on success, negative on failure
  **/
-int i40evf_get_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut, u16 lut_size)
+int i40evf_config_rss(struct i40evf_adapter *adapter)
 {
-       struct i40evf_adapter *adapter = vsi->back;
 
-       if (RSS_AQ(adapter))
-               return i40evf_get_rss_aq(vsi, seed, lut, lut_size);
-       else
-               return i40evf_get_rss_reg(vsi, seed, lut, lut_size);
+       if (RSS_PF(adapter)) {
+               adapter->aq_required |= I40EVF_FLAG_AQ_SET_RSS_LUT |
+                                       I40EVF_FLAG_AQ_SET_RSS_KEY;
+               return 0;
+       } else if (RSS_AQ(adapter)) {
+               return i40evf_config_rss_aq(adapter);
+       } else {
+               return i40evf_config_rss_reg(adapter);
+       }
 }
 
 /**
  * i40evf_fill_rss_lut - Fill the lut with default values
- * @lut: Lookup table to be filled with
- * @rss_table_size: Lookup table size
- * @rss_size: Range of queue number for hashing
+ * @adapter: board private structure
  **/
-static void i40evf_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
+static void i40evf_fill_rss_lut(struct i40evf_adapter *adapter)
 {
        u16 i;
 
-       for (i = 0; i < rss_table_size; i++)
-               lut[i] = i % rss_size;
+       for (i = 0; i < adapter->rss_lut_size; i++)
+               adapter->rss_lut[i] = i % adapter->num_active_queues;
 }
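
Worked example of the fill rule above: with a 16-entry table and four active
queues (both sizes illustrative), the modulo spreads hash buckets round-robin
across the queues. Self-contained:

    #include <stdio.h>

    int main(void)
    {
            unsigned char lut[16];          /* pretend rss_lut_size == 16 */
            unsigned int i, queues = 4;     /* pretend num_active_queues == 4 */

            for (i = 0; i < 16; i++)
                    lut[i] = i % queues;    /* same rule as the driver */
            for (i = 0; i < 16; i++)
                    printf("%u ", lut[i]);  /* 0 1 2 3 0 1 2 3 ... */
            printf("\n");
            return 0;
    }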
 
 /**
@@ -1451,42 +1313,25 @@ static void i40evf_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
  **/
 static int i40evf_init_rss(struct i40evf_adapter *adapter)
 {
-       struct i40e_vsi *vsi = &adapter->vsi;
        struct i40e_hw *hw = &adapter->hw;
-       u8 seed[I40EVF_HKEY_ARRAY_SIZE];
-       u64 hena;
-       u8 *lut;
        int ret;
 
-       /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
-       if (adapter->vf_res->vf_offload_flags &
-                                       I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
-               hena = I40E_DEFAULT_RSS_HENA_EXPANDED;
-       else
-               hena = I40E_DEFAULT_RSS_HENA;
-       wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
-       wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
+       if (!RSS_PF(adapter)) {
+               /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
+               if (adapter->vf_res->vf_offload_flags &
+                   I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+                       adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED;
+               else
+                       adapter->hena = I40E_DEFAULT_RSS_HENA;
 
-       lut = kzalloc(I40EVF_HLUT_ARRAY_SIZE, GFP_KERNEL);
-       if (!lut)
-               return -ENOMEM;
+               wr32(hw, I40E_VFQF_HENA(0), (u32)adapter->hena);
+               wr32(hw, I40E_VFQF_HENA(1), (u32)(adapter->hena >> 32));
+       }
 
-       /* Use user configured lut if there is one, otherwise use default */
-       if (vsi->rss_lut_user)
-               memcpy(lut, vsi->rss_lut_user, I40EVF_HLUT_ARRAY_SIZE);
-       else
-               i40evf_fill_rss_lut(lut, I40EVF_HLUT_ARRAY_SIZE,
-                                   adapter->num_active_queues);
+       i40evf_fill_rss_lut(adapter);
 
-       /* Use user configured hash key if there is one, otherwise
-        * user default.
-        */
-       if (vsi->rss_hkey_user)
-               memcpy(seed, vsi->rss_hkey_user, I40EVF_HKEY_ARRAY_SIZE);
-       else
-               netdev_rss_key_fill((void *)seed, I40EVF_HKEY_ARRAY_SIZE);
-       ret = i40evf_config_rss(vsi, seed, lut, I40EVF_HLUT_ARRAY_SIZE);
-       kfree(lut);
+       netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
+       ret = i40evf_config_rss(adapter);
 
        return ret;
 }
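
The hash-enable (hena) value is a 64-bit PCTYPE bitmask while VF registers
are 32 bits wide, hence the two writes above, low word first. The split in
isolation:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t hena = 0x0000000003f00000ULL;  /* made-up PCTYPE mask */
            uint32_t lo = (uint32_t)hena;           /* -> I40E_VFQF_HENA(0) */
            uint32_t hi = (uint32_t)(hena >> 32);   /* -> I40E_VFQF_HENA(1) */

            printf("lo=%08x hi=%08x\n", (unsigned)lo, (unsigned)hi);
            return 0;
    }
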
@@ -1601,19 +1446,16 @@ err_set_interrupt:
 }
 
 /**
- * i40evf_clear_rss_config_user - Clear user configurations of RSS
- * @vsi: Pointer to VSI structure
+ * i40evf_free_rss - Free memory used by RSS structs
+ * @adapter: board private structure
  **/
-static void i40evf_clear_rss_config_user(struct i40e_vsi *vsi)
+static void i40evf_free_rss(struct i40evf_adapter *adapter)
 {
-       if (!vsi)
-               return;
+       kfree(adapter->rss_key);
+       adapter->rss_key = NULL;
 
-       kfree(vsi->rss_hkey_user);
-       vsi->rss_hkey_user = NULL;
-
-       kfree(vsi->rss_lut_user);
-       vsi->rss_lut_user = NULL;
+       kfree(adapter->rss_lut);
+       adapter->rss_lut = NULL;
 }
 
 /**
@@ -1747,6 +1589,33 @@ static void i40evf_watchdog_task(struct work_struct *work)
                adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS;
                goto watchdog_done;
        }
+       if (adapter->aq_required & I40EVF_FLAG_AQ_GET_HENA) {
+               i40evf_get_hena(adapter);
+               goto watchdog_done;
+       }
+       if (adapter->aq_required & I40EVF_FLAG_AQ_SET_HENA) {
+               i40evf_set_hena(adapter);
+               goto watchdog_done;
+       }
+       if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_KEY) {
+               i40evf_set_rss_key(adapter);
+               goto watchdog_done;
+       }
+       if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_LUT) {
+               i40evf_set_rss_lut(adapter);
+               goto watchdog_done;
+       }
+
+       if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_PROMISC) {
+               i40evf_set_promiscuous(adapter, I40E_FLAG_VF_UNICAST_PROMISC |
+                                      I40E_FLAG_VF_MULTICAST_PROMISC);
+               goto watchdog_done;
+       }
+
+       if (adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_PROMISC) {
+               i40evf_set_promiscuous(adapter, 0);
+               goto watchdog_done;
+       }
 
        if (adapter->state == __I40EVF_RUNNING)
                i40evf_request_stats(adapter);
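
Each new branch above issues at most one admin-queue command per watchdog
pass and then jumps to watchdog_done, since the virtchnl layer allows only a
single outstanding current_op. A compressed sketch of that dispatch shape
(two of the flags shown; not the full task):

    /* returns true if a command was issued; the rest wait for a later tick */
    static bool dispatch_one_aq_request(struct i40evf_adapter *adapter)
    {
            if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_KEY) {
                    i40evf_set_rss_key(adapter);
                    return true;
            }
            if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_LUT) {
                    i40evf_set_rss_lut(adapter);
                    return true;
            }
            return false;   /* nothing pending */
    }
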
@@ -2325,6 +2194,7 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
 {
        struct i40e_virtchnl_vf_resource *vfres = adapter->vf_res;
        struct net_device *netdev = adapter->netdev;
+       struct i40e_vsi *vsi = &adapter->vsi;
        int i;
 
        /* got VF config message back from PF, now we can parse it */
@@ -2337,40 +2207,46 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
                return -ENODEV;
        }
 
-       netdev->features |= NETIF_F_HIGHDMA |
-                           NETIF_F_SG |
-                           NETIF_F_IP_CSUM |
-                           NETIF_F_SCTP_CRC |
-                           NETIF_F_IPV6_CSUM |
-                           NETIF_F_TSO |
-                           NETIF_F_TSO6 |
-                           NETIF_F_TSO_ECN |
-                           NETIF_F_GSO_GRE |
-                           NETIF_F_GSO_UDP_TUNNEL |
-                           NETIF_F_RXCSUM |
-                           NETIF_F_GRO;
-
-       netdev->hw_enc_features |= NETIF_F_IP_CSUM             |
-                                  NETIF_F_IPV6_CSUM           |
-                                  NETIF_F_TSO                 |
-                                  NETIF_F_TSO6                |
-                                  NETIF_F_TSO_ECN             |
-                                  NETIF_F_GSO_GRE             |
-                                  NETIF_F_GSO_UDP_TUNNEL      |
-                                  NETIF_F_GSO_UDP_TUNNEL_CSUM;
-
-       if (adapter->flags & I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE)
-               netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
-
-       /* always clear VLAN features because they can change at every reset */
-       netdev->features &= ~(I40EVF_VLAN_FEATURES);
-       /* copy netdev features into list of user selectable features */
-       netdev->hw_features |= netdev->features;
-
-       if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) {
-               netdev->vlan_features = netdev->features;
-               netdev->features |= I40EVF_VLAN_FEATURES;
-       }
+       netdev->hw_enc_features |= NETIF_F_SG                   |
+                                  NETIF_F_IP_CSUM              |
+                                  NETIF_F_IPV6_CSUM            |
+                                  NETIF_F_HIGHDMA              |
+                                  NETIF_F_SOFT_FEATURES        |
+                                  NETIF_F_TSO                  |
+                                  NETIF_F_TSO_ECN              |
+                                  NETIF_F_TSO6                 |
+                                  NETIF_F_GSO_GRE              |
+                                  NETIF_F_GSO_GRE_CSUM         |
+                                  NETIF_F_GSO_IPIP             |
+                                  NETIF_F_GSO_SIT              |
+                                  NETIF_F_GSO_UDP_TUNNEL       |
+                                  NETIF_F_GSO_UDP_TUNNEL_CSUM  |
+                                  NETIF_F_GSO_PARTIAL          |
+                                  NETIF_F_SCTP_CRC             |
+                                  NETIF_F_RXHASH               |
+                                  NETIF_F_RXCSUM               |
+                                  0;
+
+       if (!(adapter->flags & I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE))
+               netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+       netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+
+       /* record features VLANs can make use of */
+       netdev->vlan_features |= netdev->hw_enc_features |
+                                NETIF_F_TSO_MANGLEID;
+
+       /* Write features and hw_features separately to avoid polluting
+        * with, or dropping, features that are set when we registered.
+        */
+       netdev->hw_features |= netdev->hw_enc_features;
+
+       netdev->features |= netdev->hw_enc_features | I40EVF_VLAN_FEATURES;
+       netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
+
+       /* disable VLAN features if not supported */
+       if (!(vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN))
+               netdev->features ^= I40EVF_VLAN_FEATURES;
 
        adapter->vsi.id = adapter->vsi_res->vsi_id;
 
@@ -2381,8 +2257,16 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
                                       ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
        adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
                                       ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
-       adapter->vsi.netdev = adapter->netdev;
-       adapter->vsi.qs_handle = adapter->vsi_res->qset_handle;
+       vsi->netdev = adapter->netdev;
+       vsi->qs_handle = adapter->vsi_res->qset_handle;
+       if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+               adapter->rss_key_size = vfres->rss_key_size;
+               adapter->rss_lut_size = vfres->rss_lut_size;
+       } else {
+               adapter->rss_key_size = I40EVF_HKEY_ARRAY_SIZE;
+               adapter->rss_lut_size = I40EVF_HLUT_ARRAY_SIZE;
+       }
+
        return 0;
 }
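
One subtlety above: "netdev->features ^= I40EVF_VLAN_FEATURES" clears the
VLAN bits only because they were set unconditionally a few lines earlier;
XOR toggles rather than clears, so "&= ~mask" is the safer general idiom.
A tiny demonstration:

    #include <stdio.h>

    #define VLAN_BITS 0x6u  /* stand-in for I40EVF_VLAN_FEATURES */

    int main(void)
    {
            unsigned int feat = 0x1u | VLAN_BITS;   /* VLAN bits known set */

            feat ^= VLAN_BITS;
            printf("%#x\n", feat);  /* 0x1: cleared, as intended */
            feat ^= VLAN_BITS;
            printf("%#x\n", feat);  /* 0x7: a second toggle re-enables them */
            return 0;
    }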
 
@@ -2515,11 +2399,6 @@ static void i40evf_init_task(struct work_struct *work)
        adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
 
        adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
-       adapter->flags |= I40EVF_FLAG_RX_1BUF_CAPABLE;
-       adapter->flags |= I40EVF_FLAG_RX_PS_CAPABLE;
-
-       /* Default to single buffer rx, can be changed through ethtool. */
-       adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED;
 
        netdev->netdev_ops = &i40evf_netdev_ops;
        i40evf_set_ethtool_ops(netdev);
@@ -2578,6 +2457,11 @@ static void i40evf_init_task(struct work_struct *work)
        set_bit(__I40E_DOWN, &adapter->vsi.state);
        i40evf_misc_irq_enable(adapter);
 
+       adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
+       adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
+       if (!adapter->rss_key || !adapter->rss_lut)
+               goto err_mem;
+
        if (RSS_AQ(adapter)) {
                adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
                mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
@@ -2588,7 +2472,8 @@ static void i40evf_init_task(struct work_struct *work)
 restart:
        schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30));
        return;
-
+err_mem:
+       i40evf_free_rss(adapter);
 err_register:
        i40evf_free_misc_irq(adapter);
 err_sw_init:
@@ -2870,8 +2755,7 @@ static void i40evf_remove(struct pci_dev *pdev)
 
        flush_scheduled_work();
 
-       /* Clear user configurations for RSS */
-       i40evf_clear_rss_config_user(&adapter->vsi);
+       i40evf_free_rss(adapter);
 
        if (hw->aq.asq.count)
                i40evf_shutdown_adminq(hw);
@@ -2882,7 +2766,6 @@ static void i40evf_remove(struct pci_dev *pdev)
 
        iounmap(hw->hw_addr);
        pci_release_regions(pdev);
-
        i40evf_free_all_tx_resources(adapter);
        i40evf_free_all_rx_resources(adapter);
        i40evf_free_queues(adapter);
index 488e738f76c6dfdc0e75a2780e38ae307cb105c0..c5d33a2cea877752cb7c7b371b9299fdd3760f88 100644 (file)
@@ -270,10 +270,6 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
                vqpi->rxq.max_pkt_size = adapter->netdev->mtu
                                        + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
                vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len;
-               if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) {
-                       vqpi->rxq.splithdr_enabled = true;
-                       vqpi->rxq.hdr_size = I40E_RX_HDR_SIZE;
-               }
                vqpi++;
        }
 
@@ -652,6 +648,17 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
                        adapter->current_op);
                return;
        }
+
+       if (flags) {
+               adapter->flags |= I40EVF_FLAG_PROMISC_ON;
+               adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_PROMISC;
+               dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
+       } else {
+               adapter->flags &= ~I40EVF_FLAG_PROMISC_ON;
+               adapter->aq_required &= ~I40EVF_FLAG_AQ_RELEASE_PROMISC;
+               dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
+       }
+
        adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
        vpi.vsi_id = adapter->vsi_res->vsi_id;
        vpi.flags = flags;
@@ -681,6 +688,115 @@ void i40evf_request_stats(struct i40evf_adapter *adapter)
                /* if the request failed, don't lock out others */
                adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
 }
+
+/**
+ * i40evf_get_hena
+ * @adapter: adapter structure
+ *
+ * Request hash enable capabilities from PF
+ **/
+void i40evf_get_hena(struct i40evf_adapter *adapter)
+{
+       if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+               /* bail because we already have a command pending */
+               dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n",
+                       adapter->current_op);
+               return;
+       }
+       adapter->current_op = I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS;
+       adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_HENA;
+       i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS,
+                          NULL, 0);
+}
+
+/**
+ * i40evf_set_hena
+ * @adapter: adapter structure
+ *
+ * Request the PF to set our RSS hash capabilities
+ **/
+void i40evf_set_hena(struct i40evf_adapter *adapter)
+{
+       struct i40e_virtchnl_rss_hena vrh;
+
+       if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+               /* bail because we already have a command pending */
+               dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n",
+                       adapter->current_op);
+               return;
+       }
+       vrh.hena = adapter->hena;
+       adapter->current_op = I40E_VIRTCHNL_OP_SET_RSS_HENA;
+       adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_HENA;
+       i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_SET_RSS_HENA,
+                          (u8 *)&vrh, sizeof(vrh));
+}
+
+/**
+ * i40evf_set_rss_key
+ * @adapter: adapter structure
+ *
+ * Request the PF to set our RSS hash key
+ **/
+void i40evf_set_rss_key(struct i40evf_adapter *adapter)
+{
+       struct i40e_virtchnl_rss_key *vrk;
+       int len;
+
+       if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+               /* bail because we already have a command pending */
+               dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n",
+                       adapter->current_op);
+               return;
+       }
+       len = sizeof(struct i40e_virtchnl_rss_key) +
+             (adapter->rss_key_size * sizeof(u8)) - 1;
+       vrk = kzalloc(len, GFP_KERNEL);
+       if (!vrk)
+               return;
+       vrk->vsi_id = adapter->vsi.id;
+       vrk->key_len = adapter->rss_key_size;
+       memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size);
+
+       adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_RSS_KEY;
+       adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_KEY;
+       i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
+                          (u8 *)vrk, len);
+       kfree(vrk);
+}
+
+/**
+ * i40evf_set_rss_lut
+ * @adapter: adapter structure
+ *
+ * Request the PF to set our RSS lookup table
+ **/
+void i40evf_set_rss_lut(struct i40evf_adapter *adapter)
+{
+       struct i40e_virtchnl_rss_lut *vrl;
+       int len;
+
+       if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+               /* bail because we already have a command pending */
+               dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n",
+                       adapter->current_op);
+               return;
+       }
+       len = sizeof(struct i40e_virtchnl_rss_lut) +
+             (adapter->rss_lut_size * sizeof(u8)) - 1;
+       vrl = kzalloc(len, GFP_KERNEL);
+       if (!vrl)
+               return;
+       vrl->vsi_id = adapter->vsi.id;
+       vrl->lut_entries = adapter->rss_lut_size;
+       memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size);
+       adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_RSS_LUT;
+       adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_LUT;
+       i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
+                          (u8 *)vrl, len);
+       kfree(vrl);
+}
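
Both message builders above size their allocation as sizeof(struct) + n - 1
because the virtchnl structs end in a one-element array standing in for n
variable-length bytes. The same pattern with a stand-in struct (not the real
virtchnl layout):

    #include <stdlib.h>
    #include <string.h>

    struct msg {
            unsigned short vsi_id;
            unsigned short len;
            unsigned char data[1];  /* placeholder for 'len' bytes */
    };

    static struct msg *msg_alloc(unsigned short vsi_id,
                                 const unsigned char *buf, unsigned short n)
    {
            /* "+ n - 1": one payload byte is already inside sizeof(*m) */
            struct msg *m = calloc(1, sizeof(*m) + n - 1);

            if (!m)
                    return NULL;
            m->vsi_id = vsi_id;
            m->len = n;
            memcpy(m->data, buf, n);
            return m;
    }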
+
 /**
  * i40evf_request_reset
  * @adapter: adapter structure
@@ -820,6 +936,16 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
                if (v_opcode != adapter->current_op)
                        return;
                break;
+       case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
+               struct i40e_virtchnl_rss_hena *vrh =
+                       (struct i40e_virtchnl_rss_hena *)msg;
+               if (msglen == sizeof(*vrh))
+                       adapter->hena = vrh->hena;
+               else
+                       dev_warn(&adapter->pdev->dev,
+                                "Invalid message %d from PF\n", v_opcode);
+       }
+               break;
        default:
                if (v_opcode != adapter->current_op)
                        dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
index 8e96c35307fb8e6f8d2a66677aa82a4afe8bbd20..7460bdbe2e49f78b64c017839226b8be4c8097a2 100644 (file)
@@ -383,7 +383,7 @@ static void igb_dump(struct igb_adapter *adapter)
                dev_info(&adapter->pdev->dev, "Net device Info\n");
                pr_info("Device Name     state            trans_start      last_rx\n");
                pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
-                       netdev->state, netdev->trans_start, netdev->last_rx);
+                       netdev->state, dev_trans_start(netdev), netdev->last_rx);
        }
 
        /* Print Registers */
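
netdev->trans_start is being retired in favour of per-TX-queue timestamps,
which is why the dump now goes through dev_trans_start(). Roughly, that
helper reports the most recent start-of-transmission across all queues; a
simplified sketch (the in-tree version also unwraps VLAN devices):

    static unsigned long trans_start_sketch(struct net_device *dev)
    {
            unsigned long res = netdev_get_tx_queue(dev, 0)->trans_start;
            unsigned int i;

            for (i = 1; i < dev->num_tx_queues; i++) {
                    unsigned long val =
                            netdev_get_tx_queue(dev, i)->trans_start;

                    if (val && time_after(val, res))
                            res = val;
            }
            return res;
    }
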
index d10ed62993c105da40a2335220b4bec3356e0e7c..9f2db18554123973af358972fa9175456ddf2521 100644 (file)
@@ -143,14 +143,11 @@ struct vf_data_storage {
        unsigned char vf_mac_addresses[ETH_ALEN];
        u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
        u16 num_vf_mc_hashes;
-       u16 default_vf_vlan_id;
-       u16 vlans_enabled;
        bool clear_to_send;
        bool pf_set_mac;
        u16 pf_vlan; /* When set, guest VLAN config not allowed. */
        u16 pf_qos;
        u16 tx_rate;
-       u16 vlan_count;
        u8 spoofchk_enabled;
        bool rss_query_enabled;
        u8 trusted;
@@ -173,7 +170,7 @@ struct vf_macvlans {
 };
 
 #define IXGBE_MAX_TXD_PWR      14
-#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
+#define IXGBE_MAX_DATA_PER_TXD (1u << IXGBE_MAX_TXD_PWR)
 
 /* Tx Descriptors needed, worst case */
 #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
@@ -623,44 +620,45 @@ struct ixgbe_adapter {
         * thus the additional *_CAPABLE flags.
         */
        u32 flags;
-#define IXGBE_FLAG_MSI_ENABLED                  (u32)(1 << 1)
-#define IXGBE_FLAG_MSIX_ENABLED                 (u32)(1 << 3)
-#define IXGBE_FLAG_RX_1BUF_CAPABLE              (u32)(1 << 4)
-#define IXGBE_FLAG_RX_PS_CAPABLE                (u32)(1 << 5)
-#define IXGBE_FLAG_RX_PS_ENABLED                (u32)(1 << 6)
-#define IXGBE_FLAG_DCA_ENABLED                  (u32)(1 << 8)
-#define IXGBE_FLAG_DCA_CAPABLE                  (u32)(1 << 9)
-#define IXGBE_FLAG_IMIR_ENABLED                 (u32)(1 << 10)
-#define IXGBE_FLAG_MQ_CAPABLE                   (u32)(1 << 11)
-#define IXGBE_FLAG_DCB_ENABLED                  (u32)(1 << 12)
-#define IXGBE_FLAG_VMDQ_CAPABLE                 (u32)(1 << 13)
-#define IXGBE_FLAG_VMDQ_ENABLED                 (u32)(1 << 14)
-#define IXGBE_FLAG_FAN_FAIL_CAPABLE             (u32)(1 << 15)
-#define IXGBE_FLAG_NEED_LINK_UPDATE             (u32)(1 << 16)
-#define IXGBE_FLAG_NEED_LINK_CONFIG             (u32)(1 << 17)
-#define IXGBE_FLAG_FDIR_HASH_CAPABLE            (u32)(1 << 18)
-#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE         (u32)(1 << 19)
-#define IXGBE_FLAG_FCOE_CAPABLE                 (u32)(1 << 20)
-#define IXGBE_FLAG_FCOE_ENABLED                 (u32)(1 << 21)
-#define IXGBE_FLAG_SRIOV_CAPABLE                (u32)(1 << 22)
-#define IXGBE_FLAG_SRIOV_ENABLED                (u32)(1 << 23)
+#define IXGBE_FLAG_MSI_ENABLED                 BIT(1)
+#define IXGBE_FLAG_MSIX_ENABLED                        BIT(3)
+#define IXGBE_FLAG_RX_1BUF_CAPABLE             BIT(4)
+#define IXGBE_FLAG_RX_PS_CAPABLE               BIT(5)
+#define IXGBE_FLAG_RX_PS_ENABLED               BIT(6)
+#define IXGBE_FLAG_DCA_ENABLED                 BIT(8)
+#define IXGBE_FLAG_DCA_CAPABLE                 BIT(9)
+#define IXGBE_FLAG_IMIR_ENABLED                        BIT(10)
+#define IXGBE_FLAG_MQ_CAPABLE                  BIT(11)
+#define IXGBE_FLAG_DCB_ENABLED                 BIT(12)
+#define IXGBE_FLAG_VMDQ_CAPABLE                        BIT(13)
+#define IXGBE_FLAG_VMDQ_ENABLED                        BIT(14)
+#define IXGBE_FLAG_FAN_FAIL_CAPABLE            BIT(15)
+#define IXGBE_FLAG_NEED_LINK_UPDATE            BIT(16)
+#define IXGBE_FLAG_NEED_LINK_CONFIG            BIT(17)
+#define IXGBE_FLAG_FDIR_HASH_CAPABLE           BIT(18)
+#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE                BIT(19)
+#define IXGBE_FLAG_FCOE_CAPABLE                        BIT(20)
+#define IXGBE_FLAG_FCOE_ENABLED                        BIT(21)
+#define IXGBE_FLAG_SRIOV_CAPABLE               BIT(22)
+#define IXGBE_FLAG_SRIOV_ENABLED               BIT(23)
 #define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE       BIT(24)
 #define IXGBE_FLAG_RX_HWTSTAMP_ENABLED         BIT(25)
 #define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER     BIT(26)
+#define IXGBE_FLAG_DCB_CAPABLE                 BIT(27)
 
        u32 flags2;
-#define IXGBE_FLAG2_RSC_CAPABLE                 (u32)(1 << 0)
-#define IXGBE_FLAG2_RSC_ENABLED                 (u32)(1 << 1)
-#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE         (u32)(1 << 2)
-#define IXGBE_FLAG2_TEMP_SENSOR_EVENT           (u32)(1 << 3)
-#define IXGBE_FLAG2_SEARCH_FOR_SFP              (u32)(1 << 4)
-#define IXGBE_FLAG2_SFP_NEEDS_RESET             (u32)(1 << 5)
-#define IXGBE_FLAG2_RESET_REQUESTED             (u32)(1 << 6)
-#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT        (u32)(1 << 7)
-#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP         (u32)(1 << 8)
-#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP         (u32)(1 << 9)
-#define IXGBE_FLAG2_PTP_PPS_ENABLED            (u32)(1 << 10)
-#define IXGBE_FLAG2_PHY_INTERRUPT              (u32)(1 << 11)
+#define IXGBE_FLAG2_RSC_CAPABLE                        BIT(0)
+#define IXGBE_FLAG2_RSC_ENABLED                        BIT(1)
+#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE                BIT(2)
+#define IXGBE_FLAG2_TEMP_SENSOR_EVENT          BIT(3)
+#define IXGBE_FLAG2_SEARCH_FOR_SFP             BIT(4)
+#define IXGBE_FLAG2_SFP_NEEDS_RESET            BIT(5)
+#define IXGBE_FLAG2_RESET_REQUESTED            BIT(6)
+#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT       BIT(7)
+#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP         BIT(8)
+#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP         BIT(9)
+#define IXGBE_FLAG2_PTP_PPS_ENABLED            BIT(10)
+#define IXGBE_FLAG2_PHY_INTERRUPT              BIT(11)
 #define IXGBE_FLAG2_VXLAN_REREG_NEEDED         BIT(12)
 #define IXGBE_FLAG2_VLAN_PROMISC               BIT(13)
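
These conversions are not purely cosmetic: BIT(n) expands to an unsigned
shift (1UL << (n) in include/linux/bitops.h), so setting bit 31 no longer
shifts into the sign bit of a signed int, which is undefined behaviour.
A minimal illustration:

    #include <stdio.h>

    #define BIT(n)      (1UL << (n))    /* unsigned, like the kernel's */
    #define BIT_ULL(n)  (1ULL << (n))   /* for bit positions 32..63 */

    int main(void)
    {
            /* (1 << 31) would be UB on 32-bit int; these are well defined */
            printf("%#lx %#llx\n", BIT(31), BIT_ULL(40));
            return 0;
    }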
 
@@ -795,7 +793,7 @@ struct ixgbe_adapter {
        unsigned long fwd_bitmask; /* Bitmask indicating in use pools */
 
 #define IXGBE_MAX_LINK_HANDLE 10
-       struct ixgbe_mat_field *jump_tables[IXGBE_MAX_LINK_HANDLE];
+       struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE];
        unsigned long tables;
 
 /* maximum number of RETA entries among all devices supported by ixgbe
@@ -806,6 +804,8 @@ struct ixgbe_adapter {
 
 #define IXGBE_RSS_KEY_SIZE     40  /* size of RSS Hash Key in bytes */
        u32 rss_key[IXGBE_RSS_KEY_SIZE / sizeof(u32)];
+
+       bool need_crosstalk_fix;
 };
 
 static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
@@ -828,7 +828,7 @@ struct ixgbe_fdir_filter {
        struct hlist_node fdir_node;
        union ixgbe_atr_input filter;
        u16 sw_idx;
-       u16 action;
+       u64 action;
 };
 
 enum ixgbe_state_t {
@@ -896,8 +896,8 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
 void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *);
 void ixgbe_update_stats(struct ixgbe_adapter *adapter);
 int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
-int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
-                              u16 subdevice_id);
+bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
+                        u16 subdevice_id);
 #ifdef CONFIG_PCI_IOV
 void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
 #endif
index 6ecd598c6ef59928556e7dfb6b7179c11570d277..fb51be74dd4c2f790cd9636439652ad3ad283c55 100644 (file)
@@ -792,7 +792,7 @@ mac_reset_top:
        }
 
        gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
-       gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
+       gheccr &= ~(BIT(21) | BIT(18) | BIT(9) | BIT(6));
        IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
 
        /*
@@ -914,10 +914,10 @@ static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
        bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
        if (vlan_on)
                /* Turn on this VLAN id */
-               bits |= (1 << bitindex);
+               bits |= BIT(bitindex);
        else
                /* Turn off this VLAN id */
-               bits &= ~(1 << bitindex);
+               bits &= ~BIT(bitindex);
        IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
 
        return 0;
index 01519787324aeeebdbc223314008f6a2639f3667..47afed74a54d9791d64f1fd7d919424d207fd824 100644 (file)
@@ -1296,17 +1296,17 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
 #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
 do { \
        u32 n = (_n); \
-       if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
+       if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n)) \
                common_hash ^= lo_hash_dword >> n; \
-       else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+       else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \
                bucket_hash ^= lo_hash_dword >> n; \
-       else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
+       else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n)) \
                sig_hash ^= lo_hash_dword << (16 - n); \
-       if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
+       if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n + 16)) \
                common_hash ^= hi_hash_dword >> n; \
-       else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+       else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \
                bucket_hash ^= hi_hash_dword >> n; \
-       else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
+       else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n + 16)) \
                sig_hash ^= hi_hash_dword << (16 - n); \
 } while (0)
 
@@ -1440,9 +1440,9 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
 #define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
 do { \
        u32 n = (_n); \
-       if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+       if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \
                bucket_hash ^= lo_hash_dword >> n; \
-       if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+       if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \
                bucket_hash ^= hi_hash_dword >> n; \
 } while (0)
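
The unrolled macros above are the per-iteration body of a key-driven fold:
for every set bit n of the bucket key, the hash dword shifted right by n is
XORed in (low key half against lo_hash_dword, high half against
hi_hash_dword). A loop-form sketch, assuming the macro is applied for
n = 0..15:

    #define BIT(n) (1u << (n))

    static unsigned int bkt_hash_sketch(unsigned int key,
                                        unsigned int lo_hash_dword,
                                        unsigned int hi_hash_dword)
    {
            unsigned int bucket_hash = 0, n;

            for (n = 0; n < 16; n++) {
                    if (key & BIT(n))
                            bucket_hash ^= lo_hash_dword >> n;
                    if (key & BIT(n + 16))
                            bucket_hash ^= hi_hash_dword >> n;
            }
            return bucket_hash;
    }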
 
index 737443a015d5cc63dfcaec31cc25b588471b3ee1..902d2061ce73a32e44e7a48a1547f0c8205ba9a4 100644 (file)
@@ -825,8 +825,8 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
                         */
                        eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
                                            IXGBE_EEC_SIZE_SHIFT);
-                       eeprom->word_size = 1 << (eeprom_size +
-                                                 IXGBE_EEPROM_WORD_SIZE_SHIFT);
+                       eeprom->word_size = BIT(eeprom_size +
+                                                IXGBE_EEPROM_WORD_SIZE_SHIFT);
                }
 
                if (eec & IXGBE_EEC_ADDR_SIZE)
@@ -1502,7 +1502,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
         * Mask is used to shift "count" bits of "data" out to the EEPROM
         * one bit at a time.  Determine the starting bit based on count
         */
-       mask = 0x01 << (count - 1);
+       mask = BIT(count - 1);
 
        for (i = 0; i < count; i++) {
                /*
@@ -1991,7 +1991,7 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
         */
        vector_reg = (vector >> 5) & 0x7F;
        vector_bit = vector & 0x1F;
-       hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
+       hw->mac.mta_shadow[vector_reg] |= BIT(vector_bit);
 }
 
 /**
@@ -2921,10 +2921,10 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
                        mpsar_hi = 0;
                }
        } else if (vmdq < 32) {
-               mpsar_lo &= ~(1 << vmdq);
+               mpsar_lo &= ~BIT(vmdq);
                IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
        } else {
-               mpsar_hi &= ~(1 << (vmdq - 32));
+               mpsar_hi &= ~BIT(vmdq - 32);
                IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
        }
 
@@ -2953,11 +2953,11 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
 
        if (vmdq < 32) {
                mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
-               mpsar |= 1 << vmdq;
+               mpsar |= BIT(vmdq);
                IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
        } else {
                mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
-               mpsar |= 1 << (vmdq - 32);
+               mpsar |= BIT(vmdq - 32);
                IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
        }
        return 0;
@@ -2978,11 +2978,11 @@ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
        u32 rar = hw->mac.san_mac_rar_index;
 
        if (vmdq < 32) {
-               IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
+               IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), BIT(vmdq));
                IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
        } else {
                IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
-               IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
+               IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), BIT(vmdq - 32));
        }
 
        return 0;
@@ -3082,7 +3082,7 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
         *    bits[4-0]:  which bit in the register
         */
        regidx = vlan / 32;
-       vfta_delta = 1 << (vlan % 32);
+       vfta_delta = BIT(vlan % 32);
        vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));
 
        /* vfta_delta represents the difference between the current value
@@ -3113,12 +3113,12 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
        bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));
 
        /* set the pool bit */
-       bits |= 1 << (vind % 32);
+       bits |= BIT(vind % 32);
        if (vlan_on)
                goto vlvf_update;
 
        /* clear the pool bit */
-       bits ^= 1 << (vind % 32);
+       bits ^= BIT(vind % 32);
 
        if (!bits &&
            !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
@@ -3310,43 +3310,25 @@ wwn_prefix_err:
 /**
  *  ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
  *  @hw: pointer to hardware structure
- *  @enable: enable or disable switch for anti-spoofing
- *  @pf: Physical Function pool - do not enable anti-spoofing for the PF
+ *  @enable: enable or disable switch for MAC anti-spoofing
+ *  @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
  *
  **/
-void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
 {
-       int j;
-       int pf_target_reg = pf >> 3;
-       int pf_target_shift = pf % 8;
-       u32 pfvfspoof = 0;
+       int vf_target_reg = vf >> 3;
+       int vf_target_shift = vf % 8;
+       u32 pfvfspoof;
 
        if (hw->mac.type == ixgbe_mac_82598EB)
                return;
 
+       pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
        if (enable)
-               pfvfspoof = IXGBE_SPOOF_MACAS_MASK;
-
-       /*
-        * PFVFSPOOF register array is size 8 with 8 bits assigned to
-        * MAC anti-spoof enables in each register array element.
-        */
-       for (j = 0; j < pf_target_reg; j++)
-               IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
-
-       /*
-        * The PF should be allowed to spoof so that it can support
-        * emulation mode NICs.  Do not set the bits assigned to the PF
-        */
-       pfvfspoof &= (1 << pf_target_shift) - 1;
-       IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
-
-       /*
-        * Remaining pools belong to the PF so they do not need to have
-        * anti-spoofing enabled.
-        */
-       for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
-               IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
+               pfvfspoof |= BIT(vf_target_shift);
+       else
+               pfvfspoof &= ~BIT(vf_target_shift);
+       IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
 }
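
The rewrite above replaces a program-every-pool loop with a per-VF
read-modify-write: each 32-bit PFVFSPOOF register carries eight enable bits,
so vf >> 3 selects the register and vf % 8 the bit. The index math in
isolation:

    #include <stdio.h>

    #define BIT(n) (1u << (n))

    static unsigned int spoofchk_update(unsigned int reg, int vf, int enable)
    {
            int shift = vf % 8;     /* bit within the register */

            return enable ? reg | BIT(shift) : reg & ~BIT(shift);
    }

    int main(void)
    {
            printf("VF 13 -> register %d\n", 13 >> 3);      /* register 1 */
            printf("%#x\n", spoofchk_update(0x0, 13, 1));   /* bit 5: 0x20 */
            return 0;
    }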
 
 /**
@@ -3367,9 +3349,9 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
 
        pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
        if (enable)
-               pfvfspoof |= (1 << vf_target_shift);
+               pfvfspoof |= BIT(vf_target_shift);
        else
-               pfvfspoof &= ~(1 << vf_target_shift);
+               pfvfspoof &= ~BIT(vf_target_shift);
        IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
 }
 
index 6f8e6a56e2420e63e2dc0fe5851601c3bbf3bd1b..6d4c260d0cbdf9e31a8d09e55b5231323a12430b 100644 (file)
@@ -106,7 +106,7 @@ s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
 
 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
-void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
index f8fb2acc2632d0a433c943301627606706024e81..072ef3b5fc61e3e082e24e2a03498f4934514b4b 100644 (file)
@@ -186,7 +186,7 @@ void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en)
 
        for (*pfc_en = 0, tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
                if (tc_config[tc].dcb_pfc != pfc_disabled)
-                       *pfc_en |= 1 << tc;
+                       *pfc_en |= BIT(tc);
        }
 }
 
@@ -232,7 +232,7 @@ void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction,
 u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up)
 {
        struct tc_configuration *tc_config = &cfg->tc_config[0];
-       u8 prio_mask = 1 << up;
+       u8 prio_mask = BIT(up);
        u8 tc = cfg->num_tcs.pg_tcs;
 
        /* If tc is 0 then DCB is likely not enabled or supported */
index d3ba63f9ad3712fcf82d1bafc3408070112d1aa0..b79e93a5b699300bceb2f72279f780b2110a9ae9 100644 (file)
@@ -210,7 +210,7 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
 
        /* Configure PFC Tx thresholds per TC */
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-               if (!(pfc_en & (1 << i))) {
+               if (!(pfc_en & BIT(i))) {
                        IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
                        IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
                        continue;
index b5cc989a3d23afcada8cca2e754d9320f7a8144a..1011d644978f6fdbfd7e5757ecb996429beb7123 100644 (file)
@@ -248,7 +248,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
                int enabled = 0;
 
                for (j = 0; j < MAX_USER_PRIORITY; j++) {
-                       if ((prio_tc[j] == i) && (pfc_en & (1 << j))) {
+                       if ((prio_tc[j] == i) && (pfc_en & BIT(j))) {
                                enabled = 1;
                                break;
                        }
index 2707bda374187fe3bdbb961873cc55b023b3a2e5..b8fc3cfec8310b6ac1b83c97810e22b2e64316b8 100644 (file)
@@ -62,7 +62,7 @@ static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max)
                             };
        u8 up = dcb_getapp(adapter->netdev, &app);
 
-       if (up && !(up & (1 << adapter->fcoe.up)))
+       if (up && !(up & BIT(adapter->fcoe.up)))
                changes |= BIT_APP_UPCHG;
 #endif
 
@@ -657,7 +657,7 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
            app->protocol == ETH_P_FCOE) {
                u8 app_mask = dcb_ieee_getapp_mask(dev, app);
 
-               if (app_mask & (1 << adapter->fcoe.up))
+               if (app_mask & BIT(adapter->fcoe.up))
                        return 0;
 
                adapter->fcoe.up = app->priority;
@@ -700,7 +700,7 @@ static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev,
            app->protocol == ETH_P_FCOE) {
                u8 app_mask = dcb_ieee_getapp_mask(dev, app);
 
-               if (app_mask & (1 << adapter->fcoe.up))
+               if (app_mask & BIT(adapter->fcoe.up))
                        return 0;
 
                adapter->fcoe.up = app_mask ?
index 9f76be1431b1ebf9840a6b7d075e1f218d617c76..59b771b9b354b7bf049f6aca9963a2a69ca2c390 100644 (file)
@@ -533,10 +533,8 @@ static void ixgbe_get_regs(struct net_device *netdev,
 
        /* Flow Control */
        regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
-       regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0));
-       regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
-       regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
-       regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
+       for (i = 0; i < 4; i++)
+               regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i));
        for (i = 0; i < 8; i++) {
                switch (hw->mac.type) {
                case ixgbe_mac_82598EB:
@@ -720,8 +718,10 @@ static void ixgbe_get_regs(struct net_device *netdev,
        regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
        regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
        regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
-       regs_buff[942] = IXGBE_GET_STAT(adapter, gorc);
-       regs_buff[944] = IXGBE_GET_STAT(adapter, gotc);
+       regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc);
+       regs_buff[943] = (u32)(IXGBE_GET_STAT(adapter, gorc) >> 32);
+       regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc);
+       regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32);
        for (i = 0; i < 8; i++)
                regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
        regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
@@ -731,7 +731,8 @@ static void ixgbe_get_regs(struct net_device *netdev,
        regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
        regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
        regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
-       regs_buff[961] = IXGBE_GET_STAT(adapter, tor);
+       regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor);
+       regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32);
        regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
        regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
        regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
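
The ethtool regs buffer is an array of u32, so the 64-bit gorc/gotc counters
(and tor below) now occupy two consecutive slots, low word then high word,
instead of being silently truncated. The split in isolation:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t gorc = 0x00000123456789abULL;  /* made-up octet counter */
            uint32_t buff[2];

            buff[0] = (uint32_t)gorc;               /* bits 31:0  */
            buff[1] = (uint32_t)(gorc >> 32);       /* bits 63:32 */
            printf("%08x %08x\n", (unsigned)buff[0], (unsigned)buff[1]);
            return 0;
    }
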
@@ -803,15 +804,11 @@ static void ixgbe_get_regs(struct net_device *netdev,
                regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
        regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
        regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
-       regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
-       regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1);
-       regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2);
-       regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3);
+       for (i = 0; i < 4; i++)
+               regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i));
        regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
-       regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0);
-       regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1);
-       regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
-       regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
+       for (i = 0; i < 4; i++)
+               regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i));
        for (i = 0; i < 8; i++)
                regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
        regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
@@ -1586,7 +1583,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
        /* Test each interrupt */
        for (; i < 10; i++) {
                /* Interrupt to test */
-               mask = 1 << i;
+               mask = BIT(i);
 
                if (!shared_int) {
                        /*
@@ -3014,14 +3011,14 @@ static int ixgbe_get_ts_info(struct net_device *dev,
                        info->phc_index = -1;
 
                info->tx_types =
-                       (1 << HWTSTAMP_TX_OFF) |
-                       (1 << HWTSTAMP_TX_ON);
+                       BIT(HWTSTAMP_TX_OFF) |
+                       BIT(HWTSTAMP_TX_ON);
 
                info->rx_filters =
-                       (1 << HWTSTAMP_FILTER_NONE) |
-                       (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
-                       (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
-                       (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+                       BIT(HWTSTAMP_FILTER_NONE) |
+                       BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+                       BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+                       BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
                break;
        default:
                return ethtool_op_get_ts_info(dev, info);
index 2976df77bf14f58f7e849324d34360ccdf1dfbd5..d08fbcfb9417da0b587b07d3986b2f474e6bf917 100644 (file)
@@ -53,6 +53,7 @@
 #include <net/vxlan.h>
 #include <net/pkt_cls.h>
 #include <net/tc_act/tc_gact.h>
+#include <net/tc_act/tc_mirred.h>
 
 #include "ixgbe.h"
 #include "ixgbe_common.h"
@@ -371,6 +372,27 @@ u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
 
        if (ixgbe_removed(reg_addr))
                return IXGBE_FAILED_READ_REG;
+       if (unlikely(hw->phy.nw_mng_if_sel &
+                    IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M)) {
+               struct ixgbe_adapter *adapter;
+               int i;
+
+               for (i = 0; i < 200; ++i) {
+                       value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY);
+                       if (likely(!value))
+                               goto writes_completed;
+                       if (value == IXGBE_FAILED_READ_REG) {
+                               ixgbe_remove_adapter(hw);
+                               return IXGBE_FAILED_READ_REG;
+                       }
+                       udelay(5);
+               }
+
+               adapter = hw->back;
+               e_warn(hw, "register writes incomplete %08x\n", value);
+       }
+
+writes_completed:
        value = readl(reg_addr + reg);
        if (unlikely(value == IXGBE_FAILED_READ_REG))
                ixgbe_check_remove(hw, reg);
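
The added guard polls the SGMII busy flag with a bounded loop (200 tries at
5 us, about 1 ms) before issuing the real read, and bails out entirely when
the poll returns the removed-adapter pattern. The bounded-poll shape in a
generic, simplified sketch (kernel-style fragment, not the hunk verbatim):

    /* returns 0 once idle, -1 on timeout; 'addr' is the busy-flag register */
    static int wait_until_idle(void __iomem *addr, int tries)
    {
            while (tries--) {
                    if (!readl(addr))
                            return 0;
                    udelay(5);      /* total budget: tries * 5 us */
            }
            return -1;
    }
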
@@ -587,7 +609,7 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
                pr_info("%-15s %016lX %016lX %016lX\n",
                        netdev->name,
                        netdev->state,
-                       netdev->trans_start,
+                       dev_trans_start(netdev),
                        netdev->last_rx);
        }
 
@@ -2224,7 +2246,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 
        /* Populate MSIX to EITR Select */
        if (adapter->num_vfs > 32) {
-               u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
+               u32 eitrsel = BIT(adapter->num_vfs - 32) - 1;
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
        }
 
@@ -2863,7 +2885,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
        if (adapter->rx_itr_setting & 1)
                ixgbe_set_itr(q_vector);
        if (!test_bit(__IXGBE_DOWN, &adapter->state))
-               ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
+               ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
 
        return 0;
 }
@@ -3156,15 +3178,15 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
         * currently 40.
         */
        if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
-               txdctl |= (1 << 16);    /* WTHRESH = 1 */
+               txdctl |= 1u << 16;     /* WTHRESH = 1 */
        else
-               txdctl |= (8 << 16);    /* WTHRESH = 8 */
+               txdctl |= 8u << 16;     /* WTHRESH = 8 */
 
        /*
         * Setting PTHRESH to 32 both improves performance
         * and avoids a TX hang with DFP enabled
         */
-       txdctl |= (1 << 8) |    /* HTHRESH = 1 */
+       txdctl |= (1u << 8) |   /* HTHRESH = 1 */
                   32;          /* PTHRESH = 32 */
 
        /* reinitialize flowdirector state */
@@ -3716,9 +3738,9 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
                return;
 
        if (rss_i > 3)
-               psrtype |= 2 << 29;
+               psrtype |= 2u << 29;
        else if (rss_i > 1)
-               psrtype |= 1 << 29;
+               psrtype |= 1u << 29;
 
        for_each_set_bit(pool, &adapter->fwd_bitmask, 32)
                IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
@@ -3745,9 +3767,9 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
        reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
 
        /* Enable only the PF's pool for Tx/Rx */
-       IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift);
+       IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(31, vf_shift));
        IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
-       IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
+       IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(31, vf_shift));
        IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
        if (adapter->bridge_mode == BRIDGE_MODE_VEB)
                IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
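
These hunks are part of a tree-wide conversion to the bit helpers from linux/bitops.h: BIT(n) expands to 1UL << n, BIT_ULL(n) is its 64-bit counterpart, and GENMASK(h, l) sets bits l through h with the high bit as the first argument, so the old (~0) << vf_shift becomes GENMASK(31, vf_shift). A standalone check of the equivalence, using simplified 32-bit definitions rather than the kernel's:

    #include <stdio.h>

    /* simplified 32-bit stand-ins for the kernel macros */
    #define BIT(nr)        (1u << (nr))
    #define GENMASK(h, l)  (((~0u) << (l)) & (~0u >> (31 - (h))))

    int main(void)
    {
        unsigned int vf_shift = 8;

        printf("BIT(8)           = 0x%08x\n", BIT(8));
        printf("(~0) << vf_shift = 0x%08x\n", ~0u << vf_shift);
        printf("GENMASK(31, 8)   = 0x%08x\n", GENMASK(31, vf_shift));
        return 0;
    }

Both of the last two lines print 0xffffff00, confirming the high-bit-first argument order.
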
@@ -3776,34 +3798,10 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
 
        IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
 
-
-       /* Enable MAC Anti-Spoofing */
-       hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
-                                         adapter->num_vfs);
-
-       /* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be
-        * calling set_ethertype_anti_spoofing for each VF in loop below
-        */
-       if (hw->mac.ops.set_ethertype_anti_spoofing) {
-               IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
-                               (IXGBE_ETQF_FILTER_EN    |
-                                IXGBE_ETQF_TX_ANTISPOOF |
-                                IXGBE_ETH_P_LLDP));
-
-               IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
-                               (IXGBE_ETQF_FILTER_EN |
-                                IXGBE_ETQF_TX_ANTISPOOF |
-                                ETH_P_PAUSE));
-       }
-
-       /* For VFs that have spoof checking turned off */
        for (i = 0; i < adapter->num_vfs; i++) {
-               if (!adapter->vfinfo[i].spoofchk_enabled)
-                       ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false);
-
-               /* enable ethertype anti spoofing if hw supports it */
-               if (hw->mac.ops.set_ethertype_anti_spoofing)
-                       hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i);
+               /* configure spoof checking */
+               ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i,
+                                         adapter->vfinfo[i].spoofchk_enabled);
 
                /* Enable/Disable RSS query feature  */
                ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i,
@@ -3997,7 +3995,7 @@ void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid)
         * entry other than the PF.
         */
        word = idx * 2 + (VMDQ_P(0) / 32);
-       bits = ~(1 << (VMDQ_P(0)) % 32);
+       bits = ~BIT(VMDQ_P(0) % 32);
        bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
 
        /* Disable the filter so this falls into the default pool. */
@@ -4132,7 +4130,7 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
                u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
                u32 vlvfb = IXGBE_READ_REG(hw, reg_offset);
 
-               vlvfb |= 1 << (VMDQ_P(0) % 32);
+               vlvfb |= BIT(VMDQ_P(0) % 32);
                IXGBE_WRITE_REG(hw, reg_offset, vlvfb);
        }
 
@@ -4162,7 +4160,7 @@ static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
 
                if (vlvf) {
                        /* record VLAN ID in VFTA */
-                       vfta[(vid - vid_start) / 32] |= 1 << (vid % 32);
+                       vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
 
                        /* if PF is part of this then continue */
                        if (test_bit(vid, adapter->active_vlans))
@@ -4171,7 +4169,7 @@ static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
 
                /* remove PF from the pool */
                word = i * 2 + VMDQ_P(0) / 32;
-               bits = ~(1 << (VMDQ_P(0) % 32));
+               bits = ~BIT(VMDQ_P(0) % 32);
                bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
                IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits);
        }
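
The VLVF bookkeeping in the hunks above packs 64 pool-enable bits per VLAN filter entry into a pair of 32-bit VLVFB registers, so a pool index splits into a word offset and a bit position, matching the BIT(VMDQ_P(0) % 32) expressions. The arithmetic in isolation:

    #include <stdio.h>

    /* each VLVF entry owns two 32-bit VLVFB pool-enable registers */
    int main(void)
    {
        int idx = 5;                       /* example VLVF entry */

        for (int pool = 0; pool < 64; pool += 21) {
            int word = idx * 2 + pool / 32;
            int bit  = pool % 32;
            printf("pool %2d -> VLVFB word %d, bit %d\n", pool, word, bit);
        }
        return 0;
    }
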
@@ -4865,9 +4863,9 @@ static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
                return;
 
        if (rss_i > 3)
-               psrtype |= 2 << 29;
+               psrtype |= 2u << 29;
        else if (rss_i > 1)
-               psrtype |= 1 << 29;
+               psrtype |= 1u << 29;
 
        IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
 }
@@ -4931,7 +4929,7 @@ static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
        /* shutdown specific queue receive and wait for dma to settle */
        ixgbe_disable_rx_queue(adapter, rx_ring);
        usleep_range(10000, 20000);
-       ixgbe_irq_disable_queues(adapter, ((u64)1 << index));
+       ixgbe_irq_disable_queues(adapter, BIT_ULL(index));
        ixgbe_clean_rx_ring(rx_ring);
        rx_ring->l2_accel_priv = NULL;
 }
@@ -5290,7 +5288,7 @@ void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
 {
        WARN_ON(in_interrupt());
        /* put off any impending NetWatchDogTimeout */
-       adapter->netdev->trans_start = jiffies;
+       netif_trans_update(adapter->netdev);
 
        while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
                usleep_range(1000, 2000);
@@ -5561,6 +5559,58 @@ static void ixgbe_tx_timeout(struct net_device *netdev)
        ixgbe_tx_timeout_reset(adapter);
 }
 
+#ifdef CONFIG_IXGBE_DCB
+static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       struct tc_configuration *tc;
+       int j;
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+       case ixgbe_mac_82599EB:
+               adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
+               adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
+               break;
+       case ixgbe_mac_X540:
+       case ixgbe_mac_X550:
+               adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
+               adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
+               break;
+       case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
+       default:
+               adapter->dcb_cfg.num_tcs.pg_tcs = DEF_TRAFFIC_CLASS;
+               adapter->dcb_cfg.num_tcs.pfc_tcs = DEF_TRAFFIC_CLASS;
+               break;
+       }
+
+       /* Configure DCB traffic classes */
+       for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
+               tc = &adapter->dcb_cfg.tc_config[j];
+               tc->path[DCB_TX_CONFIG].bwg_id = 0;
+               tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
+               tc->path[DCB_RX_CONFIG].bwg_id = 0;
+               tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
+               tc->dcb_pfc = pfc_disabled;
+       }
+
+       /* Initialize default user to priority mapping, UPx->TC0 */
+       tc = &adapter->dcb_cfg.tc_config[0];
+       tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
+       tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
+
+       adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
+       adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
+       adapter->dcb_cfg.pfc_mode_enable = false;
+       adapter->dcb_set_bitmap = 0x00;
+       if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
+               adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
+       memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
+              sizeof(adapter->temp_dcb_cfg));
+}
+#endif
+
 /**
  * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
  * @adapter: board private structure to initialize
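
In the new ixgbe_init_dcb() above, the bandwidth group formula 12 + (j & 1) assigns 12% to even and 13% to odd traffic classes, which across the eight classes sums to exactly 100. A one-line check of the arithmetic:

    #include <stdio.h>

    #define MAX_TRAFFIC_CLASS 8

    int main(void)
    {
        int sum = 0;

        for (int j = 0; j < MAX_TRAFFIC_CLASS; j++)
            sum += 12 + (j & 1);                  /* 12, 13, 12, 13, ... */
        printf("total bwg_percent = %d\n", sum);  /* prints 100 */
        return 0;
    }
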
@@ -5575,10 +5625,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
        struct pci_dev *pdev = adapter->pdev;
        unsigned int rss, fdir;
        u32 fwsm;
-#ifdef CONFIG_IXGBE_DCB
-       int j;
-       struct tc_configuration *tc;
-#endif
+       u16 device_caps;
+       int i;
 
        /* PCI config space info */
 
@@ -5600,6 +5648,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
 #ifdef CONFIG_IXGBE_DCA
        adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
 #endif
+#ifdef CONFIG_IXGBE_DCB
+       adapter->flags |= IXGBE_FLAG_DCB_CAPABLE;
+       adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+#endif
 #ifdef IXGBE_FCOE
        adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
        adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
@@ -5610,7 +5662,14 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
 #endif /* IXGBE_FCOE */
 
        /* initialize static ixgbe jump table entries */
-       adapter->jump_tables[0] = ixgbe_ipv4_fields;
+       adapter->jump_tables[0] = kzalloc(sizeof(*adapter->jump_tables[0]),
+                                         GFP_KERNEL);
+       if (!adapter->jump_tables[0])
+               return -ENOMEM;
+       adapter->jump_tables[0]->mat = ixgbe_ipv4_fields;
+
+       for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++)
+               adapter->jump_tables[i] = NULL;
 
        adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
                                     hw->mac.num_rar_entries,
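
The jump tables now hold heap-allocated entries (struct ixgbe_jump_table, added later in this diff in ixgbe_model.h) rather than bare pointers to static match tables: slot 0 is allocated eagerly here, and every slot is torn down in ixgbe_remove() by freeing its cached input and mask before the table itself. A simplified userspace model of that lifecycle, with stand-in types and sizes:

    #include <stdlib.h>

    #define MAX_LINK_HANDLE 7   /* stand-in for IXGBE_MAX_LINK_HANDLE */

    struct jump_table {
        const void *mat;        /* static match-field table, not freed */
        void *input;            /* cached filter input, freed on remove */
        void *mask;             /* cached filter mask, freed on remove */
    };

    static struct jump_table *tables[MAX_LINK_HANDLE];
    static const int ipv4_fields;   /* stand-in for ixgbe_ipv4_fields */

    static int tables_init(void)
    {
        tables[0] = calloc(1, sizeof(*tables[0]));
        if (!tables[0])
            return -1;
        tables[0]->mat = &ipv4_fields;
        return 0;               /* slots 1..N stay NULL until linked */
    }

    static void tables_exit(void)
    {
        for (int i = 0; i < MAX_LINK_HANDLE; i++) {
            if (tables[i]) {
                free(tables[i]->input);
                free(tables[i]->mask);
            }
            free(tables[i]);    /* free(NULL) is a no-op */
            tables[i] = NULL;
        }
    }

    int main(void)
    {
        if (tables_init() == 0)
            tables_exit();
        return 0;
    }
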
@@ -5649,6 +5708,16 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
                break;
        case ixgbe_mac_X550EM_x:
        case ixgbe_mac_x550em_a:
+#ifdef CONFIG_IXGBE_DCB
+               adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
+#endif
+#ifdef IXGBE_FCOE
+               adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
+#ifdef CONFIG_IXGBE_DCB
+               adapter->fcoe.up = 0;
+#endif /* CONFIG_IXGBE_DCB */
+#endif /* IXGBE_FCOE */
+       /* Fall Through */
        case ixgbe_mac_X550:
 #ifdef CONFIG_IXGBE_DCA
                adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
@@ -5670,43 +5739,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
        spin_lock_init(&adapter->fdir_perfect_lock);
 
 #ifdef CONFIG_IXGBE_DCB
-       switch (hw->mac.type) {
-       case ixgbe_mac_X540:
-       case ixgbe_mac_X550:
-       case ixgbe_mac_X550EM_x:
-       case ixgbe_mac_x550em_a:
-               adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
-               adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
-               break;
-       default:
-               adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
-               adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
-               break;
-       }
-
-       /* Configure DCB traffic classes */
-       for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
-               tc = &adapter->dcb_cfg.tc_config[j];
-               tc->path[DCB_TX_CONFIG].bwg_id = 0;
-               tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
-               tc->path[DCB_RX_CONFIG].bwg_id = 0;
-               tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
-               tc->dcb_pfc = pfc_disabled;
-       }
-
-       /* Initialize default user to priority mapping, UPx->TC0 */
-       tc = &adapter->dcb_cfg.tc_config[0];
-       tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
-       tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
-
-       adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
-       adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
-       adapter->dcb_cfg.pfc_mode_enable = false;
-       adapter->dcb_set_bitmap = 0x00;
-       adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
-       memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
-              sizeof(adapter->temp_dcb_cfg));
-
+       ixgbe_init_dcb(adapter);
 #endif
 
        /* default flow control settings */
@@ -5740,6 +5773,22 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
        adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
        adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
 
+       /* Cache bit indicating need for crosstalk fix */
+       switch (hw->mac.type) {
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
+               hw->mac.ops.get_device_caps(hw, &device_caps);
+               if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
+                       adapter->need_crosstalk_fix = false;
+               else
+                       adapter->need_crosstalk_fix = true;
+               break;
+       default:
+               adapter->need_crosstalk_fix = false;
+               break;
+       }
+
        /* set default work limits */
        adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
 
@@ -6631,7 +6680,7 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
                for (i = 0; i < adapter->num_q_vectors; i++) {
                        struct ixgbe_q_vector *qv = adapter->q_vector[i];
                        if (qv->rx.ring || qv->tx.ring)
-                               eics |= ((u64)1 << i);
+                               eics |= BIT_ULL(i);
                }
        }
 
@@ -6662,6 +6711,18 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
                link_up = true;
        }
 
+       /* If the crosstalk fix is enabled, sanity check that link is not
+        * reported up while the SFP+ cage is empty.
+        */
+       if (adapter->need_crosstalk_fix) {
+               u32 sfp_cage_full;
+
+               sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
+                               IXGBE_ESDP_SDP2;
+               if (ixgbe_is_sfp(hw) && link_up && !sfp_cage_full)
+                       link_up = false;
+       }
+
        if (adapter->ixgbe_ieee_pfc)
                pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
 
@@ -7008,6 +7069,16 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
        struct ixgbe_hw *hw = &adapter->hw;
        s32 err;
 
+       /* If the crosstalk fix is enabled, verify the SFP+ cage is full */
+       if (adapter->need_crosstalk_fix) {
+               u32 sfp_cage_full;
+
+               sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
+                               IXGBE_ESDP_SDP2;
+               if (!sfp_cage_full)
+                       return;
+       }
+
        /* not searching for SFP so there is nothing to do here */
        if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
            !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
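
Both crosstalk-fix checks, here and in the watchdog hunk above, reduce to the same test: mask the SDP2 bit out of the ESDP register and treat a zero result as an empty SFP+ cage. The predicate in isolation (the bit position shown is illustrative):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ESDP_SDP2 (1u << 2)   /* illustrative position for SDP2 */

    static bool sfp_cage_full(uint32_t esdp)
    {
        return (esdp & ESDP_SDP2) != 0;
    }

    int main(void)
    {
        printf("empty cage: %d, full cage: %d\n",
               sfp_cage_full(0), sfp_cage_full(ESDP_SDP2));
        return 0;
    }
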
@@ -7192,10 +7263,12 @@ static void ixgbe_service_task(struct work_struct *work)
                return;
        }
 #ifdef CONFIG_IXGBE_VXLAN
+       rtnl_lock();
        if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) {
                adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED;
                vxlan_get_rx_port(adapter->netdev);
        }
+       rtnl_unlock();
 #endif /* CONFIG_IXGBE_VXLAN */
        ixgbe_reset_subtask(adapter);
        ixgbe_phy_interrupt_subtask(adapter);
@@ -7218,9 +7291,18 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
                     struct ixgbe_tx_buffer *first,
                     u8 *hdr_len)
 {
+       u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
        struct sk_buff *skb = first->skb;
-       u32 vlan_macip_lens, type_tucmd;
-       u32 mss_l4len_idx, l4len;
+       union {
+               struct iphdr *v4;
+               struct ipv6hdr *v6;
+               unsigned char *hdr;
+       } ip;
+       union {
+               struct tcphdr *tcp;
+               unsigned char *hdr;
+       } l4;
+       u32 paylen, l4_offset;
        int err;
 
        if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -7233,46 +7315,52 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
        if (err < 0)
                return err;
 
+       ip.hdr = skb_network_header(skb);
+       l4.hdr = skb_checksum_start(skb);
+
        /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
        type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
 
-       if (first->protocol == htons(ETH_P_IP)) {
-               struct iphdr *iph = ip_hdr(skb);
-               iph->tot_len = 0;
-               iph->check = 0;
-               tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
-                                                        iph->daddr, 0,
-                                                        IPPROTO_TCP,
-                                                        0);
+       /* initialize outer IP header fields */
+       if (ip.v4->version == 4) {
+               /* IP header will have to cancel out any data that
+                * is not a part of the outer IP header
+                */
+               ip.v4->check = csum_fold(csum_add(lco_csum(skb),
+                                                 csum_unfold(l4.tcp->check)));
                type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+
+               ip.v4->tot_len = 0;
                first->tx_flags |= IXGBE_TX_FLAGS_TSO |
                                   IXGBE_TX_FLAGS_CSUM |
                                   IXGBE_TX_FLAGS_IPV4;
-       } else if (skb_is_gso_v6(skb)) {
-               ipv6_hdr(skb)->payload_len = 0;
-               tcp_hdr(skb)->check =
-                   ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-                                    &ipv6_hdr(skb)->daddr,
-                                    0, IPPROTO_TCP, 0);
+       } else {
+               ip.v6->payload_len = 0;
                first->tx_flags |= IXGBE_TX_FLAGS_TSO |
                                   IXGBE_TX_FLAGS_CSUM;
        }
 
-       /* compute header lengths */
-       l4len = tcp_hdrlen(skb);
-       *hdr_len = skb_transport_offset(skb) + l4len;
+       /* determine offset of inner transport header */
+       l4_offset = l4.hdr - skb->data;
+
+       /* compute length of segmentation header */
+       *hdr_len = (l4.tcp->doff * 4) + l4_offset;
+
+       /* remove payload length from inner checksum */
+       paylen = skb->len - l4_offset;
+       csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
 
        /* update gso size and bytecount with header size */
        first->gso_segs = skb_shinfo(skb)->gso_segs;
        first->bytecount += (first->gso_segs - 1) * *hdr_len;
 
        /* mss_l4len_id: use 0 as index for TSO */
-       mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
+       mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
        mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
 
        /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
-       vlan_macip_lens = skb_network_header_len(skb);
-       vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+       vlan_macip_lens = l4.hdr - ip.hdr;
+       vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
        vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
        ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
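
The reworked TSO path above seeds the hardware with a TCP pseudo-header checksum that must not cover the payload length, so csum_replace_by_diff() folds the length back out. It relies on the ones'-complement identity that subtracting a value equals adding its bitwise complement; a worked userspace check of that identity (not the kernel helper itself):

    #include <stdint.h>
    #include <stdio.h>

    /* fold a 32-bit accumulator into a 16-bit ones'-complement sum */
    static uint16_t fold(uint32_t sum)
    {
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
    }

    int main(void)
    {
        /* toy pseudo-header: src/dst IP words, proto, payload length */
        uint16_t words[] = { 0xc0a8, 0x0001, 0xc0a8, 0x0002,
                             0x0006, 1460 };
        uint32_t full = 0, no_len = 0;

        for (int i = 0; i < 6; i++)
            full += words[i];
        for (int i = 0; i < 5; i++)
            no_len += words[i];

        /* subtracting in ones'-complement == adding the complement */
        uint16_t derived = fold(full + (uint16_t)~words[5]);

        printf("recomputed 0x%04x, derived 0x%04x\n", fold(no_len), derived);
        return 0;
    }

Both values print 0x815a: removing the length word by adding its complement matches recomputing the sum without it.
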
@@ -8266,6 +8354,134 @@ static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter,
        return 0;
 }
 
+#ifdef CONFIG_NET_CLS_ACT
+static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
+                                 u8 *queue, u64 *action)
+{
+       unsigned int num_vfs = adapter->num_vfs, vf;
+       struct net_device *upper;
+       struct list_head *iter;
+
+       /* redirect to a SRIOV VF */
+       for (vf = 0; vf < num_vfs; ++vf) {
+               upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev);
+               if (upper->ifindex == ifindex) {
+                       if (adapter->num_rx_pools > 1)
+                               *queue = vf * 2;
+                       else
+                               *queue = vf * adapter->num_rx_queues_per_pool;
+
+                       *action = vf + 1;
+                       *action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+                       return 0;
+               }
+       }
+
+       /* redirect to an offloaded macvlan netdev */
+       netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
+               if (netif_is_macvlan(upper)) {
+                       struct macvlan_dev *dfwd = netdev_priv(upper);
+                       struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
+
+                       if (vadapter && vadapter->netdev->ifindex == ifindex) {
+                               *queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
+                               *action = *queue;
+                               return 0;
+                       }
+               }
+       }
+
+       return -EINVAL;
+}
+
+static int parse_tc_actions(struct ixgbe_adapter *adapter,
+                           struct tcf_exts *exts, u64 *action, u8 *queue)
+{
+       const struct tc_action *a;
+       int err;
+
+       if (tc_no_actions(exts))
+               return -EINVAL;
+
+       tc_for_each_action(a, exts) {
+
+               /* Drop action */
+               if (is_tcf_gact_shot(a)) {
+                       *action = IXGBE_FDIR_DROP_QUEUE;
+                       *queue = IXGBE_FDIR_DROP_QUEUE;
+                       return 0;
+               }
+
+               /* Redirect to a VF or an offloaded macvlan */
+               if (is_tcf_mirred_redirect(a)) {
+                       int ifindex = tcf_mirred_ifindex(a);
+
+                       err = handle_redirect_action(adapter, ifindex, queue,
+                                                    action);
+                       if (err == 0)
+                               return err;
+               }
+       }
+
+       return -EINVAL;
+}
+#else
+static int parse_tc_actions(struct ixgbe_adapter *adapter,
+                           struct tcf_exts *exts, u64 *action, u8 *queue)
+{
+       return -EINVAL;
+}
+#endif /* CONFIG_NET_CLS_ACT */
+
+static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input,
+                                   union ixgbe_atr_input *mask,
+                                   struct tc_cls_u32_offload *cls,
+                                   struct ixgbe_mat_field *field_ptr,
+                                   struct ixgbe_nexthdr *nexthdr)
+{
+       int i, j, off;
+       __be32 val, m;
+       bool found_entry = false, found_jump_field = false;
+
+       for (i = 0; i < cls->knode.sel->nkeys; i++) {
+               off = cls->knode.sel->keys[i].off;
+               val = cls->knode.sel->keys[i].val;
+               m = cls->knode.sel->keys[i].mask;
+
+               for (j = 0; field_ptr[j].val; j++) {
+                       if (field_ptr[j].off == off) {
+                               field_ptr[j].val(input, mask, val, m);
+                               input->filter.formatted.flow_type |=
+                                       field_ptr[j].type;
+                               found_entry = true;
+                               break;
+                       }
+               }
+               if (nexthdr) {
+                       if (nexthdr->off == cls->knode.sel->keys[i].off &&
+                           nexthdr->val == cls->knode.sel->keys[i].val &&
+                           nexthdr->mask == cls->knode.sel->keys[i].mask)
+                               found_jump_field = true;
+                       else
+                               continue;
+               }
+       }
+
+       if (nexthdr && !found_jump_field)
+               return -EINVAL;
+
+       if (!found_entry)
+               return 0;
+
+       mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
+                                   IXGBE_ATR_L4TYPE_MASK;
+
+       if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
+               mask->formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
+
+       return 0;
+}
+
 static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
                                  __be16 protocol,
                                  struct tc_cls_u32_offload *cls)
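
ixgbe_clsu32_build_input(), factored out above, walks each u32 key as an (offset, value, mask) triple and looks the offset up in a table of programmable fields, optionally also matching a jump key for link handles. The offset-matching core, modeled in userspace with stand-in tables:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct key   { int off; uint32_t val, mask; };
    struct field { int off; const char *name; };

    static const struct field fields[] = {
        { 12, "src ip" },
        { 16, "dst ip" },
        { 0, NULL },      /* NULL name terminates, like .val in the driver */
    };

    static bool build_input(const struct key *keys, int nkeys)
    {
        bool found = false;

        for (int i = 0; i < nkeys; i++) {
            for (int j = 0; fields[j].name; j++) {
                if (fields[j].off == keys[i].off) {
                    printf("key %d programs %s\n", i, fields[j].name);
                    found = true;
                    break;
                }
            }
        }
        return found;   /* no recognized offset means nothing to program */
    }

    int main(void)
    {
        const struct key keys[] = { { 12, 0, 0 }, { 16, 0, 0 } };

        return build_input(keys, 2) ? 0 : 1;
    }
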
@@ -8273,16 +8489,13 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
        u32 loc = cls->knode.handle & 0xfffff;
        struct ixgbe_hw *hw = &adapter->hw;
        struct ixgbe_mat_field *field_ptr;
-       struct ixgbe_fdir_filter *input;
-       union ixgbe_atr_input mask;
-#ifdef CONFIG_NET_CLS_ACT
-       const struct tc_action *a;
-#endif
-       int i, err = 0;
+       struct ixgbe_fdir_filter *input = NULL;
+       union ixgbe_atr_input *mask = NULL;
+       struct ixgbe_jump_table *jump = NULL;
+       int i, err = -EINVAL;
        u8 queue;
        u32 uhtid, link_uhtid;
 
-       memset(&mask, 0, sizeof(union ixgbe_atr_input));
        uhtid = TC_U32_USERHTID(cls->knode.handle);
        link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
 
@@ -8294,39 +8507,11 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
         * headers when needed.
         */
        if (protocol != htons(ETH_P_IP))
-               return -EINVAL;
-
-       if (link_uhtid) {
-               struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
-
-               if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
-                       return -EINVAL;
-
-               if (!test_bit(link_uhtid - 1, &adapter->tables))
-                       return -EINVAL;
-
-               for (i = 0; nexthdr[i].jump; i++) {
-                       if (nexthdr[i].o != cls->knode.sel->offoff ||
-                           nexthdr[i].s != cls->knode.sel->offshift ||
-                           nexthdr[i].m != cls->knode.sel->offmask ||
-                           /* do not support multiple key jumps its just mad */
-                           cls->knode.sel->nkeys > 1)
-                               return -EINVAL;
-
-                       if (nexthdr[i].off == cls->knode.sel->keys[0].off &&
-                           nexthdr[i].val == cls->knode.sel->keys[0].val &&
-                           nexthdr[i].mask == cls->knode.sel->keys[0].mask) {
-                               adapter->jump_tables[link_uhtid] =
-                                                               nexthdr[i].jump;
-                               break;
-                       }
-               }
-               return 0;
-       }
+               return err;
 
        if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) {
                e_err(drv, "Location out of range\n");
-               return -EINVAL;
+               return err;
        }
 
        /* cls u32 is a graph starting at root node 0x800. The driver tracks
@@ -8337,87 +8522,123 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
         * this function _should_ be generic try not to hardcode values here.
         */
        if (uhtid == 0x800) {
-               field_ptr = adapter->jump_tables[0];
+               field_ptr = (adapter->jump_tables[0])->mat;
        } else {
                if (uhtid >= IXGBE_MAX_LINK_HANDLE)
-                       return -EINVAL;
-
-               field_ptr = adapter->jump_tables[uhtid];
+                       return err;
+               if (!adapter->jump_tables[uhtid])
+                       return err;
+               field_ptr = (adapter->jump_tables[uhtid])->mat;
        }
 
        if (!field_ptr)
-               return -EINVAL;
+               return err;
 
-       input = kzalloc(sizeof(*input), GFP_KERNEL);
-       if (!input)
-               return -ENOMEM;
+       /* At this point we know the field_ptr is valid and need to either
+        * build a cls_u32 link or attach a filter. Adding a link to a
+        * handle that does not exist is invalid, and so is adding rules
+        * to handles that don't exist.
+        */
 
-       for (i = 0; i < cls->knode.sel->nkeys; i++) {
-               int off = cls->knode.sel->keys[i].off;
-               __be32 val = cls->knode.sel->keys[i].val;
-               __be32 m = cls->knode.sel->keys[i].mask;
-               bool found_entry = false;
-               int j;
+       if (link_uhtid) {
+               struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
 
-               for (j = 0; field_ptr[j].val; j++) {
-                       if (field_ptr[j].off == off) {
-                               field_ptr[j].val(input, &mask, val, m);
-                               input->filter.formatted.flow_type |=
-                                       field_ptr[j].type;
-                               found_entry = true;
+               if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
+                       return err;
+
+               if (!test_bit(link_uhtid - 1, &adapter->tables))
+                       return err;
+
+               for (i = 0; nexthdr[i].jump; i++) {
+                       if (nexthdr[i].o != cls->knode.sel->offoff ||
+                           nexthdr[i].s != cls->knode.sel->offshift ||
+                           nexthdr[i].m != cls->knode.sel->offmask)
+                               return err;
+
+                       jump = kzalloc(sizeof(*jump), GFP_KERNEL);
+                       if (!jump)
+                               return -ENOMEM;
+                       input = kzalloc(sizeof(*input), GFP_KERNEL);
+                       if (!input) {
+                               err = -ENOMEM;
+                               goto free_jump;
+                       }
+                       mask = kzalloc(sizeof(*mask), GFP_KERNEL);
+                       if (!mask) {
+                               err = -ENOMEM;
+                               goto free_input;
+                       }
+                       jump->input = input;
+                       jump->mask = mask;
+                       err = ixgbe_clsu32_build_input(input, mask, cls,
+                                                      field_ptr, &nexthdr[i]);
+                       if (!err) {
+                               jump->mat = nexthdr[i].jump;
+                               adapter->jump_tables[link_uhtid] = jump;
                                break;
                        }
                }
-
-               if (!found_entry)
-                       goto err_out;
+               return 0;
        }
 
-       mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
-                                  IXGBE_ATR_L4TYPE_MASK;
-
-       if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
-               mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
+       input = kzalloc(sizeof(*input), GFP_KERNEL);
+       if (!input)
+               return -ENOMEM;
+       mask = kzalloc(sizeof(*mask), GFP_KERNEL);
+       if (!mask) {
+               err = -ENOMEM;
+               goto free_input;
+       }
 
-#ifdef CONFIG_NET_CLS_ACT
-       if (list_empty(&cls->knode.exts->actions))
+       if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) {
+               if ((adapter->jump_tables[uhtid])->input)
+                       memcpy(input, (adapter->jump_tables[uhtid])->input,
+                              sizeof(*input));
+               if ((adapter->jump_tables[uhtid])->mask)
+                       memcpy(mask, (adapter->jump_tables[uhtid])->mask,
+                              sizeof(*mask));
+       }
+       err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL);
+       if (err)
                goto err_out;
 
-       list_for_each_entry(a, &cls->knode.exts->actions, list) {
-               if (!is_tcf_gact_shot(a))
-                       goto err_out;
-       }
-#endif
+       err = parse_tc_actions(adapter, cls->knode.exts, &input->action,
+                              &queue);
+       if (err < 0)
+               goto err_out;
 
-       input->action = IXGBE_FDIR_DROP_QUEUE;
-       queue = IXGBE_FDIR_DROP_QUEUE;
        input->sw_idx = loc;
 
        spin_lock(&adapter->fdir_perfect_lock);
 
        if (hlist_empty(&adapter->fdir_filter_list)) {
-               memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
-               err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
+               memcpy(&adapter->fdir_mask, mask, sizeof(*mask));
+               err = ixgbe_fdir_set_input_mask_82599(hw, mask);
                if (err)
                        goto err_out_w_lock;
-       } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
+       } else if (memcmp(&adapter->fdir_mask, mask, sizeof(*mask))) {
                err = -EINVAL;
                goto err_out_w_lock;
        }
 
-       ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);
+       ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask);
        err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter,
                                                    input->sw_idx, queue);
        if (!err)
                ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
        spin_unlock(&adapter->fdir_perfect_lock);
 
+       kfree(mask);
        return err;
 err_out_w_lock:
        spin_unlock(&adapter->fdir_perfect_lock);
 err_out:
+       kfree(mask);
+free_input:
        kfree(input);
-       return -EINVAL;
+free_jump:
+       kfree(jump);
+       return err;
 }
 
 static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
@@ -8860,17 +9081,36 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
        kfree(fwd_adapter);
 }
 
-#define IXGBE_MAX_TUNNEL_HDR_LEN 80
+#define IXGBE_MAX_MAC_HDR_LEN          127
+#define IXGBE_MAX_NETWORK_HDR_LEN      511
+
 static netdev_features_t
 ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
                     netdev_features_t features)
 {
-       if (!skb->encapsulation)
-               return features;
-
-       if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) >
-                    IXGBE_MAX_TUNNEL_HDR_LEN))
-               return features & ~NETIF_F_CSUM_MASK;
+       unsigned int network_hdr_len, mac_hdr_len;
+
+       /* Make certain the headers can be described by a context descriptor */
+       mac_hdr_len = skb_network_header(skb) - skb->data;
+       if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
+               return features & ~(NETIF_F_HW_CSUM |
+                                   NETIF_F_SCTP_CRC |
+                                   NETIF_F_HW_VLAN_CTAG_TX |
+                                   NETIF_F_TSO |
+                                   NETIF_F_TSO6);
+
+       network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+       if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN))
+               return features & ~(NETIF_F_HW_CSUM |
+                                   NETIF_F_SCTP_CRC |
+                                   NETIF_F_TSO |
+                                   NETIF_F_TSO6);
+
+       /* We can only support IPV4 TSO in tunnels if we can mangle the
+        * inner IP ID field, so strip TSO if MANGLEID is not supported.
+        */
+       if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+               features &= ~NETIF_F_TSO;
 
        return features;
 }
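
The new ixgbe_features_check() bounds replace the old tunnel-header limit: 127 and 511 bytes correspond to what the Tx context descriptor's MACLEN and IPLEN fields can describe, and anything larger falls back to software checksumming and segmentation. A sketch of the two checks with plain offsets and stand-in feature bits:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_MAC_HDR_LEN     127
    #define MAX_NETWORK_HDR_LEN 511
    #define F_CSUM (1u << 0)    /* stand-in feature flags */
    #define F_TSO  (1u << 1)

    static uint32_t features_check(unsigned int mac_hdr_len,
                                   unsigned int network_hdr_len,
                                   uint32_t features)
    {
        if (mac_hdr_len > MAX_MAC_HDR_LEN ||
            network_hdr_len > MAX_NETWORK_HDR_LEN)
            return features & ~(F_CSUM | F_TSO);  /* software fallback */
        return features;
    }

    int main(void)
    {
        printf("0x%x\n", features_check(14, 20, F_CSUM | F_TSO));
        printf("0x%x\n", features_check(200, 20, F_CSUM | F_TSO));
        return 0;
    }
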
@@ -8971,7 +9211,7 @@ static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
 
 /**
  * ixgbe_wol_supported - Check whether device supports WoL
- * @hw: hw specific details
+ * @adapter: the adapter private structure
  * @device_id: the device ID
  * @subdev_id: the subsystem device ID
  *
@@ -8979,19 +9219,33 @@ static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
  * which devices have WoL support
  *
  **/
-int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
-                       u16 subdevice_id)
+bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
+                        u16 subdevice_id)
 {
        struct ixgbe_hw *hw = &adapter->hw;
        u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
-       int is_wol_supported = 0;
 
+       /* WOL not supported on 82598 */
+       if (hw->mac.type == ixgbe_mac_82598EB)
+               return false;
+
+       /* check eeprom to see if WOL is enabled for X540 and newer */
+       if (hw->mac.type >= ixgbe_mac_X540) {
+               if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
+                   ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
+                    (hw->bus.func == 0)))
+                       return true;
+       }
+
+       /* WOL is determined based on device IDs for 82599 MACs */
        switch (device_id) {
        case IXGBE_DEV_ID_82599_SFP:
                /* Only these subdevices could support WOL */
                switch (subdevice_id) {
-               case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
                case IXGBE_SUBDEV_ID_82599_560FLR:
+               case IXGBE_SUBDEV_ID_82599_LOM_SNAP6:
+               case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
+               case IXGBE_SUBDEV_ID_82599_SFP_2OCP:
                        /* only support first port */
                        if (hw->bus.func != 0)
                                break;
@@ -8999,44 +9253,31 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
                case IXGBE_SUBDEV_ID_82599_SFP:
                case IXGBE_SUBDEV_ID_82599_RNDC:
                case IXGBE_SUBDEV_ID_82599_ECNA_DP:
-               case IXGBE_SUBDEV_ID_82599_LOM_SFP:
-                       is_wol_supported = 1;
-                       break;
+               case IXGBE_SUBDEV_ID_82599_SFP_1OCP:
+               case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1:
+               case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2:
+                       return true;
                }
                break;
        case IXGBE_DEV_ID_82599EN_SFP:
-               /* Only this subdevice supports WOL */
+               /* Only these subdevices support WOL */
                switch (subdevice_id) {
                case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1:
-                       is_wol_supported = 1;
-                       break;
+                       return true;
                }
                break;
        case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
                /* All except this subdevice support WOL */
                if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
-                       is_wol_supported = 1;
+                       return true;
                break;
        case IXGBE_DEV_ID_82599_KX4:
-               is_wol_supported = 1;
-               break;
-       case IXGBE_DEV_ID_X540T:
-       case IXGBE_DEV_ID_X540T1:
-       case IXGBE_DEV_ID_X550T:
-       case IXGBE_DEV_ID_X550T1:
-       case IXGBE_DEV_ID_X550EM_X_KX4:
-       case IXGBE_DEV_ID_X550EM_X_KR:
-       case IXGBE_DEV_ID_X550EM_X_10G_T:
-               /* check eeprom to see if enabled wol */
-               if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
-                   ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
-                    (hw->bus.func == 0))) {
-                       is_wol_supported = 1;
-               }
+               return true;
+       default:
                break;
        }
 
-       return is_wol_supported;
+       return false;
 }
 
 /**
@@ -9154,7 +9395,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_ioremap;
        }
        /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
-       if (!(eec & (1 << 8)))
+       if (!(eec & BIT(8)))
                hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
 
        /* PHY */
@@ -9237,37 +9478,51 @@ skip_sriov:
                           NETIF_F_TSO6 |
                           NETIF_F_RXHASH |
                           NETIF_F_RXCSUM |
-                          NETIF_F_HW_CSUM |
-                          NETIF_F_HW_VLAN_CTAG_TX |
-                          NETIF_F_HW_VLAN_CTAG_RX |
-                          NETIF_F_HW_VLAN_CTAG_FILTER;
+                          NETIF_F_HW_CSUM;
+
+#define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+                                   NETIF_F_GSO_GRE_CSUM | \
+                                   NETIF_F_GSO_IPIP | \
+                                   NETIF_F_GSO_SIT | \
+                                   NETIF_F_GSO_UDP_TUNNEL | \
+                                   NETIF_F_GSO_UDP_TUNNEL_CSUM)
+
+       netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES;
+       netdev->features |= NETIF_F_GSO_PARTIAL |
+                           IXGBE_GSO_PARTIAL_FEATURES;
 
        if (hw->mac.type >= ixgbe_mac_82599EB)
                netdev->features |= NETIF_F_SCTP_CRC;
 
        /* copy netdev features into list of user selectable features */
-       netdev->hw_features |= netdev->features;
-       netdev->hw_features |= NETIF_F_RXALL |
+       netdev->hw_features |= netdev->features |
+                              NETIF_F_HW_VLAN_CTAG_RX |
+                              NETIF_F_HW_VLAN_CTAG_TX |
+                              NETIF_F_RXALL |
                               NETIF_F_HW_L2FW_DOFFLOAD;
 
        if (hw->mac.type >= ixgbe_mac_82599EB)
                netdev->hw_features |= NETIF_F_NTUPLE |
                                       NETIF_F_HW_TC;
 
-       netdev->vlan_features |= NETIF_F_SG |
-                                NETIF_F_TSO |
-                                NETIF_F_TSO6 |
-                                NETIF_F_HW_CSUM |
-                                NETIF_F_SCTP_CRC;
+       if (pci_using_dac)
+               netdev->features |= NETIF_F_HIGHDMA;
+
+       /* set this bit last since it cannot be part of vlan_features */
+       netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+                           NETIF_F_HW_VLAN_CTAG_RX |
+                           NETIF_F_HW_VLAN_CTAG_TX;
 
+       netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
+       netdev->hw_enc_features |= netdev->vlan_features;
        netdev->mpls_features |= NETIF_F_HW_CSUM;
-       netdev->hw_enc_features |= NETIF_F_HW_CSUM;
 
        netdev->priv_flags |= IFF_UNICAST_FLT;
        netdev->priv_flags |= IFF_SUPP_NOFCS;
 
 #ifdef CONFIG_IXGBE_DCB
-       netdev->dcbnl_ops = &dcbnl_ops;
+       if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
+               netdev->dcbnl_ops = &dcbnl_ops;
 #endif
 
 #ifdef IXGBE_FCOE
@@ -9292,10 +9547,6 @@ skip_sriov:
                                         NETIF_F_FCOE_MTU;
        }
 #endif /* IXGBE_FCOE */
-       if (pci_using_dac) {
-               netdev->features |= NETIF_F_HIGHDMA;
-               netdev->vlan_features |= NETIF_F_HIGHDMA;
-       }
 
        if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
                netdev->hw_features |= NETIF_F_LRO;
@@ -9461,6 +9712,7 @@ err_sw_init:
        ixgbe_disable_sriov(adapter);
        adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
        iounmap(adapter->io_addr);
+       kfree(adapter->jump_tables[0]);
        kfree(adapter->mac_table);
 err_ioremap:
        disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
@@ -9489,6 +9741,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
        struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev;
        bool disable_dev;
+       int i;
 
        /* if !adapter then we already cleaned up in probe */
        if (!adapter)
@@ -9538,6 +9791,14 @@ static void ixgbe_remove(struct pci_dev *pdev)
 
        e_dev_info("complete\n");
 
+       for (i = 0; i < IXGBE_MAX_LINK_HANDLE; i++) {
+               if (adapter->jump_tables[i]) {
+                       kfree(adapter->jump_tables[i]->input);
+                       kfree(adapter->jump_tables[i]->mask);
+               }
+               kfree(adapter->jump_tables[i]);
+       }
+
        kfree(adapter->mac_table);
        disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
        free_netdev(netdev);
index b2125e358f7b798302e4bff9ffb1dcc624e8a319..a0cb84381cd060dfb2eae7d3f9e10eeef0cd8752 100644 (file)
@@ -314,8 +314,8 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
                break;
        }
 
-       if (vflre & (1 << vf_shift)) {
-               IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
+       if (vflre & BIT(vf_shift)) {
+               IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), BIT(vf_shift));
                hw->mbx.stats.rsts++;
                return 0;
        }
index 60adde55a8c36c3ff525d6de3cfe6549cbd9015f..a8bed3d887f7155468b3fb49efcde821cfaa5a14 100644 (file)
@@ -38,6 +38,12 @@ struct ixgbe_mat_field {
        unsigned int type;
 };
 
+struct ixgbe_jump_table {
+       struct ixgbe_mat_field *mat;
+       struct ixgbe_fdir_filter *input;
+       union ixgbe_atr_input *mask;
+};
+
 static inline int ixgbe_mat_prgm_sip(struct ixgbe_fdir_filter *input,
                                     union ixgbe_atr_input *mask,
                                     u32 val, u32 m)
index cdf4c38008015739e29fb7286b5bf2a1783867c3..cc735ec3e045f74facb025ebdf0a3d147d01f065 100644 (file)
 #define IXGBE_PE                               0xE0    /* Port expander addr */
 #define IXGBE_PE_OUTPUT                                1       /* Output reg offset */
 #define IXGBE_PE_CONFIG                                3       /* Config reg offset */
-#define IXGBE_PE_BIT1                          (1 << 1)
+#define IXGBE_PE_BIT1                          BIT(1)
 
 /* Flow control defines */
 #define IXGBE_TAF_SYM_PAUSE                  0x400
index bdc8fdcc07a50c3b48261bc9a396acf9c12a158c..e5431bfe3339be0214ac58d158544aa9c82d8105 100644 (file)
@@ -396,7 +396,7 @@ static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb)
                if (incval > 0x00FFFFFFULL)
                        e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n");
                IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
-                               (1 << IXGBE_INCPER_SHIFT_82599) |
+                               BIT(IXGBE_INCPER_SHIFT_82599) |
                                ((u32)incval & 0x00FFFFFFUL));
                break;
        default:
@@ -1114,7 +1114,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
                incval >>= IXGBE_INCVAL_SHIFT_82599;
                cc.shift -= IXGBE_INCVAL_SHIFT_82599;
                IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
-                               (1 << IXGBE_INCPER_SHIFT_82599) | incval);
+                               BIT(IXGBE_INCPER_SHIFT_82599) | incval);
                break;
        default:
                /* other devices aren't supported */
index adcf00002483f2b1510b64287e335338edc115a7..c5caacdd193d39dca9e10287ccaac3964ada5e4e 100644 (file)
@@ -406,7 +406,7 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
                vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
                vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
                mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
-               mta_reg |= (1 << vector_bit);
+               mta_reg |= BIT(vector_bit);
                IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
        }
        vmolr |= IXGBE_VMOLR_ROMPE;
@@ -433,7 +433,7 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
                        vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
                        vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
                        mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
-                       mta_reg |= (1 << vector_bit);
+                       mta_reg |= BIT(vector_bit);
                        IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
                }
 
@@ -536,9 +536,9 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
                /* enable or disable receive depending on error */
                vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
                if (err)
-                       vfre &= ~(1 << vf_shift);
+                       vfre &= ~BIT(vf_shift);
                else
-                       vfre |= 1 << vf_shift;
+                       vfre |= BIT(vf_shift);
                IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre);
 
                if (err) {
@@ -592,8 +592,8 @@ static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf)
        u32 vlvfb_mask, pool_mask, i;
 
        /* create mask for VF and other pools */
-       pool_mask = ~(1 << (VMDQ_P(0) % 32));
-       vlvfb_mask = 1 << (vf % 32);
+       pool_mask = ~BIT(VMDQ_P(0) % 32);
+       vlvfb_mask = BIT(vf % 32);
 
        /* post increment loop, covers VLVF_ENTRIES - 1 to 0 */
        for (i = IXGBE_VLVF_ENTRIES; i--;) {
@@ -629,7 +629,7 @@ static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf)
                        goto update_vlvfb;
 
                vid = vlvf & VLAN_VID_MASK;
-               mask = 1 << (vid % 32);
+               mask = BIT(vid % 32);
 
                /* clear bit from VFTA */
                vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid / 32));
@@ -813,7 +813,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
 
        /* enable transmit for vf */
        reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
-       reg |= 1 << vf_shift;
+       reg |= BIT(vf_shift);
        IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
 
        /* force drop enable for all VF Rx queues */
@@ -821,7 +821,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
 
        /* enable receive for vf */
        reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
-       reg |= 1 << vf_shift;
+       reg |= BIT(vf_shift);
        /*
         * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
         * For more info take a look at ixgbe_set_vf_lpe
@@ -837,7 +837,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
 
 #endif /* CONFIG_FCOE */
                if (pf_max_frame > ETH_FRAME_LEN)
-                       reg &= ~(1 << vf_shift);
+                       reg &= ~BIT(vf_shift);
        }
        IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
 
@@ -846,7 +846,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
 
        /* Enable counting of spoofed packets in the SSVPC register */
        reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
-       reg |= (1 << vf_shift);
+       reg |= BIT(vf_shift);
        IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
 
        /*
@@ -908,8 +908,6 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
        u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
        u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
        u8 tcs = netdev_get_num_tc(adapter->netdev);
-       struct ixgbe_hw *hw = &adapter->hw;
-       int err;
 
        if (adapter->vfinfo[vf].pf_vlan || tcs) {
                e_warn(drv,
@@ -923,19 +921,7 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
        if (!vid && !add)
                return 0;
 
-       err = ixgbe_set_vf_vlan(adapter, add, vid, vf);
-       if (err)
-               return err;
-
-       if (adapter->vfinfo[vf].spoofchk_enabled)
-               hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
-
-       if (add)
-               adapter->vfinfo[vf].vlan_count++;
-       else if (adapter->vfinfo[vf].vlan_count)
-               adapter->vfinfo[vf].vlan_count--;
-
-       return 0;
+       return ixgbe_set_vf_vlan(adapter, add, vid, vf);
 }
 
 static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
@@ -964,8 +950,11 @@ static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
                 * If the VF is allowed to set MAC filters then turn off
                 * anti-spoofing to avoid false positives.
                 */
-               if (adapter->vfinfo[vf].spoofchk_enabled)
-                       ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
+               if (adapter->vfinfo[vf].spoofchk_enabled) {
+                       struct ixgbe_hw *hw = &adapter->hw;
+
+                       hw->mac.ops.set_mac_anti_spoofing(hw, false, vf);
+               }
        }
 
        err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac);
@@ -1321,9 +1310,6 @@ static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf,
 
        ixgbe_set_vmvir(adapter, vlan, qos, vf);
        ixgbe_set_vmolr(hw, vf, false);
-       if (adapter->vfinfo[vf].spoofchk_enabled)
-               hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
-       adapter->vfinfo[vf].vlan_count++;
 
        /* enable hide vlan on X550 */
        if (hw->mac.type >= ixgbe_mac_X550)
@@ -1356,9 +1342,6 @@ static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf)
        ixgbe_set_vf_vlan(adapter, true, 0, vf);
        ixgbe_clear_vmvir(adapter, vf);
        ixgbe_set_vmolr(hw, vf, true);
-       hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
-       if (adapter->vfinfo[vf].vlan_count)
-               adapter->vfinfo[vf].vlan_count--;
 
        /* disable hide VLAN on X550 */
        if (hw->mac.type >= ixgbe_mac_X550)
@@ -1525,27 +1508,34 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
 int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       int vf_target_reg = vf >> 3;
-       int vf_target_shift = vf % 8;
        struct ixgbe_hw *hw = &adapter->hw;
-       u32 regval;
 
        if (vf >= adapter->num_vfs)
                return -EINVAL;
 
        adapter->vfinfo[vf].spoofchk_enabled = setting;
 
-       regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
-       regval &= ~(1 << vf_target_shift);
-       regval |= (setting << vf_target_shift);
-       IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval);
-
-       if (adapter->vfinfo[vf].vlan_count) {
-               vf_target_shift += IXGBE_SPOOF_VLANAS_SHIFT;
-               regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
-               regval &= ~(1 << vf_target_shift);
-               regval |= (setting << vf_target_shift);
-               IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval);
+       /* configure MAC spoofing */
+       hw->mac.ops.set_mac_anti_spoofing(hw, setting, vf);
+
+       /* configure VLAN spoofing */
+       hw->mac.ops.set_vlan_anti_spoofing(hw, setting, vf);
+
+       /* Ensure the LLDP and FC EtherType filters are set up for Ethertype
+        * antispoofing before calling set_ethertype_anti_spoofing for this VF
+        */
+       if (hw->mac.ops.set_ethertype_anti_spoofing) {
+               IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
+                               (IXGBE_ETQF_FILTER_EN    |
+                                IXGBE_ETQF_TX_ANTISPOOF |
+                                IXGBE_ETH_P_LLDP));
+
+               IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
+                               (IXGBE_ETQF_FILTER_EN |
+                                IXGBE_ETQF_TX_ANTISPOOF |
+                                ETH_P_PAUSE));
+
+               hw->mac.ops.set_ethertype_anti_spoofing(hw, setting, vf);
        }
 
        return 0;
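
With the vlan_count tracking gone, a single ndo toggle now drives MAC, VLAN, and (where the MAC provides the hook) ethertype anti-spoofing together, as in the hunk above. A userspace sketch of that fan-out shape, with function pointers standing in for the hw->mac.ops hooks; all names below are stand-ins:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct mac_ops {
        void (*set_mac_anti_spoofing)(bool on, int vf);
        void (*set_vlan_anti_spoofing)(bool on, int vf);
        void (*set_ethertype_anti_spoofing)(bool on, int vf); /* optional */
    };

    static void set_spoofchk(const struct mac_ops *ops, int vf, bool on)
    {
        ops->set_mac_anti_spoofing(on, vf);
        ops->set_vlan_anti_spoofing(on, vf);
        if (ops->set_ethertype_anti_spoofing)   /* not on every MAC */
            ops->set_ethertype_anti_spoofing(on, vf);
    }

    static void mac_hook(bool on, int vf)  { printf("mac  %d %d\n", on, vf); }
    static void vlan_hook(bool on, int vf) { printf("vlan %d %d\n", on, vf); }

    int main(void)
    {
        struct mac_ops ops = { mac_hook, vlan_hook, NULL };

        set_spoofchk(&ops, 0, true);
        return 0;
    }
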
index ba3b837c7e9d3b06a69583c314441fe8fa2f57bc..da3d8358fee0c9b981a9c61b8c1403a24dac1bfb 100644 (file)
 #define IXGBE_SUBDEV_ID_82599_RNDC       0x1F72
 #define IXGBE_SUBDEV_ID_82599_560FLR     0x17D0
 #define IXGBE_SUBDEV_ID_82599_SP_560FLR  0x211B
+#define IXGBE_SUBDEV_ID_82599_LOM_SNAP6                0x2159
+#define IXGBE_SUBDEV_ID_82599_SFP_1OCP         0x000D
+#define IXGBE_SUBDEV_ID_82599_SFP_2OCP         0x0008
+#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1     0x8976
+#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2     0x06EE
 #define IXGBE_SUBDEV_ID_82599_ECNA_DP    0x0470
-#define IXGBE_SUBDEV_ID_82599_LOM_SFP    0x8976
 #define IXGBE_DEV_ID_82599_SFP_EM        0x1507
 #define IXGBE_DEV_ID_82599_SFP_SF2       0x154D
 #define IXGBE_DEV_ID_82599EN_SFP         0x1557
 #define IXGBE_DEV_ID_X550EM_A_SFP      0x15CE
 
 /* VF Device IDs */
-#define IXGBE_DEV_ID_X550_VF_HV        0x1564
-#define IXGBE_DEV_ID_X550_VF           0x1565
-#define IXGBE_DEV_ID_X550EM_X_VF       0x15A8
-#define IXGBE_DEV_ID_X550EM_X_VF_HV    0x15A9
-#define IXGBE_DEV_ID_82599_VF           0x10ED
-#define IXGBE_DEV_ID_X540_VF            0x1515
+#define IXGBE_DEV_ID_82599_VF          0x10ED
+#define IXGBE_DEV_ID_X540_VF           0x1515
 #define IXGBE_DEV_ID_X550_VF           0x1565
 #define IXGBE_DEV_ID_X550EM_X_VF       0x15A8
 #define IXGBE_DEV_ID_X550EM_A_VF       0x15C5
@@ -548,6 +548,7 @@ struct ixgbe_thermal_sensor_data {
 /* DCB registers */
 #define MAX_TRAFFIC_CLASS        8
 #define X540_TRAFFIC_CLASS       4
+#define DEF_TRAFFIC_CLASS        1
 #define IXGBE_RMCS      0x03D00
 #define IXGBE_DPMCS     0x07F40
 #define IXGBE_PDPMCS    0x0CD00
@@ -697,16 +698,16 @@ struct ixgbe_thermal_sensor_data {
 #define IXGBE_FCDMARW   0x02420 /* FC Receive DMA RW */
 #define IXGBE_FCINVST0  0x03FC0 /* FC Invalid DMA Context Status Reg 0 */
 #define IXGBE_FCINVST(_i)       (IXGBE_FCINVST0 + ((_i) * 4))
-#define IXGBE_FCBUFF_VALID      (1 << 0)   /* DMA Context Valid */
-#define IXGBE_FCBUFF_BUFFSIZE   (3 << 3)   /* User Buffer Size */
-#define IXGBE_FCBUFF_WRCONTX    (1 << 7)   /* 0: Initiator, 1: Target */
+#define IXGBE_FCBUFF_VALID      BIT(0)    /* DMA Context Valid */
+#define IXGBE_FCBUFF_BUFFSIZE   (3u << 3) /* User Buffer Size */
+#define IXGBE_FCBUFF_WRCONTX    BIT(7)    /* 0: Initiator, 1: Target */
 #define IXGBE_FCBUFF_BUFFCNT    0x0000ff00 /* Number of User Buffers */
 #define IXGBE_FCBUFF_OFFSET     0xffff0000 /* User Buffer Offset */
 #define IXGBE_FCBUFF_BUFFSIZE_SHIFT  3
 #define IXGBE_FCBUFF_BUFFCNT_SHIFT   8
 #define IXGBE_FCBUFF_OFFSET_SHIFT    16
-#define IXGBE_FCDMARW_WE        (1 << 14)   /* Write enable */
-#define IXGBE_FCDMARW_RE        (1 << 15)   /* Read enable */
+#define IXGBE_FCDMARW_WE        BIT(14)   /* Write enable */
+#define IXGBE_FCDMARW_RE        BIT(15)   /* Read enable */
 #define IXGBE_FCDMARW_FCOESEL   0x000001ff  /* FC X_ID: 11 bits */
 #define IXGBE_FCDMARW_LASTSIZE  0xffff0000  /* Last User Buffer Size */
 #define IXGBE_FCDMARW_LASTSIZE_SHIFT 16
@@ -723,23 +724,23 @@ struct ixgbe_thermal_sensor_data {
 #define IXGBE_FCFLT     0x05108 /* FC FLT Context */
 #define IXGBE_FCFLTRW   0x05110 /* FC Filter RW Control */
 #define IXGBE_FCPARAM   0x051d8 /* FC Offset Parameter */
-#define IXGBE_FCFLT_VALID       (1 << 0)   /* Filter Context Valid */
-#define IXGBE_FCFLT_FIRST       (1 << 1)   /* Filter First */
+#define IXGBE_FCFLT_VALID       BIT(0)   /* Filter Context Valid */
+#define IXGBE_FCFLT_FIRST       BIT(1)   /* Filter First */
 #define IXGBE_FCFLT_SEQID       0x00ff0000 /* Sequence ID */
 #define IXGBE_FCFLT_SEQCNT      0xff000000 /* Sequence Count */
-#define IXGBE_FCFLTRW_RVALDT    (1 << 13)  /* Fast Re-Validation */
-#define IXGBE_FCFLTRW_WE        (1 << 14)  /* Write Enable */
-#define IXGBE_FCFLTRW_RE        (1 << 15)  /* Read Enable */
+#define IXGBE_FCFLTRW_RVALDT    BIT(13)  /* Fast Re-Validation */
+#define IXGBE_FCFLTRW_WE        BIT(14)  /* Write Enable */
+#define IXGBE_FCFLTRW_RE        BIT(15)  /* Read Enable */
 /* FCoE Receive Control */
 #define IXGBE_FCRXCTRL  0x05100 /* FC Receive Control */
-#define IXGBE_FCRXCTRL_FCOELLI  (1 << 0)   /* Low latency interrupt */
-#define IXGBE_FCRXCTRL_SAVBAD   (1 << 1)   /* Save Bad Frames */
-#define IXGBE_FCRXCTRL_FRSTRDH  (1 << 2)   /* EN 1st Read Header */
-#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3)   /* EN Last Header in Seq */
-#define IXGBE_FCRXCTRL_ALLH     (1 << 4)   /* EN All Headers */
-#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5)   /* EN 1st Seq. Header */
-#define IXGBE_FCRXCTRL_ICRC     (1 << 6)   /* Ignore Bad FC CRC */
-#define IXGBE_FCRXCTRL_FCCRCBO  (1 << 7)   /* FC CRC Byte Ordering */
+#define IXGBE_FCRXCTRL_FCOELLI  BIT(0)   /* Low latency interrupt */
+#define IXGBE_FCRXCTRL_SAVBAD   BIT(1)   /* Save Bad Frames */
+#define IXGBE_FCRXCTRL_FRSTRDH  BIT(2)   /* EN 1st Read Header */
+#define IXGBE_FCRXCTRL_LASTSEQH BIT(3)   /* EN Last Header in Seq */
+#define IXGBE_FCRXCTRL_ALLH     BIT(4)   /* EN All Headers */
+#define IXGBE_FCRXCTRL_FRSTSEQH BIT(5)   /* EN 1st Seq. Header */
+#define IXGBE_FCRXCTRL_ICRC     BIT(6)   /* Ignore Bad FC CRC */
+#define IXGBE_FCRXCTRL_FCCRCBO  BIT(7)   /* FC CRC Byte Ordering */
 #define IXGBE_FCRXCTRL_FCOEVER  0x00000f00 /* FCoE Version: 4 bits */
 #define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8
 /* FCoE Redirection */
@@ -1060,15 +1061,9 @@ struct ixgbe_thermal_sensor_data {
 #define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4))
 #define IXGBE_TDPROBE     0x07F20
 #define IXGBE_TXBUFCTRL   0x0C600
-#define IXGBE_TXBUFDATA0  0x0C610
-#define IXGBE_TXBUFDATA1  0x0C614
-#define IXGBE_TXBUFDATA2  0x0C618
-#define IXGBE_TXBUFDATA3  0x0C61C
+#define IXGBE_TXBUFDATA(_i) (0x0C610 + ((_i) * 4)) /* 4 of these (0-3) */
 #define IXGBE_RXBUFCTRL   0x03600
-#define IXGBE_RXBUFDATA0  0x03610
-#define IXGBE_RXBUFDATA1  0x03614
-#define IXGBE_RXBUFDATA2  0x03618
-#define IXGBE_RXBUFDATA3  0x0361C
+#define IXGBE_RXBUFDATA(_i) (0x03610 + ((_i) * 4)) /* 4 of these (0-3) */
 #define IXGBE_PCIE_DIAG(_i)     (0x11090 + ((_i) * 4)) /* 8 of these */
 #define IXGBE_RFVAL     0x050A4
 #define IXGBE_MDFTC1    0x042B8
@@ -1131,6 +1126,7 @@ struct ixgbe_thermal_sensor_data {
 #define IXGBE_XPCSS     0x04290
 #define IXGBE_MFLCN     0x04294
 #define IXGBE_SERDESC   0x04298
+#define IXGBE_MAC_SGMII_BUSY 0x04298
 #define IXGBE_MACS      0x0429C
 #define IXGBE_AUTOC     0x042A0
 #define IXGBE_LINKS     0x042A4
@@ -1255,20 +1251,20 @@ struct ixgbe_thermal_sensor_data {
 #define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
 #define IXGBE_DCA_RXCTRL_CPUID_MASK_82599  0xFF000000 /* Rx CPUID Mask */
 #define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */
-#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
-#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
-#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
-#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
-#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */
-#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* DCA Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* DCA Rx Desc header enable */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* DCA Rx Desc payload enable */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* DCA Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DATA_WRO_EN BIT(13) /* Rx wr data Relax Order */
+#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN BIT(15) /* Rx wr header RO */
 
 #define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
 #define IXGBE_DCA_TXCTRL_CPUID_MASK_82599  0xFF000000 /* Tx CPUID Mask */
 #define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */
-#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
-#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
-#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */
-#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_WRO_EN BIT(11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */
 #define IXGBE_DCA_MAX_QUEUES_82598   16 /* DCA regs only on 16 queues */
 
 /* MSCA Bit Masks */
@@ -1747,7 +1743,7 @@ enum {
 #define IXGBE_ETQF_TX_ANTISPOOF        0x20000000 /* bit 29 */
 #define IXGBE_ETQF_1588         0x40000000 /* bit 30 */
 #define IXGBE_ETQF_FILTER_EN    0x80000000 /* bit 31 */
-#define IXGBE_ETQF_POOL_ENABLE   (1 << 26) /* bit 26 */
+#define IXGBE_ETQF_POOL_ENABLE   BIT(26) /* bit 26 */
 #define IXGBE_ETQF_POOL_SHIFT          20
 
 #define IXGBE_ETQS_RX_QUEUE     0x007F0000 /* bits 22:16 */
@@ -1873,20 +1869,20 @@ enum {
 #define IXGBE_AUTOC_1G_PMA_PMD_SHIFT   9
 #define IXGBE_AUTOC_10G_PMA_PMD_MASK   0x00000180
 #define IXGBE_AUTOC_10G_PMA_PMD_SHIFT  7
-#define IXGBE_AUTOC_10G_XAUI   (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_10G_KX4    (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_10G_CX4    (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_BX      (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_KX      (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_SFI     (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_KX_BX   (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_XAUI   (0u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_KX4    (1u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_CX4    (2u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_BX      (0u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_KX      (1u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_SFI     (0u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_KX_BX   (1u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
 
 #define IXGBE_AUTOC2_UPPER_MASK  0xFFFF0000
 #define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK  0x00030000
 #define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16
-#define IXGBE_AUTOC2_10G_KR  (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_KR  (0u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_XFI (1u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_SFI (2u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
 #define IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK  0x50000000
 #define IXGBE_AUTOC2_LINK_DISABLE_MASK        0x70000000
 
@@ -2123,6 +2119,7 @@ enum {
 #define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET  0x3
 #define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP  0x1
 #define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS  0x2
+#define IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR      BIT(7)
 #define IXGBE_FW_LESM_PARAMETERS_PTR     0x2
 #define IXGBE_FW_LESM_STATE_1            0x1
 #define IXGBE_FW_LESM_STATE_ENABLED      0x8000 /* LESM Enable bit */
@@ -2838,15 +2835,15 @@ struct ixgbe_adv_tx_context_desc {
 #define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
 #define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */
 #define IXGBE_ADVTXT_TUCMD_FCOE      0x00008000       /* FCoE Frame Type */
-#define IXGBE_ADVTXD_FCOEF_EOF_MASK  (0x3 << 10)      /* FC EOF index */
-#define IXGBE_ADVTXD_FCOEF_SOF       ((1 << 2) << 10) /* FC SOF index */
-#define IXGBE_ADVTXD_FCOEF_PARINC    ((1 << 3) << 10) /* Rel_Off in F_CTL */
-#define IXGBE_ADVTXD_FCOEF_ORIE      ((1 << 4) << 10) /* Orientation: End */
-#define IXGBE_ADVTXD_FCOEF_ORIS      ((1 << 5) << 10) /* Orientation: Start */
-#define IXGBE_ADVTXD_FCOEF_EOF_N     (0x0 << 10)      /* 00: EOFn */
-#define IXGBE_ADVTXD_FCOEF_EOF_T     (0x1 << 10)      /* 01: EOFt */
-#define IXGBE_ADVTXD_FCOEF_EOF_NI    (0x2 << 10)      /* 10: EOFni */
-#define IXGBE_ADVTXD_FCOEF_EOF_A     (0x3 << 10)      /* 11: EOFa */
+#define IXGBE_ADVTXD_FCOEF_SOF       (BIT(2) << 10) /* FC SOF index */
+#define IXGBE_ADVTXD_FCOEF_PARINC    (BIT(3) << 10) /* Rel_Off in F_CTL */
+#define IXGBE_ADVTXD_FCOEF_ORIE      (BIT(4) << 10) /* Orientation: End */
+#define IXGBE_ADVTXD_FCOEF_ORIS      (BIT(5) << 10) /* Orientation: Start */
+#define IXGBE_ADVTXD_FCOEF_EOF_N     (0u << 10)  /* 00: EOFn */
+#define IXGBE_ADVTXD_FCOEF_EOF_T     (1u << 10)  /* 01: EOFt */
+#define IXGBE_ADVTXD_FCOEF_EOF_NI    (2u << 10)  /* 10: EOFni */
+#define IXGBE_ADVTXD_FCOEF_EOF_A     (3u << 10)  /* 11: EOFa */
+#define IXGBE_ADVTXD_FCOEF_EOF_MASK  (3u << 10)  /* FC EOF index */
 #define IXGBE_ADVTXD_L4LEN_SHIFT     8  /* Adv ctxt L4LEN shift */
 #define IXGBE_ADVTXD_MSS_SHIFT       16  /* Adv ctxt MSS shift */
 
@@ -3581,7 +3578,7 @@ struct ixgbe_info {
 
 #define IXGBE_FUSES0_GROUP(_i)         (0x11158 + ((_i) * 4))
 #define IXGBE_FUSES0_300MHZ            BIT(5)
-#define IXGBE_FUSES0_REV_MASK          (3 << 6)
+#define IXGBE_FUSES0_REV_MASK          (3u << 6)
 
 #define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010)
 #define IXGBE_KRM_LINK_CTRL_1(P)       ((P) ? 0x820C : 0x420C)
@@ -3595,25 +3592,25 @@ struct ixgbe_info {
 #define IXGBE_KRM_TX_COEFF_CTRL_1(P)   ((P) ? 0x9520 : 0x5520)
 #define IXGBE_KRM_RX_ANA_CTL(P)                ((P) ? 0x9A00 : 0x5A00)
 
-#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B           (1 << 9)
-#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS         (1 << 11)
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B           BIT(9)
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS         BIT(11)
 
-#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK    (0x7 << 8)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G      (2 << 8)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G     (4 << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK    (7u << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G      (2u << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G     (4u << 8)
 #define IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN         BIT(12)
 #define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN     BIT(13)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ          (1 << 14)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC          (1 << 15)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX           (1 << 16)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR           (1 << 18)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX          (1 << 24)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR          (1 << 26)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE           (1 << 29)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART          (1 << 31)
-
-#define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE                  (1 << 28)
-#define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE                  (1 << 29)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ          BIT(14)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC          BIT(15)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX           BIT(16)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR           BIT(18)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX          BIT(24)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR          BIT(26)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE           BIT(29)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART          BIT(31)
+
+#define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE                  BIT(28)
+#define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE                  BIT(29)
 
 #define IXGBE_KRM_AN_CNTL_8_LINEAR                     BIT(0)
 #define IXGBE_KRM_AN_CNTL_8_LIMITING                   BIT(1)
@@ -3621,28 +3618,28 @@ struct ixgbe_info {
 #define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D       BIT(12)
 #define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D                BIT(19)
 
-#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN                        (1 << 6)
-#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN           (1 << 15)
-#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN          (1 << 16)
+#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN                        BIT(6)
+#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN           BIT(15)
+#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN          BIT(16)
 
-#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL  (1 << 4)
-#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS   (1 << 2)
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL  BIT(4)
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS   BIT(2)
 
-#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK      (0x3 << 16)
+#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK      (3u << 16)
 
-#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN     (1 << 1)
-#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN      (1 << 2)
-#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN             (1 << 3)
-#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN             (1 << 31)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN     BIT(1)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN      BIT(2)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN             BIT(3)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN             BIT(31)
 
 #define IXGBE_KX4_LINK_CNTL_1                          0x4C
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX           (1 << 16)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4          (1 << 17)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX          (1 << 24)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4         (1 << 25)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE           (1 << 29)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP       (1 << 30)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART          (1 << 31)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX           BIT(16)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4          BIT(17)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX          BIT(24)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4         BIT(25)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE           BIT(29)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP       BIT(30)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART          BIT(31)
 
 #define IXGBE_SB_IOSF_INDIRECT_CTRL            0x00011144
 #define IXGBE_SB_IOSF_INDIRECT_DATA            0x00011148
@@ -3658,7 +3655,7 @@ struct ixgbe_info {
 #define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT 28
 #define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_MASK  0x7
 #define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT          31
-#define IXGBE_SB_IOSF_CTRL_BUSY                (1 << IXGBE_SB_IOSF_CTRL_BUSY_SHIFT)
+#define IXGBE_SB_IOSF_CTRL_BUSY                BIT(IXGBE_SB_IOSF_CTRL_BUSY_SHIFT)
 #define IXGBE_SB_IOSF_TARGET_KR_PHY    0
 #define IXGBE_SB_IOSF_TARGET_KX4_UNIPHY        1
 #define IXGBE_SB_IOSF_TARGET_KX4_PCS0  2
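
Editor's note: most of the churn in this header is mechanical — (1 << n) becomes BIT(n), and multi-bit fields gain a u suffix. The payoff is signedness: in C, 1 << 31 shifts into the sign bit of a signed int (undefined behavior), and the signed result sign-extends when widened, whereas the kernel's BIT(n) expands to (1UL << (n)). A standalone demonstration:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1UL << (n))   /* matches the kernel's definition */

int main(void)
{
	/* 1 << 31 is a signed-int shift into the sign bit (undefined
	 * behavior); in practice it also sign-extends when widened:
	 */
	int64_t bad   = (int64_t)(1 << 31);  /* typically 0xffffffff80000000 */
	uint64_t good = BIT(31);             /* 0x0000000080000000 */

	printf("signed : 0x%016llx\n", (unsigned long long)bad);
	printf("BIT(31): 0x%016llx\n", (unsigned long long)good);
	return 0;
}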
index 40824d85d807f3bcf3a181226aa01465a6c2ab6c..f2b1d48a16c3ac7234ffd26b3ab1aef87e2988b0 100644 (file)
@@ -214,8 +214,8 @@ s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
                eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
                eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
                                    IXGBE_EEC_SIZE_SHIFT);
-               eeprom->word_size = 1 << (eeprom_size +
-                                         IXGBE_EEPROM_WORD_SIZE_SHIFT);
+               eeprom->word_size = BIT(eeprom_size +
+                                       IXGBE_EEPROM_WORD_SIZE_SHIFT);
 
                hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
                       eeprom->type, eeprom->word_size);
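
Editor's note: the EEC SIZE field is an exponent — the EEPROM holds 2^(SIZE + IXGBE_EEPROM_WORD_SIZE_SHIFT) 16-bit words, which is why BIT() is the natural replacement here (the same change is repeated for X550 just below). A worked standalone example, assuming the shift is 6 as in this driver's headers:

#include <stdio.h>

#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6  /* assumed per ixgbe_type.h */
#define BIT(n) (1UL << (n))

int main(void)
{
	unsigned int eeprom_size = 2; /* sample EEC.SIZE field value */
	unsigned long word_size =
		BIT(eeprom_size + IXGBE_EEPROM_WORD_SIZE_SHIFT);

	printf("word_size = %lu\n", word_size); /* 2^(2+6) = 256 words */
	return 0;
}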
index c71e93ed44512d84d41639185759df19cf028050..19b75cd986820380507f11f6031903e51348d908 100644 (file)
@@ -335,8 +335,8 @@ static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
                eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
                eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
                                    IXGBE_EEC_SIZE_SHIFT);
-               eeprom->word_size = 1 << (eeprom_size +
-                                         IXGBE_EEPROM_WORD_SIZE_SHIFT);
+               eeprom->word_size = BIT(eeprom_size +
+                                       IXGBE_EEPROM_WORD_SIZE_SHIFT);
 
                hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
                       eeprom->type, eeprom->word_size);
@@ -2646,9 +2646,9 @@ static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
 
        pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
        if (enable)
-               pfvfspoof |= (1 << vf_target_shift);
+               pfvfspoof |= BIT(vf_target_shift);
        else
-               pfvfspoof &= ~(1 << vf_target_shift);
+               pfvfspoof &= ~BIT(vf_target_shift);
 
        IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
 }
@@ -2765,7 +2765,7 @@ static s32 ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
                        ixgbe_release_swfw_sync_X540(hw, hmask);
                if (status != IXGBE_ERR_TOKEN_RETRY)
                        return status;
-               udelay(FW_PHY_TOKEN_DELAY * 1000);
+               msleep(FW_PHY_TOKEN_DELAY);
        }
 
        return status;
@@ -2908,7 +2908,7 @@ static const struct ixgbe_mac_operations mac_ops_X550EM_x = {
        .get_media_type         = &ixgbe_get_media_type_X550em,
        .get_san_mac_addr       = NULL,
        .get_wwn_prefix         = NULL,
-       .setup_link             = NULL, /* defined later */
+       .setup_link             = &ixgbe_setup_mac_link_X540,
        .get_link_capabilities  = &ixgbe_get_link_capabilities_X550em,
        .get_bus_info           = &ixgbe_get_bus_info_X550em,
        .setup_sfp              = ixgbe_setup_sfp_modules_X550em,
@@ -2932,7 +2932,7 @@ static struct ixgbe_mac_operations mac_ops_x550em_a = {
        .setup_sfp              = ixgbe_setup_sfp_modules_X550em,
        .acquire_swfw_sync      = ixgbe_acquire_swfw_sync_x550em_a,
        .release_swfw_sync      = ixgbe_release_swfw_sync_x550em_a,
-       .setup_fc               = ixgbe_setup_fc_generic,
+       .setup_fc               = ixgbe_setup_fc_x550em,
        .read_iosf_sb_reg       = ixgbe_read_iosf_sb_reg_x550a,
        .write_iosf_sb_reg      = ixgbe_write_iosf_sb_reg_x550a,
 };
index 58434584b16d9034f762ba85c71ab8251642e66e..ae09d60e7b67a8d5b765c7327d819e4f2becf5cb 100644 (file)
 #define IXGBE_DEV_ID_X550_VF           0x1565
 #define IXGBE_DEV_ID_X550EM_X_VF       0x15A8
 
+#define IXGBE_DEV_ID_82599_VF_HV       0x152E
+#define IXGBE_DEV_ID_X540_VF_HV                0x1530
+#define IXGBE_DEV_ID_X550_VF_HV                0x1564
+#define IXGBE_DEV_ID_X550EM_X_VF_HV    0x15A9
+
 #define IXGBE_VF_IRQ_CLEAR_MASK                7
 #define IXGBE_VF_MAX_TX_QUEUES         8
 #define IXGBE_VF_MAX_RX_QUEUES         8
@@ -74,7 +79,7 @@ typedef u32 ixgbe_link_speed;
 #define IXGBE_RXDCTL_RLPML_EN  0x00008000
 
 /* DCA Control */
-#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN BIT(11) /* Tx Desc writeback RO bit */
 
 /* PSRTYPE bit definitions */
 #define IXGBE_PSRTYPE_TCPHDR   0x00000010
@@ -296,16 +301,16 @@ struct ixgbe_adv_tx_context_desc {
 #define IXGBE_TXDCTL_SWFLSH            0x04000000 /* Tx Desc. wr-bk flushing */
 #define IXGBE_TXDCTL_WTHRESH_SHIFT     16         /* shift to WTHRESH bits */
 
-#define IXGBE_DCA_RXCTRL_DESC_DCA_EN   (1 << 5)  /* Rx Desc enable */
-#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN   (1 << 6)  /* Rx Desc header ena */
-#define IXGBE_DCA_RXCTRL_DATA_DCA_EN   (1 << 7)  /* Rx Desc payload ena */
-#define IXGBE_DCA_RXCTRL_DESC_RRO_EN   (1 << 9)  /* Rx rd Desc Relax Order */
-#define IXGBE_DCA_RXCTRL_DATA_WRO_EN   (1 << 13) /* Rx wr data Relax Order */
-#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN   (1 << 15) /* Rx wr header RO */
-
-#define IXGBE_DCA_TXCTRL_DESC_DCA_EN   (1 << 5)  /* DCA Tx Desc enable */
-#define IXGBE_DCA_TXCTRL_DESC_RRO_EN   (1 << 9)  /* Tx rd Desc Relax Order */
-#define IXGBE_DCA_TXCTRL_DESC_WRO_EN   (1 << 11) /* Tx Desc writeback RO bit */
-#define IXGBE_DCA_TXCTRL_DATA_RRO_EN   (1 << 13) /* Tx rd data Relax Order */
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN   BIT(5)  /* Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN   BIT(6)  /* Rx Desc header ena */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN   BIT(7)  /* Rx Desc payload ena */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN   BIT(9)  /* Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DATA_WRO_EN   BIT(13) /* Rx wr data Relax Order */
+#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN   BIT(15) /* Rx wr header RO */
+
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN   BIT(5)  /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_DESC_RRO_EN   BIT(9)  /* Tx rd Desc Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_WRO_EN   BIT(11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DATA_RRO_EN   BIT(13) /* Tx rd data Relax Order */
 
 #endif /* _IXGBEVF_DEFINES_H_ */
index d7aa4b203f40a39ccf194c4693012e9a37b65a9f..508e72c5f1c21b44d9176020993542dd6dfb4b6b 100644 (file)
 
 #define IXGBE_ALL_RAR_ENTRIES 16
 
+enum {NETDEV_STATS, IXGBEVF_STATS};
+
 struct ixgbe_stats {
        char stat_string[ETH_GSTRING_LEN];
-       struct {
-               int sizeof_stat;
-               int stat_offset;
-               int base_stat_offset;
-               int saved_reset_offset;
-       };
+       int type;
+       int sizeof_stat;
+       int stat_offset;
 };
 
-#define IXGBEVF_STAT(m, b, r) { \
-       .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
-       .stat_offset = offsetof(struct ixgbevf_adapter, m), \
-       .base_stat_offset = offsetof(struct ixgbevf_adapter, b), \
-       .saved_reset_offset = offsetof(struct ixgbevf_adapter, r) \
+#define IXGBEVF_STAT(_name, _stat) { \
+       .stat_string = _name, \
+       .type = IXGBEVF_STATS, \
+       .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, _stat), \
+       .stat_offset = offsetof(struct ixgbevf_adapter, _stat) \
 }
 
-#define IXGBEVF_ZSTAT(m) { \
-       .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
-       .stat_offset = offsetof(struct ixgbevf_adapter, m), \
-       .base_stat_offset = -1, \
-       .saved_reset_offset = -1 \
+#define IXGBEVF_NETDEV_STAT(_net_stat) { \
+       .stat_string = #_net_stat, \
+       .type = NETDEV_STATS, \
+       .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
+       .stat_offset = offsetof(struct net_device_stats, _net_stat) \
 }
 
-static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
-       {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc,
-                                   stats.saved_reset_vfgprc)},
-       {"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc,
-                                   stats.saved_reset_vfgptc)},
-       {"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc,
-                                 stats.saved_reset_vfgorc)},
-       {"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc,
-                                 stats.saved_reset_vfgotc)},
-       {"tx_busy", IXGBEVF_ZSTAT(tx_busy)},
-       {"tx_restart_queue", IXGBEVF_ZSTAT(restart_queue)},
-       {"tx_timeout_count", IXGBEVF_ZSTAT(tx_timeout_count)},
-       {"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc,
-                                  stats.saved_reset_vfmprc)},
-       {"rx_csum_offload_errors", IXGBEVF_ZSTAT(hw_csum_rx_error)},
-#ifdef BP_EXTENDED_STATS
-       {"rx_bp_poll_yield", IXGBEVF_ZSTAT(bp_rx_yields)},
-       {"rx_bp_cleaned", IXGBEVF_ZSTAT(bp_rx_cleaned)},
-       {"rx_bp_misses", IXGBEVF_ZSTAT(bp_rx_missed)},
-       {"tx_bp_napi_yield", IXGBEVF_ZSTAT(bp_tx_yields)},
-       {"tx_bp_cleaned", IXGBEVF_ZSTAT(bp_tx_cleaned)},
-       {"tx_bp_misses", IXGBEVF_ZSTAT(bp_tx_missed)},
-#endif
+static struct ixgbe_stats ixgbevf_gstrings_stats[] = {
+       IXGBEVF_NETDEV_STAT(rx_packets),
+       IXGBEVF_NETDEV_STAT(tx_packets),
+       IXGBEVF_NETDEV_STAT(rx_bytes),
+       IXGBEVF_NETDEV_STAT(tx_bytes),
+       IXGBEVF_STAT("tx_busy", tx_busy),
+       IXGBEVF_STAT("tx_restart_queue", restart_queue),
+       IXGBEVF_STAT("tx_timeout_count", tx_timeout_count),
+       IXGBEVF_NETDEV_STAT(multicast),
+       IXGBEVF_STAT("rx_csum_offload_errors", hw_csum_rx_error),
 };
 
-#define IXGBE_QUEUE_STATS_LEN 0
-#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
+#define IXGBEVF_QUEUE_STATS_LEN ( \
+       (((struct ixgbevf_adapter *)netdev_priv(netdev))->num_tx_queues + \
+        ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_rx_queues) * \
+        (sizeof(struct ixgbe_stats) / sizeof(u64)))
+#define IXGBEVF_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbevf_gstrings_stats)
 
-#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
+#define IXGBEVF_STATS_LEN (IXGBEVF_GLOBAL_STATS_LEN + IXGBEVF_QUEUE_STATS_LEN)
 static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
        "Register test  (offline)",
        "Link test   (on/offline)"
 };
 
-#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
+#define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
 
 static int ixgbevf_get_settings(struct net_device *netdev,
                                struct ethtool_cmd *ecmd)
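
Editor's note: the rewritten stats table keeps only a type tag plus offsetof()/FIELD_SIZEOF() bookkeeping, so a single generic loop can pull each value out of either the netdev stats or the adapter structure. A standalone sketch of that table-driven extraction, with invented structures standing in for net_device_stats and ixgbevf_adapter:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum { NETDEV_STATS, ADAPTER_STATS };

struct net_stats { uint64_t rx_packets; };
struct adapter   { uint32_t tx_busy; };

struct stat_desc {
	const char *name;
	int type;
	size_t size;
	size_t offset;
};

static const struct stat_desc table[] = {
	{ "rx_packets", NETDEV_STATS,  sizeof(uint64_t),
	  offsetof(struct net_stats, rx_packets) },
	{ "tx_busy",    ADAPTER_STATS, sizeof(uint32_t),
	  offsetof(struct adapter, tx_busy) },
};

int main(void)
{
	struct net_stats ns = { .rx_packets = 1234 };
	struct adapter   ad = { .tx_busy = 7 };

	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		/* pick the base object from the type tag, then index into
		 * it with the recorded offset and width
		 */
		const char *base = table[i].type == NETDEV_STATS
				   ? (const char *)&ns : (const char *)&ad;
		const char *p = base + table[i].offset;
		uint64_t v = table[i].size == sizeof(uint64_t)
			     ? *(const uint64_t *)p : *(const uint32_t *)p;
		printf("%s = %llu\n", table[i].name, (unsigned long long)v);
	}
	return 0;
}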
@@ -177,7 +166,8 @@ static void ixgbevf_get_regs(struct net_device *netdev,
 
        memset(p, 0, regs_len);
 
-       regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
+       /* generate a number suitable for ethtool's register version */
+       regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id;
 
        /* General Registers */
        regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
@@ -392,13 +382,13 @@ clear_reset:
        return err;
 }
 
-static int ixgbevf_get_sset_count(struct net_device *dev, int stringset)
+static int ixgbevf_get_sset_count(struct net_device *netdev, int stringset)
 {
        switch (stringset) {
        case ETH_SS_TEST:
-               return IXGBE_TEST_LEN;
+               return IXGBEVF_TEST_LEN;
        case ETH_SS_STATS:
-               return IXGBE_GLOBAL_STATS_LEN;
+               return IXGBEVF_STATS_LEN;
        default:
                return -EINVAL;
        }
@@ -408,70 +398,138 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
                                      struct ethtool_stats *stats, u64 *data)
 {
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-       char *base = (char *)adapter;
-       int i;
-#ifdef BP_EXTENDED_STATS
-       u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0,
-           tx_yields = 0, tx_cleaned = 0, tx_missed = 0;
+       struct rtnl_link_stats64 temp;
+       const struct rtnl_link_stats64 *net_stats;
+       unsigned int start;
+       struct ixgbevf_ring *ring;
+       int i, j;
+       char *p;
 
-       for (i = 0; i < adapter->num_rx_queues; i++) {
-               rx_yields += adapter->rx_ring[i]->stats.yields;
-               rx_cleaned += adapter->rx_ring[i]->stats.cleaned;
-               rx_yields += adapter->rx_ring[i]->stats.yields;
-       }
+       ixgbevf_update_stats(adapter);
+       net_stats = dev_get_stats(netdev, &temp);
+       for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
+               switch (ixgbevf_gstrings_stats[i].type) {
+               case NETDEV_STATS:
+                       p = (char *)net_stats +
+                                       ixgbevf_gstrings_stats[i].stat_offset;
+                       break;
+               case IXGBEVF_STATS:
+                       p = (char *)adapter +
+                                       ixgbevf_gstrings_stats[i].stat_offset;
+                       break;
+               default:
+                       data[i] = 0;
+                       continue;
+               }
 
-       for (i = 0; i < adapter->num_tx_queues; i++) {
-               tx_yields += adapter->tx_ring[i]->stats.yields;
-               tx_cleaned += adapter->tx_ring[i]->stats.cleaned;
-               tx_yields += adapter->tx_ring[i]->stats.yields;
+               data[i] = (ixgbevf_gstrings_stats[i].sizeof_stat ==
+                          sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
        }
 
-       adapter->bp_rx_yields = rx_yields;
-       adapter->bp_rx_cleaned = rx_cleaned;
-       adapter->bp_rx_missed = rx_missed;
+       /* populate Tx queue data */
+       for (j = 0; j < adapter->num_tx_queues; j++) {
+               ring = adapter->tx_ring[j];
+               if (!ring) {
+                       data[i++] = 0;
+                       data[i++] = 0;
+#ifdef BP_EXTENDED_STATS
+                       data[i++] = 0;
+                       data[i++] = 0;
+                       data[i++] = 0;
+#endif
+                       continue;
+               }
 
-       adapter->bp_tx_yields = tx_yields;
-       adapter->bp_tx_cleaned = tx_cleaned;
-       adapter->bp_tx_missed = tx_missed;
+               do {
+                       start = u64_stats_fetch_begin_irq(&ring->syncp);
+                       data[i]   = ring->stats.packets;
+                       data[i + 1] = ring->stats.bytes;
+               } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+               i += 2;
+#ifdef BP_EXTENDED_STATS
+               data[i] = ring->stats.yields;
+               data[i + 1] = ring->stats.misses;
+               data[i + 2] = ring->stats.cleaned;
+               i += 3;
 #endif
+       }
 
-       ixgbevf_update_stats(adapter);
-       for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
-               char *p = base + ixgbe_gstrings_stats[i].stat_offset;
-               char *b = base + ixgbe_gstrings_stats[i].base_stat_offset;
-               char *r = base + ixgbe_gstrings_stats[i].saved_reset_offset;
-
-               if (ixgbe_gstrings_stats[i].sizeof_stat == sizeof(u64)) {
-                       if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
-                               data[i] = *(u64 *)p - *(u64 *)b + *(u64 *)r;
-                       else
-                               data[i] = *(u64 *)p;
-               } else {
-                       if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
-                               data[i] = *(u32 *)p - *(u32 *)b + *(u32 *)r;
-                       else
-                               data[i] = *(u32 *)p;
+       /* populate Rx queue data */
+       for (j = 0; j < adapter->num_rx_queues; j++) {
+               ring = adapter->rx_ring[j];
+               if (!ring) {
+                       data[i++] = 0;
+                       data[i++] = 0;
+#ifdef BP_EXTENDED_STATS
+                       data[i++] = 0;
+                       data[i++] = 0;
+                       data[i++] = 0;
+#endif
+                       continue;
                }
+
+               do {
+                       start = u64_stats_fetch_begin_irq(&ring->syncp);
+                       data[i]   = ring->stats.packets;
+                       data[i + 1] = ring->stats.bytes;
+               } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+               i += 2;
+#ifdef BP_EXTENDED_STATS
+               data[i] = ring->stats.yields;
+               data[i + 1] = ring->stats.misses;
+               data[i + 2] = ring->stats.cleaned;
+               i += 3;
+#endif
        }
 }
 
 static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
                                u8 *data)
 {
+       struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        char *p = (char *)data;
        int i;
 
        switch (stringset) {
        case ETH_SS_TEST:
                memcpy(data, *ixgbe_gstrings_test,
-                      IXGBE_TEST_LEN * ETH_GSTRING_LEN);
+                      IXGBEVF_TEST_LEN * ETH_GSTRING_LEN);
                break;
        case ETH_SS_STATS:
-               for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
-                       memcpy(p, ixgbe_gstrings_stats[i].stat_string,
+               for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
+                       memcpy(p, ixgbevf_gstrings_stats[i].stat_string,
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
+
+               for (i = 0; i < adapter->num_tx_queues; i++) {
+                       sprintf(p, "tx_queue_%u_packets", i);
+                       p += ETH_GSTRING_LEN;
+                       sprintf(p, "tx_queue_%u_bytes", i);
+                       p += ETH_GSTRING_LEN;
+#ifdef BP_EXTENDED_STATS
+                       sprintf(p, "tx_queue_%u_bp_napi_yield", i);
+                       p += ETH_GSTRING_LEN;
+                       sprintf(p, "tx_queue_%u_bp_misses", i);
+                       p += ETH_GSTRING_LEN;
+                       sprintf(p, "tx_queue_%u_bp_cleaned", i);
+                       p += ETH_GSTRING_LEN;
+#endif /* BP_EXTENDED_STATS */
+               }
+               for (i = 0; i < adapter->num_rx_queues; i++) {
+                       sprintf(p, "rx_queue_%u_packets", i);
+                       p += ETH_GSTRING_LEN;
+                       sprintf(p, "rx_queue_%u_bytes", i);
+                       p += ETH_GSTRING_LEN;
+#ifdef BP_EXTENDED_STATS
+                       sprintf(p, "rx_queue_%u_bp_poll_yield", i);
+                       p += ETH_GSTRING_LEN;
+                       sprintf(p, "rx_queue_%u_bp_misses", i);
+                       p += ETH_GSTRING_LEN;
+                       sprintf(p, "rx_queue_%u_bp_cleaned", i);
+                       p += ETH_GSTRING_LEN;
+#endif /* BP_EXTENDED_STATS */
+               }
                break;
        }
 }
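
Editor's note: the per-ring packets/bytes above are sampled under a u64_stats seqcount — the writer makes the sequence odd while it updates, and the reader retries whenever the sequence changed across its copy, so a 32-bit reader never reports a torn 64-bit pair. A single-threaded standalone sketch of the retry protocol (the real helpers also issue memory barriers):

#include <stdint.h>
#include <stdio.h>

struct ring_stats {
	unsigned int seq;       /* even = stable, odd = write in progress */
	uint64_t packets, bytes;
};

static unsigned int fetch_begin(const struct ring_stats *s)
{
	unsigned int seq;

	do { seq = s->seq; } while (seq & 1);   /* wait out an active writer */
	return seq;
}

static int fetch_retry(const struct ring_stats *s, unsigned int seq)
{
	return s->seq != seq;                   /* changed => retry the copy */
}

int main(void)
{
	struct ring_stats s = { .seq = 2, .packets = 42, .bytes = 4200 };
	uint64_t pkts, bytes;
	unsigned int start;

	do {
		start = fetch_begin(&s);
		pkts  = s.packets;
		bytes = s.bytes;
	} while (fetch_retry(&s, start));

	printf("packets=%llu bytes=%llu\n",
	       (unsigned long long)pkts, (unsigned long long)bytes);
	return 0;
}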
index 5ac60eefb0cd0e80839e1494d429fae5ec02e051..d5944c391cbbdd6a13094e68a3d7587ed7c91e8e 100644 (file)
@@ -166,10 +166,10 @@ struct ixgbevf_ring {
 
 #define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
 
-#define IXGBE_TX_FLAGS_CSUM            (u32)(1)
-#define IXGBE_TX_FLAGS_VLAN            (u32)(1 << 1)
-#define IXGBE_TX_FLAGS_TSO             (u32)(1 << 2)
-#define IXGBE_TX_FLAGS_IPV4            (u32)(1 << 3)
+#define IXGBE_TX_FLAGS_CSUM            BIT(0)
+#define IXGBE_TX_FLAGS_VLAN            BIT(1)
+#define IXGBE_TX_FLAGS_TSO             BIT(2)
+#define IXGBE_TX_FLAGS_IPV4            BIT(3)
 #define IXGBE_TX_FLAGS_VLAN_MASK       0xffff0000
 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK  0x0000e000
 #define IXGBE_TX_FLAGS_VLAN_SHIFT      16
@@ -422,16 +422,6 @@ struct ixgbevf_adapter {
        unsigned int tx_ring_count;
        unsigned int rx_ring_count;
 
-#ifdef BP_EXTENDED_STATS
-       u64 bp_rx_yields;
-       u64 bp_rx_cleaned;
-       u64 bp_rx_missed;
-
-       u64 bp_tx_yields;
-       u64 bp_tx_cleaned;
-       u64 bp_tx_missed;
-#endif
-
        u8 __iomem *io_addr; /* Mainly for iounmap use */
        u32 link_speed;
        bool link_up;
@@ -460,9 +450,13 @@ enum ixbgevf_state_t {
 
 enum ixgbevf_boards {
        board_82599_vf,
+       board_82599_vf_hv,
        board_X540_vf,
+       board_X540_vf_hv,
        board_X550_vf,
+       board_X550_vf_hv,
        board_X550EM_x_vf,
+       board_X550EM_x_vf_hv,
 };
 
 enum ixgbevf_xcast_modes {
@@ -477,6 +471,12 @@ extern const struct ixgbevf_info ixgbevf_X550_vf_info;
 extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info;
 extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
 
+extern const struct ixgbevf_info ixgbevf_82599_vf_hv_info;
+extern const struct ixgbevf_info ixgbevf_X540_vf_hv_info;
+extern const struct ixgbevf_info ixgbevf_X550_vf_hv_info;
+extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info;
+extern const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops;
+
 /* needed by ethtool.c */
 extern const char ixgbevf_driver_name[];
 extern const char ixgbevf_driver_version[];
index 007cbe09499097fd497cfca0c5eac9ebdb5595e4..5e348b1250901bcfe8549d892f0159141bdc2a37 100644 (file)
@@ -62,10 +62,14 @@ static char ixgbevf_copyright[] =
        "Copyright (c) 2009 - 2015 Intel Corporation.";
 
 static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
-       [board_82599_vf] = &ixgbevf_82599_vf_info,
-       [board_X540_vf]  = &ixgbevf_X540_vf_info,
-       [board_X550_vf]  = &ixgbevf_X550_vf_info,
-       [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
+       [board_82599_vf]        = &ixgbevf_82599_vf_info,
+       [board_82599_vf_hv]     = &ixgbevf_82599_vf_hv_info,
+       [board_X540_vf]         = &ixgbevf_X540_vf_info,
+       [board_X540_vf_hv]      = &ixgbevf_X540_vf_hv_info,
+       [board_X550_vf]         = &ixgbevf_X550_vf_info,
+       [board_X550_vf_hv]      = &ixgbevf_X550_vf_hv_info,
+       [board_X550EM_x_vf]     = &ixgbevf_X550EM_x_vf_info,
+       [board_X550EM_x_vf_hv]  = &ixgbevf_X550EM_x_vf_hv_info,
 };
 
 /* ixgbevf_pci_tbl - PCI Device ID Table
@@ -78,9 +82,13 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
  */
 static const struct pci_device_id ixgbevf_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF_HV), board_82599_vf_hv },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF_HV), board_X540_vf_hv },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF_HV), board_X550_vf_hv },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv},
        /* required last entry */
        {0, }
 };
@@ -1056,7 +1064,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
        if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
            !test_bit(__IXGBEVF_REMOVING, &adapter->state))
                ixgbevf_irq_enable_queues(adapter,
-                                         1 << q_vector->v_idx);
+                                         BIT(q_vector->v_idx));
 
        return 0;
 }
@@ -1158,14 +1166,14 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
                }
 
                /* add q_vector eims value to global eims_enable_mask */
-               adapter->eims_enable_mask |= 1 << v_idx;
+               adapter->eims_enable_mask |= BIT(v_idx);
 
                ixgbevf_write_eitr(q_vector);
        }
 
        ixgbevf_set_ivar(adapter, -1, 1, v_idx);
        /* setup eims_other and add value to global eims_enable_mask */
-       adapter->eims_other = 1 << v_idx;
+       adapter->eims_other = BIT(v_idx);
        adapter->eims_enable_mask |= adapter->eims_other;
 }
 
@@ -1589,8 +1597,8 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
        txdctl |= (8 << 16);    /* WTHRESH = 8 */
 
        /* Setting PTHRESH to 32 both improves performance */
-       txdctl |= (1 << 8) |    /* HTHRESH = 1 */
-                 32;          /* PTHRESH = 32 */
+       txdctl |= (1u << 8) |    /* HTHRESH = 1 */
+                  32;           /* PTHRESH = 32 */
 
        clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
 
@@ -1646,7 +1654,7 @@ static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
                      IXGBE_PSRTYPE_L2HDR;
 
        if (adapter->num_rx_queues > 1)
-               psrtype |= 1 << 29;
+               psrtype |= BIT(29);
 
        IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
 }
@@ -1752,9 +1760,15 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
        IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
                        ring->count * sizeof(union ixgbe_adv_rx_desc));
 
+#ifndef CONFIG_SPARC
        /* enable relaxed ordering */
        IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
                        IXGBE_DCA_RXCTRL_DESC_RRO_EN);
+#else
+       IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
+                       IXGBE_DCA_RXCTRL_DESC_RRO_EN |
+                       IXGBE_DCA_RXCTRL_DATA_WRO_EN);
+#endif
 
        /* reset head and tail pointers */
        IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
@@ -1795,7 +1809,7 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
                ixgbevf_setup_vfmrqc(adapter);
 
        /* notify the PF of our intent to use this size of frame */
-       ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
+       hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
 
        /* Setup the HW Rx Head and Tail Descriptor Pointers and
         * the Base and Length of the Rx Descriptor Ring
@@ -1908,7 +1922,7 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
 
        spin_lock_bh(&adapter->mbx_lock);
 
-       hw->mac.ops.update_xcast_mode(hw, netdev, xcast_mode);
+       hw->mac.ops.update_xcast_mode(hw, xcast_mode);
 
        /* reprogram multicast list */
        hw->mac.ops.update_mc_addr_list(hw, netdev);
@@ -2056,7 +2070,7 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
        spin_lock_bh(&adapter->mbx_lock);
 
        while (api[idx] != ixgbe_mbox_api_unknown) {
-               err = ixgbevf_negotiate_api_version(hw, api[idx]);
+               err = hw->mac.ops.negotiate_api_version(hw, api[idx]);
                if (!err)
                        break;
                idx++;
@@ -2797,7 +2811,7 @@ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
                struct ixgbevf_q_vector *qv = adapter->q_vector[i];
 
                if (qv->rx.ring || qv->tx.ring)
-                       eics |= 1 << i;
+                       eics |= BIT(i);
        }
 
        /* Cause software interrupt to ensure rings are cleaned */
@@ -3272,9 +3286,18 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
                       struct ixgbevf_tx_buffer *first,
                       u8 *hdr_len)
 {
+       u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
        struct sk_buff *skb = first->skb;
-       u32 vlan_macip_lens, type_tucmd;
-       u32 mss_l4len_idx, l4len;
+       union {
+               struct iphdr *v4;
+               struct ipv6hdr *v6;
+               unsigned char *hdr;
+       } ip;
+       union {
+               struct tcphdr *tcp;
+               unsigned char *hdr;
+       } l4;
+       u32 paylen, l4_offset;
        int err;
 
        if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -3287,49 +3310,53 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
        if (err < 0)
                return err;
 
+       ip.hdr = skb_network_header(skb);
+       l4.hdr = skb_checksum_start(skb);
+
        /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
        type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
 
-       if (first->protocol == htons(ETH_P_IP)) {
-               struct iphdr *iph = ip_hdr(skb);
-
-               iph->tot_len = 0;
-               iph->check = 0;
-               tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
-                                                        iph->daddr, 0,
-                                                        IPPROTO_TCP,
-                                                        0);
+       /* initialize outer IP header fields */
+       if (ip.v4->version == 4) {
+               /* IP header will have to cancel out any data that
+                * is not a part of the outer IP header
+                */
+               ip.v4->check = csum_fold(csum_add(lco_csum(skb),
+                                                 csum_unfold(l4.tcp->check)));
                type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+
+               ip.v4->tot_len = 0;
                first->tx_flags |= IXGBE_TX_FLAGS_TSO |
                                   IXGBE_TX_FLAGS_CSUM |
                                   IXGBE_TX_FLAGS_IPV4;
-       } else if (skb_is_gso_v6(skb)) {
-               ipv6_hdr(skb)->payload_len = 0;
-               tcp_hdr(skb)->check =
-                   ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-                                    &ipv6_hdr(skb)->daddr,
-                                    0, IPPROTO_TCP, 0);
+       } else {
+               ip.v6->payload_len = 0;
                first->tx_flags |= IXGBE_TX_FLAGS_TSO |
                                   IXGBE_TX_FLAGS_CSUM;
        }
 
-       /* compute header lengths */
-       l4len = tcp_hdrlen(skb);
-       *hdr_len += l4len;
-       *hdr_len = skb_transport_offset(skb) + l4len;
+       /* determine offset of inner transport header */
+       l4_offset = l4.hdr - skb->data;
+
+       /* compute length of segmentation header */
+       *hdr_len = (l4.tcp->doff * 4) + l4_offset;
 
-       /* update GSO size and bytecount with header size */
+       /* remove payload length from inner checksum */
+       paylen = skb->len - l4_offset;
+       csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
+
+       /* update gso size and bytecount with header size */
        first->gso_segs = skb_shinfo(skb)->gso_segs;
        first->bytecount += (first->gso_segs - 1) * *hdr_len;
 
        /* mss_l4len_id: use 1 as index for TSO */
-       mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
+       mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
        mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
-       mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
+       mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT);
 
        /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
-       vlan_macip_lens = skb_network_header_len(skb);
-       vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+       vlan_macip_lens = l4.hdr - ip.hdr;
+       vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
        vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
        ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
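
Editor's note: instead of recomputing the pseudo-header checksum from scratch, the new TSO path takes the seed the stack already left in the TCP checksum field and removes the payload length from it via csum_replace_by_diff(). Because the Internet checksum is 16-bit one's complement, subtracting a value is the same as adding its complement with end-around carry. A simplified standalone illustration using 16-bit quantities (the kernel helpers operate on 32-bit folded sums, so this shows the arithmetic, not the exact API):

#include <stdint.h>
#include <stdio.h>

/* end-around-carry fold of a 32-bit accumulator into 16 bits */
static uint16_t fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint16_t hdr = 0x1234, paylen = 0x0040;

	/* a checksum seed that (wrongly, for TSO) includes the payload len */
	uint16_t with_len = fold((uint32_t)hdr + paylen);

	/* "subtracting" paylen = adding its one's complement */
	uint16_t without_len = fold((uint32_t)with_len + (uint16_t)~paylen);

	printf("with len: 0x%04x, without: 0x%04x (expect 0x%04x)\n",
	       with_len, without_len, hdr);
	return 0;
}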
@@ -3422,7 +3449,7 @@ static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
 
        /* use index 1 context for TSO/FSO/FCOE */
        if (tx_flags & IXGBE_TX_FLAGS_TSO)
-               olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
+               olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT);
 
        /* Check Context must be set if Tx switch is enabled, which it
         * always is for case where virtual functions are running
@@ -3727,7 +3754,7 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
        netdev->mtu = new_mtu;
 
        /* notify the PF of our intent to use this size of frame */
-       ixgbevf_rlpml_set_vf(hw, max_frame);
+       hw->mac.ops.set_rlpml(hw, max_frame);
 
        return 0;
 }
@@ -3870,6 +3897,40 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
        return stats;
 }
 
+#define IXGBEVF_MAX_MAC_HDR_LEN                127
+#define IXGBEVF_MAX_NETWORK_HDR_LEN    511
+
+static netdev_features_t
+ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev,
+                      netdev_features_t features)
+{
+       unsigned int network_hdr_len, mac_hdr_len;
+
+       /* Make certain the headers can be described by a context descriptor */
+       mac_hdr_len = skb_network_header(skb) - skb->data;
+       if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN))
+               return features & ~(NETIF_F_HW_CSUM |
+                                   NETIF_F_SCTP_CRC |
+                                   NETIF_F_HW_VLAN_CTAG_TX |
+                                   NETIF_F_TSO |
+                                   NETIF_F_TSO6);
+
+       network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+       if (unlikely(network_hdr_len > IXGBEVF_MAX_NETWORK_HDR_LEN))
+               return features & ~(NETIF_F_HW_CSUM |
+                                   NETIF_F_SCTP_CRC |
+                                   NETIF_F_TSO |
+                                   NETIF_F_TSO6);
+
+       /* We can only support IPV4 TSO in tunnels if we can mangle the
+        * inner IP ID field, so strip TSO if MANGLEID is not supported.
+        */
+       if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+               features &= ~NETIF_F_TSO;
+
+       return features;
+}
+
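
Editor's note: the new .ndo_features_check mirrors a hardware limit — the advanced Tx context descriptor stores the MAC and network header lengths in fixed-width fields, so packets whose headers don't fit must fall back to software checksumming and GSO. The 127/511 ceilings are consistent with 7-bit and 9-bit descriptor fields (an inference from the constants, not a statement made by this patch). A standalone sketch of the same gate:

#include <stdio.h>

#define MAX_MAC_HDR_LEN     127  /* fits a 7-bit descriptor field */
#define MAX_NETWORK_HDR_LEN 511  /* fits a 9-bit descriptor field */

/* return 1 if the header lengths fit the context descriptor, else 0 */
static int headers_fit(unsigned int mac_hdr_len, unsigned int net_hdr_len)
{
	return mac_hdr_len <= MAX_MAC_HDR_LEN &&
	       net_hdr_len <= MAX_NETWORK_HDR_LEN;
}

int main(void)
{
	printf("14/20  -> %d\n", headers_fit(14, 20));  /* 1: offload ok   */
	printf("130/20 -> %d\n", headers_fit(130, 20)); /* 0: strip offload */
	return 0;
}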
 static const struct net_device_ops ixgbevf_netdev_ops = {
        .ndo_open               = ixgbevf_open,
        .ndo_stop               = ixgbevf_close,
@@ -3888,7 +3949,7 @@ static const struct net_device_ops ixgbevf_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = ixgbevf_netpoll,
 #endif
-       .ndo_features_check     = passthru_features_check,
+       .ndo_features_check     = ixgbevf_features_check,
 };
 
 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
@@ -3999,23 +4060,31 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                              NETIF_F_HW_CSUM |
                              NETIF_F_SCTP_CRC;
 
-       netdev->features = netdev->hw_features |
-                          NETIF_F_HW_VLAN_CTAG_TX |
-                          NETIF_F_HW_VLAN_CTAG_RX |
-                          NETIF_F_HW_VLAN_CTAG_FILTER;
+#define IXGBEVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+                                     NETIF_F_GSO_GRE_CSUM | \
+                                     NETIF_F_GSO_IPIP | \
+                                     NETIF_F_GSO_SIT | \
+                                     NETIF_F_GSO_UDP_TUNNEL | \
+                                     NETIF_F_GSO_UDP_TUNNEL_CSUM)
 
-       netdev->vlan_features |= NETIF_F_SG |
-                                NETIF_F_TSO |
-                                NETIF_F_TSO6 |
-                                NETIF_F_HW_CSUM |
-                                NETIF_F_SCTP_CRC;
+       netdev->gso_partial_features = IXGBEVF_GSO_PARTIAL_FEATURES;
+       netdev->hw_features |= NETIF_F_GSO_PARTIAL |
+                              IXGBEVF_GSO_PARTIAL_FEATURES;
 
-       netdev->mpls_features |= NETIF_F_HW_CSUM;
-       netdev->hw_enc_features |= NETIF_F_HW_CSUM;
+       netdev->features = netdev->hw_features;
 
        if (pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;
 
+       netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
+       netdev->mpls_features |= NETIF_F_HW_CSUM;
+       netdev->hw_enc_features |= netdev->vlan_features;
+
+       /* set this bit last since it cannot be part of vlan_features */
+       netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+                           NETIF_F_HW_VLAN_CTAG_RX |
+                           NETIF_F_HW_VLAN_CTAG_TX;
+
        netdev->priv_flags |= IFF_UNICAST_FLT;
 
        if (IXGBE_REMOVED(hw->hw_addr)) {
index dc68fea4894b4dabab0f900f116d182c7a68c0e3..61a80da8b6f0dec4323f094a245fc3fcbb6bcab0 100644 (file)
@@ -346,3 +346,14 @@ const struct ixgbe_mbx_operations ixgbevf_mbx_ops = {
        .check_for_rst  = ixgbevf_check_for_rst_vf,
 };
 
+/* Mailbox operations when running on Hyper-V.
+ * On Hyper-V, PF/VF communication is not through the
+ * hardware mailbox; it goes through a software-mediated
+ * path instead.
+ * Most mailbox operations are no-ops while running on
+ * Hyper-V.
+ */
+const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops = {
+       .init_params    = ixgbevf_init_mbx_params_vf,
+       .check_for_rst  = ixgbevf_check_for_rst_vf,
+};
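
Editor's note: only init_params and check_for_rst are populated here; every other member stays NULL because there is no hardware mailbox to drive, and callers either check the pointer or use the -EOPNOTSUPP stubs added elsewhere in this patch. A minimal standalone sketch of that guarded ops-table pattern:

#include <errno.h>
#include <stdio.h>

struct mbx_ops {
	int (*init_params)(void);
	int (*check_for_msg)(void); /* absent in the Hyper-V variant */
};

static int hv_init_params(void) { return 0; }

static const struct mbx_ops hv_mbx_ops = {
	.init_params = hv_init_params,
	/* .check_for_msg left NULL: no hardware mailbox to poll */
};

int main(void)
{
	const struct mbx_ops *ops = &hv_mbx_ops;
	int err = ops->check_for_msg ? ops->check_for_msg() : -EOPNOTSUPP;

	printf("check_for_msg -> %d\n", err); /* -EOPNOTSUPP */
	return 0;
}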
index 4d613a4f2a7f2bff97774dcedbaabfe7f8ad26a9..e670d3b19c3c2eabf5e2f95f9cee59d9389d8299 100644 (file)
 #include "vf.h"
 #include "ixgbevf.h"
 
+/* On Hyper-V, to reset, we need to read from this offset
+ * in the PCI config space. This is the mechanism used on
+ * Hyper-V to support PF/VF communication.
+ */
+#define IXGBE_HV_RESET_OFFSET           0x201
+
 /**
  *  ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
  *  @hw: pointer to hardware structure
@@ -125,6 +131,27 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
        return 0;
 }
 
+/**
+ * Hyper-V variant; the VF/PF communication is through the PCI
+ * config space.
+ */
+static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
+{
+#if IS_ENABLED(CONFIG_PCI_MMCONFIG)
+       struct ixgbevf_adapter *adapter = hw->back;
+       int i;
+
+       for (i = 0; i < 6; i++)
+               pci_read_config_byte(adapter->pdev,
+                                    (i + IXGBE_HV_RESET_OFFSET),
+                                    &hw->mac.perm_addr[i]);
+       return 0;
+#else
+       pr_err("PCI_MMCONFIG needs to be enabled for Hyper-V\n");
+       return -EOPNOTSUPP;
+#endif
+}
+
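
Editor's note: the six single-byte reads starting at config-space offset 0x201 recover the permanent MAC the host stashed there. For intuition, the same bytes can be read from userspace through sysfs (root required; the device path below is a placeholder, not an address taken from this patch):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define HV_RESET_OFFSET 0x201  /* same offset the driver uses */

int main(void)
{
	/* placeholder sysfs path for a VF's PCI config space */
	int fd = open("/sys/bus/pci/devices/0000:00:02.0/config", O_RDONLY);
	unsigned char mac[6];

	if (fd < 0 || pread(fd, mac, sizeof(mac), HV_RESET_OFFSET) != 6) {
		perror("config space read");
		return 1;
	}
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}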
 /**
  *  ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
  *  @hw: pointer to hardware structure
@@ -258,6 +285,11 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
        return ret_val;
 }
 
+static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
+{
+       return -EOPNOTSUPP;
+}
+
 /**
  * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents.
  * @adapter: pointer to the port handle
@@ -416,6 +448,26 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
        return ret_val;
 }
 
+/**
+ *  ixgbevf_hv_set_rar_vf - set device MAC address Hyper-V variant
+ *  @hw: pointer to hardware structure
+ *  @index: Receive address register to write
+ *  @addr: Address to put into receive address register
+ *  @vmdq: Unused in this implementation
+ *
+ * We don't really allow setting the device MAC address. However,
+ * if the address being set is the permanent MAC address, we
+ * permit it.
+ **/
+static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
+                                u32 vmdq)
+{
+       if (ether_addr_equal(addr, hw->mac.perm_addr))
+               return 0;
+
+       return -EOPNOTSUPP;
+}
+
 static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw,
                                       u32 *msg, u16 size)
 {
@@ -472,16 +524,23 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
        return 0;
 }
 
+/**
+ * Hyper-V variant - just a stub.
+ */
+static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
+                                            struct net_device *netdev)
+{
+       return -EOPNOTSUPP;
+}
+
 /**
  *  ixgbevf_update_xcast_mode - Update Multicast mode
  *  @hw: pointer to the HW structure
- *  @netdev: pointer to net device structure
  *  @xcast_mode: new multicast mode
  *
  *  Updates the multicast mode of the VF.
  **/
-static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw,
-                                    struct net_device *netdev, int xcast_mode)
+static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
 {
        struct ixgbe_mbx_info *mbx = &hw->mbx;
        u32 msgbuf[2];
@@ -512,6 +571,14 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw,
        return 0;
 }
 
+/**
+ * Hyper-V variant - just a stub.
+ */
+static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
+{
+       return -EOPNOTSUPP;
+}
+
 /**
  *  ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
  *  @hw: pointer to the HW structure
@@ -550,6 +617,15 @@ mbx_err:
        return err;
 }
 
+/**
+ * Hyper-V variant - just a stub.
+ */
+static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+                                 bool vlan_on)
+{
+       return -EOPNOTSUPP;
+}
+
 /**
  *  ixgbevf_setup_mac_link_vf - Setup MAC link settings
  *  @hw: pointer to hardware structure
@@ -656,11 +732,72 @@ out:
 }
 
 /**
- *  ixgbevf_rlpml_set_vf - Set the maximum receive packet length
+ * Hyper-V variant; there is no mailbox communication.
+ */
+static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
+                                       ixgbe_link_speed *speed,
+                                       bool *link_up,
+                                       bool autoneg_wait_to_complete)
+{
+       struct ixgbe_mbx_info *mbx = &hw->mbx;
+       struct ixgbe_mac_info *mac = &hw->mac;
+       u32 links_reg;
+
+       /* If we were hit with a reset, drop the link */
+       if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
+               mac->get_link_status = true;
+
+       if (!mac->get_link_status)
+               goto out;
+
+       /* if the link is down, there is no point in checking whether the PF is up */
+       links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+       if (!(links_reg & IXGBE_LINKS_UP))
+               goto out;
+
+       /* for SFP+ modules and DA cables on 82599, it can take up to
+        * 500 usecs before the link status is correct
+        */
+       if (mac->type == ixgbe_mac_82599_vf) {
+               int i;
+
+               for (i = 0; i < 5; i++) {
+                       udelay(100);
+                       links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+                       if (!(links_reg & IXGBE_LINKS_UP))
+                               goto out;
+               }
+       }
+
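+       /* decode the negotiated link speed from the VFLINKS register */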
+       switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+       case IXGBE_LINKS_SPEED_10G_82599:
+               *speed = IXGBE_LINK_SPEED_10GB_FULL;
+               break;
+       case IXGBE_LINKS_SPEED_1G_82599:
+               *speed = IXGBE_LINK_SPEED_1GB_FULL;
+               break;
+       case IXGBE_LINKS_SPEED_100_82599:
+               *speed = IXGBE_LINK_SPEED_100_FULL;
+               break;
+       }
+
+       /* if we passed all the tests above, the link is up and we no
+        * longer need to check for it
+        */
+       mac->get_link_status = false;
+
+out:
+       *link_up = !mac->get_link_status;
+       return 0;
+}
+
+/**
+ *  ixgbevf_set_rlpml_vf - Set the maximum receive packet length
  *  @hw: pointer to the HW structure
  *  @max_size: value to assign to max frame size
  **/
-void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
+static void ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
 {
        u32 msgbuf[2];
 
@@ -670,11 +807,30 @@ void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
 }
 
 /**
- *  ixgbevf_negotiate_api_version - Negotiate supported API version
+ * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
+ * @hw: pointer to the HW structure
+ * @max_size: value to assign to max frame size
+ * Hyper-V variant.
+ **/
+static void ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
+{
+       u32 reg;
+
+       /* There is no PF mailbox on Hyper-V, so program the maximum
+        * frame size directly into the VFRXDCTL register.
+        */
+       reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0));
+       /* account for the 4-byte CRC */
+       reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
+       IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);
+}
+
+/**
+ *  ixgbevf_negotiate_api_version_vf - Negotiate supported API version
  *  @hw: pointer to the HW structure
  *  @api: integer containing requested API version
  **/
-int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
+static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
 {
        int err;
        u32 msg[3];
@@ -703,6 +859,21 @@ int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
        return err;
 }
 
+/**
+ *  ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version
+ *  @hw: pointer to the HW structure
+ *  @api: integer containing requested API version
+ *  Hyper-V version - only ixgbe_mbox_api_10 supported.
+ **/
+static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
+{
+       /* Hyper-V only supports api version ixgbe_mbox_api_10 */
+       if (api != ixgbe_mbox_api_10)
+               return IXGBE_ERR_INVALID_ARGUMENT;
+
+       return 0;
+}
+
 int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
                       unsigned int *default_tc)
 {
@@ -769,11 +940,30 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
        .stop_adapter           = ixgbevf_stop_hw_vf,
        .setup_link             = ixgbevf_setup_mac_link_vf,
        .check_link             = ixgbevf_check_mac_link_vf,
+       .negotiate_api_version  = ixgbevf_negotiate_api_version_vf,
        .set_rar                = ixgbevf_set_rar_vf,
        .update_mc_addr_list    = ixgbevf_update_mc_addr_list_vf,
        .update_xcast_mode      = ixgbevf_update_xcast_mode,
        .set_uc_addr            = ixgbevf_set_uc_addr_vf,
        .set_vfta               = ixgbevf_set_vfta_vf,
+       .set_rlpml              = ixgbevf_set_rlpml_vf,
+};
+
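+/* Hyper-V variant of the MAC operations: operations that require the
+ * PF mailbox are either reimplemented via PCI config space or register
+ * accesses, or stubbed out.
+ */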
+static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
+       .init_hw                = ixgbevf_init_hw_vf,
+       .reset_hw               = ixgbevf_hv_reset_hw_vf,
+       .start_hw               = ixgbevf_start_hw_vf,
+       .get_mac_addr           = ixgbevf_get_mac_addr_vf,
+       .stop_adapter           = ixgbevf_stop_hw_vf,
+       .setup_link             = ixgbevf_setup_mac_link_vf,
+       .check_link             = ixgbevf_hv_check_mac_link_vf,
+       .negotiate_api_version  = ixgbevf_hv_negotiate_api_version_vf,
+       .set_rar                = ixgbevf_hv_set_rar_vf,
+       .update_mc_addr_list    = ixgbevf_hv_update_mc_addr_list_vf,
+       .update_xcast_mode      = ixgbevf_hv_update_xcast_mode,
+       .set_uc_addr            = ixgbevf_hv_set_uc_addr_vf,
+       .set_vfta               = ixgbevf_hv_set_vfta_vf,
+       .set_rlpml              = ixgbevf_hv_set_rlpml_vf,
 };
 
 const struct ixgbevf_info ixgbevf_82599_vf_info = {
@@ -781,17 +971,37 @@ const struct ixgbevf_info ixgbevf_82599_vf_info = {
        .mac_ops = &ixgbevf_mac_ops,
 };
 
+const struct ixgbevf_info ixgbevf_82599_vf_hv_info = {
+       .mac = ixgbe_mac_82599_vf,
+       .mac_ops = &ixgbevf_hv_mac_ops,
+};
+
 const struct ixgbevf_info ixgbevf_X540_vf_info = {
        .mac = ixgbe_mac_X540_vf,
        .mac_ops = &ixgbevf_mac_ops,
 };
 
+const struct ixgbevf_info ixgbevf_X540_vf_hv_info = {
+       .mac = ixgbe_mac_X540_vf,
+       .mac_ops = &ixgbevf_hv_mac_ops,
+};
+
 const struct ixgbevf_info ixgbevf_X550_vf_info = {
        .mac = ixgbe_mac_X550_vf,
        .mac_ops = &ixgbevf_mac_ops,
 };
 
+const struct ixgbevf_info ixgbevf_X550_vf_hv_info = {
+       .mac = ixgbe_mac_X550_vf,
+       .mac_ops = &ixgbevf_hv_mac_ops,
+};
+
 const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
        .mac = ixgbe_mac_X550EM_x_vf,
        .mac_ops = &ixgbevf_mac_ops,
 };
+
+const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = {
+       .mac = ixgbe_mac_X550EM_x_vf,
+       .mac_ops = &ixgbevf_hv_mac_ops,
+};
index ef9f7736b4dc6524ea12ea96c31c2218c6600f76..2cac610f32ba64832f7abd76b04be5edcd3acac0 100644 (file)
@@ -51,6 +51,7 @@ struct ixgbe_mac_operations {
        s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
        s32 (*stop_adapter)(struct ixgbe_hw *);
        s32 (*get_bus_info)(struct ixgbe_hw *);
+       s32 (*negotiate_api_version)(struct ixgbe_hw *hw, int api);
 
        /* Link */
        s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
@@ -63,11 +64,12 @@ struct ixgbe_mac_operations {
        s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *);
        s32 (*init_rx_addrs)(struct ixgbe_hw *);
        s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
-       s32 (*update_xcast_mode)(struct ixgbe_hw *, struct net_device *, int);
+       s32 (*update_xcast_mode)(struct ixgbe_hw *, int);
        s32 (*enable_mc)(struct ixgbe_hw *);
        s32 (*disable_mc)(struct ixgbe_hw *);
        s32 (*clear_vfta)(struct ixgbe_hw *);
        s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
+       void (*set_rlpml)(struct ixgbe_hw *, u16);
 };
 
 enum ixgbe_mac_type {
@@ -207,8 +209,6 @@ static inline u32 ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg,
 
 #define IXGBE_READ_REG_ARRAY(h, r, o) ixgbe_read_reg_array(h, r, o)
 
-void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
-int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
 int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
                       unsigned int *default_tc);
 int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues);
index 3ddf657bc10bcc51d5a217595ba8e98ed909e0cd..836ebd8ee768fc8083da04a45192eff81742a644 100644 (file)
@@ -222,7 +222,7 @@ jme_clear_ghc_reset(struct jme_adapter *jme)
        jwrite32f(jme, JME_GHC, jme->reg_ghc);
 }
 
-static inline void
+static void
 jme_reset_mac_processor(struct jme_adapter *jme)
 {
        static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
index d74f5f4e57824a97a3b745d4cbaa77098ad69ba3..1799fe1415df5070104edf4946a537e245c80176 100644 (file)
@@ -152,7 +152,7 @@ static inline void korina_abort_dma(struct net_device *dev,
               writel(0x10, &ch->dmac);
 
               while (!(readl(&ch->dmas) & DMA_STAT_HALT))
-                      dev->trans_start = jiffies;
+                      netif_trans_update(dev);
 
               writel(0, &ch->dmas);
        }
@@ -283,7 +283,7 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
        }
        dma_cache_wback((u32) td, sizeof(*td));
 
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
        spin_unlock_irqrestore(&lp->lock, flags);
 
        return NETDEV_TX_OK;
@@ -622,7 +622,7 @@ korina_tx_dma_interrupt(int irq, void *dev_id)
                                &(lp->tx_dma_regs->dmandptr));
                        lp->tx_chain_status = desc_empty;
                        lp->tx_chain_head = lp->tx_chain_tail;
-                       dev->trans_start = jiffies;
+                       netif_trans_update(dev);
                }
                if (dmas & DMA_STAT_ERR)
                        printk(KERN_ERR "%s: DMA error\n", dev->name);
@@ -811,7 +811,7 @@ static int korina_init(struct net_device *dev)
        /* reset ethernet logic */
        writel(0, &lp->eth_regs->ethintfc);
        while ((readl(&lp->eth_regs->ethintfc) & ETH_INT_FC_RIP))
-               dev->trans_start = jiffies;
+               netif_trans_update(dev);
 
        /* Enable Ethernet Interface */
        writel(ETH_INT_FC_EN, &lp->eth_regs->ethintfc);
index b630ef1e96469e1a6021ccf65845f420697d29ea..dc82b1b19574c369229ed6da33419e3c4dfce466 100644 (file)
@@ -519,7 +519,7 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
        byte_offset = CPHYSADDR(skb->data) % 16;
        ch->skb[ch->dma.desc] = skb;
 
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
 
        spin_lock_irqsave(&priv->lock, flags);
        desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len,
@@ -657,7 +657,7 @@ ltq_etop_tx_timeout(struct net_device *dev)
        err = ltq_etop_hw_init(dev);
        if (err)
                goto err_hw;
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
        netif_wake_queue(dev);
        return;
 
index 7fc490225da507de93c66c1b55eba3f0e5295e58..a6d26d351dfc47c777b39c04a44c2f17bca0feab 100644 (file)
@@ -3354,8 +3354,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
                /* Enable per-CPU interrupts on the CPU that is
                 * brought up.
                 */
-               smp_call_function_single(cpu, mvneta_percpu_enable,
-                                        pp, true);
+               mvneta_percpu_enable(pp);
 
                /* Enable per-CPU interrupt on the one CPU we care
                 * about.
@@ -3387,8 +3386,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
                /* Disable per-CPU interrupts on the CPU that is
                 * brought down.
                 */
-               smp_call_function_single(cpu, mvneta_percpu_disable,
-                                        pp, true);
+               mvneta_percpu_disable(pp);
 
                break;
        case CPU_DEAD:
index 7ace07dad6a31d4b18ab5aa1e5335c0727cff596..89d0d835352ecf82a91b342a64d701c60da79d20 100644 (file)
@@ -979,8 +979,8 @@ static int pxa168_init_phy(struct net_device *dev)
                return 0;
 
        pep->phy = mdiobus_scan(pep->smi_bus, pep->phy_addr);
-       if (!pep->phy)
-               return -ENODEV;
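+       /* mdiobus_scan() reports failure via ERR_PTR() rather than NULL */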
+       if (IS_ERR(pep->phy))
+               return PTR_ERR(pep->phy);
 
        err = phy_connect_direct(dev, pep->phy, pxa168_eth_adjust_link,
                                 pep->phy_intf);
@@ -1295,7 +1295,7 @@ static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        stats->tx_bytes += length;
        stats->tx_packets++;
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
        if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
                /* We handled the current skb, but now we are out of space.*/
                netif_stop_queue(dev);
index ec0a22119e09e2a49b54c9a23995e200a2d780a9..467138b423d3629cf468b3c54606a321214909ba 100644 (file)
@@ -2418,7 +2418,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
        sky2_write32(hw, B0_IMSK, 0);
        sky2_read32(hw, B0_IMSK);
 
-       dev->trans_start = jiffies;     /* prevent tx timeout */
+       netif_trans_update(dev);        /* prevent tx timeout */
        napi_disable(&hw->napi);
        netif_tx_disable(dev);
 
index e0b68afea56e7695ed0de5122cc027c683aefb46..c984462fad2a26fa267faab7707181a19c1d1814 100644 (file)
@@ -536,7 +536,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
        struct mtk_eth *eth = mac->hw;
        struct mtk_tx_dma *itxd, *txd;
        struct mtk_tx_buf *tx_buf;
-       unsigned long flags;
        dma_addr_t mapped_addr;
        unsigned int nr_frags;
        int i, n_desc = 1;
@@ -568,11 +567,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
        if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
                return -ENOMEM;
 
-       /* normally we can rely on the stack not calling this more than once,
-        * however we have 2 queues running ont he same ring so we need to lock
-        * the ring access
-        */
-       spin_lock_irqsave(&eth->page_lock, flags);
        WRITE_ONCE(itxd->txd1, mapped_addr);
        tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
        dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
@@ -609,8 +603,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
                        WRITE_ONCE(txd->txd1, mapped_addr);
                        WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
                                               TX_DMA_PLEN0(frag_map_size) |
-                                              last_frag * TX_DMA_LS0) |
-                                              mac->id);
+                                              last_frag * TX_DMA_LS0));
                        WRITE_ONCE(txd->txd4, 0);
 
                        tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
@@ -632,8 +625,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
        WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
                                (!nr_frags * TX_DMA_LS0)));
 
-       spin_unlock_irqrestore(&eth->page_lock, flags);
-
        netdev_sent_queue(dev, skb->len);
        skb_tx_timestamp(skb);
 
@@ -661,8 +652,6 @@ err_dma:
                itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
        } while (itxd != txd);
 
-       spin_unlock_irqrestore(&eth->page_lock, flags);
-
        return -ENOMEM;
 }
 
@@ -681,7 +670,29 @@ static inline int mtk_cal_txd_req(struct sk_buff *skb)
                nfrags += skb_shinfo(skb)->nr_frags;
        }
 
-       return DIV_ROUND_UP(nfrags, 2);
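+       /* each fragment consumes a full TX descriptor */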
+       return nfrags;
+}
+
+static void mtk_wake_queue(struct mtk_eth *eth)
+{
+       int i;
+
+       for (i = 0; i < MTK_MAC_COUNT; i++) {
+               if (!eth->netdev[i])
+                       continue;
+               netif_wake_queue(eth->netdev[i]);
+       }
+}
+
+static void mtk_stop_queue(struct mtk_eth *eth)
+{
+       int i;
+
+       for (i = 0; i < MTK_MAC_COUNT; i++) {
+               if (!eth->netdev[i])
+                       continue;
+               netif_stop_queue(eth->netdev[i]);
+       }
 }
 
 static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -690,14 +701,22 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
        struct mtk_eth *eth = mac->hw;
        struct mtk_tx_ring *ring = &eth->tx_ring;
        struct net_device_stats *stats = &dev->stats;
+       unsigned long flags;
        bool gso = false;
        int tx_num;
 
+       /* normally we can rely on the stack not calling this more than once,
+        * however we have 2 queues running on the same ring so we need to lock
+        * the ring access
+        */
+       spin_lock_irqsave(&eth->page_lock, flags);
+
        tx_num = mtk_cal_txd_req(skb);
        if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
-               netif_stop_queue(dev);
+               mtk_stop_queue(eth);
                netif_err(eth, tx_queued, dev,
                          "Tx Ring full when queue awake!\n");
+               spin_unlock_irqrestore(&eth->page_lock, flags);
                return NETDEV_TX_BUSY;
        }
 
@@ -720,15 +739,17 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
                goto drop;
 
        if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) {
-               netif_stop_queue(dev);
+               mtk_stop_queue(eth);
                if (unlikely(atomic_read(&ring->free_count) >
                             ring->thresh))
-                       netif_wake_queue(dev);
+                       mtk_wake_queue(eth);
        }
+       spin_unlock_irqrestore(&eth->page_lock, flags);
 
        return NETDEV_TX_OK;
 
 drop:
+       spin_unlock_irqrestore(&eth->page_lock, flags);
        stats->tx_dropped++;
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
@@ -897,13 +918,8 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
        if (!total)
                return 0;
 
-       for (i = 0; i < MTK_MAC_COUNT; i++) {
-               if (!eth->netdev[i] ||
-                   unlikely(!netif_queue_stopped(eth->netdev[i])))
-                       continue;
-               if (atomic_read(&ring->free_count) > ring->thresh)
-                       netif_wake_queue(eth->netdev[i]);
-       }
+       if (atomic_read(&ring->free_count) > ring->thresh)
+               mtk_wake_queue(eth);
 
        return total;
 }
@@ -1176,7 +1192,7 @@ static void mtk_tx_timeout(struct net_device *dev)
        eth->netdev[mac->id]->stats.tx_errors++;
        netif_err(eth, tx_err, dev,
                  "transmit timed out\n");
-       schedule_work(&mac->pending_work);
+       schedule_work(&eth->pending_work);
 }
 
 static irqreturn_t mtk_handle_irq(int irq, void *_eth)
@@ -1413,19 +1429,30 @@ static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 
 static void mtk_pending_work(struct work_struct *work)
 {
-       struct mtk_mac *mac = container_of(work, struct mtk_mac, pending_work);
-       struct mtk_eth *eth = mac->hw;
-       struct net_device *dev = eth->netdev[mac->id];
-       int err;
+       struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
+       int err, i;
+       unsigned long restart = 0;
 
        rtnl_lock();
-       mtk_stop(dev);
 
-       err = mtk_open(dev);
-       if (err) {
-               netif_alert(eth, ifup, dev,
-                           "Driver up/down cycle failed, closing device.\n");
-               dev_close(dev);
+       /* stop all devices to make sure that DMA is properly shut down */
+       for (i = 0; i < MTK_MAC_COUNT; i++) {
+               if (!eth->netdev[i])
+                       continue;
+               mtk_stop(eth->netdev[i]);
+               __set_bit(i, &restart);
+       }
+
+       /* restart DMA and enable IRQs */
+       for (i = 0; i < MTK_MAC_COUNT; i++) {
+               if (!test_bit(i, &restart))
+                       continue;
+               err = mtk_open(eth->netdev[i]);
+               if (err) {
+                       netif_alert(eth, ifup, eth->netdev[i],
+                             "Driver up/down cycle failed, closing device.\n");
+                       dev_close(eth->netdev[i]);
+               }
        }
        rtnl_unlock();
 }
@@ -1435,15 +1462,13 @@ static int mtk_cleanup(struct mtk_eth *eth)
        int i;
 
        for (i = 0; i < MTK_MAC_COUNT; i++) {
-               struct mtk_mac *mac = netdev_priv(eth->netdev[i]);
-
                if (!eth->netdev[i])
                        continue;
 
                unregister_netdev(eth->netdev[i]);
                free_netdev(eth->netdev[i]);
-               cancel_work_sync(&mac->pending_work);
        }
+       cancel_work_sync(&eth->pending_work);
 
        return 0;
 }
@@ -1631,7 +1656,6 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
        mac->id = id;
        mac->hw = eth;
        mac->of_node = np;
-       INIT_WORK(&mac->pending_work, mtk_pending_work);
 
        mac->hw_stats = devm_kzalloc(eth->dev,
                                     sizeof(*mac->hw_stats),
@@ -1645,6 +1669,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
        mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
 
        SET_NETDEV_DEV(eth->netdev[id], eth->dev);
+       eth->netdev[id]->watchdog_timeo = HZ;
        eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
        eth->netdev[id]->base_addr = (unsigned long)eth->base;
        eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
@@ -1678,10 +1703,6 @@ static int mtk_probe(struct platform_device *pdev)
        struct mtk_eth *eth;
        int err;
 
-       err = device_reset(&pdev->dev);
-       if (err)
-               return err;
-
        match = of_match_device(of_mtk_match, &pdev->dev);
        soc = (struct mtk_soc_data *)match->data;
 
@@ -1736,6 +1757,7 @@ static int mtk_probe(struct platform_device *pdev)
 
        eth->dev = &pdev->dev;
        eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
+       INIT_WORK(&eth->pending_work, mtk_pending_work);
 
        err = mtk_hw_init(eth);
        if (err)
index 48a5292c8ed8b793e3058d41cd389a8064fbfaf4..eed626d56ea4f1d3df5f2ed3aacbd8bded28cead 100644 (file)
@@ -363,6 +363,7 @@ struct mtk_rx_ring {
  * @clk_gp1:           The gmac1 clock
  * @clk_gp2:           The gmac2 clock
  * @mii_bus:           If there is a bus we need to create an instance for it
+ * @pending_work:      The work struct used to reset the DMA ring
  */
 
 struct mtk_eth {
@@ -389,6 +390,7 @@ struct mtk_eth {
        struct clk                      *clk_gp1;
        struct clk                      *clk_gp2;
        struct mii_bus                  *mii_bus;
+       struct work_struct              pending_work;
 };
 
 /* struct mtk_mac -    the structure that holds the info about the MACs of the
@@ -398,7 +400,6 @@ struct mtk_eth {
  * @hw:                        Backpointer to our main datastruture
  * @hw_stats:          Packet statistics counter
  * @phy_dev:           The attached PHY if available
- * @pending_work:      The workqueue used to reset the dma ring
  */
 struct mtk_mac {
        int                             id;
@@ -406,7 +407,6 @@ struct mtk_mac {
        struct mtk_eth                  *hw;
        struct mtk_hw_stats             *hw_stats;
        struct phy_device               *phy_dev;
-       struct work_struct              pending_work;
 };
 
 /* the struct describing the SoC. these are declared in the soc_xyz.c files */
index 0c51c69f802faf63a849300508ad4e809eeb7e3a..249a4584401ad487629c03b92a52f728d3318307 100644 (file)
@@ -576,41 +576,48 @@ out:
 
        return res;
 }
-/*
- * Handling for queue buffers -- we allocate a bunch of memory and
- * register it in a memory region at HCA virtual address 0.  If the
- * requested size is > max_direct, we split the allocation into
- * multiple pages, so we don't require too much contiguous memory.
- */
 
-int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
-                  struct mlx4_buf *buf, gfp_t gfp)
+static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size,
+                                struct mlx4_buf *buf, gfp_t gfp)
 {
        dma_addr_t t;
 
-       if (size <= max_direct) {
-               buf->nbufs        = 1;
-               buf->npages       = 1;
-               buf->page_shift   = get_order(size) + PAGE_SHIFT;
-               buf->direct.buf   = dma_alloc_coherent(&dev->persist->pdev->dev,
-                                                      size, &t, gfp);
-               if (!buf->direct.buf)
-                       return -ENOMEM;
+       buf->nbufs        = 1;
+       buf->npages       = 1;
+       buf->page_shift   = get_order(size) + PAGE_SHIFT;
+       buf->direct.buf   =
+               dma_zalloc_coherent(&dev->persist->pdev->dev,
+                                   size, &t, gfp);
+       if (!buf->direct.buf)
+               return -ENOMEM;
 
-               buf->direct.map = t;
+       buf->direct.map = t;
 
-               while (t & ((1 << buf->page_shift) - 1)) {
-                       --buf->page_shift;
-                       buf->npages *= 2;
-               }
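+       /* shrink the reported page size until the DMA address is aligned to it */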
+       while (t & ((1 << buf->page_shift) - 1)) {
+               --buf->page_shift;
+               buf->npages *= 2;
+       }
 
-               memset(buf->direct.buf, 0, size);
+       return 0;
+}
+
+/* Handling for queue buffers -- we allocate a bunch of memory and
+ * register it in a memory region at HCA virtual address 0. If the
+ * requested size is > max_direct, we split the allocation into
+ * multiple pages, so we don't require too much contiguous memory.
+ */
+int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
+                  struct mlx4_buf *buf, gfp_t gfp)
+{
+       if (size <= max_direct) {
+               return mlx4_buf_direct_alloc(dev, size, buf, gfp);
        } else {
+               dma_addr_t t;
                int i;
 
-               buf->direct.buf  = NULL;
-               buf->nbufs       = (size + PAGE_SIZE - 1) / PAGE_SIZE;
-               buf->npages      = buf->nbufs;
+               buf->direct.buf = NULL;
+               buf->nbufs      = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+               buf->npages     = buf->nbufs;
                buf->page_shift  = PAGE_SHIFT;
                buf->page_list   = kcalloc(buf->nbufs, sizeof(*buf->page_list),
                                           gfp);
@@ -619,28 +626,12 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
 
                for (i = 0; i < buf->nbufs; ++i) {
                        buf->page_list[i].buf =
-                               dma_alloc_coherent(&dev->persist->pdev->dev,
-                                                  PAGE_SIZE,
-                                                  &t, gfp);
+                               dma_zalloc_coherent(&dev->persist->pdev->dev,
+                                                   PAGE_SIZE, &t, gfp);
                        if (!buf->page_list[i].buf)
                                goto err_free;
 
                        buf->page_list[i].map = t;
-
-                       memset(buf->page_list[i].buf, 0, PAGE_SIZE);
-               }
-
-               if (BITS_PER_LONG == 64) {
-                       struct page **pages;
-                       pages = kmalloc(sizeof *pages * buf->nbufs, gfp);
-                       if (!pages)
-                               goto err_free;
-                       for (i = 0; i < buf->nbufs; ++i)
-                               pages[i] = virt_to_page(buf->page_list[i].buf);
-                       buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
-                       kfree(pages);
-                       if (!buf->direct.buf)
-                               goto err_free;
                }
        }
 
@@ -655,15 +646,11 @@ EXPORT_SYMBOL_GPL(mlx4_buf_alloc);
 
 void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
 {
-       int i;
-
-       if (buf->nbufs == 1)
+       if (buf->nbufs == 1) {
                dma_free_coherent(&dev->persist->pdev->dev, size,
-                                 buf->direct.buf,
-                                 buf->direct.map);
-       else {
-               if (BITS_PER_LONG == 64)
-                       vunmap(buf->direct.buf);
+                                 buf->direct.buf, buf->direct.map);
+       } else {
+               int i;
 
                for (i = 0; i < buf->nbufs; ++i)
                        if (buf->page_list[i].buf)
@@ -789,7 +776,7 @@ void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
 EXPORT_SYMBOL_GPL(mlx4_db_free);
 
 int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
-                      int size, int max_direct)
+                      int size)
 {
        int err;
 
@@ -799,7 +786,7 @@ int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
 
        *wqres->db.db = 0;
 
-       err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf, GFP_KERNEL);
+       err = mlx4_buf_direct_alloc(dev, size, &wqres->buf, GFP_KERNEL);
        if (err)
                goto err_db;
 
index af975a2b74c6d277c1a744cf358846d66de38abe..132cea655920d636c2e22fd2f110d163a421b8f3 100644 (file)
@@ -73,22 +73,16 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
         */
        set_dev_node(&mdev->dev->persist->pdev->dev, node);
        err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
-                               cq->buf_size, 2 * PAGE_SIZE);
+                               cq->buf_size);
        set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
        if (err)
                goto err_cq;
 
-       err = mlx4_en_map_buffer(&cq->wqres.buf);
-       if (err)
-               goto err_res;
-
        cq->buf = (struct mlx4_cqe *)cq->wqres.buf.direct.buf;
        *pcq = cq;
 
        return 0;
 
-err_res:
-       mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
 err_cq:
        kfree(cq);
        *pcq = NULL;
@@ -177,7 +171,6 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_cq *cq = *pcq;
 
-       mlx4_en_unmap_buffer(&cq->wqres.buf);
        mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
        if (mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector) &&
            cq->is_tx == RX)
index f69584a9b47fbef008a1ec138feba9f10504d997..c761194bb32352be84c2610464a9778d1a365c34 100644 (file)
@@ -337,7 +337,7 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
        case ETH_SS_STATS:
                return bitmap_iterator_count(&it) +
                        (priv->tx_ring_num * 2) +
-                       (priv->rx_ring_num * 2);
+                       (priv->rx_ring_num * 3);
        case ETH_SS_TEST:
                return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
                                        & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
@@ -404,6 +404,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
        for (i = 0; i < priv->rx_ring_num; i++) {
                data[index++] = priv->rx_ring[i]->packets;
                data[index++] = priv->rx_ring[i]->bytes;
+               data[index++] = priv->rx_ring[i]->dropped;
        }
        spin_unlock_bh(&priv->stats_lock);
 
@@ -477,6 +478,8 @@ static void mlx4_en_get_strings(struct net_device *dev,
                                "rx%d_packets", i);
                        sprintf(data + (index++) * ETH_GSTRING_LEN,
                                "rx%d_bytes", i);
+                       sprintf(data + (index++) * ETH_GSTRING_LEN,
+                               "rx%d_dropped", i);
                }
                break;
        case ETH_SS_PRIV_FLAGS:
index b4b258c8ca47d47f804fc85adb665e9d08fa48df..92e0624f4cf03cef36f085c8fa4f2669c80c0ef3 100644 (file)
@@ -1856,6 +1856,7 @@ static void mlx4_en_restart(struct work_struct *work)
 
        en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
 
+       rtnl_lock();
        mutex_lock(&mdev->state_lock);
        if (priv->port_up) {
                mlx4_en_stop_port(dev, 1);
@@ -1863,6 +1864,7 @@ static void mlx4_en_restart(struct work_struct *work)
                        en_err(priv, "Failed restarting port %d\n", priv->port);
        }
        mutex_unlock(&mdev->state_lock);
+       rtnl_unlock();
 }
 
 static void mlx4_en_clear_stats(struct net_device *dev)
@@ -2355,8 +2357,12 @@ out:
        }
 
        /* set offloads */
-       priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
-                                     NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
+       priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+                                     NETIF_F_RXCSUM |
+                                     NETIF_F_TSO | NETIF_F_TSO6 |
+                                     NETIF_F_GSO_UDP_TUNNEL |
+                                     NETIF_F_GSO_UDP_TUNNEL_CSUM |
+                                     NETIF_F_GSO_PARTIAL;
 }
 
 static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
@@ -2365,8 +2371,12 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
        struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
                                                 vxlan_del_task);
        /* unset offloads */
-       priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
-                                     NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
+       priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+                                       NETIF_F_RXCSUM |
+                                       NETIF_F_TSO | NETIF_F_TSO6 |
+                                       NETIF_F_GSO_UDP_TUNNEL |
+                                       NETIF_F_GSO_UDP_TUNNEL_CSUM |
+                                       NETIF_F_GSO_PARTIAL);
 
        ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
                                  VXLAN_STEER_BY_OUTER_MAC, 0);
@@ -2425,7 +2435,18 @@ static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
                                                netdev_features_t features)
 {
        features = vlan_features_check(skb, features);
-       return vxlan_features_check(skb, features);
+       features = vxlan_features_check(skb, features);
+
+       /* The ConnectX-3 doesn't support outer IPv6 checksums but it does
+        * support inner IPv6 checksums and segmentation, so we need to
+        * strip those features if this is an IPv6 encapsulated frame.
+        */
+       if (skb->encapsulation &&
+           (skb->ip_summed == CHECKSUM_PARTIAL) &&
+           (ip_hdr(skb)->version != 4))
+               features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+
+       return features;
 }
 #endif
 
@@ -2907,7 +2928,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 
        /* Allocate page for receive rings */
        err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
-                               MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
+                               MLX4_EN_PAGE_SIZE);
        if (err) {
                en_err(priv, "Failed to allocate page for rx qps\n");
                goto out;
@@ -2990,8 +3011,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        }
 
        if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
-               dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
-               dev->features    |= NETIF_F_GSO_UDP_TUNNEL;
+               dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
+                                   NETIF_F_GSO_UDP_TUNNEL_CSUM |
+                                   NETIF_F_GSO_PARTIAL;
+               dev->features    |= NETIF_F_GSO_UDP_TUNNEL |
+                                   NETIF_F_GSO_UDP_TUNNEL_CSUM |
+                                   NETIF_F_GSO_PARTIAL;
+               dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
        }
 
        mdev->pndev[port] = dev;
index 3904b5fc0b7c904548763ffbb2d31fbcccfe5b6c..20b6c2e678b8879525ae41c995d5bc7692bfdec5 100644 (file)
@@ -158,6 +158,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
        u64 in_mod = reset << 8 | port;
        int err;
        int i, counter_index;
+       unsigned long sw_rx_dropped = 0;
 
        mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
        if (IS_ERR(mailbox))
@@ -180,6 +181,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
        for (i = 0; i < priv->rx_ring_num; i++) {
                stats->rx_packets += priv->rx_ring[i]->packets;
                stats->rx_bytes += priv->rx_ring[i]->bytes;
+               sw_rx_dropped += priv->rx_ring[i]->dropped;
                priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
                priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
                priv->port_stats.rx_chksum_complete += priv->rx_ring[i]->csum_complete;
@@ -236,7 +238,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
                                          &mlx4_en_stats->MCAST_prio_1,
                                          NUM_PRIORITIES);
        stats->collisions = 0;
-       stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP);
+       stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) +
+                           sw_rx_dropped;
        stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
        stats->rx_over_errors = 0;
        stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
index 02e925d6f7348d774fe70c4a91b5038d5a27ba35..a6b0db0e038373348fcbe05d6490fcb35bb3c7fb 100644 (file)
@@ -107,37 +107,6 @@ int mlx4_en_change_mcast_lb(struct mlx4_en_priv *priv, struct mlx4_qp *qp,
        return ret;
 }
 
-int mlx4_en_map_buffer(struct mlx4_buf *buf)
-{
-       struct page **pages;
-       int i;
-
-       if (BITS_PER_LONG == 64 || buf->nbufs == 1)
-               return 0;
-
-       pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
-       if (!pages)
-               return -ENOMEM;
-
-       for (i = 0; i < buf->nbufs; ++i)
-               pages[i] = virt_to_page(buf->page_list[i].buf);
-
-       buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
-       kfree(pages);
-       if (!buf->direct.buf)
-               return -ENOMEM;
-
-       return 0;
-}
-
-void mlx4_en_unmap_buffer(struct mlx4_buf *buf)
-{
-       if (BITS_PER_LONG == 64 || buf->nbufs == 1)
-               return;
-
-       vunmap(buf->direct.buf);
-}
-
 void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event)
 {
     return;
index 86bcfe510e4e5b930b94de462f5ff9a76d4183c4..c1b3a9c8cf3b4db9412e722e09983e72c64e3caa 100644 (file)
@@ -61,7 +61,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
                gfp_t gfp = _gfp;
 
                if (order)
-                       gfp |= __GFP_COMP | __GFP_NOWARN;
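+                       /* avoid dipping into emergency memory reserves */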
+                       gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NOMEMALLOC;
                page = alloc_pages(gfp, order);
                if (likely(page))
                        break;
@@ -126,7 +126,9 @@ out:
                        dma_unmap_page(priv->ddev, page_alloc[i].dma,
                                page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
                        page = page_alloc[i].page;
-                       set_page_count(page, 1);
+                       /* Revert changes done by mlx4_alloc_pages */
+                       page_ref_sub(page, page_alloc[i].page_size /
+                                          priv->frag_info[i].frag_stride - 1);
                        put_page(page);
                }
        }
@@ -176,7 +178,9 @@ out:
                dma_unmap_page(priv->ddev, page_alloc->dma,
                               page_alloc->page_size, PCI_DMA_FROMDEVICE);
                page = page_alloc->page;
-               set_page_count(page, 1);
+               /* Revert changes done by mlx4_alloc_pages */
+               page_ref_sub(page, page_alloc->page_size /
+                                  priv->frag_info[i].frag_stride - 1);
                put_page(page);
                page_alloc->page = NULL;
        }
@@ -390,17 +394,11 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 
        /* Allocate HW buffers on provided NUMA node */
        set_dev_node(&mdev->dev->persist->pdev->dev, node);
-       err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
-                                ring->buf_size, 2 * PAGE_SIZE);
+       err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
        set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
        if (err)
                goto err_info;
 
-       err = mlx4_en_map_buffer(&ring->wqres.buf);
-       if (err) {
-               en_err(priv, "Failed to map RX buffer\n");
-               goto err_hwq;
-       }
        ring->buf = ring->wqres.buf.direct.buf;
 
        ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;
@@ -408,8 +406,6 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
        *pring = ring;
        return 0;
 
-err_hwq:
-       mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
 err_info:
        vfree(ring->rx_info);
        ring->rx_info = NULL;
@@ -513,7 +509,6 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_rx_ring *ring = *pring;
 
-       mlx4_en_unmap_buffer(&ring->wqres.buf);
        mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
        vfree(ring->rx_info);
        ring->rx_info = NULL;
@@ -703,7 +698,7 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
 
        if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS)
                return -1;
-       hw_checksum = csum_add(hw_checksum, (__force __wsum)(ipv6h->nexthdr << 8));
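+       /* the pseudo-header next-header field must be added in network
+        * byte order; htons() keeps this correct on either endianness
+        */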
+       hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));
 
        csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
                                       sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
@@ -939,7 +934,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                /* GRO not possible, complete processing here */
                skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
                if (!skb) {
-                       priv->stats.rx_dropped++;
+                       ring->dropped++;
                        goto next;
                }
 
index c0d7b729623636e241f76213596c664b1e70c047..f6e61570cb2c2b53a13adfbfbccc2c00e084171b 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/vmalloc.h>
 #include <linux/tcp.h>
 #include <linux/ip.h>
+#include <linux/ipv6.h>
 #include <linux/moduleparam.h>
 
 #include "mlx4_en.h"
@@ -93,20 +94,13 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 
        /* Allocate HW buffers on provided NUMA node */
        set_dev_node(&mdev->dev->persist->pdev->dev, node);
-       err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
-                                2 * PAGE_SIZE);
+       err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
        set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
        if (err) {
                en_err(priv, "Failed allocating hwq resources\n");
                goto err_bounce;
        }
 
-       err = mlx4_en_map_buffer(&ring->wqres.buf);
-       if (err) {
-               en_err(priv, "Failed to map TX buffer\n");
-               goto err_hwq_res;
-       }
-
        ring->buf = ring->wqres.buf.direct.buf;
 
        en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d buf_size:%d dma:%llx\n",
@@ -117,7 +111,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
                                    MLX4_RESERVE_ETH_BF_QP);
        if (err) {
                en_err(priv, "failed reserving qp for TX ring\n");
-               goto err_map;
+               goto err_hwq_res;
        }
 
        err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL);
@@ -154,8 +148,6 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 
 err_reserve:
        mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
-err_map:
-       mlx4_en_unmap_buffer(&ring->wqres.buf);
 err_hwq_res:
        mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
 err_bounce:
@@ -182,7 +174,6 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
        mlx4_qp_remove(mdev->dev, &ring->qp);
        mlx4_qp_free(mdev->dev, &ring->qp);
        mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1);
-       mlx4_en_unmap_buffer(&ring->wqres.buf);
        mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
        kfree(ring->bounce_buf);
        ring->bounce_buf = NULL;
@@ -405,7 +396,6 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
        u32 packets = 0;
        u32 bytes = 0;
        int factor = priv->cqe_factor;
-       u64 timestamp = 0;
        int done = 0;
        int budget = priv->tx_work_limit;
        u32 last_nr_txbb;
@@ -445,9 +435,12 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
                new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
 
                do {
+                       u64 timestamp = 0;
+
                        txbbs_skipped += last_nr_txbb;
                        ring_index = (ring_index + last_nr_txbb) & size_mask;
-                       if (ring->tx_info[ring_index].ts_requested)
+
+                       if (unlikely(ring->tx_info[ring_index].ts_requested))
                                timestamp = mlx4_en_get_cqe_ts(cqe);
 
                        /* free next descriptor */
@@ -918,8 +911,18 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                                 tx_ind, fragptr);
 
        if (skb->encapsulation) {
-               struct iphdr *ipv4 = (struct iphdr *)skb_inner_network_header(skb);
-               if (ipv4->protocol == IPPROTO_TCP || ipv4->protocol == IPPROTO_UDP)
+               union {
+                       struct iphdr *v4;
+                       struct ipv6hdr *v6;
+                       unsigned char *hdr;
+               } ip;
+               u8 proto;
+
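+               /* the inner network header may be IPv4 or IPv6 */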
+               ip.hdr = skb_inner_network_header(skb);
+               proto = (ip.v4->version == 4) ? ip.v4->protocol :
+                                               ip.v6->nexthdr;
+
+               if (proto == IPPROTO_TCP || proto == IPPROTO_UDP)
                        op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP | MLX4_WQE_CTRL_ILP);
                else
                        op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP);
index 358f7230da589fc6e0b8b605255f126ae546d7db..12c77a70abdb451c475680c05bb2b7cf86436cdf 100644 (file)
@@ -3172,6 +3172,34 @@ static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap
        return 0;
 }
 
+static int mlx4_pci_enable_device(struct mlx4_dev *dev)
+{
+       struct pci_dev *pdev = dev->persist->pdev;
+       int err = 0;
+
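+       /* track the PCI state so enable/disable stay balanced across the error handlers */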
+       mutex_lock(&dev->persist->pci_status_mutex);
+       if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) {
+               err = pci_enable_device(pdev);
+               if (!err)
+                       dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED;
+       }
+       mutex_unlock(&dev->persist->pci_status_mutex);
+
+       return err;
+}
+
+static void mlx4_pci_disable_device(struct mlx4_dev *dev)
+{
+       struct pci_dev *pdev = dev->persist->pdev;
+
+       mutex_lock(&dev->persist->pci_status_mutex);
+       if (dev->persist->pci_status == MLX4_PCI_STATUS_ENABLED) {
+               pci_disable_device(pdev);
+               dev->persist->pci_status = MLX4_PCI_STATUS_DISABLED;
+       }
+       mutex_unlock(&dev->persist->pci_status_mutex);
+}
+
 static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
                         int total_vfs, int *nvfs, struct mlx4_priv *priv,
                         int reset_flow)
@@ -3582,7 +3610,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
 
        pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
 
-       err = pci_enable_device(pdev);
+       err = mlx4_pci_enable_device(&priv->dev);
        if (err) {
                dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
                return err;
@@ -3715,7 +3743,7 @@ err_release_regions:
        pci_release_regions(pdev);
 
 err_disable_pdev:
-       pci_disable_device(pdev);
+       mlx4_pci_disable_device(&priv->dev);
        pci_set_drvdata(pdev, NULL);
        return err;
 }
@@ -3775,6 +3803,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
        priv->pci_dev_data = id->driver_data;
        mutex_init(&dev->persist->device_state_mutex);
        mutex_init(&dev->persist->interface_state_mutex);
+       mutex_init(&dev->persist->pci_status_mutex);
 
        ret = devlink_register(devlink, &pdev->dev);
        if (ret)
@@ -3923,7 +3952,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
        }
 
        pci_release_regions(pdev);
-       pci_disable_device(pdev);
+       mlx4_pci_disable_device(dev);
        devlink_unregister(devlink);
        kfree(dev->persist);
        devlink_free(devlink);
@@ -4042,7 +4071,7 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;
 
-       pci_disable_device(pdev);
+       mlx4_pci_disable_device(persist->dev);
        return PCI_ERS_RESULT_NEED_RESET;
 }
 
@@ -4050,45 +4079,53 @@ static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
 {
        struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
        struct mlx4_dev  *dev  = persist->dev;
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       int               ret;
-       int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
-       int total_vfs;
+       int err;
 
        mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
-       ret = pci_enable_device(pdev);
-       if (ret) {
-               mlx4_err(dev, "Can not re-enable device, ret=%d\n", ret);
+       err = mlx4_pci_enable_device(dev);
+       if (err) {
+               mlx4_err(dev, "Can not re-enable device, err=%d\n", err);
                return PCI_ERS_RESULT_DISCONNECT;
        }
 
        pci_set_master(pdev);
        pci_restore_state(pdev);
        pci_save_state(pdev);
+       return PCI_ERS_RESULT_RECOVERED;
+}
 
+static void mlx4_pci_resume(struct pci_dev *pdev)
+{
+       struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+       struct mlx4_dev  *dev  = persist->dev;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+       int total_vfs;
+       int err;
+
+       mlx4_err(dev, "%s was called\n", __func__);
        total_vfs = dev->persist->num_vfs;
        memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
 
        mutex_lock(&persist->interface_state_mutex);
        if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
-               ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
+               err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
                                    priv, 1);
-               if (ret) {
-                       mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n",
-                                __func__,  ret);
+               if (err) {
+                       mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n",
+                                __func__,  err);
                        goto end;
                }
 
-               ret = restore_current_port_types(dev, dev->persist->
+               err = restore_current_port_types(dev, dev->persist->
                                                 curr_port_type, dev->persist->
                                                 curr_port_poss_type);
-               if (ret)
-                       mlx4_err(dev, "could not restore original port types (%d)\n", ret);
+               if (err)
+                       mlx4_err(dev, "could not restore original port types (%d)\n", err);
        }
 end:
        mutex_unlock(&persist->interface_state_mutex);
 
-       return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
 }
 
 static void mlx4_shutdown(struct pci_dev *pdev)
@@ -4105,6 +4142,7 @@ static void mlx4_shutdown(struct pci_dev *pdev)
 static const struct pci_error_handlers mlx4_err_handler = {
        .error_detected = mlx4_pci_err_detected,
        .slot_reset     = mlx4_pci_slot_reset,
+       .resume         = mlx4_pci_resume,
 };
 
 static struct pci_driver mlx4_driver = {
index ef9683101eada7081163b7f6bf1473c18bd63da2..c9d7fc5159f2fb84e62496d810828e65324e7403 100644 (file)
@@ -586,6 +586,8 @@ struct mlx4_mfunc_master_ctx {
        struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
        int                     init_port_ref[MLX4_MAX_PORTS + 1];
        u16                     max_mtu[MLX4_MAX_PORTS + 1];
+       u8                      pptx;
+       u8                      pprx;
        int                     disable_mcast_ref[MLX4_MAX_PORTS + 1];
        struct mlx4_resource_tracker res_tracker;
        struct workqueue_struct *comm_wq;
index d12ab6a733446c52ba2afb037768bb2dd43f3515..cc84e09f324a42aa526be4f1e5a74f1c88899791 100644 (file)
@@ -323,6 +323,7 @@ struct mlx4_en_rx_ring {
        unsigned long csum_ok;
        unsigned long csum_none;
        unsigned long csum_complete;
+       unsigned long dropped;
        int hwtstamp_rx_filter;
        cpumask_var_t affinity_mask;
 };
@@ -671,8 +672,6 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
                int is_tx, int rss, int qpn, int cqn, int user_prio,
                struct mlx4_qp_context *context);
 void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event);
-int mlx4_en_map_buffer(struct mlx4_buf *buf);
-void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
 int mlx4_en_change_mcast_lb(struct mlx4_en_priv *priv, struct mlx4_qp *qp,
                            int loopback);
 void mlx4_en_calc_rx_buf(struct net_device *dev);
index 211c65087997dd5a92cc53a13f9fd3869927d09d..087b23b320cb44ff88dce362f47437de725dfeb5 100644 (file)
@@ -1317,6 +1317,19 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
                        }
 
                        gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
+                       /* Slave cannot change Global Pause configuration */
+                       if (slave != mlx4_master_func_num(dev) &&
+                           ((gen_context->pptx != master->pptx) ||
+                            (gen_context->pprx != master->pprx))) {
+                               gen_context->pptx = master->pptx;
+                               gen_context->pprx = master->pprx;
+                               mlx4_warn(dev,
+                                         "denying Global Pause change for slave:%d\n",
+                                         slave);
+                       } else {
+                               master->pptx = gen_context->pptx;
+                               master->pprx = gen_context->pprx;
+                       }
                        break;
                case MLX4_SET_PORT_GID_TABLE:
                        /* change to MULTIPLE entries: number of guest's gids
index 4fc45ee0c5d165c1b723002cf83472024d1ca393..b531d4f3c00b5a2b397d97f11c592b4119e36289 100644 (file)
@@ -6,6 +6,6 @@ mlx5_core-y :=  main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
 
 mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \
                en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \
-               en_txrx.o en_clock.o vxlan.o en_tc.o
+               en_txrx.o en_clock.o vxlan.o en_tc.o en_arfs.o
 
 mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) +=  en_dcbnl.o
index 879e6276c4736cb6f14b49db220e102c66636092..e8a6c3325b396c6295db60c833309c4e4102e780 100644 (file)
@@ -46,6 +46,9 @@
 #include <linux/rhashtable.h>
 #include "wq.h"
 #include "mlx5_core.h"
+#include "en_stats.h"
+
+#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
 
 #define MLX5E_MAX_NUM_TC       8
 
 #define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE                0xa
 #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE                0xd
 
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW            0x1
+#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW            0x4
+#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW            0x6
+
+#define MLX5_MPWRQ_LOG_STRIDE_SIZE             6  /* >= 6, HW restriction */
+#define MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS        8  /* >= 6, HW restriction */
+#define MLX5_MPWRQ_LOG_WQE_SZ                  17
+#define MLX5_MPWRQ_WQE_PAGE_ORDER  (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
+                                   MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
+#define MLX5_MPWRQ_PAGES_PER_WQE               BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
+#define MLX5_MPWRQ_STRIDES_PER_PAGE            (MLX5_MPWRQ_NUM_STRIDES >> \
+                                                MLX5_MPWRQ_WQE_PAGE_ORDER)
+#define MLX5_CHANNEL_MAX_NUM_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8) * \
+                                  BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW))
+#define MLX5_UMR_ALIGN                         (2048)
+#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD      (128)
+
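
Worked numbers for the MPWRQ macros above, assuming 4 KiB pages (PAGE_SHIFT = 12): a multi-packet WQE spans 2^17 bytes, so MLX5_MPWRQ_WQE_PAGE_ORDER = 17 - 12 = 5 and one WQE covers BIT(5) = 32 pages; with the default 64 B stride (LOG_STRIDE_SIZE = 6) that is 2^(17-6) = 2048 strides per WQE. A self-contained userspace check of the arithmetic:

    #include <stdio.h>

    #define PAGE_SHIFT    12   /* assumption: 4 KiB pages */
    #define LOG_WQE_SZ    17   /* MLX5_MPWRQ_LOG_WQE_SZ */
    #define LOG_STRIDE_SZ 6    /* MLX5_MPWRQ_LOG_STRIDE_SIZE */

    int main(void)
    {
            int order = LOG_WQE_SZ - PAGE_SHIFT > 0 ? LOG_WQE_SZ - PAGE_SHIFT : 0;

            printf("page order:      %d\n", order);      /* 5  */
            printf("pages per WQE:   %d\n", 1 << order); /* 32 */
            printf("strides per WQE: %d\n",
                   1 << (LOG_WQE_SZ - LOG_STRIDE_SZ));   /* 2048 */
            return 0;
    }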
 #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ                 (64 * 1024)
 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC      0x10
 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS      0x20
 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC      0x10
 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS      0x20
 #define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES                0x80
+#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW            0x2
 
 #define MLX5E_LOG_INDIR_RQT_SIZE       0x7
 #define MLX5E_INDIR_RQT_SIZE           BIT(MLX5E_LOG_INDIR_RQT_SIZE)
 
 #define MLX5E_NUM_MAIN_GROUPS 9
 
+static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
+{
+       switch (wq_type) {
+       case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+               return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW,
+                            wq_size / 2);
+       default:
+               return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES,
+                            wq_size / 2);
+       }
+}
+
+static inline int mlx5_min_log_rq_size(int wq_type)
+{
+       switch (wq_type) {
+       case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+               return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
+       default:
+               return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
+       }
+}
+
+static inline int mlx5_max_log_rq_size(int wq_type)
+{
+       switch (wq_type) {
+       case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+               return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW;
+       default:
+               return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
+       }
+}
+
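
The three helpers above let callers size the RQ without special-casing the receive mode: clamp log_rq_size into the [min, max] range for the active WQ type, then derive the re-post watermark. A hedged usage sketch (the foo_* call site is hypothetical; the helpers and mlx5e_params fields are the ones declared in this header):

    static u16 foo_rq_min_wqes(struct mlx5e_params *p)
    {
            u8 log_sz = clamp_t(u8, p->log_rq_size,
                                mlx5_min_log_rq_size(p->rq_wq_type),
                                mlx5_max_log_rq_size(p->rq_wq_type));

            /* e.g. striding RQ: min(MPW default of 2, wq_size / 2) */
            return mlx5_min_rx_wqes(p->rq_wq_type, 1 << log_sz);
    }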
+struct mlx5e_tx_wqe {
+       struct mlx5_wqe_ctrl_seg ctrl;
+       struct mlx5_wqe_eth_seg  eth;
+};
+
+struct mlx5e_rx_wqe {
+       struct mlx5_wqe_srq_next_seg  next;
+       struct mlx5_wqe_data_seg      data;
+};
+
+struct mlx5e_umr_wqe {
+       struct mlx5_wqe_ctrl_seg       ctrl;
+       struct mlx5_wqe_umr_ctrl_seg   uctrl;
+       struct mlx5_mkey_seg           mkc;
+       struct mlx5_wqe_data_seg       data;
+};
+
 #ifdef CONFIG_MLX5_CORE_EN_DCB
 #define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
 #define MLX5E_MIN_BW_ALLOC 1   /* Min percentage of BW allocation */
 #endif
 
-static const char vport_strings[][ETH_GSTRING_LEN] = {
-       /* vport statistics */
-       "rx_packets",
-       "rx_bytes",
-       "tx_packets",
-       "tx_bytes",
-       "rx_error_packets",
-       "rx_error_bytes",
-       "tx_error_packets",
-       "tx_error_bytes",
-       "rx_unicast_packets",
-       "rx_unicast_bytes",
-       "tx_unicast_packets",
-       "tx_unicast_bytes",
-       "rx_multicast_packets",
-       "rx_multicast_bytes",
-       "tx_multicast_packets",
-       "tx_multicast_bytes",
-       "rx_broadcast_packets",
-       "rx_broadcast_bytes",
-       "tx_broadcast_packets",
-       "tx_broadcast_bytes",
-
-       /* SW counters */
-       "tso_packets",
-       "tso_bytes",
-       "tso_inner_packets",
-       "tso_inner_bytes",
-       "lro_packets",
-       "lro_bytes",
-       "rx_csum_good",
-       "rx_csum_none",
-       "rx_csum_sw",
-       "tx_csum_offload",
-       "tx_csum_inner",
-       "tx_queue_stopped",
-       "tx_queue_wake",
-       "tx_queue_dropped",
-       "rx_wqe_err",
-};
-
-struct mlx5e_vport_stats {
-       /* HW counters */
-       u64 rx_packets;
-       u64 rx_bytes;
-       u64 tx_packets;
-       u64 tx_bytes;
-       u64 rx_error_packets;
-       u64 rx_error_bytes;
-       u64 tx_error_packets;
-       u64 tx_error_bytes;
-       u64 rx_unicast_packets;
-       u64 rx_unicast_bytes;
-       u64 tx_unicast_packets;
-       u64 tx_unicast_bytes;
-       u64 rx_multicast_packets;
-       u64 rx_multicast_bytes;
-       u64 tx_multicast_packets;
-       u64 tx_multicast_bytes;
-       u64 rx_broadcast_packets;
-       u64 rx_broadcast_bytes;
-       u64 tx_broadcast_packets;
-       u64 tx_broadcast_bytes;
-
-       /* SW counters */
-       u64 tso_packets;
-       u64 tso_bytes;
-       u64 tso_inner_packets;
-       u64 tso_inner_bytes;
-       u64 lro_packets;
-       u64 lro_bytes;
-       u64 rx_csum_good;
-       u64 rx_csum_none;
-       u64 rx_csum_sw;
-       u64 tx_csum_offload;
-       u64 tx_csum_inner;
-       u64 tx_queue_stopped;
-       u64 tx_queue_wake;
-       u64 tx_queue_dropped;
-       u64 rx_wqe_err;
-
-#define NUM_VPORT_COUNTERS     35
-};
-
-static const char pport_strings[][ETH_GSTRING_LEN] = {
-       /* IEEE802.3 counters */
-       "frames_tx",
-       "frames_rx",
-       "check_seq_err",
-       "alignment_err",
-       "octets_tx",
-       "octets_received",
-       "multicast_xmitted",
-       "broadcast_xmitted",
-       "multicast_rx",
-       "broadcast_rx",
-       "in_range_len_errors",
-       "out_of_range_len",
-       "too_long_errors",
-       "symbol_err",
-       "mac_control_tx",
-       "mac_control_rx",
-       "unsupported_op_rx",
-       "pause_ctrl_rx",
-       "pause_ctrl_tx",
-
-       /* RFC2863 counters */
-       "in_octets",
-       "in_ucast_pkts",
-       "in_discards",
-       "in_errors",
-       "in_unknown_protos",
-       "out_octets",
-       "out_ucast_pkts",
-       "out_discards",
-       "out_errors",
-       "in_multicast_pkts",
-       "in_broadcast_pkts",
-       "out_multicast_pkts",
-       "out_broadcast_pkts",
-
-       /* RFC2819 counters */
-       "drop_events",
-       "octets",
-       "pkts",
-       "broadcast_pkts",
-       "multicast_pkts",
-       "crc_align_errors",
-       "undersize_pkts",
-       "oversize_pkts",
-       "fragments",
-       "jabbers",
-       "collisions",
-       "p64octets",
-       "p65to127octets",
-       "p128to255octets",
-       "p256to511octets",
-       "p512to1023octets",
-       "p1024to1518octets",
-       "p1519to2047octets",
-       "p2048to4095octets",
-       "p4096to8191octets",
-       "p8192to10239octets",
-};
-
-#define NUM_IEEE_802_3_COUNTERS                19
-#define NUM_RFC_2863_COUNTERS          13
-#define NUM_RFC_2819_COUNTERS          21
-#define NUM_PPORT_COUNTERS             (NUM_IEEE_802_3_COUNTERS + \
-                                        NUM_RFC_2863_COUNTERS + \
-                                        NUM_RFC_2819_COUNTERS)
-
-struct mlx5e_pport_stats {
-       __be64 IEEE_802_3_counters[NUM_IEEE_802_3_COUNTERS];
-       __be64 RFC_2863_counters[NUM_RFC_2863_COUNTERS];
-       __be64 RFC_2819_counters[NUM_RFC_2819_COUNTERS];
-};
-
-static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
-       "packets",
-       "bytes",
-       "csum_none",
-       "csum_sw",
-       "lro_packets",
-       "lro_bytes",
-       "wqe_err"
-};
-
-struct mlx5e_rq_stats {
-       u64 packets;
-       u64 bytes;
-       u64 csum_none;
-       u64 csum_sw;
-       u64 lro_packets;
-       u64 lro_bytes;
-       u64 wqe_err;
-#define NUM_RQ_STATS 7
-};
-
-static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
-       "packets",
-       "bytes",
-       "tso_packets",
-       "tso_bytes",
-       "tso_inner_packets",
-       "tso_inner_bytes",
-       "csum_offload_inner",
-       "nop",
-       "csum_offload_none",
-       "stopped",
-       "wake",
-       "dropped",
-};
-
-struct mlx5e_sq_stats {
-       /* commonly accessed in data path */
-       u64 packets;
-       u64 bytes;
-       u64 tso_packets;
-       u64 tso_bytes;
-       u64 tso_inner_packets;
-       u64 tso_inner_bytes;
-       u64 csum_offload_inner;
-       u64 nop;
-       /* less likely accessed in data path */
-       u64 csum_offload_none;
-       u64 stopped;
-       u64 wake;
-       u64 dropped;
-#define NUM_SQ_STATS 12
-};
-
-struct mlx5e_stats {
-       struct mlx5e_vport_stats   vport;
-       struct mlx5e_pport_stats   pport;
-};
-
 struct mlx5e_params {
        u8  log_sq_size;
+       u8  rq_wq_type;
+       u8  mpwqe_log_stride_sz;
+       u8  mpwqe_log_num_strides;
        u8  log_rq_size;
        u16 num_channels;
        u8  num_tc;
+       bool rx_cqe_compress_admin;
+       bool rx_cqe_compress;
        u16 rx_cq_moderation_usec;
        u16 rx_cq_moderation_pkts;
        u16 tx_cq_moderation_usec;
@@ -311,6 +169,7 @@ struct mlx5e_params {
        u8  rss_hfunc;
        u8  toeplitz_hash_key[40];
        u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
+       bool vlan_strip_disable;
 #ifdef CONFIG_MLX5_CORE_EN_DCB
        struct ieee_ets ets;
 #endif
@@ -331,6 +190,7 @@ struct mlx5e_tstamp {
 
 enum {
        MLX5E_RQ_STATE_POST_WQES_ENABLE,
+       MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
 };
 
 struct mlx5e_cq {
@@ -343,32 +203,88 @@ struct mlx5e_cq {
        struct mlx5e_channel      *channel;
        struct mlx5e_priv         *priv;
 
+       /* cqe decompression */
+       struct mlx5_cqe64          title;
+       struct mlx5_mini_cqe8      mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
+       u8                         mini_arr_idx;
+       u16                        decmprs_left;
+       u16                        decmprs_wqe_counter;
+
        /* control */
        struct mlx5_wq_ctrl        wq_ctrl;
 } ____cacheline_aligned_in_smp;
 
+struct mlx5e_rq;
+typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq *rq,
+                                      struct mlx5_cqe64 *cqe);
+typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe,
+                                 u16 ix);
+
+struct mlx5e_dma_info {
+       struct page     *page;
+       dma_addr_t      addr;
+};
+
 struct mlx5e_rq {
        /* data path */
        struct mlx5_wq_ll      wq;
        u32                    wqe_sz;
        struct sk_buff       **skb;
+       struct mlx5e_mpw_info *wqe_info;
+       __be32                 mkey_be;
+       __be32                 umr_mkey_be;
 
        struct device         *pdev;
        struct net_device     *netdev;
        struct mlx5e_tstamp   *tstamp;
        struct mlx5e_rq_stats  stats;
        struct mlx5e_cq        cq;
+       mlx5e_fp_handle_rx_cqe handle_rx_cqe;
+       mlx5e_fp_alloc_wqe     alloc_wqe;
 
        unsigned long          state;
        int                    ix;
 
        /* control */
        struct mlx5_wq_ctrl    wq_ctrl;
+       u8                     wq_type;
+       u32                    mpwqe_stride_sz;
+       u32                    mpwqe_num_strides;
        u32                    rqn;
        struct mlx5e_channel  *channel;
        struct mlx5e_priv     *priv;
 } ____cacheline_aligned_in_smp;
 
+struct mlx5e_umr_dma_info {
+       __be64                *mtt;
+       __be64                *mtt_no_align;
+       dma_addr_t             mtt_addr;
+       struct mlx5e_dma_info *dma_info;
+};
+
+struct mlx5e_mpw_info {
+       union {
+               struct mlx5e_dma_info     dma_info;
+               struct mlx5e_umr_dma_info umr;
+       };
+       u16 consumed_strides;
+       u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
+
+       void (*dma_pre_sync)(struct device *pdev,
+                            struct mlx5e_mpw_info *wi,
+                            u32 wqe_offset, u32 len);
+       void (*add_skb_frag)(struct mlx5e_rq *rq,
+                            struct sk_buff *skb,
+                            struct mlx5e_mpw_info *wi,
+                            u32 page_idx, u32 frag_offset, u32 len);
+       void (*copy_skb_header)(struct device *pdev,
+                               struct sk_buff *skb,
+                               struct mlx5e_mpw_info *wi,
+                               u32 page_idx, u32 offset,
+                               u32 headlen);
+       void (*free_wqe)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
+};
+
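
mlx5e_mpw_info carries per-WQE function pointers so the RX fast path can serve both backing layouts -- one contiguous buffer (dma_info) or a UMR-mapped page list (umr) -- through the same four operations. A plausible selection sketch (the chooser and the foo_*_linear/_fragmented implementations are hypothetical; only the op fields match the struct above):

    static void foo_set_mpwqe_ops(struct mlx5e_mpw_info *wi, bool linear)
    {
            if (linear) {           /* single high-order allocation succeeded */
                    wi->dma_pre_sync    = foo_dma_pre_sync_linear;
                    wi->add_skb_frag    = foo_add_skb_frag_linear;
                    wi->copy_skb_header = foo_copy_skb_header_linear;
                    wi->free_wqe        = foo_free_linear;
            } else {                /* fall back to UMR-mapped fragments */
                    wi->dma_pre_sync    = foo_dma_pre_sync_fragmented;
                    wi->add_skb_frag    = foo_add_skb_frag_fragmented;
                    wi->copy_skb_header = foo_copy_skb_header_fragmented;
                    wi->free_wqe        = foo_free_fragmented;
            }
    }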
 struct mlx5e_tx_wqe_info {
        u32 num_bytes;
        u8  num_wqebbs;
@@ -391,6 +307,11 @@ enum {
        MLX5E_SQ_STATE_BF_ENABLE,
 };
 
+struct mlx5e_ico_wqe_info {
+       u8  opcode;
+       u8  num_wqebbs;
+};
+
 struct mlx5e_sq {
        /* data path */
 
@@ -432,6 +353,7 @@ struct mlx5e_sq {
        struct mlx5_uar            uar;
        struct mlx5e_channel      *channel;
        int                        tc;
+       struct mlx5e_ico_wqe_info *ico_wqe_info;
 } ____cacheline_aligned_in_smp;
 
 static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
@@ -448,6 +370,7 @@ struct mlx5e_channel {
        /* data path */
        struct mlx5e_rq            rq;
        struct mlx5e_sq            sq[MLX5E_MAX_NUM_TC];
+       struct mlx5e_sq            icosq;   /* internal control operations */
        struct napi_struct         napi;
        struct device             *pdev;
        struct net_device         *netdev;
@@ -474,42 +397,42 @@ enum mlx5e_traffic_types {
        MLX5E_TT_IPV6,
        MLX5E_TT_ANY,
        MLX5E_NUM_TT,
+       MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY,
 };
 
-#define IS_HASHING_TT(tt) (tt != MLX5E_TT_ANY)
+enum {
+       MLX5E_STATE_ASYNC_EVENTS_ENABLE,
+       MLX5E_STATE_OPENED,
+       MLX5E_STATE_DESTROYING,
+};
 
-enum mlx5e_rqt_ix {
-       MLX5E_INDIRECTION_RQT,
-       MLX5E_SINGLE_RQ_RQT,
-       MLX5E_NUM_RQT,
+struct mlx5e_vxlan_db {
+       spinlock_t                      lock; /* protect vxlan table */
+       struct radix_tree_root          tree;
 };
 
-struct mlx5e_eth_addr_info {
+struct mlx5e_l2_rule {
        u8  addr[ETH_ALEN + 2];
-       u32 tt_vec;
-       struct mlx5_flow_rule *ft_rule[MLX5E_NUM_TT];
+       struct mlx5_flow_rule *rule;
 };
 
-#define MLX5E_ETH_ADDR_HASH_SIZE (1 << BITS_PER_BYTE)
-
-struct mlx5e_eth_addr_db {
-       struct hlist_head          netdev_uc[MLX5E_ETH_ADDR_HASH_SIZE];
-       struct hlist_head          netdev_mc[MLX5E_ETH_ADDR_HASH_SIZE];
-       struct mlx5e_eth_addr_info broadcast;
-       struct mlx5e_eth_addr_info allmulti;
-       struct mlx5e_eth_addr_info promisc;
-       bool                       broadcast_enabled;
-       bool                       allmulti_enabled;
-       bool                       promisc_enabled;
+struct mlx5e_flow_table {
+       int num_groups;
+       struct mlx5_flow_table *t;
+       struct mlx5_flow_group **g;
 };
 
-enum {
-       MLX5E_STATE_ASYNC_EVENTS_ENABLE,
-       MLX5E_STATE_OPENED,
-       MLX5E_STATE_DESTROYING,
+#define MLX5E_L2_ADDR_HASH_SIZE BIT(BITS_PER_BYTE)
+
+struct mlx5e_tc_table {
+       struct mlx5_flow_table          *t;
+
+       struct rhashtable_params        ht_params;
+       struct rhashtable               ht;
 };
 
-struct mlx5e_vlan_db {
+struct mlx5e_vlan_table {
+       struct mlx5e_flow_table         ft;
        unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
        struct mlx5_flow_rule   *active_vlans_rule[VLAN_N_VID];
        struct mlx5_flow_rule   *untagged_rule;
@@ -517,29 +440,74 @@ struct mlx5e_vlan_db {
        bool          filter_disabled;
 };
 
-struct mlx5e_vxlan_db {
-       spinlock_t                      lock; /* protect vxlan table */
-       struct radix_tree_root          tree;
+struct mlx5e_l2_table {
+       struct mlx5e_flow_table    ft;
+       struct hlist_head          netdev_uc[MLX5E_L2_ADDR_HASH_SIZE];
+       struct hlist_head          netdev_mc[MLX5E_L2_ADDR_HASH_SIZE];
+       struct mlx5e_l2_rule       broadcast;
+       struct mlx5e_l2_rule       allmulti;
+       struct mlx5e_l2_rule       promisc;
+       bool                       broadcast_enabled;
+       bool                       allmulti_enabled;
+       bool                       promisc_enabled;
 };
 
-struct mlx5e_flow_table {
-       int num_groups;
-       struct mlx5_flow_table          *t;
-       struct mlx5_flow_group          **g;
+/* L3/L4 traffic type classifier */
+struct mlx5e_ttc_table {
+       struct mlx5e_flow_table  ft;
+       struct mlx5_flow_rule    *rules[MLX5E_NUM_TT];
 };
 
-struct mlx5e_tc_flow_table {
-       struct mlx5_flow_table          *t;
+#define ARFS_HASH_SHIFT BITS_PER_BYTE
+#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)
+struct arfs_table {
+       struct mlx5e_flow_table  ft;
+       struct mlx5_flow_rule    *default_rule;
+       struct hlist_head        rules_hash[ARFS_HASH_SIZE];
+};
 
-       struct rhashtable_params        ht_params;
-       struct rhashtable               ht;
+enum arfs_type {
+       ARFS_IPV4_TCP,
+       ARFS_IPV6_TCP,
+       ARFS_IPV4_UDP,
+       ARFS_IPV6_UDP,
+       ARFS_NUM_TYPES,
+};
+
+struct mlx5e_arfs_tables {
+       struct arfs_table arfs_tables[ARFS_NUM_TYPES];
+       /* Protect aRFS rules list */
+       spinlock_t                     arfs_lock;
+       struct list_head               rules;
+       int                            last_filter_id;
+       struct workqueue_struct        *wq;
+};
+
+/* NIC prio FTS */
+enum {
+       MLX5E_VLAN_FT_LEVEL = 0,
+       MLX5E_L2_FT_LEVEL,
+       MLX5E_TTC_FT_LEVEL,
+       MLX5E_ARFS_FT_LEVEL
 };
 
-struct mlx5e_flow_tables {
-       struct mlx5_flow_namespace      *ns;
-       struct mlx5e_tc_flow_table      tc;
-       struct mlx5e_flow_table         vlan;
-       struct mlx5e_flow_table         main;
+struct mlx5e_flow_steering {
+       struct mlx5_flow_namespace      *ns;
+       struct mlx5e_tc_table           tc;
+       struct mlx5e_vlan_table         vlan;
+       struct mlx5e_l2_table           l2;
+       struct mlx5e_ttc_table          ttc;
+       struct mlx5e_arfs_tables        arfs;
+};
+
+struct mlx5e_direct_tir {
+       u32              tirn;
+       u32              rqtn;
+};
+
+enum {
+       MLX5E_TC_PRIO = 0,
+       MLX5E_NIC_PRIO
 };
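
The enums above define the new flow-steering hierarchy: within the NIC priority, packets traverse the tables in level order, VLAN (0) -> L2 -> TTC -> aRFS. The level is passed straight to table creation, exactly as en_arfs.c does below; a minimal sketch (table_size is a placeholder; the call signature matches the arfs_create_table() hunk further down):

    struct mlx5_flow_table *ft;

    ft = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
                                table_size, MLX5E_VLAN_FT_LEVEL);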
 
 struct mlx5e_priv {
@@ -554,19 +522,20 @@ struct mlx5e_priv {
        u32                        pdn;
        u32                        tdn;
        struct mlx5_core_mkey      mkey;
+       struct mlx5_core_mkey      umr_mkey;
        struct mlx5e_rq            drop_rq;
 
        struct mlx5e_channel     **channel;
        u32                        tisn[MLX5E_MAX_NUM_TC];
-       u32                        rqtn[MLX5E_NUM_RQT];
-       u32                        tirn[MLX5E_NUM_TT];
+       u32                        indir_rqtn;
+       u32                        indir_tirn[MLX5E_NUM_INDIR_TIRS];
+       struct mlx5e_direct_tir    direct_tir[MLX5E_MAX_NUM_CHANNELS];
 
-       struct mlx5e_flow_tables   fts;
-       struct mlx5e_eth_addr_db   eth_addr;
-       struct mlx5e_vlan_db       vlan;
+       struct mlx5e_flow_steering fs;
        struct mlx5e_vxlan_db      vxlan;
 
        struct mlx5e_params        params;
+       struct workqueue_struct    *wq;
        struct work_struct         update_carrier_work;
        struct work_struct         set_rx_mode_work;
        struct delayed_work        update_stats_work;
@@ -575,18 +544,7 @@ struct mlx5e_priv {
        struct net_device         *netdev;
        struct mlx5e_stats         stats;
        struct mlx5e_tstamp        tstamp;
-};
-
-#define MLX5E_NET_IP_ALIGN 2
-
-struct mlx5e_tx_wqe {
-       struct mlx5_wqe_ctrl_seg ctrl;
-       struct mlx5_wqe_eth_seg  eth;
-};
-
-struct mlx5e_rx_wqe {
-       struct mlx5_wqe_srq_next_seg  next;
-       struct mlx5_wqe_data_seg      data;
+       u16 q_counter;
 };
 
 enum mlx5e_link_mode {
@@ -609,7 +567,7 @@ enum mlx5e_link_mode {
        MLX5E_100GBASE_KR4       = 22,
        MLX5E_100GBASE_LR4       = 23,
        MLX5E_100BASE_TX         = 24,
-       MLX5E_100BASE_T          = 25,
+       MLX5E_1000BASE_T         = 25,
        MLX5E_10GBASE_T          = 26,
        MLX5E_25GBASE_CR         = 27,
        MLX5E_25GBASE_KR         = 28,
@@ -631,14 +589,35 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
 int mlx5e_napi_poll(struct napi_struct *napi, int budget);
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
+
+void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
+int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
+int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
+void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq);
+void mlx5e_complete_rx_linear_mpwqe(struct mlx5e_rq *rq,
+                                   struct mlx5_cqe64 *cqe,
+                                   u16 byte_cnt,
+                                   struct mlx5e_mpw_info *wi,
+                                   struct sk_buff *skb);
+void mlx5e_complete_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
+                                       struct mlx5_cqe64 *cqe,
+                                       u16 byte_cnt,
+                                       struct mlx5e_mpw_info *wi,
+                                       struct sk_buff *skb);
+void mlx5e_free_rx_linear_mpwqe(struct mlx5e_rq *rq,
+                               struct mlx5e_mpw_info *wi);
+void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
+                                   struct mlx5e_mpw_info *wi);
 struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
 
 void mlx5e_update_stats(struct mlx5e_priv *priv);
 
-int mlx5e_create_flow_tables(struct mlx5e_priv *priv);
-void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv);
-void mlx5e_init_eth_addr(struct mlx5e_priv *priv);
+int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
+void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
+void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
+void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
 void mlx5e_set_rx_mode_work(struct work_struct *work);
 
 void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp,
@@ -647,6 +626,7 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv);
 void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv);
 int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr);
 int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr);
+void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val);
 
 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
                          u16 vid);
@@ -655,16 +635,20 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
 void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
 void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
 
-int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix);
+int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);
+
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
 void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);
 
 int mlx5e_open_locked(struct net_device *netdev);
 int mlx5e_close_locked(struct net_device *netdev);
-void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
+void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
+                                  u32 *indirection_rqt, int len,
                                   int num_channels);
+int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
 
 static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
-                                     struct mlx5e_tx_wqe *wqe, int bf_sz)
+                                     struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz)
 {
        u16 ofst = MLX5_BF_OFFSET + sq->bf_offset;
 
@@ -678,9 +662,9 @@ static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
         */
        wmb();
        if (bf_sz)
-               __iowrite64_copy(sq->uar_map + ofst, &wqe->ctrl, bf_sz);
+               __iowrite64_copy(sq->uar_map + ofst, ctrl, bf_sz);
        else
-               mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL);
+               mlx5_write64((__be32 *)ctrl, sq->uar_map + ofst, NULL);
        /* flush the write-combining mapped buffer */
        wmb();
 
@@ -701,12 +685,43 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
                     MLX5E_MAX_NUM_CHANNELS);
 }
 
+static inline int mlx5e_get_mtt_octw(int npages)
+{
+       return ALIGN(npages, 8) / 2;
+}
+
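
mlx5e_get_mtt_octw() converts a page count into 16-byte octwords: an MTT entry is 8 bytes, so two fit per octword, and ALIGN(npages, 8) first rounds the count up to the 8-entry granularity the UMR layout expects (the entry size is my reading of the mkey format, not stated in this diff). Worked values:

    /* mlx5e_get_mtt_octw(32) == ALIGN(32, 8) / 2 == 16 octwords */
    /* mlx5e_get_mtt_octw(5)  == ALIGN(5, 8) / 2  ==  4 octwords */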
 extern const struct ethtool_ops mlx5e_ethtool_ops;
 #ifdef CONFIG_MLX5_CORE_EN_DCB
 extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
 int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets);
 #endif
 
+#ifndef CONFIG_RFS_ACCEL
+static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
+{
+       return 0;
+}
+
+static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
+
+static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
+{
+       return -ENOTSUPP;
+}
+
+static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
+{
+       return -ENOTSUPP;
+}
+#else
+int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
+void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv);
+int mlx5e_arfs_enable(struct mlx5e_priv *priv);
+int mlx5e_arfs_disable(struct mlx5e_priv *priv);
+int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+                       u16 rxq_index, u32 flow_id);
+#endif
+
 u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
 
 #endif /* __MLX5_EN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
new file mode 100644 (file)
index 0000000..3515e78
--- /dev/null
@@ -0,0 +1,752 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifdef CONFIG_RFS_ACCEL
+
+#include <linux/hash.h>
+#include <linux/mlx5/fs.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include "en.h"
+
+struct arfs_tuple {
+       __be16 etype;
+       u8     ip_proto;
+       union {
+               __be32 src_ipv4;
+               struct in6_addr src_ipv6;
+       };
+       union {
+               __be32 dst_ipv4;
+               struct in6_addr dst_ipv6;
+       };
+       __be16 src_port;
+       __be16 dst_port;
+};
+
+struct arfs_rule {
+       struct mlx5e_priv       *priv;
+       struct work_struct      arfs_work;
+       struct mlx5_flow_rule   *rule;
+       struct hlist_node       hlist;
+       int                     rxq;
+       /* Flow ID passed to ndo_rx_flow_steer */
+       int                     flow_id;
+       /* Filter ID returned by ndo_rx_flow_steer */
+       int                     filter_id;
+       struct arfs_tuple       tuple;
+};
+
+#define mlx5e_for_each_arfs_rule(hn, tmp, arfs_tables, i, j) \
+       for (i = 0; i < ARFS_NUM_TYPES; i++) \
+               mlx5e_for_each_hash_arfs_rule(hn, tmp, arfs_tables[i].rules_hash, j)
+
+#define mlx5e_for_each_hash_arfs_rule(hn, tmp, hash, j) \
+       for (j = 0; j < ARFS_HASH_SIZE; j++) \
+               hlist_for_each_entry_safe(hn, tmp, &hash[j], hlist)
+
+static enum mlx5e_traffic_types arfs_get_tt(enum arfs_type type)
+{
+       switch (type) {
+       case ARFS_IPV4_TCP:
+               return MLX5E_TT_IPV4_TCP;
+       case ARFS_IPV4_UDP:
+               return MLX5E_TT_IPV4_UDP;
+       case ARFS_IPV6_TCP:
+               return MLX5E_TT_IPV6_TCP;
+       case ARFS_IPV6_UDP:
+               return MLX5E_TT_IPV6_UDP;
+       default:
+               return -EINVAL;
+       }
+}
+
+static int arfs_disable(struct mlx5e_priv *priv)
+{
+       struct mlx5_flow_destination dest;
+       u32 *tirn = priv->indir_tirn;
+       int err = 0;
+       int tt;
+       int i;
+
+       dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+       for (i = 0; i < ARFS_NUM_TYPES; i++) {
+               dest.tir_num = tirn[i];
+               tt = arfs_get_tt(i);
+               /* Modify ttc rules destination to bypass the aRFS tables */
+               err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
+                                                  &dest);
+               if (err) {
+                       netdev_err(priv->netdev,
+                                  "%s: modify ttc destination failed\n",
+                                  __func__);
+                       return err;
+               }
+       }
+       return 0;
+}
+
+static void arfs_del_rules(struct mlx5e_priv *priv);
+
+int mlx5e_arfs_disable(struct mlx5e_priv *priv)
+{
+       arfs_del_rules(priv);
+
+       return arfs_disable(priv);
+}
+
+int mlx5e_arfs_enable(struct mlx5e_priv *priv)
+{
+       struct mlx5_flow_destination dest;
+       int err = 0;
+       int tt;
+       int i;
+
+       dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+       for (i = 0; i < ARFS_NUM_TYPES; i++) {
+               dest.ft = priv->fs.arfs.arfs_tables[i].ft.t;
+               tt = arfs_get_tt(i);
+               /* Modify ttc rules destination to point to the aRFS FTs */
+               err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
+                                                  &dest);
+               if (err) {
+                       netdev_err(priv->netdev,
+                                  "%s: modify ttc destination failed err=%d\n",
+                                  __func__, err);
+                       arfs_disable(priv);
+                       return err;
+               }
+       }
+       return 0;
+}
+
+static void arfs_destroy_table(struct arfs_table *arfs_t)
+{
+       mlx5_del_flow_rule(arfs_t->default_rule);
+       mlx5e_destroy_flow_table(&arfs_t->ft);
+}
+
+void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv)
+{
+       int i;
+
+       if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
+               return;
+
+       arfs_del_rules(priv);
+       destroy_workqueue(priv->fs.arfs.wq);
+       for (i = 0; i < ARFS_NUM_TYPES; i++) {
+               if (!IS_ERR_OR_NULL(priv->fs.arfs.arfs_tables[i].ft.t))
+                       arfs_destroy_table(&priv->fs.arfs.arfs_tables[i]);
+       }
+}
+
+static int arfs_add_default_rule(struct mlx5e_priv *priv,
+                                enum arfs_type type)
+{
+       struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type];
+       struct mlx5_flow_destination dest;
+       u8 match_criteria_enable = 0;
+       u32 *tirn = priv->indir_tirn;
+       u32 *match_criteria;
+       u32 *match_value;
+       int err = 0;
+
+       match_value     = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+       match_criteria  = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+       if (!match_value || !match_criteria) {
+               netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+               err = -ENOMEM;
+               goto out;
+       }
+
+       dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+       switch (type) {
+       case ARFS_IPV4_TCP:
+               dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
+               break;
+       case ARFS_IPV4_UDP:
+               dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
+               break;
+       case ARFS_IPV6_TCP:
+               dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
+               break;
+       case ARFS_IPV6_UDP:
+               dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
+               break;
+       default:
+               err = -EINVAL;
+               goto out;
+       }
+
+       arfs_t->default_rule = mlx5_add_flow_rule(arfs_t->ft.t, match_criteria_enable,
+                                                 match_criteria, match_value,
+                                                 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+                                                 MLX5_FS_DEFAULT_FLOW_TAG,
+                                                 &dest);
+       if (IS_ERR(arfs_t->default_rule)) {
+               err = PTR_ERR(arfs_t->default_rule);
+               arfs_t->default_rule = NULL;
+               netdev_err(priv->netdev, "%s: add rule failed, arfs type=%d\n",
+                          __func__, type);
+       }
+out:
+       kvfree(match_criteria);
+       kvfree(match_value);
+       return err;
+}
+
+#define MLX5E_ARFS_NUM_GROUPS  2
+#define MLX5E_ARFS_GROUP1_SIZE BIT(12)
+#define MLX5E_ARFS_GROUP2_SIZE BIT(0)
+#define MLX5E_ARFS_TABLE_SIZE  (MLX5E_ARFS_GROUP1_SIZE +\
+                                MLX5E_ARFS_GROUP2_SIZE)
+static int arfs_create_groups(struct mlx5e_flow_table *ft,
+                             enum arfs_type type)
+{
+       int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+       void *outer_headers_c;
+       int ix = 0;
+       u32 *in;
+       int err;
+       u8 *mc;
+
+       ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS,
+                       sizeof(*ft->g), GFP_KERNEL);
+       in = mlx5_vzalloc(inlen);
+       if  (!in || !ft->g) {
+               kvfree(ft->g);
+               kvfree(in);
+               return -ENOMEM;
+       }
+
+       mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+       outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc,
+                                      outer_headers);
+       MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ethertype);
+       switch (type) {
+       case ARFS_IPV4_TCP:
+       case ARFS_IPV6_TCP:
+               MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport);
+               MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport);
+               break;
+       case ARFS_IPV4_UDP:
+       case ARFS_IPV6_UDP:
+               MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
+               MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_sport);
+               break;
+       default:
+               err = -EINVAL;
+               goto out;
+       }
+
+       switch (type) {
+       case ARFS_IPV4_TCP:
+       case ARFS_IPV4_UDP:
+               MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
+                                src_ipv4_src_ipv6.ipv4_layout.ipv4);
+               MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
+                                dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+               break;
+       case ARFS_IPV6_TCP:
+       case ARFS_IPV6_UDP:
+               memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+                                   src_ipv4_src_ipv6.ipv6_layout.ipv6),
+                      0xff, 16);
+               memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+                                   dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+                      0xff, 16);
+               break;
+       default:
+               err = -EINVAL;
+               goto out;
+       }
+
+       MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+       MLX5_SET_CFG(in, start_flow_index, ix);
+       ix += MLX5E_ARFS_GROUP1_SIZE;
+       MLX5_SET_CFG(in, end_flow_index, ix - 1);
+       ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+       if (IS_ERR(ft->g[ft->num_groups]))
+               goto err;
+       ft->num_groups++;
+
+       memset(in, 0, inlen);
+       MLX5_SET_CFG(in, start_flow_index, ix);
+       ix += MLX5E_ARFS_GROUP2_SIZE;
+       MLX5_SET_CFG(in, end_flow_index, ix - 1);
+       ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+       if (IS_ERR(ft->g[ft->num_groups]))
+               goto err;
+       ft->num_groups++;
+
+       kvfree(in);
+       return 0;
+
+err:
+       err = PTR_ERR(ft->g[ft->num_groups]);
+       ft->g[ft->num_groups] = NULL;
+out:
+       kvfree(in);
+
+       return err;
+}
+
+static int arfs_create_table(struct mlx5e_priv *priv,
+                            enum arfs_type type)
+{
+       struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
+       struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft;
+       int err;
+
+       ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
+                                      MLX5E_ARFS_TABLE_SIZE, MLX5E_ARFS_FT_LEVEL);
+       if (IS_ERR(ft->t)) {
+               err = PTR_ERR(ft->t);
+               ft->t = NULL;
+               return err;
+       }
+
+       err = arfs_create_groups(ft, type);
+       if (err)
+               goto err;
+
+       err = arfs_add_default_rule(priv, type);
+       if (err)
+               goto err;
+
+       return 0;
+err:
+       mlx5e_destroy_flow_table(ft);
+       return err;
+}
+
+int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
+{
+       int err = 0;
+       int i;
+
+       if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
+               return 0;
+
+       spin_lock_init(&priv->fs.arfs.arfs_lock);
+       INIT_LIST_HEAD(&priv->fs.arfs.rules);
+       priv->fs.arfs.wq = create_singlethread_workqueue("mlx5e_arfs");
+       if (!priv->fs.arfs.wq)
+               return -ENOMEM;
+
+       for (i = 0; i < ARFS_NUM_TYPES; i++) {
+               err = arfs_create_table(priv, i);
+               if (err)
+                       goto err;
+       }
+       return 0;
+err:
+       mlx5e_arfs_destroy_tables(priv);
+       return err;
+}
+
+#define MLX5E_ARFS_EXPIRY_QUOTA 60
+
+static void arfs_may_expire_flow(struct mlx5e_priv *priv)
+{
+       struct arfs_rule *arfs_rule;
+       struct hlist_node *htmp;
+       int quota = 0;
+       int i;
+       int j;
+
+       HLIST_HEAD(del_list);
+       spin_lock_bh(&priv->fs.arfs.arfs_lock);
+       mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
+               if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
+                       break;
+               if (!work_pending(&arfs_rule->arfs_work) &&
+                   rps_may_expire_flow(priv->netdev,
+                                       arfs_rule->rxq, arfs_rule->flow_id,
+                                       arfs_rule->filter_id)) {
+                       hlist_del_init(&arfs_rule->hlist);
+                       hlist_add_head(&arfs_rule->hlist, &del_list);
+               }
+       }
+       spin_unlock_bh(&priv->fs.arfs.arfs_lock);
+       hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
+               if (arfs_rule->rule)
+                       mlx5_del_flow_rule(arfs_rule->rule);
+               hlist_del(&arfs_rule->hlist);
+               kfree(arfs_rule);
+       }
+}
+
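
arfs_may_expire_flow() above (and arfs_del_rules() below) follow a two-phase teardown: move candidates onto a private list under the BH spinlock, then drop the lock before the heavyweight work, since mlx5_del_flow_rule() issues a firmware command that must not run inside the critical section. A generic, hedged sketch of the pattern with invented names:

    HLIST_HEAD(del_list);

    spin_lock_bh(&lock);
    hlist_for_each_entry_safe(e, tmp, &bucket, node) {
            if (expired(e)) {                       /* hypothetical predicate */
                    hlist_del_init(&e->node);
                    hlist_add_head(&e->node, &del_list);
            }
    }
    spin_unlock_bh(&lock);

    hlist_for_each_entry_safe(e, tmp, &del_list, node) {
            slow_cleanup(e);                        /* may sleep / talk to FW */
            hlist_del(&e->node);
            kfree(e);
    }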
+static void arfs_del_rules(struct mlx5e_priv *priv)
+{
+       struct hlist_node *htmp;
+       struct arfs_rule *rule;
+       int i;
+       int j;
+
+       HLIST_HEAD(del_list);
+       spin_lock_bh(&priv->fs.arfs.arfs_lock);
+       mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
+               hlist_del_init(&rule->hlist);
+               hlist_add_head(&rule->hlist, &del_list);
+       }
+       spin_unlock_bh(&priv->fs.arfs.arfs_lock);
+
+       hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
+               cancel_work_sync(&rule->arfs_work);
+               if (rule->rule)
+                       mlx5_del_flow_rule(rule->rule);
+               hlist_del(&rule->hlist);
+               kfree(rule);
+       }
+}
+
+static struct hlist_head *
+arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
+                __be16 dst_port)
+{
+       unsigned long l;
+       int bucket_idx;
+
+       l = (__force unsigned long)src_port |
+           ((__force unsigned long)dst_port << 2);
+
+       bucket_idx = hash_long(l, ARFS_HASH_SHIFT);
+
+       return &arfs_t->rules_hash[bucket_idx];
+}
+
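
The bucket key packs both L4 ports into one word, src_port | (dst_port << 2), and hash_long() folds it down to ARFS_HASH_SHIFT (8) bits, i.e. one of 256 buckets per table. A userspace illustration -- note the driver feeds the raw big-endian __be16 values, and toy_hash_long() below is only a stand-in for the kernel's multiplicative hash_long(), not bit-identical:

    #include <stdio.h>

    static unsigned int toy_hash_long(unsigned long val, unsigned int bits)
    {
            /* stand-in for <linux/hash.h> hash_long() */
            return (unsigned int)((val * 0x61C8864680B583EBULL) >> (64 - bits));
    }

    int main(void)
    {
            unsigned long src = 50000, dst = 443;   /* host order, illustration only */
            unsigned long key = src | (dst << 2);

            printf("bucket = %u of 256\n", toy_hash_long(key, 8));
            return 0;
    }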
+static u8 arfs_get_ip_proto(const struct sk_buff *skb)
+{
+       return (skb->protocol == htons(ETH_P_IP)) ?
+               ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
+}
+
+static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
+                                        u8 ip_proto, __be16 etype)
+{
+       if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_TCP)
+               return &arfs->arfs_tables[ARFS_IPV4_TCP];
+       if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_UDP)
+               return &arfs->arfs_tables[ARFS_IPV4_UDP];
+       if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_TCP)
+               return &arfs->arfs_tables[ARFS_IPV6_TCP];
+       if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_UDP)
+               return &arfs->arfs_tables[ARFS_IPV6_UDP];
+
+       return NULL;
+}
+
+static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv,
+                                           struct arfs_rule *arfs_rule)
+{
+       struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
+       struct arfs_tuple *tuple = &arfs_rule->tuple;
+       struct mlx5_flow_rule *rule = NULL;
+       struct mlx5_flow_destination dest;
+       struct arfs_table *arfs_table;
+       u8 match_criteria_enable = 0;
+       struct mlx5_flow_table *ft;
+       u32 *match_criteria;
+       u32 *match_value;
+       int err = 0;
+
+       match_value     = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+       match_criteria  = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+       if (!match_value || !match_criteria) {
+               netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+               err = -ENOMEM;
+               goto out;
+       }
+       match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+       MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+                        outer_headers.ethertype);
+       MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+                ntohs(tuple->etype));
+       arfs_table = arfs_get_table(arfs, tuple->ip_proto, tuple->etype);
+       if (!arfs_table) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       ft = arfs_table->ft.t;
+       if (tuple->ip_proto == IPPROTO_TCP) {
+               MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+                                outer_headers.tcp_dport);
+               MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+                                outer_headers.tcp_sport);
+               MLX5_SET(fte_match_param, match_value, outer_headers.tcp_dport,
+                        ntohs(tuple->dst_port));
+               MLX5_SET(fte_match_param, match_value, outer_headers.tcp_sport,
+                        ntohs(tuple->src_port));
+       } else {
+               MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+                                outer_headers.udp_dport);
+               MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+                                outer_headers.udp_sport);
+               MLX5_SET(fte_match_param, match_value, outer_headers.udp_dport,
+                        ntohs(tuple->dst_port));
+               MLX5_SET(fte_match_param, match_value, outer_headers.udp_sport,
+                        ntohs(tuple->src_port));
+       }
+       if (tuple->etype == htons(ETH_P_IP)) {
+               memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
+                                   outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
+                      &tuple->src_ipv4,
+                      4);
+               memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
+                                   outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+                      &tuple->dst_ipv4,
+                      4);
+               MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+                                outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
+               MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+                                outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+       } else {
+               memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
+                                   outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
+                      &tuple->src_ipv6,
+                      16);
+               memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
+                                   outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+                      &tuple->dst_ipv6,
+                      16);
+               memset(MLX5_ADDR_OF(fte_match_param, match_criteria,
+                                   outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
+                      0xff,
+                      16);
+               memset(MLX5_ADDR_OF(fte_match_param, match_criteria,
+                                   outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+                      0xff,
+                      16);
+       }
+       dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+       dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn;
+       rule = mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria,
+                                 match_value, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+                                 MLX5_FS_DEFAULT_FLOW_TAG,
+                                 &dest);
+       if (IS_ERR(rule)) {
+               err = PTR_ERR(rule);
+               netdev_err(priv->netdev, "%s: add rule(filter id=%d, rq idx=%d) failed, err=%d\n",
+                          __func__, arfs_rule->filter_id, arfs_rule->rxq, err);
+       }
+
+out:
+       kvfree(match_criteria);
+       kvfree(match_value);
+       return err ? ERR_PTR(err) : rule;
+}
+
+static void arfs_modify_rule_rq(struct mlx5e_priv *priv,
+                               struct mlx5_flow_rule *rule, u16 rxq)
+{
+       struct mlx5_flow_destination dst;
+       int err = 0;
+
+       dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+       dst.tir_num = priv->direct_tir[rxq].tirn;
+       err = mlx5_modify_rule_destination(rule, &dst);
+       if (err)
+               netdev_warn(priv->netdev,
+                           "Failed to modify aRFS rule destination to rq=%d\n", rxq);
+}
+
+static void arfs_handle_work(struct work_struct *work)
+{
+       struct arfs_rule *arfs_rule = container_of(work,
+                                                  struct arfs_rule,
+                                                  arfs_work);
+       struct mlx5e_priv *priv = arfs_rule->priv;
+       struct mlx5_flow_rule *rule;
+
+       mutex_lock(&priv->state_lock);
+       if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+               spin_lock_bh(&priv->fs.arfs.arfs_lock);
+               hlist_del(&arfs_rule->hlist);
+               spin_unlock_bh(&priv->fs.arfs.arfs_lock);
+
+               mutex_unlock(&priv->state_lock);
+               kfree(arfs_rule);
+               goto out;
+       }
+       mutex_unlock(&priv->state_lock);
+
+       if (!arfs_rule->rule) {
+               rule = arfs_add_rule(priv, arfs_rule);
+               if (IS_ERR(rule))
+                       goto out;
+               arfs_rule->rule = rule;
+       } else {
+               arfs_modify_rule_rq(priv, arfs_rule->rule,
+                                   arfs_rule->rxq);
+       }
+out:
+       arfs_may_expire_flow(priv);
+}
+
+/* Return the L4 destination port from IPv4/IPv6 packets */
+static __be16 arfs_get_dst_port(const struct sk_buff *skb)
+{
+       char *transport_header;
+
+       transport_header = skb_transport_header(skb);
+       if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
+               return ((struct tcphdr *)transport_header)->dest;
+       return ((struct udphdr *)transport_header)->dest;
+}
+
+/* Return the L4 source port from IPv4/IPv6 packets */
+static __be16 arfs_get_src_port(const struct sk_buff *skb)
+{
+       char *transport_header;
+
+       transport_header = skb_transport_header(skb);
+       if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
+               return ((struct tcphdr *)transport_header)->source;
+       return ((struct udphdr *)transport_header)->source;
+}
+
+static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
+                                        struct arfs_table *arfs_t,
+                                        const struct sk_buff *skb,
+                                        u16 rxq, u32 flow_id)
+{
+       struct arfs_rule *rule;
+       struct arfs_tuple *tuple;
+
+       rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
+       if (!rule)
+               return NULL;
+
+       rule->priv = priv;
+       rule->rxq = rxq;
+       INIT_WORK(&rule->arfs_work, arfs_handle_work);
+
+       tuple = &rule->tuple;
+       tuple->etype = skb->protocol;
+       if (tuple->etype == htons(ETH_P_IP)) {
+               tuple->src_ipv4 = ip_hdr(skb)->saddr;
+               tuple->dst_ipv4 = ip_hdr(skb)->daddr;
+       } else {
+               memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
+                      sizeof(struct in6_addr));
+               memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
+                      sizeof(struct in6_addr));
+       }
+       tuple->ip_proto = arfs_get_ip_proto(skb);
+       tuple->src_port = arfs_get_src_port(skb);
+       tuple->dst_port = arfs_get_dst_port(skb);
+
+       rule->flow_id = flow_id;
+       rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;
+
+       hlist_add_head(&rule->hlist,
+                      arfs_hash_bucket(arfs_t, tuple->src_port,
+                                       tuple->dst_port));
+       return rule;
+}
+
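
A note on the ID assignment above: rps_may_expire_flow() treats RPS_NO_FILTER (0xffff in <linux/netdevice.h>) as "no filter attached", so taking last_filter_id modulo RPS_NO_FILTER guarantees the driver never hands that sentinel back to the stack:

    /* IDs cycle 0 .. RPS_NO_FILTER - 1, never reaching the sentinel */
    rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;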
+static bool arfs_cmp_ips(struct arfs_tuple *tuple,
+                        const struct sk_buff *skb)
+{
+       if (tuple->etype == htons(ETH_P_IP) &&
+           tuple->src_ipv4 == ip_hdr(skb)->saddr &&
+           tuple->dst_ipv4 == ip_hdr(skb)->daddr)
+               return true;
+       if (tuple->etype == htons(ETH_P_IPV6) &&
+           (!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
+                    sizeof(struct in6_addr))) &&
+           (!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
+                    sizeof(struct in6_addr))))
+               return true;
+       return false;
+}
+
+static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
+                                       const struct sk_buff *skb)
+{
+       struct arfs_rule *arfs_rule;
+       struct hlist_head *head;
+       __be16 src_port = arfs_get_src_port(skb);
+       __be16 dst_port = arfs_get_dst_port(skb);
+
+       head = arfs_hash_bucket(arfs_t, src_port, dst_port);
+       hlist_for_each_entry(arfs_rule, head, hlist) {
+               if (arfs_rule->tuple.src_port == src_port &&
+                   arfs_rule->tuple.dst_port == dst_port &&
+                   arfs_cmp_ips(&arfs_rule->tuple, skb)) {
+                       return arfs_rule;
+               }
+       }
+
+       return NULL;
+}
+
+int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+                       u16 rxq_index, u32 flow_id)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
+       struct arfs_table *arfs_t;
+       struct arfs_rule *arfs_rule;
+
+       if (skb->protocol != htons(ETH_P_IP) &&
+           skb->protocol != htons(ETH_P_IPV6))
+               return -EPROTONOSUPPORT;
+
+       arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
+       if (!arfs_t)
+               return -EPROTONOSUPPORT;
+
+       spin_lock_bh(&arfs->arfs_lock);
+       arfs_rule = arfs_find_rule(arfs_t, skb);
+       if (arfs_rule) {
+               if (arfs_rule->rxq == rxq_index) {
+                       spin_unlock_bh(&arfs->arfs_lock);
+                       return arfs_rule->filter_id;
+               }
+               arfs_rule->rxq = rxq_index;
+       } else {
+               arfs_rule = arfs_alloc_rule(priv, arfs_t, skb,
+                                           rxq_index, flow_id);
+               if (!arfs_rule) {
+                       spin_unlock_bh(&arfs->arfs_lock);
+                       return -ENOMEM;
+               }
+       }
+       queue_work(priv->fs.arfs.wq, &arfs_rule->arfs_work);
+       spin_unlock_bh(&arfs->arfs_lock);
+       return arfs_rule->filter_id;
+}
+#endif
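
mlx5e_rx_flow_steer() implements the ndo_rx_flow_steer hook that the RFS-acceleration core calls when a flow migrates to another CPU; the returned filter_id is what the core later passes back through rps_may_expire_flow(). A hedged sketch of the wiring (the ops struct name is invented; the hook, config symbol and feature flag are standard kernel API, and the NETIF_F_NTUPLE gating appears in this very file):

    static const struct net_device_ops foo_netdev_ops = {
            /* ... ndo_open / ndo_stop / ndo_start_xmit ... */
    #ifdef CONFIG_RFS_ACCEL
            .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
    #endif
    };

    /* advertise the capability so `ethtool -K ethX ntuple on` can enable it */
    netdev->hw_features |= NETIF_F_NTUPLE;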
index 2018eebe1531563e036c41c19372331f8a576b14..847a8f3ac2b27fda609bd95f0accbfe85d994104 100644 (file)
@@ -93,6 +93,8 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
        /* RX HW timestamp */
        switch (config.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
+               /* Reset CQE compression to Admin default */
+               mlx5e_modify_rx_cqe_compression(priv, priv->params.rx_cqe_compress_admin);
                break;
        case HWTSTAMP_FILTER_ALL:
        case HWTSTAMP_FILTER_SOME:
@@ -108,6 +110,8 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
        case HWTSTAMP_FILTER_PTP_V2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+               /* Disable CQE compression */
+               mlx5e_modify_rx_cqe_compression(priv, false);
                config.rx_filter = HWTSTAMP_FILTER_ALL;
                break;
        default:
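
The switch above is reached via the SIOCSHWTSTAMP ioctl. Enabling any RX timestamp filter now forces CQE compression off, presumably because a compressed CQE session carries one timestamp for several packets, which would break per-packet RX timestamps (my inference; the diff only shows the toggle). A user-space sketch that lands in the PTP case above:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <net/if.h>
    #include <linux/net_tstamp.h>
    #include <linux/sockios.h>

    int request_rx_timestamps(int sock, const char *ifname)
    {
            struct hwtstamp_config cfg = {
                    .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
            };
            struct ifreq ifr;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            ifr.ifr_data = (char *)&cfg;
            return ioctl(sock, SIOCSHWTSTAMP, &ifr);
    }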
index 3036f279a8fdcc7b2c547445747be982f90180ba..b2db180ae2a5bbdda29219d63c72feea0958c12c 100644 (file)
@@ -174,8 +174,14 @@ static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev,
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;
+       struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+       int i;
 
        pfc->pfc_cap = mlx5_max_tc(mdev) + 1;
+       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+               pfc->requests[i]    = PPORT_PER_PRIO_GET(pstats, i, tx_pause);
+               pfc->indications[i] = PPORT_PER_PRIO_GET(pstats, i, rx_pause);
+       }
 
        return mlx5_query_port_pfc(mdev, &pfc->pfc_en, NULL);
 }
index 68834b715f6c114c89d6df339cb9cc81a72b8f75..fc7dcc03b1debd36c6362d887e4f90b3cefab50f 100644 (file)
@@ -138,10 +138,10 @@ static const struct {
        [MLX5E_100BASE_TX]   = {
                .speed      = 100,
        },
-       [MLX5E_100BASE_T]    = {
-               .supported  = SUPPORTED_100baseT_Full,
-               .advertised = ADVERTISED_100baseT_Full,
-               .speed      = 100,
+       [MLX5E_1000BASE_T]    = {
+               .supported  = SUPPORTED_1000baseT_Full,
+               .advertised = ADVERTISED_1000baseT_Full,
+               .speed      = 1000,
        },
        [MLX5E_10GBASE_T]    = {
                .supported  = SUPPORTED_10000baseT_Full,
@@ -165,26 +165,112 @@ static const struct {
        },
 };
 
+static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
+{
+       struct mlx5_core_dev *mdev = priv->mdev;
+       u8 pfc_en_tx;
+       u8 pfc_en_rx;
+       int err;
+
+       err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
+
+       return err ? 0 : pfc_en_tx | pfc_en_rx;
+}
+
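
pfc_en_tx and pfc_en_rx are 8-bit per-priority enable masks, so OR-ing them yields the set of priorities with PFC on in either direction; hweight8() of that bitmap (see MLX5E_NUM_PFC_COUNTERS below) is how many priorities get extra per-prio counter strings. A worked example:

    /* pfc_en_tx = 0x0A (prios 1,3), pfc_en_rx = 0x06 (prios 1,2) */
    /* combined  = 0x0E -> hweight8(0x0E) == 3 priorities exposed */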
+#define MLX5E_NUM_Q_CNTRS(priv) (NUM_Q_COUNTERS * (!!priv->q_counter))
+#define MLX5E_NUM_RQ_STATS(priv) \
+       (NUM_RQ_STATS * priv->params.num_channels * \
+        test_bit(MLX5E_STATE_OPENED, &priv->state))
+#define MLX5E_NUM_SQ_STATS(priv) \
+       (NUM_SQ_STATS * priv->params.num_channels * priv->params.num_tc * \
+        test_bit(MLX5E_STATE_OPENED, &priv->state))
+#define MLX5E_NUM_PFC_COUNTERS(priv) hweight8(mlx5e_query_pfc_combined(priv))
+
 static int mlx5e_get_sset_count(struct net_device *dev, int sset)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
 
        switch (sset) {
        case ETH_SS_STATS:
-               return NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS +
-                      priv->params.num_channels * NUM_RQ_STATS +
-                      priv->params.num_channels * priv->params.num_tc *
-                                                  NUM_SQ_STATS;
+               return NUM_SW_COUNTERS +
+                      MLX5E_NUM_Q_CNTRS(priv) +
+                      NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS +
+                      MLX5E_NUM_RQ_STATS(priv) +
+                      MLX5E_NUM_SQ_STATS(priv) +
+                      MLX5E_NUM_PFC_COUNTERS(priv);
        /* fallthrough */
        default:
                return -EOPNOTSUPP;
        }
 }
 
+static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
+{
+       int i, j, tc, prio, idx = 0;
+       unsigned long pfc_combined;
+
+       /* SW counters */
+       for (i = 0; i < NUM_SW_COUNTERS; i++)
+               strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].name);
+
+       /* Q counters */
+       for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++)
+               strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].name);
+
+       /* VPORT counters */
+       for (i = 0; i < NUM_VPORT_COUNTERS; i++)
+               strcpy(data + (idx++) * ETH_GSTRING_LEN,
+                      vport_stats_desc[i].name);
+
+       /* PPORT counters */
+       for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
+               strcpy(data + (idx++) * ETH_GSTRING_LEN,
+                      pport_802_3_stats_desc[i].name);
+
+       for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
+               strcpy(data + (idx++) * ETH_GSTRING_LEN,
+                      pport_2863_stats_desc[i].name);
+
+       for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
+               strcpy(data + (idx++) * ETH_GSTRING_LEN,
+                      pport_2819_stats_desc[i].name);
+
+       for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+               for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
+                       sprintf(data + (idx++) * ETH_GSTRING_LEN, "prio%d_%s",
+                               prio,
+                               pport_per_prio_traffic_stats_desc[i].name);
+       }
+
+       pfc_combined = mlx5e_query_pfc_combined(priv);
+       for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
+               for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+                       sprintf(data + (idx++) * ETH_GSTRING_LEN, "prio%d_%s",
+                               prio, pport_per_prio_pfc_stats_desc[i].name);
+               }
+       }
+
+       if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+               return;
+
+       /* per channel counters */
+       for (i = 0; i < priv->params.num_channels; i++)
+               for (j = 0; j < NUM_RQ_STATS; j++)
+                       sprintf(data + (idx++) * ETH_GSTRING_LEN, "rx%d_%s", i,
+                               rq_stats_desc[j].name);
+
+       for (tc = 0; tc < priv->params.num_tc; tc++)
+               for (i = 0; i < priv->params.num_channels; i++)
+                       for (j = 0; j < NUM_SQ_STATS; j++)
+                               sprintf(data + (idx++) * ETH_GSTRING_LEN,
+                                       "tx%d_%s",
+                                       priv->channeltc_to_txq_map[i][tc],
+                                       sq_stats_desc[j].name);
+}
+
 static void mlx5e_get_strings(struct net_device *dev,
                              uint32_t stringset, uint8_t *data)
 {
-       int i, j, tc, idx = 0;
        struct mlx5e_priv *priv = netdev_priv(dev);
 
        switch (stringset) {
@@ -195,30 +281,7 @@ static void mlx5e_get_strings(struct net_device *dev,
                break;
 
        case ETH_SS_STATS:
-               /* VPORT counters */
-               for (i = 0; i < NUM_VPORT_COUNTERS; i++)
-                       strcpy(data + (idx++) * ETH_GSTRING_LEN,
-                              vport_strings[i]);
-
-               /* PPORT counters */
-               for (i = 0; i < NUM_PPORT_COUNTERS; i++)
-                       strcpy(data + (idx++) * ETH_GSTRING_LEN,
-                              pport_strings[i]);
-
-               /* per channel counters */
-               for (i = 0; i < priv->params.num_channels; i++)
-                       for (j = 0; j < NUM_RQ_STATS; j++)
-                               sprintf(data + (idx++) * ETH_GSTRING_LEN,
-                                       "rx%d_%s", i, rq_stats_strings[j]);
-
-               for (tc = 0; tc < priv->params.num_tc; tc++)
-                       for (i = 0; i < priv->params.num_channels; i++)
-                               for (j = 0; j < NUM_SQ_STATS; j++)
-                                       sprintf(data +
-                                             (idx++) * ETH_GSTRING_LEN,
-                                             "tx%d_%s",
-                                             priv->channeltc_to_txq_map[i][tc],
-                                             sq_stats_strings[j]);
+               mlx5e_fill_stats_strings(priv, data);
                break;
        }
 }
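
This refactor moves name generation into mlx5e_fill_stats_strings() and replaces the bare
string arrays with counter descriptors. The contract across the three ethtool callbacks is
strict: get_sset_count() must return exactly the number of names get_strings() emits, and
mlx5e_get_ethtool_stats() below must emit values in the same order, which is why all three
walk the same *_stats_desc tables and gate the per-channel blocks on MLX5E_STATE_OPENED
identically. A minimal sketch of such a descriptor pair (types and names hypothetical):

	#include <linux/ethtool.h>
	#include <linux/stddef.h>
	#include <linux/types.h>

	struct demo_stats { u64 rx_packets; u64 rx_bytes; };

	static const struct {
		char   name[ETH_GSTRING_LEN];
		size_t offset;
	} demo_desc[] = {
		{ "rx_packets", offsetof(struct demo_stats, rx_packets) },
		{ "rx_bytes",   offsetof(struct demo_stats, rx_bytes)   },
	};

	/* strings: strcpy(data + i * ETH_GSTRING_LEN, demo_desc[i].name);
	 * values:  data[i] = *(u64 *)((char *)stats + demo_desc[i].offset); */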
@@ -227,7 +290,8 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
                                    struct ethtool_stats *stats, u64 *data)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
-       int i, j, tc, idx = 0;
+       int i, j, tc, prio, idx = 0;
+       unsigned long pfc_combined;
 
        if (!data)
                return;
@@ -237,33 +301,68 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
                mlx5e_update_stats(priv);
        mutex_unlock(&priv->state_lock);
 
+       for (i = 0; i < NUM_SW_COUNTERS; i++)
+               data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
+                                                  sw_stats_desc, i);
+
+       for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++)
+               data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
+                                                  q_stats_desc, i);
+
        for (i = 0; i < NUM_VPORT_COUNTERS; i++)
-               data[idx++] = ((u64 *)&priv->stats.vport)[i];
+               data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
+                                                 vport_stats_desc, i);
 
-       for (i = 0; i < NUM_PPORT_COUNTERS; i++)
-               data[idx++] = be64_to_cpu(((__be64 *)&priv->stats.pport)[i]);
+       for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
+               data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
+                                                 pport_802_3_stats_desc, i);
+
+       for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
+               data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
+                                                 pport_2863_stats_desc, i);
+
+       for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
+               data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
+                                                 pport_2819_stats_desc, i);
+
+       for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+               for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
+                       data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
+                                                pport_per_prio_traffic_stats_desc, i);
+       }
+
+       pfc_combined = mlx5e_query_pfc_combined(priv);
+       for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
+               for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+                       data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
+                                                         pport_per_prio_pfc_stats_desc, i);
+               }
+       }
+
+       if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+               return;
 
        /* per channel counters */
        for (i = 0; i < priv->params.num_channels; i++)
                for (j = 0; j < NUM_RQ_STATS; j++)
-                       data[idx++] = !test_bit(MLX5E_STATE_OPENED,
-                                               &priv->state) ? 0 :
-                                      ((u64 *)&priv->channel[i]->rq.stats)[j];
+                       data[idx++] =
+                              MLX5E_READ_CTR64_CPU(&priv->channel[i]->rq.stats,
+                                                   rq_stats_desc, j);
 
        for (tc = 0; tc < priv->params.num_tc; tc++)
                for (i = 0; i < priv->params.num_channels; i++)
                        for (j = 0; j < NUM_SQ_STATS; j++)
-                               data[idx++] = !test_bit(MLX5E_STATE_OPENED,
-                                                       &priv->state) ? 0 :
-                               ((u64 *)&priv->channel[i]->sq[tc].stats)[j];
+                               data[idx++] = MLX5E_READ_CTR64_CPU(&priv->channel[i]->sq[tc].stats,
+                                                                  sq_stats_desc, j);
 }
 
 static void mlx5e_get_ringparam(struct net_device *dev,
                                struct ethtool_ringparam *param)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
+       int rq_wq_type = priv->params.rq_wq_type;
 
-       param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
+       param->rx_max_pending = 1 << mlx5_max_log_rq_size(rq_wq_type);
        param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
        param->rx_pending     = 1 << priv->params.log_rq_size;
        param->tx_pending     = 1 << priv->params.log_sq_size;
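
RX ring bounds are now derived per work-queue type rather than from a single pair of
constants, since different RQ types support different depths. Note also that the set path
below stores sizes as a log2 via order_base_2(), so a requested count is effectively rounded
up to the next power of two; a one-line sketch of that rounding:

	#include <linux/log2.h>

	static u8 demo_ring_log_size(u32 requested)
	{
		/* ceil(log2(requested)): 1000 -> 10, i.e. a 1024-entry ring */
		return order_base_2(requested);
	}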
@@ -274,6 +373,7 @@ static int mlx5e_set_ringparam(struct net_device *dev,
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
        bool was_opened;
+       int rq_wq_type = priv->params.rq_wq_type;
        u16 min_rx_wqes;
        u8 log_rq_size;
        u8 log_sq_size;
@@ -289,16 +389,16 @@ static int mlx5e_set_ringparam(struct net_device *dev,
                            __func__);
                return -EINVAL;
        }
-       if (param->rx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) {
+       if (param->rx_pending < (1 << mlx5_min_log_rq_size(rq_wq_type))) {
                netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n",
                            __func__, param->rx_pending,
-                           1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE);
+                           1 << mlx5_min_log_rq_size(rq_wq_type));
                return -EINVAL;
        }
-       if (param->rx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE)) {
+       if (param->rx_pending > (1 << mlx5_max_log_rq_size(rq_wq_type))) {
                netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n",
                            __func__, param->rx_pending,
-                           1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE);
+                           1 << mlx5_max_log_rq_size(rq_wq_type));
                return -EINVAL;
        }
        if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
@@ -316,8 +416,7 @@ static int mlx5e_set_ringparam(struct net_device *dev,
 
        log_rq_size = order_base_2(param->rx_pending);
        log_sq_size = order_base_2(param->tx_pending);
-       min_rx_wqes = min_t(u16, param->rx_pending - 1,
-                           MLX5E_PARAMS_DEFAULT_MIN_RX_WQES);
+       min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, param->rx_pending);
 
        if (log_rq_size == priv->params.log_rq_size &&
            log_sq_size == priv->params.log_sq_size &&
@@ -357,6 +456,7 @@ static int mlx5e_set_channels(struct net_device *dev,
        struct mlx5e_priv *priv = netdev_priv(dev);
        int ncv = mlx5e_get_max_num_channels(priv->mdev);
        unsigned int count = ch->combined_count;
+       bool arfs_enabled;
        bool was_opened;
        int err = 0;
 
@@ -385,13 +485,27 @@ static int mlx5e_set_channels(struct net_device *dev,
        if (was_opened)
                mlx5e_close_locked(dev);
 
+       arfs_enabled = dev->features & NETIF_F_NTUPLE;
+       if (arfs_enabled)
+               mlx5e_arfs_disable(priv);
+
        priv->params.num_channels = count;
-       mlx5e_build_default_indir_rqt(priv->params.indirection_rqt,
+       mlx5e_build_default_indir_rqt(priv->mdev, priv->params.indirection_rqt,
                                      MLX5E_INDIR_RQT_SIZE, count);
 
        if (was_opened)
                err = mlx5e_open_locked(dev);
+       if (err)
+               goto out;
 
+       if (arfs_enabled) {
+               err = mlx5e_arfs_enable(priv);
+               if (err)
+                       netdev_err(dev, "%s: mlx5e_arfs_enable failed: %d\n",
+                                  __func__, err);
+       }
+
+out:
        mutex_unlock(&priv->state_lock);
 
        return err;
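
mlx5e_set_channels() gains the aRFS handling mentioned earlier: steering rules cache RX queue
indices, so they must be flushed before the channel count changes and rebuilt afterwards. The
shape of that quiesce/resize/restore pattern, with hypothetical helpers (the demo_* names are
not the driver's):

	struct demo_priv;
	bool demo_arfs_active(struct demo_priv *priv);
	void demo_arfs_disable(struct demo_priv *priv);
	int  demo_arfs_enable(struct demo_priv *priv);
	int  demo_apply_channel_count(struct demo_priv *priv, int count);

	static int demo_resize_channels(struct demo_priv *priv, int count)
	{
		bool arfs = demo_arfs_active(priv);	/* NETIF_F_NTUPLE set? */
		int err;

		if (arfs)
			demo_arfs_disable(priv);  /* drop rules aimed at old queues */

		err = demo_apply_channel_count(priv, count);

		if (!err && arfs)
			err = demo_arfs_enable(priv);  /* rebuild steering tables */
		return err;
	}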
@@ -499,6 +613,25 @@ static u32 ptys2ethtool_supported_port(u32 eth_proto_cap)
        return 0;
 }
 
+int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
+{
+       u32 max_speed = 0;
+       u32 proto_cap;
+       int err;
+       int i;
+
+       err = mlx5_query_port_proto_cap(mdev, &proto_cap, MLX5_PTYS_EN);
+       if (err)
+               return err;
+
+       for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i)
+               if (proto_cap & MLX5E_PROT_MASK(i))
+                       max_speed = max(max_speed, ptys2ethtool_table[i].speed);
+
+       *speed = max_speed;
+       return 0;
+}
+
 static void get_speed_duplex(struct net_device *netdev,
                             u32 eth_proto_oper,
                             struct ethtool_cmd *cmd)
@@ -727,9 +860,8 @@ static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
        MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
        mlx5e_build_tir_ctx_hash(tirc, priv);
 
-       for (i = 0; i < MLX5E_NUM_TT; i++)
-               if (IS_HASHING_TT(i))
-                       mlx5_core_modify_tir(mdev, priv->tirn[i], in, inlen);
+       for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
+               mlx5_core_modify_tir(mdev, priv->indir_tirn[i], in, inlen);
 }
 
 static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
@@ -751,9 +883,11 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
        mutex_lock(&priv->state_lock);
 
        if (indir) {
+               u32 rqtn = priv->indir_rqtn;
+
                memcpy(priv->params.indirection_rqt, indir,
                       sizeof(priv->params.indirection_rqt));
-               mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
+               mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
        }
 
        if (key)
@@ -1036,6 +1170,108 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
        return mlx5_set_port_wol(mdev, mlx5_wol_mode);
 }
 
+static int mlx5e_set_phys_id(struct net_device *dev,
+                            enum ethtool_phys_id_state state)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       struct mlx5_core_dev *mdev = priv->mdev;
+       u16 beacon_duration;
+
+       if (!MLX5_CAP_GEN(mdev, beacon_led))
+               return -EOPNOTSUPP;
+
+       switch (state) {
+       case ETHTOOL_ID_ACTIVE:
+               beacon_duration = MLX5_BEACON_DURATION_INF;
+               break;
+       case ETHTOOL_ID_INACTIVE:
+               beacon_duration = MLX5_BEACON_DURATION_OFF;
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return mlx5_set_port_beacon(mdev, beacon_duration);
+}
+
+static int mlx5e_get_module_info(struct net_device *netdev,
+                                struct ethtool_modinfo *modinfo)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       struct mlx5_core_dev *dev = priv->mdev;
+       int size_read = 0;
+       u8 data[4];
+
+       size_read = mlx5_query_module_eeprom(dev, 0, 2, data);
+       if (size_read < 2)
+               return -EIO;
+
+       /* data[0] = identifier byte */
+       switch (data[0]) {
+       case MLX5_MODULE_ID_QSFP:
+               modinfo->type       = ETH_MODULE_SFF_8436;
+               modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+               break;
+       case MLX5_MODULE_ID_QSFP_PLUS:
+       case MLX5_MODULE_ID_QSFP28:
+               /* data[1] = revision id */
+               if (data[0] == MLX5_MODULE_ID_QSFP28 || data[1] >= 0x3) {
+                       modinfo->type       = ETH_MODULE_SFF_8636;
+                       modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+               } else {
+                       modinfo->type       = ETH_MODULE_SFF_8436;
+                       modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+               }
+               break;
+       case MLX5_MODULE_ID_SFP:
+               modinfo->type       = ETH_MODULE_SFF_8472;
+               modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+               break;
+       default:
+               netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n",
+                          __func__, data[0]);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int mlx5e_get_module_eeprom(struct net_device *netdev,
+                                  struct ethtool_eeprom *ee,
+                                  u8 *data)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       struct mlx5_core_dev *mdev = priv->mdev;
+       int offset = ee->offset;
+       int size_read;
+       int i = 0;
+
+       if (!ee->len)
+               return -EINVAL;
+
+       memset(data, 0, ee->len);
+
+       while (i < ee->len) {
+               size_read = mlx5_query_module_eeprom(mdev, offset, ee->len - i,
+                                                    data + i);
+
+               if (!size_read)
+                       /* Done reading */
+                       return 0;
+
+               if (size_read < 0) {
+                       netdev_err(priv->netdev, "%s: mlx5_query_eeprom failed:0x%x\n",
+                                  __func__, size_read);
+                       return 0;
+               }
+
+               i += size_read;
+               offset += size_read;
+       }
+
+       return 0;
+}
+
 const struct ethtool_ops mlx5e_ethtool_ops = {
        .get_drvinfo       = mlx5e_get_drvinfo,
        .get_link          = ethtool_op_get_link,
@@ -1060,6 +1296,9 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
        .get_pauseparam    = mlx5e_get_pauseparam,
        .set_pauseparam    = mlx5e_set_pauseparam,
        .get_ts_info       = mlx5e_get_ts_info,
+       .set_phys_id       = mlx5e_set_phys_id,
        .get_wol           = mlx5e_get_wol,
        .set_wol           = mlx5e_set_wol,
+       .get_module_info   = mlx5e_get_module_info,
+       .get_module_eeprom = mlx5e_get_module_eeprom,
 };
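
The three new ops wire up "ethtool -p" (set_phys_id, mapped to the port beacon LED) and
"ethtool -m" (module info/EEPROM for SFP/QSFP cables); the latter pair answers the
ETHTOOL_GMODULEINFO / ETHTOOL_GMODULEEEPROM requests. A minimal userspace sketch of the info
query (error handling elided; sock is any open AF_INET datagram socket):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	static int query_module_info(int sock, const char *ifname,
				     struct ethtool_modinfo *mi)
	{
		struct ifreq ifr;

		memset(mi, 0, sizeof(*mi));
		mi->cmd = ETHTOOL_GMODULEINFO;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (char *)mi;

		/* On success mi->type names the SFF spec (e.g.
		 * ETH_MODULE_SFF_8436) and mi->eeprom_len bounds a
		 * following ETHTOOL_GMODULEEEPROM read. */
		return ioctl(sock, SIOCETHTOOL, &ifr);
	}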
index d00a2420341010e8c759b6e2bc44debbf498a7b0..b32740092854d3569ce60e231a5619d987d1440c 100644 (file)
 #include <linux/mlx5/fs.h>
 #include "en.h"
 
-#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
+static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
+                                 struct mlx5e_l2_rule *ai, int type);
+static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
+                                  struct mlx5e_l2_rule *ai);
 
 enum {
        MLX5E_FULLMATCH = 0,
@@ -58,21 +61,21 @@ enum {
        MLX5E_ACTION_DEL  = 2,
 };
 
-struct mlx5e_eth_addr_hash_node {
+struct mlx5e_l2_hash_node {
        struct hlist_node          hlist;
        u8                         action;
-       struct mlx5e_eth_addr_info ai;
+       struct mlx5e_l2_rule ai;
 };
 
-static inline int mlx5e_hash_eth_addr(u8 *addr)
+static inline int mlx5e_hash_l2(u8 *addr)
 {
        return addr[5];
 }
 
-static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr)
+static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr)
 {
-       struct mlx5e_eth_addr_hash_node *hn;
-       int ix = mlx5e_hash_eth_addr(addr);
+       struct mlx5e_l2_hash_node *hn;
+       int ix = mlx5e_hash_l2(addr);
        int found = 0;
 
        hlist_for_each_entry(hn, &hash[ix], hlist)
@@ -96,371 +99,12 @@ static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr)
        hlist_add_head(&hn->hlist, &hash[ix]);
 }
 
-static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
+static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
 {
        hlist_del(&hn->hlist);
        kfree(hn);
 }
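
The renamed hash keeps its trivial scheme: mlx5e_hash_l2() buckets on the last octet of the
MAC, and collisions are resolved by a short hlist walk. A lookup matching the add path above
would look roughly like this (assuming the rule embeds its MAC in ai.addr, as the broadcast
entry later in this file suggests):

	#include <linux/etherdevice.h>

	static struct mlx5e_l2_hash_node *
	demo_l2_lookup(struct hlist_head *hash, u8 *addr)
	{
		struct mlx5e_l2_hash_node *hn;

		hlist_for_each_entry(hn, &hash[mlx5e_hash_l2(addr)], hlist)
			if (ether_addr_equal(hn->ai.addr, addr))
				return hn;
		return NULL;
	}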
 
-static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
-                                              struct mlx5e_eth_addr_info *ai)
-{
-       if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP))
-               mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]);
-
-       if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP))
-               mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]);
-
-       if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH))
-               mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]);
-
-       if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH))
-               mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]);
-
-       if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP))
-               mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_TCP]);
-
-       if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP))
-               mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_TCP]);
-
-       if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP))
-               mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_UDP]);
-
-       if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP))
-               mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_UDP]);
-
-       if (ai->tt_vec & BIT(MLX5E_TT_IPV6))
-               mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6]);
-
-       if (ai->tt_vec & BIT(MLX5E_TT_IPV4))
-               mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4]);
-
-       if (ai->tt_vec & BIT(MLX5E_TT_ANY))
-               mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_ANY]);
-}
-
-static int mlx5e_get_eth_addr_type(u8 *addr)
-{
-       if (is_unicast_ether_addr(addr))
-               return MLX5E_UC;
-
-       if ((addr[0] == 0x01) &&
-           (addr[1] == 0x00) &&
-           (addr[2] == 0x5e) &&
-          !(addr[3] &  0x80))
-               return MLX5E_MC_IPV4;
-
-       if ((addr[0] == 0x33) &&
-           (addr[1] == 0x33))
-               return MLX5E_MC_IPV6;
-
-       return MLX5E_MC_OTHER;
-}
-
-static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
-{
-       int eth_addr_type;
-       u32 ret;
-
-       switch (type) {
-       case MLX5E_FULLMATCH:
-               eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
-               switch (eth_addr_type) {
-               case MLX5E_UC:
-                       ret =
-                               BIT(MLX5E_TT_IPV4_TCP)       |
-                               BIT(MLX5E_TT_IPV6_TCP)       |
-                               BIT(MLX5E_TT_IPV4_UDP)       |
-                               BIT(MLX5E_TT_IPV6_UDP)       |
-                               BIT(MLX5E_TT_IPV4_IPSEC_AH)  |
-                               BIT(MLX5E_TT_IPV6_IPSEC_AH)  |
-                               BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
-                               BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
-                               BIT(MLX5E_TT_IPV4)           |
-                               BIT(MLX5E_TT_IPV6)           |
-                               BIT(MLX5E_TT_ANY)            |
-                               0;
-                       break;
-
-               case MLX5E_MC_IPV4:
-                       ret =
-                               BIT(MLX5E_TT_IPV4_UDP)       |
-                               BIT(MLX5E_TT_IPV4)           |
-                               0;
-                       break;
-
-               case MLX5E_MC_IPV6:
-                       ret =
-                               BIT(MLX5E_TT_IPV6_UDP)       |
-                               BIT(MLX5E_TT_IPV6)           |
-                               0;
-                       break;
-
-               case MLX5E_MC_OTHER:
-                       ret =
-                               BIT(MLX5E_TT_ANY)            |
-                               0;
-                       break;
-               }
-
-               break;
-
-       case MLX5E_ALLMULTI:
-               ret =
-                       BIT(MLX5E_TT_IPV4_UDP) |
-                       BIT(MLX5E_TT_IPV6_UDP) |
-                       BIT(MLX5E_TT_IPV4)     |
-                       BIT(MLX5E_TT_IPV6)     |
-                       BIT(MLX5E_TT_ANY)      |
-                       0;
-               break;
-
-       default: /* MLX5E_PROMISC */
-               ret =
-                       BIT(MLX5E_TT_IPV4_TCP)       |
-                       BIT(MLX5E_TT_IPV6_TCP)       |
-                       BIT(MLX5E_TT_IPV4_UDP)       |
-                       BIT(MLX5E_TT_IPV6_UDP)       |
-                       BIT(MLX5E_TT_IPV4_IPSEC_AH)  |
-                       BIT(MLX5E_TT_IPV6_IPSEC_AH)  |
-                       BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
-                       BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
-                       BIT(MLX5E_TT_IPV4)           |
-                       BIT(MLX5E_TT_IPV6)           |
-                       BIT(MLX5E_TT_ANY)            |
-                       0;
-               break;
-       }
-
-       return ret;
-}
-
-static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
-                                    struct mlx5e_eth_addr_info *ai,
-                                    int type, u32 *mc, u32 *mv)
-{
-       struct mlx5_flow_destination dest;
-       u8 match_criteria_enable = 0;
-       struct mlx5_flow_rule **rule_p;
-       struct mlx5_flow_table *ft = priv->fts.main.t;
-       u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
-                                  outer_headers.dmac_47_16);
-       u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv,
-                                  outer_headers.dmac_47_16);
-       u32 *tirn = priv->tirn;
-       u32 tt_vec;
-       int err = 0;
-
-       dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
-
-       switch (type) {
-       case MLX5E_FULLMATCH:
-               match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-               eth_broadcast_addr(mc_dmac);
-               ether_addr_copy(mv_dmac, ai->addr);
-               break;
-
-       case MLX5E_ALLMULTI:
-               match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-               mc_dmac[0] = 0x01;
-               mv_dmac[0] = 0x01;
-               break;
-
-       case MLX5E_PROMISC:
-               break;
-       }
-
-       tt_vec = mlx5e_get_tt_vec(ai, type);
-
-       if (tt_vec & BIT(MLX5E_TT_ANY)) {
-               rule_p = &ai->ft_rule[MLX5E_TT_ANY];
-               dest.tir_num = tirn[MLX5E_TT_ANY];
-               *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
-                                            MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-                                            MLX5_FS_DEFAULT_FLOW_TAG, &dest);
-               if (IS_ERR_OR_NULL(*rule_p))
-                       goto err_del_ai;
-               ai->tt_vec |= BIT(MLX5E_TT_ANY);
-       }
-
-       match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-       MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
-
-       if (tt_vec & BIT(MLX5E_TT_IPV4)) {
-               rule_p = &ai->ft_rule[MLX5E_TT_IPV4];
-               dest.tir_num = tirn[MLX5E_TT_IPV4];
-               MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
-                        ETH_P_IP);
-               *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
-                                            MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-                                            MLX5_FS_DEFAULT_FLOW_TAG, &dest);
-               if (IS_ERR_OR_NULL(*rule_p))
-                       goto err_del_ai;
-               ai->tt_vec |= BIT(MLX5E_TT_IPV4);
-       }
-
-       if (tt_vec & BIT(MLX5E_TT_IPV6)) {
-               rule_p = &ai->ft_rule[MLX5E_TT_IPV6];
-               dest.tir_num = tirn[MLX5E_TT_IPV6];
-               MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
-                        ETH_P_IPV6);
-               *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
-                                            MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-                                            MLX5_FS_DEFAULT_FLOW_TAG, &dest);
-               if (IS_ERR_OR_NULL(*rule_p))
-                       goto err_del_ai;
-               ai->tt_vec |= BIT(MLX5E_TT_IPV6);
-       }
-
-       MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
-       MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);
-
-       if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
-               rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP];
-               dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
-               MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
-                        ETH_P_IP);
-               *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
-                                            MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-                                            MLX5_FS_DEFAULT_FLOW_TAG, &dest);
-               if (IS_ERR_OR_NULL(*rule_p))
-                       goto err_del_ai;
-               ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
-       }
-
-       if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
-               rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP];
-               dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
-               MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
-                        ETH_P_IPV6);
-               *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
-                                            MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-                                            MLX5_FS_DEFAULT_FLOW_TAG, &dest);
-               if (IS_ERR_OR_NULL(*rule_p))
-                       goto err_del_ai;
-               ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
-       }
-
-       MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP);
-
-       if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
-               rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP];
-               dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
-               MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
-                        ETH_P_IP);
-               *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
-                                            MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-                                            MLX5_FS_DEFAULT_FLOW_TAG, &dest);
-               if (IS_ERR_OR_NULL(*rule_p))
-                       goto err_del_ai;
-               ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
-       }
-
-       if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
-               rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP];
-               dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
-               MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
-                        ETH_P_IPV6);
-               *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
-                                            MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-                                            MLX5_FS_DEFAULT_FLOW_TAG, &dest);
-               if (IS_ERR_OR_NULL(*rule_p))
-                       goto err_del_ai;
-
-               ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
-       }
-
-       MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH);
-
-       if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
-               rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH];
-               dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
-               MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
-                        ETH_P_IP);
-               *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
-                                            MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-                                            MLX5_FS_DEFAULT_FLOW_TAG, &dest);
-               if (IS_ERR_OR_NULL(*rule_p))
-                       goto err_del_ai;
-               ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
-       }
-
-       if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
-               rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH];
-               dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
-               MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
-                        ETH_P_IPV6);
-               *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
-                                            MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-                                            MLX5_FS_DEFAULT_FLOW_TAG, &dest);
-               if (IS_ERR_OR_NULL(*rule_p))
-                       goto err_del_ai;
-               ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
-       }
-
-       MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP);
-
-       if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
-               rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP];
-               dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
-               MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
-                        ETH_P_IP);
-               *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
-                                            MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-                                            MLX5_FS_DEFAULT_FLOW_TAG, &dest);
-               if (IS_ERR_OR_NULL(*rule_p))
-                       goto err_del_ai;
-               ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
-       }
-
-       if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
-               rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP];
-               dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
-               MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
-                        ETH_P_IPV6);
-               *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
-                                            MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-                                            MLX5_FS_DEFAULT_FLOW_TAG, &dest);
-               if (IS_ERR_OR_NULL(*rule_p))
-                       goto err_del_ai;
-               ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
-       }
-
-       return 0;
-
-err_del_ai:
-       err = PTR_ERR(*rule_p);
-       *rule_p = NULL;
-       mlx5e_del_eth_addr_from_flow_table(priv, ai);
-
-       return err;
-}
-
-static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
-                                  struct mlx5e_eth_addr_info *ai, int type)
-{
-       u32 *match_criteria;
-       u32 *match_value;
-       int err = 0;
-
-       match_value     = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
-       match_criteria  = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
-       if (!match_value || !match_criteria) {
-               netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
-               err = -ENOMEM;
-               goto add_eth_addr_rule_out;
-       }
-
-       err = __mlx5e_add_eth_addr_rule(priv, ai, type, match_criteria,
-                                       match_value);
-
-add_eth_addr_rule_out:
-       kvfree(match_criteria);
-       kvfree(match_value);
-
-       return err;
-}
-
 static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
 {
        struct net_device *ndev = priv->netdev;
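
The large block deleted above was the old single-table design, in which every L2 address
fanned out into up to eleven BIT(MLX5E_TT_*) rules, one per traffic type. The rest of this
diff replaces it with a layered pipeline, roughly:

	vlan table -> l2 table (dmac match) -> ttc table (ethertype / ip_protocol) -> TIR

so each L2 entry now needs a single forwarding rule, and the traffic-type classification
lives once, in the shared TTC table built further below.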
@@ -472,7 +116,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
        int i;
 
        list_size = 0;
-       for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID)
+       for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID)
                list_size++;
 
        max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
@@ -489,7 +133,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
                return -ENOMEM;
 
        i = 0;
-       for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) {
+       for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID) {
                if (i >= list_size)
                        break;
                vlans[i++] = vlan;
@@ -514,28 +158,28 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
                                 enum mlx5e_vlan_rule_type rule_type,
                                 u16 vid, u32 *mc, u32 *mv)
 {
-       struct mlx5_flow_table *ft = priv->fts.vlan.t;
+       struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
        struct mlx5_flow_destination dest;
        u8 match_criteria_enable = 0;
        struct mlx5_flow_rule **rule_p;
        int err = 0;
 
        dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-       dest.ft = priv->fts.main.t;
+       dest.ft = priv->fs.l2.ft.t;
 
        match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
 
        switch (rule_type) {
        case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
-               rule_p = &priv->vlan.untagged_rule;
+               rule_p = &priv->fs.vlan.untagged_rule;
                break;
        case MLX5E_VLAN_RULE_TYPE_ANY_VID:
-               rule_p = &priv->vlan.any_vlan_rule;
+               rule_p = &priv->fs.vlan.any_vlan_rule;
                MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
                break;
        default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
-               rule_p = &priv->vlan.active_vlans_rule[vid];
+               rule_p = &priv->fs.vlan.active_vlans_rule[vid];
                MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
                MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
                MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
@@ -589,22 +233,22 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
 {
        switch (rule_type) {
        case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
-               if (priv->vlan.untagged_rule) {
-                       mlx5_del_flow_rule(priv->vlan.untagged_rule);
-                       priv->vlan.untagged_rule = NULL;
+               if (priv->fs.vlan.untagged_rule) {
+                       mlx5_del_flow_rule(priv->fs.vlan.untagged_rule);
+                       priv->fs.vlan.untagged_rule = NULL;
                }
                break;
        case MLX5E_VLAN_RULE_TYPE_ANY_VID:
-               if (priv->vlan.any_vlan_rule) {
-                       mlx5_del_flow_rule(priv->vlan.any_vlan_rule);
-                       priv->vlan.any_vlan_rule = NULL;
+               if (priv->fs.vlan.any_vlan_rule) {
+                       mlx5_del_flow_rule(priv->fs.vlan.any_vlan_rule);
+                       priv->fs.vlan.any_vlan_rule = NULL;
                }
                break;
        case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
                mlx5e_vport_context_update_vlans(priv);
-               if (priv->vlan.active_vlans_rule[vid]) {
-                       mlx5_del_flow_rule(priv->vlan.active_vlans_rule[vid]);
-                       priv->vlan.active_vlans_rule[vid] = NULL;
+               if (priv->fs.vlan.active_vlans_rule[vid]) {
+                       mlx5_del_flow_rule(priv->fs.vlan.active_vlans_rule[vid]);
+                       priv->fs.vlan.active_vlans_rule[vid] = NULL;
                }
                mlx5e_vport_context_update_vlans(priv);
                break;
@@ -613,10 +257,10 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
 
 void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
 {
-       if (!priv->vlan.filter_disabled)
+       if (!priv->fs.vlan.filter_disabled)
                return;
 
-       priv->vlan.filter_disabled = false;
+       priv->fs.vlan.filter_disabled = false;
        if (priv->netdev->flags & IFF_PROMISC)
                return;
        mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
@@ -624,10 +268,10 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
 
 void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
 {
-       if (priv->vlan.filter_disabled)
+       if (priv->fs.vlan.filter_disabled)
                return;
 
-       priv->vlan.filter_disabled = true;
+       priv->fs.vlan.filter_disabled = true;
        if (priv->netdev->flags & IFF_PROMISC)
                return;
        mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
@@ -638,7 +282,7 @@ int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
 
-       set_bit(vid, priv->vlan.active_vlans);
+       set_bit(vid, priv->fs.vlan.active_vlans);
 
        return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
 }
@@ -648,7 +292,7 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
 
-       clear_bit(vid, priv->vlan.active_vlans);
+       clear_bit(vid, priv->fs.vlan.active_vlans);
 
        mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
 
@@ -656,21 +300,21 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
 }
 
 #define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
-       for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
+       for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
                hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
 
-static void mlx5e_execute_action(struct mlx5e_priv *priv,
-                                struct mlx5e_eth_addr_hash_node *hn)
+static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
+                                   struct mlx5e_l2_hash_node *hn)
 {
        switch (hn->action) {
        case MLX5E_ACTION_ADD:
-               mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
+               mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
                hn->action = MLX5E_ACTION_NONE;
                break;
 
        case MLX5E_ACTION_DEL:
-               mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
-               mlx5e_del_eth_addr_from_hash(hn);
+               mlx5e_del_l2_flow_rule(priv, &hn->ai);
+               mlx5e_del_l2_from_hash(hn);
                break;
        }
 }
@@ -682,14 +326,14 @@ static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
 
        netif_addr_lock_bh(netdev);
 
-       mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc,
-                                  priv->netdev->dev_addr);
+       mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
+                            priv->netdev->dev_addr);
 
        netdev_for_each_uc_addr(ha, netdev)
-               mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr);
+               mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);
 
        netdev_for_each_mc_addr(ha, netdev)
-               mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr);
+               mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);
 
        netif_addr_unlock_bh(netdev);
 }
@@ -699,17 +343,17 @@ static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
 {
        bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
        struct net_device *ndev = priv->netdev;
-       struct mlx5e_eth_addr_hash_node *hn;
+       struct mlx5e_l2_hash_node *hn;
        struct hlist_head *addr_list;
        struct hlist_node *tmp;
        int i = 0;
        int hi;
 
-       addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc;
+       addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
 
        if (is_uc) /* Make sure our own address is pushed first */
                ether_addr_copy(addr_array[i++], ndev->dev_addr);
-       else if (priv->eth_addr.broadcast_enabled)
+       else if (priv->fs.l2.broadcast_enabled)
                ether_addr_copy(addr_array[i++], ndev->broadcast);
 
        mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
@@ -725,7 +369,7 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
                                                 int list_type)
 {
        bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
-       struct mlx5e_eth_addr_hash_node *hn;
+       struct mlx5e_l2_hash_node *hn;
        u8 (*addr_array)[ETH_ALEN] = NULL;
        struct hlist_head *addr_list;
        struct hlist_node *tmp;
@@ -734,12 +378,12 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
        int err;
        int hi;
 
-       size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0);
+       size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
        max_size = is_uc ?
                1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
                1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);
 
-       addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc;
+       addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
        mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
                size++;
 
@@ -770,7 +414,7 @@ out:
 
 static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
 {
-       struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
+       struct mlx5e_l2_table *ea = &priv->fs.l2;
 
        mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
        mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
@@ -781,26 +425,26 @@ static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
 
 static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
 {
-       struct mlx5e_eth_addr_hash_node *hn;
+       struct mlx5e_l2_hash_node *hn;
        struct hlist_node *tmp;
        int i;
 
-       mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
-               mlx5e_execute_action(priv, hn);
+       mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
+               mlx5e_execute_l2_action(priv, hn);
 
-       mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
-               mlx5e_execute_action(priv, hn);
+       mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
+               mlx5e_execute_l2_action(priv, hn);
 }
 
 static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
 {
-       struct mlx5e_eth_addr_hash_node *hn;
+       struct mlx5e_l2_hash_node *hn;
        struct hlist_node *tmp;
        int i;
 
-       mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
+       mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
                hn->action = MLX5E_ACTION_DEL;
-       mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
+       mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
                hn->action = MLX5E_ACTION_DEL;
 
        if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
@@ -814,7 +458,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
        struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
                                               set_rx_mode_work);
 
-       struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
+       struct mlx5e_l2_table *ea = &priv->fs.l2;
        struct net_device *ndev = priv->netdev;
 
        bool rx_mode_enable   = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
@@ -830,27 +474,27 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
        bool disable_broadcast =  ea->broadcast_enabled && !broadcast_enabled;
 
        if (enable_promisc) {
-               mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
-               if (!priv->vlan.filter_disabled)
+               mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
+               if (!priv->fs.vlan.filter_disabled)
                        mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
                                            0);
        }
        if (enable_allmulti)
-               mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
+               mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
        if (enable_broadcast)
-               mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
+               mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
 
        mlx5e_handle_netdev_addr(priv);
 
        if (disable_broadcast)
-               mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
+               mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
        if (disable_allmulti)
-               mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
+               mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
        if (disable_promisc) {
-               if (!priv->vlan.filter_disabled)
+               if (!priv->fs.vlan.filter_disabled)
                        mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
                                            0);
-               mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
+               mlx5e_del_l2_flow_rule(priv, &ea->promisc);
        }
 
        ea->promisc_enabled   = promisc_enabled;
@@ -872,224 +516,454 @@ static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
        ft->num_groups = 0;
 }
 
-void mlx5e_init_eth_addr(struct mlx5e_priv *priv)
+void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
 {
-       ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast);
+       ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
 }
 
-#define MLX5E_MAIN_GROUP0_SIZE BIT(3)
-#define MLX5E_MAIN_GROUP1_SIZE BIT(1)
-#define MLX5E_MAIN_GROUP2_SIZE BIT(0)
-#define MLX5E_MAIN_GROUP3_SIZE BIT(14)
-#define MLX5E_MAIN_GROUP4_SIZE BIT(13)
-#define MLX5E_MAIN_GROUP5_SIZE BIT(11)
-#define MLX5E_MAIN_GROUP6_SIZE BIT(2)
-#define MLX5E_MAIN_GROUP7_SIZE BIT(1)
-#define MLX5E_MAIN_GROUP8_SIZE BIT(0)
-#define MLX5E_MAIN_TABLE_SIZE  (MLX5E_MAIN_GROUP0_SIZE +\
-                                MLX5E_MAIN_GROUP1_SIZE +\
-                                MLX5E_MAIN_GROUP2_SIZE +\
-                                MLX5E_MAIN_GROUP3_SIZE +\
-                                MLX5E_MAIN_GROUP4_SIZE +\
-                                MLX5E_MAIN_GROUP5_SIZE +\
-                                MLX5E_MAIN_GROUP6_SIZE +\
-                                MLX5E_MAIN_GROUP7_SIZE +\
-                                MLX5E_MAIN_GROUP8_SIZE)
-
-static int __mlx5e_create_main_groups(struct mlx5e_flow_table *ft, u32 *in,
-                                     int inlen)
+void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
 {
-       u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
-       u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in,
-                               match_criteria.outer_headers.dmac_47_16);
+       mlx5e_destroy_groups(ft);
+       kfree(ft->g);
+       mlx5_destroy_flow_table(ft->t);
+       ft->t = NULL;
+}
+
+static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
+{
+       int i;
+
+       for (i = 0; i < MLX5E_NUM_TT; i++) {
+               if (!IS_ERR_OR_NULL(ttc->rules[i])) {
+                       mlx5_del_flow_rule(ttc->rules[i]);
+                       ttc->rules[i] = NULL;
+               }
+       }
+}
+
+static struct {
+       u16 etype;
+       u8 proto;
+} ttc_rules[] = {
+       [MLX5E_TT_IPV4_TCP] = {
+               .etype = ETH_P_IP,
+               .proto = IPPROTO_TCP,
+       },
+       [MLX5E_TT_IPV6_TCP] = {
+               .etype = ETH_P_IPV6,
+               .proto = IPPROTO_TCP,
+       },
+       [MLX5E_TT_IPV4_UDP] = {
+               .etype = ETH_P_IP,
+               .proto = IPPROTO_UDP,
+       },
+       [MLX5E_TT_IPV6_UDP] = {
+               .etype = ETH_P_IPV6,
+               .proto = IPPROTO_UDP,
+       },
+       [MLX5E_TT_IPV4_IPSEC_AH] = {
+               .etype = ETH_P_IP,
+               .proto = IPPROTO_AH,
+       },
+       [MLX5E_TT_IPV6_IPSEC_AH] = {
+               .etype = ETH_P_IPV6,
+               .proto = IPPROTO_AH,
+       },
+       [MLX5E_TT_IPV4_IPSEC_ESP] = {
+               .etype = ETH_P_IP,
+               .proto = IPPROTO_ESP,
+       },
+       [MLX5E_TT_IPV6_IPSEC_ESP] = {
+               .etype = ETH_P_IPV6,
+               .proto = IPPROTO_ESP,
+       },
+       [MLX5E_TT_IPV4] = {
+               .etype = ETH_P_IP,
+               .proto = 0,
+       },
+       [MLX5E_TT_IPV6] = {
+               .etype = ETH_P_IPV6,
+               .proto = 0,
+       },
+       [MLX5E_TT_ANY] = {
+               .etype = 0,
+               .proto = 0,
+       },
+};
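
In this table a zero etype or proto means "do not match on that header field":
mlx5e_generate_ttc_rule() below only adds the ethertype/ip_protocol match criteria for
non-zero values, so MLX5E_TT_IPV4/MLX5E_TT_IPV6 catch the remaining IP traffic and
MLX5E_TT_ANY, with both fields zero, matches everything else.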
+
+static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
+                                                     struct mlx5_flow_table *ft,
+                                                     struct mlx5_flow_destination *dest,
+                                                     u16 etype,
+                                                     u8 proto)
+{
+       struct mlx5_flow_rule *rule;
+       u8 match_criteria_enable = 0;
+       u32 *match_criteria;
+       u32 *match_value;
+       int err = 0;
+
+       match_value     = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+       match_criteria  = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+       if (!match_value || !match_criteria) {
+               netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+               err = -ENOMEM;
+               goto out;
+       }
+
+       if (proto) {
+               match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+               MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.ip_protocol);
+               MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, proto);
+       }
+       if (etype) {
+               match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+               MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.ethertype);
+               MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, etype);
+       }
+
+       rule = mlx5_add_flow_rule(ft, match_criteria_enable,
+                                 match_criteria, match_value,
+                                 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+                                 MLX5_FS_DEFAULT_FLOW_TAG,
+                                 dest);
+       if (IS_ERR(rule)) {
+               err = PTR_ERR(rule);
+               netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
+       }
+out:
+       kvfree(match_criteria);
+       kvfree(match_value);
+       return err ? ERR_PTR(err) : rule;
+}
+
+static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
+{
+       struct mlx5_flow_destination dest;
+       struct mlx5e_ttc_table *ttc;
+       struct mlx5_flow_rule **rules;
+       struct mlx5_flow_table *ft;
+       int tt;
        int err;
+
+       ttc = &priv->fs.ttc;
+       ft = ttc->ft.t;
+       rules = ttc->rules;
+
+       dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+       for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
+               if (tt == MLX5E_TT_ANY)
+                       dest.tir_num = priv->direct_tir[0].tirn;
+               else
+                       dest.tir_num = priv->indir_tirn[tt];
+               rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
+                                                   ttc_rules[tt].etype,
+                                                   ttc_rules[tt].proto);
+               if (IS_ERR(rules[tt]))
+                       goto del_rules;
+       }
+
+       return 0;
+
+del_rules:
+       err = PTR_ERR(rules[tt]);
+       rules[tt] = NULL;
+       mlx5e_cleanup_ttc_rules(ttc);
+       return err;
+}
+
+#define MLX5E_TTC_NUM_GROUPS   3
+#define MLX5E_TTC_GROUP1_SIZE  BIT(3)
+#define MLX5E_TTC_GROUP2_SIZE  BIT(1)
+#define MLX5E_TTC_GROUP3_SIZE  BIT(0)
+#define MLX5E_TTC_TABLE_SIZE   (MLX5E_TTC_GROUP1_SIZE +\
+                                MLX5E_TTC_GROUP2_SIZE +\
+                                MLX5E_TTC_GROUP3_SIZE)
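
The group sizes mirror the ttc_rules[] table above: BIT(3) = 8 entries that match on both
ethertype and ip_protocol (the TCP/UDP/AH/ESP pairs), BIT(1) = 2 that match on ethertype only
(IPv4, IPv6), and BIT(0) = 1 catch-all, eleven flow entries in total.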
+static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc)
+{
+       int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+       struct mlx5e_flow_table *ft = &ttc->ft;
        int ix = 0;
+       u32 *in;
+       int err;
+       u8 *mc;
 
-       memset(in, 0, inlen);
-       MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
-       MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
-       MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
-       MLX5_SET_CFG(in, start_flow_index, ix);
-       ix += MLX5E_MAIN_GROUP0_SIZE;
-       MLX5_SET_CFG(in, end_flow_index, ix - 1);
-       ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
-       if (IS_ERR(ft->g[ft->num_groups]))
-               goto err_destroy_groups;
-       ft->num_groups++;
+       ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS,
+                       sizeof(*ft->g), GFP_KERNEL);
+       if (!ft->g)
+               return -ENOMEM;
+       in = mlx5_vzalloc(inlen);
+       if (!in) {
+               kfree(ft->g);
+               return -ENOMEM;
+       }
 
-       memset(in, 0, inlen);
-       MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+       /* L4 Group */
+       mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+       MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+       MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_CFG(in, start_flow_index, ix);
-       ix += MLX5E_MAIN_GROUP1_SIZE;
+       ix += MLX5E_TTC_GROUP1_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
-               goto err_destroy_groups;
+               goto err;
        ft->num_groups++;
 
-       memset(in, 0, inlen);
+       /* L3 Group */
+       MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
        MLX5_SET_CFG(in, start_flow_index, ix);
-       ix += MLX5E_MAIN_GROUP2_SIZE;
+       ix += MLX5E_TTC_GROUP2_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
-               goto err_destroy_groups;
+               goto err;
        ft->num_groups++;
 
+       /* Any Group */
        memset(in, 0, inlen);
-       MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
-       MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
-       MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
-       eth_broadcast_addr(dmac);
        MLX5_SET_CFG(in, start_flow_index, ix);
-       ix += MLX5E_MAIN_GROUP3_SIZE;
+       ix += MLX5E_TTC_GROUP3_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
-               goto err_destroy_groups;
+               goto err;
        ft->num_groups++;
 
-       memset(in, 0, inlen);
-       MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
-       MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
-       eth_broadcast_addr(dmac);
-       MLX5_SET_CFG(in, start_flow_index, ix);
-       ix += MLX5E_MAIN_GROUP4_SIZE;
-       MLX5_SET_CFG(in, end_flow_index, ix - 1);
-       ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
-       if (IS_ERR(ft->g[ft->num_groups]))
-               goto err_destroy_groups;
-       ft->num_groups++;
+       kvfree(in);
+       return 0;
 
-       memset(in, 0, inlen);
-       MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
-       eth_broadcast_addr(dmac);
-       MLX5_SET_CFG(in, start_flow_index, ix);
-       ix += MLX5E_MAIN_GROUP5_SIZE;
-       MLX5_SET_CFG(in, end_flow_index, ix - 1);
-       ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
-       if (IS_ERR(ft->g[ft->num_groups]))
-               goto err_destroy_groups;
-       ft->num_groups++;
+err:
+       err = PTR_ERR(ft->g[ft->num_groups]);
+       ft->g[ft->num_groups] = NULL;
+       kvfree(in);
 
-       memset(in, 0, inlen);
-       MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
-       MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
-       MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
-       dmac[0] = 0x01;
+       return err;
+}
+
+static void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
+{
+       struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
+
+       mlx5e_cleanup_ttc_rules(ttc);
+       mlx5e_destroy_flow_table(&ttc->ft);
+}
+
+static int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
+{
+       struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
+       struct mlx5e_flow_table *ft = &ttc->ft;
+       int err;
+
+       ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
+                                      MLX5E_TTC_TABLE_SIZE, MLX5E_TTC_FT_LEVEL);
+       if (IS_ERR(ft->t)) {
+               err = PTR_ERR(ft->t);
+               ft->t = NULL;
+               return err;
+       }
+
+       err = mlx5e_create_ttc_table_groups(ttc);
+       if (err)
+               goto err;
+
+       err = mlx5e_generate_ttc_table_rules(priv);
+       if (err)
+               goto err;
+
+       return 0;
+err:
+       mlx5e_destroy_flow_table(ft);
+       return err;
+}
+
+static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
+                                  struct mlx5e_l2_rule *ai)
+{
+       if (!IS_ERR_OR_NULL(ai->rule)) {
+               mlx5_del_flow_rule(ai->rule);
+               ai->rule = NULL;
+       }
+}
+
+static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
+                                 struct mlx5e_l2_rule *ai, int type)
+{
+       struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
+       struct mlx5_flow_destination dest;
+       u8 match_criteria_enable = 0;
+       u32 *match_criteria;
+       u32 *match_value;
+       int err = 0;
+       u8 *mc_dmac;
+       u8 *mv_dmac;
+
+       match_value    = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+       match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+       if (!match_value || !match_criteria) {
+               netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+               err = -ENOMEM;
+               goto add_l2_rule_out;
+       }
+
+       mc_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
+                              outer_headers.dmac_47_16);
+       mv_dmac = MLX5_ADDR_OF(fte_match_param, match_value,
+                              outer_headers.dmac_47_16);
+
+       dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+       dest.ft = priv->fs.ttc.ft.t;
+
+       switch (type) {
+       case MLX5E_FULLMATCH:
+               match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+               eth_broadcast_addr(mc_dmac);
+               ether_addr_copy(mv_dmac, ai->addr);
+               break;
+
+       case MLX5E_ALLMULTI:
+               match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+               mc_dmac[0] = 0x01;
+               mv_dmac[0] = 0x01;
+               break;
+
+       case MLX5E_PROMISC:
+               break;
+       }
+
+       ai->rule = mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria,
+                                     match_value,
+                                     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+                                     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+       if (IS_ERR(ai->rule)) {
+               netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
+                          __func__, mv_dmac);
+               err = PTR_ERR(ai->rule);
+               ai->rule = NULL;
+       }
+
+add_l2_rule_out:
+       kvfree(match_criteria);
+       kvfree(match_value);
+
+       return err;
+}
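Of the three rule flavours only MLX5E_FULLMATCH constrains all 48 DMAC bits; MLX5E_ALLMULTI masks and matches nothing but the group bit in the first octet, and MLX5E_PROMISC leaves the criteria empty so every frame matches. As a sanity check (a sketch, not part of the patch), the allmulti rule admits exactly the frames for which the stack's own predicate holds, since is_multicast_ether_addr() also just tests addr[0] & 0x01:

        #include <linux/etherdevice.h>

        /* true iff the MLX5E_ALLMULTI rule above would match this DMAC */
        static bool hits_allmulti_rule(const u8 *dmac)
        {
                return is_multicast_ether_addr(dmac);
        }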
+
+#define MLX5E_NUM_L2_GROUPS       3
+#define MLX5E_L2_GROUP1_SIZE      BIT(0)
+#define MLX5E_L2_GROUP2_SIZE      BIT(15)
+#define MLX5E_L2_GROUP3_SIZE      BIT(0)
+#define MLX5E_L2_TABLE_SIZE       (MLX5E_L2_GROUP1_SIZE +\
+                                   MLX5E_L2_GROUP2_SIZE +\
+                                   MLX5E_L2_GROUP3_SIZE)
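The arithmetic here is 1 + BIT(15) + 1 = 32770 entries: one promiscuous catch-all, up to 32768 exact-DMAC entries (unicast and multicast filters plus broadcast), and one allmulti entry, so MLX5E_L2_GROUP2_SIZE is effectively the upper bound on the number of MAC filters this table can hold.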
+static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
+{
+       int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+       struct mlx5e_flow_table *ft = &l2_table->ft;
+       int ix = 0;
+       u8 *mc_dmac;
+       u32 *in;
+       int err;
+       u8 *mc;
+
+       ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
+       if (!ft->g)
+               return -ENOMEM;
+       in = mlx5_vzalloc(inlen);
+       if (!in) {
+               kfree(ft->g);
+               return -ENOMEM;
+       }
+
+       mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+       mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
+                              outer_headers.dmac_47_16);
+       /* Flow Group for promiscuous */
        MLX5_SET_CFG(in, start_flow_index, ix);
-       ix += MLX5E_MAIN_GROUP6_SIZE;
+       ix += MLX5E_L2_GROUP1_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;
 
-       memset(in, 0, inlen);
+       /* Flow Group for full match */
+       eth_broadcast_addr(mc_dmac);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
-       MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
-       dmac[0] = 0x01;
        MLX5_SET_CFG(in, start_flow_index, ix);
-       ix += MLX5E_MAIN_GROUP7_SIZE;
+       ix += MLX5E_L2_GROUP2_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;
 
-       memset(in, 0, inlen);
-       MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
-       dmac[0] = 0x01;
+       /* Flow Group for allmulti */
+       eth_zero_addr(mc_dmac);
+       mc_dmac[0] = 0x01;
        MLX5_SET_CFG(in, start_flow_index, ix);
-       ix += MLX5E_MAIN_GROUP8_SIZE;
+       ix += MLX5E_L2_GROUP3_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;
 
+       kvfree(in);
        return 0;
 
 err_destroy_groups:
        err = PTR_ERR(ft->g[ft->num_groups]);
        ft->g[ft->num_groups] = NULL;
        mlx5e_destroy_groups(ft);
+       kvfree(in);
 
        return err;
 }
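Both group builders reuse a single create_flow_group_in blob from group to group, touching only the mask bits that change (the TTC builder clears outer_headers.ip_protocol for its L3 group; here the DMAC mask is re-zeroed before the multicast bit is set for allmulti) and paying for a full memset() only where every previously-set bit must go away (the TTC 'Any' group). That keeps the code compact, but it makes the ordering of the blocks load-bearing: swapping two groups would leave stale mask bits behind.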
 
-static int mlx5e_create_main_groups(struct mlx5e_flow_table *ft)
+static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
 {
-       u32 *in;
-       int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
-       int err;
-
-       in = mlx5_vzalloc(inlen);
-       if (!in)
-               return -ENOMEM;
-
-       err = __mlx5e_create_main_groups(ft, in, inlen);
-
-       kvfree(in);
-       return err;
+       mlx5e_destroy_flow_table(&priv->fs.l2.ft);
 }
 
-static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
+static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
 {
-       struct mlx5e_flow_table *ft = &priv->fts.main;
+       struct mlx5e_l2_table *l2_table = &priv->fs.l2;
+       struct mlx5e_flow_table *ft = &l2_table->ft;
        int err;
 
        ft->num_groups = 0;
-       ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_MAIN_TABLE_SIZE);
+       ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
+                                      MLX5E_L2_TABLE_SIZE, MLX5E_L2_FT_LEVEL);
 
        if (IS_ERR(ft->t)) {
                err = PTR_ERR(ft->t);
                ft->t = NULL;
                return err;
        }
-       ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
-       if (!ft->g) {
-               err = -ENOMEM;
-               goto err_destroy_main_flow_table;
-       }
 
-       err = mlx5e_create_main_groups(ft);
+       err = mlx5e_create_l2_table_groups(l2_table);
        if (err)
-               goto err_free_g;
-       return 0;
+               goto err_destroy_flow_table;
 
-err_free_g:
-       kfree(ft->g);
+       return 0;
 
-err_destroy_main_flow_table:
+err_destroy_flow_table:
        mlx5_destroy_flow_table(ft->t);
        ft->t = NULL;
 
        return err;
 }
 
-static void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
-{
-       mlx5e_destroy_groups(ft);
-       kfree(ft->g);
-       mlx5_destroy_flow_table(ft->t);
-       ft->t = NULL;
-}
-
-static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
-{
-       mlx5e_destroy_flow_table(&priv->fts.main);
-}
-
 #define MLX5E_NUM_VLAN_GROUPS  2
 #define MLX5E_VLAN_GROUP0_SIZE BIT(12)
 #define MLX5E_VLAN_GROUP1_SIZE BIT(1)
 #define MLX5E_VLAN_TABLE_SIZE  (MLX5E_VLAN_GROUP0_SIZE +\
                                 MLX5E_VLAN_GROUP1_SIZE)
 
-static int __mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft, u32 *in,
-                                     int inlen)
+static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
+                                           int inlen)
 {
        int err;
        int ix = 0;
@@ -1128,7 +1002,7 @@ err_destroy_groups:
        return err;
 }
 
-static int mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft)
+static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
 {
        u32 *in;
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
@@ -1138,19 +1012,20 @@ static int mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft)
        if (!in)
                return -ENOMEM;
 
-       err = __mlx5e_create_vlan_groups(ft, in, inlen);
+       err = __mlx5e_create_vlan_table_groups(ft, in, inlen);
 
        kvfree(in);
        return err;
 }
 
-static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
+static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
 {
-       struct mlx5e_flow_table *ft = &priv->fts.vlan;
+       struct mlx5e_flow_table *ft = &priv->fs.vlan.ft;
        int err;
 
        ft->num_groups = 0;
-       ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_VLAN_TABLE_SIZE);
+       ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
+                                      MLX5E_VLAN_TABLE_SIZE, MLX5E_VLAN_FT_LEVEL);
 
        if (IS_ERR(ft->t)) {
                err = PTR_ERR(ft->t);
@@ -1160,65 +1035,90 @@ static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
        ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
        if (!ft->g) {
                err = -ENOMEM;
-               goto err_destroy_vlan_flow_table;
+               goto err_destroy_vlan_table;
        }
 
-       err = mlx5e_create_vlan_groups(ft);
+       err = mlx5e_create_vlan_table_groups(ft);
        if (err)
                goto err_free_g;
 
+       err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+       if (err)
+               goto err_destroy_vlan_flow_groups;
+
        return 0;
 
+err_destroy_vlan_flow_groups:
+       mlx5e_destroy_groups(ft);
 err_free_g:
        kfree(ft->g);
-
-err_destroy_vlan_flow_table:
+err_destroy_vlan_table:
        mlx5_destroy_flow_table(ft->t);
        ft->t = NULL;
 
        return err;
 }
 
-static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
+static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
 {
-       mlx5e_destroy_flow_table(&priv->fts.vlan);
+       mlx5e_destroy_flow_table(&priv->fs.vlan.ft);
 }
 
-int mlx5e_create_flow_tables(struct mlx5e_priv *priv)
+int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
 {
        int err;
 
-       priv->fts.ns = mlx5_get_flow_namespace(priv->mdev,
+       priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
                                               MLX5_FLOW_NAMESPACE_KERNEL);
 
-       if (!priv->fts.ns)
+       if (!priv->fs.ns)
                return -EINVAL;
 
-       err = mlx5e_create_vlan_flow_table(priv);
-       if (err)
-               return err;
+       err = mlx5e_arfs_create_tables(priv);
+       if (err) {
+               netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
+                          err);
+               priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
+       }
 
-       err = mlx5e_create_main_flow_table(priv);
-       if (err)
-               goto err_destroy_vlan_flow_table;
+       err = mlx5e_create_ttc_table(priv);
+       if (err) {
+               netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
+                          err);
+               goto err_destroy_arfs_tables;
+       }
 
-       err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
-       if (err)
-               goto err_destroy_main_flow_table;
+       err = mlx5e_create_l2_table(priv);
+       if (err) {
+               netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
+                          err);
+               goto err_destroy_ttc_table;
+       }
+
+       err = mlx5e_create_vlan_table(priv);
+       if (err) {
+               netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
+                          err);
+               goto err_destroy_l2_table;
+       }
 
        return 0;
 
-err_destroy_main_flow_table:
-       mlx5e_destroy_main_flow_table(priv);
-err_destroy_vlan_flow_table:
-       mlx5e_destroy_vlan_flow_table(priv);
+err_destroy_l2_table:
+       mlx5e_destroy_l2_table(priv);
+err_destroy_ttc_table:
+       mlx5e_destroy_ttc_table(priv);
+err_destroy_arfs_tables:
+       mlx5e_arfs_destroy_tables(priv);
 
        return err;
 }
 
-void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv)
+void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
 {
        mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
-       mlx5e_destroy_main_flow_table(priv);
-       mlx5e_destroy_vlan_flow_table(priv);
+       mlx5e_destroy_vlan_table(priv);
+       mlx5e_destroy_l2_table(priv);
+       mlx5e_destroy_ttc_table(priv);
+       mlx5e_arfs_destroy_tables(priv);
 }
index e0adb604f461e00e64952f70a661b62e1af787cc..08040702824d9885311921d8c26e95e410e80919 100644 (file)
@@ -48,6 +48,7 @@ struct mlx5e_sq_param {
        u32                        sqc[MLX5_ST_SZ_DW(sqc)];
        struct mlx5_wq_param       wq;
        u16                        max_inline;
+       bool                       icosq;
 };
 
 struct mlx5e_cq_param {
@@ -59,8 +60,10 @@ struct mlx5e_cq_param {
 struct mlx5e_channel_param {
        struct mlx5e_rq_param      rq;
        struct mlx5e_sq_param      sq;
+       struct mlx5e_sq_param      icosq;
        struct mlx5e_cq_param      rx_cq;
        struct mlx5e_cq_param      tx_cq;
+       struct mlx5e_cq_param      icosq_cq;
 };
 
 static void mlx5e_update_carrier(struct mlx5e_priv *priv)
@@ -88,82 +91,15 @@ static void mlx5e_update_carrier_work(struct work_struct *work)
        mutex_unlock(&priv->state_lock);
 }
 
-static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
-{
-       struct mlx5_core_dev *mdev = priv->mdev;
-       struct mlx5e_pport_stats *s = &priv->stats.pport;
-       u32 *in;
-       u32 *out;
-       int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
-
-       in  = mlx5_vzalloc(sz);
-       out = mlx5_vzalloc(sz);
-       if (!in || !out)
-               goto free_out;
-
-       MLX5_SET(ppcnt_reg, in, local_port, 1);
-
-       MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
-       mlx5_core_access_reg(mdev, in, sz, out,
-                            sz, MLX5_REG_PPCNT, 0, 0);
-       memcpy(s->IEEE_802_3_counters,
-              MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
-              sizeof(s->IEEE_802_3_counters));
-
-       MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
-       mlx5_core_access_reg(mdev, in, sz, out,
-                            sz, MLX5_REG_PPCNT, 0, 0);
-       memcpy(s->RFC_2863_counters,
-              MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
-              sizeof(s->RFC_2863_counters));
-
-       MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
-       mlx5_core_access_reg(mdev, in, sz, out,
-                            sz, MLX5_REG_PPCNT, 0, 0);
-       memcpy(s->RFC_2819_counters,
-              MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
-              sizeof(s->RFC_2819_counters));
-
-free_out:
-       kvfree(in);
-       kvfree(out);
-}
-
-void mlx5e_update_stats(struct mlx5e_priv *priv)
+static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 {
-       struct mlx5_core_dev *mdev = priv->mdev;
-       struct mlx5e_vport_stats *s = &priv->stats.vport;
+       struct mlx5e_sw_stats *s = &priv->stats.sw;
        struct mlx5e_rq_stats *rq_stats;
        struct mlx5e_sq_stats *sq_stats;
-       u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
-       u32 *out;
-       int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
-       u64 tx_offload_none;
+       u64 tx_offload_none = 0;
        int i, j;
 
-       out = mlx5_vzalloc(outlen);
-       if (!out)
-               return;
-
-       /* Collect first the SW counters and then HW for consistency */
-       s->rx_packets           = 0;
-       s->rx_bytes             = 0;
-       s->tx_packets           = 0;
-       s->tx_bytes             = 0;
-       s->tso_packets          = 0;
-       s->tso_bytes            = 0;
-       s->tso_inner_packets    = 0;
-       s->tso_inner_bytes      = 0;
-       s->tx_queue_stopped     = 0;
-       s->tx_queue_wake        = 0;
-       s->tx_queue_dropped     = 0;
-       s->tx_csum_inner        = 0;
-       tx_offload_none         = 0;
-       s->lro_packets          = 0;
-       s->lro_bytes            = 0;
-       s->rx_csum_none         = 0;
-       s->rx_csum_sw           = 0;
-       s->rx_wqe_err           = 0;
+       memset(s, 0, sizeof(*s));
        for (i = 0; i < priv->params.num_channels; i++) {
                rq_stats = &priv->channel[i]->rq.stats;
 
@@ -173,7 +109,13 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
                s->lro_bytes    += rq_stats->lro_bytes;
                s->rx_csum_none += rq_stats->csum_none;
                s->rx_csum_sw   += rq_stats->csum_sw;
+               s->rx_csum_inner += rq_stats->csum_inner;
                s->rx_wqe_err   += rq_stats->wqe_err;
+               s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
+               s->rx_mpwqe_frag   += rq_stats->mpwqe_frag;
+               s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
+               s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
+               s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
 
                for (j = 0; j < priv->params.num_tc; j++) {
                        sq_stats = &priv->channel[i]->sq[j].stats;
@@ -192,7 +134,23 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
                }
        }
 
-       /* HW counters */
+       /* Update calculated offload counters */
+       s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner;
+       s->rx_csum_good    = s->rx_packets - s->rx_csum_none -
+                            s->rx_csum_sw;
+
+       s->link_down_events = MLX5_GET(ppcnt_reg,
+                               priv->stats.pport.phy_counters,
+                               counter_set.phys_layer_cntrs.link_down_events);
+}
+
+static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
+{
+       int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
+       u32 *out = (u32 *)priv->stats.vport.query_vport_out;
+       u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
+       struct mlx5_core_dev *mdev = priv->mdev;
+
        memset(in, 0, sizeof(in));
 
        MLX5_SET(query_vport_counter_in, in, opcode,
@@ -202,56 +160,69 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
 
        memset(out, 0, outlen);
 
-       if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen))
+       mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
+}
+
+static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
+{
+       struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+       struct mlx5_core_dev *mdev = priv->mdev;
+       int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+       int prio;
+       void *out;
+       u32 *in;
+
+       in = mlx5_vzalloc(sz);
+       if (!in)
                goto free_out;
 
-#define MLX5_GET_CTR(p, x) \
-       MLX5_GET64(query_vport_counter_out, p, x)
-
-       s->rx_error_packets     =
-               MLX5_GET_CTR(out, received_errors.packets);
-       s->rx_error_bytes       =
-               MLX5_GET_CTR(out, received_errors.octets);
-       s->tx_error_packets     =
-               MLX5_GET_CTR(out, transmit_errors.packets);
-       s->tx_error_bytes       =
-               MLX5_GET_CTR(out, transmit_errors.octets);
-
-       s->rx_unicast_packets   =
-               MLX5_GET_CTR(out, received_eth_unicast.packets);
-       s->rx_unicast_bytes     =
-               MLX5_GET_CTR(out, received_eth_unicast.octets);
-       s->tx_unicast_packets   =
-               MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
-       s->tx_unicast_bytes     =
-               MLX5_GET_CTR(out, transmitted_eth_unicast.octets);
-
-       s->rx_multicast_packets =
-               MLX5_GET_CTR(out, received_eth_multicast.packets);
-       s->rx_multicast_bytes   =
-               MLX5_GET_CTR(out, received_eth_multicast.octets);
-       s->tx_multicast_packets =
-               MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
-       s->tx_multicast_bytes   =
-               MLX5_GET_CTR(out, transmitted_eth_multicast.octets);
-
-       s->rx_broadcast_packets =
-               MLX5_GET_CTR(out, received_eth_broadcast.packets);
-       s->rx_broadcast_bytes   =
-               MLX5_GET_CTR(out, received_eth_broadcast.octets);
-       s->tx_broadcast_packets =
-               MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
-       s->tx_broadcast_bytes   =
-               MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
+       MLX5_SET(ppcnt_reg, in, local_port, 1);
 
-       /* Update calculated offload counters */
-       s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner;
-       s->rx_csum_good    = s->rx_packets - s->rx_csum_none -
-                              s->rx_csum_sw;
+       out = pstats->IEEE_802_3_counters;
+       MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
+       mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+
+       out = pstats->RFC_2863_counters;
+       MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
+       mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+
+       out = pstats->RFC_2819_counters;
+       MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
+       mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+
+       out = pstats->phy_counters;
+       MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
+       mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+
+       MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
+       for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+               out = pstats->per_prio_counters[prio];
+               MLX5_SET(ppcnt_reg, in, prio_tc, prio);
+               mlx5_core_access_reg(mdev, in, sz, out, sz,
+                                    MLX5_REG_PPCNT, 0, 0);
+       }
 
-       mlx5e_update_pport_counters(priv);
 free_out:
-       kvfree(out);
+       kvfree(in);
+}
+
+static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
+{
+       struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
+
+       if (!priv->q_counter)
+               return;
+
+       mlx5_core_query_out_of_buffer(priv->mdev, priv->q_counter,
+                                     &qcnt->rx_out_of_buffer);
+}
+
+void mlx5e_update_stats(struct mlx5e_priv *priv)
+{
+       mlx5e_update_q_counter(priv);
+       mlx5e_update_vport_counters(priv);
+       mlx5e_update_pport_counters(priv);
+       mlx5e_update_sw_counters(priv);
 }
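mlx5e_update_stats() now fans out to the four counter families in a fixed order, and the order matters in one place: mlx5e_update_sw_counters() must run after mlx5e_update_pport_counters() because it folds a hardware value into the software view (link_down_events is read out of priv->stats.pport.phy_counters), so the PPCNT registers have to be fresh by then.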
 
 static void mlx5e_update_stats_work(struct work_struct *work)
@@ -262,9 +233,8 @@ static void mlx5e_update_stats_work(struct work_struct *work)
        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                mlx5e_update_stats(priv);
-               schedule_delayed_work(dwork,
-                                     msecs_to_jiffies(
-                                             MLX5E_UPDATE_STATS_INTERVAL));
+               queue_delayed_work(priv->wq, dwork,
+                                  msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
        }
        mutex_unlock(&priv->state_lock);
 }
@@ -280,7 +250,7 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
        switch (event) {
        case MLX5_DEV_EVENT_PORT_UP:
        case MLX5_DEV_EVENT_PORT_DOWN:
-               schedule_work(&priv->update_carrier_work);
+               queue_work(priv->wq, &priv->update_carrier_work);
                break;
 
        default:
@@ -310,6 +280,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
        struct mlx5_core_dev *mdev = priv->mdev;
        void *rqc = param->rqc;
        void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
+       u32 byte_count;
        int wq_sz;
        int err;
        int i;
@@ -324,32 +295,56 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
        rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];
 
        wq_sz = mlx5_wq_ll_get_size(&rq->wq);
-       rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
-                              cpu_to_node(c->cpu));
-       if (!rq->skb) {
-               err = -ENOMEM;
-               goto err_rq_wq_destroy;
-       }
 
-       rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz :
-                                            MLX5E_SW2HW_MTU(priv->netdev->mtu);
-       rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz + MLX5E_NET_IP_ALIGN);
+       switch (priv->params.rq_wq_type) {
+       case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+               rq->wqe_info = kzalloc_node(wq_sz * sizeof(*rq->wqe_info),
+                                           GFP_KERNEL, cpu_to_node(c->cpu));
+               if (!rq->wqe_info) {
+                       err = -ENOMEM;
+                       goto err_rq_wq_destroy;
+               }
+               rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
+               rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
+
+               rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
+               rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
+               rq->wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
+               byte_count = rq->wqe_sz;
+               break;
+       default: /* MLX5_WQ_TYPE_LINKED_LIST */
+               rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
+                                      cpu_to_node(c->cpu));
+               if (!rq->skb) {
+                       err = -ENOMEM;
+                       goto err_rq_wq_destroy;
+               }
+               rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
+               rq->alloc_wqe = mlx5e_alloc_rx_wqe;
+
+               rq->wqe_sz = (priv->params.lro_en) ?
+                               priv->params.lro_wqe_sz :
+                               MLX5E_SW2HW_MTU(priv->netdev->mtu);
+               rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz);
+               byte_count = rq->wqe_sz;
+               byte_count |= MLX5_HW_START_PADDING;
+       }
 
        for (i = 0; i < wq_sz; i++) {
                struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
-               u32 byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;
 
-               wqe->data.lkey       = c->mkey_be;
-               wqe->data.byte_count =
-                       cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
+               wqe->data.byte_count = cpu_to_be32(byte_count);
        }
 
+       rq->wq_type = priv->params.rq_wq_type;
        rq->pdev    = c->pdev;
        rq->netdev  = c->netdev;
        rq->tstamp  = &priv->tstamp;
        rq->channel = c;
        rq->ix      = c->ix;
        rq->priv    = c->priv;
+       rq->mkey_be = c->mkey_be;
+       rq->umr_mkey_be = cpu_to_be32(c->priv->umr_mkey.key);
 
        return 0;
 
@@ -361,7 +356,14 @@ err_rq_wq_destroy:
 
 static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
 {
-       kfree(rq->skb);
+       switch (rq->wq_type) {
+       case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+               kfree(rq->wqe_info);
+               break;
+       default: /* MLX5_WQ_TYPE_LINKED_LIST */
+               kfree(rq->skb);
+       }
+
        mlx5_wq_destroy(&rq->wq_ctrl);
 }
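In the striding-RQ (MPWQE) case a single receive WQE spans mpwqe_num_strides strides of mpwqe_stride_sz bytes each, so wqe_sz is their product and byte_count carries no MLX5_HW_START_PADDING flag. Illustrative numbers (the real defaults come from the mpwqe_log_* parameters, which this hunk only consumes): a 2^6 = 64-byte stride and 2^11 = 2048 strides give a 64 * 2048 = 128 KiB WQE. Correspondingly, mlx5e_build_rx_cq_param() below sizes the CQ at log_rq_size + mpwqe_log_num_strides to allow up to one CQE per stride, and mlx5e_build_rq_param() encodes both logs relative to firmware baselines (the '- 9' and '- 6' subtractions).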
 
@@ -390,6 +392,7 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
        MLX5_SET(rqc,  rqc, cqn,                rq->cq.mcq.cqn);
        MLX5_SET(rqc,  rqc, state,              MLX5_RQC_STATE_RST);
        MLX5_SET(rqc,  rqc, flush_in_error_en,  1);
+       MLX5_SET(rqc,  rqc, vsd, priv->params.vlan_strip_disable);
        MLX5_SET(wq,   wq,  log_wq_pg_sz,       rq->wq_ctrl.buf.page_shift -
                                                MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq,  dbr_addr,           rq->wq_ctrl.db.dma);
@@ -404,7 +407,8 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
        return err;
 }
 
-static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
+static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
+                                int next_state)
 {
        struct mlx5e_channel *c = rq->channel;
        struct mlx5e_priv *priv = c->priv;
@@ -432,6 +436,36 @@ static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
        return err;
 }
 
+static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
+{
+       struct mlx5e_channel *c = rq->channel;
+       struct mlx5e_priv *priv = c->priv;
+       struct mlx5_core_dev *mdev = priv->mdev;
+
+       void *in;
+       void *rqc;
+       int inlen;
+       int err;
+
+       inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
+       in = mlx5_vzalloc(inlen);
+       if (!in)
+               return -ENOMEM;
+
+       rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+
+       MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
+       MLX5_SET64(modify_rq_in, in, modify_bitmask, MLX5_RQ_BITMASK_VSD);
+       MLX5_SET(rqc, rqc, vsd, vsd);
+       MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
+
+       err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
+
+       kvfree(in);
+
+       return err;
+}
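mlx5e_modify_rq_vsd() flips the vsd ('VLAN strip disable') bit on an RQ that is already RDY, complementing the initial value written from priv->params.vlan_strip_disable in mlx5e_enable_rq() above. A sketch of the kind of caller this enables, with a hypothetical name, assuming one RQ per channel as elsewhere in this file:

        static int mlx5e_set_rx_vlan_strip(struct mlx5e_priv *priv, bool enable)
        {
                int err;
                int i;

                for (i = 0; i < priv->params.num_channels; i++) {
                        /* vsd is a *disable* bit, hence the inversion */
                        err = mlx5e_modify_rq_vsd(&priv->channel[i]->rq,
                                                  !enable);
                        if (err)
                                return err;
                }

                return 0;
        }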
+
 static void mlx5e_disable_rq(struct mlx5e_rq *rq)
 {
        mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
@@ -458,6 +492,8 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
                         struct mlx5e_rq_param *param,
                         struct mlx5e_rq *rq)
 {
+       struct mlx5e_sq *sq = &c->icosq;
+       u16 pi = sq->pc & sq->wq.sz_m1;
        int err;
 
        err = mlx5e_create_rq(c, param, rq);
@@ -468,12 +504,15 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
        if (err)
                goto err_destroy_rq;
 
-       err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
+       err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
        if (err)
                goto err_disable_rq;
 
        set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
-       mlx5e_send_nop(&c->sq[0], true); /* trigger mlx5e_post_rx_wqes() */
+
+       sq->ico_wqe_info[pi].opcode     = MLX5_OPCODE_NOP;
+       sq->ico_wqe_info[pi].num_wqebbs = 1;
+       mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */
 
        return 0;
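The NOP that used to be pushed through the first data SQ now goes through the channel's icosq (internal control operations SQ, assuming the usual expansion of the name). The ico_wqe_info[] slot at the producer index records the opcode and WQEBB count so the completion path can account for whatever control WQE it finds on the CQ. The queue never carries netdev traffic, which is why mlx5e_enable_sq() programs it with tis_lst_sz = 0, and mlx5e_build_icosq_param() requests the reg_umr capability so the striding-RQ path can post its UMR mappings on it as well.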
 
@@ -490,7 +529,7 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq)
        clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
        napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
 
-       mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
+       mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
        while (!mlx5_wq_ll_is_empty(&rq->wq))
                msleep(20);
 
@@ -539,7 +578,6 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
 
        void *sqc = param->sqc;
        void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
-       int txq_ix;
        int err;
 
        err = mlx5_alloc_map_uar(mdev, &sq->uar, true);
@@ -567,8 +605,24 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
        if (err)
                goto err_sq_wq_destroy;
 
-       txq_ix = c->ix + tc * priv->params.num_channels;
-       sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
+       if (param->icosq) {
+               u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+
+               sq->ico_wqe_info = kzalloc_node(sizeof(*sq->ico_wqe_info) *
+                                               wq_sz,
+                                               GFP_KERNEL,
+                                               cpu_to_node(c->cpu));
+               if (!sq->ico_wqe_info) {
+                       err = -ENOMEM;
+                       goto err_free_sq_db;
+               }
+       } else {
+               int txq_ix;
+
+               txq_ix = c->ix + tc * priv->params.num_channels;
+               sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
+               priv->txq_to_sq_map[txq_ix] = sq;
+       }
 
        sq->pdev      = c->pdev;
        sq->tstamp    = &priv->tstamp;
@@ -577,10 +631,12 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
        sq->tc        = tc;
        sq->edge      = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
        sq->bf_budget = MLX5E_SQ_BF_BUDGET;
-       priv->txq_to_sq_map[txq_ix] = sq;
 
        return 0;
 
+err_free_sq_db:
+       mlx5e_free_sq_db(sq);
+
 err_sq_wq_destroy:
        mlx5_wq_destroy(&sq->wq_ctrl);
 
@@ -595,6 +651,7 @@ static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
        struct mlx5e_channel *c = sq->channel;
        struct mlx5e_priv *priv = c->priv;
 
+       kfree(sq->ico_wqe_info);
        mlx5e_free_sq_db(sq);
        mlx5_wq_destroy(&sq->wq_ctrl);
        mlx5_unmap_free_uar(priv->mdev, &sq->uar);
@@ -623,10 +680,10 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
 
        memcpy(sqc, param->sqc, sizeof(param->sqc));
 
-       MLX5_SET(sqc,  sqc, tis_num_0,          priv->tisn[sq->tc]);
-       MLX5_SET(sqc,  sqc, cqn,                c->sq[sq->tc].cq.mcq.cqn);
+       MLX5_SET(sqc,  sqc, tis_num_0, param->icosq ? 0 : priv->tisn[sq->tc]);
+       MLX5_SET(sqc,  sqc, cqn,                sq->cq.mcq.cqn);
        MLX5_SET(sqc,  sqc, state,              MLX5_SQC_STATE_RST);
-       MLX5_SET(sqc,  sqc, tis_lst_sz,         1);
+       MLX5_SET(sqc,  sqc, tis_lst_sz,         param->icosq ? 0 : 1);
        MLX5_SET(sqc,  sqc, flush_in_error_en,  1);
 
        MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
@@ -701,9 +758,11 @@ static int mlx5e_open_sq(struct mlx5e_channel *c,
        if (err)
                goto err_disable_sq;
 
-       set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
-       netdev_tx_reset_queue(sq->txq);
-       netif_tx_start_queue(sq->txq);
+       if (sq->txq) {
+               set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
+               netdev_tx_reset_queue(sq->txq);
+               netif_tx_start_queue(sq->txq);
+       }
 
        return 0;
 
@@ -724,15 +783,19 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)
 
 static void mlx5e_close_sq(struct mlx5e_sq *sq)
 {
-       clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
-       napi_synchronize(&sq->channel->napi); /* prevent netif_tx_wake_queue */
-       netif_tx_disable_queue(sq->txq);
+       if (sq->txq) {
+               clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
+               /* prevent netif_tx_wake_queue */
+               napi_synchronize(&sq->channel->napi);
+               netif_tx_disable_queue(sq->txq);
 
-       /* ensure hw is notified of all pending wqes */
-       if (mlx5e_sq_has_room_for(sq, 1))
-               mlx5e_send_nop(sq, true);
+               /* ensure hw is notified of all pending wqes */
+               if (mlx5e_sq_has_room_for(sq, 1))
+                       mlx5e_send_nop(sq, true);
+
+               mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
+       }
 
-       mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
        while (sq->cc != sq->pc) /* wait till sq is empty */
                msleep(20);
 
@@ -986,10 +1049,14 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 
        netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
 
-       err = mlx5e_open_tx_cqs(c, cparam);
+       err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, 0, 0);
        if (err)
                goto err_napi_del;
 
+       err = mlx5e_open_tx_cqs(c, cparam);
+       if (err)
+               goto err_close_icosq_cq;
+
        err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
                            priv->params.rx_cq_moderation_usec,
                            priv->params.rx_cq_moderation_pkts);
@@ -998,10 +1065,14 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 
        napi_enable(&c->napi);
 
-       err = mlx5e_open_sqs(c, cparam);
+       err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq);
        if (err)
                goto err_disable_napi;
 
+       err = mlx5e_open_sqs(c, cparam);
+       if (err)
+               goto err_close_icosq;
+
        err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
        if (err)
                goto err_close_sqs;
@@ -1014,6 +1085,9 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 err_close_sqs:
        mlx5e_close_sqs(c);
 
+err_close_icosq:
+       mlx5e_close_sq(&c->icosq);
+
 err_disable_napi:
        napi_disable(&c->napi);
        mlx5e_close_cq(&c->rq.cq);
@@ -1021,6 +1095,9 @@ err_disable_napi:
 err_close_tx_cqs:
        mlx5e_close_tx_cqs(c);
 
+err_close_icosq_cq:
+       mlx5e_close_cq(&c->icosq.cq);
+
 err_napi_del:
        netif_napi_del(&c->napi);
        napi_hash_del(&c->napi);
@@ -1033,9 +1110,11 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
 {
        mlx5e_close_rq(&c->rq);
        mlx5e_close_sqs(c);
+       mlx5e_close_sq(&c->icosq);
        napi_disable(&c->napi);
        mlx5e_close_cq(&c->rq.cq);
        mlx5e_close_tx_cqs(c);
+       mlx5e_close_cq(&c->icosq.cq);
        netif_napi_del(&c->napi);
 
        napi_hash_del(&c->napi);
@@ -1050,11 +1129,23 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
        void *rqc = param->rqc;
        void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
 
-       MLX5_SET(wq, wq, wq_type,          MLX5_WQ_TYPE_LINKED_LIST);
+       switch (priv->params.rq_wq_type) {
+       case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+               MLX5_SET(wq, wq, log_wqe_num_of_strides,
+                        priv->params.mpwqe_log_num_strides - 9);
+               MLX5_SET(wq, wq, log_wqe_stride_size,
+                        priv->params.mpwqe_log_stride_sz - 6);
+               MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
+               break;
+       default: /* MLX5_WQ_TYPE_LINKED_LIST */
+               MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
+       }
+
        MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
        MLX5_SET(wq, wq, log_wq_stride,    ilog2(sizeof(struct mlx5e_rx_wqe)));
        MLX5_SET(wq, wq, log_wq_sz,        priv->params.log_rq_size);
        MLX5_SET(wq, wq, pd,               priv->pdn);
+       MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
 
        param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
        param->wq.linear = 1;
@@ -1069,17 +1160,27 @@ static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
        MLX5_SET(wq, wq, log_wq_stride,    ilog2(sizeof(struct mlx5e_rx_wqe)));
 }
 
-static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
-                                struct mlx5e_sq_param *param)
+static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
+                                       struct mlx5e_sq_param *param)
 {
        void *sqc = param->sqc;
        void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
 
-       MLX5_SET(wq, wq, log_wq_sz,     priv->params.log_sq_size);
        MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
        MLX5_SET(wq, wq, pd,            priv->pdn);
 
        param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
+}
+
+static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
+                                struct mlx5e_sq_param *param)
+{
+       void *sqc = param->sqc;
+       void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+       mlx5e_build_sq_param_common(priv, param);
+       MLX5_SET(wq, wq, log_wq_sz,     priv->params.log_sq_size);
+
        param->max_inline = priv->params.tx_max_inline;
 }
 
@@ -1095,8 +1196,22 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
                                    struct mlx5e_cq_param *param)
 {
        void *cqc = param->cqc;
+       u8 log_cq_size;
 
-       MLX5_SET(cqc, cqc, log_cq_size,  priv->params.log_rq_size);
+       switch (priv->params.rq_wq_type) {
+       case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+               log_cq_size = priv->params.log_rq_size +
+                       priv->params.mpwqe_log_num_strides;
+               break;
+       default: /* MLX5_WQ_TYPE_LINKED_LIST */
+               log_cq_size = priv->params.log_rq_size;
+       }
+
+       MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
+       if (priv->params.rx_cqe_compress) {
+               MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
+               MLX5_SET(cqc, cqc, cqe_comp_en, 1);
+       }
 
        mlx5e_build_common_cq_param(priv, param);
 }
@@ -1106,25 +1221,52 @@ static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
 {
        void *cqc = param->cqc;
 
-       MLX5_SET(cqc, cqc, log_cq_size,  priv->params.log_sq_size);
+       MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
+
+       mlx5e_build_common_cq_param(priv, param);
+}
+
+static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
+                                    struct mlx5e_cq_param *param,
+                                    u8 log_wq_size)
+{
+       void *cqc = param->cqc;
+
+       MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);
 
        mlx5e_build_common_cq_param(priv, param);
 }
 
-static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
-                                     struct mlx5e_channel_param *cparam)
+static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
+                                   struct mlx5e_sq_param *param,
+                                   u8 log_wq_size)
 {
-       memset(cparam, 0, sizeof(*cparam));
+       void *sqc = param->sqc;
+       void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+       mlx5e_build_sq_param_common(priv, param);
+
+       MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
+       MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
+
+       param->icosq = true;
+}
+
+static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_channel_param *cparam)
+{
+       u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
 
        mlx5e_build_rq_param(priv, &cparam->rq);
        mlx5e_build_sq_param(priv, &cparam->sq);
+       mlx5e_build_icosq_param(priv, &cparam->icosq, icosq_log_wq_sz);
        mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
        mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
+       mlx5e_build_ico_cq_param(priv, &cparam->icosq_cq, icosq_log_wq_sz);
 }
 
 static int mlx5e_open_channels(struct mlx5e_priv *priv)
 {
-       struct mlx5e_channel_param cparam;
+       struct mlx5e_channel_param *cparam;
        int nch = priv->params.num_channels;
        int err = -ENOMEM;
        int i;
@@ -1136,12 +1278,15 @@ static int mlx5e_open_channels(struct mlx5e_priv *priv)
        priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
                                      sizeof(struct mlx5e_sq *), GFP_KERNEL);
 
-       if (!priv->channel || !priv->txq_to_sq_map)
+       cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
+
+       if (!priv->channel || !priv->txq_to_sq_map || !cparam)
                goto err_free_txq_to_sq_map;
 
-       mlx5e_build_channel_param(priv, &cparam);
+       mlx5e_build_channel_param(priv, cparam);
+
        for (i = 0; i < nch; i++) {
-               err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
+               err = mlx5e_open_channel(priv, i, cparam, &priv->channel[i]);
                if (err)
                        goto err_close_channels;
        }
@@ -1152,6 +1297,7 @@ static int mlx5e_open_channels(struct mlx5e_priv *priv)
                        goto err_close_channels;
        }
 
+       kfree(cparam);
        return 0;
 
 err_close_channels:
@@ -1161,6 +1307,7 @@ err_close_channels:
 err_free_txq_to_sq_map:
        kfree(priv->txq_to_sq_map);
        kfree(priv->channel);
+       kfree(cparam);
 
        return err;
 }
@@ -1200,48 +1347,36 @@ static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
 
        for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
                int ix = i;
+               u32 rqn;
 
                if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
                        ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);
 
                ix = priv->params.indirection_rqt[ix];
-               MLX5_SET(rqtc, rqtc, rq_num[i],
-                        test_bit(MLX5E_STATE_OPENED, &priv->state) ?
-                        priv->channel[ix]->rq.rqn :
-                        priv->drop_rq.rqn);
+               rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
+                               priv->channel[ix]->rq.rqn :
+                               priv->drop_rq.rqn;
+               MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
        }
 }
 
-static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc,
-                               enum mlx5e_rqt_ix rqt_ix)
+static void mlx5e_fill_direct_rqt_rqn(struct mlx5e_priv *priv, void *rqtc,
+                                     int ix)
 {
+       u32 rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
+                       priv->channel[ix]->rq.rqn :
+                       priv->drop_rq.rqn;
 
-       switch (rqt_ix) {
-       case MLX5E_INDIRECTION_RQT:
-               mlx5e_fill_indir_rqt_rqns(priv, rqtc);
-
-               break;
-
-       default: /* MLX5E_SINGLE_RQ_RQT */
-               MLX5_SET(rqtc, rqtc, rq_num[0],
-                        test_bit(MLX5E_STATE_OPENED, &priv->state) ?
-                        priv->channel[0]->rq.rqn :
-                        priv->drop_rq.rqn);
-
-               break;
-       }
+       MLX5_SET(rqtc, rqtc, rq_num[0], rqn);
 }
 
-static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
+static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, int ix, u32 *rqtn)
 {
        struct mlx5_core_dev *mdev = priv->mdev;
-       u32 *in;
        void *rqtc;
        int inlen;
-       int sz;
        int err;
-
-       sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;
+       u32 *in;
 
        inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
        in = mlx5_vzalloc(inlen);
@@ -1253,26 +1388,73 @@ static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
        MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
        MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
 
-       mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);
+       if (sz > 1) /* RSS */
+               mlx5e_fill_indir_rqt_rqns(priv, rqtc);
+       else
+               mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);
 
-       err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn[rqt_ix]);
+       err = mlx5_core_create_rqt(mdev, in, inlen, rqtn);
 
        kvfree(in);
+       return err;
+}
+
+static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, u32 rqtn)
+{
+       mlx5_core_destroy_rqt(priv->mdev, rqtn);
+}
+
+static int mlx5e_create_rqts(struct mlx5e_priv *priv)
+{
+       int nch = mlx5e_get_max_num_channels(priv->mdev);
+       u32 *rqtn;
+       int err;
+       int ix;
+
+       /* Indirect RQT */
+       rqtn = &priv->indir_rqtn;
+       err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqtn);
+       if (err)
+               return err;
+
+       /* Direct RQTs */
+       for (ix = 0; ix < nch; ix++) {
+               rqtn = &priv->direct_tir[ix].rqtn;
+               err = mlx5e_create_rqt(priv, 1 /* size */, ix, rqtn);
+               if (err)
+                       goto err_destroy_rqts;
+       }
+
+       return 0;
+
+err_destroy_rqts:
+       for (ix--; ix >= 0; ix--)
+               mlx5e_destroy_rqt(priv, priv->direct_tir[ix].rqtn);
+
+       mlx5e_destroy_rqt(priv, priv->indir_rqtn);
 
        return err;
 }
 
-int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
+static void mlx5e_destroy_rqts(struct mlx5e_priv *priv)
+{
+       int nch = mlx5e_get_max_num_channels(priv->mdev);
+       int i;
+
+       for (i = 0; i < nch; i++)
+               mlx5e_destroy_rqt(priv, priv->direct_tir[i].rqtn);
+
+       mlx5e_destroy_rqt(priv, priv->indir_rqtn);
+}
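The old enum-indexed RQT pair is replaced by one RSS indirection RQT of MLX5E_INDIR_RQT_SIZE entries plus a one-entry 'direct' RQT per channel. Each direct RQT gives consumers a stable per-ring target (the MLX5E_TT_ANY entry of the TTC table above, for instance, forwards to direct_tir[0]), and mlx5e_redirect_rqt() merely repoints an RQT at either the channel's RQ or the drop RQ depending on MLX5E_STATE_OPENED, so steering destinations survive ifdown/ifup without being rewritten.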
+
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix)
 {
        struct mlx5_core_dev *mdev = priv->mdev;
-       u32 *in;
        void *rqtc;
        int inlen;
-       int sz;
+       u32 *in;
        int err;
 
-       sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;
-
        inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
        in = mlx5_vzalloc(inlen);
        if (!in)
@@ -1281,27 +1463,31 @@ int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
        rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
 
        MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
-
-       mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);
+       if (sz > 1) /* RSS */
+               mlx5e_fill_indir_rqt_rqns(priv, rqtc);
+       else
+               mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);
 
        MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
 
-       err = mlx5_core_modify_rqt(mdev, priv->rqtn[rqt_ix], in, inlen);
+       err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);
 
        kvfree(in);
 
        return err;
 }
 
-static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
-{
-       mlx5_core_destroy_rqt(priv->mdev, priv->rqtn[rqt_ix]);
-}
-
 static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
 {
-       mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
-       mlx5e_redirect_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+       u32 rqtn;
+       int ix;
+
+       rqtn = priv->indir_rqtn;
+       mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
+       for (ix = 0; ix < priv->params.num_channels; ix++) {
+               rqtn = priv->direct_tir[ix].rqtn;
+               mlx5e_redirect_rqt(priv, rqtn, 1, ix);
+       }
 }
 
 static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
@@ -1346,6 +1532,7 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
        int inlen;
        int err;
        int tt;
+       int ix;
 
        inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
        in = mlx5_vzalloc(inlen);
@@ -1357,23 +1544,32 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
 
        mlx5e_build_tir_ctx_lro(tirc, priv);
 
-       for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
-               err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);
+       for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+               err = mlx5_core_modify_tir(mdev, priv->indir_tirn[tt], in,
+                                          inlen);
                if (err)
-                       break;
+                       goto free_in;
        }
 
+       for (ix = 0; ix < mlx5e_get_max_num_channels(mdev); ix++) {
+               err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
+                                          in, inlen);
+               if (err)
+                       goto free_in;
+       }
+
+free_in:
        kvfree(in);
 
        return err;
 }
 
-static int mlx5e_refresh_tir_self_loopback_enable(struct mlx5_core_dev *mdev,
-                                                 u32 tirn)
+static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
 {
        void *in;
        int inlen;
        int err;
+       int i;
 
        inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
        in = mlx5_vzalloc(inlen);
@@ -1382,46 +1578,70 @@ static int mlx5e_refresh_tir_self_loopback_enable(struct mlx5_core_dev *mdev,
 
        MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
 
-       err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
+       for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) {
+               err = mlx5_core_modify_tir(priv->mdev, priv->indir_tirn[i], in,
+                                          inlen);
+               if (err)
+                       goto free_in;
+       }
+
+       for (i = 0; i < priv->params.num_channels; i++) {
+               err = mlx5_core_modify_tir(priv->mdev,
+                                          priv->direct_tir[i].tirn, in,
+                                          inlen);
+               if (err)
+                       return err;
+       }
 
        kvfree(in);
 
-       return err;
+       return 0;
 }
 
-static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
+static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
 {
+       struct mlx5_core_dev *mdev = priv->mdev;
+       u16 hw_mtu = MLX5E_SW2HW_MTU(mtu);
        int err;
-       int i;
 
-       for (i = 0; i < MLX5E_NUM_TT; i++) {
-               err = mlx5e_refresh_tir_self_loopback_enable(priv->mdev,
-                                                            priv->tirn[i]);
-               if (err)
-                       return err;
-       }
+       err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
+       if (err)
+               return err;
 
+       /* Update vport context MTU */
+       mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
        return 0;
 }
 
+static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
+{
+       struct mlx5_core_dev *mdev = priv->mdev;
+       u16 hw_mtu = 0;
+       int err;
+
+       err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
+       if (err || !hw_mtu) /* fallback to port oper mtu */
+               mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
+
+       *mtu = MLX5E_HW2SW_MTU(hw_mtu);
+}
+
 static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
-       struct mlx5_core_dev *mdev = priv->mdev;
-       int hw_mtu;
+       u16 mtu;
        int err;
 
-       err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
+       err = mlx5e_set_mtu(priv, netdev->mtu);
        if (err)
                return err;
 
-       mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
+       mlx5e_query_mtu(priv, &mtu);
+       if (mtu != netdev->mtu)
+               netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
+                           __func__, mtu, netdev->mtu);
 
-       if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
-               netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n",
-                           __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);
-
-       netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);
+       netdev->mtu = mtu;
        return 0;
 }
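
The flow here is set-then-verify: mlx5e_set_mtu() programs both the port and the vport context, mlx5e_query_mtu() reads back the effective value (preferring the vport answer, falling back to the port operational MTU), and a mismatch is only warned about rather than treated as an error. The SW/HW conversion accounts for L2 overhead that the hardware counts but the stack's MTU excludes; a sketch of its assumed shape, the authoritative macros live in en.h:

    #include <linux/if_ether.h>
    #include <linux/if_vlan.h>

    /* Assumed shape of the converters; the real macros in en.h may
     * reserve slightly different headroom. */
    #define SW2HW_MTU(swmtu) ((swmtu) + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
    #define HW2SW_MTU(hwmtu) ((hwmtu) - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN)

    /* e.g. netdev->mtu 1500 -> hw_mtu 1522 (14B header + 4B VLAN + 4B FCS) */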
 
@@ -1478,8 +1698,11 @@ int mlx5e_open_locked(struct net_device *netdev)
        mlx5e_redirect_rqts(priv);
        mlx5e_update_carrier(priv);
        mlx5e_timestamp_init(priv);
+#ifdef CONFIG_RFS_ACCEL
+       priv->netdev->rx_cpu_rmap = priv->mdev->rmap;
+#endif
 
-       schedule_delayed_work(&priv->update_stats_work, 0);
+       queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
 
        return 0;
 
@@ -1685,7 +1908,8 @@ static void mlx5e_destroy_tises(struct mlx5e_priv *priv)
                mlx5e_destroy_tis(priv, tc);
 }
 
-static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
+static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
+                                     enum mlx5e_traffic_types tt)
 {
        void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
 
@@ -1706,19 +1930,8 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
        mlx5e_build_tir_ctx_lro(tirc, priv);
 
        MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
-
-       switch (tt) {
-       case MLX5E_TT_ANY:
-               MLX5_SET(tirc, tirc, indirect_table,
-                        priv->rqtn[MLX5E_SINGLE_RQ_RQT]);
-               MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
-               break;
-       default:
-               MLX5_SET(tirc, tirc, indirect_table,
-                        priv->rqtn[MLX5E_INDIRECTION_RQT]);
-               mlx5e_build_tir_ctx_hash(tirc, priv);
-               break;
-       }
+       MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqtn);
+       mlx5e_build_tir_ctx_hash(tirc, priv);
 
        switch (tt) {
        case MLX5E_TT_IPV4_TCP:
@@ -1798,64 +2011,107 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP);
                break;
+       default:
+               WARN_ONCE(true,
+                         "mlx5e_build_indir_tir_ctx: bad traffic type!\n");
        }
 }
 
-static int mlx5e_create_tir(struct mlx5e_priv *priv, int tt)
+static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
+                                      u32 rqtn)
 {
-       struct mlx5_core_dev *mdev = priv->mdev;
-       u32 *in;
+       MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
+
+       mlx5e_build_tir_ctx_lro(tirc, priv);
+
+       MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
+       MLX5_SET(tirc, tirc, indirect_table, rqtn);
+       MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
+}
+
+static int mlx5e_create_tirs(struct mlx5e_priv *priv)
+{
+       int nch = mlx5e_get_max_num_channels(priv->mdev);
        void *tirc;
        int inlen;
+       u32 *tirn;
        int err;
+       u32 *in;
+       int ix;
+       int tt;
 
        inlen = MLX5_ST_SZ_BYTES(create_tir_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;
 
-       tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+       /* indirect tirs */
+       for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+               memset(in, 0, inlen);
+               tirn = &priv->indir_tirn[tt];
+               tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+               mlx5e_build_indir_tir_ctx(priv, tirc, tt);
+               err = mlx5_core_create_tir(priv->mdev, in, inlen, tirn);
+               if (err)
+                       goto err_destroy_tirs;
+       }
+
+       /* direct tirs */
+       for (ix = 0; ix < nch; ix++) {
+               memset(in, 0, inlen);
+               tirn = &priv->direct_tir[ix].tirn;
+               tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+               mlx5e_build_direct_tir_ctx(priv, tirc,
+                                          priv->direct_tir[ix].rqtn);
+               err = mlx5_core_create_tir(priv->mdev, in, inlen, tirn);
+               if (err)
+                       goto err_destroy_ch_tirs;
+       }
+
+       kvfree(in);
+
+       return 0;
 
-       mlx5e_build_tir_ctx(priv, tirc, tt);
+err_destroy_ch_tirs:
+       for (ix--; ix >= 0; ix--)
+               mlx5_core_destroy_tir(priv->mdev, priv->direct_tir[ix].tirn);
 
-       err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
+err_destroy_tirs:
+       for (tt--; tt >= 0; tt--)
+               mlx5_core_destroy_tir(priv->mdev, priv->indir_tirn[tt]);
 
        kvfree(in);
 
        return err;
 }
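
Note the single `in` buffer reused across both loops (hence the per-iteration memset), and the two-stage unwind: a failure in the direct-TIR loop first destroys the direct TIRs created so far, then falls through to tear down all indirect TIRs. The idiom, condensed into a generic sketch with illustrative create()/destroy() callbacks:

    static int create_all(int n, int (*create)(int), void (*destroy)(int))
    {
            int err;
            int i;

            for (i = 0; i < n; i++) {
                    err = create(i);
                    if (err)
                            goto err_unwind;
            }

            return 0;

    err_unwind:
            for (i--; i >= 0; i--)          /* undo items i-1..0, newest first */
                    destroy(i);

            return err;
    }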
 
-static void mlx5e_destroy_tir(struct mlx5e_priv *priv, int tt)
+static void mlx5e_destroy_tirs(struct mlx5e_priv *priv)
 {
-       mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
+       int nch = mlx5e_get_max_num_channels(priv->mdev);
+       int i;
+
+       for (i = 0; i < nch; i++)
+               mlx5_core_destroy_tir(priv->mdev, priv->direct_tir[i].tirn);
+
+       for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
+               mlx5_core_destroy_tir(priv->mdev, priv->indir_tirn[i]);
 }
 
-static int mlx5e_create_tirs(struct mlx5e_priv *priv)
+int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd)
 {
-       int err;
+       int err = 0;
        int i;
 
-       for (i = 0; i < MLX5E_NUM_TT; i++) {
-               err = mlx5e_create_tir(priv, i);
+       if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+               return 0;
+
+       for (i = 0; i < priv->params.num_channels; i++) {
+               err = mlx5e_modify_rq_vsd(&priv->channel[i]->rq, vsd);
                if (err)
-                       goto err_destroy_tirs;
+                       return err;
        }
 
        return 0;
-
-err_destroy_tirs:
-       for (i--; i >= 0; i--)
-               mlx5e_destroy_tir(priv, i);
-
-       return err;
-}
-
-static void mlx5e_destroy_tirs(struct mlx5e_priv *priv)
-{
-       int i;
-
-       for (i = 0; i < MLX5E_NUM_TT; i++)
-               mlx5e_destroy_tir(priv, i);
 }
 
 static int mlx5e_setup_tc(struct net_device *netdev, u8 tc)
@@ -1914,19 +2170,37 @@ static struct rtnl_link_stats64 *
 mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
+       struct mlx5e_sw_stats *sstats = &priv->stats.sw;
        struct mlx5e_vport_stats *vstats = &priv->stats.vport;
-
-       stats->rx_packets = vstats->rx_packets;
-       stats->rx_bytes   = vstats->rx_bytes;
-       stats->tx_packets = vstats->tx_packets;
-       stats->tx_bytes   = vstats->tx_bytes;
-       stats->multicast  = vstats->rx_multicast_packets +
-                           vstats->tx_multicast_packets;
-       stats->tx_errors  = vstats->tx_error_packets;
-       stats->rx_errors  = vstats->rx_error_packets;
-       stats->tx_dropped = vstats->tx_queue_dropped;
-       stats->rx_crc_errors = 0;
-       stats->rx_length_errors = 0;
+       struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+
+       stats->rx_packets = sstats->rx_packets;
+       stats->rx_bytes   = sstats->rx_bytes;
+       stats->tx_packets = sstats->tx_packets;
+       stats->tx_bytes   = sstats->tx_bytes;
+
+       stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
+       stats->tx_dropped = sstats->tx_queue_dropped;
+
+       stats->rx_length_errors =
+               PPORT_802_3_GET(pstats, a_in_range_length_errors) +
+               PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
+               PPORT_802_3_GET(pstats, a_frame_too_long_errors);
+       stats->rx_crc_errors =
+               PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
+       stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
+       stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
+       stats->tx_carrier_errors =
+               PPORT_802_3_GET(pstats, a_symbol_error_during_carrier);
+       stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
+                          stats->rx_frame_errors;
+       stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
+
+       /* vport multicast also counts packets that are dropped due to steering
+        * or rx out of buffer
+        */
+       stats->multicast =
+               VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
 
        return stats;
 }
@@ -1935,7 +2209,7 @@ static void mlx5e_set_rx_mode(struct net_device *dev)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
 
-       schedule_work(&priv->set_rx_mode_work);
+       queue_work(priv->wq, &priv->set_rx_mode_work);
 }
 
 static int mlx5e_set_mac(struct net_device *netdev, void *addr)
@@ -1950,71 +2224,180 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr)
        ether_addr_copy(netdev->dev_addr, saddr->sa_data);
        netif_addr_unlock_bh(netdev);
 
-       schedule_work(&priv->set_rx_mode_work);
+       queue_work(priv->wq, &priv->set_rx_mode_work);
 
        return 0;
 }
 
-static int mlx5e_set_features(struct net_device *netdev,
-                             netdev_features_t features)
+#define MLX5E_SET_FEATURE(netdev, feature, enable)     \
+       do {                                            \
+               if (enable)                             \
+                       netdev->features |= feature;    \
+               else                                    \
+                       netdev->features &= ~feature;   \
+       } while (0)
+
+typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
+
+static int set_feature_lro(struct net_device *netdev, bool enable)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
-       int err = 0;
-       netdev_features_t changes = features ^ netdev->features;
+       bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+       int err;
 
        mutex_lock(&priv->state_lock);
 
-       if (changes & NETIF_F_LRO) {
-               bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-
-               if (was_opened)
-                       mlx5e_close_locked(priv->netdev);
-
-               priv->params.lro_en = !!(features & NETIF_F_LRO);
-               err = mlx5e_modify_tirs_lro(priv);
-               if (err)
-                       mlx5_core_warn(priv->mdev, "lro modify failed, %d\n",
-                                      err);
+       if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
+               mlx5e_close_locked(priv->netdev);
 
-               if (was_opened)
-                       err = mlx5e_open_locked(priv->netdev);
+       priv->params.lro_en = enable;
+       err = mlx5e_modify_tirs_lro(priv);
+       if (err) {
+               netdev_err(netdev, "lro modify failed, %d\n", err);
+               priv->params.lro_en = !enable;
        }
 
+       if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
+               mlx5e_open_locked(priv->netdev);
+
        mutex_unlock(&priv->state_lock);
 
-       if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
-               if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
-                       mlx5e_enable_vlan_filter(priv);
-               else
-                       mlx5e_disable_vlan_filter(priv);
-       }
+       return err;
+}
+
+static int set_feature_vlan_filter(struct net_device *netdev, bool enable)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+
+       if (enable)
+               mlx5e_enable_vlan_filter(priv);
+       else
+               mlx5e_disable_vlan_filter(priv);
 
-       if ((changes & NETIF_F_HW_TC) && !(features & NETIF_F_HW_TC) &&
-           mlx5e_tc_num_filters(priv)) {
+       return 0;
+}
+
+static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+
+       if (!enable && mlx5e_tc_num_filters(priv)) {
                netdev_err(netdev,
                           "Active offloaded tc filters, can't turn hw_tc_offload off\n");
                return -EINVAL;
        }
 
+       return 0;
+}
+
+static int set_feature_rx_all(struct net_device *netdev, bool enable)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       struct mlx5_core_dev *mdev = priv->mdev;
+
+       return mlx5_set_port_fcs(mdev, !enable);
+}
+
+static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       int err;
+
+       mutex_lock(&priv->state_lock);
+
+       priv->params.vlan_strip_disable = !enable;
+       err = mlx5e_modify_rqs_vsd(priv, !enable);
+       if (err)
+               priv->params.vlan_strip_disable = enable;
+
+       mutex_unlock(&priv->state_lock);
+
+       return err;
+}
+
+#ifdef CONFIG_RFS_ACCEL
+static int set_feature_arfs(struct net_device *netdev, bool enable)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       int err;
+
+       if (enable)
+               err = mlx5e_arfs_enable(priv);
+       else
+               err = mlx5e_arfs_disable(priv);
+
        return err;
 }
+#endif
+
+static int mlx5e_handle_feature(struct net_device *netdev,
+                               netdev_features_t wanted_features,
+                               netdev_features_t feature,
+                               mlx5e_feature_handler feature_handler)
+{
+       netdev_features_t changes = wanted_features ^ netdev->features;
+       bool enable = !!(wanted_features & feature);
+       int err;
+
+       if (!(changes & feature))
+               return 0;
+
+       err = feature_handler(netdev, enable);
+       if (err) {
+               netdev_err(netdev, "%s feature 0x%llx failed err %d\n",
+                          enable ? "Enable" : "Disable", feature, err);
+               return err;
+       }
+
+       MLX5E_SET_FEATURE(netdev, feature, enable);
+       return 0;
+}
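
Each NETIF_F_* bit now gets its own handler, and mlx5e_handle_feature() runs it only when the bit actually changed, committing the bit into netdev->features on success. One caveat visible in mlx5e_set_features() below: the per-feature errors are OR-ed together, so a single failing handler collapses the return value to -EINVAL even though the other bits were already applied. A worked example of the change detection:

    #include <linux/netdevice.h>

    static void example_change_detection(void)
    {
            netdev_features_t cur    = NETIF_F_LRO | NETIF_F_RXALL;
            netdev_features_t wanted = NETIF_F_RXALL;
            netdev_features_t diff   = wanted ^ cur; /* == NETIF_F_LRO */
            bool enable = !!(wanted & NETIF_F_LRO);  /* false: turning LRO off */

            /* only the NETIF_F_LRO handler runs (diff & NETIF_F_LRO is set);
             * the NETIF_F_RXALL handler is skipped, that bit is unchanged */
            (void)diff;
            (void)enable;
    }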
+
+static int mlx5e_set_features(struct net_device *netdev,
+                             netdev_features_t features)
+{
+       int err;
+
+       err  = mlx5e_handle_feature(netdev, features, NETIF_F_LRO,
+                                   set_feature_lro);
+       err |= mlx5e_handle_feature(netdev, features,
+                                   NETIF_F_HW_VLAN_CTAG_FILTER,
+                                   set_feature_vlan_filter);
+       err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC,
+                                   set_feature_tc_num_filters);
+       err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
+                                   set_feature_rx_all);
+       err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX,
+                                   set_feature_rx_vlan);
+#ifdef CONFIG_RFS_ACCEL
+       err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE,
+                                   set_feature_arfs);
+#endif
+
+       return err ? -EINVAL : 0;
+}
+
+#define MXL5_HW_MIN_MTU 64
+#define MXL5E_MIN_MTU (MXL5_HW_MIN_MTU + ETH_FCS_LEN)
 
 static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
        bool was_opened;
-       int max_mtu;
+       u16 max_mtu;
+       u16 min_mtu;
        int err = 0;
 
        mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
 
        max_mtu = MLX5E_HW2SW_MTU(max_mtu);
+       min_mtu = MLX5E_HW2SW_MTU(MXL5E_MIN_MTU);
 
-       if (new_mtu > max_mtu) {
+       if (new_mtu > max_mtu || new_mtu < min_mtu) {
                netdev_err(netdev,
-                          "%s: Bad MTU (%d) > (%d) Max\n",
-                          __func__, new_mtu, max_mtu);
+                          "%s: Bad MTU (%d), valid range is: [%d..%d]\n",
+                          __func__, new_mtu, min_mtu, max_mtu);
                return -EINVAL;
        }
 
@@ -2063,6 +2446,21 @@ static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
                                           vlan, qos);
 }
 
+static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       struct mlx5_core_dev *mdev = priv->mdev;
+
+       return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
+}
+
+static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       struct mlx5_core_dev *mdev = priv->mdev;
+
+       return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
+}
 static int mlx5_vport_link2ifla(u8 esw_link)
 {
        switch (esw_link) {
@@ -2127,7 +2525,7 @@ static void mlx5e_add_vxlan_port(struct net_device *netdev,
        if (!mlx5e_vxlan_allowed(priv->mdev))
                return;
 
-       mlx5e_vxlan_add_port(priv, be16_to_cpu(port));
+       mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 1);
 }
 
 static void mlx5e_del_vxlan_port(struct net_device *netdev,
@@ -2138,7 +2536,7 @@ static void mlx5e_del_vxlan_port(struct net_device *netdev,
        if (!mlx5e_vxlan_allowed(priv->mdev))
                return;
 
-       mlx5e_vxlan_del_port(priv, be16_to_cpu(port));
+       mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 0);
 }
 
 static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
@@ -2205,6 +2603,9 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = {
        .ndo_set_features        = mlx5e_set_features,
        .ndo_change_mtu          = mlx5e_change_mtu,
        .ndo_do_ioctl            = mlx5e_ioctl,
+#ifdef CONFIG_RFS_ACCEL
+       .ndo_rx_flow_steer       = mlx5e_rx_flow_steer,
+#endif
 };
 
 static const struct net_device_ops mlx5e_netdev_ops_sriov = {
@@ -2224,8 +2625,13 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
        .ndo_add_vxlan_port      = mlx5e_add_vxlan_port,
        .ndo_del_vxlan_port      = mlx5e_del_vxlan_port,
        .ndo_features_check      = mlx5e_features_check,
+#ifdef CONFIG_RFS_ACCEL
+       .ndo_rx_flow_steer       = mlx5e_rx_flow_steer,
+#endif
        .ndo_set_vf_mac          = mlx5e_set_vf_mac,
        .ndo_set_vf_vlan         = mlx5e_set_vf_vlan,
+       .ndo_set_vf_spoofchk     = mlx5e_set_vf_spoofchk,
+       .ndo_set_vf_trust        = mlx5e_set_vf_trust,
        .ndo_get_vf_config       = mlx5e_get_vf_config,
        .ndo_set_vf_link_state   = mlx5e_set_vf_link_state,
        .ndo_get_vf_stats        = mlx5e_get_vf_stats,
@@ -2283,25 +2689,121 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
 }
 #endif
 
-void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
+void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
+                                  u32 *indirection_rqt, int len,
                                   int num_channels)
 {
+       int node = mdev->priv.numa_node;
+       int node_num_of_cores;
        int i;
 
+       if (node == -1)
+               node = first_online_node;
+
+       node_num_of_cores = cpumask_weight(cpumask_of_node(node));
+
+       if (node_num_of_cores)
+               num_channels = min_t(int, num_channels, node_num_of_cores);
+
        for (i = 0; i < len; i++)
                indirection_rqt[i] = i % num_channels;
 }
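
The default indirection table is now NUMA-aware: it round-robins over at most as many channels as there are cores on the device's local node, so RSS traffic stays node-local out of the box. A worked example, assuming 16 requested channels but only 4 cores on the local node:

    /* tbl = {0, 1, 2, 3, 0, 1, 2, 3, ...}; channels 4..15 see no RSS
     * traffic by default and remain reachable only via their direct TIRs. */
    static void example_fill(u32 *tbl, int len)
    {
            int eff = 4;    /* min(16 requested channels, 4 local cores) */
            int i;

            for (i = 0; i < len; i++)
                    tbl[i] = i % eff;
    }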
 
+static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
+{
+       return MLX5_CAP_GEN(mdev, striding_rq) &&
+               MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
+               MLX5_CAP_ETH(mdev, reg_umr_sq);
+}
+
+static int mlx5e_get_pci_bw(struct mlx5_core_dev *mdev, u32 *pci_bw)
+{
+       enum pcie_link_width width;
+       enum pci_bus_speed speed;
+       int err = 0;
+
+       err = pcie_get_minimum_link(mdev->pdev, &speed, &width);
+       if (err)
+               return err;
+
+       if (speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
+               return -EINVAL;
+
+       switch (speed) {
+       case PCIE_SPEED_2_5GT:
+               *pci_bw = 2500 * width;
+               break;
+       case PCIE_SPEED_5_0GT:
+               *pci_bw = 5000 * width;
+               break;
+       case PCIE_SPEED_8_0GT:
+               *pci_bw = 8000 * width;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw)
+{
+       return (link_speed && pci_bw &&
+               (pci_bw < 40000) && (pci_bw < link_speed));
+}
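
CQE compression trades CPU for PCIe bandwidth, so the heuristic enables it only when the bus, not the wire, is the bottleneck: pci_bw below 40 Gb/s and below the link speed. Worked numbers, using the per-lane rates mlx5e_get_pci_bw() computes (encoding overhead ignored):

    static void example_heuristic(void)
    {
            u32 gen3_x8 = 8000 * 8; /* 64000 Mb/s */
            u32 gen3_x4 = 8000 * 4; /* 32000 Mb/s */

            cqe_compress_heuristic(100000, gen3_x8); /* false: bus >= 40 Gb/s      */
            cqe_compress_heuristic(100000, gen3_x4); /* true: 32 < 40 and 32 < 100 */
    }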
+
 static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
                                    struct net_device *netdev,
                                    int num_channels)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
+       u32 link_speed = 0;
+       u32 pci_bw = 0;
 
        priv->params.log_sq_size           =
                MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
-       priv->params.log_rq_size           =
-               MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+       priv->params.rq_wq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) ?
+               MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
+               MLX5_WQ_TYPE_LINKED_LIST;
+
+       /* set CQE compression */
+       priv->params.rx_cqe_compress_admin = false;
+       if (MLX5_CAP_GEN(mdev, cqe_compression) &&
+           MLX5_CAP_GEN(mdev, vport_group_manager)) {
+               mlx5e_get_max_linkspeed(mdev, &link_speed);
+               mlx5e_get_pci_bw(mdev, &pci_bw);
+               mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
+                             link_speed, pci_bw);
+               priv->params.rx_cqe_compress_admin =
+                       cqe_compress_heuristic(link_speed, pci_bw);
+       }
+
+       priv->params.rx_cqe_compress = priv->params.rx_cqe_compress_admin;
+
+       switch (priv->params.rq_wq_type) {
+       case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+               priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
+               priv->params.mpwqe_log_stride_sz =
+                       priv->params.rx_cqe_compress ?
+                       MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS :
+                       MLX5_MPWRQ_LOG_STRIDE_SIZE;
+               priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
+                       priv->params.mpwqe_log_stride_sz;
+               priv->params.lro_en = true;
+               break;
+       default: /* MLX5_WQ_TYPE_LINKED_LIST */
+               priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+       }
+
+       mlx5_core_info(mdev,
+                      "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
+                      priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
+                      BIT(priv->params.log_rq_size),
+                      BIT(priv->params.mpwqe_log_stride_sz),
+                      priv->params.rx_cqe_compress_admin);
+
+       priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
+                                           BIT(priv->params.log_rq_size));
        priv->params.rx_cq_moderation_usec =
                MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
        priv->params.rx_cq_moderation_pkts =
@@ -2311,15 +2813,13 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
        priv->params.tx_cq_moderation_pkts =
                MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
        priv->params.tx_max_inline         = mlx5e_get_max_inline_cap(mdev);
-       priv->params.min_rx_wqes           =
-               MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
        priv->params.num_tc                = 1;
        priv->params.rss_hfunc             = ETH_RSS_HASH_XOR;
 
        netdev_rss_key_fill(priv->params.toeplitz_hash_key,
                            sizeof(priv->params.toeplitz_hash_key));
 
-       mlx5e_build_default_indir_rqt(priv->params.indirection_rqt,
+       mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt,
                                      MLX5E_INDIR_RQT_SIZE, num_channels);
 
        priv->params.lro_wqe_sz            =
@@ -2356,6 +2856,8 @@ static void mlx5e_build_netdev(struct net_device *netdev)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
+       bool fcs_supported;
+       bool fcs_enabled;
 
        SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
 
@@ -2390,25 +2892,41 @@ static void mlx5e_build_netdev(struct net_device *netdev)
        netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;
 
        if (mlx5e_vxlan_allowed(mdev)) {
-               netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL;
+               netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL |
+                                          NETIF_F_GSO_UDP_TUNNEL_CSUM |
+                                          NETIF_F_GSO_PARTIAL;
                netdev->hw_enc_features |= NETIF_F_IP_CSUM;
-               netdev->hw_enc_features |= NETIF_F_RXCSUM;
+               netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
                netdev->hw_enc_features |= NETIF_F_TSO;
                netdev->hw_enc_features |= NETIF_F_TSO6;
-               netdev->hw_enc_features |= NETIF_F_RXHASH;
                netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
+               netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
+                                          NETIF_F_GSO_PARTIAL;
+               netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
        }
 
+       mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
+
+       if (fcs_supported)
+               netdev->hw_features |= NETIF_F_RXALL;
+
        netdev->features          = netdev->hw_features;
        if (!priv->params.lro_en)
                netdev->features  &= ~NETIF_F_LRO;
 
+       if (fcs_enabled)
+               netdev->features  &= ~NETIF_F_RXALL;
+
 #define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
        if (FT_CAP(flow_modify_en) &&
            FT_CAP(modify_root) &&
            FT_CAP(identified_miss_table_mode) &&
-           FT_CAP(flow_table_modify))
-               priv->netdev->hw_features      |= NETIF_F_HW_TC;
+           FT_CAP(flow_table_modify)) {
+               netdev->hw_features      |= NETIF_F_HW_TC;
+#ifdef CONFIG_RFS_ACCEL
+               netdev->hw_features      |= NETIF_F_NTUPLE;
+#endif
+       }
 
        netdev->features         |= NETIF_F_HIGHDMA;
 
@@ -2442,6 +2960,61 @@ static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
        return err;
 }
 
+static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
+{
+       struct mlx5_core_dev *mdev = priv->mdev;
+       int err;
+
+       err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
+       if (err) {
+               mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
+               priv->q_counter = 0;
+       }
+}
+
+static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
+{
+       if (!priv->q_counter)
+               return;
+
+       mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
+}
+
+static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
+{
+       struct mlx5_core_dev *mdev = priv->mdev;
+       struct mlx5_create_mkey_mbox_in *in;
+       struct mlx5_mkey_seg *mkc;
+       int inlen = sizeof(*in);
+       u64 npages =
+               mlx5e_get_max_num_channels(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS;
+       int err;
+
+       in = mlx5_vzalloc(inlen);
+       if (!in)
+               return -ENOMEM;
+
+       mkc = &in->seg;
+       mkc->status = MLX5_MKEY_STATUS_FREE;
+       mkc->flags = MLX5_PERM_UMR_EN |
+                    MLX5_PERM_LOCAL_READ |
+                    MLX5_PERM_LOCAL_WRITE |
+                    MLX5_ACCESS_MODE_MTT;
+
+       mkc->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+       mkc->flags_pd = cpu_to_be32(priv->pdn);
+       mkc->len = cpu_to_be64(npages << PAGE_SHIFT);
+       mkc->xlt_oct_size = cpu_to_be32(mlx5e_get_mtt_octw(npages));
+       mkc->log2_page_size = PAGE_SHIFT;
+
+       err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen, NULL,
+                                   NULL, NULL);
+
+       kvfree(in);
+
+       return err;
+}
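
The UMR mkey is sized for the worst case of every channel posting fully fragmented MPWQEs: npages covers max-channels times MLX5_CHANNEL_MAX_NUM_MTTS translation entries. Rough arithmetic, with purely illustrative figures (the real constants depend on the MPWQE geometry in en.h):

    /* e.g. 16 channels and 64K MTTs per channel (assumed values): */
    u64 npages = 16ULL * 65536;     /* 1M pages               */
    u64 mkey_len = npages << 12;    /* 4 GiB with 4 KiB pages */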
+
 static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
 {
        struct net_device *netdev;
@@ -2467,10 +3040,14 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
 
        priv = netdev_priv(netdev);
 
+       priv->wq = create_singlethread_workqueue("mlx5e");
+       if (!priv->wq)
+               goto err_free_netdev;
+
        err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false);
        if (err) {
                mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
-               goto err_free_netdev;
+               goto err_destroy_wq;
        }
 
        err = mlx5_core_alloc_pd(mdev, &priv->pdn);
@@ -2491,10 +3068,16 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
                goto err_dealloc_transport_domain;
        }
 
+       err = mlx5e_create_umr_mkey(priv);
+       if (err) {
+               mlx5_core_err(mdev, "create umr mkey failed, %d\n", err);
+               goto err_destroy_mkey;
+       }
+
        err = mlx5e_create_tises(priv);
        if (err) {
                mlx5_core_warn(mdev, "create tises failed, %d\n", err);
-               goto err_destroy_mkey;
+               goto err_destroy_umr_mkey;
        }
 
        err = mlx5e_open_drop_rq(priv);
@@ -2503,37 +3086,33 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
                goto err_destroy_tises;
        }
 
-       err = mlx5e_create_rqt(priv, MLX5E_INDIRECTION_RQT);
+       err = mlx5e_create_rqts(priv);
        if (err) {
-               mlx5_core_warn(mdev, "create rqt(INDIR) failed, %d\n", err);
+               mlx5_core_warn(mdev, "create rqts failed, %d\n", err);
                goto err_close_drop_rq;
        }
 
-       err = mlx5e_create_rqt(priv, MLX5E_SINGLE_RQ_RQT);
-       if (err) {
-               mlx5_core_warn(mdev, "create rqt(SINGLE) failed, %d\n", err);
-               goto err_destroy_rqt_indir;
-       }
-
        err = mlx5e_create_tirs(priv);
        if (err) {
                mlx5_core_warn(mdev, "create tirs failed, %d\n", err);
-               goto err_destroy_rqt_single;
+               goto err_destroy_rqts;
        }
 
-       err = mlx5e_create_flow_tables(priv);
+       err = mlx5e_create_flow_steering(priv);
        if (err) {
-               mlx5_core_warn(mdev, "create flow tables failed, %d\n", err);
+               mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
                goto err_destroy_tirs;
        }
 
-       mlx5e_init_eth_addr(priv);
+       mlx5e_create_q_counter(priv);
+
+       mlx5e_init_l2_addr(priv);
 
        mlx5e_vxlan_init(priv);
 
        err = mlx5e_tc_init(priv);
        if (err)
-               goto err_destroy_flow_tables;
+               goto err_dealloc_q_counters;
 
 #ifdef CONFIG_MLX5_CORE_EN_DCB
        mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets);
@@ -2545,28 +3124,29 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
                goto err_tc_cleanup;
        }
 
-       if (mlx5e_vxlan_allowed(mdev))
+       if (mlx5e_vxlan_allowed(mdev)) {
+               rtnl_lock();
                vxlan_get_rx_port(netdev);
+               rtnl_unlock();
+       }
 
        mlx5e_enable_async_events(priv);
-       schedule_work(&priv->set_rx_mode_work);
+       queue_work(priv->wq, &priv->set_rx_mode_work);
 
        return priv;
 
 err_tc_cleanup:
        mlx5e_tc_cleanup(priv);
 
-err_destroy_flow_tables:
-       mlx5e_destroy_flow_tables(priv);
+err_dealloc_q_counters:
+       mlx5e_destroy_q_counter(priv);
+       mlx5e_destroy_flow_steering(priv);
 
 err_destroy_tirs:
        mlx5e_destroy_tirs(priv);
 
-err_destroy_rqt_single:
-       mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
-
-err_destroy_rqt_indir:
-       mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
+err_destroy_rqts:
+       mlx5e_destroy_rqts(priv);
 
 err_close_drop_rq:
        mlx5e_close_drop_rq(priv);
@@ -2574,6 +3154,9 @@ err_close_drop_rq:
 err_destroy_tises:
        mlx5e_destroy_tises(priv);
 
+err_destroy_umr_mkey:
+       mlx5_core_destroy_mkey(mdev, &priv->umr_mkey);
+
 err_destroy_mkey:
        mlx5_core_destroy_mkey(mdev, &priv->mkey);
 
@@ -2586,6 +3169,9 @@ err_dealloc_pd:
 err_unmap_free_uar:
        mlx5_unmap_free_uar(mdev, &priv->cq_uar);
 
+err_destroy_wq:
+       destroy_workqueue(priv->wq);
+
 err_free_netdev:
        free_netdev(netdev);
 
@@ -2599,23 +3185,37 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
 
        set_bit(MLX5E_STATE_DESTROYING, &priv->state);
 
-       schedule_work(&priv->set_rx_mode_work);
+       queue_work(priv->wq, &priv->set_rx_mode_work);
        mlx5e_disable_async_events(priv);
-       flush_scheduled_work();
-       unregister_netdev(netdev);
+       flush_workqueue(priv->wq);
+       if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
+               netif_device_detach(netdev);
+               mutex_lock(&priv->state_lock);
+               if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+                       mlx5e_close_locked(netdev);
+               mutex_unlock(&priv->state_lock);
+       } else {
+               unregister_netdev(netdev);
+       }
+
        mlx5e_tc_cleanup(priv);
        mlx5e_vxlan_cleanup(priv);
-       mlx5e_destroy_flow_tables(priv);
+       mlx5e_destroy_q_counter(priv);
+       mlx5e_destroy_flow_steering(priv);
        mlx5e_destroy_tirs(priv);
-       mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
-       mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
+       mlx5e_destroy_rqts(priv);
        mlx5e_close_drop_rq(priv);
        mlx5e_destroy_tises(priv);
+       mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey);
        mlx5_core_destroy_mkey(priv->mdev, &priv->mkey);
        mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn);
        mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
        mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
-       free_netdev(netdev);
+       cancel_delayed_work_sync(&priv->update_stats_work);
+       destroy_workqueue(priv->wq);
+
+       if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state))
+               free_netdev(netdev);
 }
 
 static void *mlx5e_get_netdev(void *vpriv)
index 58d4e2f962c3591f3dea1bbf0fccf4c5a62d0810..f3456798c5960fda619a2a57074aa9aaf902e1ef 100644 (file)
@@ -42,13 +42,149 @@ static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp)
        return tstamp->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
 }
 
-static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
-                                    struct mlx5e_rx_wqe *wqe, u16 ix)
+static inline void mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cqcc,
+                                      void *data)
+{
+       u32 ci = cqcc & cq->wq.sz_m1;
+
+       memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, ci), sizeof(struct mlx5_cqe64));
+}
+
+static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
+                                        struct mlx5e_cq *cq, u32 cqcc)
+{
+       mlx5e_read_cqe_slot(cq, cqcc, &cq->title);
+       cq->decmprs_left        = be32_to_cpu(cq->title.byte_cnt);
+       cq->decmprs_wqe_counter = be16_to_cpu(cq->title.wqe_counter);
+       rq->stats.cqe_compress_blks++;
+}
+
+static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc)
+{
+       mlx5e_read_cqe_slot(cq, cqcc, cq->mini_arr);
+       cq->mini_arr_idx = 0;
+}
+
+static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n)
+{
+       u8 op_own = (cqcc >> cq->wq.log_sz) & 1;
+       u32 wq_sz = 1 << cq->wq.log_sz;
+       u32 ci = cqcc & cq->wq.sz_m1;
+       u32 ci_top = min_t(u32, wq_sz, ci + n);
+
+       for (; ci < ci_top; ci++, n--) {
+               struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci);
+
+               cqe->op_own = op_own;
+       }
+
+       if (unlikely(ci == wq_sz)) {
+               op_own = !op_own;
+               for (ci = 0; ci < n; ci++) {
+                       struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci);
+
+                       cqe->op_own = op_own;
+               }
+       }
+}
+
+static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
+                                       struct mlx5e_cq *cq, u32 cqcc)
+{
+       u16 wqe_cnt_step;
+
+       cq->title.byte_cnt     = cq->mini_arr[cq->mini_arr_idx].byte_cnt;
+       cq->title.check_sum    = cq->mini_arr[cq->mini_arr_idx].checksum;
+       cq->title.op_own      &= 0xf0;
+       cq->title.op_own      |= 0x01 & (cqcc >> cq->wq.log_sz);
+       cq->title.wqe_counter  = cpu_to_be16(cq->decmprs_wqe_counter);
+
+       wqe_cnt_step =
+               rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
+               mpwrq_get_cqe_consumed_strides(&cq->title) : 1;
+       cq->decmprs_wqe_counter =
+               (cq->decmprs_wqe_counter + wqe_cnt_step) & rq->wq.sz_m1;
+}
+
+static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
+                                               struct mlx5e_cq *cq, u32 cqcc)
+{
+       mlx5e_decompress_cqe(rq, cq, cqcc);
+       cq->title.rss_hash_type   = 0;
+       cq->title.rss_hash_result = 0;
+}
+
+static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
+                                            struct mlx5e_cq *cq,
+                                            int update_owner_only,
+                                            int budget_rem)
+{
+       u32 cqcc = cq->wq.cc + update_owner_only;
+       u32 cqe_count;
+       u32 i;
+
+       cqe_count = min_t(u32, cq->decmprs_left, budget_rem);
+
+       for (i = update_owner_only; i < cqe_count;
+            i++, cq->mini_arr_idx++, cqcc++) {
+               if (cq->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE)
+                       mlx5e_read_mini_arr_slot(cq, cqcc);
+
+               mlx5e_decompress_cqe_no_hash(rq, cq, cqcc);
+               rq->handle_rx_cqe(rq, &cq->title);
+       }
+       mlx5e_cqes_update_owner(cq, cq->wq.cc, cqcc - cq->wq.cc);
+       cq->wq.cc = cqcc;
+       cq->decmprs_left -= cqe_count;
+       rq->stats.cqe_compress_pkts += cqe_count;
+
+       return cqe_count;
+}
+
+static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
+                                             struct mlx5e_cq *cq,
+                                             int budget_rem)
+{
+       mlx5e_read_title_slot(rq, cq, cq->wq.cc);
+       mlx5e_read_mini_arr_slot(cq, cq->wq.cc + 1);
+       mlx5e_decompress_cqe(rq, cq, cq->wq.cc);
+       rq->handle_rx_cqe(rq, &cq->title);
+       cq->mini_arr_idx++;
+
+       return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1;
+}
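
A compressed session is one uncompressed "title" CQE followed by arrays of mini CQEs; decompression rebuilds each full CQE by patching the mini entry's fields into a copy of the title. A sketch of the mini entry as consumed above, field names mirroring the cq->mini_arr accesses (the authoritative layout, with more fields, lives in the mlx5 device headers):

    #include <linux/types.h>

    struct mini_cqe_sketch {
            __be32 byte_cnt;        /* patched into title.byte_cnt  */
            __be32 checksum;        /* patched into title.check_sum */
    };

    /* title.byte_cnt seeds cq->decmprs_left, the count of CQEs the session
     * still owes; mlx5e_decompress_cqes_cont() lets one session span
     * several NAPI budgets. */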
+
+void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val)
+{
+       bool was_opened;
+
+       if (!MLX5_CAP_GEN(priv->mdev, cqe_compression))
+               return;
+
+       mutex_lock(&priv->state_lock);
+
+       if (priv->params.rx_cqe_compress == val)
+               goto unlock;
+
+       was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+       if (was_opened)
+               mlx5e_close_locked(priv->netdev);
+
+       priv->params.rx_cqe_compress = val;
+
+       if (was_opened)
+               mlx5e_open_locked(priv->netdev);
+
+unlock:
+       mutex_unlock(&priv->state_lock);
+}
+
+int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
 {
        struct sk_buff *skb;
        dma_addr_t dma_addr;
 
-       skb = netdev_alloc_skb(rq->netdev, rq->wqe_sz);
+       skb = napi_alloc_skb(rq->cq.napi, rq->wqe_sz);
        if (unlikely(!skb))
                return -ENOMEM;
 
@@ -62,10 +198,9 @@ static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
        if (unlikely(dma_mapping_error(rq->pdev, dma_addr)))
                goto err_free_skb;
 
-       skb_reserve(skb, MLX5E_NET_IP_ALIGN);
-
        *((dma_addr_t *)skb->cb) = dma_addr;
-       wqe->data.addr = cpu_to_be64(dma_addr + MLX5E_NET_IP_ALIGN);
+       wqe->data.addr = cpu_to_be64(dma_addr);
+       wqe->data.lkey = rq->mkey_be;
 
        rq->skb[ix] = skb;
 
@@ -77,18 +212,389 @@ err_free_skb:
        return -ENOMEM;
 }
 
+static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
+{
+       return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER;
+}
+
+static inline void
+mlx5e_dma_pre_sync_linear_mpwqe(struct device *pdev,
+                               struct mlx5e_mpw_info *wi,
+                               u32 wqe_offset, u32 len)
+{
+       dma_sync_single_for_cpu(pdev, wi->dma_info.addr + wqe_offset,
+                               len, DMA_FROM_DEVICE);
+}
+
+static inline void
+mlx5e_dma_pre_sync_fragmented_mpwqe(struct device *pdev,
+                                   struct mlx5e_mpw_info *wi,
+                                   u32 wqe_offset, u32 len)
+{
+       /* No dma pre sync for fragmented MPWQE */
+}
+
+static inline void
+mlx5e_add_skb_frag_linear_mpwqe(struct mlx5e_rq *rq,
+                               struct sk_buff *skb,
+                               struct mlx5e_mpw_info *wi,
+                               u32 page_idx, u32 frag_offset,
+                               u32 len)
+{
+       unsigned int truesize = ALIGN(len, rq->mpwqe_stride_sz);
+
+       wi->skbs_frags[page_idx]++;
+       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+                       &wi->dma_info.page[page_idx], frag_offset,
+                       len, truesize);
+}
+
+static inline void
+mlx5e_add_skb_frag_fragmented_mpwqe(struct mlx5e_rq *rq,
+                                   struct sk_buff *skb,
+                                   struct mlx5e_mpw_info *wi,
+                                   u32 page_idx, u32 frag_offset,
+                                   u32 len)
+{
+       unsigned int truesize = ALIGN(len, rq->mpwqe_stride_sz);
+
+       dma_sync_single_for_cpu(rq->pdev,
+                               wi->umr.dma_info[page_idx].addr + frag_offset,
+                               len, DMA_FROM_DEVICE);
+       wi->skbs_frags[page_idx]++;
+       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+                       wi->umr.dma_info[page_idx].page, frag_offset,
+                       len, truesize);
+}
+
+static inline void
+mlx5e_copy_skb_header_linear_mpwqe(struct device *pdev,
+                                  struct sk_buff *skb,
+                                  struct mlx5e_mpw_info *wi,
+                                  u32 page_idx, u32 offset,
+                                  u32 headlen)
+{
+       struct page *page = &wi->dma_info.page[page_idx];
+
+       skb_copy_to_linear_data(skb, page_address(page) + offset,
+                               ALIGN(headlen, sizeof(long)));
+}
+
+static inline void
+mlx5e_copy_skb_header_fragmented_mpwqe(struct device *pdev,
+                                      struct sk_buff *skb,
+                                      struct mlx5e_mpw_info *wi,
+                                      u32 page_idx, u32 offset,
+                                      u32 headlen)
+{
+       u16 headlen_pg = min_t(u32, headlen, PAGE_SIZE - offset);
+       struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[page_idx];
+       unsigned int len;
+
+       /* Aligning len to sizeof(long) optimizes memcpy performance */
+       len = ALIGN(headlen_pg, sizeof(long));
+       dma_sync_single_for_cpu(pdev, dma_info->addr + offset, len,
+                               DMA_FROM_DEVICE);
+       skb_copy_to_linear_data_offset(skb, 0,
+                                      page_address(dma_info->page) + offset,
+                                      len);
+       if (unlikely(offset + headlen > PAGE_SIZE)) {
+               dma_info++;
+               headlen_pg = len;
+               len = ALIGN(headlen - headlen_pg, sizeof(long));
+               dma_sync_single_for_cpu(pdev, dma_info->addr, len,
+                                       DMA_FROM_DEVICE);
+               skb_copy_to_linear_data_offset(skb, headlen_pg,
+                                              page_address(dma_info->page),
+                                              len);
+       }
+}
+
+static u16 mlx5e_get_wqe_mtt_offset(u16 rq_ix, u16 wqe_ix)
+{
+       return rq_ix * MLX5_CHANNEL_MAX_NUM_MTTS +
+               wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
+}
+
+static void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
+                               struct mlx5e_sq *sq,
+                               struct mlx5e_umr_wqe *wqe,
+                               u16 ix)
+{
+       struct mlx5_wqe_ctrl_seg      *cseg = &wqe->ctrl;
+       struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
+       struct mlx5_wqe_data_seg      *dseg = &wqe->data;
+       struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+       u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
+       u16 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix);
+
+       memset(wqe, 0, sizeof(*wqe));
+       cseg->opmod_idx_opcode =
+               cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
+                           MLX5_OPCODE_UMR);
+       cseg->qpn_ds    = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
+                                     ds_cnt);
+       cseg->fm_ce_se  = MLX5_WQE_CTRL_CQ_UPDATE;
+       cseg->imm       = rq->umr_mkey_be;
+
+       ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
+       ucseg->klm_octowords =
+               cpu_to_be16(mlx5e_get_mtt_octw(MLX5_MPWRQ_PAGES_PER_WQE));
+       ucseg->bsf_octowords =
+               cpu_to_be16(mlx5e_get_mtt_octw(umr_wqe_mtt_offset));
+       ucseg->mkey_mask     = cpu_to_be64(MLX5_MKEY_MASK_FREE);
+
+       dseg->lkey = sq->mkey_be;
+       dseg->addr = cpu_to_be64(wi->umr.mtt_addr);
+}
+
+static void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
+{
+       struct mlx5e_sq *sq = &rq->channel->icosq;
+       struct mlx5_wq_cyc *wq = &sq->wq;
+       struct mlx5e_umr_wqe *wqe;
+       u8 num_wqebbs = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_BB);
+       u16 pi;
+
+       /* fill sq edge with nops to avoid wqe wrap around */
+       while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
+               sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP;
+               sq->ico_wqe_info[pi].num_wqebbs = 1;
+               mlx5e_send_nop(sq, true);
+       }
+
+       wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+       mlx5e_build_umr_wqe(rq, sq, wqe, ix);
+       sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_UMR;
+       sq->ico_wqe_info[pi].num_wqebbs = num_wqebbs;
+       sq->pc += num_wqebbs;
+       mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
+}
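
UMR WQEs occupy several WQEBBs and must not wrap around the end of the cyclic ring, so the tail is padded with single-BB NOPs until the producer index sits at or before sq->edge. The idiom in isolation, with illustrative parameters:

    #include <linux/types.h>

    /* Pad with 1-WQEBB NOPs so a multi-WQEBB WQE lands in contiguous
     * slots; post_nop() is an illustrative callback advancing the pc. */
    static u16 pad_to_edge(u16 pc, u16 ring_mask, u16 edge,
                           u16 (*post_nop)(u16 pc))
    {
            while ((pc & ring_mask) > edge)
                    pc = post_nop(pc);

            return pc;      /* caller writes its num_wqebbs WQE at pc & ring_mask */
    }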
+
+static inline int mlx5e_get_wqe_mtt_sz(void)
+{
+       /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
+        * To avoid copying garbage after the mtt array, we allocate
+        * a little more.
+        */
+       return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64),
+                    MLX5_UMR_MTT_ALIGNMENT);
+}
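
Worked example of the over-allocation, assuming 16 pages per WQE and a 64-byte MLX5_UMR_MTT_ALIGNMENT:

    ALIGN(16 * sizeof(__be64), 64)  /* = 128, already a multiple of 64 */
    ALIGN(10 * sizeof(__be64), 64)  /* = 128, 80 bytes rounded up      */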
+
+static int mlx5e_alloc_and_map_page(struct mlx5e_rq *rq,
+                                   struct mlx5e_mpw_info *wi,
+                                   int i)
+{
+       struct page *page;
+
+       page = dev_alloc_page();
+       if (unlikely(!page))
+               return -ENOMEM;
+
+       wi->umr.dma_info[i].page = page;
+       wi->umr.dma_info[i].addr = dma_map_page(rq->pdev, page, 0, PAGE_SIZE,
+                                               PCI_DMA_FROMDEVICE);
+       if (unlikely(dma_mapping_error(rq->pdev, wi->umr.dma_info[i].addr))) {
+               put_page(page);
+               return -ENOMEM;
+       }
+       wi->umr.mtt[i] = cpu_to_be64(wi->umr.dma_info[i].addr | MLX5_EN_WR);
+
+       return 0;
+}
+
+static int mlx5e_alloc_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
+                                          struct mlx5e_rx_wqe *wqe,
+                                          u16 ix)
+{
+       struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+       int mtt_sz = mlx5e_get_wqe_mtt_sz();
+       u32 dma_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix) << PAGE_SHIFT;
+       int i;
+
+       wi->umr.dma_info = kmalloc(sizeof(*wi->umr.dma_info) *
+                                  MLX5_MPWRQ_PAGES_PER_WQE,
+                                  GFP_ATOMIC);
+       if (unlikely(!wi->umr.dma_info))
+               goto err_out;
+
+       /* We allocate more than mtt_sz as we will align the pointer */
+       wi->umr.mtt_no_align = kzalloc(mtt_sz + MLX5_UMR_ALIGN - 1,
+                                      GFP_ATOMIC);
+       if (unlikely(!wi->umr.mtt_no_align))
+               goto err_free_umr;
+
+       wi->umr.mtt = PTR_ALIGN(wi->umr.mtt_no_align, MLX5_UMR_ALIGN);
+       wi->umr.mtt_addr = dma_map_single(rq->pdev, wi->umr.mtt, mtt_sz,
+                                         PCI_DMA_TODEVICE);
+       if (unlikely(dma_mapping_error(rq->pdev, wi->umr.mtt_addr)))
+               goto err_free_mtt;
+
+       for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
+               if (unlikely(mlx5e_alloc_and_map_page(rq, wi, i)))
+                       goto err_unmap;
+               atomic_add(mlx5e_mpwqe_strides_per_page(rq),
+                          &wi->umr.dma_info[i].page->_count);
+               wi->skbs_frags[i] = 0;
+       }
+
+       wi->consumed_strides = 0;
+       wi->dma_pre_sync = mlx5e_dma_pre_sync_fragmented_mpwqe;
+       wi->add_skb_frag = mlx5e_add_skb_frag_fragmented_mpwqe;
+       wi->copy_skb_header = mlx5e_copy_skb_header_fragmented_mpwqe;
+       wi->free_wqe     = mlx5e_free_rx_fragmented_mpwqe;
+       wqe->data.lkey = rq->umr_mkey_be;
+       wqe->data.addr = cpu_to_be64(dma_offset);
+
+       return 0;
+
+err_unmap:
+       while (--i >= 0) {
+               dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE,
+                              PCI_DMA_FROMDEVICE);
+               atomic_sub(mlx5e_mpwqe_strides_per_page(rq),
+                          &wi->umr.dma_info[i].page->_count);
+               put_page(wi->umr.dma_info[i].page);
+       }
+       dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz, PCI_DMA_TODEVICE);
+
+err_free_mtt:
+       kfree(wi->umr.mtt_no_align);
+
+err_free_umr:
+       kfree(wi->umr.dma_info);
+
+err_out:
+       return -ENOMEM;
+}
+
+void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
+                                   struct mlx5e_mpw_info *wi)
+{
+       int mtt_sz = mlx5e_get_wqe_mtt_sz();
+       int i;
+
+       for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
+               dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE,
+                              PCI_DMA_FROMDEVICE);
+               atomic_sub(mlx5e_mpwqe_strides_per_page(rq) - wi->skbs_frags[i],
+                          &wi->umr.dma_info[i].page->_count);
+               put_page(wi->umr.dma_info[i].page);
+       }
+       dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz, PCI_DMA_TODEVICE);
+       kfree(wi->umr.mtt_no_align);
+       kfree(wi->umr.dma_info);
+}
+
+void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq)
+{
+       struct mlx5_wq_ll *wq = &rq->wq;
+       struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
+
+       clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
+       mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
+       rq->stats.mpwqe_frag++;
+
+       /* ensure wqes are visible to device before updating doorbell record */
+       dma_wmb();
+
+       mlx5_wq_ll_update_db_record(wq);
+}
+
+static int mlx5e_alloc_rx_linear_mpwqe(struct mlx5e_rq *rq,
+                                      struct mlx5e_rx_wqe *wqe,
+                                      u16 ix)
+{
+       struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+       gfp_t gfp_mask;
+       int i;
+
+       gfp_mask = GFP_ATOMIC | __GFP_COLD | __GFP_MEMALLOC;
+       wi->dma_info.page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
+                                            MLX5_MPWRQ_WQE_PAGE_ORDER);
+       if (unlikely(!wi->dma_info.page))
+               return -ENOMEM;
+
+       wi->dma_info.addr = dma_map_page(rq->pdev, wi->dma_info.page, 0,
+                                        rq->wqe_sz, PCI_DMA_FROMDEVICE);
+       if (unlikely(dma_mapping_error(rq->pdev, wi->dma_info.addr))) {
+               put_page(wi->dma_info.page);
+               return -ENOMEM;
+       }
+
+       /* We split the high-order page into order-0 ones and manage their
+        * reference counter to minimize the memory held by small skb fragments
+        */
+       split_page(wi->dma_info.page, MLX5_MPWRQ_WQE_PAGE_ORDER);
+       for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
+               atomic_add(mlx5e_mpwqe_strides_per_page(rq),
+                          &wi->dma_info.page[i]._count);
+               wi->skbs_frags[i] = 0;
+       }
+
+       wi->consumed_strides = 0;
+       wi->dma_pre_sync = mlx5e_dma_pre_sync_linear_mpwqe;
+       wi->add_skb_frag = mlx5e_add_skb_frag_linear_mpwqe;
+       wi->copy_skb_header = mlx5e_copy_skb_header_linear_mpwqe;
+       wi->free_wqe     = mlx5e_free_rx_linear_mpwqe;
+       wqe->data.lkey = rq->mkey_be;
+       wqe->data.addr = cpu_to_be64(wi->dma_info.addr);
+
+       return 0;
+}
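
The high-order page is split into independently refcounted order-0 pages, and each one is pre-charged with strides-per-page extra references; frags attached to SKBs hand those references to the stack, and the free path below returns whatever was not consumed. The balance per order-0 page, written out:

    /* For S = strides per page and F = frags actually attached (F <= S):
     *   alloc:     +1 (split_page)  +S (atomic_add)
     *   rx path:   each attached frag hands one ref to its skb (F total)
     *   free path: -(S - F) (atomic_sub)  -1 (put_page)
     *   skb free:  -F as the stack releases the frags
     * Net zero: the page returns to the allocator with the last skb. */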
+
+void mlx5e_free_rx_linear_mpwqe(struct mlx5e_rq *rq,
+                               struct mlx5e_mpw_info *wi)
+{
+       int i;
+
+       dma_unmap_page(rq->pdev, wi->dma_info.addr, rq->wqe_sz,
+                      PCI_DMA_FROMDEVICE);
+       for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
+               atomic_sub(mlx5e_mpwqe_strides_per_page(rq) - wi->skbs_frags[i],
+                          &wi->dma_info.page[i]._count);
+               put_page(&wi->dma_info.page[i]);
+       }
+}
+
+int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
+{
+       int err;
+
+       err = mlx5e_alloc_rx_linear_mpwqe(rq, wqe, ix);
+       if (unlikely(err)) {
+               err = mlx5e_alloc_rx_fragmented_mpwqe(rq, wqe, ix);
+               if (unlikely(err))
+                       return err;
+               set_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
+               mlx5e_post_umr_wqe(rq, ix);
+               return -EBUSY;
+       }
+
+       return 0;
+}
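
Allocation strategy: try the cheap linear path (one high-order page) first; under memory pressure fall back to the fragmented path, which maps MLX5_MPWRQ_PAGES_PER_WQE separate pages and must post a UMR WQE to stitch them into one virtually contiguous buffer. The -EBUSY return, summarized:

    /* Control flow of the fallback above:
     *   linear alloc ok     -> return 0, WQE posted as usual
     *   fragmented alloc ok -> post UMR WQE, set UMR_WQE_IN_PROGRESS,
     *                          return -EBUSY; RQ posting pauses until
     *                          mlx5e_post_rx_fragmented_mpwqe() re-arms it
     *   both fail           -> -ENOMEM, counted as buff_alloc_err below */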
+
+#define RQ_CANNOT_POST(rq) \
+               (!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \
+                test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
+
 bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
 {
        struct mlx5_wq_ll *wq = &rq->wq;
 
-       if (unlikely(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state)))
+       if (unlikely(RQ_CANNOT_POST(rq)))
                return false;
 
        while (!mlx5_wq_ll_is_full(wq)) {
                struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
+               int err;
 
-               if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, wq->head)))
+               err = rq->alloc_wqe(rq, wqe, wq->head);
+               if (unlikely(err)) {
+                       if (err != -EBUSY)
+                               rq->stats.buff_alloc_err++;
                        break;
+               }
 
                mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
        }
@@ -101,7 +607,8 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
        return !mlx5_wq_ll_is_full(wq);
 }
 
-static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe)
+static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
+                                u32 cqe_bcnt)
 {
        struct ethhdr   *eth    = (struct ethhdr *)(skb->data);
        struct iphdr    *ipv4   = (struct iphdr *)(skb->data + ETH_HLEN);
@@ -112,7 +619,7 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe)
        int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA  == l4_hdr_type) ||
                       (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));
 
-       u16 tot_len = be32_to_cpu(cqe->byte_cnt) - ETH_HLEN;
+       u16 tot_len = cqe_bcnt - ETH_HLEN;
 
        if (eth->h_proto == htons(ETH_P_IP)) {
                tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
@@ -176,35 +683,43 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 
        if (lro) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
-       } else if (likely(is_first_ethertype_ip(skb))) {
+               return;
+       }
+
+       if (is_first_ethertype_ip(skb)) {
                skb->ip_summed = CHECKSUM_COMPLETE;
                skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
                rq->stats.csum_sw++;
-       } else {
-               goto csum_none;
+               return;
        }
 
-       return;
-
+       if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
+                  (cqe->hds_ip_ext & CQE_L4_OK))) {
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+               if (cqe_is_tunneled(cqe)) {
+                       skb->csum_level = 1;
+                       skb->encapsulation = 1;
+                       rq->stats.csum_inner++;
+               }
+               return;
+       }
 csum_none:
        skb->ip_summed = CHECKSUM_NONE;
        rq->stats.csum_none++;
 }
 
 static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
+                                     u32 cqe_bcnt,
                                      struct mlx5e_rq *rq,
                                      struct sk_buff *skb)
 {
        struct net_device *netdev = rq->netdev;
-       u32 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
        struct mlx5e_tstamp *tstamp = rq->tstamp;
        int lro_num_seg;
 
-       skb_put(skb, cqe_bcnt);
-
        lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
        if (lro_num_seg > 1) {
-               mlx5e_lro_update_hdr(skb, cqe);
+               mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
                skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
                rq->stats.lro_packets++;
                rq->stats.lro_bytes += cqe_bcnt;
@@ -213,10 +728,6 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
        if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
                mlx5e_fill_hwstamp(tstamp, get_cqe_ts(cqe), skb_hwtstamps(skb));
 
-       mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
-
-       skb->protocol = eth_type_trans(skb, netdev);
-
        skb_record_rx_queue(skb, rq->ix);
 
        if (likely(netdev->features & NETIF_F_RXHASH))
@@ -227,52 +738,165 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
                                       be16_to_cpu(cqe->vlan_info));
 
        skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
+
+       mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
+       skb->protocol = eth_type_trans(skb, netdev);
+}
+
+static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
+                                        struct mlx5_cqe64 *cqe,
+                                        u32 cqe_bcnt,
+                                        struct sk_buff *skb)
+{
+       rq->stats.packets++;
+       rq->stats.bytes += cqe_bcnt;
+       mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
+       napi_gro_receive(rq->cq.napi, skb);
+}
+
+void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+       struct mlx5e_rx_wqe *wqe;
+       struct sk_buff *skb;
+       __be16 wqe_counter_be;
+       u16 wqe_counter;
+       u32 cqe_bcnt;
+
+       wqe_counter_be = cqe->wqe_counter;
+       wqe_counter    = be16_to_cpu(wqe_counter_be);
+       wqe            = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+       skb            = rq->skb[wqe_counter];
+       prefetch(skb->data);
+       rq->skb[wqe_counter] = NULL;
+
+       dma_unmap_single(rq->pdev,
+                        *((dma_addr_t *)skb->cb),
+                        rq->wqe_sz,
+                        DMA_FROM_DEVICE);
+
+       if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+               rq->stats.wqe_err++;
+               dev_kfree_skb(skb);
+               goto wq_ll_pop;
+       }
+
+       cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+       skb_put(skb, cqe_bcnt);
+
+       mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+
+wq_ll_pop:
+       mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
+                      &wqe->next.next_wqe_index);
+}
+
+static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq,
+                                          struct mlx5_cqe64 *cqe,
+                                          struct mlx5e_mpw_info *wi,
+                                          u32 cqe_bcnt,
+                                          struct sk_buff *skb)
+{
+       u32 consumed_bytes = ALIGN(cqe_bcnt, rq->mpwqe_stride_sz);
+       u16 stride_ix      = mpwrq_get_cqe_stride_index(cqe);
+       u32 wqe_offset     = stride_ix * rq->mpwqe_stride_sz;
+       u32 head_offset    = wqe_offset & (PAGE_SIZE - 1);
+       u32 page_idx       = wqe_offset >> PAGE_SHIFT;
+       u32 head_page_idx  = page_idx;
+       u16 headlen = min_t(u16, MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, cqe_bcnt);
+       u32 frag_offset    = head_offset + headlen;
+       u16 byte_cnt       = cqe_bcnt - headlen;
+
+       if (unlikely(frag_offset >= PAGE_SIZE)) {
+               page_idx++;
+               frag_offset -= PAGE_SIZE;
+       }
+       wi->dma_pre_sync(rq->pdev, wi, wqe_offset, consumed_bytes);
+
+       while (byte_cnt) {
+               u32 pg_consumed_bytes =
+                       min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
+
+               wi->add_skb_frag(rq, skb, wi, page_idx, frag_offset,
+                                pg_consumed_bytes);
+               byte_cnt -= pg_consumed_bytes;
+               frag_offset = 0;
+               page_idx++;
+       }
+       /* copy header */
+       wi->copy_skb_header(rq->pdev, skb, wi, head_page_idx, head_offset,
+                           headlen);
+       /* skb linear part was allocated with headlen and aligned to long */
+       skb->tail += headlen;
+       skb->len  += headlen;
+}
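
The index arithmetic above is easiest to follow with concrete values; an illustrative walk-through assuming a 64-byte stride and 4KB pages (numbers hypothetical):

    /* stride_sz = 64, PAGE_SIZE = 4096, stride_ix = 70, cqe_bcnt = 1514:
     *   wqe_offset  = 70 * 64     = 4480
     *   head_offset = 4480 & 4095 = 384   (offset within the page)
     *   page_idx    = 4480 >> 12  = 1     (second order-0 page)
     * headlen bytes are copied into the skb linear part; the remaining
     * 1514 - headlen bytes are attached page by page as frags, starting
     * at frag_offset = 384 + headlen within page_idx.
     */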
+
+void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+       u16 cstrides       = mpwrq_get_cqe_consumed_strides(cqe);
+       u16 wqe_id         = be16_to_cpu(cqe->wqe_id);
+       struct mlx5e_mpw_info *wi = &rq->wqe_info[wqe_id];
+       struct mlx5e_rx_wqe  *wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id);
+       struct sk_buff *skb;
+       u16 cqe_bcnt;
+
+       wi->consumed_strides += cstrides;
+
+       if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+               rq->stats.wqe_err++;
+               goto mpwrq_cqe_out;
+       }
+
+       if (unlikely(mpwrq_is_filler_cqe(cqe))) {
+               rq->stats.mpwqe_filler++;
+               goto mpwrq_cqe_out;
+       }
+
+       skb = napi_alloc_skb(rq->cq.napi,
+                            ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD,
+                                  sizeof(long)));
+       if (unlikely(!skb)) {
+               rq->stats.buff_alloc_err++;
+               goto mpwrq_cqe_out;
+       }
+
+       prefetch(skb->data);
+       cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
+
+       mlx5e_mpwqe_fill_rx_skb(rq, cqe, wi, cqe_bcnt, skb);
+       mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+
+mpwrq_cqe_out:
+       if (likely(wi->consumed_strides < rq->mpwqe_num_strides))
+               return;
+
+       wi->free_wqe(rq, wi);
+       mlx5_wq_ll_pop(&rq->wq, cqe->wqe_id, &wqe->next.next_wqe_index);
 }
 
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
 {
        struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
-       int work_done;
+       int work_done = 0;
+
+       if (cq->decmprs_left)
+               work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
 
-       for (work_done = 0; work_done < budget; work_done++) {
-               struct mlx5e_rx_wqe *wqe;
-               struct mlx5_cqe64 *cqe;
-               struct sk_buff *skb;
-               __be16 wqe_counter_be;
-               u16 wqe_counter;
+       for (; work_done < budget; work_done++) {
+               struct mlx5_cqe64 *cqe = mlx5e_get_cqe(cq);
 
-               cqe = mlx5e_get_cqe(cq);
                if (!cqe)
                        break;
 
-               mlx5_cqwq_pop(&cq->wq);
-
-               wqe_counter_be = cqe->wqe_counter;
-               wqe_counter    = be16_to_cpu(wqe_counter_be);
-               wqe            = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
-               skb            = rq->skb[wqe_counter];
-               prefetch(skb->data);
-               rq->skb[wqe_counter] = NULL;
-
-               dma_unmap_single(rq->pdev,
-                                *((dma_addr_t *)skb->cb),
-                                rq->wqe_sz,
-                                DMA_FROM_DEVICE);
-
-               if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
-                       rq->stats.wqe_err++;
-                       dev_kfree_skb(skb);
-                       goto wq_ll_pop;
+               if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
+                       work_done +=
+                               mlx5e_decompress_cqes_start(rq, cq,
+                                                           budget - work_done);
+                       continue;
                }
 
-               mlx5e_build_rx_skb(cqe, rq, skb);
-               rq->stats.packets++;
-               rq->stats.bytes += be32_to_cpu(cqe->byte_cnt);
-               napi_gro_receive(cq->napi, skb);
+               mlx5_cqwq_pop(&cq->wq);
 
-wq_ll_pop:
-               mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
-                              &wqe->next.next_wqe_index);
+               rq->handle_rx_cqe(rq, cqe);
        }
 
        mlx5_cqwq_update_db_record(&cq->wq);
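
The reworked poll loop also handles CQE compression. A flow sketch of the three cases, inferred from the helper names (the decompress helpers themselves are outside this hunk):

    /* 1. Leftovers first: a compressed block only partially expanded on
     *    the previous poll (cq->decmprs_left) is drained before new CQEs.
     * 2. MLX5_COMPRESSED: the block is expanded into individual
     *    completions, each fed to rq->handle_rx_cqe().
     * 3. Ordinary CQE: popped and dispatched via rq->handle_rx_cqe(),
     *    which is either mlx5e_handle_rx_cqe() or
     *    mlx5e_handle_rx_cqe_mpwrq() depending on the RQ type.
     */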
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
new file mode 100644 (file)
index 0000000..83bc32b
--- /dev/null
@@ -0,0 +1,367 @@
+/*
+ * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __MLX5_EN_STATS_H__
+#define __MLX5_EN_STATS_H__
+
+#define MLX5E_READ_CTR64_CPU(ptr, dsc, i) \
+       (*(u64 *)((char *)ptr + dsc[i].offset))
+#define MLX5E_READ_CTR64_BE(ptr, dsc, i) \
+       be64_to_cpu(*(__be64 *)((char *)ptr + dsc[i].offset))
+#define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \
+       (*(u32 *)((char *)ptr + dsc[i].offset))
+#define MLX5E_READ_CTR32_BE(ptr, dsc, i) \
+       be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
+
+#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
+
+struct counter_desc {
+       char            name[ETH_GSTRING_LEN];
+       int             offset; /* Byte offset */
+};
+
+struct mlx5e_sw_stats {
+       u64 rx_packets;
+       u64 rx_bytes;
+       u64 tx_packets;
+       u64 tx_bytes;
+       u64 tso_packets;
+       u64 tso_bytes;
+       u64 tso_inner_packets;
+       u64 tso_inner_bytes;
+       u64 lro_packets;
+       u64 lro_bytes;
+       u64 rx_csum_good;
+       u64 rx_csum_none;
+       u64 rx_csum_sw;
+       u64 rx_csum_inner;
+       u64 tx_csum_offload;
+       u64 tx_csum_inner;
+       u64 tx_queue_stopped;
+       u64 tx_queue_wake;
+       u64 tx_queue_dropped;
+       u64 rx_wqe_err;
+       u64 rx_mpwqe_filler;
+       u64 rx_mpwqe_frag;
+       u64 rx_buff_alloc_err;
+       u64 rx_cqe_compress_blks;
+       u64 rx_cqe_compress_pkts;
+
+       /* Special handling counters */
+       u64 link_down_events;
+};
+
+static const struct counter_desc sw_stats_desc[] = {
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_packets) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_bytes) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_inner_packets) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_inner_bytes) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, lro_packets) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, lro_bytes) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_good) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_sw) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_inner) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_offload) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_inner) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_frag) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events) },
+};
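
Each descriptor pairs an ethtool string with the byte offset of its field, so callers can walk the table generically. A minimal sketch of the assumed consumer pattern (function name hypothetical):

    static void mlx5e_fill_sw_stats(struct mlx5e_sw_stats *sw, u64 *data)
    {
            int i;

            /* One loop serves every counter: the descriptor supplies the
             * offset, MLX5E_READ_CTR64_CPU does the typed load.
             */
            for (i = 0; i < NUM_SW_COUNTERS; i++)
                    data[i] = MLX5E_READ_CTR64_CPU(sw, sw_stats_desc, i);
    }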
+
+struct mlx5e_qcounter_stats {
+       u32 rx_out_of_buffer;
+};
+
+static const struct counter_desc q_stats_desc[] = {
+       { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
+};
+
+#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
+#define VPORT_COUNTER_GET(vstats, c) MLX5_GET64(query_vport_counter_out, \
+                                               vstats->query_vport_out, c)
+
+struct mlx5e_vport_stats {
+       __be64 query_vport_out[MLX5_ST_SZ_QW(query_vport_counter_out)];
+};
+
+static const struct counter_desc vport_stats_desc[] = {
+       { "rx_vport_error_packets",
+               VPORT_COUNTER_OFF(received_errors.packets) },
+       { "rx_vport_error_bytes", VPORT_COUNTER_OFF(received_errors.octets) },
+       { "tx_vport_error_packets",
+               VPORT_COUNTER_OFF(transmit_errors.packets) },
+       { "tx_vport_error_bytes", VPORT_COUNTER_OFF(transmit_errors.octets) },
+       { "rx_vport_unicast_packets",
+               VPORT_COUNTER_OFF(received_eth_unicast.packets) },
+       { "rx_vport_unicast_bytes",
+               VPORT_COUNTER_OFF(received_eth_unicast.octets) },
+       { "tx_vport_unicast_packets",
+               VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
+       { "tx_vport_unicast_bytes",
+               VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
+       { "rx_vport_multicast_packets",
+               VPORT_COUNTER_OFF(received_eth_multicast.packets) },
+       { "rx_vport_multicast_bytes",
+               VPORT_COUNTER_OFF(received_eth_multicast.octets) },
+       { "tx_vport_multicast_packets",
+               VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
+       { "tx_vport_multicast_bytes",
+               VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
+       { "rx_vport_broadcast_packets",
+               VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
+       { "rx_vport_broadcast_bytes",
+               VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
+       { "tx_vport_broadcast_packets",
+               VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
+       { "tx_vport_broadcast_bytes",
+               VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
+};
+
+#define PPORT_802_3_OFF(c) \
+       MLX5_BYTE_OFF(ppcnt_reg, \
+                     counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
+#define PPORT_802_3_GET(pstats, c) \
+       MLX5_GET64(ppcnt_reg, pstats->IEEE_802_3_counters, \
+                  counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
+#define PPORT_2863_OFF(c) \
+       MLX5_BYTE_OFF(ppcnt_reg, \
+                     counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
+#define PPORT_2863_GET(pstats, c) \
+       MLX5_GET64(ppcnt_reg, pstats->RFC_2863_counters, \
+                  counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
+#define PPORT_2819_OFF(c) \
+       MLX5_BYTE_OFF(ppcnt_reg, \
+                     counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
+#define PPORT_2819_GET(pstats, c) \
+       MLX5_GET64(ppcnt_reg, pstats->RFC_2819_counters, \
+                  counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
+#define PPORT_PER_PRIO_OFF(c) \
+       MLX5_BYTE_OFF(ppcnt_reg, \
+                     counter_set.eth_per_prio_grp_data_layout.c##_high)
+#define PPORT_PER_PRIO_GET(pstats, prio, c) \
+       MLX5_GET64(ppcnt_reg, pstats->per_prio_counters[prio], \
+                  counter_set.eth_per_prio_grp_data_layout.c##_high)
+#define NUM_PPORT_PRIO                         8
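
The PPORT macros rely on token pasting: c##_high appends "_high" to the counter name because PPCNT 64-bit counters are laid out as adjacent high/low 32-bit words and MLX5_GET64 reads 8 bytes starting at the high word. Expansion sketch:

    /* PPORT_802_3_GET(pstats, a_frames_transmitted_ok) expands to:
     *
     *   MLX5_GET64(ppcnt_reg, pstats->IEEE_802_3_counters,
     *              counter_set.eth_802_3_cntrs_grp_data_layout.
     *                          a_frames_transmitted_ok_high)
     *
     * i.e. a 64-bit read anchored at the counter's _high word.
     */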
+
+struct mlx5e_pport_stats {
+       __be64 IEEE_802_3_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+       __be64 RFC_2863_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+       __be64 RFC_2819_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+       __be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
+       __be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+};
+
+static const struct counter_desc pport_802_3_stats_desc[] = {
+       { "frames_tx", PPORT_802_3_OFF(a_frames_transmitted_ok) },
+       { "frames_rx", PPORT_802_3_OFF(a_frames_received_ok) },
+       { "check_seq_err", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
+       { "alignment_err", PPORT_802_3_OFF(a_alignment_errors) },
+       { "octets_tx", PPORT_802_3_OFF(a_octets_transmitted_ok) },
+       { "octets_received", PPORT_802_3_OFF(a_octets_received_ok) },
+       { "multicast_xmitted", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
+       { "broadcast_xmitted", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
+       { "multicast_rx", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
+       { "broadcast_rx", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
+       { "in_range_len_errors", PPORT_802_3_OFF(a_in_range_length_errors) },
+       { "out_of_range_len", PPORT_802_3_OFF(a_out_of_range_length_field) },
+       { "too_long_errors", PPORT_802_3_OFF(a_frame_too_long_errors) },
+       { "symbol_err", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
+       { "mac_control_tx", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
+       { "mac_control_rx", PPORT_802_3_OFF(a_mac_control_frames_received) },
+       { "unsupported_op_rx",
+               PPORT_802_3_OFF(a_unsupported_opcodes_received) },
+       { "pause_ctrl_rx", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
+       { "pause_ctrl_tx",
+               PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
+};
+
+static const struct counter_desc pport_2863_stats_desc[] = {
+       { "in_octets", PPORT_2863_OFF(if_in_octets) },
+       { "in_ucast_pkts", PPORT_2863_OFF(if_in_ucast_pkts) },
+       { "in_discards", PPORT_2863_OFF(if_in_discards) },
+       { "in_errors", PPORT_2863_OFF(if_in_errors) },
+       { "in_unknown_protos", PPORT_2863_OFF(if_in_unknown_protos) },
+       { "out_octets", PPORT_2863_OFF(if_out_octets) },
+       { "out_ucast_pkts", PPORT_2863_OFF(if_out_ucast_pkts) },
+       { "out_discards", PPORT_2863_OFF(if_out_discards) },
+       { "out_errors", PPORT_2863_OFF(if_out_errors) },
+       { "in_multicast_pkts", PPORT_2863_OFF(if_in_multicast_pkts) },
+       { "in_broadcast_pkts", PPORT_2863_OFF(if_in_broadcast_pkts) },
+       { "out_multicast_pkts", PPORT_2863_OFF(if_out_multicast_pkts) },
+       { "out_broadcast_pkts", PPORT_2863_OFF(if_out_broadcast_pkts) },
+};
+
+static const struct counter_desc pport_2819_stats_desc[] = {
+       { "drop_events", PPORT_2819_OFF(ether_stats_drop_events) },
+       { "octets", PPORT_2819_OFF(ether_stats_octets) },
+       { "pkts", PPORT_2819_OFF(ether_stats_pkts) },
+       { "broadcast_pkts", PPORT_2819_OFF(ether_stats_broadcast_pkts) },
+       { "multicast_pkts", PPORT_2819_OFF(ether_stats_multicast_pkts) },
+       { "crc_align_errors", PPORT_2819_OFF(ether_stats_crc_align_errors) },
+       { "undersize_pkts", PPORT_2819_OFF(ether_stats_undersize_pkts) },
+       { "oversize_pkts", PPORT_2819_OFF(ether_stats_oversize_pkts) },
+       { "fragments", PPORT_2819_OFF(ether_stats_fragments) },
+       { "jabbers", PPORT_2819_OFF(ether_stats_jabbers) },
+       { "collisions", PPORT_2819_OFF(ether_stats_collisions) },
+       { "p64octets", PPORT_2819_OFF(ether_stats_pkts64octets) },
+       { "p65to127octets", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
+       { "p128to255octets", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
+       { "p256to511octets", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
+       { "p512to1023octets", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
+       { "p1024to1518octets",
+               PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
+       { "p1519to2047octets",
+               PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
+       { "p2048to4095octets",
+               PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
+       { "p4096to8191octets",
+               PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
+       { "p8192to10239octets",
+               PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
+};
+
+static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
+       { "rx_octets", PPORT_PER_PRIO_OFF(rx_octets) },
+       { "rx_frames", PPORT_PER_PRIO_OFF(rx_frames) },
+       { "tx_octets", PPORT_PER_PRIO_OFF(tx_octets) },
+       { "tx_frames", PPORT_PER_PRIO_OFF(tx_frames) },
+};
+
+static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
+       { "rx_pause", PPORT_PER_PRIO_OFF(rx_pause) },
+       { "rx_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
+       { "tx_pause", PPORT_PER_PRIO_OFF(tx_pause) },
+       { "tx_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
+       { "rx_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
+};
+
+struct mlx5e_rq_stats {
+       u64 packets;
+       u64 bytes;
+       u64 csum_sw;
+       u64 csum_inner;
+       u64 csum_none;
+       u64 lro_packets;
+       u64 lro_bytes;
+       u64 wqe_err;
+       u64 mpwqe_filler;
+       u64 mpwqe_frag;
+       u64 buff_alloc_err;
+       u64 cqe_compress_blks;
+       u64 cqe_compress_pkts;
+};
+
+static const struct counter_desc rq_stats_desc[] = {
+       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, packets) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, bytes) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_sw) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_inner) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_none) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, lro_packets) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, lro_bytes) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, wqe_err) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, mpwqe_frag) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
+};
+
+struct mlx5e_sq_stats {
+       /* commonly accessed in data path */
+       u64 packets;
+       u64 bytes;
+       u64 tso_packets;
+       u64 tso_bytes;
+       u64 tso_inner_packets;
+       u64 tso_inner_bytes;
+       u64 csum_offload_inner;
+       u64 nop;
+       /* less likely accessed in data path */
+       u64 csum_offload_none;
+       u64 stopped;
+       u64 wake;
+       u64 dropped;
+};
+
+static const struct counter_desc sq_stats_desc[] = {
+       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, packets) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, bytes) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_packets) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_bytes) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, csum_offload_inner) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, nop) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, csum_offload_none) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, stopped) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, wake) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, dropped) },
+};
+
+#define NUM_SW_COUNTERS                        ARRAY_SIZE(sw_stats_desc)
+#define NUM_Q_COUNTERS                 ARRAY_SIZE(q_stats_desc)
+#define NUM_VPORT_COUNTERS             ARRAY_SIZE(vport_stats_desc)
+#define NUM_PPORT_802_3_COUNTERS       ARRAY_SIZE(pport_802_3_stats_desc)
+#define NUM_PPORT_2863_COUNTERS                ARRAY_SIZE(pport_2863_stats_desc)
+#define NUM_PPORT_2819_COUNTERS                ARRAY_SIZE(pport_2819_stats_desc)
+#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \
+       ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
+#define NUM_PPORT_PER_PRIO_PFC_COUNTERS \
+       ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
+#define NUM_PPORT_COUNTERS             (NUM_PPORT_802_3_COUNTERS + \
+                                        NUM_PPORT_2863_COUNTERS  + \
+                                        NUM_PPORT_2819_COUNTERS  + \
+                                        NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \
+                                        NUM_PPORT_PRIO)
+#define NUM_RQ_STATS                   ARRAY_SIZE(rq_stats_desc)
+#define NUM_SQ_STATS                   ARRAY_SIZE(sq_stats_desc)
+
+struct mlx5e_stats {
+       struct mlx5e_sw_stats sw;
+       struct mlx5e_qcounter_stats qcnt;
+       struct mlx5e_vport_stats vport;
+       struct mlx5e_pport_stats pport;
+};
+
+#endif /* __MLX5_EN_STATS_H__ */
index b3de09f13425f39bc0afbc70e8966dcc36d49206..ef017c0decdc39b3de23bc9a260f17930f1e64cc 100644 (file)
@@ -46,8 +46,8 @@ struct mlx5e_tc_flow {
        struct mlx5_flow_rule   *rule;
 };
 
-#define MLX5E_TC_FLOW_TABLE_NUM_ENTRIES 1024
-#define MLX5E_TC_FLOW_TABLE_NUM_GROUPS 4
+#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
+#define MLX5E_TC_TABLE_NUM_GROUPS 4
 
 static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv,
                                                u32 *match_c, u32 *match_v,
@@ -55,33 +55,35 @@ static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv,
 {
        struct mlx5_flow_destination dest = {
                .type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
-               {.ft = priv->fts.vlan.t},
+               {.ft = priv->fs.vlan.ft.t},
        };
        struct mlx5_flow_rule *rule;
        bool table_created = false;
 
-       if (IS_ERR_OR_NULL(priv->fts.tc.t)) {
-               priv->fts.tc.t =
-                       mlx5_create_auto_grouped_flow_table(priv->fts.ns, 0,
-                                                           MLX5E_TC_FLOW_TABLE_NUM_ENTRIES,
-                                                           MLX5E_TC_FLOW_TABLE_NUM_GROUPS);
-               if (IS_ERR(priv->fts.tc.t)) {
+       if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
+               priv->fs.tc.t =
+                       mlx5_create_auto_grouped_flow_table(priv->fs.ns,
+                                                           MLX5E_TC_PRIO,
+                                                           MLX5E_TC_TABLE_NUM_ENTRIES,
+                                                           MLX5E_TC_TABLE_NUM_GROUPS,
+                                                           0);
+               if (IS_ERR(priv->fs.tc.t)) {
                        netdev_err(priv->netdev,
                                   "Failed to create tc offload table\n");
-                       return ERR_CAST(priv->fts.tc.t);
+                       return ERR_CAST(priv->fs.tc.t);
                }
 
                table_created = true;
        }
 
-       rule = mlx5_add_flow_rule(priv->fts.tc.t, MLX5_MATCH_OUTER_HEADERS,
+       rule = mlx5_add_flow_rule(priv->fs.tc.t, MLX5_MATCH_OUTER_HEADERS,
                                  match_c, match_v,
                                  action, flow_tag,
                                  action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST ? &dest : NULL);
 
        if (IS_ERR(rule) && table_created) {
-               mlx5_destroy_flow_table(priv->fts.tc.t);
-               priv->fts.tc.t = NULL;
+               mlx5_destroy_flow_table(priv->fs.tc.t);
+               priv->fs.tc.t = NULL;
        }
 
        return rule;
@@ -93,8 +95,8 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
        mlx5_del_flow_rule(rule);
 
        if (!mlx5e_tc_num_filters(priv)) {
-               mlx5_destroy_flow_table(priv->fts.tc.t);
-               priv->fts.tc.t = NULL;
+               mlx5_destroy_flow_table(priv->fs.tc.t);
+               priv->fs.tc.t = NULL;
        }
 }
 
@@ -310,7 +312,7 @@ static int parse_tc_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
                           struct tc_cls_flower_offload *f)
 {
-       struct mlx5e_tc_flow_table *tc = &priv->fts.tc;
+       struct mlx5e_tc_table *tc = &priv->fs.tc;
        u32 *match_c;
        u32 *match_v;
        int err = 0;
@@ -376,7 +378,7 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv,
                        struct tc_cls_flower_offload *f)
 {
        struct mlx5e_tc_flow *flow;
-       struct mlx5e_tc_flow_table *tc = &priv->fts.tc;
+       struct mlx5e_tc_table *tc = &priv->fs.tc;
 
        flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
                                      tc->ht_params);
@@ -401,7 +403,7 @@ static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
 
 int mlx5e_tc_init(struct mlx5e_priv *priv)
 {
-       struct mlx5e_tc_flow_table *tc = &priv->fts.tc;
+       struct mlx5e_tc_table *tc = &priv->fs.tc;
 
        tc->ht_params = mlx5e_tc_flow_ht_params;
        return rhashtable_init(&tc->ht, &tc->ht_params);
@@ -418,12 +420,12 @@ static void _mlx5e_tc_del_flow(void *ptr, void *arg)
 
 void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
 {
-       struct mlx5e_tc_flow_table *tc = &priv->fts.tc;
+       struct mlx5e_tc_table *tc = &priv->fs.tc;
 
        rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);
 
-       if (!IS_ERR_OR_NULL(priv->fts.tc.t)) {
-               mlx5_destroy_flow_table(priv->fts.tc.t);
-               priv->fts.tc.t = NULL;
+       if (!IS_ERR_OR_NULL(tc->t)) {
+               mlx5_destroy_flow_table(tc->t);
+               tc->t = NULL;
        }
 }
index d677428dc10f2a8aa425e2eb56aee661712777d3..a4f17b974d622aea98c1b14ab62ab999fc8edcad 100644 (file)
@@ -45,7 +45,7 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv,
 
 static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv)
 {
-       return atomic_read(&priv->fts.tc.ht.nelems);
+       return atomic_read(&priv->fs.tc.ht.nelems);
 }
 
 #endif /* __MLX5_EN_TC_H__ */
index 1ffc7cb6f78c4ab7de755ac751e5cf24f0b8a426..229ab16fb8d3a5c92afec2ece0a30e2de9a46236 100644 (file)
@@ -54,10 +54,11 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
 
        sq->skb[pi] = NULL;
        sq->pc++;
+       sq->stats.nop++;
 
        if (notify_hw) {
                cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
-               mlx5e_tx_notify_hw(sq, wqe, 0);
+               mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
        }
 }
 
@@ -309,7 +310,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
                        bf_sz = wi->num_wqebbs << 3;
 
                cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
-               mlx5e_tx_notify_hw(sq, wqe, bf_sz);
+               mlx5e_tx_notify_hw(sq, &wqe->ctrl, bf_sz);
        }
 
        /* fill sq edge with nops to avoid wqe wrap around */
@@ -387,7 +388,6 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
                        wi = &sq->wqe_info[ci];
 
                        if (unlikely(!skb)) { /* nop */
-                               sq->stats.nop++;
                                sqcc++;
                                continue;
                        }
index 9bb4395aceeb495156550821d34d6ea976a812d2..c38781fa567df10158b747fbee5c35135a3e7631 100644 (file)
@@ -49,6 +49,60 @@ struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq)
        return cqe;
 }
 
+static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
+{
+       struct mlx5_wq_cyc *wq;
+       struct mlx5_cqe64 *cqe;
+       struct mlx5e_sq *sq;
+       u16 sqcc;
+
+       cqe = mlx5e_get_cqe(cq);
+       if (likely(!cqe))
+               return;
+
+       sq = container_of(cq, struct mlx5e_sq, cq);
+       wq = &sq->wq;
+
+       /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+        * otherwise a cq overrun may occur
+        */
+       sqcc = sq->cc;
+
+       do {
+               u16 ci = be16_to_cpu(cqe->wqe_counter) & wq->sz_m1;
+               struct mlx5e_ico_wqe_info *icowi = &sq->ico_wqe_info[ci];
+
+               mlx5_cqwq_pop(&cq->wq);
+               sqcc += icowi->num_wqebbs;
+
+               if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) {
+                       WARN_ONCE(true, "mlx5e: Bad OP in ICOSQ CQE: 0x%x\n",
+                                 cqe->op_own);
+                       break;
+               }
+
+               switch (icowi->opcode) {
+               case MLX5_OPCODE_NOP:
+                       break;
+               case MLX5_OPCODE_UMR:
+                       mlx5e_post_rx_fragmented_mpwqe(&sq->channel->rq);
+                       break;
+               default:
+                       WARN_ONCE(true,
+                                 "mlx5e: Bad OPCODE in ICOSQ WQE info: 0x%x\n",
+                                 icowi->opcode);
+               }
+
+       } while ((cqe = mlx5e_get_cqe(cq)));
+
+       mlx5_cqwq_update_db_record(&cq->wq);
+
+       /* ensure cq space is freed before enabling more cqes */
+       wmb();
+
+       sq->cc = sqcc;
+}
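
mlx5e_poll_ico_cq() dispatches on a per-slot opcode, so the post side must record it. A sketch of the presumed producer bookkeeping (the UMR post path is referenced but not shown in this hunk):

    /* Presumed bookkeeping when posting on the ICO SQ, so the poller
     * above can advance sq->cc and dispatch the completion:
     */
    sq->ico_wqe_info[pi].opcode     = MLX5_OPCODE_UMR;
    sq->ico_wqe_info[pi].num_wqebbs = num_wqebbs;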
+
 int mlx5e_napi_poll(struct napi_struct *napi, int budget)
 {
        struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
@@ -64,6 +118,9 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
 
        work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
        busy |= work_done == budget;
+
+       mlx5e_poll_ico_cq(&c->icosq.cq);
+
        busy |= mlx5e_post_rx_wqes(&c->rq);
 
        if (busy)
@@ -80,6 +137,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
        for (i = 0; i < c->num_tc; i++)
                mlx5e_cq_arm(&c->sq[i].cq);
        mlx5e_cq_arm(&c->rq.cq);
+       mlx5e_cq_arm(&c->icosq.cq);
 
        return work_done;
 }
@@ -89,7 +147,6 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq)
        struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
 
        set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags);
-       barrier();
        napi_schedule(cq->napi);
 }
 
index bc3d9f8a75c1d86532d9b579b4b8c6f8a36f9f46..b84a6918a7006253c503cb924c347f2123854781 100644 (file)
@@ -77,16 +77,20 @@ struct vport_addr {
        u8                     action;
        u32                    vport;
        struct mlx5_flow_rule *flow_rule; /* SRIOV only */
+       /* A flag indicating that this MAC was added due to an mc promiscuous vport */
+       bool mc_promisc;
 };
 
 enum {
        UC_ADDR_CHANGE = BIT(0),
        MC_ADDR_CHANGE = BIT(1),
+       PROMISC_CHANGE = BIT(3),
 };
 
 /* Vport context events */
 #define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
-                           MC_ADDR_CHANGE)
+                           MC_ADDR_CHANGE | \
+                           PROMISC_CHANGE)
 
 static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
                                        u32 events_mask)
@@ -116,6 +120,9 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
        if (events_mask & MC_ADDR_CHANGE)
                MLX5_SET(nic_vport_context, nic_vport_ctx,
                         event_on_mc_address_change, 1);
+       if (events_mask & PROMISC_CHANGE)
+               MLX5_SET(nic_vport_context, nic_vport_ctx,
+                        event_on_promisc_change, 1);
 
        err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
        if (err)
@@ -323,30 +330,45 @@ static void del_l2_table_entry(struct mlx5_core_dev *dev, u32 index)
 
 /* E-Switch FDB */
 static struct mlx5_flow_rule *
-esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
+__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
+                        u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
 {
-       int match_header = MLX5_MATCH_OUTER_HEADERS;
-       struct mlx5_flow_destination dest;
+       int match_header = (is_zero_ether_addr(mac_c) ? 0 :
+                           MLX5_MATCH_OUTER_HEADERS);
        struct mlx5_flow_rule *flow_rule = NULL;
+       struct mlx5_flow_destination dest;
+       void *mv_misc = NULL;
+       void *mc_misc = NULL;
+       u8 *dmac_v = NULL;
+       u8 *dmac_c = NULL;
        u32 *match_v;
        u32 *match_c;
-       u8 *dmac_v;
-       u8 *dmac_c;
 
+       if (rx_rule)
+               match_header |= MLX5_MATCH_MISC_PARAMETERS;
        match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
        match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
        if (!match_v || !match_c) {
                pr_warn("FDB: Failed to alloc match parameters\n");
                goto out;
        }
+
        dmac_v = MLX5_ADDR_OF(fte_match_param, match_v,
                              outer_headers.dmac_47_16);
        dmac_c = MLX5_ADDR_OF(fte_match_param, match_c,
                              outer_headers.dmac_47_16);
 
-       ether_addr_copy(dmac_v, mac);
-       /* Match criteria mask */
-       memset(dmac_c, 0xff, 6);
+       if (match_header & MLX5_MATCH_OUTER_HEADERS) {
+               ether_addr_copy(dmac_v, mac_v);
+               ether_addr_copy(dmac_c, mac_c);
+       }
+
+       if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
+               mv_misc  = MLX5_ADDR_OF(fte_match_param, match_v, misc_parameters);
+               mc_misc  = MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters);
+               MLX5_SET(fte_match_set_misc, mv_misc, source_port, UPLINK_VPORT);
+               MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
+       }
 
        dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest.vport_num = vport;
@@ -373,6 +395,39 @@ out:
        return flow_rule;
 }
 
+static struct mlx5_flow_rule *
+esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
+{
+       u8 mac_c[ETH_ALEN];
+
+       eth_broadcast_addr(mac_c);
+       return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
+}
+
+static struct mlx5_flow_rule *
+esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport)
+{
+       u8 mac_c[ETH_ALEN];
+       u8 mac_v[ETH_ALEN];
+
+       eth_zero_addr(mac_c);
+       eth_zero_addr(mac_v);
+       mac_c[0] = 0x01;
+       mac_v[0] = 0x01;
+       return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
+}
+
+static struct mlx5_flow_rule *
+esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
+{
+       u8 mac_c[ETH_ALEN];
+       u8 mac_v[ETH_ALEN];
+
+       eth_zero_addr(mac_c);
+       eth_zero_addr(mac_v);
+       return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
+}
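
The three constructors above differ only in the DMAC mask/value and the rx_rule flag. What each match actually catches (the allmulti mask works because bit 0 of the first address octet is the Ethernet group/multicast bit):

    /* vport rule:    mask ff:ff:ff:ff:ff:ff, value <mac>
     *                -> exact DMAC match for one address
     * allmulti rule: mask 01:00:00:00:00:00, value 01:00:00:00:00:00
     *                -> any frame with the multicast bit set
     * promisc rule:  empty DMAC match, rx_rule = true
     *                -> matches only on source_port == UPLINK_VPORT,
     *                   catching traffic missed by the earlier groups
     */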
+
 static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
 {
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
@@ -401,34 +456,80 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
        memset(flow_group_in, 0, inlen);
 
        table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
-       fdb = mlx5_create_flow_table(root_ns, 0, table_size);
+       fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0);
        if (IS_ERR_OR_NULL(fdb)) {
                err = PTR_ERR(fdb);
                esw_warn(dev, "Failed to create FDB Table err %d\n", err);
                goto out;
        }
+       esw->fdb_table.fdb = fdb;
 
+       /* Addresses group: full-match unicast/multicast addresses */
        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
                 MLX5_MATCH_OUTER_HEADERS);
        match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
        dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
-       MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
+       /* Preserve 2 entries for the allmulti and promisc rules */
+       MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
        eth_broadcast_addr(dmac);
-
        g = mlx5_create_flow_group(fdb, flow_group_in);
        if (IS_ERR_OR_NULL(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create flow group err(%d)\n", err);
                goto out;
        }
-
        esw->fdb_table.addr_grp = g;
-       esw->fdb_table.fdb = fdb;
+
+       /* Allmulti group: one rule that forwards any mcast traffic */
+       MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+                MLX5_MATCH_OUTER_HEADERS);
+       MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
+       MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
+       eth_zero_addr(dmac);
+       dmac[0] = 0x01;
+       g = mlx5_create_flow_group(fdb, flow_group_in);
+       if (IS_ERR_OR_NULL(g)) {
+               err = PTR_ERR(g);
+               esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
+               goto out;
+       }
+       esw->fdb_table.allmulti_grp = g;
+
+       /* Promiscuous group:
+        * One rule that forwards all traffic unmatched by the previous groups
+        */
+       eth_zero_addr(dmac);
+       MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+                MLX5_MATCH_MISC_PARAMETERS);
+       MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
+       MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
+       MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
+       g = mlx5_create_flow_group(fdb, flow_group_in);
+       if (IS_ERR_OR_NULL(g)) {
+               err = PTR_ERR(g);
+               esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
+               goto out;
+       }
+       esw->fdb_table.promisc_grp = g;
+
 out:
+       if (err) {
+               if (!IS_ERR_OR_NULL(esw->fdb_table.allmulti_grp)) {
+                       mlx5_destroy_flow_group(esw->fdb_table.allmulti_grp);
+                       esw->fdb_table.allmulti_grp = NULL;
+               }
+               if (!IS_ERR_OR_NULL(esw->fdb_table.addr_grp)) {
+                       mlx5_destroy_flow_group(esw->fdb_table.addr_grp);
+                       esw->fdb_table.addr_grp = NULL;
+               }
+               if (!IS_ERR_OR_NULL(esw->fdb_table.fdb)) {
+                       mlx5_destroy_flow_table(esw->fdb_table.fdb);
+                       esw->fdb_table.fdb = NULL;
+               }
+       }
+
        kfree(flow_group_in);
-       if (err && !IS_ERR_OR_NULL(fdb))
-               mlx5_destroy_flow_table(fdb);
        return err;
 }
 
@@ -438,10 +539,14 @@ static void esw_destroy_fdb_table(struct mlx5_eswitch *esw)
                return;
 
        esw_debug(esw->dev, "Destroy FDB Table\n");
+       mlx5_destroy_flow_group(esw->fdb_table.promisc_grp);
+       mlx5_destroy_flow_group(esw->fdb_table.allmulti_grp);
        mlx5_destroy_flow_group(esw->fdb_table.addr_grp);
        mlx5_destroy_flow_table(esw->fdb_table.fdb);
        esw->fdb_table.fdb = NULL;
        esw->fdb_table.addr_grp = NULL;
+       esw->fdb_table.allmulti_grp = NULL;
+       esw->fdb_table.promisc_grp = NULL;
 }
 
 /* E-Switch vport UC/MC lists management */
@@ -511,6 +616,52 @@ static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
        return 0;
 }
 
+static void update_allmulti_vports(struct mlx5_eswitch *esw,
+                                  struct vport_addr *vaddr,
+                                  struct esw_mc_addr *esw_mc)
+{
+       u8 *mac = vaddr->node.addr;
+       u32 vport_idx = 0;
+
+       for (vport_idx = 0; vport_idx < esw->total_vports; vport_idx++) {
+               struct mlx5_vport *vport = &esw->vports[vport_idx];
+               struct hlist_head *vport_hash = vport->mc_list;
+               struct vport_addr *iter_vaddr =
+                                       l2addr_hash_find(vport_hash,
+                                                        mac,
+                                                        struct vport_addr);
+               if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
+                   vaddr->vport == vport_idx)
+                       continue;
+               switch (vaddr->action) {
+               case MLX5_ACTION_ADD:
+                       if (iter_vaddr)
+                               continue;
+                       iter_vaddr = l2addr_hash_add(vport_hash, mac,
+                                                    struct vport_addr,
+                                                    GFP_KERNEL);
+                       if (!iter_vaddr) {
+                               esw_warn(esw->dev,
+                                        "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
+                                        mac, vport_idx);
+                               continue;
+                       }
+                       iter_vaddr->vport = vport_idx;
+                       iter_vaddr->flow_rule =
+                                       esw_fdb_set_vport_rule(esw,
+                                                              mac,
+                                                              vport_idx);
+                       break;
+               case MLX5_ACTION_DEL:
+                       if (!iter_vaddr)
+                               continue;
+                       mlx5_del_flow_rule(iter_vaddr->flow_rule);
+                       l2addr_hash_del(iter_vaddr);
+                       break;
+               }
+       }
+}
+
 static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
 {
        struct hlist_head *hash = esw->mc_table;
@@ -531,8 +682,17 @@ static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
 
        esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
                esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT);
+
+       /* Add this multicast mac to all the mc promiscuous vports */
+       update_allmulti_vports(esw, vaddr, esw_mc);
+
 add:
-       esw_mc->refcnt++;
+       /* If the multicast MAC was added as a result of an mc promiscuous
+        * vport, don't increment the multicast ref count.
+        */
+       if (!vaddr->mc_promisc)
+               esw_mc->refcnt++;
+
        /* Forward MC MAC to vport */
        vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
        esw_debug(esw->dev,
@@ -568,9 +728,15 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
                mlx5_del_flow_rule(vaddr->flow_rule);
        vaddr->flow_rule = NULL;
 
-       if (--esw_mc->refcnt)
+       /* If the multicast MAC was added as a result of an mc promiscuous
+        * vport, don't decrement the multicast ref count.
+        */
+       if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
                return 0;
 
+       /* Remove this multicast mac from all the mc promiscuous vports */
+       update_allmulti_vports(esw, vaddr, esw_mc);
+
        if (esw_mc->uplink_rule)
                mlx5_del_flow_rule(esw_mc->uplink_rule);
 
@@ -643,10 +809,13 @@ static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
                addr->action = MLX5_ACTION_DEL;
        }
 
+       if (!vport->enabled)
+               goto out;
+
        err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type,
                                            mac_list, &size);
        if (err)
-               return;
+               goto out;
        esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
                  vport_num, is_uc ? "UC" : "MC", size);
 
@@ -660,6 +829,24 @@ static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
                addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
                if (addr) {
                        addr->action = MLX5_ACTION_NONE;
+                       /* If this MAC was previously added because of allmulti
+                        * promiscuous rx mode, it is now converted to an
+                        * original vport MAC.
+                        */
+                       if (addr->mc_promisc) {
+                               struct esw_mc_addr *esw_mc =
+                                       l2addr_hash_find(esw->mc_table,
+                                                        mac_list[i],
+                                                        struct esw_mc_addr);
+                               if (!esw_mc) {
+                                       esw_warn(esw->dev,
+                                                "Failed to MAC(%pM) in mcast DB\n",
+                                                mac_list[i]);
+                                       continue;
+                               }
+                               esw_mc->refcnt++;
+                               addr->mc_promisc = false;
+                       }
                        continue;
                }
 
@@ -674,13 +861,121 @@ static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
                addr->vport = vport_num;
                addr->action = MLX5_ACTION_ADD;
        }
+out:
        kfree(mac_list);
 }
 
-static void esw_vport_change_handler(struct work_struct *work)
+/* Sync the vport multicast promiscuous list with the eswitch mcast table.
+ * Must be called after esw_update_vport_addr_list().
+ */
+static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u32 vport_num)
+{
+       struct mlx5_vport *vport = &esw->vports[vport_num];
+       struct l2addr_node *node;
+       struct vport_addr *addr;
+       struct hlist_head *hash;
+       struct hlist_node *tmp;
+       int hi;
+
+       hash = vport->mc_list;
+
+       for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
+               u8 *mac = node->addr;
+
+               addr = l2addr_hash_find(hash, mac, struct vport_addr);
+               if (addr) {
+                       if (addr->action == MLX5_ACTION_DEL)
+                               addr->action = MLX5_ACTION_NONE;
+                       continue;
+               }
+               addr = l2addr_hash_add(hash, mac, struct vport_addr,
+                                      GFP_KERNEL);
+               if (!addr) {
+                       esw_warn(esw->dev,
+                                "Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
+                                mac, vport_num);
+                       continue;
+               }
+               addr->vport = vport_num;
+               addr->action = MLX5_ACTION_ADD;
+               addr->mc_promisc = true;
+       }
+}
+
+/* Apply vport rx mode to HW FDB table */
+static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num,
+                                   bool promisc, bool mc_promisc)
+{
+       struct esw_mc_addr *allmulti_addr = esw->mc_promisc;
+       struct mlx5_vport *vport = &esw->vports[vport_num];
+
+       if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
+               goto promisc;
+
+       if (mc_promisc) {
+               vport->allmulti_rule =
+                               esw_fdb_set_vport_allmulti_rule(esw, vport_num);
+               if (!allmulti_addr->uplink_rule)
+                       allmulti_addr->uplink_rule =
+                               esw_fdb_set_vport_allmulti_rule(esw,
+                                                               UPLINK_VPORT);
+               allmulti_addr->refcnt++;
+       } else if (vport->allmulti_rule) {
+               mlx5_del_flow_rule(vport->allmulti_rule);
+               vport->allmulti_rule = NULL;
+
+               if (--allmulti_addr->refcnt > 0)
+                       goto promisc;
+
+               if (allmulti_addr->uplink_rule)
+                       mlx5_del_flow_rule(allmulti_addr->uplink_rule);
+               allmulti_addr->uplink_rule = NULL;
+       }
+
+promisc:
+       if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc)
+               return;
+
+       if (promisc) {
+               vport->promisc_rule = esw_fdb_set_vport_promisc_rule(esw,
+                                                                    vport_num);
+       } else if (vport->promisc_rule) {
+               mlx5_del_flow_rule(vport->promisc_rule);
+               vport->promisc_rule = NULL;
+       }
+}
+
+/* Sync vport rx mode from vport context */
+static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num)
+{
+       struct mlx5_vport *vport = &esw->vports[vport_num];
+       int promisc_all = 0;
+       int promisc_uc = 0;
+       int promisc_mc = 0;
+       int err;
+
+       err = mlx5_query_nic_vport_promisc(esw->dev,
+                                          vport_num,
+                                          &promisc_uc,
+                                          &promisc_mc,
+                                          &promisc_all);
+       if (err)
+               return;
+       esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
+                 vport_num, promisc_all, promisc_mc);
+
+       if (!vport->trusted || !vport->enabled) {
+               promisc_uc = 0;
+               promisc_mc = 0;
+               promisc_all = 0;
+       }
+
+       esw_apply_vport_rx_mode(esw, vport_num, promisc_all,
+                               (promisc_all || promisc_mc));
+}
+
+static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
 {
-       struct mlx5_vport *vport =
-               container_of(work, struct mlx5_vport, vport_change_handler);
        struct mlx5_core_dev *dev = vport->dev;
        struct mlx5_eswitch *esw = dev->priv.eswitch;
        u8 mac[ETH_ALEN];
@@ -699,6 +994,15 @@ static void esw_vport_change_handler(struct work_struct *work)
        if (vport->enabled_events & MC_ADDR_CHANGE) {
                esw_update_vport_addr_list(esw, vport->vport,
                                           MLX5_NVPRT_LIST_TYPE_MC);
+       }
+
+       if (vport->enabled_events & PROMISC_CHANGE) {
+               esw_update_vport_rx_mode(esw, vport->vport);
+               if (!IS_ERR_OR_NULL(vport->allmulti_rule))
+                       esw_update_vport_mc_promisc(esw, vport->vport);
+       }
+
+       if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE)) {
                esw_apply_vport_addr_list(esw, vport->vport,
                                          MLX5_NVPRT_LIST_TYPE_MC);
        }
@@ -709,15 +1013,477 @@ static void esw_vport_change_handler(struct work_struct *work)
                                             vport->enabled_events);
 }
 
+static void esw_vport_change_handler(struct work_struct *work)
+{
+       struct mlx5_vport *vport =
+               container_of(work, struct mlx5_vport, vport_change_handler);
+       struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
+
+       mutex_lock(&esw->state_lock);
+       esw_vport_change_handle_locked(vport);
+       mutex_unlock(&esw->state_lock);
+}
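
Editor's note: this is the common locked/unlocked split: the workqueue callback becomes a lock-taking wrapper, so synchronous paths that already hold esw->state_lock (vport enable/disable, the trust setter further down) can call the _locked helper directly without self-deadlocking. A userspace analogue:

#include <pthread.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static void handle_change_locked(void)
{
        /* real work; caller must hold state_lock */
}

/* Async entry point (the driver's work item): take the lock, then
 * share the _locked body with the synchronous callers. */
static void handle_change(void)
{
        pthread_mutex_lock(&state_lock);
        handle_change_locked();
        pthread_mutex_unlock(&state_lock);
}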
+
+static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
+                                       struct mlx5_vport *vport)
+{
+       int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+       struct mlx5_flow_group *vlan_grp = NULL;
+       struct mlx5_flow_group *drop_grp = NULL;
+       struct mlx5_core_dev *dev = esw->dev;
+       struct mlx5_flow_namespace *root_ns;
+       struct mlx5_flow_table *acl;
+       void *match_criteria;
+       u32 *flow_group_in;
+       /* The egress ACL table contains 2 rules:
+        * 1) Allow traffic with vlan_tag == vst_vlan_id.
+        * 2) Drop all other traffic.
+        */
+       int table_size = 2;
+       int err = 0;
+
+       if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support) ||
+           !IS_ERR_OR_NULL(vport->egress.acl))
+               return;
+
+       esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
+                 vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
+
+       root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
+       if (!root_ns) {
+               esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
+               return;
+       }
+
+       flow_group_in = mlx5_vzalloc(inlen);
+       if (!flow_group_in)
+               return;
+
+       acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
+       if (IS_ERR_OR_NULL(acl)) {
+               err = PTR_ERR(acl);
+               esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
+                        vport->vport, err);
+               goto out;
+       }
+
+       MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+       match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+       MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
+       MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
+       MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+       MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
+
+       vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
+       if (IS_ERR_OR_NULL(vlan_grp)) {
+               err = PTR_ERR(vlan_grp);
+               esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
+                        vport->vport, err);
+               goto out;
+       }
+
+       memset(flow_group_in, 0, inlen);
+       MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
+       MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
+       drop_grp = mlx5_create_flow_group(acl, flow_group_in);
+       if (IS_ERR_OR_NULL(drop_grp)) {
+               err = PTR_ERR(drop_grp);
+               esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
+                        vport->vport, err);
+               goto out;
+       }
+
+       vport->egress.acl = acl;
+       vport->egress.drop_grp = drop_grp;
+       vport->egress.allowed_vlans_grp = vlan_grp;
+out:
+       kfree(flow_group_in);
+       if (err && !IS_ERR_OR_NULL(vlan_grp))
+               mlx5_destroy_flow_group(vlan_grp);
+       if (err && !IS_ERR_OR_NULL(acl))
+               mlx5_destroy_flow_table(acl);
+}
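
Editor's note: start_flow_index/end_flow_index carve the two-entry table into two one-entry groups; a packet that misses the allowed-VLAN group falls through to the drop group. The resulting layout, as a small C model (editor's illustration, not driver code):

struct group_range { int start, end; const char *match; };

static const struct group_range egress_acl[] = {
        { 0, 0, "vlan_tag + first_vid  (allowed VST vlan)" },
        { 1, 1, "no criteria           (catch-all drop)"   },
};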
+
+static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
+                                          struct mlx5_vport *vport)
+{
+       if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
+               mlx5_del_flow_rule(vport->egress.allowed_vlan);
+
+       if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
+               mlx5_del_flow_rule(vport->egress.drop_rule);
+
+       vport->egress.allowed_vlan = NULL;
+       vport->egress.drop_rule = NULL;
+}
+
+static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
+                                        struct mlx5_vport *vport)
+{
+       if (IS_ERR_OR_NULL(vport->egress.acl))
+               return;
+
+       esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);
+
+       esw_vport_cleanup_egress_rules(esw, vport);
+       mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp);
+       mlx5_destroy_flow_group(vport->egress.drop_grp);
+       mlx5_destroy_flow_table(vport->egress.acl);
+       vport->egress.allowed_vlans_grp = NULL;
+       vport->egress.drop_grp = NULL;
+       vport->egress.acl = NULL;
+}
+
+static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
+                                        struct mlx5_vport *vport)
+{
+       int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+       struct mlx5_core_dev *dev = esw->dev;
+       struct mlx5_flow_namespace *root_ns;
+       struct mlx5_flow_table *acl;
+       struct mlx5_flow_group *g;
+       void *match_criteria;
+       u32 *flow_group_in;
+       /* The ingress ACL table contains 4 groups
+        * (at most 2 rules are active at the same time:
+        *      1 allow rule from one of the first 3 groups,
+        *      1 drop rule from the last group):
+        * 1) Allow untagged traffic with smac == original MAC.
+        * 2) Allow untagged traffic.
+        * 3) Allow traffic with smac == original MAC.
+        * 4) Drop all other traffic.
+        */
+       int table_size = 4;
+       int err = 0;
+
+       if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support) ||
+           !IS_ERR_OR_NULL(vport->ingress.acl))
+               return;
+
+       esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
+                 vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
+
+       root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
+       if (!root_ns) {
+               esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
+               return;
+       }
+
+       flow_group_in = mlx5_vzalloc(inlen);
+       if (!flow_group_in)
+               return;
+
+       acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
+       if (IS_ERR_OR_NULL(acl)) {
+               err = PTR_ERR(acl);
+               esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
+                        vport->vport, err);
+               goto out;
+       }
+       vport->ingress.acl = acl;
+
+       match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+
+       MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+       MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
+       MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
+       MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
+       MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+       MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
+
+       g = mlx5_create_flow_group(acl, flow_group_in);
+       if (IS_ERR_OR_NULL(g)) {
+               err = PTR_ERR(g);
+               esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
+                        vport->vport, err);
+               goto out;
+       }
+       vport->ingress.allow_untagged_spoofchk_grp = g;
+
+       memset(flow_group_in, 0, inlen);
+       MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+       MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
+       MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
+       MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
+
+       g = mlx5_create_flow_group(acl, flow_group_in);
+       if (IS_ERR_OR_NULL(g)) {
+               err = PTR_ERR(g);
+               esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
+                        vport->vport, err);
+               goto out;
+       }
+       vport->ingress.allow_untagged_only_grp = g;
+
+       memset(flow_group_in, 0, inlen);
+       MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+       MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
+       MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
+       MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
+       MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
+
+       g = mlx5_create_flow_group(acl, flow_group_in);
+       if (IS_ERR_OR_NULL(g)) {
+               err = PTR_ERR(g);
+               esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
+                        vport->vport, err);
+               goto out;
+       }
+       vport->ingress.allow_spoofchk_only_grp = g;
+
+       memset(flow_group_in, 0, inlen);
+       MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
+       MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
+
+       g = mlx5_create_flow_group(acl, flow_group_in);
+       if (IS_ERR_OR_NULL(g)) {
+               err = PTR_ERR(g);
+               esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
+                        vport->vport, err);
+               goto out;
+       }
+       vport->ingress.drop_grp = g;
+
+out:
+       if (err) {
+               if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp))
+                       mlx5_destroy_flow_group(
+                                       vport->ingress.allow_spoofchk_only_grp);
+               if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp))
+                       mlx5_destroy_flow_group(
+                                       vport->ingress.allow_untagged_only_grp);
+               if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp))
+                       mlx5_destroy_flow_group(
+                               vport->ingress.allow_untagged_spoofchk_grp);
+               if (!IS_ERR_OR_NULL(vport->ingress.acl))
+                       mlx5_destroy_flow_table(vport->ingress.acl);
+       }
+
+       kfree(flow_group_in);
+}
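
Editor's note: as with the egress table, each group owns exactly one flow index; exactly one of the three allow groups ends up populated at a time, depending on which of vlan/qos and spoofchk are configured, alongside the always-present drop rule. A C model of the layout (editor's illustration):

struct group_range { int start, end; const char *match; };

static const struct group_range ingress_acl[] = {
        { 0, 0, "vlan_tag + smac  (untagged + spoofchk)" },
        { 1, 1, "vlan_tag only    (untagged)"            },
        { 2, 2, "smac only        (spoofchk)"            },
        { 3, 3, "no criteria      (catch-all drop)"      },
};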
+
+static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
+                                           struct mlx5_vport *vport)
+{
+       if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
+               mlx5_del_flow_rule(vport->ingress.drop_rule);
+
+       if (!IS_ERR_OR_NULL(vport->ingress.allow_rule))
+               mlx5_del_flow_rule(vport->ingress.allow_rule);
+
+       vport->ingress.drop_rule = NULL;
+       vport->ingress.allow_rule = NULL;
+}
+
+static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
+                                         struct mlx5_vport *vport)
+{
+       if (IS_ERR_OR_NULL(vport->ingress.acl))
+               return;
+
+       esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);
+
+       esw_vport_cleanup_ingress_rules(esw, vport);
+       mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp);
+       mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp);
+       mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp);
+       mlx5_destroy_flow_group(vport->ingress.drop_grp);
+       mlx5_destroy_flow_table(vport->ingress.acl);
+       vport->ingress.acl = NULL;
+       vport->ingress.drop_grp = NULL;
+       vport->ingress.allow_spoofchk_only_grp = NULL;
+       vport->ingress.allow_untagged_only_grp = NULL;
+       vport->ingress.allow_untagged_spoofchk_grp = NULL;
+}
+
+static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
+                                   struct mlx5_vport *vport)
+{
+       u8 smac[ETH_ALEN];
+       u32 *match_v;
+       u32 *match_c;
+       int err = 0;
+       u8 *smac_v;
+
+       if (vport->spoofchk) {
+               err = mlx5_query_nic_vport_mac_address(esw->dev, vport->vport, smac);
+               if (err) {
+                       esw_warn(esw->dev,
+                                "vport[%d] configure ingress rules failed, query smac failed, err(%d)\n",
+                                vport->vport, err);
+                       return err;
+               }
+
+               if (!is_valid_ether_addr(smac)) {
+                       mlx5_core_warn(esw->dev,
+                                      "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
+                                      vport->vport);
+                       return -EPERM;
+               }
+       }
+
+       esw_vport_cleanup_ingress_rules(esw, vport);
+
+       if (!vport->vlan && !vport->qos && !vport->spoofchk) {
+               esw_vport_disable_ingress_acl(esw, vport);
+               return 0;
+       }
+
+       esw_vport_enable_ingress_acl(esw, vport);
+
+       esw_debug(esw->dev,
+                 "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
+                 vport->vport, vport->vlan, vport->qos);
+
+       match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+       match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+       if (!match_v || !match_c) {
+               err = -ENOMEM;
+               esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n",
+                        vport->vport, err);
+               goto out;
+       }
+
+       if (vport->vlan || vport->qos)
+               MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag);
+
+       if (vport->spoofchk) {
+               MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.smac_47_16);
+               MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.smac_15_0);
+               smac_v = MLX5_ADDR_OF(fte_match_param,
+                                     match_v,
+                                     outer_headers.smac_47_16);
+               ether_addr_copy(smac_v, smac);
+       }
+
+       vport->ingress.allow_rule =
+               mlx5_add_flow_rule(vport->ingress.acl,
+                                  MLX5_MATCH_OUTER_HEADERS,
+                                  match_c,
+                                  match_v,
+                                  MLX5_FLOW_CONTEXT_ACTION_ALLOW,
+                                  0, NULL);
+       if (IS_ERR_OR_NULL(vport->ingress.allow_rule)) {
+               err = PTR_ERR(vport->ingress.allow_rule);
+               pr_warn("vport[%d] configure ingress allow rule, err(%d)\n",
+                       vport->vport, err);
+               vport->ingress.allow_rule = NULL;
+               goto out;
+       }
+
+       memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param));
+       memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param));
+       vport->ingress.drop_rule =
+               mlx5_add_flow_rule(vport->ingress.acl,
+                                  0,
+                                  match_c,
+                                  match_v,
+                                  MLX5_FLOW_CONTEXT_ACTION_DROP,
+                                  0, NULL);
+       if (IS_ERR_OR_NULL(vport->ingress.drop_rule)) {
+               err = PTR_ERR(vport->ingress.drop_rule);
+               pr_warn("vport[%d] configure ingress drop rule, err(%d)\n",
+                       vport->vport, err);
+               vport->ingress.drop_rule = NULL;
+               goto out;
+       }
+
+out:
+       if (err)
+               esw_vport_cleanup_ingress_rules(esw, vport);
+
+       kfree(match_v);
+       kfree(match_c);
+       return err;
+}
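
Editor's note: the net effect is a two-rule, first-match pipeline: one allow rule whose criteria depend on the configuration (vlan/qos requires untagged traffic, since match_v leaves vlan_tag at 0; spoofchk pins the source MAC), then an unconditional drop. A userspace model of the verdict, with hypothetical inputs:

#include <string.h>

enum verdict { VERDICT_ALLOW, VERDICT_DROP };

struct pkt { int vlan_tagged; unsigned char smac[6]; };

static enum verdict ingress_eval(const struct pkt *p,
                                 int require_untagged,           /* vlan/qos set */
                                 const unsigned char *want_smac) /* spoofchk MAC */
{
        if (require_untagged && p->vlan_tagged)
                return VERDICT_DROP;            /* falls to catch-all */
        if (want_smac && memcmp(p->smac, want_smac, 6))
                return VERDICT_DROP;
        return VERDICT_ALLOW;
}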
+
+static int esw_vport_egress_config(struct mlx5_eswitch *esw,
+                                  struct mlx5_vport *vport)
+{
+       u32 *match_v;
+       u32 *match_c;
+       int err = 0;
+
+       esw_vport_cleanup_egress_rules(esw, vport);
+
+       if (!vport->vlan && !vport->qos) {
+               esw_vport_disable_egress_acl(esw, vport);
+               return 0;
+       }
+
+       esw_vport_enable_egress_acl(esw, vport);
+
+       esw_debug(esw->dev,
+                 "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
+                 vport->vport, vport->vlan, vport->qos);
+
+       match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+       match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+       if (!match_v || !match_c) {
+               err = -ENOMEM;
+               esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n",
+                        vport->vport, err);
+               goto out;
+       }
+
+       /* Allowed vlan rule */
+       MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag);
+       MLX5_SET_TO_ONES(fte_match_param, match_v, outer_headers.vlan_tag);
+       MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.first_vid);
+       MLX5_SET(fte_match_param, match_v, outer_headers.first_vid, vport->vlan);
+
+       vport->egress.allowed_vlan =
+               mlx5_add_flow_rule(vport->egress.acl,
+                                  MLX5_MATCH_OUTER_HEADERS,
+                                  match_c,
+                                  match_v,
+                                  MLX5_FLOW_CONTEXT_ACTION_ALLOW,
+                                  0, NULL);
+       if (IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {
+               err = PTR_ERR(vport->egress.allowed_vlan);
+               pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
+                       vport->vport, err);
+               vport->egress.allowed_vlan = NULL;
+               goto out;
+       }
+
+       /* Drop others rule (star rule) */
+       memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param));
+       memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param));
+       vport->egress.drop_rule =
+               mlx5_add_flow_rule(vport->egress.acl,
+                                  0,
+                                  match_c,
+                                  match_v,
+                                  MLX5_FLOW_CONTEXT_ACTION_DROP,
+                                  0, NULL);
+       if (IS_ERR_OR_NULL(vport->egress.drop_rule)) {
+               err = PTR_ERR(vport->egress.drop_rule);
+               pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n",
+                       vport->vport, err);
+               vport->egress.drop_rule = NULL;
+       }
+out:
+       kfree(match_v);
+       kfree(match_c);
+       return err;
+}
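
Editor's note: egress mirrors ingress under VST: the egress ACL admits only frames tagged with the configured VST vlan (first_vid == vport->vlan) and the star rule drops the rest. Together the two ACLs back the standard `ip link set <pf> vf <n> vlan <vid> qos <qos>` administration path.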
+
 static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
                             int enable_events)
 {
        struct mlx5_vport *vport = &esw->vports[vport_num];
-       unsigned long flags;
 
+       mutex_lock(&esw->state_lock);
        WARN_ON(vport->enabled);
 
        esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
+
+       if (vport_num) { /* Only VFs need ACLs for VST and spoofchk filtering */
+               esw_vport_ingress_config(esw, vport);
+               esw_vport_egress_config(esw, vport);
+       }
+
        mlx5_modify_vport_admin_state(esw->dev,
                                      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
                                      vport_num,
@@ -725,53 +1491,32 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
 
        /* Sync with current vport context */
        vport->enabled_events = enable_events;
-       esw_vport_change_handler(&vport->vport_change_handler);
+       esw_vport_change_handle_locked(vport);
 
-       spin_lock_irqsave(&vport->lock, flags);
        vport->enabled = true;
-       spin_unlock_irqrestore(&vport->lock, flags);
+
+       /* only PF is trusted by default */
+       vport->trusted = (vport_num) ? false : true;
 
        arm_vport_context_events_cmd(esw->dev, vport_num, enable_events);
 
        esw->enabled_vports++;
        esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
-}
-
-static void esw_cleanup_vport(struct mlx5_eswitch *esw, u16 vport_num)
-{
-       struct mlx5_vport *vport = &esw->vports[vport_num];
-       struct l2addr_node *node;
-       struct vport_addr *addr;
-       struct hlist_node *tmp;
-       int hi;
-
-       for_each_l2hash_node(node, tmp, vport->uc_list, hi) {
-               addr = container_of(node, struct vport_addr, node);
-               addr->action = MLX5_ACTION_DEL;
-       }
-       esw_apply_vport_addr_list(esw, vport_num, MLX5_NVPRT_LIST_TYPE_UC);
-
-       for_each_l2hash_node(node, tmp, vport->mc_list, hi) {
-               addr = container_of(node, struct vport_addr, node);
-               addr->action = MLX5_ACTION_DEL;
-       }
-       esw_apply_vport_addr_list(esw, vport_num, MLX5_NVPRT_LIST_TYPE_MC);
+       mutex_unlock(&esw->state_lock);
 }
 
 static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
 {
        struct mlx5_vport *vport = &esw->vports[vport_num];
-       unsigned long flags;
 
        if (!vport->enabled)
                return;
 
        esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
        /* Mark this vport as disabled to discard new events */
-       spin_lock_irqsave(&vport->lock, flags);
        vport->enabled = false;
-       vport->enabled_events = 0;
-       spin_unlock_irqrestore(&vport->lock, flags);
+
+       synchronize_irq(mlx5_get_msix_vec(esw->dev, MLX5_EQ_VEC_ASYNC));
 
        mlx5_modify_vport_admin_state(esw->dev,
                                      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
@@ -781,9 +1526,19 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
        flush_workqueue(esw->work_queue);
        /* Disable events from this vport */
        arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
-       /* We don't assume VFs will cleanup after themselves */
-       esw_cleanup_vport(esw, vport_num);
+       mutex_lock(&esw->state_lock);
+       /* We don't assume VFs will clean up after themselves.
+        * Calling the vport change handler while the vport is disabled
+        * cleans up the vport's resources.
+        */
+       esw_vport_change_handle_locked(vport);
+       vport->enabled_events = 0;
+       if (vport_num) {
+               esw_vport_disable_egress_acl(esw, vport);
+               esw_vport_disable_ingress_acl(esw, vport);
+       }
        esw->enabled_vports--;
+       mutex_unlock(&esw->state_lock);
 }
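
Editor's note on the teardown ordering above: clearing vport->enabled first makes mlx5_eswitch_vport_event() drop new events; synchronize_irq() on the async EQ vector then waits out any event handler already running, and flush_workqueue() drains changes queued before the flag flipped. Only then is the change handler run one last time, under state_lock, to sweep the vport's UC/MC/promisc state out of the FDB.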
 
 /* Public E-Switch API */
@@ -802,6 +1557,12 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs)
                return -ENOTSUPP;
        }
 
+       if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
+               esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n");
+
+       if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
+               esw_warn(esw->dev, "E-Switch engress ACL is not supported by FW\n");
+
        esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d)\n", nvfs);
 
        esw_disable_vport(esw, 0);
@@ -824,6 +1585,7 @@ abort:
 
 void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
 {
+       struct esw_mc_addr *mc_promisc;
        int i;
 
        if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
@@ -833,9 +1595,14 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
        esw_info(esw->dev, "disable SRIOV: active vports(%d)\n",
                 esw->enabled_vports);
 
+       mc_promisc = esw->mc_promisc;
+
        for (i = 0; i < esw->total_vports; i++)
                esw_disable_vport(esw, i);
 
+       if (mc_promisc && mc_promisc->uplink_rule)
+               mlx5_del_flow_rule(mc_promisc->uplink_rule);
+
        esw_destroy_fdb_table(esw);
 
        /* VPORT 0 (PF) must be enabled back with non-sriov configuration */
@@ -845,7 +1612,8 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
 int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 {
        int l2_table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
-       int total_vports = 1 + pci_sriov_get_totalvfs(dev->pdev);
+       int total_vports = MLX5_TOTAL_VPORTS(dev);
+       struct esw_mc_addr *mc_promisc;
        struct mlx5_eswitch *esw;
        int vport_num;
        int err;
@@ -874,6 +1642,13 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
        }
        esw->l2_table.size = l2_table_size;
 
+       mc_promisc = kzalloc(sizeof(*mc_promisc), GFP_KERNEL);
+       if (!mc_promisc) {
+               err = -ENOMEM;
+               goto abort;
+       }
+       esw->mc_promisc = mc_promisc;
+
        esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
        if (!esw->work_queue) {
                err = -ENOMEM;
@@ -887,6 +1662,8 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
                goto abort;
        }
 
+       mutex_init(&esw->state_lock);
+
        for (vport_num = 0; vport_num < total_vports; vport_num++) {
                struct mlx5_vport *vport = &esw->vports[vport_num];
 
@@ -894,7 +1671,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
                vport->dev = dev;
                INIT_WORK(&vport->vport_change_handler,
                          esw_vport_change_handler);
-               spin_lock_init(&vport->lock);
        }
 
        esw->total_vports = total_vports;
@@ -925,6 +1701,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
        esw->dev->priv.eswitch = NULL;
        destroy_workqueue(esw->work_queue);
        kfree(esw->l2_table.bitmap);
+       kfree(esw->mc_promisc);
        kfree(esw->vports);
        kfree(esw);
 }
@@ -942,10 +1719,8 @@ void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
        }
 
        vport = &esw->vports[vport_num];
-       spin_lock(&vport->lock);
        if (vport->enabled)
                queue_work(esw->work_queue, &vport->vport_change_handler);
-       spin_unlock(&vport->lock);
 }
 
 /* Vport Administration */
@@ -957,12 +1732,22 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
                               int vport, u8 mac[ETH_ALEN])
 {
        int err = 0;
+       struct mlx5_vport *evport;
 
        if (!ESW_ALLOWED(esw))
                return -EPERM;
        if (!LEGAL_VPORT(esw, vport))
                return -EINVAL;
 
+       evport = &esw->vports[vport];
+
+       if (evport->spoofchk && !is_valid_ether_addr(mac)) {
+               mlx5_core_warn(esw->dev,
+                              "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n",
+                              vport);
+               return -EPERM;
+       }
+
        err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
        if (err) {
                mlx5_core_warn(esw->dev,
@@ -971,6 +1756,11 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
                return err;
        }
 
+       mutex_lock(&esw->state_lock);
+       if (evport->enabled)
+               err = esw_vport_ingress_config(esw, evport);
+       mutex_unlock(&esw->state_lock);
+
        return err;
 }
 
@@ -990,6 +1780,7 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
                                  int vport, struct ifla_vf_info *ivi)
 {
+       struct mlx5_vport *evport;
        u16 vlan;
        u8 qos;
 
@@ -998,6 +1789,8 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
        if (!LEGAL_VPORT(esw, vport))
                return -EINVAL;
 
+       evport = &esw->vports[vport];
+
        memset(ivi, 0, sizeof(*ivi));
        ivi->vf = vport - 1;
 
@@ -1008,7 +1801,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
        query_esw_vport_cvlan(esw->dev, vport, &vlan, &qos);
        ivi->vlan = vlan;
        ivi->qos = qos;
-       ivi->spoofchk = 0;
+       ivi->spoofchk = evport->spoofchk;
 
        return 0;
 }
@@ -1016,6 +1809,8 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
 int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
                                int vport, u16 vlan, u8 qos)
 {
+       struct mlx5_vport *evport;
+       int err = 0;
        int set = 0;
 
        if (!ESW_ALLOWED(esw))
@@ -1026,7 +1821,72 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
        if (vlan || qos)
                set = 1;
 
-       return modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set);
+       evport = &esw->vports[vport];
+
+       err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set);
+       if (err)
+               return err;
+
+       mutex_lock(&esw->state_lock);
+       evport->vlan = vlan;
+       evport->qos = qos;
+       if (evport->enabled) {
+               err = esw_vport_ingress_config(esw, evport);
+               if (err)
+                       goto out;
+               err = esw_vport_egress_config(esw, evport);
+       }
+
+out:
+       mutex_unlock(&esw->state_lock);
+       return err;
+}
+
+int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
+                                   int vport, bool spoofchk)
+{
+       struct mlx5_vport *evport;
+       bool pschk;
+       int err = 0;
+
+       if (!ESW_ALLOWED(esw))
+               return -EPERM;
+       if (!LEGAL_VPORT(esw, vport))
+               return -EINVAL;
+
+       evport = &esw->vports[vport];
+
+       mutex_lock(&esw->state_lock);
+       pschk = evport->spoofchk;
+       evport->spoofchk = spoofchk;
+       if (evport->enabled)
+               err = esw_vport_ingress_config(esw, evport);
+       if (err)
+               evport->spoofchk = pschk;
+       mutex_unlock(&esw->state_lock);
+
+       return err;
+}
+
+int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
+                                int vport, bool setting)
+{
+       struct mlx5_vport *evport;
+
+       if (!ESW_ALLOWED(esw))
+               return -EPERM;
+       if (!LEGAL_VPORT(esw, vport))
+               return -EINVAL;
+
+       evport = &esw->vports[vport];
+
+       mutex_lock(&esw->state_lock);
+       evport->trusted = setting;
+       if (evport->enabled)
+               esw_vport_change_handle_locked(evport);
+       mutex_unlock(&esw->state_lock);
+
+       return 0;
 }
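
Editor's note: trust feeds the rx-mode path above; esw_update_vport_rx_mode() zeroes the promisc/allmulti requests of untrusted vports, so flipping trust simply re-runs the change handler to apply or revoke them. This is the hook behind `ip link set <pf> vf <n> trust on|off`.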
 
 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
index 3416a428f70fe0766af3c9854b74e4ef00ce7d4b..fd6800256d4a316cfcb7d49c9f4529355e5d7b8e 100644 (file)
@@ -88,18 +88,40 @@ struct l2addr_node {
        kfree(ptr);                                         \
 })
 
+struct vport_ingress {
+       struct mlx5_flow_table *acl;
+       struct mlx5_flow_group *allow_untagged_spoofchk_grp;
+       struct mlx5_flow_group *allow_spoofchk_only_grp;
+       struct mlx5_flow_group *allow_untagged_only_grp;
+       struct mlx5_flow_group *drop_grp;
+       struct mlx5_flow_rule  *allow_rule;
+       struct mlx5_flow_rule  *drop_rule;
+};
+
+struct vport_egress {
+       struct mlx5_flow_table *acl;
+       struct mlx5_flow_group *allowed_vlans_grp;
+       struct mlx5_flow_group *drop_grp;
+       struct mlx5_flow_rule  *allowed_vlan;
+       struct mlx5_flow_rule  *drop_rule;
+};
+
 struct mlx5_vport {
        struct mlx5_core_dev    *dev;
        int                     vport;
        struct hlist_head       uc_list[MLX5_L2_ADDR_HASH_SIZE];
        struct hlist_head       mc_list[MLX5_L2_ADDR_HASH_SIZE];
+       struct mlx5_flow_rule   *promisc_rule;
+       struct mlx5_flow_rule   *allmulti_rule;
        struct work_struct      vport_change_handler;
 
-       /* This spinlock protects access to vport data, between
-        * "esw_vport_disable" and ongoing interrupt "mlx5_eswitch_vport_event"
-        * once vport marked as disabled new interrupts are discarded.
-        */
-       spinlock_t              lock; /* vport events sync */
+       struct vport_ingress    ingress;
+       struct vport_egress     egress;
+
+       u16                     vlan;
+       u8                      qos;
+       bool                    spoofchk;
+       bool                    trusted;
        bool                    enabled;
        u16                     enabled_events;
 };
@@ -113,6 +135,8 @@ struct mlx5_l2_table {
 struct mlx5_eswitch_fdb {
        void *fdb;
        struct mlx5_flow_group *addr_grp;
+       struct mlx5_flow_group *allmulti_grp;
+       struct mlx5_flow_group *promisc_grp;
 };
 
 struct mlx5_eswitch {
@@ -124,6 +148,11 @@ struct mlx5_eswitch {
        struct mlx5_vport       *vports;
        int                     total_vports;
        int                     enabled_vports;
+       /* Synchronize between vport change events
+        * and async SRIOV admin state changes
+        */
+       struct mutex            state_lock;
+       struct esw_mc_addr      *mc_promisc;
 };
 
 /* E-Switch API */
@@ -138,6 +167,10 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
                                 int vport, int link_state);
 int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
                                int vport, u16 vlan, u8 qos);
+int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
+                                   int vport, bool spoofchk);
+int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
+                                int vport_num, bool setting);
 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
                                  int vport, struct ifla_vf_info *ivi);
 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
index f46f1db0fc00df8d6edbb4a8c1443950d3c91eb9..9797768891ee3cae70d8a77390ebcc1fee4498f9 100644 (file)
@@ -50,6 +50,10 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
                 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
        MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
        MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
+       if (ft->vport) {
+               MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
+               MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
+       }
 
        memset(out, 0, sizeof(out));
        return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
@@ -57,6 +61,7 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
 }
 
 int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
+                              u16 vport,
                               enum fs_flow_table_type type, unsigned int level,
                               unsigned int log_size, struct mlx5_flow_table
                               *next_ft, unsigned int *table_id)
@@ -77,6 +82,10 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
        MLX5_SET(create_flow_table_in, in, table_type, type);
        MLX5_SET(create_flow_table_in, in, level, level);
        MLX5_SET(create_flow_table_in, in, log_size, log_size);
+       if (vport) {
+               MLX5_SET(create_flow_table_in, in, vport_number, vport);
+               MLX5_SET(create_flow_table_in, in, other_vport, 1);
+       }
 
        memset(out, 0, sizeof(out));
        err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
@@ -101,6 +110,10 @@ int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
                 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
        MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
        MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
+       if (ft->vport) {
+               MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
+               MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
+       }
 
        return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
                                          sizeof(out));
@@ -120,6 +133,10 @@ int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
                 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
        MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
        MLX5_SET(modify_flow_table_in, in, table_id, ft->id);
+       if (ft->vport) {
+               MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport);
+               MLX5_SET(modify_flow_table_in, in, other_vport, 1);
+       }
        MLX5_SET(modify_flow_table_in, in, modify_field_select,
                 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
        if (next_ft) {
@@ -148,6 +165,10 @@ int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
                 MLX5_CMD_OP_CREATE_FLOW_GROUP);
        MLX5_SET(create_flow_group_in, in, table_type, ft->type);
        MLX5_SET(create_flow_group_in, in, table_id, ft->id);
+       if (ft->vport) {
+               MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
+               MLX5_SET(create_flow_group_in, in, other_vport, 1);
+       }
 
        err = mlx5_cmd_exec_check_status(dev, in,
                                         inlen, out,
@@ -174,6 +195,10 @@ int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
        MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
        MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
        MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
+       if (ft->vport) {
+               MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
+               MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
+       }
 
        return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
                                          sizeof(out));
@@ -207,6 +232,10 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
        MLX5_SET(set_fte_in, in, table_type, ft->type);
        MLX5_SET(set_fte_in, in, table_id,   ft->id);
        MLX5_SET(set_fte_in, in, flow_index, fte->index);
+       if (ft->vport) {
+               MLX5_SET(set_fte_in, in, vport_number, ft->vport);
+               MLX5_SET(set_fte_in, in, other_vport, 1);
+       }
 
        in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
        MLX5_SET(flow_context, in_flow_context, group_id, group_id);
@@ -285,6 +314,10 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
        MLX5_SET(delete_fte_in, in, table_type, ft->type);
        MLX5_SET(delete_fte_in, in, table_id, ft->id);
        MLX5_SET(delete_fte_in, in, flow_index, index);
+       if (ft->vport) {
+               MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
+               MLX5_SET(delete_fte_in, in, other_vport, 1);
+       }
 
        err =  mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
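
Editor's note: every flow-table command in this file gains the same two MLX5_SET calls; when ft->vport is nonzero the command executes against that vport's table via other_vport (vport 0, the issuing function itself, cannot be addressed this way, which is why the stanza is guarded). The driver open-codes it; a hypothetical macro that would factor the repetition (typ is the command's input struct, e.g. set_fte_in) might look like:

#define MLX5_SET_FT_VPORT(typ, in, ft)                                \
        do {                                                          \
                if ((ft)->vport) {                                    \
                        MLX5_SET(typ, in, vport_number, (ft)->vport); \
                        MLX5_SET(typ, in, other_vport, 1);            \
                }                                                     \
        } while (0)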
 
index 9814d478480392de94b6ad50e16ad558e3cf6da9..c97b4a03eeedbdde80958f06b895a0380c54b0b7 100644 (file)
@@ -34,6 +34,7 @@
 #define _MLX5_FS_CMD_
 
 int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
+                              u16 vport,
                               enum fs_flow_table_type type, unsigned int level,
                               unsigned int log_size, struct mlx5_flow_table
                               *next_ft, unsigned int *table_id);
index 5121be4675d14de3ea8f7722434945caeb307eb9..659a6980cda28a410f59540bb8d69f9eceb1847f 100644 (file)
 #define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
                                         sizeof(struct init_tree_node))
 
-#define ADD_PRIO(num_prios_val, min_level_val, max_ft_val, caps_val,\
+#define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
                 ...) {.type = FS_TYPE_PRIO,\
        .min_ft_level = min_level_val,\
-       .max_ft = max_ft_val,\
+       .num_levels = num_levels_val,\
        .num_leaf_prios = num_prios_val,\
        .caps = caps_val,\
        .children = (struct init_tree_node[]) {__VA_ARGS__},\
        .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
 }
 
-#define ADD_MULTIPLE_PRIO(num_prios_val, max_ft_val, ...)\
-       ADD_PRIO(num_prios_val, 0, max_ft_val, {},\
+#define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
+       ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
                 __VA_ARGS__)\
 
 #define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\
 #define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
                               .caps = (long[]) {__VA_ARGS__} }
 
-#define LEFTOVERS_MAX_FT 1
+#define LEFTOVERS_NUM_LEVELS 1
 #define LEFTOVERS_NUM_PRIOS 1
-#define BY_PASS_PRIO_MAX_FT 1
-#define BY_PASS_MIN_LEVEL (KENREL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
-                          LEFTOVERS_MAX_FT)
 
-#define KERNEL_MAX_FT 3
-#define KERNEL_NUM_PRIOS 2
-#define KENREL_MIN_LEVEL 2
+#define BY_PASS_PRIO_NUM_LEVELS 1
+#define BY_PASS_MIN_LEVEL (KERNEL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
+                          LEFTOVERS_NUM_PRIOS)
 
-#define ANCHOR_MAX_FT 1
+/* Vlan, mac, ttc, aRFS */
+#define KERNEL_NIC_PRIO_NUM_LEVELS 4
+#define KERNEL_NIC_NUM_PRIOS 1
+/* One more level for tc */
+#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
+
+#define ANCHOR_NUM_LEVELS 1
 #define ANCHOR_NUM_PRIOS 1
 #define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
 struct node_caps {
@@ -92,7 +95,7 @@ static struct init_tree_node {
        int min_ft_level;
        int num_leaf_prios;
        int prio;
-       int max_ft;
+       int num_levels;
 } root_fs = {
        .type = FS_TYPE_NAMESPACE,
        .ar_size = 4,
@@ -102,17 +105,20 @@ static struct init_tree_node {
                                          FS_CAP(flow_table_properties_nic_receive.modify_root),
                                          FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode),
                                          FS_CAP(flow_table_properties_nic_receive.flow_table_modify)),
-                        ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, BY_PASS_PRIO_MAX_FT))),
-               ADD_PRIO(0, KENREL_MIN_LEVEL, 0, {},
-                        ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NUM_PRIOS, KERNEL_MAX_FT))),
+                        ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
+                                                 BY_PASS_PRIO_NUM_LEVELS))),
+               ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
+                        ADD_NS(ADD_MULTIPLE_PRIO(1, 1),
+                               ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
+                                                 KERNEL_NIC_PRIO_NUM_LEVELS))),
                ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
                         FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en),
                                          FS_CAP(flow_table_properties_nic_receive.modify_root),
                                          FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode),
                                          FS_CAP(flow_table_properties_nic_receive.flow_table_modify)),
-                        ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_MAX_FT))),
+                        ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))),
                ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
-                        ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_MAX_FT))),
+                        ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))),
        }
 };
 
@@ -222,19 +228,6 @@ static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
        return NULL;
 }
 
-static unsigned int find_next_free_level(struct fs_prio *prio)
-{
-       if (!list_empty(&prio->node.children)) {
-               struct mlx5_flow_table *ft;
-
-               ft = list_last_entry(&prio->node.children,
-                                    struct mlx5_flow_table,
-                                    node.list);
-               return ft->level + 1;
-       }
-       return prio->start_level;
-}
-
 static bool masked_memcmp(void *mask, void *val1, void *val2, size_t size)
 {
        unsigned int i;
@@ -464,7 +457,7 @@ static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in)
        return fg;
 }
 
-static struct mlx5_flow_table *alloc_flow_table(int level, int max_fte,
+static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte,
                                                enum fs_flow_table_type table_type)
 {
        struct mlx5_flow_table *ft;
@@ -476,6 +469,7 @@ static struct mlx5_flow_table *alloc_flow_table(int level, int max_fte,
        ft->level = level;
        ft->node.type = FS_TYPE_FLOW_TABLE;
        ft->type = table_type;
+       ft->vport = vport;
        ft->max_fte = max_fte;
        INIT_LIST_HEAD(&ft->fwd_rules);
        mutex_init(&ft->lock);
@@ -615,8 +609,8 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
        return err;
 }
 
-static int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
-                                       struct mlx5_flow_destination *dest)
+int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
+                                struct mlx5_flow_destination *dest)
 {
        struct mlx5_flow_table *ft;
        struct mlx5_flow_group *fg;
@@ -693,9 +687,23 @@ static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table
        return err;
 }
 
-struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
-                                              int prio,
-                                              int max_fte)
+static void list_add_flow_table(struct mlx5_flow_table *ft,
+                               struct fs_prio *prio)
+{
+       struct list_head *prev = &prio->node.children;
+       struct mlx5_flow_table *iter;
+
+       fs_for_each_ft(iter, prio) {
+               if (iter->level > ft->level)
+                       break;
+               prev = &iter->node.list;
+       }
+       list_add(&ft->node.list, prev);
+}
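
Editor's note: with caller-chosen levels, a priority's table list is now kept sorted by level rather than appended to at the tail. The same ordered insert over a plain singly linked list, as a sketch:

struct ft { unsigned int level; struct ft *next; };

/* Insert before the first table whose level is higher, keeping the
 * list in ascending level order. */
static void insert_sorted(struct ft **head, struct ft *ft)
{
        struct ft **p = head;

        while (*p && (*p)->level <= ft->level)
                p = &(*p)->next;
        ft->next = *p;
        *p = ft;
}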
+
+static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
+                                                       u16 vport, int prio,
+                                                       int max_fte, u32 level)
 {
        struct mlx5_flow_table *next_ft = NULL;
        struct mlx5_flow_table *ft;
@@ -716,12 +724,16 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
                err = -EINVAL;
                goto unlock_root;
        }
-       if (fs_prio->num_ft == fs_prio->max_ft) {
+       if (level >= fs_prio->num_levels) {
                err = -ENOSPC;
                goto unlock_root;
        }
-
-       ft = alloc_flow_table(find_next_free_level(fs_prio),
+       /* The requested level is relative to the
+        * start of the priority's level range.
+        */
+       level += fs_prio->start_level;
+       ft = alloc_flow_table(level,
+                             vport,
                              roundup_pow_of_two(max_fte),
                              root->table_type);
        if (!ft) {
@@ -732,7 +744,7 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
        tree_init_node(&ft->node, 1, del_flow_table);
        log_table_sz = ilog2(ft->max_fte);
        next_ft = find_next_chained_ft(fs_prio);
-       err = mlx5_cmd_create_flow_table(root->dev, ft->type, ft->level,
+       err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->type, ft->level,
                                         log_table_sz, next_ft, &ft->id);
        if (err)
                goto free_ft;
@@ -742,7 +754,7 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
                goto destroy_ft;
        lock_ref_node(&fs_prio->node);
        tree_add_node(&ft->node, &fs_prio->node);
-       list_add_tail(&ft->node.list, &fs_prio->node.children);
+       list_add_flow_table(ft, fs_prio);
        fs_prio->num_ft++;
        unlock_ref_node(&fs_prio->node);
        mutex_unlock(&root->chain_lock);
@@ -756,17 +768,32 @@ unlock_root:
        return ERR_PTR(err);
 }
 
+struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
+                                              int prio, int max_fte,
+                                              u32 level)
+{
+       return __mlx5_create_flow_table(ns, 0, prio, max_fte, level);
+}
+
+struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
+                                                    int prio, int max_fte,
+                                                    u32 level, u16 vport)
+{
+       return __mlx5_create_flow_table(ns, vport, prio, max_fte, level);
+}
+
 struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
                                                            int prio,
                                                            int num_flow_table_entries,
-                                                           int max_num_groups)
+                                                           int max_num_groups,
+                                                           u32 level)
 {
        struct mlx5_flow_table *ft;
 
        if (max_num_groups > num_flow_table_entries)
                return ERR_PTR(-EINVAL);
 
-       ft = mlx5_create_flow_table(ns, prio, num_flow_table_entries);
+       ft = mlx5_create_flow_table(ns, prio, num_flow_table_entries, level);
        if (IS_ERR(ft))
                return ft;
 
@@ -1065,31 +1092,18 @@ unlock_fg:
        return rule;
 }
 
-static struct mlx5_flow_rule *add_rule_to_auto_fg(struct mlx5_flow_table *ft,
-                                                 u8 match_criteria_enable,
-                                                 u32 *match_criteria,
-                                                 u32 *match_value,
-                                                 u8 action,
-                                                 u32 flow_tag,
-                                                 struct mlx5_flow_destination *dest)
+static bool dest_is_valid(struct mlx5_flow_destination *dest,
+                         u32 action,
+                         struct mlx5_flow_table *ft)
 {
-       struct mlx5_flow_rule *rule;
-       struct mlx5_flow_group *g;
+       if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
+               return true;
 
-       g = create_autogroup(ft, match_criteria_enable, match_criteria);
-       if (IS_ERR(g))
-               return (void *)g;
-
-       rule = add_rule_fg(g, match_value,
-                          action, flow_tag, dest);
-       if (IS_ERR(rule)) {
-               /* Remove assumes refcount > 0 and autogroup creates a group
-                * with a refcount = 0.
-                */
-               tree_get_node(&g->node);
-               tree_remove_node(&g->node);
-       }
-       return rule;
+       if (!dest || ((dest->type ==
+           MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
+           (dest->ft->level <= ft->level)))
+               return false;
+       return true;
 }
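
Editor's note: dest_is_valid() accepts a forward destination only if it is a non-table destination (TIR, vport, etc.) or a strictly deeper flow table, which keeps table chaining acyclic. Reduced to its core:

struct table { unsigned int level; };

/* A forward may only target a strictly deeper table. */
static int fwd_dest_ok(const struct table *from, const struct table *to)
{
        return to && to->level > from->level;
}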
 
 static struct mlx5_flow_rule *
@@ -1104,7 +1118,7 @@ _mlx5_add_flow_rule(struct mlx5_flow_table *ft,
        struct mlx5_flow_group *g;
        struct mlx5_flow_rule *rule;
 
-       if ((action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && !dest)
+       if (!dest_is_valid(dest, action, ft))
                return ERR_PTR(-EINVAL);
 
        nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT);
@@ -1119,8 +1133,23 @@ _mlx5_add_flow_rule(struct mlx5_flow_table *ft,
                                goto unlock;
                }
 
-       rule = add_rule_to_auto_fg(ft, match_criteria_enable, match_criteria,
-                                  match_value, action, flow_tag, dest);
+       g = create_autogroup(ft, match_criteria_enable, match_criteria);
+       if (IS_ERR(g)) {
+               rule = (void *)g;
+               goto unlock;
+       }
+
+       rule = add_rule_fg(g, match_value,
+                          action, flow_tag, dest);
+       if (IS_ERR(rule)) {
+               /* Remove assumes refcount > 0 and autogroup creates a group
+                * with a refcount = 0.
+                */
+               unlock_ref_node(&ft->node);
+               tree_get_node(&g->node);
+               tree_remove_node(&g->node);
+               return rule;
+       }
 unlock:
        unlock_ref_node(&ft->node);
        return rule;
@@ -1288,7 +1317,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
 {
        struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns;
        int prio;
-       static struct fs_prio *fs_prio;
+       struct fs_prio *fs_prio;
        struct mlx5_flow_namespace *ns;
 
        if (!root_ns)
@@ -1306,6 +1335,16 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
                        return &dev->priv.fdb_root_ns->ns;
                else
                        return NULL;
+       case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
+               if (dev->priv.esw_egress_root_ns)
+                       return &dev->priv.esw_egress_root_ns->ns;
+               else
+                       return NULL;
+       case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
+               if (dev->priv.esw_ingress_root_ns)
+                       return &dev->priv.esw_ingress_root_ns->ns;
+               else
+                       return NULL;
        default:
                return NULL;
        }
@@ -1323,7 +1362,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
 EXPORT_SYMBOL(mlx5_get_flow_namespace);
 
 static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
-                                     unsigned prio, int max_ft)
+                                     unsigned int prio, int num_levels)
 {
        struct fs_prio *fs_prio;
 
@@ -1334,7 +1373,7 @@ static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
        fs_prio->node.type = FS_TYPE_PRIO;
        tree_init_node(&fs_prio->node, 1, NULL);
        tree_add_node(&fs_prio->node, &ns->node);
-       fs_prio->max_ft = max_ft;
+       fs_prio->num_levels = num_levels;
        fs_prio->prio = prio;
        list_add_tail(&fs_prio->node.list, &ns->node.children);
 
@@ -1365,14 +1404,14 @@ static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
        return ns;
 }
 
-static int create_leaf_prios(struct mlx5_flow_namespace *ns, struct init_tree_node
-                            *prio_metadata)
+static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
+                            struct init_tree_node *prio_metadata)
 {
        struct fs_prio *fs_prio;
        int i;
 
        for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
-               fs_prio = fs_create_prio(ns, i, prio_metadata->max_ft);
+               fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
                if (IS_ERR(fs_prio))
                        return PTR_ERR(fs_prio);
        }
@@ -1399,7 +1438,7 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev,
                                    struct init_tree_node *init_node,
                                    struct fs_node *fs_parent_node,
                                    struct init_tree_node *init_parent_node,
-                                   int index)
+                                   int prio)
 {
        int max_ft_level = MLX5_CAP_FLOWTABLE(dev,
                                              flow_table_properties_nic_receive.
@@ -1417,8 +1456,8 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev,
 
                fs_get_obj(fs_ns, fs_parent_node);
                if (init_node->num_leaf_prios)
-                       return create_leaf_prios(fs_ns, init_node);
-               fs_prio = fs_create_prio(fs_ns, index, init_node->max_ft);
+                       return create_leaf_prios(fs_ns, prio, init_node);
+               fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
                if (IS_ERR(fs_prio))
                        return PTR_ERR(fs_prio);
                base = &fs_prio->node;
@@ -1431,11 +1470,16 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev,
        } else {
                return -EINVAL;
        }
+       prio = 0;
        for (i = 0; i < init_node->ar_size; i++) {
                err = init_root_tree_recursive(dev, &init_node->children[i],
-                                              base, init_node, i);
+                                              base, init_node, prio);
                if (err)
                        return err;
+               if (init_node->children[i].type == FS_TYPE_PRIO &&
+                   init_node->children[i].num_leaf_prios) {
+                       prio += init_node->children[i].num_leaf_prios;
+               }
        }
 
        return 0;
@@ -1491,9 +1535,9 @@ static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
        struct fs_prio *prio;
 
        fs_for_each_prio(prio, ns) {
-                /* This updates prio start_level and max_ft */
+                /* This updates prio start_level and num_levels */
                set_prio_attrs_in_prio(prio, acc_level);
-               acc_level += prio->max_ft;
+               acc_level += prio->num_levels;
        }
        return acc_level;
 }
@@ -1505,11 +1549,11 @@ static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
 
        prio->start_level = acc_level;
        fs_for_each_ns(ns, prio)
-               /* This updates start_level and max_ft of ns's priority descendants */
+               /* This updates start_level and num_levels of ns's priority descendants */
                acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
-       if (!prio->max_ft)
-               prio->max_ft = acc_level_ns - prio->start_level;
-       WARN_ON(prio->max_ft < acc_level_ns - prio->start_level);
+       if (!prio->num_levels)
+               prio->num_levels = acc_level_ns - prio->start_level;
+       WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
 }
 
 static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
@@ -1520,12 +1564,13 @@ static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
 
        fs_for_each_prio(prio, ns) {
                set_prio_attrs_in_prio(prio, start_level);
-               start_level += prio->max_ft;
+               start_level += prio->num_levels;
        }
 }
 
 #define ANCHOR_PRIO 0
 #define ANCHOR_SIZE 1
+#define ANCHOR_LEVEL 0
 static int create_anchor_flow_table(struct mlx5_core_dev
                                                        *dev)
 {
@@ -1535,7 +1580,7 @@ static int create_anchor_flow_table(struct mlx5_core_dev
        ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ANCHOR);
        if (!ns)
                return -EINVAL;
-       ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE);
+       ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL);
        if (IS_ERR(ft)) {
                mlx5_core_err(dev, "Failed to create last anchor flow table");
                return PTR_ERR(ft);
@@ -1680,6 +1725,8 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
 {
        cleanup_root_ns(dev);
        cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns);
+       cleanup_single_prio_root_ns(dev, dev->priv.esw_egress_root_ns);
+       cleanup_single_prio_root_ns(dev, dev->priv.esw_ingress_root_ns);
 }
 
 static int init_fdb_root_ns(struct mlx5_core_dev *dev)
@@ -1700,6 +1747,38 @@ static int init_fdb_root_ns(struct mlx5_core_dev *dev)
        }
 }
 
+static int init_egress_acl_root_ns(struct mlx5_core_dev *dev)
+{
+       struct fs_prio *prio;
+
+       dev->priv.esw_egress_root_ns = create_root_ns(dev, FS_FT_ESW_EGRESS_ACL);
+       if (!dev->priv.esw_egress_root_ns)
+               return -ENOMEM;
+
+       /* create 1 prio */
+       prio = fs_create_prio(&dev->priv.esw_egress_root_ns->ns, 0, MLX5_TOTAL_VPORTS(dev));
+       if (IS_ERR(prio))
+               return PTR_ERR(prio);
+       else
+               return 0;
+}
+
+static int init_ingress_acl_root_ns(struct mlx5_core_dev *dev)
+{
+       struct fs_prio *prio;
+
+       dev->priv.esw_ingress_root_ns = create_root_ns(dev, FS_FT_ESW_INGRESS_ACL);
+       if (!dev->priv.esw_ingress_root_ns)
+               return -ENOMEM;
+
+       /* create 1 prio */
+       prio = fs_create_prio(&dev->priv.esw_ingress_root_ns->ns, 0, MLX5_TOTAL_VPORTS(dev));
+       if (IS_ERR(prio))
+               return PTR_ERR(prio);
+       else
+               return 0;
+}
+
 int mlx5_init_fs(struct mlx5_core_dev *dev)
 {
        int err = 0;
@@ -1712,8 +1791,21 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
        if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
                err = init_fdb_root_ns(dev);
                if (err)
-                       cleanup_root_ns(dev);
+                       goto err;
+       }
+       if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
+               err = init_egress_acl_root_ns(dev);
+               if (err)
+                       goto err;
+       }
+       if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
+               err = init_ingress_acl_root_ns(dev);
+               if (err)
+                       goto err;
        }
 
+       return 0;
+err:
+       mlx5_cleanup_fs(dev);
        return err;
 }
index f37a6248a27bedd7b6850ecb56c2de67a97c10da..8e76cc505f5a4b06c313d820a53582af44800ea6 100644 (file)
@@ -45,8 +45,10 @@ enum fs_node_type {
 };
 
 enum fs_flow_table_type {
-       FS_FT_NIC_RX     = 0x0,
-       FS_FT_FDB        = 0X4,
+       FS_FT_NIC_RX          = 0x0,
+       FS_FT_ESW_EGRESS_ACL  = 0x2,
+       FS_FT_ESW_INGRESS_ACL = 0x3,
+       FS_FT_FDB             = 0X4,
 };
 
 enum fs_fte_status {
@@ -79,6 +81,7 @@ struct mlx5_flow_rule {
 struct mlx5_flow_table {
        struct fs_node                  node;
        u32                             id;
+       u16                             vport;
        unsigned int                    max_fte;
        unsigned int                    level;
        enum fs_flow_table_type         type;
@@ -107,7 +110,7 @@ struct fs_fte {
 /* Type of children is mlx5_flow_table/namespace */
 struct fs_prio {
        struct fs_node                  node;
-       unsigned int                    max_ft;
+       unsigned int                    num_levels;
        unsigned int                    start_level;
        unsigned int                    prio;
        unsigned int                    num_ft;
index 3f3b2fae4991025a1018f4e4e4d87c88b6a30f1a..6feef7fb9d6a9537580b604a60ab6d420d810c02 100644 (file)
@@ -48,6 +48,9 @@
 #include <linux/kmod.h>
 #include <linux/delay.h>
 #include <linux/mlx5/mlx5_ifc.h>
+#ifdef CONFIG_RFS_ACCEL
+#include <linux/cpu_rmap.h>
+#endif
 #include "mlx5_core.h"
 #include "fs_core.h"
 #ifdef CONFIG_MLX5_CORE_EN
@@ -665,6 +668,12 @@ static void free_comp_eqs(struct mlx5_core_dev *dev)
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        struct mlx5_eq *eq, *n;
 
+#ifdef CONFIG_RFS_ACCEL
+       if (dev->rmap) {
+               free_irq_cpu_rmap(dev->rmap);
+               dev->rmap = NULL;
+       }
+#endif
        spin_lock(&table->lock);
        list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
                list_del(&eq->list);
@@ -691,6 +700,11 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev)
        INIT_LIST_HEAD(&table->comp_eqs_list);
        ncomp_vec = table->num_comp_vectors;
        nent = MLX5_COMP_EQ_SIZE;
+#ifdef CONFIG_RFS_ACCEL
+       dev->rmap = alloc_irq_cpu_rmap(ncomp_vec);
+       if (!dev->rmap)
+               return -ENOMEM;
+#endif
        for (i = 0; i < ncomp_vec; i++) {
                eq = kzalloc(sizeof(*eq), GFP_KERNEL);
                if (!eq) {
@@ -698,6 +712,10 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev)
                        goto clean;
                }
 
+#ifdef CONFIG_RFS_ACCEL
+               irq_cpu_rmap_add(dev->rmap,
+                                dev->priv.msix_arr[i + MLX5_EQ_VEC_COMP_BASE].vector);
+#endif
                snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
                err = mlx5_create_map_eq(dev, eq,
                                         i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
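
A hedged sketch (not in this hunk) of how the new IRQ CPU affinity map is typically consumed, assuming the standard net_device rx_cpu_rmap hook that accelerated RFS uses under CONFIG_RFS_ACCEL:

static void example_wire_arfs(struct net_device *netdev,
			      struct mlx5_core_dev *mdev)
{
#ifdef CONFIG_RFS_ACCEL
	/* hand the IRQ affinity map to accelerated RFS flow steering */
	netdev->rx_cpu_rmap = mdev->rmap;
#endif
}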
@@ -966,7 +984,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
        int err;
 
        mutex_lock(&dev->intf_state_mutex);
-       if (dev->interface_state == MLX5_INTERFACE_STATE_UP) {
+       if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
                dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",
                         __func__);
                goto out;
@@ -1133,7 +1151,8 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
        if (err)
                pr_info("failed request module on %s\n", MLX5_IB_MOD);
 
-       dev->interface_state = MLX5_INTERFACE_STATE_UP;
+       clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
+       set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
 out:
        mutex_unlock(&dev->intf_state_mutex);
 
@@ -1207,7 +1226,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
        }
 
        mutex_lock(&dev->intf_state_mutex);
-       if (dev->interface_state == MLX5_INTERFACE_STATE_DOWN) {
+       if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
                dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
                         __func__);
                goto out;
@@ -1241,7 +1260,8 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
        mlx5_cmd_cleanup(dev);
 
 out:
-       dev->interface_state = MLX5_INTERFACE_STATE_DOWN;
+       clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
+       set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
        mutex_unlock(&dev->intf_state_mutex);
        return err;
 }
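
The interface state becomes a bitmap rather than a single enum value, so independent conditions can be tested at once; a minimal sketch (hypothetical helper name):

static bool example_can_post_commands(struct mlx5_core_dev *dev)
{
	/* both bits live in the same unsigned long bitmap */
	return test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state) &&
	       !test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state);
}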
@@ -1452,6 +1472,18 @@ static const struct pci_error_handlers mlx5_err_handler = {
        .resume         = mlx5_pci_resume
 };
 
+static void shutdown(struct pci_dev *pdev)
+{
+       struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
+       struct mlx5_priv *priv = &dev->priv;
+
+       dev_info(&pdev->dev, "Shutdown was called\n");
+       /* Notify mlx5 clients that the kernel is being shut down */
+       set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state);
+       mlx5_unload_one(dev, priv);
+       mlx5_pci_disable_device(dev);
+}
+
 static const struct pci_device_id mlx5_core_pci_table[] = {
        { PCI_VDEVICE(MELLANOX, 0x1011) },                      /* Connect-IB */
        { PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF},   /* Connect-IB VF */
@@ -1459,6 +1491,8 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
        { PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF},   /* ConnectX-4 VF */
        { PCI_VDEVICE(MELLANOX, 0x1015) },                      /* ConnectX-4LX */
        { PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF},   /* ConnectX-4LX VF */
+       { PCI_VDEVICE(MELLANOX, 0x1017) },                      /* ConnectX-5 */
+       { PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF},   /* ConnectX-5 VF */
        { 0, }
 };
 
@@ -1469,6 +1503,7 @@ static struct pci_driver mlx5_core_driver = {
        .id_table       = mlx5_core_pci_table,
        .probe          = init_one,
        .remove         = remove_one,
+       .shutdown       = shutdown,
        .err_handler    = &mlx5_err_handler,
        .sriov_configure   = mlx5_core_sriov_configure,
 };
index 0b0b226c789e1f6ef2a43881f2c9172997639661..482604bd051cd95ecb4762b85dc0c9f816614a39 100644 (file)
@@ -42,6 +42,8 @@
 #define DRIVER_VERSION "3.0-1"
 #define DRIVER_RELDATE  "January 2015"
 
+#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs(mdev->pdev))
+
 extern int mlx5_core_debug_mask;
 
 #define mlx5_core_dbg(__dev, format, ...)                              \
index ae378c575deb28073063b7d216c39664df4c4ae5..3e35611b19c30d05bc5530f06cab49743f9aa102 100644 (file)
@@ -115,6 +115,19 @@ int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
 }
 EXPORT_SYMBOL_GPL(mlx5_query_port_ptys);
 
+int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration)
+{
+       u32 out[MLX5_ST_SZ_DW(mlcr_reg)];
+       u32 in[MLX5_ST_SZ_DW(mlcr_reg)];
+
+       memset(in, 0, sizeof(in));
+       MLX5_SET(mlcr_reg, in, local_port, 1);
+       MLX5_SET(mlcr_reg, in, beacon_duration, beacon_duration);
+
+       return mlx5_core_access_reg(dev, in, sizeof(in), out,
+                                   sizeof(out), MLX5_REG_MLCR, 0, 1);
+}
+
 int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
                              u32 *proto_cap, int proto_mask)
 {
@@ -247,8 +260,8 @@ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
 }
 EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
 
-static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
-                               int *max_mtu, int *oper_mtu, u8 port)
+static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
+                               u16 *max_mtu, u16 *oper_mtu, u8 port)
 {
        u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
        u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
@@ -268,7 +281,7 @@ static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
                *admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu);
 }
 
-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port)
 {
        u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
        u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
@@ -283,20 +296,96 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
 }
 EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
 
-void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu,
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu,
                             u8 port)
 {
        mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port);
 }
 EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
 
-void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
                              u8 port)
 {
        mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port);
 }
 EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu);
 
+static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
+{
+       u32 out[MLX5_ST_SZ_DW(pmlp_reg)];
+       u32 in[MLX5_ST_SZ_DW(pmlp_reg)];
+       int module_mapping;
+       int err;
+
+       memset(in, 0, sizeof(in));
+
+       MLX5_SET(pmlp_reg, in, local_port, 1);
+
+       err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+                                  MLX5_REG_PMLP, 0, 0);
+       if (err)
+               return err;
+
+       module_mapping = MLX5_GET(pmlp_reg, out, lane0_module_mapping);
+       *module_num = module_mapping & MLX5_EEPROM_IDENTIFIER_BYTE_MASK;
+
+       return 0;
+}
+
+int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
+                            u16 offset, u16 size, u8 *data)
+{
+       u32 out[MLX5_ST_SZ_DW(mcia_reg)];
+       u32 in[MLX5_ST_SZ_DW(mcia_reg)];
+       int module_num;
+       u16 i2c_addr;
+       int status;
+       int err;
+       void *ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
+
+       err = mlx5_query_module_num(dev, &module_num);
+       if (err)
+               return err;
+
+       memset(in, 0, sizeof(in));
+       size = min_t(int, size, MLX5_EEPROM_MAX_BYTES);
+
+       if (offset < MLX5_EEPROM_PAGE_LENGTH &&
+           offset + size > MLX5_EEPROM_PAGE_LENGTH)
+               /* Cross-page read: clamp to the end of the low page (offset 256) */
+               size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
+
+       i2c_addr = MLX5_I2C_ADDR_LOW;
+       if (offset >= MLX5_EEPROM_PAGE_LENGTH) {
+               i2c_addr = MLX5_I2C_ADDR_HIGH;
+               offset -= MLX5_EEPROM_PAGE_LENGTH;
+       }
+
+       MLX5_SET(mcia_reg, in, l, 0);
+       MLX5_SET(mcia_reg, in, module, module_num);
+       MLX5_SET(mcia_reg, in, i2c_device_address, i2c_addr);
+       MLX5_SET(mcia_reg, in, page_number, 0);
+       MLX5_SET(mcia_reg, in, device_address, offset);
+       MLX5_SET(mcia_reg, in, size, size);
+
+       err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+                                  sizeof(out), MLX5_REG_MCIA, 0, 0);
+       if (err)
+               return err;
+
+       status = MLX5_GET(mcia_reg, out, status);
+       if (status) {
+               mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n",
+                             status);
+               return -EIO;
+       }
+
+       memcpy(data, ptr, size);
+
+       return size;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom);
+
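A minimal caller sketch for the new EEPROM helper (hypothetical function; note the helper returns the number of bytes read on success):

static int example_read_module_id(struct mlx5_core_dev *mdev, u8 *id)
{
	u8 data[MLX5_EEPROM_MAX_BYTES];
	int read;

	read = mlx5_query_module_eeprom(mdev, 0, 1, data);
	if (read < 0)
		return read;
	*id = data[0];	/* SFF identifier byte of the plugged module */
	return 0;
}
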
 static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
                                int pvlc_size,  u8 local_port)
 {
@@ -607,3 +696,52 @@ int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode)
        return err;
 }
 EXPORT_SYMBOL_GPL(mlx5_query_port_wol);
+
+static int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out,
+                                 int outlen)
+{
+       u32 in[MLX5_ST_SZ_DW(pcmr_reg)];
+
+       memset(in, 0, sizeof(in));
+       MLX5_SET(pcmr_reg, in, local_port, 1);
+
+       return mlx5_core_access_reg(mdev, in, sizeof(in), out,
+                                   outlen, MLX5_REG_PCMR, 0, 0);
+}
+
+static int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen)
+{
+       u32 out[MLX5_ST_SZ_DW(pcmr_reg)];
+
+       return mlx5_core_access_reg(mdev, in, inlen, out,
+                                   sizeof(out), MLX5_REG_PCMR, 0, 1);
+}
+
+int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable)
+{
+       u32 in[MLX5_ST_SZ_DW(pcmr_reg)];
+
+       memset(in, 0, sizeof(in));
+       MLX5_SET(pcmr_reg, in, local_port, 1);
+       MLX5_SET(pcmr_reg, in, fcs_chk, enable);
+
+       return mlx5_set_ports_check(mdev, in, sizeof(in));
+}
+
+void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
+                        bool *enabled)
+{
+       u32 out[MLX5_ST_SZ_DW(pcmr_reg)];
+       /* Defaults for firmware that does not support MLX5_REG_PCMR */
+       *supported = false;
+       *enabled = true;
+
+       if (!MLX5_CAP_GEN(mdev, ports_check))
+               return;
+
+       if (mlx5_query_ports_check(mdev, out, sizeof(out)))
+               return;
+
+       *supported = !!(MLX5_GET(pcmr_reg, out, fcs_cap));
+       *enabled = !!(MLX5_GET(pcmr_reg, out, fcs_chk));
+}
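
Putting the two PCMR helpers together, a hedged usage sketch (hypothetical function name):

static int example_enable_fcs_check(struct mlx5_core_dev *mdev)
{
	bool supported, enabled;

	mlx5_query_port_fcs(mdev, &supported, &enabled);
	if (!supported)
		return -EOPNOTSUPP;
	return enabled ? 0 : mlx5_set_port_fcs(mdev, 1);
}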
index def289375ecbf1a1543dcbbd4cf04e0a9b03ce1b..b720a274220d95793f5803620d18db8fcae2073b 100644 (file)
@@ -538,3 +538,71 @@ void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
        mlx5_core_destroy_sq(dev, sq->qpn);
 }
 EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
+
+int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id)
+{
+       u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)];
+       u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)];
+       int err;
+
+       memset(in, 0, sizeof(in));
+       memset(out, 0, sizeof(out));
+
+       MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
+       err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+       if (!err)
+               *counter_id = MLX5_GET(alloc_q_counter_out, out,
+                                      counter_set_id);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter);
+
+int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id)
+{
+       u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)];
+       u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)];
+
+       memset(in, 0, sizeof(in));
+       memset(out, 0, sizeof(out));
+
+       MLX5_SET(dealloc_q_counter_in, in, opcode,
+                MLX5_CMD_OP_DEALLOC_Q_COUNTER);
+       MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter_id);
+       return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+                                         sizeof(out));
+}
+EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter);
+
+int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
+                             int reset, void *out, int out_size)
+{
+       u32 in[MLX5_ST_SZ_DW(query_q_counter_in)];
+
+       memset(in, 0, sizeof(in));
+
+       MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
+       MLX5_SET(query_q_counter_in, in, clear, reset);
+       MLX5_SET(query_q_counter_in, in, counter_set_id, counter_id);
+       return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_size);
+}
+EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter);
+
+int mlx5_core_query_out_of_buffer(struct mlx5_core_dev *dev, u16 counter_id,
+                                 u32 *out_of_buffer)
+{
+       int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
+       void *out;
+       int err;
+
+       out = mlx5_vzalloc(outlen);
+       if (!out)
+               return -ENOMEM;
+
+       err = mlx5_core_query_q_counter(dev, counter_id, 0, out, outlen);
+       if (!err)
+               *out_of_buffer = MLX5_GET(query_q_counter_out, out,
+                                         out_of_buffer);
+
+       kfree(out);
+       return err;
+}
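
A self-contained sketch of the counter lifecycle these helpers enable (hypothetical caller):

static int example_read_out_of_buffer(struct mlx5_core_dev *dev, u32 *oob)
{
	u16 counter_id;
	int err;

	err = mlx5_core_alloc_q_counter(dev, &counter_id);
	if (err)
		return err;
	err = mlx5_core_query_out_of_buffer(dev, counter_id, oob);
	mlx5_core_dealloc_q_counter(dev, counter_id);
	return err;
}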
index 8ba080e441a1863e63d4188820afbd2f1c39bd6e..5ff8af472bf5221e736e5369b4de73bfe996b0e9 100644 (file)
@@ -269,8 +269,10 @@ EXPORT_SYMBOL(mlx5_alloc_map_uar);
 
 void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
 {
-       iounmap(uar->map);
-       iounmap(uar->bf_map);
+       if (uar->map)
+               iounmap(uar->map);
+       else
+               iounmap(uar->bf_map);
        mlx5_cmd_free_uar(mdev, uar->index);
 }
 EXPORT_SYMBOL(mlx5_unmap_free_uar);
index bd518405859ed3974f7049196503353c7ee647fa..b69dadcfb897a25a51b89a9caed4d1454d603d8e 100644 (file)
@@ -196,6 +196,46 @@ int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
 }
 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);
 
+int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
+{
+       int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+       u32 *out;
+       int err;
+
+       out = mlx5_vzalloc(outlen);
+       if (!out)
+               return -ENOMEM;
+
+       err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+       if (!err)
+               *mtu = MLX5_GET(query_nic_vport_context_out, out,
+                               nic_vport_context.mtu);
+
+       kvfree(out);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);
+
+int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
+{
+       int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+       void *in;
+       int err;
+
+       in = mlx5_vzalloc(inlen);
+       if (!in)
+               return -ENOMEM;
+
+       MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
+       MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
+
+       err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+       kvfree(in);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);
+
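A sketch pairing the new query/modify vport MTU helpers (hypothetical caller):

static int example_sync_vport_mtu(struct mlx5_core_dev *mdev, u16 wanted)
{
	u16 cur;
	int err;

	err = mlx5_query_nic_vport_mtu(mdev, &cur);
	if (err)
		return err;
	return cur == wanted ? 0 : mlx5_modify_nic_vport_mtu(mdev, wanted);
}
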
 int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
                                  u32 vport,
                                  enum mlx5_list_type list_type,
index 9f10df25f3cd52d9f631fd1e0963483eb1e7073a..f2fd1ef16da7eba68deb5af1648b7ff28cf7ff44 100644 (file)
@@ -95,21 +95,22 @@ struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
        return vxlan;
 }
 
-int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port)
+static void mlx5e_vxlan_add_port(struct work_struct *work)
 {
+       struct mlx5e_vxlan_work *vxlan_work =
+               container_of(work, struct mlx5e_vxlan_work, work);
+       struct mlx5e_priv *priv = vxlan_work->priv;
        struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
+       u16 port = vxlan_work->port;
        struct mlx5e_vxlan *vxlan;
        int err;
 
-       err = mlx5e_vxlan_core_add_port_cmd(priv->mdev, port);
-       if (err)
-               return err;
+       if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
+               goto free_work;
 
        vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL);
-       if (!vxlan) {
-               err = -ENOMEM;
+       if (!vxlan)
                goto err_delete_port;
-       }
 
        vxlan->udp_port = port;
 
@@ -119,13 +120,14 @@ int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port)
        if (err)
                goto err_free;
 
-       return 0;
+       goto free_work;
 
 err_free:
        kfree(vxlan);
 err_delete_port:
        mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
-       return err;
+free_work:
+       kfree(vxlan_work);
 }
 
 static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
@@ -145,12 +147,36 @@ static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
        kfree(vxlan);
 }
 
-void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port)
+static void mlx5e_vxlan_del_port(struct work_struct *work)
 {
-       if (!mlx5e_vxlan_lookup_port(priv, port))
-               return;
+       struct mlx5e_vxlan_work *vxlan_work =
+               container_of(work, struct mlx5e_vxlan_work, work);
+       struct mlx5e_priv *priv = vxlan_work->priv;
+       u16 port = vxlan_work->port;
 
        __mlx5e_vxlan_core_del_port(priv, port);
+
+       kfree(vxlan_work);
+}
+
+void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
+                           u16 port, int add)
+{
+       struct mlx5e_vxlan_work *vxlan_work;
+
+       vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC);
+       if (!vxlan_work)
+               return;
+
+       if (add)
+               INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_port);
+       else
+               INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_port);
+
+       vxlan_work->priv = priv;
+       vxlan_work->port = port;
+       vxlan_work->sa_family = sa_family;
+       queue_work(priv->wq, &vxlan_work->work);
 }
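
A hedged sketch of the intended caller, e.g. an ndo_add_vxlan_port handler deferring the firmware command to the driver workqueue (names are illustrative, not part of this file):

static void example_add_vxlan_port(struct net_device *netdev,
				   sa_family_t sa_family, __be16 port)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (!mlx5e_vxlan_allowed(priv->mdev))
		return;
	/* add == 1; the queued work item owns the firmware interaction */
	mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 1);
}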
 
 void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
index a01685056ab156201c1945607e34bb7b53771ee3..5def12c048e38992e7edd9f870233e369ef4580e 100644 (file)
@@ -39,6 +39,13 @@ struct mlx5e_vxlan {
        u16 udp_port;
 };
 
+struct mlx5e_vxlan_work {
+       struct work_struct      work;
+       struct mlx5e_priv       *priv;
+       sa_family_t             sa_family;
+       u16                     port;
+};
+
 static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
 {
        return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) &&
@@ -46,9 +53,10 @@ static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
 }
 
 void mlx5e_vxlan_init(struct mlx5e_priv *priv);
-int  mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port);
-void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port);
-struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port);
 void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv);
 
+void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
+                           u16 port, int add);
+struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port);
+
 #endif /* __MLX5_VXLAN_H__ */
index 3958195526d1557689fc1a945ee8abef9c509659..b0a0b01bb4ef76896cf632b06a0b6db4b5c9c08d 100644 (file)
@@ -44,7 +44,7 @@
 #include <linux/seq_file.h>
 #include <linux/u64_stats_sync.h>
 #include <linux/netdevice.h>
-#include <linux/wait.h>
+#include <linux/completion.h>
 #include <linux/skbuff.h>
 #include <linux/etherdevice.h>
 #include <linux/types.h>
@@ -55,6 +55,7 @@
 #include <linux/mutex.h>
 #include <linux/rcupdate.h>
 #include <linux/slab.h>
+#include <linux/workqueue.h>
 #include <asm/byteorder.h>
 #include <net/devlink.h>
 
@@ -73,6 +74,8 @@ static const char mlxsw_core_driver_name[] = "mlxsw_core";
 
 static struct dentry *mlxsw_core_dbg_root;
 
+static struct workqueue_struct *mlxsw_wq;
+
 struct mlxsw_core_pcpu_stats {
        u64                     trap_rx_packets[MLXSW_TRAP_ID_MAX];
        u64                     trap_rx_bytes[MLXSW_TRAP_ID_MAX];
@@ -93,11 +96,9 @@ struct mlxsw_core {
        struct list_head rx_listener_list;
        struct list_head event_listener_list;
        struct {
-               struct sk_buff *resp_skb;
-               u64 tid;
-               wait_queue_head_t wait;
-               bool trans_active;
-               struct mutex lock; /* One EMAD transaction at a time. */
+               atomic64_t tid;
+               struct list_head trans_list;
+               spinlock_t trans_list_lock; /* protects trans_list writes */
                bool use_emad;
        } emad;
        struct mlxsw_core_pcpu_stats __percpu *pcpu_stats;
@@ -290,7 +291,7 @@ static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
 static void mlxsw_emad_pack_op_tlv(char *op_tlv,
                                   const struct mlxsw_reg_info *reg,
                                   enum mlxsw_core_reg_access_type type,
-                                  struct mlxsw_core *mlxsw_core)
+                                  u64 tid)
 {
        mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
        mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
@@ -306,7 +307,7 @@ static void mlxsw_emad_pack_op_tlv(char *op_tlv,
                                             MLXSW_EMAD_OP_TLV_METHOD_WRITE);
        mlxsw_emad_op_tlv_class_set(op_tlv,
                                    MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
-       mlxsw_emad_op_tlv_tid_set(op_tlv, mlxsw_core->emad.tid);
+       mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
 }
 
 static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
@@ -328,7 +329,7 @@ static void mlxsw_emad_construct(struct sk_buff *skb,
                                 const struct mlxsw_reg_info *reg,
                                 char *payload,
                                 enum mlxsw_core_reg_access_type type,
-                                struct mlxsw_core *mlxsw_core)
+                                u64 tid)
 {
        char *buf;
 
@@ -339,7 +340,7 @@ static void mlxsw_emad_construct(struct sk_buff *skb,
        mlxsw_emad_pack_reg_tlv(buf, reg, payload);
 
        buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
-       mlxsw_emad_pack_op_tlv(buf, reg, type, mlxsw_core);
+       mlxsw_emad_pack_op_tlv(buf, reg, type, tid);
 
        mlxsw_emad_construct_eth_hdr(skb);
 }
@@ -376,58 +377,16 @@ static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
        return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
 }
 
-#define MLXSW_EMAD_TIMEOUT_MS 200
-
-static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
-                                struct sk_buff *skb,
-                                const struct mlxsw_tx_info *tx_info)
-{
-       int err;
-       int ret;
-
-       mlxsw_core->emad.trans_active = true;
-
-       err = mlxsw_core_skb_transmit(mlxsw_core, skb, tx_info);
-       if (err) {
-               dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n",
-                       mlxsw_core->emad.tid);
-               dev_kfree_skb(skb);
-               goto trans_inactive_out;
-       }
-
-       ret = wait_event_timeout(mlxsw_core->emad.wait,
-                                !(mlxsw_core->emad.trans_active),
-                                msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS));
-       if (!ret) {
-               dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n",
-                        mlxsw_core->emad.tid);
-               err = -EIO;
-               goto trans_inactive_out;
-       }
-
-       return 0;
-
-trans_inactive_out:
-       mlxsw_core->emad.trans_active = false;
-       return err;
-}
-
-static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
-                                    char *op_tlv)
+static int mlxsw_emad_process_status(char *op_tlv,
+                                    enum mlxsw_emad_op_tlv_status *p_status)
 {
-       enum mlxsw_emad_op_tlv_status status;
-       u64 tid;
-
-       status = mlxsw_emad_op_tlv_status_get(op_tlv);
-       tid = mlxsw_emad_op_tlv_tid_get(op_tlv);
+       *p_status = mlxsw_emad_op_tlv_status_get(op_tlv);
 
-       switch (status) {
+       switch (*p_status) {
        case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
                return 0;
        case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
        case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
-               dev_warn(mlxsw_core->bus_info->dev, "Reg access status again (tid=%llx,status=%x(%s))\n",
-                        tid, status, mlxsw_emad_op_tlv_status_str(status));
                return -EAGAIN;
        case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
        case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
@@ -438,70 +397,150 @@ static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
        case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
        case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
        default:
-               dev_err(mlxsw_core->bus_info->dev, "Reg access status failed (tid=%llx,status=%x(%s))\n",
-                       tid, status, mlxsw_emad_op_tlv_status_str(status));
                return -EIO;
        }
 }
 
-static int mlxsw_emad_process_status_skb(struct mlxsw_core *mlxsw_core,
-                                        struct sk_buff *skb)
+static int
+mlxsw_emad_process_status_skb(struct sk_buff *skb,
+                             enum mlxsw_emad_op_tlv_status *p_status)
+{
+       return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
+}
+
+struct mlxsw_reg_trans {
+       struct list_head list;
+       struct list_head bulk_list;
+       struct mlxsw_core *core;
+       struct sk_buff *tx_skb;
+       struct mlxsw_tx_info tx_info;
+       struct delayed_work timeout_dw;
+       unsigned int retries;
+       u64 tid;
+       struct completion completion;
+       atomic_t active;
+       mlxsw_reg_trans_cb_t *cb;
+       unsigned long cb_priv;
+       const struct mlxsw_reg_info *reg;
+       enum mlxsw_core_reg_access_type type;
+       int err;
+       enum mlxsw_emad_op_tlv_status emad_status;
+       struct rcu_head rcu;
+};
+
+#define MLXSW_EMAD_TIMEOUT_MS 200
+
+static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
 {
-       return mlxsw_emad_process_status(mlxsw_core, mlxsw_emad_op_tlv(skb));
+       unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
+
+       mlxsw_core_schedule_dw(&trans->timeout_dw, timeout);
 }
 
 static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
-                              struct sk_buff *skb,
-                              const struct mlxsw_tx_info *tx_info)
+                              struct mlxsw_reg_trans *trans)
 {
-       struct sk_buff *trans_skb;
-       int n_retry;
+       struct sk_buff *skb;
        int err;
 
-       n_retry = 0;
-retry:
-       /* We copy the EMAD to a new skb, since we might need
-        * to retransmit it in case of failure.
-        */
-       trans_skb = skb_copy(skb, GFP_KERNEL);
-       if (!trans_skb) {
-               err = -ENOMEM;
-               goto out;
+       skb = skb_copy(trans->tx_skb, GFP_KERNEL);
+       if (!skb)
+               return -ENOMEM;
+
+       atomic_set(&trans->active, 1);
+       err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
+       if (err) {
+               dev_kfree_skb(skb);
+               return err;
        }
+       mlxsw_emad_trans_timeout_schedule(trans);
+       return 0;
+}
 
-       err = __mlxsw_emad_transmit(mlxsw_core, trans_skb, tx_info);
-       if (!err) {
-               struct sk_buff *resp_skb = mlxsw_core->emad.resp_skb;
+static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
+{
+       struct mlxsw_core *mlxsw_core = trans->core;
+
+       dev_kfree_skb(trans->tx_skb);
+       spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
+       list_del_rcu(&trans->list);
+       spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
+       trans->err = err;
+       complete(&trans->completion);
+}
+
+static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
+                                     struct mlxsw_reg_trans *trans)
+{
+       int err;
 
-               err = mlxsw_emad_process_status_skb(mlxsw_core, resp_skb);
-               if (err)
-                       dev_kfree_skb(resp_skb);
-               if (!err || err != -EAGAIN)
-                       goto out;
+       if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
+               trans->retries++;
+               err = mlxsw_emad_transmit(trans->core, trans);
+               if (err == 0)
+                       return;
+       } else {
+               err = -EIO;
        }
-       if (n_retry++ < MLXSW_EMAD_MAX_RETRY)
-               goto retry;
+       mlxsw_emad_trans_finish(trans, err);
+}
 
-out:
-       dev_kfree_skb(skb);
-       mlxsw_core->emad.tid++;
-       return err;
+static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
+{
+       struct mlxsw_reg_trans *trans = container_of(work,
+                                                    struct mlxsw_reg_trans,
+                                                    timeout_dw.work);
+
+       if (!atomic_dec_and_test(&trans->active))
+               return;
+
+       mlxsw_emad_transmit_retry(trans->core, trans);
 }
 
+static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
+                                       struct mlxsw_reg_trans *trans,
+                                       struct sk_buff *skb)
+{
+       int err;
+
+       if (!atomic_dec_and_test(&trans->active))
+               return;
+
+       err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
+       if (err == -EAGAIN) {
+               mlxsw_emad_transmit_retry(mlxsw_core, trans);
+       } else {
+               if (err == 0) {
+                       char *op_tlv = mlxsw_emad_op_tlv(skb);
+
+                       if (trans->cb)
+                               trans->cb(mlxsw_core,
+                                         mlxsw_emad_reg_payload(op_tlv),
+                                         trans->reg->len, trans->cb_priv);
+               }
+               mlxsw_emad_trans_finish(trans, err);
+       }
+}
+
+/* called with rcu read lock held */
 static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
                                        void *priv)
 {
        struct mlxsw_core *mlxsw_core = priv;
+       struct mlxsw_reg_trans *trans;
 
-       if (mlxsw_emad_is_resp(skb) &&
-           mlxsw_core->emad.trans_active &&
-           mlxsw_emad_get_tid(skb) == mlxsw_core->emad.tid) {
-               mlxsw_core->emad.resp_skb = skb;
-               mlxsw_core->emad.trans_active = false;
-               wake_up(&mlxsw_core->emad.wait);
-       } else {
-               dev_kfree_skb(skb);
+       if (!mlxsw_emad_is_resp(skb))
+               goto free_skb;
+
+       list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
+               if (mlxsw_emad_get_tid(skb) == trans->tid) {
+                       mlxsw_emad_process_response(mlxsw_core, trans, skb);
+                       break;
+               }
        }
+
+free_skb:
+       dev_kfree_skb(skb);
 }
 
 static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = {
@@ -528,18 +567,19 @@ static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core)
 
 static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
 {
+       u64 tid;
        int err;
 
        /* Set the upper 32 bits of the transaction ID field to a random
         * number. This allows us to discard EMADs addressed to other
         * devices.
         */
-       get_random_bytes(&mlxsw_core->emad.tid, 4);
-       mlxsw_core->emad.tid = mlxsw_core->emad.tid << 32;
+       get_random_bytes(&tid, 4);
+       tid <<= 32;
+       atomic64_set(&mlxsw_core->emad.tid, tid);
 
-       init_waitqueue_head(&mlxsw_core->emad.wait);
-       mlxsw_core->emad.trans_active = false;
-       mutex_init(&mlxsw_core->emad.lock);
+       INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
+       spin_lock_init(&mlxsw_core->emad.trans_list_lock);
 
        err = mlxsw_core_rx_listener_register(mlxsw_core,
                                              &mlxsw_emad_rx_listener,
@@ -597,6 +637,59 @@ static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
        return skb;
 }
 
+static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
+                                const struct mlxsw_reg_info *reg,
+                                char *payload,
+                                enum mlxsw_core_reg_access_type type,
+                                struct mlxsw_reg_trans *trans,
+                                struct list_head *bulk_list,
+                                mlxsw_reg_trans_cb_t *cb,
+                                unsigned long cb_priv, u64 tid)
+{
+       struct sk_buff *skb;
+       int err;
+
+       dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
+               trans->tid, reg->id, mlxsw_reg_id_str(reg->id),
+               mlxsw_core_reg_access_type_str(type));
+
+       skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
+       if (!skb)
+               return -ENOMEM;
+
+       list_add_tail(&trans->bulk_list, bulk_list);
+       trans->core = mlxsw_core;
+       trans->tx_skb = skb;
+       trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
+       trans->tx_info.is_emad = true;
+       INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
+       trans->tid = tid;
+       init_completion(&trans->completion);
+       trans->cb = cb;
+       trans->cb_priv = cb_priv;
+       trans->reg = reg;
+       trans->type = type;
+
+       mlxsw_emad_construct(skb, reg, payload, type, trans->tid);
+       mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);
+
+       spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
+       list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
+       spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
+       err = mlxsw_emad_transmit(mlxsw_core, trans);
+       if (err)
+               goto err_out;
+       return 0;
+
+err_out:
+       spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
+       list_del_rcu(&trans->list);
+       spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
+       list_del(&trans->bulk_list);
+       dev_kfree_skb(trans->tx_skb);
+       return err;
+}
+
 /*****************
  * Core functions
  *****************/
@@ -686,24 +779,6 @@ static const struct file_operations mlxsw_core_rx_stats_dbg_ops = {
        .llseek = seq_lseek
 };
 
-static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
-                                   const char *buf, size_t size)
-{
-       __be32 *m = (__be32 *) buf;
-       int i;
-       int count = size / sizeof(__be32);
-
-       for (i = count - 1; i >= 0; i--)
-               if (m[i])
-                       break;
-       i++;
-       count = i ? i : 1;
-       for (i = 0; i < count; i += 4)
-               dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
-                       i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
-                       be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
-}
-
 int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
 {
        spin_lock(&mlxsw_core_driver_list_lock);
@@ -816,9 +891,168 @@ static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
        return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index);
 }
 
+static int
+mlxsw_devlink_sb_pool_get(struct devlink *devlink,
+                         unsigned int sb_index, u16 pool_index,
+                         struct devlink_sb_pool_info *pool_info)
+{
+       struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+       struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+       if (!mlxsw_driver->sb_pool_get)
+               return -EOPNOTSUPP;
+       return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
+                                        pool_index, pool_info);
+}
+
+static int
+mlxsw_devlink_sb_pool_set(struct devlink *devlink,
+                         unsigned int sb_index, u16 pool_index, u32 size,
+                         enum devlink_sb_threshold_type threshold_type)
+{
+       struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+       struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+       if (!mlxsw_driver->sb_pool_set)
+               return -EOPNOTSUPP;
+       return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
+                                        pool_index, size, threshold_type);
+}
+
+static void *__dl_port(struct devlink_port *devlink_port)
+{
+       return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
+}
+
+static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
+                                         unsigned int sb_index, u16 pool_index,
+                                         u32 *p_threshold)
+{
+       struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+       struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+       struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+       if (!mlxsw_driver->sb_port_pool_get)
+               return -EOPNOTSUPP;
+       return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
+                                             pool_index, p_threshold);
+}
+
+static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
+                                         unsigned int sb_index, u16 pool_index,
+                                         u32 threshold)
+{
+       struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+       struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+       struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+       if (!mlxsw_driver->sb_port_pool_set)
+               return -EOPNOTSUPP;
+       return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
+                                             pool_index, threshold);
+}
+
+static int
+mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
+                                 unsigned int sb_index, u16 tc_index,
+                                 enum devlink_sb_pool_type pool_type,
+                                 u16 *p_pool_index, u32 *p_threshold)
+{
+       struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+       struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+       struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+       if (!mlxsw_driver->sb_tc_pool_bind_get)
+               return -EOPNOTSUPP;
+       return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
+                                                tc_index, pool_type,
+                                                p_pool_index, p_threshold);
+}
+
+static int
+mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
+                                 unsigned int sb_index, u16 tc_index,
+                                 enum devlink_sb_pool_type pool_type,
+                                 u16 pool_index, u32 threshold)
+{
+       struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+       struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+       struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+       if (!mlxsw_driver->sb_tc_pool_bind_set)
+               return -EOPNOTSUPP;
+       return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
+                                                tc_index, pool_type,
+                                                pool_index, threshold);
+}
+
+static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
+                                        unsigned int sb_index)
+{
+       struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+       struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+       if (!mlxsw_driver->sb_occ_snapshot)
+               return -EOPNOTSUPP;
+       return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
+}
+
+static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
+                                         unsigned int sb_index)
+{
+       struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+       struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+       if (!mlxsw_driver->sb_occ_max_clear)
+               return -EOPNOTSUPP;
+       return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
+}
+
+static int
+mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
+                                  unsigned int sb_index, u16 pool_index,
+                                  u32 *p_cur, u32 *p_max)
+{
+       struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+       struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+       struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+       if (!mlxsw_driver->sb_occ_port_pool_get)
+               return -EOPNOTSUPP;
+       return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
+                                                 pool_index, p_cur, p_max);
+}
+
+static int
+mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
+                                     unsigned int sb_index, u16 tc_index,
+                                     enum devlink_sb_pool_type pool_type,
+                                     u32 *p_cur, u32 *p_max)
+{
+       struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+       struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+       struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+       if (!mlxsw_driver->sb_occ_tc_port_bind_get)
+               return -EOPNOTSUPP;
+       return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
+                                                    sb_index, tc_index,
+                                                    pool_type, p_cur, p_max);
+}
+
 static const struct devlink_ops mlxsw_devlink_ops = {
-       .port_split     = mlxsw_devlink_port_split,
-       .port_unsplit   = mlxsw_devlink_port_unsplit,
+       .port_split                     = mlxsw_devlink_port_split,
+       .port_unsplit                   = mlxsw_devlink_port_unsplit,
+       .sb_pool_get                    = mlxsw_devlink_sb_pool_get,
+       .sb_pool_set                    = mlxsw_devlink_sb_pool_set,
+       .sb_port_pool_get               = mlxsw_devlink_sb_port_pool_get,
+       .sb_port_pool_set               = mlxsw_devlink_sb_port_pool_set,
+       .sb_tc_pool_bind_get            = mlxsw_devlink_sb_tc_pool_bind_get,
+       .sb_tc_pool_bind_set            = mlxsw_devlink_sb_tc_pool_bind_set,
+       .sb_occ_snapshot                = mlxsw_devlink_sb_occ_snapshot,
+       .sb_occ_max_clear               = mlxsw_devlink_sb_occ_max_clear,
+       .sb_occ_port_pool_get           = mlxsw_devlink_sb_occ_port_pool_get,
+       .sb_occ_tc_port_bind_get        = mlxsw_devlink_sb_occ_tc_port_bind_get,
 };
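
For illustration only, a minimal driver-side implementation of one of these callbacks; field names assume struct devlink_sb_pool_info as of this kernel, and all values are placeholders:

static int example_sb_pool_get(struct mlxsw_core *mlxsw_core,
			       unsigned int sb_index, u16 pool_index,
			       struct devlink_sb_pool_info *pool_info)
{
	/* report a single static ingress pool with dynamic thresholds */
	pool_info->pool_type = DEVLINK_SB_POOL_TYPE_INGRESS;
	pool_info->size = 16 * 1024 * 1024;	/* bytes, placeholder */
	pool_info->threshold_type = DEVLINK_SB_THRESHOLD_TYPE_DYNAMIC;
	return 0;
}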
 
 int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
@@ -1102,56 +1336,112 @@ void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
 }
 EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
 
+static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
+{
+       return atomic64_inc_return(&mlxsw_core->emad.tid);
+}
+
 static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
                                      const struct mlxsw_reg_info *reg,
                                      char *payload,
-                                     enum mlxsw_core_reg_access_type type)
+                                     enum mlxsw_core_reg_access_type type,
+                                     struct list_head *bulk_list,
+                                     mlxsw_reg_trans_cb_t *cb,
+                                     unsigned long cb_priv)
 {
+       u64 tid = mlxsw_core_tid_get(mlxsw_core);
+       struct mlxsw_reg_trans *trans;
        int err;
-       char *op_tlv;
-       struct sk_buff *skb;
-       struct mlxsw_tx_info tx_info = {
-               .local_port = MLXSW_PORT_CPU_PORT,
-               .is_emad = true,
-       };
 
-       skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
-       if (!skb)
+       trans = kzalloc(sizeof(*trans), GFP_KERNEL);
+       if (!trans)
                return -ENOMEM;
 
-       mlxsw_emad_construct(skb, reg, payload, type, mlxsw_core);
-       mlxsw_core->driver->txhdr_construct(skb, &tx_info);
+       err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
+                                   bulk_list, cb, cb_priv, tid);
+       if (err) {
+               kfree(trans);
+               return err;
+       }
+       return 0;
+}
 
-       dev_dbg(mlxsw_core->bus_info->dev, "EMAD send (tid=%llx)\n",
-               mlxsw_core->emad.tid);
-       mlxsw_core_buf_dump_dbg(mlxsw_core, skb->data, skb->len);
+int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
+                         const struct mlxsw_reg_info *reg, char *payload,
+                         struct list_head *bulk_list,
+                         mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
+{
+       return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
+                                         MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
+                                         bulk_list, cb, cb_priv);
+}
+EXPORT_SYMBOL(mlxsw_reg_trans_query);
 
-       err = mlxsw_emad_transmit(mlxsw_core, skb, &tx_info);
-       if (!err) {
-               op_tlv = mlxsw_emad_op_tlv(mlxsw_core->emad.resp_skb);
-               memcpy(payload, mlxsw_emad_reg_payload(op_tlv),
-                      reg->len);
+int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
+                         const struct mlxsw_reg_info *reg, char *payload,
+                         struct list_head *bulk_list,
+                         mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
+{
+       return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
+                                         MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
+                                         bulk_list, cb, cb_priv);
+}
+EXPORT_SYMBOL(mlxsw_reg_trans_write);
 
-               dev_dbg(mlxsw_core->bus_info->dev, "EMAD recv (tid=%llx)\n",
-                       mlxsw_core->emad.tid - 1);
-               mlxsw_core_buf_dump_dbg(mlxsw_core,
-                                       mlxsw_core->emad.resp_skb->data,
-                                       mlxsw_core->emad.resp_skb->len);
+static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
+{
+       struct mlxsw_core *mlxsw_core = trans->core;
+       int err;
 
-               dev_kfree_skb(mlxsw_core->emad.resp_skb);
-       }
+       wait_for_completion(&trans->completion);
+       cancel_delayed_work_sync(&trans->timeout_dw);
+       err = trans->err;
 
+       if (trans->retries)
+               dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
+                        trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
+       if (err)
+               dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
+                       trans->tid, trans->reg->id,
+                       mlxsw_reg_id_str(trans->reg->id),
+                       mlxsw_core_reg_access_type_str(trans->type),
+                       trans->emad_status,
+                       mlxsw_emad_op_tlv_status_str(trans->emad_status));
+
+       list_del(&trans->bulk_list);
+       kfree_rcu(trans, rcu);
        return err;
 }
 
+int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
+{
+       struct mlxsw_reg_trans *trans;
+       struct mlxsw_reg_trans *tmp;
+       int sum_err = 0;
+       int err;
+
+       list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
+               err = mlxsw_reg_trans_wait(trans);
+               if (err && sum_err == 0)
+                       sum_err = err; /* first error to be returned */
+       }
+       return sum_err;
+}
+EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);
+
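A usage sketch of the new bulk transaction API; the register infos and payload buffers are assumed to be prepared by the caller:

static int example_bulk_query(struct mlxsw_core *mlxsw_core,
			      const struct mlxsw_reg_info *reg_a, char *pl_a,
			      const struct mlxsw_reg_info *reg_b, char *pl_b)
{
	LIST_HEAD(bulk_list);
	int err, err2;

	err = mlxsw_reg_trans_query(mlxsw_core, reg_a, pl_a, &bulk_list,
				    NULL, 0);
	if (err)
		goto out;
	err = mlxsw_reg_trans_query(mlxsw_core, reg_b, pl_b, &bulk_list,
				    NULL, 0);
out:
	/* always drain what was queued; report the first error seen */
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	return err ? err : err2;
}
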
 static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
                                     const struct mlxsw_reg_info *reg,
                                     char *payload,
                                     enum mlxsw_core_reg_access_type type)
 {
+       enum mlxsw_emad_op_tlv_status status;
        int err, n_retry;
        char *in_mbox, *out_mbox, *tmp;
 
+       dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
+               reg->id, mlxsw_reg_id_str(reg->id),
+               mlxsw_core_reg_access_type_str(type));
+
        in_mbox = mlxsw_cmd_mbox_alloc();
        if (!in_mbox)
                return -ENOMEM;
@@ -1162,7 +1452,8 @@ static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
                goto free_in_mbox;
        }
 
-       mlxsw_emad_pack_op_tlv(in_mbox, reg, type, mlxsw_core);
+       mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
+                              mlxsw_core_tid_get(mlxsw_core));
        tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
        mlxsw_emad_pack_reg_tlv(tmp, reg, payload);
 
@@ -1170,60 +1461,61 @@ static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
 retry:
        err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
        if (!err) {
-               err = mlxsw_emad_process_status(mlxsw_core, out_mbox);
-               if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
-                       goto retry;
+               err = mlxsw_emad_process_status(out_mbox, &status);
+               if (err) {
+                       if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
+                               goto retry;
+                       dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
+                               status, mlxsw_emad_op_tlv_status_str(status));
+               }
        }
 
        if (!err)
                memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
                       reg->len);
 
-       mlxsw_core->emad.tid++;
        mlxsw_cmd_mbox_free(out_mbox);
 free_in_mbox:
        mlxsw_cmd_mbox_free(in_mbox);
+       if (err)
+               dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
+                       reg->id, mlxsw_reg_id_str(reg->id),
+                       mlxsw_core_reg_access_type_str(type));
        return err;
 }
 
+static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
+                                    char *payload, size_t payload_len,
+                                    unsigned long cb_priv)
+{
+       char *orig_payload = (char *) cb_priv;
+
+       memcpy(orig_payload, payload, payload_len);
+}
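The unsigned long cb_priv is an opaque cookie: the issuer casts a pointer in, and the completion callback casts it back out, exactly as mlxsw_core_reg_access_cb() does with the caller's payload buffer. A hedged sketch of the same pattern carrying a driver-private structure instead (names hypothetical; mlxsw_reg_sbpm_unpack() is the helper added later in this patch):

struct example_occ {
	u32 cur;
	u32 max;
};

static void example_occ_query_cb(struct mlxsw_core *mlxsw_core,
				 char *sbpm_pl, size_t sbpm_pl_len,
				 unsigned long cb_priv)
{
	struct example_occ *occ = (struct example_occ *) cb_priv;

	/* Runs from the EMAD response path; only the response payload
	 * and the cookie are meaningful here, so keep the work minimal.
	 */
	mlxsw_reg_sbpm_unpack(sbpm_pl, &occ->cur, &occ->max);
}

The issuer would pass (unsigned long) &occ as cb_priv when queueing the query, keeping occ alive until the bulk wait returns.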
+
 static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
                                 const struct mlxsw_reg_info *reg,
                                 char *payload,
                                 enum mlxsw_core_reg_access_type type)
 {
-       u64 cur_tid;
+       LIST_HEAD(bulk_list);
        int err;
 
-       if (mutex_lock_interruptible(&mlxsw_core->emad.lock)) {
-               dev_err(mlxsw_core->bus_info->dev, "Reg access interrupted (reg_id=%x(%s),type=%s)\n",
-                       reg->id, mlxsw_reg_id_str(reg->id),
-                       mlxsw_core_reg_access_type_str(type));
-               return -EINTR;
-       }
-
-       cur_tid = mlxsw_core->emad.tid;
-       dev_dbg(mlxsw_core->bus_info->dev, "Reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
-               cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
-               mlxsw_core_reg_access_type_str(type));
-
        /* During initialization the EMAD interface is not available to us,
         * so we default to the command interface. We switch to the EMAD
         * interface after setting the appropriate traps.
         */
        if (!mlxsw_core->emad.use_emad)
-               err = mlxsw_core_reg_access_cmd(mlxsw_core, reg,
-                                               payload, type);
-       else
-               err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
+               return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
                                                 payload, type);
 
+       err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
+                                        payload, type, &bulk_list,
+                                        mlxsw_core_reg_access_cb,
+                                        (unsigned long) payload);
        if (err)
-               dev_err(mlxsw_core->bus_info->dev, "Reg access failed (tid=%llx,reg_id=%x(%s),type=%s)\n",
-                       cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
-                       mlxsw_core_reg_access_type_str(type));
-
-       mutex_unlock(&mlxsw_core->emad.lock);
-       return err;
+               return err;
+       return mlxsw_reg_trans_bulk_wait(&bulk_list);
 }
 
 int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
@@ -1374,6 +1666,24 @@ void mlxsw_core_port_fini(struct mlxsw_core_port *mlxsw_core_port)
 }
 EXPORT_SYMBOL(mlxsw_core_port_fini);
 
+static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
+                                   const char *buf, size_t size)
+{
+       __be32 *m = (__be32 *) buf;
+       int i;
+       int count = size / sizeof(__be32);
+
+       for (i = count - 1; i >= 0; i--)
+               if (m[i])
+                       break;
+       i++;
+       count = i ? i : 1;
+       for (i = 0; i < count; i += 4)
+               dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
+                       i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
+                       be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
+}
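The helper scans backwards to drop trailing all-zero words, then prints four big-endian 32-bit words per line. A worked example of the trimming, assuming a 32-byte buffer with only the first two words set:

/* m[] = { 0x10000000, 0x00000002, 0, 0, 0, 0, 0, 0 }, size = 32:
 * the backward scan stops at i = 1, so count becomes 2 and a single
 * line is printed:
 *   0000 - 10000000 00000002 00000000 00000000
 * Note the print loop steps by 4 and always reads m[i + 1..3], so it
 * may print words past the trimmed count; they still lie within the
 * original buffer.
 */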
+
 int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
                   u32 in_mod, bool out_mbox_direct,
                   char *in_mbox, size_t in_mbox_size,
@@ -1416,17 +1726,35 @@ int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
 }
 EXPORT_SYMBOL(mlxsw_cmd_exec);
 
+int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
+{
+       return queue_delayed_work(mlxsw_wq, dwork, delay);
+}
+EXPORT_SYMBOL(mlxsw_core_schedule_dw);
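mlxsw_core_schedule_dw() lets bus and driver code defer work onto the mlxsw-private workqueue (created below in module init) instead of a system one; the EMAD timeout handling above relies on it. A minimal, hypothetical user:

static void example_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);

	/* ... deferred processing ...; optionally re-arm: */
	mlxsw_core_schedule_dw(dwork, msecs_to_jiffies(100));
}

static void example_arm(struct delayed_work *dwork)
{
	INIT_DELAYED_WORK(dwork, example_work_fn);
	mlxsw_core_schedule_dw(dwork, 0);	/* run as soon as possible */
}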
+
 static int __init mlxsw_core_module_init(void)
 {
-       mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
-       if (!mlxsw_core_dbg_root)
+       int err;
+
+       mlxsw_wq = create_workqueue(mlxsw_core_driver_name);
+       if (!mlxsw_wq)
                return -ENOMEM;
+       mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
+       if (!mlxsw_core_dbg_root) {
+               err = -ENOMEM;
+               goto err_debugfs_create_dir;
+       }
        return 0;
+
+err_debugfs_create_dir:
+       destroy_workqueue(mlxsw_wq);
+       return err;
 }
 
 static void __exit mlxsw_core_module_exit(void)
 {
        debugfs_remove_recursive(mlxsw_core_dbg_root);
+       destroy_workqueue(mlxsw_wq);
 }
 
 module_init(mlxsw_core_module_init);
index f3cebef9c31c540eb8cd558e9686889e865c060e..436bc49df6ab5bcc23ccd2c199c3891597e24289 100644
@@ -43,6 +43,7 @@
 #include <linux/gfp.h>
 #include <linux/types.h>
 #include <linux/skbuff.h>
+#include <linux/workqueue.h>
 #include <net/devlink.h>
 
 #include "trap.h"
@@ -108,6 +109,19 @@ void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
                                          const struct mlxsw_event_listener *el,
                                          void *priv);
 
+typedef void mlxsw_reg_trans_cb_t(struct mlxsw_core *mlxsw_core, char *payload,
+                                 size_t payload_len, unsigned long cb_priv);
+
+int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
+                         const struct mlxsw_reg_info *reg, char *payload,
+                         struct list_head *bulk_list,
+                         mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv);
+int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
+                         const struct mlxsw_reg_info *reg, char *payload,
+                         struct list_head *bulk_list,
+                         mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv);
+int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list);
+
 int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
                    const struct mlxsw_reg_info *reg, char *payload);
 int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
@@ -137,11 +151,22 @@ struct mlxsw_core_port {
        struct devlink_port devlink_port;
 };
 
+static inline void *
+mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
+{
+       /* mlxsw_core_port is guaranteed to always be the first field in the
+        * driver port structure, so this pointer doubles as the driver priv.
+        */
+       return mlxsw_core_port;
+}
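Because of that first-member rule, the returned pointer is numerically identical to mlxsw_core_port itself; no container_of() arithmetic is needed. A conforming (hypothetical) driver structure and accessor:

struct example_sp_port {
	struct mlxsw_core_port core_port;	/* must remain the first field */
	u16 vid;
	/* ... further driver-private state ... */
};

static u16 example_port_vid(struct mlxsw_core_port *mlxsw_core_port)
{
	struct example_sp_port *port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);

	return port->vid;
}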
+
 int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core,
                         struct mlxsw_core_port *mlxsw_core_port, u8 local_port,
                         struct net_device *dev, bool split, u32 split_group);
 void mlxsw_core_port_fini(struct mlxsw_core_port *mlxsw_core_port);
 
+int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay);
+
 #define MLXSW_CONFIG_PROFILE_SWID_COUNT 8
 
 struct mlxsw_swid_config {
@@ -200,6 +225,37 @@ struct mlxsw_driver {
        int (*port_split)(struct mlxsw_core *mlxsw_core, u8 local_port,
                          unsigned int count);
        int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u8 local_port);
+       int (*sb_pool_get)(struct mlxsw_core *mlxsw_core,
+                          unsigned int sb_index, u16 pool_index,
+                          struct devlink_sb_pool_info *pool_info);
+       int (*sb_pool_set)(struct mlxsw_core *mlxsw_core,
+                          unsigned int sb_index, u16 pool_index, u32 size,
+                          enum devlink_sb_threshold_type threshold_type);
+       int (*sb_port_pool_get)(struct mlxsw_core_port *mlxsw_core_port,
+                               unsigned int sb_index, u16 pool_index,
+                               u32 *p_threshold);
+       int (*sb_port_pool_set)(struct mlxsw_core_port *mlxsw_core_port,
+                               unsigned int sb_index, u16 pool_index,
+                               u32 threshold);
+       int (*sb_tc_pool_bind_get)(struct mlxsw_core_port *mlxsw_core_port,
+                                  unsigned int sb_index, u16 tc_index,
+                                  enum devlink_sb_pool_type pool_type,
+                                  u16 *p_pool_index, u32 *p_threshold);
+       int (*sb_tc_pool_bind_set)(struct mlxsw_core_port *mlxsw_core_port,
+                                  unsigned int sb_index, u16 tc_index,
+                                  enum devlink_sb_pool_type pool_type,
+                                  u16 pool_index, u32 threshold);
+       int (*sb_occ_snapshot)(struct mlxsw_core *mlxsw_core,
+                              unsigned int sb_index);
+       int (*sb_occ_max_clear)(struct mlxsw_core *mlxsw_core,
+                               unsigned int sb_index);
+       int (*sb_occ_port_pool_get)(struct mlxsw_core_port *mlxsw_core_port,
+                                   unsigned int sb_index, u16 pool_index,
+                                   u32 *p_cur, u32 *p_max);
+       int (*sb_occ_tc_port_bind_get)(struct mlxsw_core_port *mlxsw_core_port,
+                                      unsigned int sb_index, u16 tc_index,
+                                      enum devlink_sb_pool_type pool_type,
+                                      u32 *p_cur, u32 *p_max);
        void (*txhdr_construct)(struct sk_buff *skb,
                                const struct mlxsw_tx_info *tx_info);
        u8 txhdr_len;
index 57e4a6337ae31678c6a1cf7e5361ce647f127197..1977e7a5c5301cc591fade66cb290eb7dbd163e1 100644
@@ -3566,6 +3566,10 @@ MLXSW_ITEM32(reg, sbcm, dir, 0x00, 0, 2);
  */
 MLXSW_ITEM32(reg, sbcm, min_buff, 0x18, 0, 24);
 
+/* Shared max_buff limits for the dynamic threshold in SBCM and SBPM. */
+#define MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN 1
+#define MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX 14
+
 /* reg_sbcm_max_buff
  * When the pool associated with the port-pg/tclass is configured to
  * static, this is the maximum buffer size for the limiter, in cells.
@@ -3632,6 +3636,27 @@ MLXSW_ITEM32(reg, sbpm, pool, 0x00, 8, 4);
  */
 MLXSW_ITEM32(reg, sbpm, dir, 0x00, 0, 2);
 
+/* reg_sbpm_buff_occupancy
+ * Current buffer occupancy in cells.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, sbpm, buff_occupancy, 0x10, 0, 24);
+
+/* reg_sbpm_clr
+ * Clear Max Buffer Occupancy. When this bit is set, the max_buff_occupancy
+ * field is cleared (and a new max value is tracked from the time the clear
+ * was performed).
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, sbpm, clr, 0x14, 31, 1);
+
+/* reg_sbpm_max_buff_occupancy
+ * Maximum value of buffer occupancy in cells monitored. Cleared by
+ * writing to the clr field.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, sbpm, max_buff_occupancy, 0x14, 0, 24);
+
 /* reg_sbpm_min_buff
  * Minimum buffer size for the limiter, in cells.
  * Access: RW
@@ -3652,17 +3677,25 @@ MLXSW_ITEM32(reg, sbpm, min_buff, 0x18, 0, 24);
 MLXSW_ITEM32(reg, sbpm, max_buff, 0x1C, 0, 24);
 
 static inline void mlxsw_reg_sbpm_pack(char *payload, u8 local_port, u8 pool,
-                                      enum mlxsw_reg_sbxx_dir dir,
+                                      enum mlxsw_reg_sbxx_dir dir, bool clr,
                                       u32 min_buff, u32 max_buff)
 {
        MLXSW_REG_ZERO(sbpm, payload);
        mlxsw_reg_sbpm_local_port_set(payload, local_port);
        mlxsw_reg_sbpm_pool_set(payload, pool);
        mlxsw_reg_sbpm_dir_set(payload, dir);
+       mlxsw_reg_sbpm_clr_set(payload, clr);
        mlxsw_reg_sbpm_min_buff_set(payload, min_buff);
        mlxsw_reg_sbpm_max_buff_set(payload, max_buff);
 }
 
+static inline void mlxsw_reg_sbpm_unpack(char *payload, u32 *p_buff_occupancy,
+                                        u32 *p_max_buff_occupancy)
+{
+       *p_buff_occupancy = mlxsw_reg_sbpm_buff_occupancy_get(payload);
+       *p_max_buff_occupancy = mlxsw_reg_sbpm_max_buff_occupancy_get(payload);
+}
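Packing SBPM with clr set turns a query into a clear-on-read of the max watermark, while min_buff/max_buff of 0 leave the limits untouched on a query. A hedged sketch of a one-off synchronous occupancy read (hypothetical helper; the driver itself batches these through the new transaction API instead):

static int example_sbpm_occ_read(struct mlxsw_core *core, u8 local_port,
				 u8 pool, u32 *p_cur, u32 *p_max)
{
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	int err;

	/* clr = false: read occupancy without resetting the watermark */
	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool,
			    MLXSW_REG_SBXX_DIR_INGRESS, false, 0, 0);
	err = mlxsw_reg_query(core, MLXSW_REG(sbpm), sbpm_pl);
	if (err)
		return err;
	mlxsw_reg_sbpm_unpack(sbpm_pl, p_cur, p_max);
	return 0;
}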
+
 /* SBMM - Shared Buffer Multicast Management Register
  * --------------------------------------------------
  * The SBMM register configures and retrieves the shared buffer allocation
@@ -3718,6 +3751,104 @@ static inline void mlxsw_reg_sbmm_pack(char *payload, u8 prio, u32 min_buff,
        mlxsw_reg_sbmm_pool_set(payload, pool);
 }
 
+/* SBSR - Shared Buffer Status Register
+ * ------------------------------------
+ * The SBSR register retrieves the shared buffer occupancy according to
+ * Port-Pool. Note that this register enables reading a large amount of data.
+ * It is the user's responsibility to limit the amount of data so that the
+ * response fits within the maximum transfer unit. In case the response
+ * exceeds the maximum transfer unit, it will be truncated with no notice.
+ */
+#define MLXSW_REG_SBSR_ID 0xB005
+#define MLXSW_REG_SBSR_BASE_LEN 0x5C /* base length, without records */
+#define MLXSW_REG_SBSR_REC_LEN 0x8 /* record length */
+#define MLXSW_REG_SBSR_REC_MAX_COUNT 120
+#define MLXSW_REG_SBSR_LEN (MLXSW_REG_SBSR_BASE_LEN +  \
+                           MLXSW_REG_SBSR_REC_LEN *    \
+                           MLXSW_REG_SBSR_REC_MAX_COUNT)
+
+static const struct mlxsw_reg_info mlxsw_reg_sbsr = {
+       .id = MLXSW_REG_SBSR_ID,
+       .len = MLXSW_REG_SBSR_LEN,
+};
+
+/* reg_sbsr_clr
+ * Clear Max Buffer Occupancy. When this bit is set, the max_buff_occupancy
+ * field is cleared (and a new max value is tracked from the time the clear
+ * was performed).
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, sbsr, clr, 0x00, 31, 1);
+
+/* reg_sbsr_ingress_port_mask
+ * Bit vector for all ingress network ports.
+ * Indicates which of the ports (for which the relevant bit is set)
+ * are affected by the set operation. Configuration of any other port
+ * does not change.
+ * Access: Index
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sbsr, ingress_port_mask, 0x10, 0x20, 1);
+
+/* reg_sbsr_pg_buff_mask
+ * Bit vector for all switch priority groups.
+ * Indicates which of the priorities (for which the relevant bit is set)
+ * are affected by the set operation. Configuration of any other priority
+ * does not change.
+ * Range is 0..cap_max_pg_buffers - 1
+ * Access: Index
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sbsr, pg_buff_mask, 0x30, 0x4, 1);
+
+/* reg_sbsr_egress_port_mask
+ * Bit vector for all egress network ports.
+ * Indicates which of the ports (for which the relevant bit is set)
+ * are affected by the set operation. Configuration of any other port
+ * does not change.
+ * Access: Index
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sbsr, egress_port_mask, 0x34, 0x20, 1);
+
+/* reg_sbsr_tclass_mask
+ * Bit vector for all traffic classes.
+ * Indicates which of the traffic classes (for which the relevant bit is
+ * set) are affected by the set operation. Configuration of any other
+ * traffic class does not change.
+ * Range is 0..cap_max_tclass - 1
+ * Access: Index
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sbsr, tclass_mask, 0x54, 0x8, 1);
+
+static inline void mlxsw_reg_sbsr_pack(char *payload, bool clr)
+{
+       MLXSW_REG_ZERO(sbsr, payload);
+       mlxsw_reg_sbsr_clr_set(payload, clr);
+}
+
+/* reg_sbsr_rec_buff_occupancy
+ * Current buffer occupancy in cells.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sbsr, rec_buff_occupancy, MLXSW_REG_SBSR_BASE_LEN,
+                    0, 24, MLXSW_REG_SBSR_REC_LEN, 0x00, false);
+
+/* reg_sbsr_rec_max_buff_occupancy
+ * Maximum value of buffer occupancy in cells monitored. Cleared by
+ * writing to the clr field.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sbsr, rec_max_buff_occupancy, MLXSW_REG_SBSR_BASE_LEN,
+                    0, 24, MLXSW_REG_SBSR_REC_LEN, 0x04, false);
+
+static inline void mlxsw_reg_sbsr_rec_unpack(char *payload, int rec_index,
+                                            u32 *p_buff_occupancy,
+                                            u32 *p_max_buff_occupancy)
+{
+       *p_buff_occupancy =
+               mlxsw_reg_sbsr_rec_buff_occupancy_get(payload, rec_index);
+       *p_max_buff_occupancy =
+               mlxsw_reg_sbsr_rec_max_buff_occupancy_get(payload, rec_index);
+}
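Each SBSR response carries rec_buff_occupancy/rec_max_buff_occupancy records addressed by rec_index, matching the mask bits the caller set. A hedged sketch of a single-port, all-PG synchronous query (hypothetical helper; linux/slab.h is assumed for the allocation):

static int example_sbsr_port_query(struct mlxsw_core *core, u8 local_port)
{
	char *sbsr_pl;
	u32 cur, max;
	int i, err;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL); /* ~1KB payload */
	if (!sbsr_pl)
		return -ENOMEM;

	mlxsw_reg_sbsr_pack(sbsr_pl, false);	/* keep the max watermarks */
	mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
	for (i = 0; i < 8; i++)			/* all 8 ingress PGs */
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);

	err = mlxsw_reg_query(core, MLXSW_REG(sbsr), sbsr_pl);
	if (err)
		goto out;
	for (i = 0; i < 8; i++) {
		mlxsw_reg_sbsr_rec_unpack(sbsr_pl, i, &cur, &max);
		/* ... consume cur/max for PG i ... */
	}
out:
	kfree(sbsr_pl);
	return err;
}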
+
 static inline const char *mlxsw_reg_id_str(u16 reg_id)
 {
        switch (reg_id) {
@@ -3813,6 +3944,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
                return "SBPM";
        case MLXSW_REG_SBMM_ID:
                return "SBMM";
+       case MLXSW_REG_SBSR_ID:
+               return "SBSR";
        default:
                return "*UNKNOWN*";
        }
index 19b3c144abc6f4f664e4e37dcbb6c37ba9f6760e..4a7273771028db08cc4e1bd9e76df1d05fd73785 100644
@@ -2434,6 +2434,7 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
 
 err_switchdev_init:
 err_lag_init:
+       mlxsw_sp_buffers_fini(mlxsw_sp);
 err_buffers_init:
 err_flood_init:
        mlxsw_sp_traps_fini(mlxsw_sp);
@@ -2449,6 +2450,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
 
        mlxsw_sp_switchdev_fini(mlxsw_sp);
+       mlxsw_sp_buffers_fini(mlxsw_sp);
        mlxsw_sp_traps_fini(mlxsw_sp);
        mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
        mlxsw_sp_ports_remove(mlxsw_sp);
@@ -2491,16 +2493,26 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = {
 };
 
 static struct mlxsw_driver mlxsw_sp_driver = {
-       .kind                   = MLXSW_DEVICE_KIND_SPECTRUM,
-       .owner                  = THIS_MODULE,
-       .priv_size              = sizeof(struct mlxsw_sp),
-       .init                   = mlxsw_sp_init,
-       .fini                   = mlxsw_sp_fini,
-       .port_split             = mlxsw_sp_port_split,
-       .port_unsplit           = mlxsw_sp_port_unsplit,
-       .txhdr_construct        = mlxsw_sp_txhdr_construct,
-       .txhdr_len              = MLXSW_TXHDR_LEN,
-       .profile                = &mlxsw_sp_config_profile,
+       .kind                           = MLXSW_DEVICE_KIND_SPECTRUM,
+       .owner                          = THIS_MODULE,
+       .priv_size                      = sizeof(struct mlxsw_sp),
+       .init                           = mlxsw_sp_init,
+       .fini                           = mlxsw_sp_fini,
+       .port_split                     = mlxsw_sp_port_split,
+       .port_unsplit                   = mlxsw_sp_port_unsplit,
+       .sb_pool_get                    = mlxsw_sp_sb_pool_get,
+       .sb_pool_set                    = mlxsw_sp_sb_pool_set,
+       .sb_port_pool_get               = mlxsw_sp_sb_port_pool_get,
+       .sb_port_pool_set               = mlxsw_sp_sb_port_pool_set,
+       .sb_tc_pool_bind_get            = mlxsw_sp_sb_tc_pool_bind_get,
+       .sb_tc_pool_bind_set            = mlxsw_sp_sb_tc_pool_bind_set,
+       .sb_occ_snapshot                = mlxsw_sp_sb_occ_snapshot,
+       .sb_occ_max_clear               = mlxsw_sp_sb_occ_max_clear,
+       .sb_occ_port_pool_get           = mlxsw_sp_sb_occ_port_pool_get,
+       .sb_occ_tc_port_bind_get        = mlxsw_sp_sb_occ_tc_port_bind_get,
+       .txhdr_construct                = mlxsw_sp_txhdr_construct,
+       .txhdr_len                      = MLXSW_TXHDR_LEN,
+       .profile                        = &mlxsw_sp_config_profile,
 };
 
 static int
@@ -2831,11 +2843,11 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
        lag->ref_count++;
        return 0;
 
+err_col_port_enable:
+       mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
 err_col_port_add:
        if (!lag->ref_count)
                mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
-err_col_port_enable:
-       mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
        return err;
 }
 
index 361b0c270b56c39a436448e3ed22e9b7f3b83a2f..e2c022d3e2f3bf19df2084049920f40b7eac4c26 100644
@@ -65,6 +65,7 @@
 #define MLXSW_SP_BYTES_PER_CELL 96
 
 #define MLXSW_SP_BYTES_TO_CELLS(b) DIV_ROUND_UP(b, MLXSW_SP_BYTES_PER_CELL)
+#define MLXSW_SP_CELLS_TO_BYTES(c) ((c) * MLXSW_SP_BYTES_PER_CELL)
 
 /* Maximum delay buffer needed in case of PAUSE frames, in cells.
  * Assumes 100m cable and maximum MTU.
@@ -117,6 +118,40 @@ static inline bool mlxsw_sp_fid_is_vfid(u16 fid)
        return fid >= MLXSW_SP_VFID_BASE;
 }
 
+struct mlxsw_sp_sb_pr {
+       enum mlxsw_reg_sbpr_mode mode;
+       u32 size;
+};
+
+struct mlxsw_sp_sb_occ {
+       u32 cur;
+       u32 max;
+};
+
+struct mlxsw_sp_sb_cm {
+       u32 min_buff;
+       u32 max_buff;
+       u8 pool;
+       struct mlxsw_sp_sb_occ occ;
+};
+
+struct mlxsw_sp_sb_pm {
+       u32 min_buff;
+       u32 max_buff;
+       struct mlxsw_sp_sb_occ occ;
+};
+
+#define MLXSW_SP_SB_POOL_COUNT 4
+#define MLXSW_SP_SB_TC_COUNT   8
+
+struct mlxsw_sp_sb {
+       struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT];
+       struct {
+               struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT];
+               struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT];
+       } ports[MLXSW_PORT_MAX_PORTS];
+};
+
 struct mlxsw_sp {
        struct {
                struct list_head list;
@@ -147,6 +182,7 @@ struct mlxsw_sp {
        struct mlxsw_sp_upper master_bridge;
        struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX];
        u8 port_to_module[MLXSW_PORT_MAX_PORTS];
+       struct mlxsw_sp_sb sb;
 };
 
 static inline struct mlxsw_sp_upper *
@@ -277,7 +313,39 @@ enum mlxsw_sp_flood_table {
 };
 
 int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp);
 int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port);
+int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
+                        unsigned int sb_index, u16 pool_index,
+                        struct devlink_sb_pool_info *pool_info);
+int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
+                        unsigned int sb_index, u16 pool_index, u32 size,
+                        enum devlink_sb_threshold_type threshold_type);
+int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
+                             unsigned int sb_index, u16 pool_index,
+                             u32 *p_threshold);
+int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
+                             unsigned int sb_index, u16 pool_index,
+                             u32 threshold);
+int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
+                                unsigned int sb_index, u16 tc_index,
+                                enum devlink_sb_pool_type pool_type,
+                                u16 *p_pool_index, u32 *p_threshold);
+int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
+                                unsigned int sb_index, u16 tc_index,
+                                enum devlink_sb_pool_type pool_type,
+                                u16 pool_index, u32 threshold);
+int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
+                            unsigned int sb_index);
+int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
+                             unsigned int sb_index);
+int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
+                                 unsigned int sb_index, u16 pool_index,
+                                 u32 *p_cur, u32 *p_max);
+int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
+                                    unsigned int sb_index, u16 tc_index,
+                                    enum devlink_sb_pool_type pool_type,
+                                    u32 *p_cur, u32 *p_max);
 
 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
index f58b1d3a619acd1f31befdc59d1e84c36f6f8ba2..a3720a0fad7d6ff0573256bc95d081e8d25ff2e1 100644
 #include <linux/types.h>
 #include <linux/dcbnl.h>
 #include <linux/if_ether.h>
+#include <linux/list.h>
 
 #include "spectrum.h"
 #include "core.h"
 #include "port.h"
 #include "reg.h"
 
-struct mlxsw_sp_pb {
-       u8 index;
-       u16 size;
-};
+static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
+                                                u8 pool,
+                                                enum mlxsw_reg_sbxx_dir dir)
+{
+       return &mlxsw_sp->sb.prs[dir][pool];
+}
 
-#define MLXSW_SP_PB(_index, _size)     \
-       {                               \
-               .index = _index,        \
-               .size = _size,          \
+static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
+                                                u8 local_port, u8 pg_buff,
+                                                enum mlxsw_reg_sbxx_dir dir)
+{
+       return &mlxsw_sp->sb.ports[local_port].cms[dir][pg_buff];
+}
+
+static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
+                                                u8 local_port, u8 pool,
+                                                enum mlxsw_reg_sbxx_dir dir)
+{
+       return &mlxsw_sp->sb.ports[local_port].pms[dir][pool];
+}
+
+static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u8 pool,
+                               enum mlxsw_reg_sbxx_dir dir,
+                               enum mlxsw_reg_sbpr_mode mode, u32 size)
+{
+       char sbpr_pl[MLXSW_REG_SBPR_LEN];
+       struct mlxsw_sp_sb_pr *pr;
+       int err;
+
+       mlxsw_reg_sbpr_pack(sbpr_pl, pool, dir, mode, size);
+       err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
+       if (err)
+               return err;
+
+       pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
+       pr->mode = mode;
+       pr->size = size;
+       return 0;
+}
+
+static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+                               u8 pg_buff, enum mlxsw_reg_sbxx_dir dir,
+                               u32 min_buff, u32 max_buff, u8 pool)
+{
+       char sbcm_pl[MLXSW_REG_SBCM_LEN];
+       int err;
+
+       mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, dir,
+                           min_buff, max_buff, pool);
+       err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
+       if (err)
+               return err;
+       if (pg_buff < MLXSW_SP_SB_TC_COUNT) {
+               struct mlxsw_sp_sb_cm *cm;
+
+               cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff, dir);
+               cm->min_buff = min_buff;
+               cm->max_buff = max_buff;
+               cm->pool = pool;
        }
+       return 0;
+}
+
+static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+                               u8 pool, enum mlxsw_reg_sbxx_dir dir,
+                               u32 min_buff, u32 max_buff)
+{
+       char sbpm_pl[MLXSW_REG_SBPM_LEN];
+       struct mlxsw_sp_sb_pm *pm;
+       int err;
+
+       mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false,
+                           min_buff, max_buff);
+       err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
+       if (err)
+               return err;
+
+       pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir);
+       pm->min_buff = min_buff;
+       pm->max_buff = max_buff;
+       return 0;
+}
+
+static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+                                   u8 pool, enum mlxsw_reg_sbxx_dir dir,
+                                   struct list_head *bulk_list)
+{
+       char sbpm_pl[MLXSW_REG_SBPM_LEN];
+
+       mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, true, 0, 0);
+       return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
+                                    bulk_list, NULL, 0);
+}
+
+static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
+                                       char *sbpm_pl, size_t sbpm_pl_len,
+                                       unsigned long cb_priv)
+{
+       struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv;
+
+       mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
+}
 
-static const struct mlxsw_sp_pb mlxsw_sp_pbs[] = {
-       MLXSW_SP_PB(0, 2 * MLXSW_SP_BYTES_TO_CELLS(ETH_FRAME_LEN)),
-       MLXSW_SP_PB(1, 0),
-       MLXSW_SP_PB(2, 0),
-       MLXSW_SP_PB(3, 0),
-       MLXSW_SP_PB(4, 0),
-       MLXSW_SP_PB(5, 0),
-       MLXSW_SP_PB(6, 0),
-       MLXSW_SP_PB(7, 0),
-       MLXSW_SP_PB(9, 2 * MLXSW_SP_BYTES_TO_CELLS(MLXSW_PORT_MAX_MTU)),
+static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+                                   u8 pool, enum mlxsw_reg_sbxx_dir dir,
+                                   struct list_head *bulk_list)
+{
+       char sbpm_pl[MLXSW_REG_SBPM_LEN];
+       struct mlxsw_sp_sb_pm *pm;
+
+       pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir);
+       mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false, 0, 0);
+       return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
+                                    bulk_list,
+                                    mlxsw_sp_sb_pm_occ_query_cb,
+                                    (unsigned long) pm);
+}
+
+static const u16 mlxsw_sp_pbs[] = {
+       [0] = 2 * MLXSW_SP_BYTES_TO_CELLS(ETH_FRAME_LEN),
+       [9] = 2 * MLXSW_SP_BYTES_TO_CELLS(MLXSW_PORT_MAX_MTU),
 };
 
 #define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs)
+#define MLXSW_SP_PB_UNUSED 8
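The table now uses designated initializers: ARRAY_SIZE() is driven by the highest index, and the unlisted entries default to zero. Spelled out:

/* mlxsw_sp_pbs[] has 10 entries (indices 0..9):
 *   [0]    = 2 * cells(ETH_FRAME_LEN)
 *   [1..8] = 0 (implicit)
 *   [9]    = 2 * cells(MLXSW_PORT_MAX_MTU)
 * Index 8 (MLXSW_SP_PB_UNUSED) is skipped by the init loop below and is
 * never written to the PBMC register.
 */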
 
 static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
 {
@@ -75,10 +177,9 @@ static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
        mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
                            0xffff, 0xffff / 2);
        for (i = 0; i < MLXSW_SP_PBS_LEN; i++) {
-               const struct mlxsw_sp_pb *pb;
-
-               pb = &mlxsw_sp_pbs[i];
-               mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pb->index, pb->size);
+               if (i == MLXSW_SP_PB_UNUSED)
+                       continue;
+               mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, mlxsw_sp_pbs[i]);
        }
        mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
                                         MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
@@ -108,181 +209,174 @@ static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
        return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
 }
 
-struct mlxsw_sp_sb_pool {
-       u8 pool;
-       enum mlxsw_reg_sbxx_dir dir;
-       enum mlxsw_reg_sbpr_mode mode;
-       u32 size;
-};
-
-#define MLXSW_SP_SB_POOL_INGRESS_SIZE                          \
+#define MLXSW_SP_SB_PR_INGRESS_SIZE                            \
        (15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS))
-#define MLXSW_SP_SB_POOL_EGRESS_SIZE                           \
+#define MLXSW_SP_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
+#define MLXSW_SP_SB_PR_EGRESS_SIZE                             \
        (14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS))
 
-#define MLXSW_SP_SB_POOL(_pool, _dir, _mode, _size)            \
-       {                                                       \
-               .pool = _pool,                                  \
-               .dir = _dir,                                    \
-               .mode = _mode,                                  \
-               .size = _size,                                  \
+#define MLXSW_SP_SB_PR(_mode, _size)   \
+       {                               \
+               .mode = _mode,          \
+               .size = _size,          \
        }
 
-#define MLXSW_SP_SB_POOL_INGRESS(_pool, _size)                 \
-       MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBXX_DIR_INGRESS,     \
-                        MLXSW_REG_SBPR_MODE_DYNAMIC, _size)
-
-#define MLXSW_SP_SB_POOL_EGRESS(_pool, _size)                  \
-       MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBXX_DIR_EGRESS,      \
-                        MLXSW_REG_SBPR_MODE_DYNAMIC, _size)
-
-static const struct mlxsw_sp_sb_pool mlxsw_sp_sb_pools[] = {
-       MLXSW_SP_SB_POOL_INGRESS(0, MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_POOL_INGRESS_SIZE)),
-       MLXSW_SP_SB_POOL_INGRESS(1, 0),
-       MLXSW_SP_SB_POOL_INGRESS(2, 0),
-       MLXSW_SP_SB_POOL_INGRESS(3, 0),
-       MLXSW_SP_SB_POOL_EGRESS(0, MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_POOL_EGRESS_SIZE)),
-       MLXSW_SP_SB_POOL_EGRESS(1, 0),
-       MLXSW_SP_SB_POOL_EGRESS(2, 0),
-       MLXSW_SP_SB_POOL_EGRESS(2, MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_POOL_EGRESS_SIZE)),
+static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_ingress[] = {
+       MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
+                      MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_SIZE)),
+       MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
+       MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
+       MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
+                      MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_MNG_SIZE)),
+};
+
+#define MLXSW_SP_SB_PRS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_ingress)
+
+static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_egress[] = {
+       MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
+                      MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_EGRESS_SIZE)),
+       MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
+       MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
+       MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
 };
 
-#define MLXSW_SP_SB_POOLS_LEN ARRAY_SIZE(mlxsw_sp_sb_pools)
+#define MLXSW_SP_SB_PRS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_egress)
 
-static int mlxsw_sp_sb_pools_init(struct mlxsw_sp *mlxsw_sp)
+static int __mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
+                                 enum mlxsw_reg_sbxx_dir dir,
+                                 const struct mlxsw_sp_sb_pr *prs,
+                                 size_t prs_len)
 {
-       char sbpr_pl[MLXSW_REG_SBPR_LEN];
        int i;
        int err;
 
-       for (i = 0; i < MLXSW_SP_SB_POOLS_LEN; i++) {
-               const struct mlxsw_sp_sb_pool *pool;
+       for (i = 0; i < prs_len; i++) {
+               const struct mlxsw_sp_sb_pr *pr;
 
-               pool = &mlxsw_sp_sb_pools[i];
-               mlxsw_reg_sbpr_pack(sbpr_pl, pool->pool, pool->dir,
-                                   pool->mode, pool->size);
-               err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
+               pr = &prs[i];
+               err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, dir,
+                                          pr->mode, pr->size);
                if (err)
                        return err;
        }
        return 0;
 }
 
-struct mlxsw_sp_sb_cm {
-       union {
-               u8 pg;
-               u8 tc;
-       } u;
-       enum mlxsw_reg_sbxx_dir dir;
-       u32 min_buff;
-       u32 max_buff;
-       u8 pool;
-};
+static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp)
+{
+       int err;
+
+       err = __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_INGRESS,
+                                    mlxsw_sp_sb_prs_ingress,
+                                    MLXSW_SP_SB_PRS_INGRESS_LEN);
+       if (err)
+               return err;
+       return __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_EGRESS,
+                                     mlxsw_sp_sb_prs_egress,
+                                     MLXSW_SP_SB_PRS_EGRESS_LEN);
+}
 
-#define MLXSW_SP_SB_CM(_pg_tc, _dir, _min_buff, _max_buff, _pool)      \
-       {                                                               \
-               .u.pg = _pg_tc,                                         \
-               .dir = _dir,                                            \
-               .min_buff = _min_buff,                                  \
-               .max_buff = _max_buff,                                  \
-               .pool = _pool,                                          \
+#define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool)    \
+       {                                               \
+               .min_buff = _min_buff,                  \
+               .max_buff = _max_buff,                  \
+               .pool = _pool,                          \
        }
 
-#define MLXSW_SP_SB_CM_INGRESS(_pg, _min_buff, _max_buff)              \
-       MLXSW_SP_SB_CM(_pg, MLXSW_REG_SBXX_DIR_INGRESS,                 \
-                      _min_buff, _max_buff, 0)
-
-#define MLXSW_SP_SB_CM_EGRESS(_tc, _min_buff, _max_buff)               \
-       MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBXX_DIR_EGRESS,                  \
-                      _min_buff, _max_buff, 0)
-
-#define MLXSW_SP_CPU_PORT_SB_CM_EGRESS(_tc)                            \
-       MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBXX_DIR_EGRESS, 104, 2, 3)
-
-static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms[] = {
-       MLXSW_SP_SB_CM_INGRESS(0, MLXSW_SP_BYTES_TO_CELLS(10000), 8),
-       MLXSW_SP_SB_CM_INGRESS(1, 0, 0),
-       MLXSW_SP_SB_CM_INGRESS(2, 0, 0),
-       MLXSW_SP_SB_CM_INGRESS(3, 0, 0),
-       MLXSW_SP_SB_CM_INGRESS(4, 0, 0),
-       MLXSW_SP_SB_CM_INGRESS(5, 0, 0),
-       MLXSW_SP_SB_CM_INGRESS(6, 0, 0),
-       MLXSW_SP_SB_CM_INGRESS(7, 0, 0),
-       MLXSW_SP_SB_CM_INGRESS(9, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff),
-       MLXSW_SP_SB_CM_EGRESS(0, MLXSW_SP_BYTES_TO_CELLS(1500), 9),
-       MLXSW_SP_SB_CM_EGRESS(1, MLXSW_SP_BYTES_TO_CELLS(1500), 9),
-       MLXSW_SP_SB_CM_EGRESS(2, MLXSW_SP_BYTES_TO_CELLS(1500), 9),
-       MLXSW_SP_SB_CM_EGRESS(3, MLXSW_SP_BYTES_TO_CELLS(1500), 9),
-       MLXSW_SP_SB_CM_EGRESS(4, MLXSW_SP_BYTES_TO_CELLS(1500), 9),
-       MLXSW_SP_SB_CM_EGRESS(5, MLXSW_SP_BYTES_TO_CELLS(1500), 9),
-       MLXSW_SP_SB_CM_EGRESS(6, MLXSW_SP_BYTES_TO_CELLS(1500), 9),
-       MLXSW_SP_SB_CM_EGRESS(7, MLXSW_SP_BYTES_TO_CELLS(1500), 9),
-       MLXSW_SP_SB_CM_EGRESS(8, 0, 0),
-       MLXSW_SP_SB_CM_EGRESS(9, 0, 0),
-       MLXSW_SP_SB_CM_EGRESS(10, 0, 0),
-       MLXSW_SP_SB_CM_EGRESS(11, 0, 0),
-       MLXSW_SP_SB_CM_EGRESS(12, 0, 0),
-       MLXSW_SP_SB_CM_EGRESS(13, 0, 0),
-       MLXSW_SP_SB_CM_EGRESS(14, 0, 0),
-       MLXSW_SP_SB_CM_EGRESS(15, 0, 0),
-       MLXSW_SP_SB_CM_EGRESS(16, 1, 0xff),
+static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = {
+       MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 8, 0),
+       MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+       MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+       MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+       MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+       MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+       MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+       MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+       MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */
+       MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(20000), 1, 3),
 };
 
-#define MLXSW_SP_SB_CMS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms)
+#define MLXSW_SP_SB_CMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_ingress)
+
+static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
+       MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+       MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+       MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+       MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+       MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+       MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+       MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+       MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+       MLXSW_SP_SB_CM(0, 0, 0),
+       MLXSW_SP_SB_CM(0, 0, 0),
+       MLXSW_SP_SB_CM(0, 0, 0),
+       MLXSW_SP_SB_CM(0, 0, 0),
+       MLXSW_SP_SB_CM(0, 0, 0),
+       MLXSW_SP_SB_CM(0, 0, 0),
+       MLXSW_SP_SB_CM(0, 0, 0),
+       MLXSW_SP_SB_CM(0, 0, 0),
+       MLXSW_SP_SB_CM(1, 0xff, 0),
+};
+
+#define MLXSW_SP_SB_CMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_egress)
+
+#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 0)
 
 static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
-       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(0),
-       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(1),
-       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(2),
-       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(3),
-       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(4),
-       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(5),
-       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(6),
-       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(7),
-       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(8),
-       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(9),
-       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(10),
-       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(11),
-       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(12),
-       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(13),
-       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(14),
-       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(15),
-       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(16),
-       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(17),
-       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(18),
-       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(19),
-       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(20),
-       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(21),
-       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(22),
-       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(23),
-       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(24),
-       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(25),
-       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(26),
-       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(27),
-       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(28),
-       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(29),
-       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(30),
-       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(31),
+       MLXSW_SP_CPU_PORT_SB_CM,
+       MLXSW_SP_CPU_PORT_SB_CM,
+       MLXSW_SP_CPU_PORT_SB_CM,
+       MLXSW_SP_CPU_PORT_SB_CM,
+       MLXSW_SP_CPU_PORT_SB_CM,
+       MLXSW_SP_CPU_PORT_SB_CM,
+       MLXSW_SP_CPU_PORT_SB_CM,
+       MLXSW_SP_CPU_PORT_SB_CM,
+       MLXSW_SP_CPU_PORT_SB_CM,
+       MLXSW_SP_CPU_PORT_SB_CM,
+       MLXSW_SP_CPU_PORT_SB_CM,
+       MLXSW_SP_CPU_PORT_SB_CM,
+       MLXSW_SP_CPU_PORT_SB_CM,
+       MLXSW_SP_CPU_PORT_SB_CM,
+       MLXSW_SP_CPU_PORT_SB_CM,
+       MLXSW_SP_CPU_PORT_SB_CM,
+       MLXSW_SP_CPU_PORT_SB_CM,
+       MLXSW_SP_CPU_PORT_SB_CM,
+       MLXSW_SP_CPU_PORT_SB_CM,
+       MLXSW_SP_CPU_PORT_SB_CM,
+       MLXSW_SP_CPU_PORT_SB_CM,
+       MLXSW_SP_CPU_PORT_SB_CM,
+       MLXSW_SP_CPU_PORT_SB_CM,
+       MLXSW_SP_CPU_PORT_SB_CM,
+       MLXSW_SP_CPU_PORT_SB_CM,
+       MLXSW_SP_CPU_PORT_SB_CM,
+       MLXSW_SP_CPU_PORT_SB_CM,
+       MLXSW_SP_CPU_PORT_SB_CM,
+       MLXSW_SP_CPU_PORT_SB_CM,
+       MLXSW_SP_CPU_PORT_SB_CM,
+       MLXSW_SP_CPU_PORT_SB_CM,
+       MLXSW_SP_CPU_PORT_SB_CM,
 };
 
 #define MLXSW_SP_CPU_PORT_SB_MCS_LEN \
        ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms)
 
-static int mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
-                               const struct mlxsw_sp_sb_cm *cms,
-                               size_t cms_len)
+static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+                                 enum mlxsw_reg_sbxx_dir dir,
+                                 const struct mlxsw_sp_sb_cm *cms,
+                                 size_t cms_len)
 {
-       char sbcm_pl[MLXSW_REG_SBCM_LEN];
        int i;
        int err;
 
        for (i = 0; i < cms_len; i++) {
                const struct mlxsw_sp_sb_cm *cm;
 
+               if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
+                       continue; /* PG number 8 does not exist, skip it */
                cm = &cms[i];
-               mlxsw_reg_sbcm_pack(sbcm_pl, local_port, cm->u.pg, cm->dir,
-                                   cm->min_buff, cm->max_buff, cm->pool);
-               err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
+               err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i, dir,
+                                          cm->min_buff, cm->max_buff,
+                                          cm->pool);
                if (err)
                        return err;
        }
@@ -291,105 +385,120 @@ static int mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 
 static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
 {
-       return mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
-                                   mlxsw_sp_port->local_port, mlxsw_sp_sb_cms,
-                                   MLXSW_SP_SB_CMS_LEN);
+       int err;
+
+       err = __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
+                                    mlxsw_sp_port->local_port,
+                                    MLXSW_REG_SBXX_DIR_INGRESS,
+                                    mlxsw_sp_sb_cms_ingress,
+                                    MLXSW_SP_SB_CMS_INGRESS_LEN);
+       if (err)
+               return err;
+       return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
+                                     mlxsw_sp_port->local_port,
+                                     MLXSW_REG_SBXX_DIR_EGRESS,
+                                     mlxsw_sp_sb_cms_egress,
+                                     MLXSW_SP_SB_CMS_EGRESS_LEN);
 }
 
 static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
 {
-       return mlxsw_sp_sb_cms_init(mlxsw_sp, 0, mlxsw_sp_cpu_port_sb_cms,
-                                   MLXSW_SP_CPU_PORT_SB_MCS_LEN);
+       return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
+                                     mlxsw_sp_cpu_port_sb_cms,
+                                     MLXSW_SP_CPU_PORT_SB_MCS_LEN);
 }
 
-struct mlxsw_sp_sb_pm {
-       u8 pool;
-       enum mlxsw_reg_sbxx_dir dir;
-       u32 min_buff;
-       u32 max_buff;
+#define MLXSW_SP_SB_PM(_min_buff, _max_buff)   \
+       {                                       \
+               .min_buff = _min_buff,          \
+               .max_buff = _max_buff,          \
+       }
+
+static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_ingress[] = {
+       MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
+       MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+       MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+       MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
 };
 
-#define MLXSW_SP_SB_PM(_pool, _dir, _min_buff, _max_buff)      \
-       {                                                       \
-               .pool = _pool,                                  \
-               .dir = _dir,                                    \
-               .min_buff = _min_buff,                          \
-               .max_buff = _max_buff,                          \
-       }
+#define MLXSW_SP_SB_PMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_ingress)
 
-#define MLXSW_SP_SB_PM_INGRESS(_pool, _min_buff, _max_buff)    \
-       MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBXX_DIR_INGRESS,       \
-                      _min_buff, _max_buff)
-
-#define MLXSW_SP_SB_PM_EGRESS(_pool, _min_buff, _max_buff)     \
-       MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBXX_DIR_EGRESS,        \
-                      _min_buff, _max_buff)
-
-static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms[] = {
-       MLXSW_SP_SB_PM_INGRESS(0, 0, 0xff),
-       MLXSW_SP_SB_PM_INGRESS(1, 0, 0),
-       MLXSW_SP_SB_PM_INGRESS(2, 0, 0),
-       MLXSW_SP_SB_PM_INGRESS(3, 0, 0),
-       MLXSW_SP_SB_PM_EGRESS(0, 0, 7),
-       MLXSW_SP_SB_PM_EGRESS(1, 0, 0),
-       MLXSW_SP_SB_PM_EGRESS(2, 0, 0),
-       MLXSW_SP_SB_PM_EGRESS(3, 0, 0),
+static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_egress[] = {
+       MLXSW_SP_SB_PM(0, 7),
+       MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+       MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+       MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
 };
 
-#define MLXSW_SP_SB_PMS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms)
+#define MLXSW_SP_SB_PMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_egress)
 
-static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
+static int __mlxsw_sp_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+                                      enum mlxsw_reg_sbxx_dir dir,
+                                      const struct mlxsw_sp_sb_pm *pms,
+                                      size_t pms_len)
 {
-       char sbpm_pl[MLXSW_REG_SBPM_LEN];
        int i;
        int err;
 
-       for (i = 0; i < MLXSW_SP_SB_PMS_LEN; i++) {
+       for (i = 0; i < pms_len; i++) {
                const struct mlxsw_sp_sb_pm *pm;
 
-               pm = &mlxsw_sp_sb_pms[i];
-               mlxsw_reg_sbpm_pack(sbpm_pl, mlxsw_sp_port->local_port,
-                                   pm->pool, pm->dir,
-                                   pm->min_buff, pm->max_buff);
-               err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
-                                     MLXSW_REG(sbpm), sbpm_pl);
+               pm = &pms[i];
+               err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, dir,
+                                          pm->min_buff, pm->max_buff);
                if (err)
                        return err;
        }
        return 0;
 }
 
+static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+       int err;
+
+       err = __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
+                                         mlxsw_sp_port->local_port,
+                                         MLXSW_REG_SBXX_DIR_INGRESS,
+                                         mlxsw_sp_sb_pms_ingress,
+                                         MLXSW_SP_SB_PMS_INGRESS_LEN);
+       if (err)
+               return err;
+       return __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
+                                          mlxsw_sp_port->local_port,
+                                          MLXSW_REG_SBXX_DIR_EGRESS,
+                                          mlxsw_sp_sb_pms_egress,
+                                          MLXSW_SP_SB_PMS_EGRESS_LEN);
+}
+
 struct mlxsw_sp_sb_mm {
-       u8 prio;
        u32 min_buff;
        u32 max_buff;
        u8 pool;
 };
 
-#define MLXSW_SP_SB_MM(_prio, _min_buff, _max_buff, _pool)     \
-       {                                                       \
-               .prio = _prio,                                  \
-               .min_buff = _min_buff,                          \
-               .max_buff = _max_buff,                          \
-               .pool = _pool,                                  \
+#define MLXSW_SP_SB_MM(_min_buff, _max_buff, _pool)    \
+       {                                               \
+               .min_buff = _min_buff,                  \
+               .max_buff = _max_buff,                  \
+               .pool = _pool,                          \
        }
 
 static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
-       MLXSW_SP_SB_MM(0, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
-       MLXSW_SP_SB_MM(1, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
-       MLXSW_SP_SB_MM(2, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
-       MLXSW_SP_SB_MM(3, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
-       MLXSW_SP_SB_MM(4, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
-       MLXSW_SP_SB_MM(5, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
-       MLXSW_SP_SB_MM(6, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
-       MLXSW_SP_SB_MM(7, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
-       MLXSW_SP_SB_MM(8, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
-       MLXSW_SP_SB_MM(9, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
-       MLXSW_SP_SB_MM(10, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
-       MLXSW_SP_SB_MM(11, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
-       MLXSW_SP_SB_MM(12, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
-       MLXSW_SP_SB_MM(13, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
-       MLXSW_SP_SB_MM(14, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+       MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+       MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+       MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+       MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+       MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+       MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+       MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+       MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+       MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+       MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+       MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+       MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+       MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+       MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+       MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
 };
 
 #define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms)
@@ -404,7 +513,7 @@ static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
                const struct mlxsw_sp_sb_mm *mc;
 
                mc = &mlxsw_sp_sb_mms[i];
-               mlxsw_reg_sbmm_pack(sbmm_pl, mc->prio, mc->min_buff,
+               mlxsw_reg_sbmm_pack(sbmm_pl, i, mc->min_buff,
                                    mc->max_buff, mc->pool);
                err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
                if (err)
@@ -413,19 +522,32 @@ static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
        return 0;
 }
 
+#define MLXSW_SP_SB_SIZE (16 * 1024 * 1024)
+
 int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
 {
        int err;
 
-       err = mlxsw_sp_sb_pools_init(mlxsw_sp);
+       err = mlxsw_sp_sb_prs_init(mlxsw_sp);
        if (err)
                return err;
        err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
        if (err)
                return err;
        err = mlxsw_sp_sb_mms_init(mlxsw_sp);
+       if (err)
+               return err;
+       return devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
+                                  MLXSW_SP_SB_SIZE,
+                                  MLXSW_SP_SB_POOL_COUNT,
+                                  MLXSW_SP_SB_POOL_COUNT,
+                                  MLXSW_SP_SB_TC_COUNT,
+                                  MLXSW_SP_SB_TC_COUNT);
+}
 
-       return err;
+void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
+{
+       devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
 }
 
 int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
@@ -442,3 +564,394 @@ int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
 
        return err;
 }
+
+static u8 pool_get(u16 pool_index)
+{
+       return pool_index % MLXSW_SP_SB_POOL_COUNT;
+}
+
+static u16 pool_index_get(u8 pool, enum mlxsw_reg_sbxx_dir dir)
+{
+       u16 pool_index;
+
+       pool_index = pool;
+       if (dir == MLXSW_REG_SBXX_DIR_EGRESS)
+               pool_index += MLXSW_SP_SB_POOL_COUNT;
+       return pool_index;
+}
+
+static enum mlxsw_reg_sbxx_dir dir_get(u16 pool_index)
+{
+       return pool_index < MLXSW_SP_SB_POOL_COUNT ?
+              MLXSW_REG_SBXX_DIR_INGRESS : MLXSW_REG_SBXX_DIR_EGRESS;
+}
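devlink exposes one flat pool index space per device, so these helpers fold the direction into the index: ingress pools take indices 0..MLXSW_SP_SB_POOL_COUNT-1 and egress pools the next MLXSW_SP_SB_POOL_COUNT slots. A worked round-trip with the pool count of 4 defined above:

/* pool_index 6 (MLXSW_SP_SB_POOL_COUNT == 4):
 *   dir_get(6)  -> MLXSW_REG_SBXX_DIR_EGRESS   (6 >= 4)
 *   pool_get(6) -> 6 % 4 = 2                   (egress pool 2)
 *   pool_index_get(2, EGRESS) -> 2 + 4 = 6     (round-trips)
 */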
+
+int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
+                        unsigned int sb_index, u16 pool_index,
+                        struct devlink_sb_pool_info *pool_info)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+       u8 pool = pool_get(pool_index);
+       enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
+       struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
+
+       pool_info->pool_type = dir;
+       pool_info->size = MLXSW_SP_CELLS_TO_BYTES(pr->size);
+       pool_info->threshold_type = pr->mode;
+       return 0;
+}
+
+int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
+                        unsigned int sb_index, u16 pool_index, u32 size,
+                        enum devlink_sb_threshold_type threshold_type)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+       u8 pool = pool_get(pool_index);
+       enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
+       enum mlxsw_reg_sbpr_mode mode = threshold_type;
+       u32 pool_size = MLXSW_SP_BYTES_TO_CELLS(size);
+
+       return mlxsw_sp_sb_pr_write(mlxsw_sp, pool, dir, mode, pool_size);
+}
+
+#define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */
+
+static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u8 pool,
+                                    enum mlxsw_reg_sbxx_dir dir, u32 max_buff)
+{
+       struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
+
+       if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
+               return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
+       return MLXSW_SP_CELLS_TO_BYTES(max_buff);
+}
+
+static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u8 pool,
+                                   enum mlxsw_reg_sbxx_dir dir, u32 threshold,
+                                   u32 *p_max_buff)
+{
+       struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
+
+       if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
+               int val;
+
+               val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
+               if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
+                   val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX)
+                       return -EINVAL;
+               *p_max_buff = val;
+       } else {
+               *p_max_buff = MLXSW_SP_BYTES_TO_CELLS(threshold);
+       }
+       return 0;
+}
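
With the -2 offset, the two helpers above convert between devlink thresholds and hardware max_buff values. A worked example matching the "3->1, 16->14" note on the define:

    /* Dynamic pool (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC):
     *   threshold_in:  3 + (-2) -> alpha max_buff 1; 16 + (-2) -> 14
     *   threshold_out: 1 - (-2) -> threshold 3;      14 - (-2) -> 16
     *
     * Static pool: the threshold is plain bytes instead:
     *   threshold_in:  max_buff  = MLXSW_SP_BYTES_TO_CELLS(threshold)
     *   threshold_out: threshold = MLXSW_SP_CELLS_TO_BYTES(max_buff)
     */
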
+
+int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
+                             unsigned int sb_index, u16 pool_index,
+                             u32 *p_threshold)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port =
+                       mlxsw_core_port_driver_priv(mlxsw_core_port);
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       u8 local_port = mlxsw_sp_port->local_port;
+       u8 pool = pool_get(pool_index);
+       enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
+       struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
+                                                      pool, dir);
+
+       *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool, dir,
+                                                pm->max_buff);
+       return 0;
+}
+
+int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
+                             unsigned int sb_index, u16 pool_index,
+                             u32 threshold)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port =
+                       mlxsw_core_port_driver_priv(mlxsw_core_port);
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       u8 local_port = mlxsw_sp_port->local_port;
+       u8 pool = pool_get(pool_index);
+       enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
+       u32 max_buff;
+       int err;
+
+       err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
+                                      threshold, &max_buff);
+       if (err)
+               return err;
+
+       return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool, dir,
+                                   0, max_buff);
+}
+
+int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
+                                unsigned int sb_index, u16 tc_index,
+                                enum devlink_sb_pool_type pool_type,
+                                u16 *p_pool_index, u32 *p_threshold)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port =
+                       mlxsw_core_port_driver_priv(mlxsw_core_port);
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       u8 local_port = mlxsw_sp_port->local_port;
+       u8 pg_buff = tc_index;
+       enum mlxsw_reg_sbxx_dir dir = pool_type;
+       struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
+                                                      pg_buff, dir);
+
+       *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool, dir,
+                                                cm->max_buff);
+       *p_pool_index = pool_index_get(cm->pool, pool_type);
+       return 0;
+}
+
+int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
+                                unsigned int sb_index, u16 tc_index,
+                                enum devlink_sb_pool_type pool_type,
+                                u16 pool_index, u32 threshold)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port =
+                       mlxsw_core_port_driver_priv(mlxsw_core_port);
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       u8 local_port = mlxsw_sp_port->local_port;
+       u8 pg_buff = tc_index;
+       enum mlxsw_reg_sbxx_dir dir = pool_type;
+       u8 pool = pool_index;
+       u32 max_buff;
+       int err;
+
+       err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
+                                      threshold, &max_buff);
+       if (err)
+               return err;
+
+       if (pool_type == DEVLINK_SB_POOL_TYPE_EGRESS) {
+               if (pool < MLXSW_SP_SB_POOL_COUNT)
+                       return -EINVAL;
+               pool -= MLXSW_SP_SB_POOL_COUNT;
+       } else if (pool >= MLXSW_SP_SB_POOL_COUNT) {
+               return -EINVAL;
+       }
+       return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir,
+                                   0, max_buff, pool);
+}
+
+#define MASKED_COUNT_MAX \
+       (MLXSW_REG_SBSR_REC_MAX_COUNT / (MLXSW_SP_SB_TC_COUNT * 2))
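
Each port in a query batch contributes one SBSR record per traffic class in each direction, i.e. MLXSW_SP_SB_TC_COUNT * 2 records, so the define above caps a batch at however many ports still fit into one response. Worked with hypothetical numbers (the real ones come from the register definitions):

    /* E.g. a 256-record register and 8 TCs would give:
     *   MASKED_COUNT_MAX = 256 / (8 * 2) = 16 ports per query batch
     */
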
+
+struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
+       u8 masked_count;
+       u8 local_port_1;
+};
+
+static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
+                                       char *sbsr_pl, size_t sbsr_pl_len,
+                                       unsigned long cb_priv)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+       struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
+       u8 masked_count;
+       u8 local_port;
+       int rec_index = 0;
+       struct mlxsw_sp_sb_cm *cm;
+       int i;
+
+       memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));
+
+       masked_count = 0;
+       for (local_port = cb_ctx.local_port_1;
+            local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
+               if (!mlxsw_sp->ports[local_port])
+                       continue;
+               for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
+                       cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
+                                               MLXSW_REG_SBXX_DIR_INGRESS);
+                       mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
+                                                 &cm->occ.cur, &cm->occ.max);
+               }
+               if (++masked_count == cb_ctx.masked_count)
+                       break;
+       }
+       masked_count = 0;
+       for (local_port = cb_ctx.local_port_1;
+            local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
+               if (!mlxsw_sp->ports[local_port])
+                       continue;
+               for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
+                       cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
+                                               MLXSW_REG_SBXX_DIR_EGRESS);
+                       mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
+                                                 &cm->occ.cur, &cm->occ.max);
+               }
+               if (++masked_count == cb_ctx.masked_count)
+                       break;
+       }
+}
+
+int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
+                            unsigned int sb_index)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+       struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
+       unsigned long cb_priv;
+       LIST_HEAD(bulk_list);
+       char *sbsr_pl;
+       u8 masked_count;
+       u8 local_port_1;
+       u8 local_port = 0;
+       int i;
+       int err;
+       int err2;
+
+       sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
+       if (!sbsr_pl)
+               return -ENOMEM;
+
+next_batch:
+       local_port++;
+       local_port_1 = local_port;
+       masked_count = 0;
+       mlxsw_reg_sbsr_pack(sbsr_pl, false);
+       for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
+               mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
+               mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
+       }
+       for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
+               if (!mlxsw_sp->ports[local_port])
+                       continue;
+               mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
+               mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
+               for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
+                       err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
+                                                      MLXSW_REG_SBXX_DIR_INGRESS,
+                                                      &bulk_list);
+                       if (err)
+                               goto out;
+                       err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
+                                                      MLXSW_REG_SBXX_DIR_EGRESS,
+                                                      &bulk_list);
+                       if (err)
+                               goto out;
+               }
+               if (++masked_count == MASKED_COUNT_MAX)
+                       goto do_query;
+       }
+
+do_query:
+       cb_ctx.masked_count = masked_count;
+       cb_ctx.local_port_1 = local_port_1;
+       memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
+       err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
+                                   &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
+                                   cb_priv);
+       if (err)
+               goto out;
+       if (local_port < MLXSW_PORT_MAX_PORTS)
+               goto next_batch;
+
+out:
+       err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
+       if (!err)
+               err = err2;
+       kfree(sbsr_pl);
+       return err;
+}
+
+int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
+                             unsigned int sb_index)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+       LIST_HEAD(bulk_list);
+       char *sbsr_pl;
+       unsigned int masked_count;
+       u8 local_port = 0;
+       int i;
+       int err;
+       int err2;
+
+       sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
+       if (!sbsr_pl)
+               return -ENOMEM;
+
+next_batch:
+       local_port++;
+       masked_count = 0;
+       mlxsw_reg_sbsr_pack(sbsr_pl, true);
+       for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
+               mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
+               mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
+       }
+       for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
+               if (!mlxsw_sp->ports[local_port])
+                       continue;
+               mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
+               mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
+               for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
+                       err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
+                                                      MLXSW_REG_SBXX_DIR_INGRESS,
+                                                      &bulk_list);
+                       if (err)
+                               goto out;
+                       err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
+                                                      MLXSW_REG_SBXX_DIR_EGRESS,
+                                                      &bulk_list);
+                       if (err)
+                               goto out;
+               }
+               if (++masked_count == MASKED_COUNT_MAX)
+                       goto do_query;
+       }
+
+do_query:
+       err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
+                                   &bulk_list, NULL, 0);
+       if (err)
+               goto out;
+       if (local_port < MLXSW_PORT_MAX_PORTS)
+               goto next_batch;
+
+out:
+       err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
+       if (!err)
+               err = err2;
+       kfree(sbsr_pl);
+       return err;
+}
+
+int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
+                                 unsigned int sb_index, u16 pool_index,
+                                 u32 *p_cur, u32 *p_max)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port =
+                       mlxsw_core_port_driver_priv(mlxsw_core_port);
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       u8 local_port = mlxsw_sp_port->local_port;
+       u8 pool = pool_get(pool_index);
+       enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
+       struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
+                                                      pool, dir);
+
+       *p_cur = MLXSW_SP_CELLS_TO_BYTES(pm->occ.cur);
+       *p_max = MLXSW_SP_CELLS_TO_BYTES(pm->occ.max);
+       return 0;
+}
+
+int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
+                                    unsigned int sb_index, u16 tc_index,
+                                    enum devlink_sb_pool_type pool_type,
+                                    u32 *p_cur, u32 *p_max)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port =
+                       mlxsw_core_port_driver_priv(mlxsw_core_port);
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       u8 local_port = mlxsw_sp_port->local_port;
+       u8 pg_buff = tc_index;
+       enum mlxsw_reg_sbxx_dir dir = pool_type;
+       struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
+                                                      pg_buff, dir);
+
+       *p_cur = MLXSW_SP_CELLS_TO_BYTES(cm->occ.cur);
+       *p_max = MLXSW_SP_CELLS_TO_BYTES(cm->occ.max);
+       return 0;
+}
index e1c74efff51ae16e585188d7d3fdbacbe071bee5..3710f19ed6bbbaa119415538c7951183fe972694 100644 (file)
@@ -214,7 +214,15 @@ static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
        mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
                            table_type, range, local_port, set);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+       if (err)
+               goto err_flood_bm_set;
+       else
+               goto buffer_out;
 
+err_flood_bm_set:
+       mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
+                           table_type, range, local_port, !set);
+       mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
 buffer_out:
        kfree(sftr_pl);
        return err;
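
The new label turns the function into a configure-with-rollback sequence: if writing the second flood table fails, the earlier unicast-table write is replayed with the inverse set value so the two tables stay consistent. The general shape of the pattern, as a hedged sketch with hypothetical step names:

    err = apply_table_a(set);
    if (err)
            return err;

    err = apply_table_b(set);
    if (err)
            apply_table_a(!set);    /* roll back step A on failure */
    return err;
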
@@ -1430,8 +1438,8 @@ static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
 
 static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
 {
-       schedule_delayed_work(&mlxsw_sp->fdb_notify.dw,
-                             msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
+       mlxsw_core_schedule_dw(&mlxsw_sp->fdb_notify.dw,
+                              msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
 }
 
 static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
index 75dc46c5fca29229b5734110526137fc2c9786cb..280e761d3a975acb042568a69b6603c1616fe36c 100644 (file)
@@ -4790,7 +4790,7 @@ static void transmit_cleanup(struct dev_info *hw_priv, int normal)
 
        /* Notify the network subsystem that the packet has been sent. */
        if (dev)
-               dev->trans_start = jiffies;
+               netif_trans_update(dev);
 }
 
 /**
@@ -4965,7 +4965,7 @@ static void netdev_tx_timeout(struct net_device *dev)
                hw_ena_intr(hw);
        }
 
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
        netif_wake_queue(dev);
 }
 
index 86ea17e7ba7bff17f40755663e1b10b7d7b4cbcc..7066954c39d682fe229bf8c151ac46047c4eae77 100644 (file)
 #include <linux/skbuff.h>
 #include <linux/delay.h>
 #include <linux/spi/spi.h>
+#include <linux/of_net.h>
 
 #include "enc28j60_hw.h"
 
 #define DRV_NAME       "enc28j60"
-#define DRV_VERSION    "1.01"
+#define DRV_VERSION    "1.02"
 
 #define SPI_OPLEN      1
 
@@ -89,22 +90,26 @@ spi_read_buf(struct enc28j60_net *priv, int len, u8 *data)
 {
        u8 *rx_buf = priv->spi_transfer_buf + 4;
        u8 *tx_buf = priv->spi_transfer_buf;
-       struct spi_transfer t = {
+       struct spi_transfer tx = {
                .tx_buf = tx_buf,
+               .len = SPI_OPLEN,
+       };
+       struct spi_transfer rx = {
                .rx_buf = rx_buf,
-               .len = SPI_OPLEN + len,
+               .len = len,
        };
        struct spi_message msg;
        int ret;
 
        tx_buf[0] = ENC28J60_READ_BUF_MEM;
-       tx_buf[1] = tx_buf[2] = tx_buf[3] = 0;  /* don't care */
 
        spi_message_init(&msg);
-       spi_message_add_tail(&t, &msg);
+       spi_message_add_tail(&tx, &msg);
+       spi_message_add_tail(&rx, &msg);
+
        ret = spi_sync(priv->spi, &msg);
        if (ret == 0) {
-               memcpy(data, &rx_buf[SPI_OPLEN], len);
+               memcpy(data, rx_buf, len);
                ret = msg.status;
        }
        if (ret && netif_msg_drv(priv))
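
Splitting the read into two chained transfers lets the controller clock out the one-byte opcode and then clock the payload directly into its own buffer, so the result no longer has to be memcpy'd from an offset past the opcode. A minimal sketch of the same pattern, with hypothetical variables:

    struct spi_transfer tx = { .tx_buf = &opcode, .len = 1 };
    struct spi_transfer rx = { .rx_buf = payload, .len = len };
    struct spi_message m;

    spi_message_init(&m);
    spi_message_add_tail(&tx, &m);
    spi_message_add_tail(&rx, &m);
    ret = spi_sync(spi, &m);    /* on success, payload[] holds the data */
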
@@ -1544,6 +1549,7 @@ static int enc28j60_probe(struct spi_device *spi)
 {
        struct net_device *dev;
        struct enc28j60_net *priv;
+       const void *macaddr;
        int ret = 0;
 
        if (netif_msg_drv(&debug))
@@ -1575,7 +1581,12 @@ static int enc28j60_probe(struct spi_device *spi)
                ret = -EIO;
                goto error_irq;
        }
-       eth_hw_addr_random(dev);
+
+       macaddr = of_get_mac_address(spi->dev.of_node);
+       if (macaddr)
+               ether_addr_copy(dev->dev_addr, macaddr);
+       else
+               eth_hw_addr_random(dev);
        enc28j60_set_hw_macaddr(dev);
 
        /* Board setup must set the relevant edge trigger type;
@@ -1630,9 +1641,16 @@ static int enc28j60_remove(struct spi_device *spi)
        return 0;
 }
 
+static const struct of_device_id enc28j60_dt_ids[] = {
+       { .compatible = "microchip,enc28j60" },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, enc28j60_dt_ids);
+
 static struct spi_driver enc28j60_driver = {
        .driver = {
-                  .name = DRV_NAME,
+               .name = DRV_NAME,
+               .of_match_table = enc28j60_dt_ids,
         },
        .probe = enc28j60_probe,
        .remove = enc28j60_remove,
index 7df318346b05a513b36b7805f421f3cdbb4dd3e0..42e34076d2de650399a134fb21e64c119470ee64 100644 (file)
@@ -874,7 +874,7 @@ static netdev_tx_t encx24j600_tx(struct sk_buff *skb, struct net_device *dev)
        netif_stop_queue(dev);
 
        /* save the timestamp */
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
 
        /* Remember the skb for deferred processing */
        priv->tx_skb = skb;
@@ -890,7 +890,7 @@ static void encx24j600_tx_timeout(struct net_device *dev)
        struct encx24j600_priv *priv = netdev_priv(dev);
 
        netif_err(priv, tx_err, dev, "TX timeout at %ld, latency %ld\n",
-                 jiffies, jiffies - dev->trans_start);
+                 jiffies, jiffies - dev_trans_start(dev));
 
        dev->stats.tx_errors++;
        netif_wake_queue(dev);
index 3e67f451f2ab918c95d7a6c3429d45b359e18978..4367dd6879a22791a1bef1726ba8d770b03cebca 100644 (file)
@@ -376,7 +376,7 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
        priv->tx_head = TX_NEXT(tx_head);
 
-       ndev->trans_start = jiffies;
+       netif_trans_update(ndev);
        ret = NETDEV_TX_OK;
 out_unlock:
        spin_unlock_irq(&priv->txlock);
index 270c9eeb7ab622955d237ae9cd96cadc5caa5f3a..6d1a956e3f779d57e83a8fd40a5136919231df2c 100644 (file)
@@ -2668,9 +2668,9 @@ static int myri10ge_close(struct net_device *dev)
 
        del_timer_sync(&mgp->watchdog_timer);
        mgp->running = MYRI10GE_ETH_STOPPING;
-       local_bh_disable(); /* myri10ge_ss_lock_napi needs bh disabled */
        for (i = 0; i < mgp->num_slices; i++) {
                napi_disable(&mgp->ss[i].napi);
+               local_bh_disable(); /* myri10ge_ss_lock_napi needs this */
                /* Lock the slice to prevent the busy_poll handler from
                 * accessing it.  Later when we bring the NIC up, myri10ge_open
                 * resets the slice including this lock.
@@ -2679,8 +2679,8 @@ static int myri10ge_close(struct net_device *dev)
                        pr_info("Slice %d locked\n", i);
                        mdelay(1);
                }
+               local_bh_enable();
        }
-       local_bh_enable();
        netif_carrier_off(dev);
 
        netif_tx_stop_all_queues(dev);
index 122c2ee3dfe2aaa5948efd35158bf2fdf32bc0d5..ed89029ff75bef44bd224d725ad8b76afc20d467 100644 (file)
@@ -1904,7 +1904,7 @@ static void ns_tx_timeout(struct net_device *dev)
        spin_unlock_irq(&np->lock);
        enable_irq(irq);
 
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        dev->stats.tx_errors++;
        netif_wake_queue(dev);
 }
index 1bd419dbda6dc34d39f1a4fe38ec30a9ca85cf16..612c7a44b26c6c58a16dbdd82017bfb3246bbb69 100644 (file)
@@ -174,7 +174,7 @@ static void sonic_tx_timeout(struct net_device *dev)
        /* Try to restart the adaptor. */
        sonic_init(dev);
        lp->stats.tx_errors++;
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        netif_wake_queue(dev);
 }
 
index 9ba975853ec6c712f0463d24a48a837d9c7b0152..2874dffe77deaa88f3c70af0af744d232336bbab 100644 (file)
@@ -4021,7 +4021,6 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
        unsigned long flags = 0;
        u16 vlan_tag = 0;
        struct fifo_info *fifo = NULL;
-       int do_spin_lock = 1;
        int offload_type;
        int enable_per_list_interrupt = 0;
        struct config_param *config = &sp->config;
@@ -4074,7 +4073,6 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
                                        queue += sp->udp_fifo_idx;
                                        if (skb->len > 1024)
                                                enable_per_list_interrupt = 1;
-                                       do_spin_lock = 0;
                                }
                        }
                }
@@ -4084,12 +4082,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
                        [skb->priority & (MAX_TX_FIFOS - 1)];
        fifo = &mac_control->fifos[queue];
 
-       if (do_spin_lock)
-               spin_lock_irqsave(&fifo->tx_lock, flags);
-       else {
-               if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
-                       return NETDEV_TX_LOCKED;
-       }
+       spin_lock_irqsave(&fifo->tx_lock, flags);
 
        if (sp->config.multiq) {
                if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
index 3d53fcf323eb002df80efd1376fb0fd2751c3891..e744acc18ef40dbfbb98f6f3b1e5d7af29c7ed42 100644 (file)
@@ -59,8 +59,8 @@
                        netdev_warn((nn)->netdev, fmt, ## args);        \
        } while (0)
 
-/* Max time to wait for NFP to respond on updates (in ms) */
-#define NFP_NET_POLL_TIMEOUT   5000
+/* Max time to wait for NFP to respond on updates (in seconds) */
+#define NFP_NET_POLL_TIMEOUT   5
 
 /* Bar allocation */
 #define NFP_NET_CRTL_BAR       0
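
The unit change is deliberate: the reconfig rework below arms a timer, so the constant is now multiplied by HZ to build jiffies deadlines instead of being compared against a millisecond poll counter. For example:

    unsigned long deadline = jiffies + NFP_NET_POLL_TIMEOUT * HZ;  /* 5 s */
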
@@ -447,6 +447,10 @@ static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver,
  * @shared_name:        Name for shared interrupt
  * @me_freq_mhz:        ME clock_freq (MHz)
  * @reconfig_lock:     Protects HW reconfiguration request regs/machinery
+ * @reconfig_posted:   Pending reconfig bits coming from async sources
+ * @reconfig_timer_active:  Timer for reading reconfiguration results is pending
+ * @reconfig_sync_present:  Some thread is performing synchronous reconfig
+ * @reconfig_timer:    Timer for async reading of reconfig results
  * @link_up:            Is the link up?
  * @link_status_lock:  Protects @link_up and ensures atomicity with BAR reading
  * @rx_coalesce_usecs:      RX interrupt moderation usecs delay parameter
@@ -531,6 +535,10 @@ struct nfp_net {
        spinlock_t link_status_lock;
 
        spinlock_t reconfig_lock;
+       u32 reconfig_posted;
+       bool reconfig_timer_active;
+       bool reconfig_sync_present;
+       struct timer_list reconfig_timer;
 
        u32 rx_coalesce_usecs;
        u32 rx_coalesce_max_frames;
index 0bdff390c9588cf5f39e25a1a6f8f61c9f6e348e..fa47c14c743ad811252c753302d4b52e944123f8 100644 (file)
@@ -80,6 +80,116 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
        put_unaligned_le32(reg, fw_ver);
 }
 
+/* Firmware reconfig
+ *
+ * Firmware reconfig may take a while so we have two versions of it -
+ * synchronous and asynchronous (posted).  All synchronous callers are holding
+ * RTNL so we don't have to worry about serializing them.
+ */
+static void nfp_net_reconfig_start(struct nfp_net *nn, u32 update)
+{
+       nn_writel(nn, NFP_NET_CFG_UPDATE, update);
+       /* ensure update is written before pinging HW */
+       nn_pci_flush(nn);
+       nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
+}
+
+/* Pass 0 as update to run posted reconfigs. */
+static void nfp_net_reconfig_start_async(struct nfp_net *nn, u32 update)
+{
+       update |= nn->reconfig_posted;
+       nn->reconfig_posted = 0;
+
+       nfp_net_reconfig_start(nn, update);
+
+       nn->reconfig_timer_active = true;
+       mod_timer(&nn->reconfig_timer, jiffies + NFP_NET_POLL_TIMEOUT * HZ);
+}
+
+static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check)
+{
+       u32 reg;
+
+       reg = nn_readl(nn, NFP_NET_CFG_UPDATE);
+       if (reg == 0)
+               return true;
+       if (reg & NFP_NET_CFG_UPDATE_ERR) {
+               nn_err(nn, "Reconfig error: 0x%08x\n", reg);
+               return true;
+       } else if (last_check) {
+               nn_err(nn, "Reconfig timeout: 0x%08x\n", reg);
+               return true;
+       }
+
+       return false;
+}
+
+static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
+{
+       bool timed_out = false;
+
+       /* Poll update field, waiting for NFP to ack the config */
+       while (!nfp_net_reconfig_check_done(nn, timed_out)) {
+               msleep(1);
+               timed_out = time_is_before_eq_jiffies(deadline);
+       }
+
+       if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR)
+               return -EIO;
+
+       return timed_out ? -EIO : 0;
+}
+
+static void nfp_net_reconfig_timer(unsigned long data)
+{
+       struct nfp_net *nn = (void *)data;
+
+       spin_lock_bh(&nn->reconfig_lock);
+
+       nn->reconfig_timer_active = false;
+
+       /* If sync caller is present it will take over from us */
+       if (nn->reconfig_sync_present)
+               goto done;
+
+       /* Read reconfig status and report errors */
+       nfp_net_reconfig_check_done(nn, true);
+
+       if (nn->reconfig_posted)
+               nfp_net_reconfig_start_async(nn, 0);
+done:
+       spin_unlock_bh(&nn->reconfig_lock);
+}
+
+/**
+ * nfp_net_reconfig_post() - Post async reconfig request
+ * @nn:      NFP Net device to reconfigure
+ * @update:  The value for the update field in the BAR config
+ *
+ * Record FW reconfiguration request.  Reconfiguration will be kicked off
+ * whenever reconfiguration machinery is idle.  Multiple requests can be
+ * merged together!
+ */
+static void nfp_net_reconfig_post(struct nfp_net *nn, u32 update)
+{
+       spin_lock_bh(&nn->reconfig_lock);
+
+       /* Sync caller will kick off async reconf when it's done, just post */
+       if (nn->reconfig_sync_present) {
+               nn->reconfig_posted |= update;
+               goto done;
+       }
+
+       /* Opportunistically check if the previous command is done */
+       if (!nn->reconfig_timer_active ||
+           nfp_net_reconfig_check_done(nn, false))
+               nfp_net_reconfig_start_async(nn, update);
+       else
+               nn->reconfig_posted |= update;
+done:
+       spin_unlock_bh(&nn->reconfig_lock);
+}
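
Because reconfig_posted is a bitmask of UPDATE bits, any number of posted requests collapses into a single firmware reconfiguration once the machinery goes idle. Illustrative only:

    /* Two async requests arriving while the hardware is busy... */
    nn->reconfig_posted |= NFP_NET_CFG_UPDATE_GEN;
    nn->reconfig_posted |= NFP_NET_CFG_UPDATE_VXLAN;

    /* ...are later serviced by one nfp_net_reconfig_start_async(nn, 0),
     * which ORs the posted mask into the update word and clears it. */
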
+
 /**
  * nfp_net_reconfig() - Reconfigure the firmware
  * @nn:      NFP Net device to reconfigure
@@ -93,35 +203,45 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
  */
 int nfp_net_reconfig(struct nfp_net *nn, u32 update)
 {
-       int cnt, ret = 0;
-       u32 new;
+       bool cancelled_timer = false;
+       u32 pre_posted_requests;
+       int ret;
 
        spin_lock_bh(&nn->reconfig_lock);
 
-       nn_writel(nn, NFP_NET_CFG_UPDATE, update);
-       /* ensure update is written before pinging HW */
-       nn_pci_flush(nn);
-       nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
+       nn->reconfig_sync_present = true;
 
-       /* Poll update field, waiting for NFP to ack the config */
-       for (cnt = 0; ; cnt++) {
-               new = nn_readl(nn, NFP_NET_CFG_UPDATE);
-               if (new == 0)
-                       break;
-               if (new & NFP_NET_CFG_UPDATE_ERR) {
-                       nn_err(nn, "Reconfig error: 0x%08x\n", new);
-                       ret = -EIO;
-                       break;
-               } else if (cnt >= NFP_NET_POLL_TIMEOUT) {
-                       nn_err(nn, "Reconfig timeout for 0x%08x after %dms\n",
-                              update, cnt);
-                       ret = -EIO;
-                       break;
-               }
-               mdelay(1);
+       if (nn->reconfig_timer_active) {
+               del_timer(&nn->reconfig_timer);
+               nn->reconfig_timer_active = false;
+               cancelled_timer = true;
+       }
+       pre_posted_requests = nn->reconfig_posted;
+       nn->reconfig_posted = 0;
+
+       spin_unlock_bh(&nn->reconfig_lock);
+
+       if (cancelled_timer)
+               nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
+
+       /* Run the posted reconfigs which were issued before we started */
+       if (pre_posted_requests) {
+               nfp_net_reconfig_start(nn, pre_posted_requests);
+               nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
        }
 
+       nfp_net_reconfig_start(nn, update);
+       ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
+
+       spin_lock_bh(&nn->reconfig_lock);
+
+       if (nn->reconfig_posted)
+               nfp_net_reconfig_start_async(nn, 0);
+
+       nn->reconfig_sync_present = false;
+
        spin_unlock_bh(&nn->reconfig_lock);
+
        return ret;
 }
 
@@ -1298,23 +1418,25 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
 
                nfp_net_rx_give_one(rx_ring, new_skb, new_dma_addr);
 
+               /*         < meta_len >
+                *  <-- [rx_offset] -->
+                *  ---------------------------------------------------------
+                * | [XX] |  metadata  |             packet           | XXXX |
+                *  ---------------------------------------------------------
+                *         <---------------- data_len --------------->
+                *
+                * The rx_offset is fixed for all packets, the meta_len can vary
+                * on a packet by packet basis. If rx_offset is set to zero
+                * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
+                * buffer and is immediately followed by the packet (no [XX]).
+                */
                meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
                data_len = le16_to_cpu(rxd->rxd.data_len);
 
-               if (WARN_ON_ONCE(data_len > nn->fl_bufsz)) {
-                       dev_kfree_skb_any(skb);
-                       continue;
-               }
-
-               if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) {
-                       /* The packet data starts after the metadata */
+               if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
                        skb_reserve(skb, meta_len);
-               } else {
-                       /* The packet data starts at a fixed offset */
+               else
                        skb_reserve(skb, nn->rx_offset);
-               }
-
-               /* Adjust the SKB for the dynamic meta data pre-pended */
                skb_put(skb, data_len - meta_len);
 
                nfp_net_set_hash(nn->netdev, skb, rxd);
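
A worked example of the buffer-layout comment above, with made-up sizes: say meta_len = 8 and data_len = 1060.

    /* Fixed offset (hypothetical rx_offset = 32): */
    skb_reserve(skb, 32);       /* skb->data now points at the packet */
    skb_put(skb, 1060 - 8);     /* expose the 1052-byte frame */

    /* Dynamic layout (rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC == 0),
     * where the metadata itself leads the buffer: */
    skb_reserve(skb, 8);        /* meta_len */
    skb_put(skb, 1060 - 8);
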
@@ -2094,8 +2216,7 @@ static void nfp_net_set_rx_mode(struct net_device *netdev)
                return;
 
        nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
-       if (nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN))
-               return;
+       nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN);
 
        nn->ctrl = new_ctrl;
 }
@@ -2403,7 +2524,7 @@ static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port)
                          be16_to_cpu(nn->vxlan_ports[i + 1]) << 16 |
                          be16_to_cpu(nn->vxlan_ports[i]));
 
-       nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_VXLAN);
+       nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_VXLAN);
 }
 
 /**
@@ -2549,6 +2670,9 @@ struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
        spin_lock_init(&nn->reconfig_lock);
        spin_lock_init(&nn->link_status_lock);
 
+       setup_timer(&nn->reconfig_timer,
+                   nfp_net_reconfig_timer, (unsigned long)nn);
+
        return nn;
 }
 
index 8692003aeed886418cb17ac641e74f7849668541..ad6c4e31ceddf82cac729d2b4449d9b7bd121394 100644 (file)
 
 /**
  * @NFP_NET_TXR_MAX:         Maximum number of TX rings
- * @NFP_NET_TXR_MASK:        Mask for TX rings
  * @NFP_NET_RXR_MAX:         Maximum number of RX rings
- * @NFP_NET_RXR_MASK:        Mask for RX rings
  */
 #define NFP_NET_TXR_MAX                 64
-#define NFP_NET_TXR_MASK                (NFP_NET_TXR_MAX - 1)
 #define NFP_NET_RXR_MAX                 64
-#define NFP_NET_RXR_MASK                (NFP_NET_RXR_MAX - 1)
 
 /**
  * Read/Write config words (0x0000 - 0x002c)
  * @NFP_NET_CFG_VERSION:     Firmware version number
  * @NFP_NET_CFG_STS:         Status
  * @NFP_NET_CFG_CAP:         Capabilities (same bits as @NFP_NET_CFG_CTRL)
- * @NFP_NET_MAX_TXRINGS:     Maximum number of TX rings
- * @NFP_NET_MAX_RXRINGS:     Maximum number of RX rings
- * @NFP_NET_MAX_MTU:         Maximum support MTU
+ * @NFP_NET_CFG_MAX_TXRINGS: Maximum number of TX rings
+ * @NFP_NET_CFG_MAX_RXRINGS: Maximum number of RX rings
+ * @NFP_NET_CFG_MAX_MTU:     Maximum supported MTU
  * @NFP_NET_CFG_START_TXQ:   Start Queue Control Queue to use for TX (PF only)
  * @NFP_NET_CFG_START_RXQ:   Start Queue Control Queue to use for RX (PF only)
  *
index f86a1f13d27bd259035269cb2c7b45deee982731..f7c9a5bc4aa334ee0f35f99b6b590795ce612149 100644 (file)
@@ -187,7 +187,7 @@ static const struct file_operations nfp_tx_q_fops = {
 
 void nfp_net_debugfs_adapter_add(struct nfp_net *nn)
 {
-       static struct dentry *queues, *tx, *rx;
+       struct dentry *queues, *tx, *rx;
        char int_name[16];
        int i;
 
@@ -200,7 +200,7 @@ void nfp_net_debugfs_adapter_add(struct nfp_net *nn)
 
        /* Create queue debugging sub-tree */
        queues = debugfs_create_dir("queue", nn->debugfs_dir);
-       if (IS_ERR_OR_NULL(nn->debugfs_dir))
+       if (IS_ERR_OR_NULL(queues))
                return;
 
        rx = debugfs_create_dir("rx", queues);
index 52d9a94aebb9af0aacfe021454df32dfbfa14675..87b7b814778b085e9a38eaa15a4b0ade23b4ddb0 100644 (file)
@@ -476,7 +476,7 @@ static void w90p910_reset_mac(struct net_device *dev)
 
        w90p910_init_desc(dev);
 
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        ether->cur_tx = 0x0;
        ether->finish_tx = 0x0;
        ether->cur_rx = 0x0;
@@ -490,7 +490,7 @@ static void w90p910_reset_mac(struct net_device *dev)
        w90p910_trigger_tx(dev);
        w90p910_trigger_rx(dev);
 
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
 
        if (netif_queue_stopped(dev))
                netif_wake_queue(dev);
index 2a55d6d53ee61635806725d5167638b926f63363..8d710a3b4db0626e44ce019d115c31a061bcb673 100644 (file)
@@ -481,7 +481,6 @@ struct pch_gbe_buffer {
 
 /**
  * struct pch_gbe_tx_ring - tx ring information
- * @tx_lock:   spinlock structs
  * @desc:      pointer to the descriptor ring memory
  * @dma:       physical address of the descriptor ring
  * @size:      length of descriptor ring in bytes
@@ -491,7 +490,6 @@ struct pch_gbe_buffer {
  * @buffer_info:       array of buffer information structs
  */
 struct pch_gbe_tx_ring {
-       spinlock_t tx_lock;
        struct pch_gbe_tx_desc *desc;
        dma_addr_t dma;
        unsigned int size;
index 3b98b263bad0d8de574b5a9d3781a765aa808e36..3cd87a41ac92c42f4f75716753dccd57eaba9c1a 100644 (file)
@@ -1640,7 +1640,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
                   cleaned_count);
        if (cleaned_count > 0)  { /*skip this if nothing cleaned*/
                /* Recover from running out of Tx resources in xmit_frame */
-               spin_lock(&tx_ring->tx_lock);
+               netif_tx_lock(adapter->netdev);
                if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev))))
                {
                        netif_wake_queue(adapter->netdev);
@@ -1652,7 +1652,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
 
                netdev_dbg(adapter->netdev, "next_to_clean : %d\n",
                           tx_ring->next_to_clean);
-               spin_unlock(&tx_ring->tx_lock);
+               netif_tx_unlock(adapter->netdev);
        }
        return cleaned;
 }
@@ -1805,7 +1805,6 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
 
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
-       spin_lock_init(&tx_ring->tx_lock);
 
        for (desNo = 0; desNo < tx_ring->count; desNo++) {
                tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
@@ -2135,15 +2134,9 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
        struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
-       unsigned long flags;
 
-       if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
-               /* Collision - tell upper layer to requeue */
-               return NETDEV_TX_LOCKED;
-       }
        if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
                netif_stop_queue(netdev);
-               spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
                netdev_dbg(netdev,
                           "Return : BUSY  next_to use : 0x%08x  next_to clean : 0x%08x\n",
                           tx_ring->next_to_use, tx_ring->next_to_clean);
@@ -2152,7 +2145,6 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
        /* CRC,ITAG no support */
        pch_gbe_tx_queue(adapter, tx_ring, skb);
-       spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
        return NETDEV_TX_OK;
 }
 
index 13d88a6025c82a89fb290bc6930cd4055bd1535b..91be2f02ef1cf9d5654a5e7a124506908637a951 100644 (file)
@@ -1144,7 +1144,7 @@ static void hamachi_tx_timeout(struct net_device *dev)
        hmp->rx_ring[RX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing);
 
        /* Trigger an immediate transmit demand. */
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        dev->stats.tx_errors++;
 
        /* Restart the chip's Tx/Rx processes . */
index fa2db41e02f85d472d89b30ddd22be53bee85280..fb1d1031b0916044ea48ed1d287465827fb0d578 100644 (file)
@@ -714,7 +714,7 @@ static void yellowfin_tx_timeout(struct net_device *dev)
        if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
                netif_wake_queue (dev);         /* Typical path */
 
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        dev->stats.tx_errors++;
 }
 
index ddcfcab034c23b122596b5e733708d8666737ae6..c0a11b5158e7d3fdbdb8bbfd1c30bb3ab56d078e 100644 (file)
@@ -103,4 +103,25 @@ config QEDE
        depends on QED
        ---help---
          This enables the support for ...
+
+config QEDE_VXLAN
+       bool "Virtual eXtensible Local Area Network support"
+       default n
+       depends on QEDE && VXLAN && !(QEDE=y && VXLAN=m)
+       ---help---
+         This enables hardware offload support for VXLAN protocol over
+         qede module. Say Y here if you want to enable hardware offload
+         support for Virtual eXtensible Local Area Network (VXLAN)
+         in the driver.
+
+config QEDE_GENEVE
+       bool "Generic Network Virtualization Encapsulation (GENEVE) support"
+       depends on QEDE && GENEVE && !(QEDE=y && GENEVE=m)
+       ---help---
+         This allows one to create GENEVE virtual interfaces that provide
+         Layer 2 Networks over Layer 3 Networks. GENEVE is often used
+         to tunnel virtual network infrastructure in virtualized environments.
+         Say Y here if you want to enable hardware offload support for
+         Generic Network Virtualization Encapsulation (GENEVE) in the driver.
+
 endif # NET_VENDOR_QLOGIC
index db80eb1c6d4fc5ebccea52aa86e87a7578ead04f..2b10f1bcd1517458b2cfcb22e6ce4677995181a8 100644 (file)
@@ -1015,20 +1015,24 @@ static int netxen_get_flash_block(struct netxen_adapter *adapter, int base,
 {
        int i, v, addr;
        __le32 *ptr32;
+       int ret;
 
        addr = base;
        ptr32 = buf;
        for (i = 0; i < size / sizeof(u32); i++) {
-               if (netxen_rom_fast_read(adapter, addr, &v) == -1)
-                       return -1;
+               ret = netxen_rom_fast_read(adapter, addr, &v);
+               if (ret)
+                       return ret;
+
                *ptr32 = cpu_to_le32(v);
                ptr32++;
                addr += sizeof(u32);
        }
        if ((char *)buf + size > (char *)ptr32) {
                __le32 local;
-               if (netxen_rom_fast_read(adapter, addr, &v) == -1)
-                       return -1;
+               ret = netxen_rom_fast_read(adapter, addr, &v);
+               if (ret)
+                       return ret;
                local = cpu_to_le32(v);
                memcpy(ptr32, &local, (char *)buf + size - (char *)ptr32);
        }
@@ -1940,7 +1944,7 @@ void netxen_nic_set_link_parameters(struct netxen_adapter *adapter)
                                if (adapter->phy_read &&
                                    adapter->phy_read(adapter,
                                                      NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG,
-                                                     &autoneg) != 0)
+                                                     &autoneg) == 0)
                                        adapter->link_autoneg = autoneg;
                        } else
                                goto link_down;
index fd362b6923f48b9fff5c55fdcf7fd241d94ef9aa..7a0281a36c2818a002a8fc2bc00ceb6ae7221921 100644 (file)
@@ -852,7 +852,8 @@ netxen_check_options(struct netxen_adapter *adapter)
        ptr32 = (__le32 *)&serial_num;
        offset = NX_FW_SERIAL_NUM_OFFSET;
        for (i = 0; i < 8; i++) {
-               if (netxen_rom_fast_read(adapter, offset, &val) == -1) {
+               err = netxen_rom_fast_read(adapter, offset, &val);
+               if (err) {
                        dev_err(&pdev->dev, "error reading board info\n");
                        adapter->driver_mismatch = 1;
                        return;
@@ -2285,7 +2286,7 @@ static void netxen_tx_timeout_task(struct work_struct *work)
                        goto request_reset;
                }
        }
-       adapter->netdev->trans_start = jiffies;
+       netif_trans_update(adapter->netdev);
        rtnl_unlock();
        return;
 
index 5c2fd57236fe42d90eb8cae64473557089c1a583..aafa6692e62f51c083e712cede99c0ca72438a85 100644 (file)
@@ -1,4 +1,5 @@
 obj-$(CONFIG_QED) := qed.o
 
 qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
-        qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o
+        qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \
+        qed_selftest.o
index fcb8e9ba51d9b6ad85ecf6bd2361e71896c6b592..cceac3272cce63c39dd7635022372b3a07e60239 100644 (file)
 #include "qed_hsi.h"
 
 extern const struct qed_common_ops qed_common_ops_pass;
-#define DRV_MODULE_VERSION "8.7.0.0"
+#define DRV_MODULE_VERSION "8.7.1.20"
 
 #define MAX_HWFNS_PER_DEVICE    (4)
 #define NAME_SIZE 16
 #define VER_SIZE 16
 
+#define QED_WFQ_UNIT   100
+
 /* cau states */
 enum qed_coalescing_mode {
        QED_COAL_MODE_DISABLE,
@@ -74,6 +76,51 @@ struct qed_rt_data {
        bool    *b_valid;
 };
 
+enum qed_tunn_mode {
+       QED_MODE_L2GENEVE_TUNN,
+       QED_MODE_IPGENEVE_TUNN,
+       QED_MODE_L2GRE_TUNN,
+       QED_MODE_IPGRE_TUNN,
+       QED_MODE_VXLAN_TUNN,
+};
+
+enum qed_tunn_clss {
+       QED_TUNN_CLSS_MAC_VLAN,
+       QED_TUNN_CLSS_MAC_VNI,
+       QED_TUNN_CLSS_INNER_MAC_VLAN,
+       QED_TUNN_CLSS_INNER_MAC_VNI,
+       MAX_QED_TUNN_CLSS,
+};
+
+struct qed_tunn_start_params {
+       unsigned long   tunn_mode;
+       u16             vxlan_udp_port;
+       u16             geneve_udp_port;
+       u8              update_vxlan_udp_port;
+       u8              update_geneve_udp_port;
+       u8              tunn_clss_vxlan;
+       u8              tunn_clss_l2geneve;
+       u8              tunn_clss_ipgeneve;
+       u8              tunn_clss_l2gre;
+       u8              tunn_clss_ipgre;
+};
+
+struct qed_tunn_update_params {
+       unsigned long   tunn_mode_update_mask;
+       unsigned long   tunn_mode;
+       u16             vxlan_udp_port;
+       u16             geneve_udp_port;
+       u8              update_rx_pf_clss;
+       u8              update_tx_pf_clss;
+       u8              update_vxlan_udp_port;
+       u8              update_geneve_udp_port;
+       u8              tunn_clss_vxlan;
+       u8              tunn_clss_l2geneve;
+       u8              tunn_clss_ipgeneve;
+       u8              tunn_clss_l2gre;
+       u8              tunn_clss_ipgre;
+};
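
The tunn_mode fields are bitmasks indexed by enum qed_tunn_mode. How callers populate these structs lands in later patches of this series, so the following is only a plausible sketch (the UDP port shown is the IANA VXLAN default):

    struct qed_tunn_start_params params = { 0 };

    __set_bit(QED_MODE_VXLAN_TUNN, &params.tunn_mode);
    params.vxlan_udp_port = 4789;       /* IANA-assigned VXLAN port */
    params.update_vxlan_udp_port = 1;
    params.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;
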
+
 /* The PCI personality is not quite synonymous to protocol ID:
  * 1. All personalities need CORE connections
  * 2. The Ethernet personality may support also the RoCE protocol
@@ -192,6 +239,12 @@ struct qed_dmae_info {
        struct dmae_cmd *p_dmae_cmd;
 };
 
+struct qed_wfq_data {
+       /* when feature is configured for at least 1 vport */
+       u32     min_speed;
+       bool    configured;
+};
+
 struct qed_qm_info {
        struct init_qm_pq_params        *qm_pq_params;
        struct init_qm_vport_params     *qm_vport_params;
@@ -212,6 +265,7 @@ struct qed_qm_info {
        bool                            vport_wfq_en;
        u8                              pf_wfq;
        u32                             pf_rl;
+       struct qed_wfq_data             *wfq_data;
 };
 
 struct storm_stats {
@@ -430,6 +484,7 @@ struct qed_dev {
        u8                              num_hwfns;
        struct qed_hwfn                 hwfns[MAX_HWFNS_PER_DEVICE];
 
+       unsigned long                   tunn_mode;
        u32                             drv_type;
 
        struct qed_eth_stats            *reset_stats;
@@ -480,6 +535,8 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
 
 #define PURE_LB_TC 8
 
+void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate);
+
 #define QED_LEADING_HWFN(dev)   (&dev->hwfns[0])
 
 /* Other Linux specific common definitions */
@@ -507,6 +564,4 @@ u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
 
 int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
 
-#define QED_ETH_INTERFACE_VERSION       300
-
 #endif /* _QED_H */
index b7d100f6bd6fac64d90977b920a7a29150cd5427..b500c86d7d06670e47530272d11175a57caafe12 100644 (file)
@@ -105,6 +105,8 @@ static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
        qm_info->qm_vport_params = NULL;
        kfree(qm_info->qm_port_params);
        qm_info->qm_port_params = NULL;
+       kfree(qm_info->wfq_data);
+       qm_info->wfq_data = NULL;
 }
 
 void qed_resc_free(struct qed_dev *cdev)
@@ -175,6 +177,11 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
        if (!qm_info->qm_port_params)
                goto alloc_err;
 
+       qm_info->wfq_data = kcalloc(num_vports, sizeof(*qm_info->wfq_data),
+                                   GFP_KERNEL);
+       if (!qm_info->wfq_data)
+               goto alloc_err;
+
        vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
 
        /* First init per-TC PQs */
@@ -213,18 +220,19 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
 
        qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);
 
+       for (i = 0; i < qm_info->num_vports; i++)
+               qm_info->qm_vport_params[i].vport_wfq = 1;
+
        qm_info->pf_wfq = 0;
        qm_info->pf_rl = 0;
        qm_info->vport_rl_en = 1;
+       qm_info->vport_wfq_en = 1;
 
        return 0;
 
 alloc_err:
        DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
-       kfree(qm_info->qm_pq_params);
-       kfree(qm_info->qm_vport_params);
-       kfree(qm_info->qm_port_params);
-
+       qed_qm_info_free(p_hwfn);
        return -ENOMEM;
 }
 
@@ -558,6 +566,7 @@ static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
 
 static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt,
+                         struct qed_tunn_start_params *p_tunn,
                          int hw_mode,
                          bool b_hw_start,
                          enum qed_int_mode int_mode,
@@ -574,7 +583,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
                        p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
 
                /* Update rate limit once we'll actually have a link */
-               p_hwfn->qm_info.pf_rl = 100;
+               p_hwfn->qm_info.pf_rl = 100000;
        }
 
        qed_cxt_hw_init_pf(p_hwfn);
@@ -625,7 +634,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
                qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
 
                /* send function start command */
-               rc = qed_sp_pf_start(p_hwfn, p_hwfn->cdev->mf_mode);
+               rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode);
                if (rc)
                        DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
        }
@@ -672,6 +681,7 @@ static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
 }
 
 int qed_hw_init(struct qed_dev *cdev,
+               struct qed_tunn_start_params *p_tunn,
                bool b_hw_start,
                enum qed_int_mode int_mode,
                bool allow_npar_tx_switch,
@@ -724,7 +734,7 @@ int qed_hw_init(struct qed_dev *cdev,
                /* Fall through */
                case FW_MSG_CODE_DRV_LOAD_FUNCTION:
                        rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
-                                           p_hwfn->hw_info.hw_mode,
+                                           p_tunn, p_hwfn->hw_info.hw_mode,
                                            b_hw_start, int_mode,
                                            allow_npar_tx_switch);
                        break;
@@ -1593,3 +1603,312 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
 
        return 0;
 }
+
+/* Calculate final WFQ values for all vports and configure them.
+ * After this configuration each vport will have
+ * approx min rate =  min_pf_rate * (vport_wfq / QED_WFQ_UNIT)
+ */
+static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
+                                            struct qed_ptt *p_ptt,
+                                            u32 min_pf_rate)
+{
+       struct init_qm_vport_params *vport_params;
+       int i;
+
+       vport_params = p_hwfn->qm_info.qm_vport_params;
+
+       for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+               u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
+
+               vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) /
+                                               min_pf_rate;
+               qed_init_vport_wfq(p_hwfn, p_ptt,
+                                  vport_params[i].first_tx_pq_id,
+                                  vport_params[i].vport_wfq);
+       }
+}
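
Plugging numbers into the formula above: with QED_WFQ_UNIT = 100, a PF min rate of 10000 Mbps and a vport min_speed of 2000 Mbps give vport_wfq = (2000 * 100) / 10000 = 20, i.e. roughly a 20% share, and 10000 * (20 / 100) recovers the 2000 Mbps minimum.
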
+
+static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
+                                      u32 min_pf_rate)
+
+{
+       int i;
+
+       for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
+               p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
+}
+
+static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
+                                          struct qed_ptt *p_ptt,
+                                          u32 min_pf_rate)
+{
+       struct init_qm_vport_params *vport_params;
+       int i;
+
+       vport_params = p_hwfn->qm_info.qm_vport_params;
+
+       for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+               qed_init_wfq_default_param(p_hwfn, min_pf_rate);
+               qed_init_vport_wfq(p_hwfn, p_ptt,
+                                  vport_params[i].first_tx_pq_id,
+                                  vport_params[i].vport_wfq);
+       }
+}
+
+/* This function performs several validations for WFQ
+ * configuration and required min rate for a given vport
+ * 1. req_rate must be greater than one percent of min_pf_rate.
+ * 2. req_rate should not cause other vports [not configured for WFQ explicitly]
+ *    rates to get less than one percent of min_pf_rate.
+ * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate.
+ */
+static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
+                             u16 vport_id, u32 req_rate,
+                             u32 min_pf_rate)
+{
+       u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
+       int non_requested_count = 0, req_count = 0, i, num_vports;
+
+       num_vports = p_hwfn->qm_info.num_vports;
+
+       /* Accounting for the vports which are configured for WFQ explicitly */
+       for (i = 0; i < num_vports; i++) {
+               u32 tmp_speed;
+
+               if ((i != vport_id) &&
+                   p_hwfn->qm_info.wfq_data[i].configured) {
+                       req_count++;
+                       tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
+                       total_req_min_rate += tmp_speed;
+               }
+       }
+
+       /* Include current vport data as well */
+       req_count++;
+       total_req_min_rate += req_rate;
+       non_requested_count = num_vports - req_count;
+
+       if (req_rate < min_pf_rate / QED_WFQ_UNIT) {
+               DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+                          "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
+                          vport_id, req_rate, min_pf_rate);
+               return -EINVAL;
+       }
+
+       if (num_vports > QED_WFQ_UNIT) {
+               DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+                          "Number of vports is greater than %d\n",
+                          QED_WFQ_UNIT);
+               return -EINVAL;
+       }
+
+       if (total_req_min_rate > min_pf_rate) {
+               DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+                          "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
+                          total_req_min_rate, min_pf_rate);
+               return -EINVAL;
+       }
+
+       total_left_rate = min_pf_rate - total_req_min_rate;
+
+       /* Guard against division by zero when every vport is configured */
+       if (non_requested_count) {
+               left_rate_per_vp = total_left_rate / non_requested_count;
+               if (left_rate_per_vp < min_pf_rate / QED_WFQ_UNIT) {
+                       DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+                                  "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
+                                  left_rate_per_vp, min_pf_rate);
+                       return -EINVAL;
+               }
+       }
+
+       p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
+       p_hwfn->qm_info.wfq_data[vport_id].configured = true;
+
+       for (i = 0; i < num_vports; i++) {
+               if (p_hwfn->qm_info.wfq_data[i].configured)
+                       continue;
+
+               p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
+       }
+
+       return 0;
+}
+
+static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn,
+                                                struct qed_ptt *p_ptt,
+                                                u32 min_pf_rate)
+{
+       bool use_wfq = false;
+       int rc = 0;
+       u16 i;
+
+       /* Validate all pre-configured vports for WFQ */
+       for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+               u32 rate;
+
+               if (!p_hwfn->qm_info.wfq_data[i].configured)
+                       continue;
+
+               rate = p_hwfn->qm_info.wfq_data[i].min_speed;
+               use_wfq = true;
+
+               rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
+               if (rc) {
+                       DP_NOTICE(p_hwfn,
+                                 "WFQ validation failed while configuring min rate\n");
+                       break;
+               }
+       }
+
+       if (!rc && use_wfq)
+               qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
+       else
+               qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
+
+       return rc;
+}
+
+/* API to configure WFQ upon MCP link change */
+void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
+{
+       int i;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               __qed_configure_vp_wfq_on_link_change(p_hwfn,
+                                                     p_hwfn->p_dpc_ptt,
+                                                     min_pf_rate);
+       }
+}
+
+int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
+                                    struct qed_ptt *p_ptt,
+                                    struct qed_mcp_link_state *p_link,
+                                    u8 max_bw)
+{
+       int rc = 0;
+
+       p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;
+
+       if (!p_link->line_speed && (max_bw != 100))
+               return rc;
+
+       p_link->speed = (p_link->line_speed * max_bw) / 100;
+       p_hwfn->qm_info.pf_rl = p_link->speed;
+
+       /* Since the limiter also affects Tx-switched traffic, we don't want
+        * it to limit such traffic when there's no actual limit. In that
+        * case, set the limit to an imaginary high boundary (100 Gb/s).
+        */
+       if (max_bw == 100)
+               p_hwfn->qm_info.pf_rl = 100000;
+
+       rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+                           p_hwfn->qm_info.pf_rl);
+
+       DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+                  "Configured MAX bandwidth to be %08x Mb/sec\n",
+                  p_link->speed);
+
+       return rc;
+}
+
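+/* Example (illustrative): with p_link->line_speed = 25000 Mbps and
+ * max_bw = 40, the limiter is programmed to 25000 * 40 / 100 =
+ * 10000 Mbps; max_bw = 100 parks it at 100000 Mbps, i.e. effectively
+ * unlimited.
+ */
+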
+/* Main API to configure PF max bandwidth where bw range is [1 - 100] */
+int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw)
+{
+       int i, rc = -EINVAL;
+
+       if (max_bw < 1 || max_bw > 100) {
+               DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n");
+               return rc;
+       }
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+               struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
+               struct qed_mcp_link_state *p_link;
+               struct qed_ptt *p_ptt;
+
+               p_link = &p_lead->mcp_info->link_output;
+
+               p_ptt = qed_ptt_acquire(p_hwfn);
+               if (!p_ptt)
+                       return -EBUSY;
+
+               rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt,
+                                                     p_link, max_bw);
+
+               qed_ptt_release(p_hwfn, p_ptt);
+
+               if (rc)
+                       break;
+       }
+
+       return rc;
+}
+
+int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
+                                    struct qed_ptt *p_ptt,
+                                    struct qed_mcp_link_state *p_link,
+                                    u8 min_bw)
+{
+       int rc = 0;
+
+       p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
+       p_hwfn->qm_info.pf_wfq = min_bw;
+
+       if (!p_link->line_speed)
+               return rc;
+
+       p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;
+
+       rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);
+
+       DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+                  "Configured MIN bandwidth to be %d Mb/sec\n",
+                  p_link->min_pf_rate);
+
+       return rc;
+}
+
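+/* Example (illustrative): min_bw = 20 on a 40000 Mbps link yields
+ * min_pf_rate = 40000 * 20 / 100 = 8000 Mbps, which then seeds the
+ * per-vport WFQ validation above.
+ */
+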
+/* Main API to configure PF min bandwidth where bw range is [1-100] */
+int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw)
+{
+       int i, rc = -EINVAL;
+
+       if (min_bw < 1 || min_bw > 100) {
+               DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n");
+               return rc;
+       }
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+               struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
+               struct qed_mcp_link_state *p_link;
+               struct qed_ptt *p_ptt;
+
+               p_link = &p_lead->mcp_info->link_output;
+
+               p_ptt = qed_ptt_acquire(p_hwfn);
+               if (!p_ptt)
+                       return -EBUSY;
+
+               rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt,
+                                                     p_link, min_bw);
+               if (rc) {
+                       qed_ptt_release(p_hwfn, p_ptt);
+                       return rc;
+               }
+
+               if (p_link->min_pf_rate) {
+                       u32 min_rate = p_link->min_pf_rate;
+
+                       rc = __qed_configure_vp_wfq_on_link_change(p_hwfn,
+                                                                  p_ptt,
+                                                                  min_rate);
+               }
+
+               qed_ptt_release(p_hwfn, p_ptt);
+       }
+
+       return rc;
+}
index d6c7ddf4f4d4b54bd23563ba43b125bd9cd64af7..6aac3f855aa1b355aca92ba79860335ff2502d2c 100644 (file)
@@ -62,6 +62,7 @@ void qed_resc_setup(struct qed_dev *cdev);
  * @brief qed_hw_init -
  *
  * @param cdev
+ * @param p_tunn
  * @param b_hw_start
  * @param int_mode - interrupt mode [msix, inta, etc.] to use.
  * @param allow_npar_tx_switch - npar tx switching to be used
@@ -72,6 +73,7 @@ void qed_resc_setup(struct qed_dev *cdev);
  * @return int
  */
 int qed_hw_init(struct qed_dev *cdev,
+               struct qed_tunn_start_params *p_tunn,
                bool b_hw_start,
                enum qed_int_mode int_mode,
                bool allow_npar_tx_switch,
index a368f5e71d958bd1979236e9022dae07843479e0..c4fae71bed11f3cdaff65ccc93b5ea0061614f28 100644 (file)
@@ -46,7 +46,7 @@ enum common_ramrod_cmd_id {
        COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */,
        COMMON_RAMROD_RESERVED,
        COMMON_RAMROD_RESERVED2,
-       COMMON_RAMROD_RESERVED3,
+       COMMON_RAMROD_PF_UPDATE,
        COMMON_RAMROD_EMPTY,
        MAX_COMMON_RAMROD_CMD_ID
 };
@@ -626,6 +626,42 @@ struct pf_start_ramrod_data {
        u8                              reserved0[4];
 };
 
+/* tunnel configuration */
+struct pf_update_tunnel_config {
+       u8      update_rx_pf_clss;
+       u8      update_tx_pf_clss;
+       u8      set_vxlan_udp_port_flg;
+       u8      set_geneve_udp_port_flg;
+       u8      tx_enable_vxlan;
+       u8      tx_enable_l2geneve;
+       u8      tx_enable_ipgeneve;
+       u8      tx_enable_l2gre;
+       u8      tx_enable_ipgre;
+       u8      tunnel_clss_vxlan;
+       u8      tunnel_clss_l2geneve;
+       u8      tunnel_clss_ipgeneve;
+       u8      tunnel_clss_l2gre;
+       u8      tunnel_clss_ipgre;
+       __le16  vxlan_udp_port;
+       __le16  geneve_udp_port;
+       __le16  reserved[3];
+};
+
+struct pf_update_ramrod_data {
+       u32                             reserved[2];
+       u32                             reserved_1[6];
+       struct pf_update_tunnel_config  tunnel_config;
+};
+
+/* Tunnel classification scheme */
+enum tunnel_clss {
+       TUNNEL_CLSS_MAC_VLAN = 0,
+       TUNNEL_CLSS_MAC_VNI,
+       TUNNEL_CLSS_INNER_MAC_VLAN,
+       TUNNEL_CLSS_INNER_MAC_VNI,
+       MAX_TUNNEL_CLSS
+};
+
 enum ports_mode {
        ENGX2_PORTX1 /* 2 engines x 1 port */,
        ENGX2_PORTX2 /* 2 engines x 2 ports */,
@@ -1603,6 +1639,19 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn        *p_hwfn,
                          u16                   start_pq,
                          u16                   num_pqs);
 
+void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
+                            struct qed_ptt  *p_ptt, u16 dest_port);
+void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
+                         struct qed_ptt *p_ptt, bool vxlan_enable);
+void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
+                       struct qed_ptt  *p_ptt, bool eth_gre_enable,
+                       bool ip_gre_enable);
+void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
+                             struct qed_ptt *p_ptt, u16 dest_port);
+void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
+                          struct qed_ptt *p_ptt, bool eth_geneve_enable,
+                          bool ip_geneve_enable);
+
 /* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
 #define YSTORM_FLOW_CONTROL_MODE_OFFSET  (IRO[0].base)
 #define YSTORM_FLOW_CONTROL_MODE_SIZE    (IRO[0].size)
@@ -3788,7 +3837,7 @@ struct public_drv_mb {
 
 #define DRV_MSG_CODE_SET_LLDP                   0x24000000
 #define DRV_MSG_CODE_SET_DCBX                   0x25000000
-
+#define DRV_MSG_CODE_BW_UPDATE_ACK             0x32000000
 #define DRV_MSG_CODE_NIG_DRAIN                  0x30000000
 
 #define DRV_MSG_CODE_INITIATE_FLR               0x02000000
@@ -3808,6 +3857,7 @@ struct public_drv_mb {
 #define DRV_MSG_CODE_PHY_CORE_WRITE             0x000e0000
 #define DRV_MSG_CODE_SET_VERSION                0x000f0000
 
+#define DRV_MSG_CODE_BIST_TEST                  0x001e0000
 #define DRV_MSG_CODE_SET_LED_MODE               0x00200000
 
 #define DRV_MSG_SEQ_NUMBER_MASK                 0x0000ffff
@@ -3865,6 +3915,18 @@ struct public_drv_mb {
 #define DRV_MB_PARAM_SET_LED_MODE_ON            0x1
 #define DRV_MB_PARAM_SET_LED_MODE_OFF           0x2
 
+#define DRV_MB_PARAM_BIST_UNKNOWN_TEST          0
+#define DRV_MB_PARAM_BIST_REGISTER_TEST         1
+#define DRV_MB_PARAM_BIST_CLOCK_TEST            2
+
+#define DRV_MB_PARAM_BIST_RC_UNKNOWN            0
+#define DRV_MB_PARAM_BIST_RC_PASSED             1
+#define DRV_MB_PARAM_BIST_RC_FAILED             2
+#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER  3
+
+#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT      0
+#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK       0x000000FF
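+
+/* The BIST test index is passed in bits [7:0] of drv_mb_param; the
+ * result code is returned through the mailbox param field.
+ */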
+
        u32 fw_mb_header;
 #define FW_MSG_CODE_MASK                        0xffff0000
 #define FW_MSG_CODE_DRV_LOAD_ENGINE             0x10100000
@@ -5067,4 +5129,8 @@ struct hw_set_image {
        struct hw_set_info      hw_sets[1];
 };
 
+int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                   u8 pf_id, u16 pf_wfq);
+int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                      u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq);
 #endif
index f55ebdc3c8322ac5255ba779beadcdee25e3e309..e8a3b9da59b5f2cd559dae84502b30a16603de68 100644 (file)
@@ -712,6 +712,21 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
        return 0;
 }
 
+int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
+                   struct qed_ptt *p_ptt,
+                   u8 pf_id, u16 pf_wfq)
+{
+       u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);
+
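+       /* A zero or over-limit increment value means the requested
+        * weight cannot be represented in QM hardware units.
+        */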
+       if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+               DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration");
+               return -1;
+       }
+
+       qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
+       return 0;
+}
+
 int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
                   struct qed_ptt *p_ptt,
                   u8 pf_id,
@@ -732,6 +747,31 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
        return 0;
 }
 
+int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
+                      struct qed_ptt *p_ptt,
+                      u16 first_tx_pq_id[NUM_OF_TCS],
+                      u16 vport_wfq)
+{
+       u32 inc_val = QM_WFQ_INC_VAL(vport_wfq);
+       u8 tc;
+
+       if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+               DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration");
+               return -1;
+       }
+
+       for (tc = 0; tc < NUM_OF_TCS; tc++) {
+               u16 vport_pq_id = first_tx_pq_id[tc];
+
+               if (vport_pq_id != QM_INVALID_PQ_ID)
+                       qed_wr(p_hwfn, p_ptt,
+                              QM_REG_WFQVPWEIGHT + vport_pq_id * 4,
+                              inc_val);
+       }
+
+       return 0;
+}
+
 int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
                      struct qed_ptt *p_ptt,
                      u8 vport_id,
@@ -788,3 +828,130 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
 
        return true;
 }
+
+static void
+qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable)
+{
+       if (enable)
+               set_bit(bit, var);
+       else
+               clear_bit(bit, var);
+}
+
+#define PRS_ETH_TUNN_FIC_FORMAT        -188897008
+
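+/* The helpers below mirror each tunnel setting into all of the HW
+ * blocks that look at it (PRS parser, NIG, PBF and DORQ), so RX
+ * classification and TX stay consistent.
+ */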
+void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
+                            struct qed_ptt *p_ptt,
+                            u16 dest_port)
+{
+       qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_PORT, dest_port);
+       qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
+}
+
+void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
+                         struct qed_ptt *p_ptt,
+                         bool vxlan_enable)
+{
+       unsigned long reg_val = 0;
+       u8 shift;
+
+       reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+       shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
+       qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
+
+       qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+
+       if (reg_val)
+               qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+                      PRS_ETH_TUNN_FIC_FORMAT);
+
+       reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
+       shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
+       qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
+
+       qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
+
+       qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
+              vxlan_enable ? 1 : 0);
+}
+
+void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                       bool eth_gre_enable, bool ip_gre_enable)
+{
+       unsigned long reg_val = 0;
+       u8 shift;
+
+       reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+       shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT;
+       qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
+
+       shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
+       qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
+       qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+       if (reg_val)
+               qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+                      PRS_ETH_TUNN_FIC_FORMAT);
+
+       reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
+       shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT;
+       qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
+
+       shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT;
+       qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
+
+       qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
+              eth_gre_enable ? 1 : 0);
+       qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
+              ip_gre_enable ? 1 : 0);
+}
+
+void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
+                             struct qed_ptt *p_ptt,
+                             u16 dest_port)
+{
+       qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
+       qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
+}
+
+void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
+                          struct qed_ptt *p_ptt,
+                          bool eth_geneve_enable,
+                          bool ip_geneve_enable)
+{
+       unsigned long reg_val = 0;
+       u8 shift;
+
+       reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+       shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
+       qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_geneve_enable);
+
+       shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
+       qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_geneve_enable);
+
+       qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+       if (reg_val)
+               qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+                      PRS_ETH_TUNN_FIC_FORMAT);
+
+       qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
+              eth_geneve_enable ? 1 : 0);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);
+
+       /* comp ver: set in NIG/PBF/PRS whenever any GENEVE mode is enabled */
+       reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
+       qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
+       qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
+       qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);
+
+       /* EDPM with geneve tunnel not supported in BB_B0 */
+       if (QED_IS_BB_B0(p_hwfn->cdev))
+               return;
+
+       qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
+              eth_geneve_enable ? 1 : 0);
+       qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
+              ip_geneve_enable ? 1 : 0);
+}
index 3f35c6ca92528d3cc1951df3d09f8ef19c3dd8ba..31e1d510a991894441f83494953196f18900a003 100644 (file)
 #include "qed_reg_addr.h"
 #include "qed_sp.h"
 
-enum qed_rss_caps {
-       QED_RSS_IPV4            = 0x1,
-       QED_RSS_IPV6            = 0x2,
-       QED_RSS_IPV4_TCP        = 0x4,
-       QED_RSS_IPV6_TCP        = 0x8,
-       QED_RSS_IPV4_UDP        = 0x10,
-       QED_RSS_IPV6_UDP        = 0x20,
-};
-
-/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
-#define QED_RSS_IND_TABLE_SIZE 128
-#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */
-
 struct qed_rss_params {
        u8      update_rss_config;
        u8      rss_enable;
@@ -1428,16 +1415,16 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
                        sizeof(port_stats));
 
        p_stats->rx_64_byte_packets             += port_stats.pmm.r64;
-       p_stats->rx_127_byte_packets            += port_stats.pmm.r127;
-       p_stats->rx_255_byte_packets            += port_stats.pmm.r255;
-       p_stats->rx_511_byte_packets            += port_stats.pmm.r511;
-       p_stats->rx_1023_byte_packets           += port_stats.pmm.r1023;
-       p_stats->rx_1518_byte_packets           += port_stats.pmm.r1518;
-       p_stats->rx_1522_byte_packets           += port_stats.pmm.r1522;
-       p_stats->rx_2047_byte_packets           += port_stats.pmm.r2047;
-       p_stats->rx_4095_byte_packets           += port_stats.pmm.r4095;
-       p_stats->rx_9216_byte_packets           += port_stats.pmm.r9216;
-       p_stats->rx_16383_byte_packets          += port_stats.pmm.r16383;
+       p_stats->rx_65_to_127_byte_packets      += port_stats.pmm.r127;
+       p_stats->rx_128_to_255_byte_packets     += port_stats.pmm.r255;
+       p_stats->rx_256_to_511_byte_packets     += port_stats.pmm.r511;
+       p_stats->rx_512_to_1023_byte_packets    += port_stats.pmm.r1023;
+       p_stats->rx_1024_to_1518_byte_packets   += port_stats.pmm.r1518;
+       p_stats->rx_1519_to_1522_byte_packets   += port_stats.pmm.r1522;
+       p_stats->rx_1519_to_2047_byte_packets   += port_stats.pmm.r2047;
+       p_stats->rx_2048_to_4095_byte_packets   += port_stats.pmm.r4095;
+       p_stats->rx_4096_to_9216_byte_packets   += port_stats.pmm.r9216;
+       p_stats->rx_9217_to_16383_byte_packets  += port_stats.pmm.r16383;
        p_stats->rx_crc_errors                  += port_stats.pmm.rfcs;
        p_stats->rx_mac_crtl_frames             += port_stats.pmm.rxcf;
        p_stats->rx_pause_frames                += port_stats.pmm.rxpf;
@@ -1744,9 +1731,7 @@ static int qed_update_vport(struct qed_dev *cdev,
                sp_rss_params.update_rss_capabilities = 1;
                sp_rss_params.update_rss_ind_table = 1;
                sp_rss_params.update_rss_key = 1;
-               sp_rss_params.rss_caps = QED_RSS_IPV4 |
-                                        QED_RSS_IPV6 |
-                                        QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
+               sp_rss_params.rss_caps = params->rss_params.rss_caps;
                sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
                memcpy(sp_rss_params.rss_ind_table,
                       params->rss_params.rss_ind_table,
@@ -1899,6 +1884,36 @@ static int qed_stop_txq(struct qed_dev *cdev,
        return 0;
 }
 
+static int qed_tunn_configure(struct qed_dev *cdev,
+                             struct qed_tunn_params *tunn_params)
+{
+       struct qed_tunn_update_params tunn_info;
+       int i, rc;
+
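+       /* Propagate the updated tunnel UDP ports to every hwfn via a
+        * PF_UPDATE ramrod; QED_SPQ_MODE_EBLOCK waits for completion.
+        */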
+       memset(&tunn_info, 0, sizeof(tunn_info));
+       if (tunn_params->update_vxlan_port == 1) {
+               tunn_info.update_vxlan_udp_port = 1;
+               tunn_info.vxlan_udp_port = tunn_params->vxlan_port;
+       }
+
+       if (tunn_params->update_geneve_port == 1) {
+               tunn_info.update_geneve_udp_port = 1;
+               tunn_info.geneve_udp_port = tunn_params->geneve_port;
+       }
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *hwfn = &cdev->hwfns[i];
+
+               rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info,
+                                              QED_SPQ_MODE_EBLOCK, NULL);
+
+               if (rc)
+                       return rc;
+       }
+
+       return 0;
+}
+
 static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
                                        enum qed_filter_rx_mode_type type)
 {
@@ -2041,16 +2056,11 @@ static const struct qed_eth_ops qed_eth_ops_pass = {
        .fastpath_stop = &qed_fastpath_stop,
        .eth_cqe_completion = &qed_fp_cqe_completion,
        .get_vport_stats = &qed_get_vport_stats,
+       .tunn_config = &qed_tunn_configure,
 };
 
-const struct qed_eth_ops *qed_get_eth_ops(u32 version)
+const struct qed_eth_ops *qed_get_eth_ops(void)
 {
-       if (version != QED_ETH_INTERFACE_VERSION) {
-               pr_notice("Cannot supply ethtool operations [%08x != %08x]\n",
-                         version, QED_ETH_INTERFACE_VERSION);
-               return NULL;
-       }
-
        return &qed_eth_ops_pass;
 }
 EXPORT_SYMBOL(qed_get_eth_ops);
index 26d40db07ddd12a13ae07f7494e3244e03f70af4..1b758bdec58792b072c62a5c39c9c4536600fed7 100644 (file)
@@ -28,6 +28,7 @@
 #include "qed_dev_api.h"
 #include "qed_mcp.h"
 #include "qed_hw.h"
+#include "qed_selftest.h"
 
 static char version[] =
        "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
@@ -744,6 +745,7 @@ static void qed_update_pf_params(struct qed_dev *cdev,
 static int qed_slowpath_start(struct qed_dev *cdev,
                              struct qed_slowpath_params *params)
 {
+       struct qed_tunn_start_params tunn_info;
        struct qed_mcp_drv_version drv_version;
        const u8 *data = NULL;
        struct qed_hwfn *hwfn;
@@ -776,7 +778,19 @@ static int qed_slowpath_start(struct qed_dev *cdev,
        /* Start the slowpath */
        data = cdev->firmware->data;
 
-       rc = qed_hw_init(cdev, true, cdev->int_params.out.int_mode,
+       memset(&tunn_info, 0, sizeof(tunn_info));
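+
+       /* Default slowpath start: enable every tunnel mode and classify
+        * VXLAN and GRE tunnels with the MAC/VLAN scheme.
+        */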
+       tunn_info.tunn_mode |=  1 << QED_MODE_VXLAN_TUNN |
+                               1 << QED_MODE_L2GRE_TUNN |
+                               1 << QED_MODE_IPGRE_TUNN |
+                               1 << QED_MODE_L2GENEVE_TUNN |
+                               1 << QED_MODE_IPGENEVE_TUNN;
+
+       tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;
+       tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;
+       tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;
+
+       rc = qed_hw_init(cdev, &tunn_info, true,
+                        cdev->int_params.out.int_mode,
                         true, data);
        if (rc)
                goto err2;
@@ -902,6 +916,11 @@ static u32 qed_sb_release(struct qed_dev *cdev,
        return rc;
 }
 
+static bool qed_can_link_change(struct qed_dev *cdev)
+{
+       return true;
+}
+
 static int qed_set_link(struct qed_dev *cdev,
                        struct qed_link_params *params)
 {
@@ -944,6 +963,39 @@ static int qed_set_link(struct qed_dev *cdev,
        }
        if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
                link_params->speed.forced_speed = params->forced_speed;
+       if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
+               if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
+                       link_params->pause.autoneg = true;
+               else
+                       link_params->pause.autoneg = false;
+               if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
+                       link_params->pause.forced_rx = true;
+               else
+                       link_params->pause.forced_rx = false;
+               if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
+                       link_params->pause.forced_tx = true;
+               else
+                       link_params->pause.forced_tx = false;
+       }
+       if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
+               switch (params->loopback_mode) {
+               case QED_LINK_LOOPBACK_INT_PHY:
+                       link_params->loopback_mode = PMM_LOOPBACK_INT_PHY;
+                       break;
+               case QED_LINK_LOOPBACK_EXT_PHY:
+                       link_params->loopback_mode = PMM_LOOPBACK_EXT_PHY;
+                       break;
+               case QED_LINK_LOOPBACK_EXT:
+                       link_params->loopback_mode = PMM_LOOPBACK_EXT;
+                       break;
+               case QED_LINK_LOOPBACK_MAC:
+                       link_params->loopback_mode = PMM_LOOPBACK_MAC;
+                       break;
+               default:
+                       link_params->loopback_mode = PMM_LOOPBACK_NONE;
+                       break;
+               }
+       }
 
        rc = qed_mcp_set_link(hwfn, ptt, params->link_up);
 
@@ -1150,7 +1202,15 @@ static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
        return status;
 }
 
+static struct qed_selftest_ops qed_selftest_ops_pass = {
+       .selftest_memory = &qed_selftest_memory,
+       .selftest_interrupt = &qed_selftest_interrupt,
+       .selftest_register = &qed_selftest_register,
+       .selftest_clock = &qed_selftest_clock,
+};
+
 const struct qed_common_ops qed_common_ops_pass = {
+       .selftest = &qed_selftest_ops_pass,
        .probe = &qed_probe,
        .remove = &qed_remove,
        .set_power_state = &qed_set_power_state,
@@ -1164,6 +1224,7 @@ const struct qed_common_ops qed_common_ops_pass = {
        .sb_release = &qed_sb_release,
        .simd_handler_config = &qed_simd_handler_config,
        .simd_handler_clean = &qed_simd_handler_clean,
+       .can_link_change = &qed_can_link_change,
        .set_link = &qed_set_link,
        .get_link = &qed_get_current_link,
        .drain = &qed_drain,
@@ -1172,14 +1233,3 @@ const struct qed_common_ops qed_common_ops_pass = {
        .chain_free = &qed_chain_free,
        .set_led = &qed_set_led,
 };
-
-u32 qed_get_protocol_version(enum qed_protocol protocol)
-{
-       switch (protocol) {
-       case QED_PROTOCOL_ETH:
-               return QED_ETH_INTERFACE_VERSION;
-       default:
-               return 0;
-       }
-}
-EXPORT_SYMBOL(qed_get_protocol_version);
index b89c9a8e16557869cfa6682e09c06111327d730f..2f8309d772c8f7d233bfc284cc876d5e90351dc4 100644 (file)
@@ -472,6 +472,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
                                       bool b_reset)
 {
        struct qed_mcp_link_state *p_link;
+       u8 max_bw, min_bw;
        u32 status = 0;
 
        p_link = &p_hwfn->mcp_info->link_output;
@@ -527,17 +528,20 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
                p_link->speed = 0;
        }
 
-       /* Correct speed according to bandwidth allocation */
-       if (p_hwfn->mcp_info->func_info.bandwidth_max && p_link->speed) {
-               p_link->speed = p_link->speed *
-                               p_hwfn->mcp_info->func_info.bandwidth_max /
-                               100;
-               qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
-                              p_link->speed);
-               DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
-                          "Configured MAX bandwidth to be %08x Mb/sec\n",
-                          p_link->speed);
-       }
+       if (p_link->link_up && p_link->speed)
+               p_link->line_speed = p_link->speed;
+       else
+               p_link->line_speed = 0;
+
+       max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
+       min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
+
+       /* Max bandwidth configuration */
+       __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);
+
+       /* Min bandwidth configuration */
+       __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
+       qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_link->min_pf_rate);
 
        p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
        p_link->an_complete = !!(status &
@@ -648,6 +652,77 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
        return 0;
 }
 
+static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
+                                 struct public_func *p_shmem_info)
+{
+       struct qed_mcp_function_info *p_info;
+
+       p_info = &p_hwfn->mcp_info->func_info;
+
+       p_info->bandwidth_min = (p_shmem_info->config &
+                                FUNC_MF_CFG_MIN_BW_MASK) >>
+                                       FUNC_MF_CFG_MIN_BW_SHIFT;
+       if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
+               DP_INFO(p_hwfn,
+                       "bandwidth minimum out of bounds [%02x]. Set to 1\n",
+                       p_info->bandwidth_min);
+               p_info->bandwidth_min = 1;
+       }
+
+       p_info->bandwidth_max = (p_shmem_info->config &
+                                FUNC_MF_CFG_MAX_BW_MASK) >>
+                                       FUNC_MF_CFG_MAX_BW_SHIFT;
+       if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
+               DP_INFO(p_hwfn,
+                       "bandwidth maximum out of bounds [%02x]. Set to 100\n",
+                       p_info->bandwidth_max);
+               p_info->bandwidth_max = 100;
+       }
+}
+
+static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
+                                 struct qed_ptt *p_ptt,
+                                 struct public_func *p_data,
+                                 int pfid)
+{
+       u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+                                       PUBLIC_FUNC);
+       u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
+       u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
+       u32 i, size;
+
+       memset(p_data, 0, sizeof(*p_data));
+
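+       /* Copy this PF's public_func section out of MFW shmem one dword
+        * at a time, clamped to the section size reported by the MFW.
+        */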
+       size = min_t(u32, sizeof(*p_data),
+                    QED_SECTION_SIZE(mfw_path_offsize));
+       for (i = 0; i < size / sizeof(u32); i++)
+               ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
+                                           func_addr + (i << 2));
+       return size;
+}
+
+static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn,
+                             struct qed_ptt *p_ptt)
+{
+       struct qed_mcp_function_info *p_info;
+       struct public_func shmem_info;
+       u32 resp = 0, param = 0;
+
+       qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+                              MCP_PF_ID(p_hwfn));
+
+       qed_read_pf_bandwidth(p_hwfn, &shmem_info);
+
+       p_info = &p_hwfn->mcp_info->func_info;
+
+       qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min);
+       qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);
+
+       /* Acknowledge the MFW */
+       qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
+                   &param);
+}
+
 int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt)
 {
@@ -679,6 +754,9 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
                case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
                        qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
                        break;
+               case MFW_DRV_MSG_BW_UPDATE:
+                       qed_mcp_update_bw(p_hwfn, p_ptt);
+                       break;
                default:
                        DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
                        rc = -EINVAL;
@@ -758,28 +836,6 @@ int qed_mcp_get_media_type(struct qed_dev *cdev,
        return 0;
 }
 
-static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
-                                 struct qed_ptt *p_ptt,
-                                 struct public_func *p_data,
-                                 int pfid)
-{
-       u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
-                                       PUBLIC_FUNC);
-       u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
-       u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
-       u32 i, size;
-
-       memset(p_data, 0, sizeof(*p_data));
-
-       size = min_t(u32, sizeof(*p_data),
-                    QED_SECTION_SIZE(mfw_path_offsize));
-       for (i = 0; i < size / sizeof(u32); i++)
-               ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
-                                           func_addr + (i << 2));
-
-       return size;
-}
-
 static int
 qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
                        struct public_func *p_info,
@@ -818,26 +874,7 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
                return -EINVAL;
        }
 
-
-       info->bandwidth_min = (shmem_info.config &
-                              FUNC_MF_CFG_MIN_BW_MASK) >>
-                             FUNC_MF_CFG_MIN_BW_SHIFT;
-       if (info->bandwidth_min < 1 || info->bandwidth_min > 100) {
-               DP_INFO(p_hwfn,
-                       "bandwidth minimum out of bounds [%02x]. Set to 1\n",
-                       info->bandwidth_min);
-               info->bandwidth_min = 1;
-       }
-
-       info->bandwidth_max = (shmem_info.config &
-                              FUNC_MF_CFG_MAX_BW_MASK) >>
-                             FUNC_MF_CFG_MAX_BW_SHIFT;
-       if (info->bandwidth_max < 1 || info->bandwidth_max > 100) {
-               DP_INFO(p_hwfn,
-                       "bandwidth maximum out of bounds [%02x]. Set to 100\n",
-                       info->bandwidth_max);
-               info->bandwidth_max = 100;
-       }
+       qed_read_pf_bandwidth(p_hwfn, &shmem_info);
 
        if (shmem_info.mac_upper || shmem_info.mac_lower) {
                info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
@@ -938,9 +975,10 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
 
        p_drv_version = &union_data.drv_version;
        p_drv_version->version = p_ver->version;
+
        for (i = 0; i < MCP_DRV_VER_STR_SIZE - 1; i += 4) {
                val = cpu_to_be32(p_ver->name[i]);
-               *(u32 *)&p_drv_version->name[i * sizeof(u32)] = val;
+               *(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val;
        }
 
        memset(&mb_params, 0, sizeof(mb_params));
@@ -979,3 +1017,45 @@ int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
 
        return rc;
 }
+
+int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+       u32 drv_mb_param = 0, rsp, param;
+       int rc = 0;
+
+       drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
+                       DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+
+       rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+                        drv_mb_param, &rsp, &param);
+
+       if (rc)
+               return rc;
+
+       if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
+           (param != DRV_MB_PARAM_BIST_RC_PASSED))
+               rc = -EAGAIN;
+
+       return rc;
+}
+
+int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+       u32 drv_mb_param, rsp, param;
+       int rc = 0;
+
+       drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
+                       DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+
+       rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+                        drv_mb_param, &rsp, &param);
+
+       if (rc)
+               return rc;
+
+       if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
+           (param != DRV_MB_PARAM_BIST_RC_PASSED))
+               rc = -EAGAIN;
+
+       return rc;
+}
index 50917a2131a5da04e0134f183f6cce4509175c29..5f218eed05415674a2d919ae76a2bd4ca9977634 100644 (file)
@@ -40,7 +40,15 @@ struct qed_mcp_link_capabilities {
 struct qed_mcp_link_state {
        bool    link_up;
 
-       u32     speed; /* In Mb/s */
+       u32     min_pf_rate;
+
+       /* Actual link speed in Mb/s */
+       u32     line_speed;
+
+       /* PF max speed in Mb/s, deduced from line_speed
+        * according to PF max bandwidth configuration.
+        */
+       u32     speed;
        bool    full_duplex;
 
        bool    an;
@@ -237,6 +245,28 @@ int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
                    struct qed_ptt *p_ptt,
                    enum qed_led_mode mode);
 
+/**
+ * @brief BIST register test
+ *
+ *  @param p_hwfn    - hw function
+ *  @param p_ptt     - PTT required for register access
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn,
+                              struct qed_ptt *p_ptt);
+
+/**
+ * @brief BIST clock test
+ *
+ *  @param p_hwfn    - hw function
+ *  @param p_ptt     - PTT required for register access
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn,
+                           struct qed_ptt *p_ptt);
+
 /* Using hwfn number (and not pf_num) is required since in CMT mode,
  * same pf_num may be used by two different hwfn
  * TODO - this shouldn't really be in .h file, but until all fields
@@ -388,5 +418,14 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn,
  * @return true iff MFW is running and mcp_info is initialized
  */
 bool qed_mcp_is_init(struct qed_hwfn *p_hwfn);
-
+int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw);
+int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw);
+int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
+                                    struct qed_ptt *p_ptt,
+                                    struct qed_mcp_link_state *p_link,
+                                    u8 max_bw);
+int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
+                                    struct qed_ptt *p_ptt,
+                                    struct qed_mcp_link_state *p_link,
+                                    u8 min_bw);
 #endif
index c15b1622e636e4509c6e537fb3a68018d661a43e..bf4d7ccd56bb7f4c7da2ac619ac90303ec27edb9 100644 (file)
        0x2aae60UL
 #define PGLUE_B_REG_PF_BAR1_SIZE \
        0x2aae64UL
+#define PRS_REG_ENCAPSULATION_TYPE_EN  0x1f0730UL
+#define PRS_REG_GRE_PROTOCOL           0x1f0734UL
+#define PRS_REG_VXLAN_PORT             0x1f0738UL
+#define PRS_REG_OUTPUT_FORMAT_4_0      0x1f099cUL
+#define NIG_REG_ENC_TYPE_ENABLE                0x501058UL
+
+#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE            (0x1 << 0)
+#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT      0
+#define NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE             (0x1 << 1)
+#define NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT       1
+#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE                   (0x1 << 2)
+#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT             2
+
+#define NIG_REG_VXLAN_PORT             0x50105cUL
+#define PBF_REG_VXLAN_PORT             0xd80518UL
+#define PBF_REG_NGE_PORT               0xd8051cUL
+#define PRS_REG_NGE_PORT               0x1f086cUL
+#define NIG_REG_NGE_PORT               0x508b38UL
+
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN     0x10090cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN      0x100910UL
+#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN       0x100914UL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN      0x10092cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN     0x100930UL
+
+#define NIG_REG_NGE_IP_ENABLE                  0x508b28UL
+#define NIG_REG_NGE_ETH_ENABLE                 0x508b2cUL
+#define NIG_REG_NGE_COMP_VER                   0x508b30UL
+#define PBF_REG_NGE_COMP_VER                   0xd80524UL
+#define PRS_REG_NGE_COMP_VER                   0x1f0878UL
+
+#define QM_REG_WFQPFWEIGHT     0x2f4e80UL
+#define QM_REG_WFQVPWEIGHT     0x2fa000UL
 #endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.c b/drivers/net/ethernet/qlogic/qed/qed_selftest.c
new file mode 100644 (file)
index 0000000..a342bfe
--- /dev/null
@@ -0,0 +1,76 @@
+#include "qed.h"
+#include "qed_dev_api.h"
+#include "qed_mcp.h"
+#include "qed_sp.h"
+
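+/* The memory and interrupt selftests both send an empty (heartbeat)
+ * ramrod: its completion implies the SPQ interrupt and memory paths
+ * are functional.
+ */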
+int qed_selftest_memory(struct qed_dev *cdev)
+{
+       int rc = 0, i;
+
+       for_each_hwfn(cdev, i) {
+               rc = qed_sp_heartbeat_ramrod(&cdev->hwfns[i]);
+               if (rc)
+                       return rc;
+       }
+
+       return rc;
+}
+
+int qed_selftest_interrupt(struct qed_dev *cdev)
+{
+       int rc = 0, i;
+
+       for_each_hwfn(cdev, i) {
+               rc = qed_sp_heartbeat_ramrod(&cdev->hwfns[i]);
+               if (rc)
+                       return rc;
+       }
+
+       return rc;
+}
+
+int qed_selftest_register(struct qed_dev *cdev)
+{
+       struct qed_hwfn *p_hwfn;
+       struct qed_ptt *p_ptt;
+       int rc = 0, i;
+
+       /* although performed by MCP, this test is per engine */
+       for_each_hwfn(cdev, i) {
+               p_hwfn = &cdev->hwfns[i];
+               p_ptt = qed_ptt_acquire(p_hwfn);
+               if (!p_ptt) {
+                       DP_ERR(p_hwfn, "failed to acquire ptt\n");
+                       return -EBUSY;
+               }
+               rc = qed_mcp_bist_register_test(p_hwfn, p_ptt);
+               qed_ptt_release(p_hwfn, p_ptt);
+               if (rc)
+                       break;
+       }
+
+       return rc;
+}
+
+int qed_selftest_clock(struct qed_dev *cdev)
+{
+       struct qed_hwfn *p_hwfn;
+       struct qed_ptt *p_ptt;
+       int rc = 0, i;
+
+       /* although performed by MCP, this test is per engine */
+       for_each_hwfn(cdev, i) {
+               p_hwfn = &cdev->hwfns[i];
+               p_ptt = qed_ptt_acquire(p_hwfn);
+               if (!p_ptt) {
+                       DP_ERR(p_hwfn, "failed to acquire ptt\n");
+                       return -EBUSY;
+               }
+               rc = qed_mcp_bist_clock_test(p_hwfn, p_ptt);
+               qed_ptt_release(p_hwfn, p_ptt);
+               if (rc)
+                       break;
+       }
+
+       return rc;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.h b/drivers/net/ethernet/qlogic/qed/qed_selftest.h
new file mode 100644 (file)
index 0000000..50eb0b4
--- /dev/null
@@ -0,0 +1,40 @@
+#ifndef _QED_SELFTEST_API_H
+#define _QED_SELFTEST_API_H
+#include <linux/types.h>
+
+struct qed_dev;
+
+/**
+ * @brief qed_selftest_memory - Perform memory test
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_selftest_memory(struct qed_dev *cdev);
+
+/**
+ * @brief qed_selftest_interrupt - Perform interrupt test
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_selftest_interrupt(struct qed_dev *cdev);
+
+/**
+ * @brief qed_selftest_register - Perform register test
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_selftest_register(struct qed_dev *cdev);
+
+/**
+ * @brief qed_selftest_clock - Perform clock test
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_selftest_clock(struct qed_dev *cdev);
+#endif
index d39f914b66ee869714b023fad1fd1a557c369208..eec137f4089570a09d8fd8aaf9acc6831299f4ba 100644 (file)
@@ -52,6 +52,7 @@ int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
 
 union ramrod_data {
        struct pf_start_ramrod_data pf_start;
+       struct pf_update_ramrod_data pf_update;
        struct rx_queue_start_ramrod_data rx_queue_start;
        struct rx_queue_update_ramrod_data rx_queue_update;
        struct rx_queue_stop_ramrod_data rx_queue_stop;
@@ -338,12 +339,14 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
  * to the internal RAM of the UStorm by the Function Start Ramrod.
  *
  * @param p_hwfn
+ * @param p_tunn
  * @param mode
  *
  * @return int
  */
 
 int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+                   struct qed_tunn_start_params *p_tunn,
                    enum qed_mf_mode mode);
 
 /**
@@ -362,4 +365,18 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
 
 int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);
 
+int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
+                             struct qed_tunn_update_params *p_tunn,
+                             enum spq_mode comp_mode,
+                             struct qed_spq_comp_cb *p_comp_data);
+/**
+ * @brief qed_sp_heartbeat_ramrod - Send an empty (heartbeat) ramrod
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+
+int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn);
+
 #endif
index 1c06c37d4c3d75a1650dcb82cd6de9385589db8f..e1e2344b1906c822009b7e7caa18dcffdf1f8041 100644 (file)
@@ -87,7 +87,217 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
        return 0;
 }
 
+static enum tunnel_clss qed_tunn_get_clss_type(u8 type)
+{
+       switch (type) {
+       case QED_TUNN_CLSS_MAC_VLAN:
+               return TUNNEL_CLSS_MAC_VLAN;
+       case QED_TUNN_CLSS_MAC_VNI:
+               return TUNNEL_CLSS_MAC_VNI;
+       case QED_TUNN_CLSS_INNER_MAC_VLAN:
+               return TUNNEL_CLSS_INNER_MAC_VLAN;
+       case QED_TUNN_CLSS_INNER_MAC_VNI:
+               return TUNNEL_CLSS_INNER_MAC_VNI;
+       default:
+               return TUNNEL_CLSS_MAC_VLAN;
+       }
+}
+
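+/* Recompute the PF tunnel mode: for each tunnel type, take the new
+ * value from p_src->tunn_mode if its bit is set in the update mask,
+ * otherwise keep the mode currently cached on the device.
+ */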
+static void
+qed_tunn_set_pf_fix_tunn_mode(struct qed_hwfn *p_hwfn,
+                             struct qed_tunn_update_params *p_src,
+                             struct pf_update_tunnel_config *p_tunn_cfg)
+{
+       unsigned long cached_tunn_mode = p_hwfn->cdev->tunn_mode;
+       unsigned long update_mask = p_src->tunn_mode_update_mask;
+       unsigned long tunn_mode = p_src->tunn_mode;
+       unsigned long new_tunn_mode = 0;
+
+       if (test_bit(QED_MODE_L2GRE_TUNN, &update_mask)) {
+               if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
+                       __set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode);
+       } else {
+               if (test_bit(QED_MODE_L2GRE_TUNN, &cached_tunn_mode))
+                       __set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode);
+       }
+
+       if (test_bit(QED_MODE_IPGRE_TUNN, &update_mask)) {
+               if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
+                       __set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode);
+       } else {
+               if (test_bit(QED_MODE_IPGRE_TUNN, &cached_tunn_mode))
+                       __set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode);
+       }
+
+       if (test_bit(QED_MODE_VXLAN_TUNN, &update_mask)) {
+               if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
+                       __set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode);
+       } else {
+               if (test_bit(QED_MODE_VXLAN_TUNN, &cached_tunn_mode))
+                       __set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode);
+       }
+
+       if (p_src->update_geneve_udp_port) {
+               p_tunn_cfg->set_geneve_udp_port_flg = 1;
+               p_tunn_cfg->geneve_udp_port =
+                               cpu_to_le16(p_src->geneve_udp_port);
+       }
+
+       if (test_bit(QED_MODE_L2GENEVE_TUNN, &update_mask)) {
+               if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
+                       __set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode);
+       } else {
+               if (test_bit(QED_MODE_L2GENEVE_TUNN, &cached_tunn_mode))
+                       __set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode);
+       }
+
+       if (test_bit(QED_MODE_IPGENEVE_TUNN, &update_mask)) {
+               if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
+                       __set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode);
+       } else {
+               if (test_bit(QED_MODE_IPGENEVE_TUNN, &cached_tunn_mode))
+                       __set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode);
+       }
+
+       p_src->tunn_mode = new_tunn_mode;
+}
+
+static void
+qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn,
+                             struct qed_tunn_update_params *p_src,
+                             struct pf_update_tunnel_config *p_tunn_cfg)
+{
+       unsigned long tunn_mode = p_src->tunn_mode;
+       enum tunnel_clss type;
+
+       qed_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg);
+       p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss;
+       p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss;
+
+       type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan);
+       p_tunn_cfg->tunnel_clss_vxlan  = type;
+
+       type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre);
+       p_tunn_cfg->tunnel_clss_l2gre = type;
+
+       type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre);
+       p_tunn_cfg->tunnel_clss_ipgre = type;
+
+       if (p_src->update_vxlan_udp_port) {
+               p_tunn_cfg->set_vxlan_udp_port_flg = 1;
+               p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port);
+       }
+
+       if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
+               p_tunn_cfg->tx_enable_l2gre = 1;
+
+       if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
+               p_tunn_cfg->tx_enable_ipgre = 1;
+
+       if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
+               p_tunn_cfg->tx_enable_vxlan = 1;
+
+       if (p_src->update_geneve_udp_port) {
+               p_tunn_cfg->set_geneve_udp_port_flg = 1;
+               p_tunn_cfg->geneve_udp_port =
+                               cpu_to_le16(p_src->geneve_udp_port);
+       }
+
+       if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
+               p_tunn_cfg->tx_enable_l2geneve = 1;
+
+       if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
+               p_tunn_cfg->tx_enable_ipgeneve = 1;
+
+       type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
+       p_tunn_cfg->tunnel_clss_l2geneve = type;
+
+       type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
+       p_tunn_cfg->tunnel_clss_ipgeneve = type;
+}
+
+static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn,
+                                struct qed_ptt *p_ptt,
+                                unsigned long tunn_mode)
+{
+       u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0;
+       u8 l2geneve_enable = 0, ipgeneve_enable = 0;
+
+       if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
+               l2gre_enable = 1;
+
+       if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
+               ipgre_enable = 1;
+
+       if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
+               vxlan_enable = 1;
+
+       qed_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable);
+       qed_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable);
+
+       if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
+               l2geneve_enable = 1;
+
+       if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
+               ipgeneve_enable = 1;
+
+       qed_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable,
+                             ipgeneve_enable);
+}
+
+static void
+qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
+                            struct qed_tunn_start_params *p_src,
+                            struct pf_start_tunnel_config *p_tunn_cfg)
+{
+       unsigned long tunn_mode;
+       enum tunnel_clss type;
+
+       if (!p_src)
+               return;
+
+       tunn_mode = p_src->tunn_mode;
+       type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan);
+       p_tunn_cfg->tunnel_clss_vxlan = type;
+       type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre);
+       p_tunn_cfg->tunnel_clss_l2gre = type;
+       type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre);
+       p_tunn_cfg->tunnel_clss_ipgre = type;
+
+       if (p_src->update_vxlan_udp_port) {
+               p_tunn_cfg->set_vxlan_udp_port_flg = 1;
+               p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port);
+       }
+
+       if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
+               p_tunn_cfg->tx_enable_l2gre = 1;
+
+       if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
+               p_tunn_cfg->tx_enable_ipgre = 1;
+
+       if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
+               p_tunn_cfg->tx_enable_vxlan = 1;
+
+       if (p_src->update_geneve_udp_port) {
+               p_tunn_cfg->set_geneve_udp_port_flg = 1;
+               p_tunn_cfg->geneve_udp_port =
+                               cpu_to_le16(p_src->geneve_udp_port);
+       }
+
+       if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
+               p_tunn_cfg->tx_enable_l2geneve = 1;
+
+       if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
+               p_tunn_cfg->tx_enable_ipgeneve = 1;
+
+       type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
+       p_tunn_cfg->tunnel_clss_l2geneve = type;
+       type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
+       p_tunn_cfg->tunnel_clss_ipgeneve = type;
+}
+
 int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+                   struct qed_tunn_start_params *p_tunn,
                    enum qed_mf_mode mode)
 {
        struct pf_start_ramrod_data *p_ramrod = NULL;
@@ -143,6 +353,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
        DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
                       p_hwfn->p_consq->chain.pbl.p_phys_table);
 
+       qed_tunn_set_pf_start_params(p_hwfn, p_tunn,
+                                    &p_ramrod->tunnel_config);
        p_hwfn->hw_info.personality = PERSONALITY_ETH;
 
        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
@@ -150,7 +362,58 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
                   sb, sb_index,
                   p_ramrod->outer_tag);
 
-       return qed_spq_post(p_hwfn, p_ent, NULL);
+       rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+       if (p_tunn) {
+               qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
+                                    p_tunn->tunn_mode);
+               p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode;
+       }
+
+       return rc;
+}
+
+/* Set PF update ramrod command params */
+int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
+                             struct qed_tunn_update_params *p_tunn,
+                             enum spq_mode comp_mode,
+                             struct qed_spq_comp_cb *p_comp_data)
+{
+       struct qed_spq_entry *p_ent = NULL;
+       struct qed_sp_init_data init_data;
+       int rc = -EINVAL;
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = qed_spq_get_cid(p_hwfn);
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = comp_mode;
+       init_data.p_comp_data = p_comp_data;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+                                &init_data);
+       if (rc)
+               return rc;
+
+       qed_tunn_set_pf_update_params(p_hwfn, p_tunn,
+                                     &p_ent->ramrod.pf_update.tunnel_config);
+
+       rc = qed_spq_post(p_hwfn, p_ent, NULL);
+       if (rc)
+               return rc;
+
+       if (p_tunn->update_vxlan_udp_port)
+               qed_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+                                       p_tunn->vxlan_udp_port);
+       if (p_tunn->update_geneve_udp_port)
+               qed_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+                                        p_tunn->geneve_udp_port);
+
+       qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn->tunn_mode);
+       p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode;
+
+       return rc;
 }
 
 int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
@@ -173,3 +436,24 @@ int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
 
        return qed_spq_post(p_hwfn, p_ent, NULL);
 }
+
+int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn)
+{
+       struct qed_spq_entry *p_ent = NULL;
+       struct qed_sp_init_data init_data;
+       int rc;
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = qed_spq_get_cid(p_hwfn);
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
+                                &init_data);
+       if (rc)
+               return rc;
+
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
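
The three helpers above (PF start, the PF tunnel update and the heartbeat ramrod) share one slow-path pattern: fill a qed_sp_init_data with a fresh CID and the PF's opaque FID, build the request with qed_sp_init_request(), then post it with qed_spq_post(). A minimal caller sketch, assuming a valid p_hwfn with its main PTT acquired; the function name is hypothetical and not part of this patch:

/* Hypothetical caller: push a new VXLAN UDP destination port to firmware
 * via a blocking PF-update ramrod, keeping the currently configured
 * tunnel mode unchanged.
 */
static int example_set_vxlan_udp_port(struct qed_hwfn *p_hwfn, u16 udp_port)
{
        struct qed_tunn_update_params params;

        memset(&params, 0, sizeof(params));
        params.update_vxlan_udp_port = 1;
        params.vxlan_udp_port = udp_port;
        params.tunn_mode = p_hwfn->cdev->tunn_mode;

        /* QED_SPQ_MODE_EBLOCK blocks until the firmware completes */
        return qed_sp_pf_update_tunn_cfg(p_hwfn, &params,
                                         QED_SPQ_MODE_EBLOCK, NULL);
}
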
index d023251544d9e7f8df8a1e6c897293385bd45fc1..ff3ac0caad5b38b1b544dc24b79407780a75df84 100644 (file)
 
 #define QEDE_MAJOR_VERSION             8
 #define QEDE_MINOR_VERSION             7
-#define QEDE_REVISION_VERSION          0
-#define QEDE_ENGINEERING_VERSION       0
+#define QEDE_REVISION_VERSION          1
+#define QEDE_ENGINEERING_VERSION       20
 #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
                __stringify(QEDE_MINOR_VERSION) "."             \
                __stringify(QEDE_REVISION_VERSION) "."          \
                __stringify(QEDE_ENGINEERING_VERSION)
 
-#define QEDE_ETH_INTERFACE_VERSION     300
-
 #define DRV_MODULE_SYM         qede
 
 struct qede_stats {
@@ -61,16 +59,16 @@ struct qede_stats {
 
        /* port */
        u64 rx_64_byte_packets;
-       u64 rx_127_byte_packets;
-       u64 rx_255_byte_packets;
-       u64 rx_511_byte_packets;
-       u64 rx_1023_byte_packets;
-       u64 rx_1518_byte_packets;
-       u64 rx_1522_byte_packets;
-       u64 rx_2047_byte_packets;
-       u64 rx_4095_byte_packets;
-       u64 rx_9216_byte_packets;
-       u64 rx_16383_byte_packets;
+       u64 rx_65_to_127_byte_packets;
+       u64 rx_128_to_255_byte_packets;
+       u64 rx_256_to_511_byte_packets;
+       u64 rx_512_to_1023_byte_packets;
+       u64 rx_1024_to_1518_byte_packets;
+       u64 rx_1519_to_1522_byte_packets;
+       u64 rx_1519_to_2047_byte_packets;
+       u64 rx_2048_to_4095_byte_packets;
+       u64 rx_4096_to_9216_byte_packets;
+       u64 rx_9217_to_16383_byte_packets;
        u64 rx_crc_errors;
        u64 rx_mac_crtl_frames;
        u64 rx_pause_frames;
@@ -156,6 +154,10 @@ struct qede_dev {
              SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 
        struct qede_stats               stats;
+#define QEDE_RSS_INDIR_INITED  BIT(0)
+#define QEDE_RSS_KEY_INITED    BIT(1)
+#define QEDE_RSS_CAPS_INITED   BIT(2)
+       u32 rss_params_inited; /* bit-field to track initialized rss params */
        struct qed_update_vport_rss_params      rss_params;
        u16                     q_num_rx_buffers; /* Must be a power of two */
        u16                     q_num_tx_buffers; /* Must be a power of two */
@@ -167,6 +169,8 @@ struct qede_dev {
        bool accept_any_vlan;
        struct delayed_work             sp_task;
        unsigned long                   sp_flags;
+       u16                             vxlan_dst_port;
+       u16                             geneve_dst_port;
 };
 
 enum QEDE_STATE {
@@ -286,8 +290,11 @@ struct qede_fastpath {
 
 #define QEDE_CSUM_ERROR                        BIT(0)
 #define QEDE_CSUM_UNNECESSARY          BIT(1)
+#define QEDE_TUNN_CSUM_UNNECESSARY     BIT(2)
 
-#define QEDE_SP_RX_MODE                1
+#define QEDE_SP_RX_MODE                        1
+#define QEDE_SP_VXLAN_PORT_CONFIG      2
+#define QEDE_SP_GENEVE_PORT_CONFIG     3
 
 union qede_reload_args {
        u16 mtu;
@@ -301,6 +308,10 @@ void qede_reload(struct qede_dev *edev,
                 union qede_reload_args *args);
 int qede_change_mtu(struct net_device *dev, int new_mtu);
 void qede_fill_by_demand_stats(struct qede_dev *edev);
+bool qede_has_rx_work(struct qede_rx_queue *rxq);
+int qede_txq_has_work(struct qede_tx_queue *txq);
+void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
+                            u8 count);
 
 #define RX_RING_SIZE_POW       13
 #define RX_RING_SIZE           ((u16)BIT(RX_RING_SIZE_POW))
index c49dc10ce15105bea0e676b3e3f9586d096298c4..0d04f163ae45bd2139797c74a97583f0b88fa106 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/version.h>
 #include <linux/types.h>
 #include <linux/netdevice.h>
+#include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/string.h>
 #include <linux/pci.h>
@@ -27,6 +28,9 @@
 #define QEDE_RQSTAT_STRING(stat_name) (#stat_name)
 #define QEDE_RQSTAT(stat_name) \
         {QEDE_RQSTAT_OFFSET(stat_name), QEDE_RQSTAT_STRING(stat_name)}
+
+#define QEDE_SELFTEST_POLL_COUNT 100
+
 static const struct {
        u64 offset;
        char string[ETH_GSTRING_LEN];
@@ -59,16 +63,16 @@ static const struct {
        QEDE_STAT(tx_bcast_pkts),
 
        QEDE_PF_STAT(rx_64_byte_packets),
-       QEDE_PF_STAT(rx_127_byte_packets),
-       QEDE_PF_STAT(rx_255_byte_packets),
-       QEDE_PF_STAT(rx_511_byte_packets),
-       QEDE_PF_STAT(rx_1023_byte_packets),
-       QEDE_PF_STAT(rx_1518_byte_packets),
-       QEDE_PF_STAT(rx_1522_byte_packets),
-       QEDE_PF_STAT(rx_2047_byte_packets),
-       QEDE_PF_STAT(rx_4095_byte_packets),
-       QEDE_PF_STAT(rx_9216_byte_packets),
-       QEDE_PF_STAT(rx_16383_byte_packets),
+       QEDE_PF_STAT(rx_65_to_127_byte_packets),
+       QEDE_PF_STAT(rx_128_to_255_byte_packets),
+       QEDE_PF_STAT(rx_256_to_511_byte_packets),
+       QEDE_PF_STAT(rx_512_to_1023_byte_packets),
+       QEDE_PF_STAT(rx_1024_to_1518_byte_packets),
+       QEDE_PF_STAT(rx_1519_to_1522_byte_packets),
+       QEDE_PF_STAT(rx_1519_to_2047_byte_packets),
+       QEDE_PF_STAT(rx_2048_to_4095_byte_packets),
+       QEDE_PF_STAT(rx_4096_to_9216_byte_packets),
+       QEDE_PF_STAT(rx_9217_to_16383_byte_packets),
        QEDE_PF_STAT(tx_64_byte_packets),
        QEDE_PF_STAT(tx_65_to_127_byte_packets),
        QEDE_PF_STAT(tx_128_to_255_byte_packets),
@@ -116,6 +120,32 @@ static const struct {
 
 #define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr)
 
+enum {
+       QEDE_PRI_FLAG_CMT,
+       QEDE_PRI_FLAG_LEN,
+};
+
+static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
+       "Coupled-Function",
+};
+
+enum qede_ethtool_tests {
+       QEDE_ETHTOOL_INT_LOOPBACK,
+       QEDE_ETHTOOL_INTERRUPT_TEST,
+       QEDE_ETHTOOL_MEMORY_TEST,
+       QEDE_ETHTOOL_REGISTER_TEST,
+       QEDE_ETHTOOL_CLOCK_TEST,
+       QEDE_ETHTOOL_TEST_MAX
+};
+
+static const char qede_tests_str_arr[QEDE_ETHTOOL_TEST_MAX][ETH_GSTRING_LEN] = {
+       "Internal loopback (offline)",
+       "Interrupt (online)\t",
+       "Memory (online)\t\t",
+       "Register (online)\t",
+       "Clock (online)\t\t",
+};
+
 static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
 {
        int i, j, k;
@@ -139,6 +169,14 @@ static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
        case ETH_SS_STATS:
                qede_get_strings_stats(edev, buf);
                break;
+       case ETH_SS_PRIV_FLAGS:
+               memcpy(buf, qede_private_arr,
+                      ETH_GSTRING_LEN * QEDE_PRI_FLAG_LEN);
+               break;
+       case ETH_SS_TEST:
+               memcpy(buf, qede_tests_str_arr,
+                      ETH_GSTRING_LEN * QEDE_ETHTOOL_TEST_MAX);
+               break;
        default:
                DP_VERBOSE(edev, QED_MSG_DEBUG,
                           "Unsupported stringset 0x%08x\n", stringset);
@@ -177,7 +215,10 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
        switch (stringset) {
        case ETH_SS_STATS:
                return num_stats + QEDE_NUM_RQSTATS;
-
+       case ETH_SS_PRIV_FLAGS:
+               return QEDE_PRI_FLAG_LEN;
+       case ETH_SS_TEST:
+               return QEDE_ETHTOOL_TEST_MAX;
        default:
                DP_VERBOSE(edev, QED_MSG_DEBUG,
                           "Unsupported stringset 0x%08x\n", stringset);
@@ -185,6 +226,13 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
        }
 }
 
+static u32 qede_get_priv_flags(struct net_device *dev)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       return (!!(edev->dev_info.common.num_hwfns > 1)) << QEDE_PRI_FLAG_CMT;
+}
+
 static int qede_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
        struct qede_dev *edev = netdev_priv(dev);
@@ -217,9 +265,9 @@ static int qede_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        struct qed_link_params params;
        u32 speed;
 
-       if (!edev->dev_info.common.is_mf_default) {
+       if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
                DP_INFO(edev,
-                       "Link parameters can not be changed in non-default mode\n");
+                       "Link settings are not allowed to be changed\n");
                return -EOPNOTSUPP;
        }
 
@@ -328,6 +376,12 @@ static int qede_nway_reset(struct net_device *dev)
        struct qed_link_output current_link;
        struct qed_link_params link_params;
 
+       if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
+               DP_INFO(edev,
+                       "Link settings are not allowed to be changed\n");
+               return -EOPNOTSUPP;
+       }
+
        if (!netif_running(dev))
                return 0;
 
@@ -428,9 +482,9 @@ static int qede_set_pauseparam(struct net_device *dev,
        struct qed_link_params params;
        struct qed_link_output current_link;
 
-       if (!edev->dev_info.common.is_mf_default) {
+       if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
                DP_INFO(edev,
-                       "Pause parameters can not be updated in non-default mode\n");
+                       "Pause settings are not allowed to be changed\n");
                return -EOPNOTSUPP;
        }
 
@@ -569,6 +623,497 @@ static int qede_set_phys_id(struct net_device *dev,
        return 0;
 }
 
+static int qede_get_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
+{
+       info->data = RXH_IP_SRC | RXH_IP_DST;
+
+       switch (info->flow_type) {
+       case TCP_V4_FLOW:
+       case TCP_V6_FLOW:
+               info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               break;
+       case UDP_V4_FLOW:
+               if (edev->rss_params.rss_caps & QED_RSS_IPV4_UDP)
+                       info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               break;
+       case UDP_V6_FLOW:
+               if (edev->rss_params.rss_caps & QED_RSS_IPV6_UDP)
+                       info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               break;
+       case IPV4_FLOW:
+       case IPV6_FLOW:
+               break;
+       default:
+               info->data = 0;
+               break;
+       }
+
+       return 0;
+}
+
+static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+                         u32 *rules __always_unused)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       switch (info->cmd) {
+       case ETHTOOL_GRXRINGS:
+               info->data = edev->num_rss;
+               return 0;
+       case ETHTOOL_GRXFH:
+               return qede_get_rss_flags(edev, info);
+       default:
+               DP_ERR(edev, "Command parameters not supported\n");
+               return -EOPNOTSUPP;
+       }
+}
+
+static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
+{
+       struct qed_update_vport_params vport_update_params;
+       u8 set_caps = 0, clr_caps = 0;
+
+       DP_VERBOSE(edev, QED_MSG_DEBUG,
+                  "Set rss flags command parameters: flow type = %d, data = %llu\n",
+                  info->flow_type, info->data);
+
+       switch (info->flow_type) {
+       case TCP_V4_FLOW:
+       case TCP_V6_FLOW:
+               /* For TCP only 4-tuple hash is supported */
+               if (info->data ^ (RXH_IP_SRC | RXH_IP_DST |
+                                 RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+                       DP_INFO(edev, "Command parameters not supported\n");
+                       return -EINVAL;
+               }
+               return 0;
+       case UDP_V4_FLOW:
+               /* For UDP either 2-tuple hash or 4-tuple hash is supported */
+               if (info->data == (RXH_IP_SRC | RXH_IP_DST |
+                                  RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+                       set_caps = QED_RSS_IPV4_UDP;
+                       DP_VERBOSE(edev, QED_MSG_DEBUG,
+                                  "UDP 4-tuple enabled\n");
+               } else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
+                       clr_caps = QED_RSS_IPV4_UDP;
+                       DP_VERBOSE(edev, QED_MSG_DEBUG,
+                                  "UDP 4-tuple disabled\n");
+               } else {
+                       return -EINVAL;
+               }
+               break;
+       case UDP_V6_FLOW:
+               /* For UDP either 2-tuple hash or 4-tuple hash is supported */
+               if (info->data == (RXH_IP_SRC | RXH_IP_DST |
+                                  RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+                       set_caps = QED_RSS_IPV6_UDP;
+                       DP_VERBOSE(edev, QED_MSG_DEBUG,
+                                  "UDP 4-tuple enabled\n");
+               } else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
+                       clr_caps = QED_RSS_IPV6_UDP;
+                       DP_VERBOSE(edev, QED_MSG_DEBUG,
+                                  "UDP 4-tuple disabled\n");
+               } else {
+                       return -EINVAL;
+               }
+               break;
+       case IPV4_FLOW:
+       case IPV6_FLOW:
+               /* For IP only 2-tuple hash is supported */
+               if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) {
+                       DP_INFO(edev, "Command parameters not supported\n");
+                       return -EINVAL;
+               }
+               return 0;
+       case SCTP_V4_FLOW:
+       case AH_ESP_V4_FLOW:
+       case AH_V4_FLOW:
+       case ESP_V4_FLOW:
+       case SCTP_V6_FLOW:
+       case AH_ESP_V6_FLOW:
+       case AH_V6_FLOW:
+       case ESP_V6_FLOW:
+       case IP_USER_FLOW:
+       case ETHER_FLOW:
+               /* RSS is not supported for these protocols */
+               if (info->data) {
+                       DP_INFO(edev, "Command parameters not supported\n");
+                       return -EINVAL;
+               }
+               return 0;
+       default:
+               return -EINVAL;
+       }
+
+       /* No action is needed if there is no change in the rss capability */
+       if (edev->rss_params.rss_caps == ((edev->rss_params.rss_caps &
+                                          ~clr_caps) | set_caps))
+               return 0;
+
+       /* Update internal configuration */
+       edev->rss_params.rss_caps = (edev->rss_params.rss_caps & ~clr_caps) |
+                                   set_caps;
+       edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
+
+       /* Re-configure if possible */
+       if (netif_running(edev->ndev)) {
+               memset(&vport_update_params, 0, sizeof(vport_update_params));
+               vport_update_params.update_rss_flg = 1;
+               vport_update_params.vport_id = 0;
+               memcpy(&vport_update_params.rss_params, &edev->rss_params,
+                      sizeof(vport_update_params.rss_params));
+               return edev->ops->vport_update(edev->cdev,
+                                              &vport_update_params);
+       }
+
+       return 0;
+}
+
+static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       switch (info->cmd) {
+       case ETHTOOL_SRXFH:
+               return qede_set_rss_flags(edev, info);
+       default:
+               DP_INFO(edev, "Command parameters not supported\n");
+               return -EOPNOTSUPP;
+       }
+}
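
This is the path ethtool's flow-hash interface lands on: for instance, ethtool -N <dev> rx-flow-hash udp4 sdfn (device name illustrative) enables 4-tuple hashing for UDP over IPv4 and sets QED_RSS_IPV4_UDP, while ethtool -N <dev> rx-flow-hash udp4 sd clears it and falls back to the 2-tuple IP hash. The TCP and plain-IP flow types only validate the request, since their hash tuples are fixed.
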
+
+static u32 qede_get_rxfh_indir_size(struct net_device *dev)
+{
+       return QED_RSS_IND_TABLE_SIZE;
+}
+
+static u32 qede_get_rxfh_key_size(struct net_device *dev)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       return sizeof(edev->rss_params.rss_key);
+}
+
+static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       int i;
+
+       if (hfunc)
+               *hfunc = ETH_RSS_HASH_TOP;
+
+       if (!indir)
+               return 0;
+
+       for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
+               indir[i] = edev->rss_params.rss_ind_table[i];
+
+       if (key)
+               memcpy(key, edev->rss_params.rss_key,
+                      qede_get_rxfh_key_size(dev));
+
+       return 0;
+}
+
+static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
+                        const u8 *key, const u8 hfunc)
+{
+       struct qed_update_vport_params vport_update_params;
+       struct qede_dev *edev = netdev_priv(dev);
+       int i;
+
+       if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+               return -EOPNOTSUPP;
+
+       if (!indir && !key)
+               return 0;
+
+       if (indir) {
+               for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
+                       edev->rss_params.rss_ind_table[i] = indir[i];
+               edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
+       }
+
+       if (key) {
+               memcpy(&edev->rss_params.rss_key, key,
+                      qede_get_rxfh_key_size(dev));
+               edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
+       }
+
+       if (netif_running(edev->ndev)) {
+               memset(&vport_update_params, 0, sizeof(vport_update_params));
+               vport_update_params.update_rss_flg = 1;
+               vport_update_params.vport_id = 0;
+               memcpy(&vport_update_params.rss_params, &edev->rss_params,
+                      sizeof(vport_update_params.rss_params));
+               return edev->ops->vport_update(edev->cdev,
+                                              &vport_update_params);
+       }
+
+       return 0;
+}
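
Both RXFH handlers funnel into the same vport-update call when the interface is running, so a change made via ethtool -X <dev> equal 4 (rewrite the indirection table) or ethtool -X <dev> hkey <key> (replace the hash key) takes effect immediately; on a down interface, only the cached rss_params and the corresponding QEDE_RSS_*_INITED bits are updated, to be applied on the next load.
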
+
+/* This function enables the interrupt generation and the NAPI on the device */
+static void qede_netif_start(struct qede_dev *edev)
+{
+       int i;
+
+       if (!netif_running(edev->ndev))
+               return;
+
+       for_each_rss(i) {
+               /* Update and reenable interrupts */
+               qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_ENABLE, 1);
+               napi_enable(&edev->fp_array[i].napi);
+       }
+}
+
+/* This function disables the NAPI and the interrupt generation on the device */
+static void qede_netif_stop(struct qede_dev *edev)
+{
+       int i;
+
+       for_each_rss(i) {
+               napi_disable(&edev->fp_array[i].napi);
+               /* Disable interrupts */
+               qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_DISABLE, 0);
+       }
+}
+
+static int qede_selftest_transmit_traffic(struct qede_dev *edev,
+                                         struct sk_buff *skb)
+{
+       struct qede_tx_queue *txq = &edev->fp_array[0].txqs[0];
+       struct eth_tx_1st_bd *first_bd;
+       dma_addr_t mapping;
+       int i, idx, val;
+
+       /* Fill the entry in the SW ring and the BDs in the FW ring */
+       idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+       txq->sw_tx_ring[idx].skb = skb;
+       first_bd = qed_chain_produce(&txq->tx_pbl);
+       memset(first_bd, 0, sizeof(*first_bd));
+       val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
+       first_bd->data.bd_flags.bitfields = val;
+
+       /* Map skb linear data for DMA and set in the first BD */
+       mapping = dma_map_single(&edev->pdev->dev, skb->data,
+                                skb_headlen(skb), DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+               DP_NOTICE(edev, "SKB mapping failed\n");
+               return -ENOMEM;
+       }
+       BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
+
+       /* update the first BD with the actual num BDs */
+       first_bd->data.nbds = 1;
+       txq->sw_tx_prod++;
+       /* 'next page' entries are counted in the producer value */
+       val = cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
+       txq->tx_db.data.bd_prod = val;
+
+       /* wmb makes sure that the BD data is updated before updating the
+        * producer; otherwise FW may read stale data from the BDs.
+        */
+       wmb();
+       barrier();
+       writel(txq->tx_db.raw, txq->doorbell_addr);
+
+       /* mmiowb is needed to synchronize doorbell writes from more than one
+        * processor. It guarantees that the write arrives at the device before
+        * the queue lock is released and another start_xmit is called (possibly
+        * on another CPU). Without this barrier, the next doorbell could bypass
+        * this one. This is applicable to IA64/Altix systems.
+        */
+       mmiowb();
+
+       for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
+               if (qede_txq_has_work(txq))
+                       break;
+               usleep_range(100, 200);
+       }
+
+       if (!qede_txq_has_work(txq)) {
+               DP_NOTICE(edev, "Tx completion didn't happen\n");
+               return -1;
+       }
+
+       first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
+       dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+                      BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
+       txq->sw_tx_cons++;
+       txq->sw_tx_ring[idx].skb = NULL;
+
+       return 0;
+}
+
+static int qede_selftest_receive_traffic(struct qede_dev *edev)
+{
+       struct qede_rx_queue *rxq = edev->fp_array[0].rxq;
+       u16 hw_comp_cons, sw_comp_cons, sw_rx_index, len;
+       struct eth_fast_path_rx_reg_cqe *fp_cqe;
+       struct sw_rx_data *sw_rx_data;
+       union eth_rx_cqe *cqe;
+       u8 *data_ptr;
+       int i;
+
+       /* The packet is expected to be received on rx-queue 0 even though RSS
+        * is enabled, because queue 0 is configured as the default queue and
+        * the loopback traffic is not IP.
+        */
+       for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
+               if (qede_has_rx_work(rxq))
+                       break;
+               usleep_range(100, 200);
+       }
+
+       if (!qede_has_rx_work(rxq)) {
+               DP_NOTICE(edev, "Failed to receive the traffic\n");
+               return -1;
+       }
+
+       hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+       sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+       /* Memory barrier to prevent the CPU from speculatively reading the
+        * CQE/BD before reading hw_comp_cons. If the CQE were read before FW
+        * writes it (FW writes the CQE and then the SB), the CPU could see an
+        * updated hw_comp_cons yet consume a stale CQE.
+        */
+       rmb();
+
+       /* Get the CQE from the completion ring */
+       cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
+
+       /* Get the data from the SW ring */
+       sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+       sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
+       fp_cqe = &cqe->fast_path_regular;
+       len =  le16_to_cpu(fp_cqe->len_on_first_bd);
+       data_ptr = (u8 *)(page_address(sw_rx_data->data) +
+                    fp_cqe->placement_offset + sw_rx_data->page_offset);
+       for (i = ETH_HLEN; i < len; i++)
+               if (data_ptr[i] != (unsigned char)(i & 0xff)) {
+                       DP_NOTICE(edev, "Loopback test failed\n");
+                       qede_recycle_rx_bd_ring(rxq, edev, 1);
+                       return -1;
+               }
+
+       qede_recycle_rx_bd_ring(rxq, edev, 1);
+
+       return 0;
+}
+
+static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode)
+{
+       struct qed_link_params link_params;
+       struct sk_buff *skb = NULL;
+       int rc = 0, i;
+       u32 pkt_size;
+       u8 *packet;
+
+       if (!netif_running(edev->ndev)) {
+               DP_NOTICE(edev, "Interface is down\n");
+               return -EINVAL;
+       }
+
+       qede_netif_stop(edev);
+
+       /* Bring up the link in Loopback mode */
+       memset(&link_params, 0, sizeof(link_params));
+       link_params.link_up = true;
+       link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
+       link_params.loopback_mode = loopback_mode;
+       edev->ops->common->set_link(edev->cdev, &link_params);
+
+       /* Wait for loopback configuration to apply */
+       msleep_interruptible(500);
+
+       /* prepare the loopback packet */
+       pkt_size = edev->ndev->mtu + ETH_HLEN;
+
+       skb = netdev_alloc_skb(edev->ndev, pkt_size);
+       if (!skb) {
+               DP_INFO(edev, "Can't allocate skb\n");
+               rc = -ENOMEM;
+               goto test_loopback_exit;
+       }
+       packet = skb_put(skb, pkt_size);
+       ether_addr_copy(packet, edev->ndev->dev_addr);
+       ether_addr_copy(packet + ETH_ALEN, edev->ndev->dev_addr);
+       memset(packet + (2 * ETH_ALEN), 0x77, (ETH_HLEN - (2 * ETH_ALEN)));
+       for (i = ETH_HLEN; i < pkt_size; i++)
+               packet[i] = (unsigned char)(i & 0xff);
+
+       rc = qede_selftest_transmit_traffic(edev, skb);
+       if (rc)
+               goto test_loopback_exit;
+
+       rc = qede_selftest_receive_traffic(edev);
+       if (rc)
+               goto test_loopback_exit;
+
+       DP_VERBOSE(edev, NETIF_MSG_RX_STATUS, "Loopback test successful\n");
+
+test_loopback_exit:
+       dev_kfree_skb(skb);
+
+       /* Bring up the link in Normal mode */
+       memset(&link_params, 0, sizeof(link_params));
+       link_params.link_up = true;
+       link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
+       link_params.loopback_mode = QED_LINK_LOOPBACK_NONE;
+       edev->ops->common->set_link(edev->cdev, &link_params);
+
+       /* Wait for loopback configuration to apply */
+       msleep_interruptible(500);
+
+       qede_netif_start(edev);
+
+       return rc;
+}
+
+static void qede_self_test(struct net_device *dev,
+                          struct ethtool_test *etest, u64 *buf)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       DP_VERBOSE(edev, QED_MSG_DEBUG,
+                  "Self-test command parameters: offline = %d, external_lb = %d\n",
+                  (etest->flags & ETH_TEST_FL_OFFLINE),
+                  (etest->flags & ETH_TEST_FL_EXTERNAL_LB) >> 2);
+
+       memset(buf, 0, sizeof(u64) * QEDE_ETHTOOL_TEST_MAX);
+
+       if (etest->flags & ETH_TEST_FL_OFFLINE) {
+               if (qede_selftest_run_loopback(edev,
+                                              QED_LINK_LOOPBACK_INT_PHY)) {
+                       buf[QEDE_ETHTOOL_INT_LOOPBACK] = 1;
+                       etest->flags |= ETH_TEST_FL_FAILED;
+               }
+       }
+
+       if (edev->ops->common->selftest->selftest_interrupt(edev->cdev)) {
+               buf[QEDE_ETHTOOL_INTERRUPT_TEST] = 1;
+               etest->flags |= ETH_TEST_FL_FAILED;
+       }
+
+       if (edev->ops->common->selftest->selftest_memory(edev->cdev)) {
+               buf[QEDE_ETHTOOL_MEMORY_TEST] = 1;
+               etest->flags |= ETH_TEST_FL_FAILED;
+       }
+
+       if (edev->ops->common->selftest->selftest_register(edev->cdev)) {
+               buf[QEDE_ETHTOOL_REGISTER_TEST] = 1;
+               etest->flags |= ETH_TEST_FL_FAILED;
+       }
+
+       if (edev->ops->common->selftest->selftest_clock(edev->cdev)) {
+               buf[QEDE_ETHTOOL_CLOCK_TEST] = 1;
+               etest->flags |= ETH_TEST_FL_FAILED;
+       }
+}
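
From userspace, ethtool -t <dev> offline runs all five tests, including the internal-PHY loopback, which tears the data path down with qede_netif_stop() and restores it afterwards; ethtool -t <dev> online skips the loopback and runs only the four online tests advertised in qede_tests_str_arr.
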
+
 static const struct ethtool_ops qede_ethtool_ops = {
        .get_settings = qede_get_settings,
        .set_settings = qede_set_settings,
@@ -584,10 +1129,17 @@ static const struct ethtool_ops qede_ethtool_ops = {
        .get_strings = qede_get_strings,
        .set_phys_id = qede_set_phys_id,
        .get_ethtool_stats = qede_get_ethtool_stats,
+       .get_priv_flags = qede_get_priv_flags,
        .get_sset_count = qede_get_sset_count,
-
+       .get_rxnfc = qede_get_rxnfc,
+       .set_rxnfc = qede_set_rxnfc,
+       .get_rxfh_indir_size = qede_get_rxfh_indir_size,
+       .get_rxfh_key_size = qede_get_rxfh_key_size,
+       .get_rxfh = qede_get_rxfh,
+       .set_rxfh = qede_set_rxfh,
        .get_channels = qede_get_channels,
        .set_channels = qede_set_channels,
+       .self_test = qede_self_test,
 };
 
 void qede_set_ethtool_ops(struct net_device *dev)
index 518af329502ddff20c8fc322ebed3303b7b0b9a9..075faa52eb489020a67ab3b87e7d00618899693c 100644 (file)
 #include <linux/netdev_features.h>
 #include <linux/udp.h>
 #include <linux/tcp.h>
+#ifdef CONFIG_QEDE_VXLAN
 #include <net/vxlan.h>
+#endif
+#ifdef CONFIG_QEDE_GENEVE
+#include <net/geneve.h>
+#endif
 #include <linux/ip.h>
 #include <net/ipv6.h>
 #include <net/tcp.h>
@@ -141,19 +146,10 @@ static
 int __init qede_init(void)
 {
        int ret;
-       u32 qed_ver;
 
        pr_notice("qede_init: %s\n", version);
 
-       qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH);
-       if (qed_ver !=  QEDE_ETH_INTERFACE_VERSION) {
-               pr_notice("Version mismatch [%08x != %08x]\n",
-                         qed_ver,
-                         QEDE_ETH_INTERFACE_VERSION);
-               return -EINVAL;
-       }
-
-       qed_ops = qed_get_eth_ops(QEDE_ETH_INTERFACE_VERSION);
+       qed_ops = qed_get_eth_ops();
        if (!qed_ops) {
                pr_notice("Failed to get qed ethtool operations\n");
                return -EINVAL;
@@ -319,6 +315,9 @@ static u32 qede_xmit_type(struct qede_dev *edev,
            (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
                *ipv6_ext = 1;
 
+       if (skb->encapsulation)
+               rc |= XMIT_ENC;
+
        if (skb_is_gso(skb))
                rc |= XMIT_LSO;
 
@@ -380,6 +379,16 @@ static int map_frag_to_bd(struct qede_dev *edev,
        return 0;
 }
 
+static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
+{
+       if (is_encap_pkt)
+               return (skb_inner_transport_header(skb) +
+                       inner_tcp_hdrlen(skb) - skb->data);
+       else
+               return (skb_transport_header(skb) +
+                       tcp_hdrlen(skb) - skb->data);
+}
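
As a worked example: for a VXLAN-encapsulated TCP packet, qede_get_skb_hlen(skb, true) returns the offset of the inner TCP payload from skb->data, i.e. the outer Ethernet + IP + UDP + VXLAN headers plus the inner Ethernet, IP and TCP headers; the non-encapsulated variant stops at the end of the (single) TCP header.
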
+
 /* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
 #if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
 static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
@@ -390,8 +399,7 @@ static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
        if (xmit_type & XMIT_LSO) {
                int hlen;
 
-               hlen = skb_transport_header(skb) +
-                      tcp_hdrlen(skb) - skb->data;
+               hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);
 
                /* linear payload would require its own BD */
                if (skb_headlen(skb) > hlen)
@@ -421,7 +429,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
        u8 xmit_type;
        u16 idx;
        u16 hlen;
-       bool data_split;
+       bool data_split = false;
 
        /* Get tx-queue context and netdev index */
        txq_index = skb_get_queue_mapping(skb);
@@ -499,7 +507,18 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
                first_bd->data.bd_flags.bitfields |=
                        1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
 
-               first_bd->data.bitfields |= cpu_to_le16(temp);
+               if (xmit_type & XMIT_ENC) {
+                       first_bd->data.bd_flags.bitfields |=
+                               1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+               } else {
+                       /* When the OS doesn't request inner offloads for a
+                        * tunnelled packet, override the HW tunnel
+                        * configuration so the packet is treated as a regular
+                        * non-tunnelled packet and no inner offloads are done
+                        * by the hardware.
+                        */
+                       first_bd->data.bitfields |= cpu_to_le16(temp);
+               }
 
                /* If the packet is IPv6 with extension header, indicate that
                 * to FW and pass few params, since the device cracker doesn't
@@ -515,10 +534,15 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
                third_bd->data.lso_mss =
                        cpu_to_le16(skb_shinfo(skb)->gso_size);
 
-               first_bd->data.bd_flags.bitfields |=
-               1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
-               hlen = skb_transport_header(skb) +
-                      tcp_hdrlen(skb) - skb->data;
+               if (unlikely(xmit_type & XMIT_ENC)) {
+                       first_bd->data.bd_flags.bitfields |=
+                               1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
+                       hlen = qede_get_skb_hlen(skb, true);
+               } else {
+                       first_bd->data.bd_flags.bitfields |=
+                               1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+                       hlen = qede_get_skb_hlen(skb, false);
+               }
 
                /* @@@TBD - if will not be removed need to check */
                third_bd->data.bitfields |=
@@ -644,7 +668,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
        return NETDEV_TX_OK;
 }
 
-static int qede_txq_has_work(struct qede_tx_queue *txq)
+int qede_txq_has_work(struct qede_tx_queue *txq)
 {
        u16 hw_bd_cons;
 
@@ -727,7 +751,7 @@ static int qede_tx_int(struct qede_dev *edev,
        return 0;
 }
 
-static bool qede_has_rx_work(struct qede_rx_queue *rxq)
+bool qede_has_rx_work(struct qede_rx_queue *rxq)
 {
        u16 hw_comp_cons, sw_comp_cons;
 
@@ -750,6 +774,12 @@ static bool qede_has_tx_work(struct qede_fastpath *fp)
        return false;
 }
 
+static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
+{
+       qed_chain_consume(&rxq->rx_bd_ring);
+       rxq->sw_rx_cons++;
+}
+
 /* This function reuses the buffer(from an offset) from
  * consumer index to producer index in the bd ring
  */
@@ -773,6 +803,21 @@ static inline void qede_reuse_page(struct qede_dev *edev,
        curr_cons->data = NULL;
 }
 
+/* On allocation failure, reuse buffers from the consumer index onwards to
+ * re-produce buffers for the firmware.
+ */
+void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
+                            struct qede_dev *edev, u8 count)
+{
+       struct sw_rx_data *curr_cons;
+
+       for (; count > 0; count--) {
+               curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+               qede_reuse_page(edev, rxq, curr_cons);
+               qede_rx_bd_ring_consume(rxq);
+       }
+}
+
 static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
                                         struct qede_rx_queue *rxq,
                                         struct sw_rx_data *curr_cons)
@@ -781,8 +826,14 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
        curr_cons->page_offset += rxq->rx_buf_seg_size;
 
        if (curr_cons->page_offset == PAGE_SIZE) {
-               if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
+               if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
+                       /* Since we failed to allocate a new buffer, the
+                        * current buffer can be used again.
+                        */
+                       curr_cons->page_offset -= rxq->rx_buf_seg_size;
+
                        return -ENOMEM;
+               }
 
                dma_unmap_page(&edev->pdev->dev, curr_cons->mapping,
                               PAGE_SIZE, DMA_FROM_DEVICE);
@@ -852,6 +903,9 @@ static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
 
        if (csum_flag & QEDE_CSUM_UNNECESSARY)
                skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+       if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY)
+               skb->csum_level = 1;
 }
 
 static inline void qede_skb_receive(struct qede_dev *edev,
@@ -901,7 +955,10 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
                           len_on_bd);
 
        if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) {
-               tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+               /* Increment the page refcount so the buffer can be reused on
+                * allocation failure and isn't freed along with the SKB.
+                */
+               atomic_inc(&current_bd->data->_count);
                goto out;
        }
 
@@ -915,6 +972,8 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
        return 0;
 
 out:
+       tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+       qede_recycle_rx_bd_ring(rxq, edev, 1);
        return -ENOMEM;
 }
 
@@ -966,8 +1025,9 @@ static void qede_tpa_start(struct qede_dev *edev,
        tpa_info->skb = netdev_alloc_skb(edev->ndev,
                                         le16_to_cpu(cqe->len_on_first_bd));
        if (unlikely(!tpa_info->skb)) {
+               DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
                tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
-               return;
+               goto cons_buf;
        }
 
        skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
@@ -990,6 +1050,7 @@ static void qede_tpa_start(struct qede_dev *edev,
        /* This is needed in order to enable forwarding support */
        qede_set_gro_params(edev, tpa_info->skb, cqe);
 
+cons_buf: /* We still need to handle bd_len_list to consume buffers */
        if (likely(cqe->ext_bd_len_list[0]))
                qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
                                   le16_to_cpu(cqe->ext_bd_len_list[0]));
@@ -1007,7 +1068,6 @@ static void qede_gro_ip_csum(struct sk_buff *skb)
        const struct iphdr *iph = ip_hdr(skb);
        struct tcphdr *th;
 
-       skb_set_network_header(skb, 0);
        skb_set_transport_header(skb, sizeof(struct iphdr));
        th = tcp_hdr(skb);
 
@@ -1022,7 +1082,6 @@ static void qede_gro_ipv6_csum(struct sk_buff *skb)
        struct ipv6hdr *iph = ipv6_hdr(skb);
        struct tcphdr *th;
 
-       skb_set_network_header(skb, 0);
        skb_set_transport_header(skb, sizeof(struct ipv6hdr));
        th = tcp_hdr(skb);
 
@@ -1037,8 +1096,21 @@ static void qede_gro_receive(struct qede_dev *edev,
                             struct sk_buff *skb,
                             u16 vlan_tag)
 {
+       /* FW can send a single MTU-sized packet from the GRO flow (due to an
+        * aggregation timeout, last segment, etc.) which is not really a GRO
+        * packet. If the skb has zero frags, simply push it up the stack as a
+        * non-GSO skb.
+        */
+       if (unlikely(!skb->data_len)) {
+               skb_shinfo(skb)->gso_type = 0;
+               skb_shinfo(skb)->gso_size = 0;
+               goto send_skb;
+       }
+
 #ifdef CONFIG_INET
        if (skb_shinfo(skb)->gso_size) {
+               skb_set_network_header(skb, 0);
+
                switch (skb->protocol) {
                case htons(ETH_P_IP):
                        qede_gro_ip_csum(skb);
@@ -1053,6 +1125,8 @@ static void qede_gro_receive(struct qede_dev *edev,
                }
        }
 #endif
+
+send_skb:
        skb_record_rx_queue(skb, fp->rss_id);
        qede_skb_receive(edev, fp, skb, vlan_tag);
 }
@@ -1141,13 +1215,47 @@ err:
        tpa_info->skb = NULL;
 }
 
-static u8 qede_check_csum(u16 flag)
+static bool qede_tunn_exist(u16 flag)
+{
+       return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
+                         PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
+}
+
+static u8 qede_check_tunn_csum(u16 flag)
+{
+       u16 csum_flag = 0;
+       u8 tcsum = 0;
+
+       if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
+                   PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
+               csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
+                            PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
+
+       if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+                   PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
+               csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+                            PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
+               tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
+       }
+
+       csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
+                    PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
+                    PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+                    PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
+
+       if (csum_flag & flag)
+               return QEDE_CSUM_ERROR;
+
+       return QEDE_CSUM_UNNECESSARY | tcsum;
+}
+
+static u8 qede_check_notunn_csum(u16 flag)
 {
        u16 csum_flag = 0;
        u8 csum = 0;
 
-       if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
-            PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
+       if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+                   PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
                csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
                             PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
                csum = QEDE_CSUM_UNNECESSARY;
@@ -1162,6 +1270,14 @@ static u8 qede_check_csum(u16 flag)
        return csum;
 }
 
+static u8 qede_check_csum(u16 flag)
+{
+       if (!qede_tunn_exist(flag))
+               return qede_check_notunn_csum(flag);
+       else
+               return qede_check_tunn_csum(flag);
+}
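
A worked example of the flag handling: for a tunnelled packet whose outer and inner L4 checksums were both validated by hardware, the parse flags carry both *CHKSMWASCALCULATED bits and no error bits, so qede_check_tunn_csum() returns QEDE_CSUM_UNNECESSARY | QEDE_TUNN_CSUM_UNNECESSARY; qede_set_skb_csum() then sets CHECKSUM_UNNECESSARY with csum_level = 1, telling the stack the inner checksum was verified as well.
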
+
 static int qede_rx_int(struct qede_fastpath *fp, int budget)
 {
        struct qede_dev *edev = fp->edev;
@@ -1244,17 +1360,17 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
                                  "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
                                  sw_comp_cons, parse_flag);
                        rxq->rx_hw_errors++;
-                       qede_reuse_page(edev, rxq, sw_rx_data);
-                       goto next_rx;
+                       qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
+                       goto next_cqe;
                }
 
                skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
                if (unlikely(!skb)) {
                        DP_NOTICE(edev,
                                  "Build_skb failed, dropping incoming packet\n");
-                       qede_reuse_page(edev, rxq, sw_rx_data);
+                       qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
                        rxq->rx_alloc_errors++;
-                       goto next_rx;
+                       goto next_cqe;
                }
 
                /* Copy data into SKB */
@@ -1288,11 +1404,22 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
                        if (unlikely(qede_realloc_rx_buffer(edev, rxq,
                                                            sw_rx_data))) {
                                DP_ERR(edev, "Failed to allocate rx buffer\n");
+                               /* Increment the page refcount so the buffer
+                                * can be reused on allocation failure and
+                                * isn't freed along with the SKB.
+                                */
+                               atomic_inc(&sw_rx_data->data->_count);
                                rxq->rx_alloc_errors++;
+                               qede_recycle_rx_bd_ring(rxq, edev,
+                                                       fp_cqe->bd_num);
+                               dev_kfree_skb_any(skb);
                                goto next_cqe;
                        }
                }
 
+               qede_rx_bd_ring_consume(rxq);
+
                if (fp_cqe->bd_num != 1) {
                        u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len);
                        u8 num_frags;
@@ -1303,18 +1430,27 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
                             num_frags--) {
                                u16 cur_size = pkt_len > rxq->rx_buf_size ?
                                                rxq->rx_buf_size : pkt_len;
+                               if (unlikely(!cur_size)) {
+                                       DP_ERR(edev,
+                                              "Still got %d BDs for mapping jumbo, but length became 0\n",
+                                              num_frags);
+                                       qede_recycle_rx_bd_ring(rxq, edev,
+                                                               num_frags);
+                                       dev_kfree_skb_any(skb);
+                                       goto next_cqe;
+                               }
 
-                               WARN_ONCE(!cur_size,
-                                         "Still got %d BDs for mapping jumbo, but length became 0\n",
-                                         num_frags);
-
-                               if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
+                               if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
+                                       qede_recycle_rx_bd_ring(rxq, edev,
+                                                               num_frags);
+                                       dev_kfree_skb_any(skb);
                                        goto next_cqe;
+                               }
 
-                               rxq->sw_rx_cons++;
                                sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
                                sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
-                               qed_chain_consume(&rxq->rx_bd_ring);
+                               qede_rx_bd_ring_consume(rxq);
+
                                dma_unmap_page(&edev->pdev->dev,
                                               sw_rx_data->mapping,
                                               PAGE_SIZE, DMA_FROM_DEVICE);
@@ -1330,7 +1466,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
                                pkt_len -= cur_size;
                        }
 
-                       if (pkt_len)
+                       if (unlikely(pkt_len))
                                DP_ERR(edev,
                                       "Mapped all BDs of jumbo, but still have %d bytes\n",
                                       pkt_len);
@@ -1349,10 +1485,6 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
                skb_record_rx_queue(skb, fp->rss_id);
 
                qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
-
-               qed_chain_consume(&rxq->rx_bd_ring);
-next_rx:
-               rxq->sw_rx_cons++;
 next_rx_only:
                rx_pkt++;
 
@@ -1506,16 +1638,25 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
        edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;
 
        edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets;
-       edev->stats.rx_127_byte_packets = stats.rx_127_byte_packets;
-       edev->stats.rx_255_byte_packets = stats.rx_255_byte_packets;
-       edev->stats.rx_511_byte_packets = stats.rx_511_byte_packets;
-       edev->stats.rx_1023_byte_packets = stats.rx_1023_byte_packets;
-       edev->stats.rx_1518_byte_packets = stats.rx_1518_byte_packets;
-       edev->stats.rx_1522_byte_packets = stats.rx_1522_byte_packets;
-       edev->stats.rx_2047_byte_packets = stats.rx_2047_byte_packets;
-       edev->stats.rx_4095_byte_packets = stats.rx_4095_byte_packets;
-       edev->stats.rx_9216_byte_packets = stats.rx_9216_byte_packets;
-       edev->stats.rx_16383_byte_packets = stats.rx_16383_byte_packets;
+       edev->stats.rx_65_to_127_byte_packets = stats.rx_65_to_127_byte_packets;
+       edev->stats.rx_128_to_255_byte_packets =
+                               stats.rx_128_to_255_byte_packets;
+       edev->stats.rx_256_to_511_byte_packets =
+                               stats.rx_256_to_511_byte_packets;
+       edev->stats.rx_512_to_1023_byte_packets =
+                               stats.rx_512_to_1023_byte_packets;
+       edev->stats.rx_1024_to_1518_byte_packets =
+                               stats.rx_1024_to_1518_byte_packets;
+       edev->stats.rx_1519_to_1522_byte_packets =
+                               stats.rx_1519_to_1522_byte_packets;
+       edev->stats.rx_1519_to_2047_byte_packets =
+                               stats.rx_1519_to_2047_byte_packets;
+       edev->stats.rx_2048_to_4095_byte_packets =
+                               stats.rx_2048_to_4095_byte_packets;
+       edev->stats.rx_4096_to_9216_byte_packets =
+                               stats.rx_4096_to_9216_byte_packets;
+       edev->stats.rx_9217_to_16383_byte_packets =
+                               stats.rx_9217_to_16383_byte_packets;
        edev->stats.rx_crc_errors = stats.rx_crc_errors;
        edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames;
        edev->stats.rx_pause_frames = stats.rx_pause_frames;
@@ -1830,6 +1971,76 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
        edev->accept_any_vlan = false;
 }
 
+#ifdef CONFIG_QEDE_VXLAN
+static void qede_add_vxlan_port(struct net_device *dev,
+                               sa_family_t sa_family, __be16 port)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       u16 t_port = ntohs(port);
+
+       if (edev->vxlan_dst_port)
+               return;
+
+       edev->vxlan_dst_port = t_port;
+
+       DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d", t_port);
+
+       set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+       schedule_delayed_work(&edev->sp_task, 0);
+}
+
+static void qede_del_vxlan_port(struct net_device *dev,
+                               sa_family_t sa_family, __be16 port)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       u16 t_port = ntohs(port);
+
+       if (t_port != edev->vxlan_dst_port)
+               return;
+
+       edev->vxlan_dst_port = 0;
+
+       DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d", t_port);
+
+       set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+       schedule_delayed_work(&edev->sp_task, 0);
+}
+#endif
+
+#ifdef CONFIG_QEDE_GENEVE
+static void qede_add_geneve_port(struct net_device *dev,
+                                sa_family_t sa_family, __be16 port)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       u16 t_port = ntohs(port);
+
+       if (edev->geneve_dst_port)
+               return;
+
+       edev->geneve_dst_port = t_port;
+
+       DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d", t_port);
+       set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+       schedule_delayed_work(&edev->sp_task, 0);
+}
+
+static void qede_del_geneve_port(struct net_device *dev,
+                                sa_family_t sa_family, __be16 port)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       u16 t_port = ntohs(port);
+
+       if (t_port != edev->geneve_dst_port)
+               return;
+
+       edev->geneve_dst_port = 0;
+
+       DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d", t_port);
+       set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+       schedule_delayed_work(&edev->sp_task, 0);
+}
+#endif
+
 static const struct net_device_ops qede_netdev_ops = {
        .ndo_open = qede_open,
        .ndo_stop = qede_close,
@@ -1841,6 +2052,14 @@ static const struct net_device_ops qede_netdev_ops = {
        .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
        .ndo_get_stats64 = qede_get_stats64,
+#ifdef CONFIG_QEDE_VXLAN
+       .ndo_add_vxlan_port = qede_add_vxlan_port,
+       .ndo_del_vxlan_port = qede_del_vxlan_port,
+#endif
+#ifdef CONFIG_QEDE_GENEVE
+       .ndo_add_geneve_port = qede_add_geneve_port,
+       .ndo_del_geneve_port = qede_del_geneve_port,
+#endif
 };
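
These hooks are invoked by the vxlan/geneve cores whenever a UDP tunnel socket is created or destroyed, e.g. after ip link add vxlan0 type vxlan id 10 dstport 4789 ... (parameters illustrative). They deliberately do no device I/O themselves: they record the port, set the matching QEDE_SP_*_PORT_CONFIG bit and kick sp_task, which pushes the port to the device through qed_ops->tunn_config() in process context, as shown in qede_sp_task() below.
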
 
 /* -------------------------------------------------------------------------
@@ -1875,8 +2094,6 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
        edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
        edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
 
-       DP_INFO(edev, "Allocated netdev with 64 tx queues and 64 rx queues\n");
-
        SET_NETDEV_DEV(ndev, &pdev->dev);
 
        memset(&edev->stats, 0, sizeof(edev->stats));
@@ -1913,6 +2130,14 @@ static void qede_init_ndev(struct qede_dev *edev)
                      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                      NETIF_F_TSO | NETIF_F_TSO6;
 
+       /* Encap features*/
+       hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
+                      NETIF_F_TSO_ECN;
+       ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+                               NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN |
+                               NETIF_F_TSO6 | NETIF_F_GSO_GRE |
+                               NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM;
+
        ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
                              NETIF_F_HIGHDMA;
        ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
@@ -2013,6 +2238,8 @@ static void qede_sp_task(struct work_struct *work)
 {
        struct qede_dev *edev = container_of(work, struct qede_dev,
                                             sp_task.work);
+       struct qed_dev *cdev = edev->cdev;
+
        mutex_lock(&edev->qede_lock);
 
        if (edev->state == QEDE_STATE_OPEN) {
@@ -2020,6 +2247,24 @@ static void qede_sp_task(struct work_struct *work)
                        qede_config_rx_mode(edev->ndev);
        }
 
+       if (test_and_clear_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags)) {
+               struct qed_tunn_params tunn_params;
+
+               memset(&tunn_params, 0, sizeof(tunn_params));
+               tunn_params.update_vxlan_port = 1;
+               tunn_params.vxlan_port = edev->vxlan_dst_port;
+               qed_ops->tunn_config(cdev, &tunn_params);
+       }
+
+       if (test_and_clear_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags)) {
+               struct qed_tunn_params tunn_params;
+
+               memset(&tunn_params, 0, sizeof(tunn_params));
+               tunn_params.update_geneve_port = 1;
+               tunn_params.geneve_port = edev->geneve_dst_port;
+               qed_ops->tunn_config(cdev, &tunn_params);
+       }
+
        mutex_unlock(&edev->qede_lock);
 }
 
@@ -2027,9 +2272,9 @@ static void qede_update_pf_params(struct qed_dev *cdev)
 {
        struct qed_pf_params pf_params;
 
-       /* 16 rx + 16 tx */
+       /* 64 rx + 64 tx */
        memset(&pf_params, 0, sizeof(struct qed_pf_params));
-       pf_params.eth_pf_params.num_cons = 32;
+       pf_params.eth_pf_params.num_cons = 128;
        qed_ops->common->update_pf_params(cdev, &pf_params);
 }
 
@@ -2257,7 +2502,7 @@ static void qede_free_sge_mem(struct qede_dev *edev,
                struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
                struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
 
-               if (replace_buf) {
+               if (replace_buf->data) {
                        dma_unmap_page(&edev->pdev->dev,
                                       dma_unmap_addr(replace_buf, mapping),
                                       PAGE_SIZE, DMA_FROM_DEVICE);
@@ -2377,7 +2622,7 @@ err:
 static int qede_alloc_mem_rxq(struct qede_dev *edev,
                              struct qede_rx_queue *rxq)
 {
-       int i, rc, size, num_allocated;
+       int i, rc, size;
 
        rxq->num_rx_buffers = edev->q_num_rx_buffers;
 
@@ -2394,6 +2639,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
        rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
        if (!rxq->sw_rx_ring) {
                DP_ERR(edev, "Rx buffers ring allocation failed\n");
+               rc = -ENOMEM;
                goto err;
        }
 
@@ -2421,26 +2667,16 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
        /* Allocate buffers for the Rx ring */
        for (i = 0; i < rxq->num_rx_buffers; i++) {
                rc = qede_alloc_rx_buffer(edev, rxq);
-               if (rc)
-                       break;
-       }
-       num_allocated = i;
-       if (!num_allocated) {
-               DP_ERR(edev, "Rx buffers allocation failed\n");
-               goto err;
-       } else if (num_allocated < rxq->num_rx_buffers) {
-               DP_NOTICE(edev,
-                         "Allocated less buffers than desired (%d allocated)\n",
-                         num_allocated);
+               if (rc) {
+                       DP_ERR(edev,
+                              "Rx buffers allocation failed at index %d\n", i);
+                       goto err;
+               }
        }
 
-       qede_alloc_sge_mem(edev, rxq);
-
-       return 0;
-
+       rc = qede_alloc_sge_mem(edev, rxq);
 err:
-       qede_free_mem_rxq(edev, rxq);
-       return -ENOMEM;
+       return rc;
 }
 
 static void qede_free_mem_txq(struct qede_dev *edev,
@@ -2523,10 +2759,8 @@ static int qede_alloc_mem_fp(struct qede_dev *edev,
        }
 
        return 0;
-
 err:
-       qede_free_mem_fp(edev, fp);
-       return -ENOMEM;
+       return rc;
 }
 
 static void qede_free_mem_load(struct qede_dev *edev)
@@ -2549,22 +2783,13 @@ static int qede_alloc_mem_load(struct qede_dev *edev)
                struct qede_fastpath *fp = &edev->fp_array[rss_id];
 
                rc = qede_alloc_mem_fp(edev, fp);
-               if (rc)
-                       break;
-       }
-
-       if (rss_id != QEDE_RSS_CNT(edev)) {
-               /* Failed allocating memory for all the queues */
-               if (!rss_id) {
+               if (rc) {
                        DP_ERR(edev,
-                              "Failed to allocate memory for the leading queue\n");
-                       rc = -ENOMEM;
-               } else {
-                       DP_NOTICE(edev,
-                                 "Failed to allocate memory for all of RSS queues\n Desired: %d queues, allocated: %d queues\n",
-                                 QEDE_RSS_CNT(edev), rss_id);
+                              "Failed to allocate memory for fastpath - rss id = %d\n",
+                              rss_id);
+                       qede_free_mem_load(edev);
+                       return rc;
                }
-               edev->num_rss = rss_id;
        }
 
        return 0;
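The reworked loop above adopts a fail-fast convention: each per-queue allocator returns an errno, and the first failure releases everything already allocated at a single cleanup point instead of continuing with fewer queues. A hedged sketch of the idiom, with alloc_one() and free_all() as placeholder names:

/* Fail-fast allocation idiom, as used above; alloc_one() and free_all()
 * are placeholders, not the driver's actual functions.
 */
static int alloc_all(struct my_dev *edev, int count)
{
	int i, rc;

	for (i = 0; i < count; i++) {
		rc = alloc_one(edev, i);
		if (rc) {
			free_all(edev);	/* single cleanup point */
			return rc;
		}
	}
	return 0;
}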
@@ -2835,10 +3060,10 @@ static int qede_start_queues(struct qede_dev *edev)
        int rc, tc, i;
        int vlan_removal_en = 1;
        struct qed_dev *cdev = edev->cdev;
-       struct qed_update_vport_rss_params *rss_params = &edev->rss_params;
        struct qed_update_vport_params vport_update_params;
        struct qed_queue_start_common_params q_params;
        struct qed_start_vport_params start = {0};
+       bool reset_rss_indir = false;
 
        if (!edev->num_rss) {
                DP_ERR(edev,
@@ -2933,16 +3158,50 @@ static int qede_start_queues(struct qede_dev *edev)
        /* Fill struct with RSS params */
        if (QEDE_RSS_CNT(edev) > 1) {
                vport_update_params.update_rss_flg = 1;
-               for (i = 0; i < 128; i++)
-                       rss_params->rss_ind_table[i] =
-                       ethtool_rxfh_indir_default(i, QEDE_RSS_CNT(edev));
-               netdev_rss_key_fill(rss_params->rss_key,
-                                   sizeof(rss_params->rss_key));
+
+               /* Validate that the current RSS config uses valid entries */
+               for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+                       if (edev->rss_params.rss_ind_table[i] >=
+                           edev->num_rss) {
+                               reset_rss_indir = true;
+                               break;
+                       }
+               }
+
+               if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) ||
+                   reset_rss_indir) {
+                       u16 val;
+
+                       for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+                               u16 indir_val;
+
+                               val = QEDE_RSS_CNT(edev);
+                               indir_val = ethtool_rxfh_indir_default(i, val);
+                               edev->rss_params.rss_ind_table[i] = indir_val;
+                       }
+                       edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
+               }
+
+               if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
+                       netdev_rss_key_fill(edev->rss_params.rss_key,
+                                           sizeof(edev->rss_params.rss_key));
+                       edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
+               }
+
+               if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
+                       edev->rss_params.rss_caps = QED_RSS_IPV4 |
+                                                   QED_RSS_IPV6 |
+                                                   QED_RSS_IPV4_TCP |
+                                                   QED_RSS_IPV6_TCP;
+                       edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
+               }
+
+               memcpy(&vport_update_params.rss_params, &edev->rss_params,
+                      sizeof(vport_update_params.rss_params));
        } else {
-               memset(rss_params, 0, sizeof(*rss_params));
+               memset(&vport_update_params.rss_params, 0,
+                      sizeof(vport_update_params.rss_params));
        }
-       memcpy(&vport_update_params.rss_params, rss_params,
-              sizeof(*rss_params));
 
        rc = edev->ops->vport_update(cdev, &vport_update_params);
        if (rc) {
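The validation above only regenerates the indirection table when a stored entry points past the new queue count, so a user-configured table survives an otherwise unrelated reload. For orientation, ethtool_rxfh_indir_default() spreads entries round-robin across the RX queues; as we recall the 4.7-era include/linux/ethtool.h it is simply index % n_rx_rings, which this standalone C program illustrates:

/* Standalone illustration of the default RSS indirection layout; the
 * modulo body mirrors ethtool_rxfh_indir_default() as we recall it.
 */
#include <stdio.h>

static unsigned int rxfh_indir_default(unsigned int index,
				       unsigned int n_rx_rings)
{
	return index % n_rx_rings;
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < 8; i++)	/* 8 table entries, 4 RX queues */
		printf("ind_table[%u] = %u\n", i, rxfh_indir_default(i, 4));
	return 0;	/* prints 0 1 2 3 0 1 2 3 */
}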
@@ -3124,12 +3383,24 @@ void qede_reload(struct qede_dev *edev,
 static int qede_open(struct net_device *ndev)
 {
        struct qede_dev *edev = netdev_priv(ndev);
+       int rc;
 
        netif_carrier_off(ndev);
 
        edev->ops->common->set_power_state(edev->cdev, PCI_D0);
 
-       return qede_load(edev, QEDE_LOAD_NORMAL);
+       rc = qede_load(edev, QEDE_LOAD_NORMAL);
+
+       if (rc)
+               return rc;
+
+#ifdef CONFIG_QEDE_VXLAN
+       vxlan_get_rx_port(ndev);
+#endif
+#ifdef CONFIG_QEDE_GENEVE
+       geneve_get_rx_port(ndev);
+#endif
+       return 0;
 }
 
 static int qede_close(struct net_device *ndev)
index 55007f1e6bbcc77981414e9db1e73d8db8502988..caf6ddb7ea76f89f0a5ea1fd87e8363b8f89878a 100644 (file)
@@ -37,8 +37,8 @@
 
 #define _QLCNIC_LINUX_MAJOR 5
 #define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 63
-#define QLCNIC_LINUX_VERSIONID  "5.3.63"
+#define _QLCNIC_LINUX_SUBVERSION 64
+#define QLCNIC_LINUX_VERSIONID  "5.3.64"
 #define QLCNIC_DRV_IDC_VER  0x01
 #define QLCNIC_DRIVER_VERSION  ((_QLCNIC_LINUX_MAJOR << 16) |\
                 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
index 1205f6f9c941735a9f7bafb88e36705e4a5de7f2..1c29105b6c364f0064da46f0e060e11208b2526e 100644 (file)
@@ -3952,8 +3952,14 @@ static pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *pdev,
 
 static pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *pdev)
 {
-       return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
-                               PCI_ERS_RESULT_RECOVERED;
+       pci_ers_result_t res;
+
+       rtnl_lock();
+       res = qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
+                                        PCI_ERS_RESULT_RECOVERED;
+       rtnl_unlock();
+
+       return res;
 }
 
 static void qlcnic_82xx_io_resume(struct pci_dev *pdev)
index b28e73ea2c258850f1a00cb195e45f4751900f43..83d72106471c1237171f68955a1885df3ed950c1 100644 (file)
@@ -4687,7 +4687,7 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
        /*
         * Set up the operating parameters.
         */
-       qdev->workqueue = create_singlethread_workqueue(ndev->name);
+       qdev->workqueue = alloc_ordered_workqueue(ndev->name, WQ_MEM_RECLAIM);
        INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
        INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
        INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
index 1ef03939d25f478356112fbe376ff79cfd1e8eaa..6e2add97947128804668a9cdb08bac22f1b888d2 100644 (file)
@@ -719,7 +719,7 @@ qcaspi_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
                qca->stats.ring_full++;
        }
 
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
 
        if (qca->spi_thread &&
            qca->spi_thread->state != TASK_RUNNING)
@@ -734,7 +734,7 @@ qcaspi_netdev_tx_timeout(struct net_device *dev)
        struct qcaspi *qca = netdev_priv(dev);
 
        netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n",
-                   jiffies, jiffies - dev->trans_start);
+                   jiffies, jiffies - dev_trans_start(dev));
        qca->net_dev->stats.tx_errors++;
        /* Trigger tx queue flush and QCA7000 reset */
        qca->sync = QCASPI_SYNC_UNKNOWN;
index d77d60ea820255cd41bc6d37606dfdcd7c4f0395..5cb96785fb632703de8e9c08fe7fca0f37949cbd 100644 (file)
@@ -544,7 +544,7 @@ static void tx_timeout(struct net_device *dev)
        dev->stats.tx_errors++;
        /* Try to restart the adapter. */
        hardware_init(dev);
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        netif_wake_queue(dev);
        dev->stats.tx_errors++;
 }
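This is the first of many mechanical conversions in the rest of this series: drivers stop writing dev->trans_start directly and call netif_trans_update() instead, with dev_trans_start() as the reader (which, as we understand it, scans the TX queues for the most recent timestamp). An approximate sketch of the update helper, modeled on the definition this series adds to netdevice.h; treat the body as illustrative:

/* Approximate body of netif_trans_update(), for orientation only. */
static inline void netif_trans_update(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	if (txq->trans_start != jiffies)
		txq->trans_start = jiffies;	/* avoid dirtying a hot cacheline */
}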
index 5c1624147778a509f59282d74d5a79941c66d776..4e5d5e953e15b680a2660a5b381d0a2eaec28782 100644 (file)
@@ -1045,7 +1045,7 @@ void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
                 u32 set);
 int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value);
 
-irqreturn_t ravb_ptp_interrupt(struct net_device *ndev);
+void ravb_ptp_interrupt(struct net_device *ndev);
 void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev);
 void ravb_ptp_stop(struct net_device *ndev);
 
index 4b71951e185d8ad73e145d95693be097c46e33d0..34066e0649f5c673b6d623c2d2056340a5c39801 100644 (file)
@@ -246,10 +246,9 @@ static void ravb_ring_format(struct net_device *ndev, int q)
        for (i = 0; i < priv->num_rx_ring[q]; i++) {
                /* RX descriptor */
                rx_desc = &priv->rx_ring[q][i];
-               /* The size of the buffer should be on 16-byte boundary. */
-               rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
+               rx_desc->ds_cc = cpu_to_le16(PKT_BUF_SZ);
                dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
-                                         ALIGN(PKT_BUF_SZ, 16),
+                                         PKT_BUF_SZ,
                                          DMA_FROM_DEVICE);
                /* We just set the data size to 0 for a failed mapping which
                 * should prevent DMA from happening...
@@ -558,7 +557,7 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
                        skb = priv->rx_skb[q][entry];
                        priv->rx_skb[q][entry] = NULL;
                        dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
-                                        ALIGN(PKT_BUF_SZ, 16),
+                                        PKT_BUF_SZ,
                                         DMA_FROM_DEVICE);
                        get_ts &= (q == RAVB_NC) ?
                                        RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
@@ -588,8 +587,7 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
        for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
                entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
                desc = &priv->rx_ring[q][entry];
-               /* The size of the buffer should be on 16-byte boundary. */
-               desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
+               desc->ds_cc = cpu_to_le16(PKT_BUF_SZ);
 
                if (!priv->rx_skb[q][entry]) {
                        skb = netdev_alloc_skb(ndev,
@@ -807,8 +805,10 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
        }
 
        /* gPTP interrupt status summary */
-       if ((iss & ISS_CGIS) && ravb_ptp_interrupt(ndev) == IRQ_HANDLED)
+       if (iss & ISS_CGIS) {
+               ravb_ptp_interrupt(ndev);
                result = IRQ_HANDLED;
+       }
 
        mmiowb();
        spin_unlock(&priv->lock);
@@ -838,8 +838,10 @@ static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
        }
 
        /* gPTP interrupt status summary */
-       if ((iss & ISS_CGIS) && ravb_ptp_interrupt(ndev) == IRQ_HANDLED)
+       if (iss & ISS_CGIS) {
+               ravb_ptp_interrupt(ndev);
                result = IRQ_HANDLED;
+       }
 
        mmiowb();
        spin_unlock(&priv->lock);
@@ -1850,6 +1852,9 @@ static int ravb_set_gti(struct net_device *ndev)
        rate = clk_get_rate(clk);
        clk_put(clk);
 
+       if (!rate)
+               return -EINVAL;
+
        inc = 1000000000ULL << 20;
        do_div(inc, rate);
 
index f1b2cbb336e8343aab95e301cac778a099d41950..eede70ec37f8c02759727ff7e881464adf7fe707 100644 (file)
@@ -296,7 +296,7 @@ static const struct ptp_clock_info ravb_ptp_info = {
 };
 
 /* Caller must hold the lock */
-irqreturn_t ravb_ptp_interrupt(struct net_device *ndev)
+void ravb_ptp_interrupt(struct net_device *ndev)
 {
        struct ravb_private *priv = netdev_priv(ndev);
        u32 gis = ravb_read(ndev, GIS);
@@ -319,12 +319,7 @@ irqreturn_t ravb_ptp_interrupt(struct net_device *ndev)
                }
        }
 
-       if (gis) {
-               ravb_write(ndev, ~gis, GIS);
-               return IRQ_HANDLED;
-       }
-
-       return IRQ_NONE;
+       ravb_write(ndev, ~gis, GIS);
 }
 
 void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
index 004e2d7560fd86e289980a2526a8e6e9f4fb77c3..04cd39f66cc9c975483242b1cd7307708511cff6 100644 (file)
@@ -482,7 +482,7 @@ static void sh_eth_chip_reset(struct net_device *ndev)
        struct sh_eth_private *mdp = netdev_priv(ndev);
 
        /* reset device */
-       sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
+       sh_eth_tsu_write(mdp, ARSTR_ARST, ARSTR);
        mdelay(1);
 }
 
@@ -537,11 +537,7 @@ static struct sh_eth_cpu_data r7s72100_data = {
 
 static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
 {
-       struct sh_eth_private *mdp = netdev_priv(ndev);
-
-       /* reset device */
-       sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
-       mdelay(1);
+       sh_eth_chip_reset(ndev);
 
        sh_eth_select_mii(ndev);
 }
@@ -725,8 +721,8 @@ static struct sh_eth_cpu_data sh7757_data = {
 #define GIGA_MAHR(port)                (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
 static void sh_eth_chip_reset_giga(struct net_device *ndev)
 {
-       int i;
        u32 mahr[2], malr[2];
+       int i;
 
        /* save MAHR and MALR */
        for (i = 0; i < 2; i++) {
@@ -734,9 +730,7 @@ static void sh_eth_chip_reset_giga(struct net_device *ndev)
                mahr[i] = ioread32((void *)GIGA_MAHR(i));
        }
 
-       /* reset device */
-       iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
-       mdelay(1);
+       sh_eth_chip_reset(ndev);
 
        /* restore MAHR and MALR */
        for (i = 0; i < 2; i++) {
@@ -899,7 +893,7 @@ static int sh_eth_check_reset(struct net_device *ndev)
        int cnt = 100;
 
        while (cnt > 0) {
-               if (!(sh_eth_read(ndev, EDMR) & 0x3))
+               if (!(sh_eth_read(ndev, EDMR) & EDMR_SRST_GETHER))
                        break;
                mdelay(1);
                cnt--;
@@ -1229,7 +1223,7 @@ ring_free:
        return -ENOMEM;
 }
 
-static int sh_eth_dev_init(struct net_device *ndev, bool start)
+static int sh_eth_dev_init(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int ret;
@@ -1279,10 +1273,8 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
                     RFLR);
 
        sh_eth_modify(ndev, EESR, 0, 0);
-       if (start) {
-               mdp->irq_enabled = true;
-               sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
-       }
+       mdp->irq_enabled = true;
+       sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
 
        /* PAUSE Prohibition */
        sh_eth_write(ndev, ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) |
@@ -1295,8 +1287,7 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
        sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
 
        /* E-MAC Interrupt Enable register */
-       if (start)
-               sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
+       sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
 
        /* Set MAC address */
        update_mac_address(ndev);
@@ -1309,10 +1300,8 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
        if (mdp->cd->tpauser)
                sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
 
-       if (start) {
-               /* Setting the Rx mode will start the Rx process. */
-               sh_eth_write(ndev, EDRRR_R, EDRRR);
-       }
+       /* Setting the Rx mode will start the Rx process. */
+       sh_eth_write(ndev, EDRRR_R, EDRRR);
 
        return ret;
 }
@@ -2194,17 +2183,13 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
                                   __func__);
                        return ret;
                }
-               ret = sh_eth_dev_init(ndev, false);
+               ret = sh_eth_dev_init(ndev);
                if (ret < 0) {
                        netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
                                   __func__);
                        return ret;
                }
 
-               mdp->irq_enabled = true;
-               sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
-               /* Setting the Rx mode will start the Rx process. */
-               sh_eth_write(ndev, EDRRR_R, EDRRR);
                netif_device_attach(ndev);
        }
 
@@ -2250,7 +2235,7 @@ static int sh_eth_open(struct net_device *ndev)
                goto out_free_irq;
 
        /* device init */
-       ret = sh_eth_dev_init(ndev, true);
+       ret = sh_eth_dev_init(ndev);
        if (ret)
                goto out_free_irq;
 
@@ -2303,7 +2288,7 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
        }
 
        /* device init */
-       sh_eth_dev_init(ndev, true);
+       sh_eth_dev_init(ndev);
 
        netif_start_queue(ndev);
 }
index 8fa4ef3a7fdd2f851707c2eecc0db46671fe8fde..c62380e34a1d077d6e44e6111b3af86fad27c91e 100644 (file)
@@ -394,7 +394,7 @@ enum RPADIR_BIT {
 #define DEFAULT_FDR_INIT       0x00000707
 
 /* ARSTR */
-enum ARSTR_BIT { ARSTR_ARSTR = 0x00000001, };
+enum ARSTR_BIT { ARSTR_ARST = 0x00000001, };
 
 /* TSU_FWEN0 */
 enum TSU_FWEN0_BIT {
index ca73366057486a4bc753e7fb00740cec5e748221..c2bd5378ffdaf005d039785bccfe59f52b2d6c8d 100644 (file)
@@ -572,7 +572,7 @@ static inline int sgiseeq_reset(struct net_device *dev)
        if (err)
                return err;
 
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        netif_wake_queue(dev);
 
        return 0;
@@ -648,7 +648,7 @@ static void timeout(struct net_device *dev)
        printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name);
        sgiseeq_reset(dev);
 
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        netif_wake_queue(dev);
 }
 
index 98d33d462c6ca0f6b78499cde0b8d3e2b51f8b07..1681084cc96f8270d36e55d9ede099a58632566a 100644 (file)
@@ -1920,6 +1920,10 @@ static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context,
                return 0;
        }
 
+       if (nic_data->datapath_caps &
+           1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN)
+               return -EOPNOTSUPP;
+
        MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
                       nic_data->vport_id);
        MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
@@ -2923,9 +2927,16 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx,
                                      bool replacing)
 {
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       u32 flags = spec->flags;
 
        memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN);
 
+       /* Remove RSS flag if we don't have an RSS context. */
+       if (flags & EFX_FILTER_FLAG_RX_RSS &&
+           spec->rss_context == EFX_FILTER_RSS_CONTEXT_DEFAULT &&
+           nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID)
+               flags &= ~EFX_FILTER_FLAG_RX_RSS;
+
        if (replacing) {
                MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
                               MC_CMD_FILTER_OP_IN_OP_REPLACE);
@@ -2985,10 +2996,10 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx,
                       spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
                       0 : spec->dmaq_id);
        MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
-                      (spec->flags & EFX_FILTER_FLAG_RX_RSS) ?
+                      (flags & EFX_FILTER_FLAG_RX_RSS) ?
                       MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
                       MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
-       if (spec->flags & EFX_FILTER_FLAG_RX_RSS)
+       if (flags & EFX_FILTER_FLAG_RX_RSS)
                MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT,
                               spec->rss_context !=
                               EFX_FILTER_RSS_CONTEXT_DEFAULT ?
index 5eac523b4b0ca6c557e9acd59d3b12207fa802ca..aaa80f13859b160f81b19e776a0cac422e302008 100644 (file)
@@ -708,7 +708,7 @@ static int meth_tx(struct sk_buff *skb, struct net_device *dev)
        mace->eth.dma_ctrl = priv->dma_ctrl;
 
        meth_add_to_tx_ring(priv, skb);
-       dev->trans_start = jiffies; /* save the timestamp */
+       netif_trans_update(dev); /* save the timestamp */
 
        /* If TX ring is full, tell the upper layer to stop sending packets */
        if (meth_tx_full(dev)) {
@@ -756,7 +756,7 @@ static void meth_tx_timeout(struct net_device *dev)
        /* Enable interrupt */
        spin_unlock_irqrestore(&priv->meth_lock, flags);
 
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        netif_wake_queue(dev);
 }
 
index fd812d2e5e1c863bbb95a159617ad6beadbbeaec..95001ee408ab1c7229c3bb4067e68927d41f7bb3 100644 (file)
@@ -1575,7 +1575,7 @@ static void sis900_tx_timeout(struct net_device *net_dev)
 
        spin_unlock_irqrestore(&sis_priv->lock, flags);
 
-       net_dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(net_dev); /* prevent tx timeout */
 
        /* load Transmit Descriptor Register */
        sw32(txdp, sis_priv->tx_ring_dma);
index 443f1da9fc9e26948a7f48ac7e504a9bfa29086b..7186b89269ad718a2723a9b2f6f79d77e1061aef 100644 (file)
@@ -889,7 +889,7 @@ static void epic_tx_timeout(struct net_device *dev)
                ew32(COMMAND, TxQueued);
        }
 
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        dev->stats.tx_errors++;
        if (!ep->tx_full)
                netif_wake_queue(dev);
index a733868a43aa893aa6246bbef5ff6d76a52b091e..cb49c9654f0a7c2d8d0bb922bc4b38c7e6180419 100644 (file)
@@ -499,7 +499,7 @@ static void smc911x_hardware_send_pkt(struct net_device *dev)
        /* DMA complete IRQ will free buffer and set jiffies */
 #else
        SMC_PUSH_DATA(lp, buf, len);
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
        dev_kfree_skb_irq(skb);
 #endif
        if (!lp->tx_throttle) {
@@ -1189,7 +1189,7 @@ smc911x_tx_dma_irq(void *data)
        DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "TX DMA irq handler\n");
        BUG_ON(skb == NULL);
        dma_unmap_single(NULL, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE);
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
        dev_kfree_skb_irq(skb);
        lp->current_tx_skb = NULL;
        if (lp->pending_tx_skb != NULL)
@@ -1283,7 +1283,7 @@ static void smc911x_timeout(struct net_device *dev)
                schedule_work(&lp->phy_configure);
 
        /* We can accept TX packets again */
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        netif_wake_queue(dev);
 }
 
index 664f596971b5f6351654bae97c107e6f0b6ce249..d496888b85d34b89331c37c9768d07e6eff7ea64 100644 (file)
@@ -663,7 +663,7 @@ static void smc_hardware_send_packet( struct net_device * dev )
        lp->saved_skb = NULL;
        dev_kfree_skb_any (skb);
 
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
 
        /* we can send another packet */
        netif_wake_queue(dev);
@@ -1104,7 +1104,7 @@ static void smc_timeout(struct net_device *dev)
        /* "kick" the adaptor */
        smc_reset( dev->base_addr );
        smc_enable( dev->base_addr );
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        /* clear anything saved */
        ((struct smc_local *)netdev_priv(dev))->saved_skb = NULL;
        netif_wake_queue(dev);
index 3449893aea8d402fb2fc56582df92a04aa157c10..db3c696d7002ad921bbf8d4d3257f7e738c36c07 100644 (file)
@@ -1172,7 +1172,7 @@ static void smc_hardware_send_packet(struct net_device * dev)
 
     smc->saved_skb = NULL;
     dev_kfree_skb_irq(skb);
-    dev->trans_start = jiffies;
+    netif_trans_update(dev);
     netif_start_queue(dev);
 }
 
@@ -1187,7 +1187,7 @@ static void smc_tx_timeout(struct net_device *dev)
                  inw(ioaddr)&0xff, inw(ioaddr + 2));
     dev->stats.tx_errors++;
     smc_reset(dev);
-    dev->trans_start = jiffies; /* prevent tx timeout */
+    netif_trans_update(dev); /* prevent tx timeout */
     smc->saved_skb = NULL;
     netif_wake_queue(dev);
 }
index c5ed27c54724a9a118f46f4316ad96ae71dc5a7c..18ac52ded6965a5f5dbe811aa33d6ba11b19686a 100644 (file)
@@ -619,7 +619,7 @@ static void smc_hardware_send_pkt(unsigned long data)
        SMC_SET_MMU_CMD(lp, MC_ENQUEUE);
        smc_special_unlock(&lp->lock, flags);
 
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += len;
 
@@ -1364,7 +1364,7 @@ static void smc_timeout(struct net_device *dev)
                schedule_work(&lp->phy_configure);
 
        /* We can accept TX packets again */
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        netif_wake_queue(dev);
 }
 
index f0d797ab74d8f2ea927c6956525b728b9c20ddca..f13499fa1f581581501c33b42f173f26291a6be2 100644 (file)
@@ -34,6 +34,9 @@
 #define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003
 #define SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK 0x00000010
 
+#define SYSMGR_FPGAGRP_MODULE_REG  0x00000028
+#define SYSMGR_FPGAGRP_MODULE_EMAC 0x00000004
+
 #define EMAC_SPLITTER_CTRL_REG                 0x0
 #define EMAC_SPLITTER_CTRL_SPEED_MASK          0x3
 #define EMAC_SPLITTER_CTRL_SPEED_10            0x2
@@ -89,15 +92,6 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *
        struct device_node *np_splitter;
        struct resource res_splitter;
 
-       dwmac->stmmac_rst = devm_reset_control_get(dev,
-                                                 STMMAC_RESOURCE_NAME);
-       if (IS_ERR(dwmac->stmmac_rst)) {
-               dev_info(dev, "Could not get reset control!\n");
-               if (PTR_ERR(dwmac->stmmac_rst) == -EPROBE_DEFER)
-                       return -EPROBE_DEFER;
-               dwmac->stmmac_rst = NULL;
-       }
-
        dwmac->interface = of_get_phy_mode(np);
 
        sys_mgr_base_addr = syscon_regmap_lookup_by_phandle(np, "altr,sysmgr-syscon");
@@ -142,13 +136,13 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *
        return 0;
 }
 
-static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
+static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
 {
        struct regmap *sys_mgr_base_addr = dwmac->sys_mgr_base_addr;
        int phymode = dwmac->interface;
        u32 reg_offset = dwmac->reg_offset;
        u32 reg_shift = dwmac->reg_shift;
-       u32 ctrl, val;
+       u32 ctrl, val, module;
 
        switch (phymode) {
        case PHY_INTERFACE_MODE_RGMII:
@@ -171,48 +165,26 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
        if (dwmac->splitter_base)
                val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
 
+       /* Assert reset to the enet controller before changing the phy mode */
+       if (dwmac->stmmac_rst)
+               reset_control_assert(dwmac->stmmac_rst);
+
        regmap_read(sys_mgr_base_addr, reg_offset, &ctrl);
        ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
        ctrl |= val << reg_shift;
 
-       if (dwmac->f2h_ptp_ref_clk)
+       if (dwmac->f2h_ptp_ref_clk) {
                ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2);
-       else
+               regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
+                           &module);
+               module |= (SYSMGR_FPGAGRP_MODULE_EMAC << (reg_shift / 2));
+               regmap_write(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
+                            module);
+       } else {
                ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2));
+       }
 
        regmap_write(sys_mgr_base_addr, reg_offset, ctrl);
-       return 0;
-}
-
-static void socfpga_dwmac_exit(struct platform_device *pdev, void *priv)
-{
-       struct socfpga_dwmac    *dwmac = priv;
-
-       /* On socfpga platform exit, assert and hold reset to the
-        * enet controller - the default state after a hard reset.
-        */
-       if (dwmac->stmmac_rst)
-               reset_control_assert(dwmac->stmmac_rst);
-}
-
-static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
-{
-       struct socfpga_dwmac    *dwmac = priv;
-       struct net_device *ndev = platform_get_drvdata(pdev);
-       struct stmmac_priv *stpriv = NULL;
-       int ret = 0;
-
-       if (ndev)
-               stpriv = netdev_priv(ndev);
-
-       /* Assert reset to the enet controller before changing the phy mode */
-       if (dwmac->stmmac_rst)
-               reset_control_assert(dwmac->stmmac_rst);
-
-       /* Setup the phy mode in the system manager registers according to
-        * devicetree configuration
-        */
-       ret = socfpga_dwmac_setup(dwmac);
 
        /* Deassert reset for the phy configuration to be sampled by
         * the enet controller, and operation to start in requested mode
@@ -220,25 +192,7 @@ static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
        if (dwmac->stmmac_rst)
                reset_control_deassert(dwmac->stmmac_rst);
 
-       /* Before the enet controller is suspended, the phy is suspended.
-        * This causes the phy clock to be gated. The enet controller is
-        * resumed before the phy, so the clock is still gated "off" when
-        * the enet controller is resumed. This code makes sure the phy
-        * is "resumed" before reinitializing the enet controller since
-        * the enet controller depends on an active phy clock to complete
-        * a DMA reset. A DMA reset will "time out" if executed
-        * with no phy clock input on the Synopsys enet controller.
-        * Verified through Synopsys Case #8000711656.
-        *
-        * Note that the phy clock is also gated when the phy is isolated.
-        * Phy "suspend" and "isolate" controls are located in phy basic
-        * control register 0, and can be modified by the phy driver
-        * framework.
-        */
-       if (stpriv && stpriv->phydev)
-               phy_resume(stpriv->phydev);
-
-       return ret;
+       return 0;
 }
 
 static int socfpga_dwmac_probe(struct platform_device *pdev)
@@ -267,23 +221,58 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
                return ret;
        }
 
-       ret = socfpga_dwmac_setup(dwmac);
-       if (ret) {
-               dev_err(dev, "couldn't setup SoC glue (%d)\n", ret);
-               return ret;
-       }
-
        plat_dat->bsp_priv = dwmac;
-       plat_dat->init = socfpga_dwmac_init;
-       plat_dat->exit = socfpga_dwmac_exit;
        plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
 
-       ret = socfpga_dwmac_init(pdev, plat_dat->bsp_priv);
-       if (ret)
-               return ret;
+       ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+       if (!ret) {
+               struct net_device *ndev = platform_get_drvdata(pdev);
+               struct stmmac_priv *stpriv = netdev_priv(ndev);
+
+               /* The socfpga driver needs to control the stmmac reset to
+                * set the phy mode. Create a copy of the core reset handle
+                * so it can be used by the driver later.
+                */
+               dwmac->stmmac_rst = stpriv->stmmac_rst;
+
+               ret = socfpga_dwmac_set_phy_mode(dwmac);
+       }
+
+       return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int socfpga_dwmac_resume(struct device *dev)
+{
+       struct net_device *ndev = dev_get_drvdata(dev);
+       struct stmmac_priv *priv = netdev_priv(ndev);
+
+       socfpga_dwmac_set_phy_mode(priv->plat->bsp_priv);
 
-       return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+       /* Before the enet controller is suspended, the phy is suspended.
+        * This causes the phy clock to be gated. The enet controller is
+        * resumed before the phy, so the clock is still gated "off" when
+        * the enet controller is resumed. This code makes sure the phy
+        * is "resumed" before reinitializing the enet controller since
+        * the enet controller depends on an active phy clock to complete
+        * a DMA reset. A DMA reset will "time out" if executed
+        * with no phy clock input on the Synopsys enet controller.
+        * Verified through Synopsys Case #8000711656.
+        *
+        * Note that the phy clock is also gated when the phy is isolated.
+        * Phy "suspend" and "isolate" controls are located in phy basic
+        * control register 0, and can be modified by the phy driver
+        * framework.
+        */
+       if (priv->phydev)
+               phy_resume(priv->phydev);
+
+       return stmmac_resume(dev);
 }
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(socfpga_dwmac_pm_ops, stmmac_suspend,
+                                              socfpga_dwmac_resume);
 
 static const struct of_device_id socfpga_dwmac_match[] = {
        { .compatible = "altr,socfpga-stmmac" },
@@ -296,7 +285,7 @@ static struct platform_driver socfpga_dwmac_driver = {
        .remove = stmmac_pltfr_remove,
        .driver = {
                .name           = "socfpga-dwmac",
-               .pm             = &stmmac_pltfr_pm_ops,
+               .pm             = &socfpga_dwmac_pm_ops,
                .of_match_table = socfpga_dwmac_match,
        },
 };
index d4952c7a836dbcf4cdd8346e51e41f2c837d858c..4ec7397e7fb378d1d82368c5fb9aff28e5340c6c 100644 (file)
@@ -254,14 +254,7 @@ static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
 {
        unsigned int tdes3 = p->des3;
 
-       if (unlikely(len > BUF_SIZE_16KiB)) {
-               p->des2 |= (((len - BUF_SIZE_16KiB) <<
-                            TDES2_BUFFER2_SIZE_MASK_SHIFT)
-                           & TDES2_BUFFER2_SIZE_MASK)
-                           | (BUF_SIZE_16KiB & TDES2_BUFFER1_SIZE_MASK);
-       } else {
-               p->des2 |= (len & TDES2_BUFFER1_SIZE_MASK);
-       }
+       p->des2 |= (len & TDES2_BUFFER1_SIZE_MASK);
 
        if (is_fs)
                tdes3 |= TDES3_FIRST_DESCRIPTOR;
index ff6750621ff780286b673cdd4573cb8eec0c5237..59ae6088cd226aed73bd2aca25bff5fdd6ee3dde 100644 (file)
@@ -148,9 +148,9 @@ void stmmac_set_ethtool_ops(struct net_device *netdev);
 
 int stmmac_ptp_register(struct stmmac_priv *priv);
 void stmmac_ptp_unregister(struct stmmac_priv *priv);
-int stmmac_resume(struct net_device *ndev);
-int stmmac_suspend(struct net_device *ndev);
-int stmmac_dvr_remove(struct net_device *ndev);
+int stmmac_resume(struct device *dev);
+int stmmac_suspend(struct device *dev);
+int stmmac_dvr_remove(struct device *dev);
 int stmmac_dvr_probe(struct device *device,
                     struct plat_stmmacenet_data *plat_dat,
                     struct stmmac_resources *res);
index 3a13ddd3aac1bdfc48393107a70b407b36061fc6..fd5ab7bfdb765e6a2ae91d3b6e2247c3c9301a49 100644 (file)
@@ -289,10 +289,6 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
            (priv->pcs == STMMAC_PCS_RTBI))
                goto out;
 
-       /* Never init EEE in case of a switch is attached */
-       if (priv->phydev->is_pseudo_fixed_link)
-               goto out;
-
        /* MAC core supports the EEE feature. */
        if (priv->dma_cap.eee) {
                int tx_lpi_timer = priv->tx_lpi_timer;
@@ -774,10 +770,16 @@ static void stmmac_adjust_link(struct net_device *dev)
 
        spin_unlock_irqrestore(&priv->lock, flags);
 
-       /* At this stage, it could be needed to setup the EEE or adjust some
-        * MAC related HW registers.
-        */
-       priv->eee_enabled = stmmac_eee_init(priv);
+       if (phydev->is_pseudo_fixed_link)
+               /* Stop the PHY layer from calling the adjust_link hook when
+                * a switch is attached to the stmmac driver.
+                */
+               phydev->irq = PHY_IGNORE_INTERRUPT;
+       else
+               /* At this stage, init the EEE if supported.
+                * Never called in the fixed_link case.
+                */
+               priv->eee_enabled = stmmac_eee_init(priv);
 }
 
 /**
@@ -868,10 +870,6 @@ static int stmmac_init_phy(struct net_device *dev)
                return -ENODEV;
        }
 
-       /* If attached to a switch, there is no reason to poll phy handler */
-       if (phydev->is_pseudo_fixed_link)
-               phydev->irq = PHY_IGNORE_INTERRUPT;
-
        pr_debug("stmmac_init_phy:  %s: attached to PHY (UID 0x%x)"
                 " Link = %d\n", dev->name, phydev->phy_id, phydev->link);
 
@@ -3352,12 +3350,13 @@ EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
 
 /**
  * stmmac_dvr_remove
- * @ndev: net device pointer
+ * @dev: device pointer
  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
  * changes the link status, releases the DMA descriptor rings.
  */
-int stmmac_dvr_remove(struct net_device *ndev)
+int stmmac_dvr_remove(struct device *dev)
 {
+       struct net_device *ndev = dev_get_drvdata(dev);
        struct stmmac_priv *priv = netdev_priv(ndev);
 
        pr_info("%s:\n\tremoving driver", __func__);
@@ -3383,13 +3382,14 @@ EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
 
 /**
  * stmmac_suspend - suspend callback
- * @ndev: net device pointer
+ * @dev: device pointer
  * Description: this is the function to suspend the device and it is called
  * by the platform driver to stop the network queue, release the resources,
  * program the PMT register (for WoL), clean and release driver resources.
  */
-int stmmac_suspend(struct net_device *ndev)
+int stmmac_suspend(struct device *dev)
 {
+       struct net_device *ndev = dev_get_drvdata(dev);
        struct stmmac_priv *priv = netdev_priv(ndev);
        unsigned long flags;
 
@@ -3432,12 +3432,13 @@ EXPORT_SYMBOL_GPL(stmmac_suspend);
 
 /**
  * stmmac_resume - resume callback
- * @ndev: net device pointer
+ * @dev: device pointer
  * Description: when resume this function is invoked to setup the DMA and CORE
  * in a usable state.
  */
-int stmmac_resume(struct net_device *ndev)
+int stmmac_resume(struct device *dev)
 {
+       struct net_device *ndev = dev_get_drvdata(dev);
        struct stmmac_priv *priv = netdev_priv(ndev);
        unsigned long flags;
 
index 06704ca6f9ca3ec00aea1a4302fd0757a888fb45..3f83c369f56c46f306d84916586e872c05b6067e 100644 (file)
 #define MII_BUSY 0x00000001
 #define MII_WRITE 0x00000002
 
+/* GMAC4 defines */
+#define MII_GMAC4_GOC_SHIFT            2
+#define MII_GMAC4_WRITE                        (1 << MII_GMAC4_GOC_SHIFT)
+#define MII_GMAC4_READ                 (3 << MII_GMAC4_GOC_SHIFT)
+
+#define MII_PHY_ADDR_GMAC4_SHIFT       21
+#define MII_PHY_ADDR_GMAC4_MASK                GENMASK(25, 21)
+#define MII_PHY_REG_GMAC4_SHIFT                16
+#define MII_PHY_REG_GMAC4_MASK         GENMASK(20, 16)
+#define MII_CSR_CLK_GMAC4_SHIFT                8
+#define MII_CSR_CLK_GMAC4_MASK         GENMASK(11, 8)
+
 static int stmmac_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_addr)
 {
        unsigned long curr;
@@ -123,6 +135,80 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
        return stmmac_mdio_busy_wait(priv->ioaddr, mii_address);
 }
 
+/**
+ * stmmac_mdio_read_gmac4
+ * @bus: points to the mii_bus structure
+ * @phyaddr: MII addr reg bits 25-21
+ * @phyreg: MII addr reg bits 20-16
+ * Description: reads data from the MII register of the given phy device
+ * through the GMAC4 MDIO interface.
+ */
+static int stmmac_mdio_read_gmac4(struct mii_bus *bus, int phyaddr, int phyreg)
+{
+       struct net_device *ndev = bus->priv;
+       struct stmmac_priv *priv = netdev_priv(ndev);
+       unsigned int mii_address = priv->hw->mii.addr;
+       unsigned int mii_data = priv->hw->mii.data;
+       int data;
+       u32 value = (((phyaddr << MII_PHY_ADDR_GMAC4_SHIFT) &
+                    (MII_PHY_ADDR_GMAC4_MASK)) |
+                    ((phyreg << MII_PHY_REG_GMAC4_SHIFT) &
+                    (MII_PHY_REG_GMAC4_MASK))) | MII_GMAC4_READ;
+
+       value |= MII_BUSY | ((priv->clk_csr & MII_CSR_CLK_GMAC4_MASK)
+                << MII_CSR_CLK_GMAC4_SHIFT);
+
+       if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+               return -EBUSY;
+
+       writel(value, priv->ioaddr + mii_address);
+
+       if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+               return -EBUSY;
+
+       /* Read the data from the MII data register */
+       data = (int)readl(priv->ioaddr + mii_data);
+
+       return data;
+}
+
+/**
+ * stmmac_mdio_write_gmac4
+ * @bus: points to the mii_bus structure
+ * @phyaddr: MII addr reg bits 25-21
+ * @phyreg: MII addr reg bits 20-16
+ * @phydata: phy data
+ * Description: writes data to the MII register of the given phy device
+ * through the GMAC4 MDIO interface.
+ */
+static int stmmac_mdio_write_gmac4(struct mii_bus *bus, int phyaddr, int phyreg,
+                                  u16 phydata)
+{
+       struct net_device *ndev = bus->priv;
+       struct stmmac_priv *priv = netdev_priv(ndev);
+       unsigned int mii_address = priv->hw->mii.addr;
+       unsigned int mii_data = priv->hw->mii.data;
+
+       u32 value = (((phyaddr << MII_PHY_ADDR_GMAC4_SHIFT) &
+                    (MII_PHY_ADDR_GMAC4_MASK)) |
+                    ((phyreg << MII_PHY_REG_GMAC4_SHIFT) &
+                    (MII_PHY_REG_GMAC4_MASK))) | MII_GMAC4_WRITE;
+
+       value |= MII_BUSY | ((priv->clk_csr & MII_CSR_CLK_GMAC4_MASK)
+                << MII_CSR_CLK_GMAC4_SHIFT);
+
+       /* Wait until any existing MII operation is complete */
+       if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+               return -EBUSY;
+
+       /* Set the MII address register to write */
+       writel(phydata, priv->ioaddr + mii_data);
+       writel(value, priv->ioaddr + mii_address);
+
+       /* Wait until any existing MII operation is complete */
+       return stmmac_mdio_busy_wait(priv->ioaddr, mii_address);
+}
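The masks above place the phy address in bits 25:21 and the register number in bits 20:16 of the MII address register, with the GMAC4 operation code in bits 3:2. A worked packing using only those macros, with phyaddr = 1 and phyreg = 2 chosen for illustration:

/* Worked example of the field packing above (phyaddr = 1, phyreg = 2). */
u32 value = ((1 << MII_PHY_ADDR_GMAC4_SHIFT) & MII_PHY_ADDR_GMAC4_MASK) |
	    ((2 << MII_PHY_REG_GMAC4_SHIFT) & MII_PHY_REG_GMAC4_MASK) |
	    MII_GMAC4_READ;
/* = 0x00200000 | 0x00020000 | 0x0000000c = 0x0022000c, before MII_BUSY
 * and the CSR clock bits are OR'ed in.
 */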
+
 /**
  * stmmac_mdio_reset
  * @bus: points to the mii_bus structure
@@ -180,9 +266,11 @@ int stmmac_mdio_reset(struct mii_bus *bus)
 
        /* This is a workaround for problems with the STE101P PHY.
         * It doesn't complete its reset until at least one clock cycle
-        * on MDC, so perform a dummy mdio read.
+        * on MDC, so perform a dummy mdio read. To be updated for GMAC4
+        * if needed.
         */
-       writel(0, priv->ioaddr + mii_address);
+       if (!priv->plat->has_gmac4)
+               writel(0, priv->ioaddr + mii_address);
 #endif
        return 0;
 }
@@ -217,8 +305,14 @@ int stmmac_mdio_register(struct net_device *ndev)
 #endif
 
        new_bus->name = "stmmac";
-       new_bus->read = &stmmac_mdio_read;
-       new_bus->write = &stmmac_mdio_write;
+       if (priv->plat->has_gmac4) {
+               new_bus->read = &stmmac_mdio_read_gmac4;
+               new_bus->write = &stmmac_mdio_write_gmac4;
+       } else {
+               new_bus->read = &stmmac_mdio_read;
+               new_bus->write = &stmmac_mdio_write;
+       }
+
        new_bus->reset = &stmmac_mdio_reset;
        snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
                 new_bus->name, priv->plat->bus_id);
index ae4388735b7fb560649da51cf823caf4f1dbdbb6..56c8a2342c14744700940db291772e4ec7e1bb9d 100644 (file)
@@ -231,30 +231,10 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
  */
 static void stmmac_pci_remove(struct pci_dev *pdev)
 {
-       struct net_device *ndev = pci_get_drvdata(pdev);
-
-       stmmac_dvr_remove(ndev);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int stmmac_pci_suspend(struct device *dev)
-{
-       struct pci_dev *pdev = to_pci_dev(dev);
-       struct net_device *ndev = pci_get_drvdata(pdev);
-
-       return stmmac_suspend(ndev);
-}
-
-static int stmmac_pci_resume(struct device *dev)
-{
-       struct pci_dev *pdev = to_pci_dev(dev);
-       struct net_device *ndev = pci_get_drvdata(pdev);
-
-       return stmmac_resume(ndev);
+       stmmac_dvr_remove(&pdev->dev);
 }
-#endif
 
-static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume);
+static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_suspend, stmmac_resume);
 
 #define STMMAC_VENDOR_ID 0x700
 #define STMMAC_QUARK_ID  0x0937
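The hunk above shows the payoff of moving the stmmac suspend/resume prototypes to struct device *: the PCI glue can hand the core callbacks straight to SIMPLE_DEV_PM_OPS instead of keeping thin wrappers, and the platform glue below does the same. The conversion relies on the callbacks recovering the net_device themselves; a minimal sketch, assuming (as both pci_set_drvdata() and platform_set_drvdata() do, to our knowledge) that the driver data lives in the common dev_set_drvdata() slot:

/* Bus-agnostic PM callback sketch; my_suspend is an illustrative name,
 * not an stmmac symbol.
 */
static int my_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	netif_device_detach(ndev);	/* quiesce before saving state */
	/* ... stop DMA, program wake-on-LAN, etc. ... */
	return 0;
}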
index effaa4ff5ab7b43a19f0db167e8c94a8a734f425..409db913b117e65b2251f2d1b50ff0f0638ee2f0 100644 (file)
@@ -386,7 +386,7 @@ int stmmac_pltfr_remove(struct platform_device *pdev)
 {
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct stmmac_priv *priv = netdev_priv(ndev);
-       int ret = stmmac_dvr_remove(ndev);
+       int ret = stmmac_dvr_remove(&pdev->dev);
 
        if (priv->plat->exit)
                priv->plat->exit(pdev, priv->plat->bsp_priv);
@@ -410,7 +410,7 @@ static int stmmac_pltfr_suspend(struct device *dev)
        struct stmmac_priv *priv = netdev_priv(ndev);
        struct platform_device *pdev = to_platform_device(dev);
 
-       ret = stmmac_suspend(ndev);
+       ret = stmmac_suspend(dev);
        if (priv->plat->exit)
                priv->plat->exit(pdev, priv->plat->bsp_priv);
 
@@ -433,7 +433,7 @@ static int stmmac_pltfr_resume(struct device *dev)
        if (priv->plat->init)
                priv->plat->init(pdev, priv->plat->bsp_priv);
 
-       return stmmac_resume(ndev);
+       return stmmac_resume(dev);
 }
 #endif /* CONFIG_PM_SLEEP */
 
index 9cc45649f477fbf241d7e3741cd7772b46f1f4c7..a2371aa14a49f840702b5867f5ad80f6b520ee16 100644 (file)
@@ -6431,7 +6431,7 @@ static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 
 static void niu_netif_stop(struct niu *np)
 {
-       np->dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(np->dev);    /* prevent tx timeout */
 
        niu_disable_napi(np);
 
index 2437227712dcd0ee5fd0404113e35527646a8b76..d6ad0fbd054e10bf34d48d2f522b0660781de02d 100644 (file)
@@ -226,7 +226,7 @@ static void gem_put_cell(struct gem *gp)
 
 static inline void gem_netif_stop(struct gem *gp)
 {
-       gp->dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(gp->dev);    /* prevent tx timeout */
        napi_disable(&gp->napi);
        netif_tx_disable(gp->dev);
 }
index af11ed1e0bcc09b4ed4fa5e94df8945fab4d581e..158213cd6cddf98c4f2f511a40e31487e3a70ac0 100644 (file)
@@ -949,7 +949,7 @@ static void dwceqos_adjust_link(struct net_device *ndev)
 
        if (status_change) {
                if (phydev->link) {
-                       lp->ndev->trans_start = jiffies;
+                       netif_trans_update(lp->ndev);
                        dwceqos_link_up(lp);
                } else {
                        dwceqos_link_down(lp);
@@ -2203,7 +2203,7 @@ static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        netdev_sent_queue(ndev, skb->len);
        spin_unlock_bh(&lp->tx_lock);
 
-       ndev->trans_start = jiffies;
+       netif_trans_update(ndev);
        return 0;
 
 tx_error:
index 14c9d1baa85cebb9c531ae3cf74d87d8e1cb06fb..7452b5f9d02427469e58caf516c02b6de4a44a4e 100644 (file)
@@ -1610,7 +1610,6 @@ static inline int bdx_tx_space(struct bdx_priv *priv)
  * o NETDEV_TX_BUSY Cannot transmit packet, try later
  *   Usually a bug, means queue start/stop flow control is broken in
  *   the driver. Note: the driver must NOT put the skb in its DMA ring.
- * o NETDEV_TX_LOCKED Locking failed, please retry quickly.
  */
 static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
                                   struct net_device *ndev)
@@ -1630,12 +1629,7 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
 
        ENTER;
        local_irq_save(flags);
-       if (!spin_trylock(&priv->tx_lock)) {
-               local_irq_restore(flags);
-               DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n",
-                   BDX_DRV_NAME, ndev->name);
-               return NETDEV_TX_LOCKED;
-       }
+       spin_lock(&priv->tx_lock);
 
        /* build tx descriptor */
        BDX_ASSERT(f->m.wptr >= f->m.memsz);    /* started with valid wptr */
@@ -1707,7 +1701,7 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
 
 #endif
 #ifdef BDX_LLTX
-       ndev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
+       netif_trans_update(ndev); /* NETIF_F_LLTX driver :( */
 #endif
        ndev->stats.tx_packets++;
        ndev->stats.tx_bytes += skb->len;
index 42fdfd4d9d4f9873c8b26f1518aa56b3c5e719b0..4b08a2f52b3e6074218fb6ba876a639855818d3d 100644 (file)
@@ -367,7 +367,6 @@ struct cpsw_priv {
        spinlock_t                      lock;
        struct platform_device          *pdev;
        struct net_device               *ndev;
-       struct device_node              *phy_node;
        struct napi_struct              napi_rx;
        struct napi_struct              napi_tx;
        struct device                   *dev;
@@ -381,7 +380,6 @@ struct cpsw_priv {
        u32                             coal_intvl;
        u32                             bus_freq_mhz;
        int                             rx_packet_max;
-       int                             host_port;
        struct clk                      *clk;
        u8                              mac_addr[ETH_ALEN];
        struct cpsw_slave               *slaves;
@@ -531,21 +529,18 @@ static const struct cpsw_stats cpsw_gstrings_stats[] = {
                        int slave_port = cpsw_get_slave_port(priv,      \
                                                slave->slave_num);      \
                        cpsw_ale_add_mcast(priv->ale, addr,             \
-                               1 << slave_port | 1 << priv->host_port, \
+                               1 << slave_port | ALE_PORT_HOST,        \
                                ALE_VLAN, slave->port_vlan, 0);         \
                } else {                                                \
                        cpsw_ale_add_mcast(priv->ale, addr,             \
-                               ALE_ALL_PORTS << priv->host_port,       \
+                               ALE_ALL_PORTS,                          \
                                0, 0, 0);                               \
                }                                                       \
        } while (0)
 
 static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
 {
-       if (priv->host_port == 0)
-               return slave_num + 1;
-       else
-               return slave_num;
+       return slave_num + 1;
 }
 
 static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
@@ -602,8 +597,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
                        cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
 
                        /* Clear all mcast from ALE */
-                       cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS <<
-                                                priv->host_port, -1);
+                       cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);
 
                        /* Flood All Unicast Packets to Host port */
                        cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
@@ -648,8 +642,7 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
        cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI);
 
        /* Clear all mcast from ALE */
-       cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port,
-                                vid);
+       cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS, vid);
 
        if (!netdev_mc_empty(ndev)) {
                struct netdev_hw_addr *ha;
@@ -1092,7 +1085,7 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
                struct cpsw_priv *priv, struct cpsw_slave *slave,
                u32 slave_port)
 {
-       u32 port_mask = 1 << slave_port | 1 << priv->host_port;
+       u32 port_mask = 1 << slave_port | ALE_PORT_HOST;
 
        if (priv->version == CPSW_VERSION_1)
                slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
@@ -1103,7 +1096,7 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
        cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
                           port_mask, ALE_VLAN, slave->port_vlan, 0);
        cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
-               priv->host_port, ALE_VLAN | ALE_SECURE, slave->port_vlan);
+               HOST_PORT_NUM, ALE_VLAN | ALE_SECURE, slave->port_vlan);
 }
 
 static void soft_reset_slave(struct cpsw_slave *slave)
@@ -1148,31 +1141,39 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
                cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
                                   1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
 
-       if (priv->phy_node)
-               slave->phy = of_phy_connect(priv->ndev, priv->phy_node,
+       if (slave->data->phy_node) {
+               slave->phy = of_phy_connect(priv->ndev, slave->data->phy_node,
                                 &cpsw_adjust_link, 0, slave->data->phy_if);
-       else
+               if (!slave->phy) {
+                       dev_err(priv->dev, "phy \"%s\" not found on slave %d\n",
+                               slave->data->phy_node->full_name,
+                               slave->slave_num);
+                       return;
+               }
+       } else {
                slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
                                 &cpsw_adjust_link, slave->data->phy_if);
-       if (IS_ERR(slave->phy)) {
-               dev_err(priv->dev, "phy %s not found on slave %d\n",
-                       slave->data->phy_id, slave->slave_num);
-               slave->phy = NULL;
-       } else {
-               phy_attached_info(slave->phy);
+               if (IS_ERR(slave->phy)) {
+                       dev_err(priv->dev,
+                               "phy \"%s\" not found on slave %d, err %ld\n",
+                               slave->data->phy_id, slave->slave_num,
+                               PTR_ERR(slave->phy));
+                       slave->phy = NULL;
+                       return;
+               }
+       }
 
-               phy_start(slave->phy);
+       phy_attached_info(slave->phy);
 
-               /* Configure GMII_SEL register */
-               cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface,
-                            slave->slave_num);
-       }
+       phy_start(slave->phy);
+
+       /* Configure GMII_SEL register */
+       cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface, slave->slave_num);
 }
 
 static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
 {
        const int vlan = priv->data.default_vlan;
-       const int port = priv->host_port;
        u32 reg;
        int i;
        int unreg_mcast_mask;
@@ -1190,9 +1191,9 @@ static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
        else
                unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
 
-       cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port,
-                         ALE_ALL_PORTS << port, ALE_ALL_PORTS << port,
-                         unreg_mcast_mask << port);
+       cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS,
+                         ALE_ALL_PORTS, ALE_ALL_PORTS,
+                         unreg_mcast_mask);
 }
 
 static void cpsw_init_host_port(struct cpsw_priv *priv)
@@ -1205,7 +1206,7 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
        cpsw_ale_start(priv->ale);
 
        /* switch to vlan unaware mode */
-       cpsw_ale_control_set(priv->ale, priv->host_port, ALE_VLAN_AWARE,
+       cpsw_ale_control_set(priv->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
                             CPSW_ALE_VLAN_AWARE);
        control_reg = readl(&priv->regs->control);
        control_reg |= CPSW_VLAN_AWARE;
@@ -1219,14 +1220,14 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
                     &priv->host_port_regs->cpdma_tx_pri_map);
        __raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);
 
-       cpsw_ale_control_set(priv->ale, priv->host_port,
+       cpsw_ale_control_set(priv->ale, HOST_PORT_NUM,
                             ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
 
        if (!priv->data.dual_emac) {
-               cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port,
+               cpsw_ale_add_ucast(priv->ale, priv->mac_addr, HOST_PORT_NUM,
                                   0, 0);
                cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
-                                  1 << priv->host_port, 0, 0, ALE_MCAST_FWD_2);
+                                  ALE_PORT_HOST, 0, 0, ALE_MCAST_FWD_2);
        }
 }
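
[Review note: the mask simplifications in these cpsw hunks rely on the host port index being fixed at zero; a minimal sketch of the assumed equivalence, with the constants reproduced from cpsw.c/cpsw_ale.h as I understand them:]

/* Sketch, assuming HOST_PORT_NUM == 0 as elsewhere in this series:
 * shifting any port mask by priv->host_port was always a no-op, so
 * the cached priv->host_port field can be dropped outright.
 */
#define HOST_PORT_NUM   0
#define ALE_PORT_HOST   BIT(HOST_PORT_NUM)      /* == 1 << 0 */
#define ALE_ALL_PORTS   0x7                     /* host + two slave ports */

/* (ALE_ALL_PORTS << HOST_PORT_NUM) == ALE_ALL_PORTS */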
 
@@ -1251,12 +1252,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
        int i, ret;
        u32 reg;
 
+       pm_runtime_get_sync(&priv->pdev->dev);
+
        if (!cpsw_common_res_usage_state(priv))
                cpsw_intr_disable(priv);
        netif_carrier_off(ndev);
 
-       pm_runtime_get_sync(&priv->pdev->dev);
-
        reg = priv->version;
 
        dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
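
[Review note: the pm_runtime_get_sync() move above matters because cpsw_intr_disable() touches module registers; the required ordering, sketched:]

/* Runtime PM must resume (clock) the module before the first register
 * access, otherwise the interrupt-disable writes can hit a gated module:
 */
pm_runtime_get_sync(&priv->pdev->dev);  /* clocks on first...         */
if (!cpsw_common_res_usage_state(priv))
        cpsw_intr_disable(priv);        /* ...then touch the hardware */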
@@ -1273,8 +1274,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
                cpsw_add_default_vlan(priv);
        else
                cpsw_ale_add_vlan(priv->ale, priv->data.default_vlan,
-                                 ALE_ALL_PORTS << priv->host_port,
-                                 ALE_ALL_PORTS << priv->host_port, 0, 0);
+                                 ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);
 
        if (!cpsw_common_res_usage_state(priv)) {
                struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0);
@@ -1389,7 +1389,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
        struct cpsw_priv *priv = netdev_priv(ndev);
        int ret;
 
-       ndev->trans_start = jiffies;
+       netif_trans_update(ndev);
 
        if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
                cpsw_err(priv, tx_err, "packet pad failed\n");
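
[Review note: the recurring `ndev->trans_start = jiffies` conversions throughout this pull move the tx watchdog timestamp into per-queue state; a sketch of the helper, as defined in include/linux/netdevice.h around this release (quoted from memory, so treat as approximate):]

static inline void netif_trans_update(struct net_device *dev)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

        if (txq->trans_start != jiffies)
                txq->trans_start = jiffies;
}

/* readers correspondingly switch to dev_trans_start(dev) */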
@@ -1620,9 +1620,9 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
                flags = ALE_VLAN;
        }
 
-       cpsw_ale_del_ucast(priv->ale, priv->mac_addr, priv->host_port,
+       cpsw_ale_del_ucast(priv->ale, priv->mac_addr, HOST_PORT_NUM,
                           flags, vid);
-       cpsw_ale_add_ucast(priv->ale, addr->sa_data, priv->host_port,
+       cpsw_ale_add_ucast(priv->ale, addr->sa_data, HOST_PORT_NUM,
                           flags, vid);
 
        memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
@@ -1666,12 +1666,12 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
        }
 
        ret = cpsw_ale_add_vlan(priv->ale, vid, port_mask, 0, port_mask,
-                               unreg_mcast_mask << priv->host_port);
+                               unreg_mcast_mask);
        if (ret != 0)
                return ret;
 
        ret = cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
-                                priv->host_port, ALE_VLAN, vid);
+                                HOST_PORT_NUM, ALE_VLAN, vid);
        if (ret != 0)
                goto clean_vid;
 
@@ -1683,7 +1683,7 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
 
 clean_vlan_ucast:
        cpsw_ale_del_ucast(priv->ale, priv->mac_addr,
-                           priv->host_port, ALE_VLAN, vid);
+                          HOST_PORT_NUM, ALE_VLAN, vid);
 clean_vid:
        cpsw_ale_del_vlan(priv->ale, vid, 0);
        return ret;
@@ -1738,7 +1738,7 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
                return ret;
 
        ret = cpsw_ale_del_ucast(priv->ale, priv->mac_addr,
-                                priv->host_port, ALE_VLAN, vid);
+                                HOST_PORT_NUM, ALE_VLAN, vid);
        if (ret != 0)
                return ret;
 
@@ -1940,12 +1940,11 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
        slave->port_vlan = data->dual_emac_res_vlan;
 }
 
-static int cpsw_probe_dt(struct cpsw_priv *priv,
+static int cpsw_probe_dt(struct cpsw_platform_data *data,
                         struct platform_device *pdev)
 {
        struct device_node *node = pdev->dev.of_node;
        struct device_node *slave_node;
-       struct cpsw_platform_data *data = &priv->data;
        int i = 0, ret;
        u32 prop;
 
@@ -2033,25 +2032,21 @@ static int cpsw_probe_dt(struct cpsw_priv *priv,
                if (strcmp(slave_node->name, "slave"))
                        continue;
 
-               priv->phy_node = of_parse_phandle(slave_node, "phy-handle", 0);
+               slave_data->phy_node = of_parse_phandle(slave_node,
+                                                       "phy-handle", 0);
                parp = of_get_property(slave_node, "phy_id", &lenp);
-               if (of_phy_is_fixed_link(slave_node)) {
-                       struct device_node *phy_node;
-                       struct phy_device *phy_dev;
-
+               if (slave_data->phy_node) {
+                       dev_dbg(&pdev->dev,
+                               "slave[%d] using phy-handle=\"%s\"\n",
+                               i, slave_data->phy_node->full_name);
+               } else if (of_phy_is_fixed_link(slave_node)) {
                        /* In the case of a fixed PHY, the DT node associated
                         * with the PHY is the Ethernet MAC DT node.
                         */
                        ret = of_phy_register_fixed_link(slave_node);
                        if (ret)
                                return ret;
-                       phy_node = of_node_get(slave_node);
-                       phy_dev = of_phy_find_device(phy_node);
-                       if (!phy_dev)
-                               return -ENODEV;
-                       snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
-                                PHY_ID_FMT, phy_dev->mdio.bus->id,
-                                phy_dev->mdio.addr);
+                       slave_data->phy_node = of_node_get(slave_node);
                } else if (parp) {
                        u32 phyid;
                        struct device_node *mdio_node;
@@ -2072,7 +2067,9 @@ static int cpsw_probe_dt(struct cpsw_priv *priv,
                        snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
                                 PHY_ID_FMT, mdio->name, phyid);
                } else {
-                       dev_err(&pdev->dev, "No slave[%d] phy_id or fixed-link property\n", i);
+                       dev_err(&pdev->dev,
+                               "No slave[%d] phy_id, phy-handle, or fixed-link property\n",
+                               i);
                        goto no_phy_slave;
                }
                slave_data->phy_if = of_get_phy_mode(slave_node);
@@ -2152,7 +2149,6 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
        priv_sl2->bus_freq_mhz = priv->bus_freq_mhz;
 
        priv_sl2->regs = priv->regs;
-       priv_sl2->host_port = priv->host_port;
        priv_sl2->host_port_regs = priv->host_port_regs;
        priv_sl2->wr_regs = priv->wr_regs;
        priv_sl2->hw_stats = priv->hw_stats;
@@ -2275,7 +2271,7 @@ static int cpsw_probe(struct platform_device *pdev)
        /* Select default pin state */
        pinctrl_pm_select_default_state(&pdev->dev);
 
-       if (cpsw_probe_dt(priv, pdev)) {
+       if (cpsw_probe_dt(&priv->data, pdev)) {
                dev_err(&pdev->dev, "cpsw: platform data missing\n");
                ret = -ENODEV;
                goto clean_runtime_disable_ret;
@@ -2321,7 +2317,6 @@ static int cpsw_probe(struct platform_device *pdev)
                goto clean_runtime_disable_ret;
        }
        priv->regs = ss_regs;
-       priv->host_port = HOST_PORT_NUM;
 
        /* Need to enable clocks with runtime PM api to access module
         * registers
index 442a7038e660c877b1cc7509efab7df214a25a87..e50afd1b2eda09d87a94b100646d774de52f32b5 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/phy.h>
 
 struct cpsw_slave_data {
+       struct device_node *phy_node;
        char            phy_id[MII_BUS_ID_SIZE];
        int             phy_if;
        u8              mac_addr[ETH_ALEN];
index 5d9abedd6b757ca65000e5dea8155acf0eac9412..f56d66e6ec155194e1f432ee42950d9db915c697 100644 (file)
@@ -1512,7 +1512,10 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
 
        /* TODO: Add phy read and write and private statistics get feature */
 
-       return phy_mii_ioctl(priv->phydev, ifrq, cmd);
+       if (priv->phydev)
+               return phy_mii_ioctl(priv->phydev, ifrq, cmd);
+       else
+               return -EOPNOTSUPP;
 }
 
 static int match_first_device(struct device *dev, void *data)
@@ -1878,8 +1881,6 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
                pdata->hw_ram_addr = auxdata->hw_ram_addr;
        }
 
-       pdev->dev.platform_data = pdata;
-
        return  pdata;
 }
 
@@ -2101,6 +2102,7 @@ static int davinci_emac_remove(struct platform_device *pdev)
        cpdma_ctlr_destroy(priv->dma);
 
        unregister_netdev(ndev);
+       pm_runtime_disable(&pdev->dev);
        free_netdev(ndev);
 
        return 0;
index 1d0942c531209ea5b581ede060398b33fa7d61a7..32516661f180bcc1ad7e3bb37a8d130c717290bd 100644 (file)
@@ -1272,7 +1272,7 @@ static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        if (ret)
                goto drop;
 
-       ndev->trans_start = jiffies;
+       netif_trans_update(ndev);
 
        /* Check Tx pool count & stop subqueue if needed */
        desc_count = knav_pool_count(netcp->tx_pool);
@@ -1788,7 +1788,7 @@ static void netcp_ndo_tx_timeout(struct net_device *ndev)
 
        dev_err(netcp->ndev_dev, "transmit timed out tx descs(%d)\n", descs);
        netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);
-       ndev->trans_start = jiffies;
+       netif_trans_update(ndev);
        netif_tx_wake_all_queues(ndev);
 }
 
index a274cd49afe954442bbd3bf647be71a92d0087ee..561703317312e2bbcc1047eeb7f2271b0895be7a 100644 (file)
@@ -1007,7 +1007,7 @@ static void tlan_tx_timeout(struct net_device *dev)
        tlan_reset_lists(dev);
        tlan_read_and_clear_stats(dev, TLAN_IGNORE);
        tlan_reset_adapter(dev);
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        netif_wake_queue(dev);
 
 }
index 298e059d04986b90b3132d453d453d30f8abe062..922a443e3415bf0656ff7c989412958701d79f96 100644 (file)
@@ -1883,7 +1883,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
 
 
        /* Save the timestamp. */
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
 
 
 #ifdef TILE_NET_PARANOIA
@@ -2026,7 +2026,7 @@ static void tile_net_tx_timeout(struct net_device *dev)
 {
        PDEBUG("tile_net_tx_timeout()\n");
        PDEBUG("Transmit timeout at %ld, latency %ld\n", jiffies,
-              jiffies - dev->trans_start);
+              jiffies - dev_trans_start(dev));
 
        /* XXX: ISSUE: This doesn't seem useful for us. */
        netif_wake_queue(dev);
index 13214a6492ac5b1eced4d39c21b7736f5dcf19d4..743b18266a7c2b2be2b33f2d3f7fea0f9b80e361 100644 (file)
@@ -1622,7 +1622,7 @@ static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl)
                        continue;
 
                /* copy hw scan info */
-               memcpy(target->hwinfo, scan_info, scan_info->size);
+               memcpy(target->hwinfo, scan_info, be16_to_cpu(scan_info->size));
                target->essid_len = strnlen(scan_info->essid,
                                            sizeof(scan_info->essid));
                target->rate_len = 0;
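
[Review note: the gelic change above is an endianness fix: `scan_info->size` is declared `__be16`, so it must be converted before use as a memcpy length; on the big-endian Cell the conversion is a no-op, but the raw field would be byte-swapped garbage on a little-endian build. A minimal sketch:]

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical wire header, for illustration only: */
struct hw_record {
        __be16 size;            /* record length, big-endian on the wire */
};

static size_t record_len(const struct hw_record *rec)
{
        /* be16_to_cpu() is a no-op on big-endian CPUs and a byte swap on
         * little-endian ones; raw use would read 0x0040 as 0x4000 there.
         */
        return be16_to_cpu(rec->size);
}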
index 67610270d171a69d5e1ecf5e0c027fe98d053bba..36a6e8b54d9415d65bf1b6d2804a53949372eaa6 100644 (file)
@@ -705,7 +705,7 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
        wmb();
        descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
 
-       card->netdev->trans_start = jiffies; /* set netdev watchdog timer */
+       netif_trans_update(card->netdev); /* set netdev watchdog timer */
        return 0;
 }
 
index 520cf50a3d5a14ed7621e8bd280ea2d0059ed8c1..01a77145a0fa487518b6233d5760d3a0a8866323 100644 (file)
@@ -1314,7 +1314,8 @@ static int tsi108_open(struct net_device *dev)
        data->txring = dma_zalloc_coherent(NULL, txring_size, &data->txdma,
                                           GFP_KERNEL);
        if (!data->txring) {
-               pci_free_consistent(0, rxring_size, data->rxring, data->rxdma);
+               pci_free_consistent(NULL, rxring_size, data->rxring,
+                                   data->rxdma);
                return -ENOMEM;
        }
 
index 2b7550c43f7800fe36e54fafacabfcd921c408f4..9d14731cdcb10ff28d839694ba7b3113d505708a 100644 (file)
@@ -1758,7 +1758,7 @@ static void rhine_reset_task(struct work_struct *work)
 
        spin_unlock_bh(&rp->lock);
 
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        dev->stats.tx_errors++;
        netif_wake_queue(dev);
 
index f98b91d21f33303ad6bdcb3c35af0bffe7af7641..1981e88c18dc4973825078bf02bfc45dd80da0e7 100644 (file)
@@ -69,4 +69,18 @@ config WIZNET_BUS_ANY
          Performance may decrease compared to explicitly selected bus mode.
 endchoice
 
+config WIZNET_W5100_SPI
+       tristate "WIZnet W5100/W5200/W5500 Ethernet support for SPI mode"
+       depends on WIZNET_BUS_ANY && WIZNET_W5100
+       depends on SPI
+       ---help---
+         In SPI mode, the host system accesses registers using the SPI
+         protocol (mode 0) on the SPI bus.
+
+         Performance decreases compared to the other bus interface modes.
+         In W5100 SPI mode, burst READ/WRITE processing is not provided.
+
+         To compile this driver as a module, choose M here: the module
+         will be called w5100-spi.
+
 endif # NET_VENDOR_WIZNET
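
[Review note: a minimal sketch of wiring the new SPI-mode driver up from board code; the bus number, chip select, clock rate, and IRQ below are placeholders, not values taken from this patch:]

#include <linux/init.h>
#include <linux/spi/spi.h>

static struct spi_board_info wiznet_spi_info __initdata = {
        .modalias     = "w5500",                /* also "w5100" or "w5200" */
        .max_speed_hz = 10 * 1000 * 1000,       /* placeholder */
        .bus_num      = 0,                      /* placeholder */
        .chip_select  = 0,                      /* placeholder */
        .mode         = SPI_MODE_0,             /* per the help text above */
        .irq          = 42,                     /* placeholder INTn line   */
};

/* from machine init: spi_register_board_info(&wiznet_spi_info, 1); */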
index c614535227e85c1ee61d74eb31b80029e924fa4c..1e05e1a842086f5e2dd6e121aa6506b2f858ab10 100644 (file)
@@ -1,2 +1,3 @@
 obj-$(CONFIG_WIZNET_W5100) += w5100.o
+obj-$(CONFIG_WIZNET_W5100_SPI) += w5100-spi.o
 obj-$(CONFIG_WIZNET_W5300) += w5300.o
diff --git a/drivers/net/ethernet/wiznet/w5100-spi.c b/drivers/net/ethernet/wiznet/w5100-spi.c
new file mode 100644 (file)
index 0000000..b868e45
--- /dev/null
@@ -0,0 +1,464 @@
+/*
+ * Ethernet driver for the WIZnet W5100/W5200/W5500 chip.
+ *
+ * Copyright (C) 2016 Akinobu Mita <akinobu.mita@gmail.com>
+ *
+ * Licensed under the GPL-2 or later.
+ *
+ * Datasheet:
+ * http://www.wiznet.co.kr/wp-content/uploads/wiznethome/Chip/W5100/Document/W5100_Datasheet_v1.2.6.pdf
+ * http://wiznethome.cafe24.com/wp-content/uploads/wiznethome/Chip/W5200/Documents/W5200_DS_V140E.pdf
+ * http://wizwiki.net/wiki/lib/exe/fetch.php?media=products:w5500:w5500_ds_v106e_141230.pdf
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/spi/spi.h>
+
+#include "w5100.h"
+
+#define W5100_SPI_WRITE_OPCODE 0xf0
+#define W5100_SPI_READ_OPCODE 0x0f
+
+static int w5100_spi_read(struct net_device *ndev, u32 addr)
+{
+       struct spi_device *spi = to_spi_device(ndev->dev.parent);
+       u8 cmd[3] = { W5100_SPI_READ_OPCODE, addr >> 8, addr & 0xff };
+       u8 data;
+       int ret;
+
+       ret = spi_write_then_read(spi, cmd, sizeof(cmd), &data, 1);
+
+       return ret ? ret : data;
+}
+
+static int w5100_spi_write(struct net_device *ndev, u32 addr, u8 data)
+{
+       struct spi_device *spi = to_spi_device(ndev->dev.parent);
+       u8 cmd[4] = { W5100_SPI_WRITE_OPCODE, addr >> 8, addr & 0xff, data};
+
+       return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0);
+}
+
+static int w5100_spi_read16(struct net_device *ndev, u32 addr)
+{
+       u16 data;
+       int ret;
+
+       ret = w5100_spi_read(ndev, addr);
+       if (ret < 0)
+               return ret;
+       data = ret << 8;
+       ret = w5100_spi_read(ndev, addr + 1);
+
+       return ret < 0 ? ret : data | ret;
+}
+
+static int w5100_spi_write16(struct net_device *ndev, u32 addr, u16 data)
+{
+       int ret;
+
+       ret = w5100_spi_write(ndev, addr, data >> 8);
+       if (ret)
+               return ret;
+
+       return w5100_spi_write(ndev, addr + 1, data & 0xff);
+}
+
+static int w5100_spi_readbulk(struct net_device *ndev, u32 addr, u8 *buf,
+                             int len)
+{
+       int i;
+
+       for (i = 0; i < len; i++) {
+               int ret = w5100_spi_read(ndev, addr + i);
+
+               if (ret < 0)
+                       return ret;
+               buf[i] = ret;
+       }
+
+       return 0;
+}
+
+static int w5100_spi_writebulk(struct net_device *ndev, u32 addr, const u8 *buf,
+                              int len)
+{
+       int i;
+
+       for (i = 0; i < len; i++) {
+               int ret = w5100_spi_write(ndev, addr + i, buf[i]);
+
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static const struct w5100_ops w5100_spi_ops = {
+       .may_sleep = true,
+       .chip_id = W5100,
+       .read = w5100_spi_read,
+       .write = w5100_spi_write,
+       .read16 = w5100_spi_read16,
+       .write16 = w5100_spi_write16,
+       .readbulk = w5100_spi_readbulk,
+       .writebulk = w5100_spi_writebulk,
+};
+
+#define W5200_SPI_WRITE_OPCODE 0x80
+
+struct w5200_spi_priv {
+       /* Serialize access to cmd_buf */
+       struct mutex cmd_lock;
+
+       /* DMA (thus cache coherency maintenance) requires the
+        * transfer buffers to live in their own cache lines.
+        */
+       u8 cmd_buf[4] ____cacheline_aligned;
+};
+
+static struct w5200_spi_priv *w5200_spi_priv(struct net_device *ndev)
+{
+       return w5100_ops_priv(ndev);
+}
+
+static int w5200_spi_init(struct net_device *ndev)
+{
+       struct w5200_spi_priv *spi_priv = w5200_spi_priv(ndev);
+
+       mutex_init(&spi_priv->cmd_lock);
+
+       return 0;
+}
+
+static int w5200_spi_read(struct net_device *ndev, u32 addr)
+{
+       struct spi_device *spi = to_spi_device(ndev->dev.parent);
+       u8 cmd[4] = { addr >> 8, addr & 0xff, 0, 1 };
+       u8 data;
+       int ret;
+
+       ret = spi_write_then_read(spi, cmd, sizeof(cmd), &data, 1);
+
+       return ret ? ret : data;
+}
+
+static int w5200_spi_write(struct net_device *ndev, u32 addr, u8 data)
+{
+       struct spi_device *spi = to_spi_device(ndev->dev.parent);
+       u8 cmd[5] = { addr >> 8, addr & 0xff, W5200_SPI_WRITE_OPCODE, 1, data };
+
+       return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0);
+}
+
+static int w5200_spi_read16(struct net_device *ndev, u32 addr)
+{
+       struct spi_device *spi = to_spi_device(ndev->dev.parent);
+       u8 cmd[4] = { addr >> 8, addr & 0xff, 0, 2 };
+       __be16 data;
+       int ret;
+
+       ret = spi_write_then_read(spi, cmd, sizeof(cmd), &data, sizeof(data));
+
+       return ret ? ret : be16_to_cpu(data);
+}
+
+static int w5200_spi_write16(struct net_device *ndev, u32 addr, u16 data)
+{
+       struct spi_device *spi = to_spi_device(ndev->dev.parent);
+       u8 cmd[6] = {
+               addr >> 8, addr & 0xff,
+               W5200_SPI_WRITE_OPCODE, 2,
+               data >> 8, data & 0xff
+       };
+
+       return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0);
+}
+
+static int w5200_spi_readbulk(struct net_device *ndev, u32 addr, u8 *buf,
+                             int len)
+{
+       struct spi_device *spi = to_spi_device(ndev->dev.parent);
+       struct w5200_spi_priv *spi_priv = w5200_spi_priv(ndev);
+       struct spi_transfer xfer[] = {
+               {
+                       .tx_buf = spi_priv->cmd_buf,
+                       .len = sizeof(spi_priv->cmd_buf),
+               },
+               {
+                       .rx_buf = buf,
+                       .len = len,
+               },
+       };
+       int ret;
+
+       mutex_lock(&spi_priv->cmd_lock);
+
+       spi_priv->cmd_buf[0] = addr >> 8;
+       spi_priv->cmd_buf[1] = addr;
+       spi_priv->cmd_buf[2] = len >> 8;
+       spi_priv->cmd_buf[3] = len;
+       ret = spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
+
+       mutex_unlock(&spi_priv->cmd_lock);
+
+       return ret;
+}
+
+static int w5200_spi_writebulk(struct net_device *ndev, u32 addr, const u8 *buf,
+                              int len)
+{
+       struct spi_device *spi = to_spi_device(ndev->dev.parent);
+       struct w5200_spi_priv *spi_priv = w5200_spi_priv(ndev);
+       struct spi_transfer xfer[] = {
+               {
+                       .tx_buf = spi_priv->cmd_buf,
+                       .len = sizeof(spi_priv->cmd_buf),
+               },
+               {
+                       .tx_buf = buf,
+                       .len = len,
+               },
+       };
+       int ret;
+
+       mutex_lock(&spi_priv->cmd_lock);
+
+       spi_priv->cmd_buf[0] = addr >> 8;
+       spi_priv->cmd_buf[1] = addr;
+       spi_priv->cmd_buf[2] = W5200_SPI_WRITE_OPCODE | (len >> 8);
+       spi_priv->cmd_buf[3] = len;
+       ret = spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
+
+       mutex_unlock(&spi_priv->cmd_lock);
+
+       return ret;
+}
+
+static const struct w5100_ops w5200_ops = {
+       .may_sleep = true,
+       .chip_id = W5200,
+       .read = w5200_spi_read,
+       .write = w5200_spi_write,
+       .read16 = w5200_spi_read16,
+       .write16 = w5200_spi_write16,
+       .readbulk = w5200_spi_readbulk,
+       .writebulk = w5200_spi_writebulk,
+       .init = w5200_spi_init,
+};
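
[Review note: reconstructed from the helpers above, the W5200 SPI frame layout these ops assume (per my reading of the W5200 datasheet) is:]

/*
 * W5200 SPI frame (sketch):
 *
 *   byte 0: address[15:8]
 *   byte 1: address[7:0]
 *   byte 2: R/W flag in bit 7 (0 = read, 1 = write, i.e.
 *           W5200_SPI_WRITE_OPCODE) OR'ed with length[14:8]
 *   byte 3: length[7:0]
 *   byte 4...: write payload, or clocked-out read data
 */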
+
+#define W5500_SPI_BLOCK_SELECT(addr) (((addr) >> 16) & 0x1f)
+#define W5500_SPI_READ_CONTROL(addr) (W5500_SPI_BLOCK_SELECT(addr) << 3)
+#define W5500_SPI_WRITE_CONTROL(addr)  \
+       ((W5500_SPI_BLOCK_SELECT(addr) << 3) | BIT(2))
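
[Review note: a worked example of the encoded-address scheme behind these macros; 0x10001 is Socket 0 register offset 1, i.e. W5500_S0_REGS + 1 as defined later in the w5100.c hunks:]

/* addr = 0x10001: offset 0x0001 inside block 1 (socket 0 registers)  */
/* W5500_SPI_BLOCK_SELECT(0x10001) == (0x10001 >> 16) & 0x1f == 0x01  */
/* W5500_SPI_READ_CONTROL(0x10001)  == 0x01 << 3            == 0x08   */
/* W5500_SPI_WRITE_CONTROL(0x10001) == (0x01 << 3) | BIT(2) == 0x0c   */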
+
+struct w5500_spi_priv {
+       /* Serialize access to cmd_buf */
+       struct mutex cmd_lock;
+
+       /* DMA (thus cache coherency maintenance) requires the
+        * transfer buffers to live in their own cache lines.
+        */
+       u8 cmd_buf[3] ____cacheline_aligned;
+};
+
+static struct w5500_spi_priv *w5500_spi_priv(struct net_device *ndev)
+{
+       return w5100_ops_priv(ndev);
+}
+
+static int w5500_spi_init(struct net_device *ndev)
+{
+       struct w5500_spi_priv *spi_priv = w5500_spi_priv(ndev);
+
+       mutex_init(&spi_priv->cmd_lock);
+
+       return 0;
+}
+
+static int w5500_spi_read(struct net_device *ndev, u32 addr)
+{
+       struct spi_device *spi = to_spi_device(ndev->dev.parent);
+       u8 cmd[3] = {
+               addr >> 8,
+               addr,
+               W5500_SPI_READ_CONTROL(addr)
+       };
+       u8 data;
+       int ret;
+
+       ret = spi_write_then_read(spi, cmd, sizeof(cmd), &data, 1);
+
+       return ret ? ret : data;
+}
+
+static int w5500_spi_write(struct net_device *ndev, u32 addr, u8 data)
+{
+       struct spi_device *spi = to_spi_device(ndev->dev.parent);
+       u8 cmd[4] = {
+               addr >> 8,
+               addr,
+               W5500_SPI_WRITE_CONTROL(addr),
+               data
+       };
+
+       return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0);
+}
+
+static int w5500_spi_read16(struct net_device *ndev, u32 addr)
+{
+       struct spi_device *spi = to_spi_device(ndev->dev.parent);
+       u8 cmd[3] = {
+               addr >> 8,
+               addr,
+               W5500_SPI_READ_CONTROL(addr)
+       };
+       __be16 data;
+       int ret;
+
+       ret = spi_write_then_read(spi, cmd, sizeof(cmd), &data, sizeof(data));
+
+       return ret ? ret : be16_to_cpu(data);
+}
+
+static int w5500_spi_write16(struct net_device *ndev, u32 addr, u16 data)
+{
+       struct spi_device *spi = to_spi_device(ndev->dev.parent);
+       u8 cmd[5] = {
+               addr >> 8,
+               addr,
+               W5500_SPI_WRITE_CONTROL(addr),
+               data >> 8,
+               data
+       };
+
+       return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0);
+}
+
+static int w5500_spi_readbulk(struct net_device *ndev, u32 addr, u8 *buf,
+                             int len)
+{
+       struct spi_device *spi = to_spi_device(ndev->dev.parent);
+       struct w5500_spi_priv *spi_priv = w5500_spi_priv(ndev);
+       struct spi_transfer xfer[] = {
+               {
+                       .tx_buf = spi_priv->cmd_buf,
+                       .len = sizeof(spi_priv->cmd_buf),
+               },
+               {
+                       .rx_buf = buf,
+                       .len = len,
+               },
+       };
+       int ret;
+
+       mutex_lock(&spi_priv->cmd_lock);
+
+       spi_priv->cmd_buf[0] = addr >> 8;
+       spi_priv->cmd_buf[1] = addr;
+       spi_priv->cmd_buf[2] = W5500_SPI_READ_CONTROL(addr);
+       ret = spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
+
+       mutex_unlock(&spi_priv->cmd_lock);
+
+       return ret;
+}
+
+static int w5500_spi_writebulk(struct net_device *ndev, u32 addr, const u8 *buf,
+                              int len)
+{
+       struct spi_device *spi = to_spi_device(ndev->dev.parent);
+       struct w5500_spi_priv *spi_priv = w5500_spi_priv(ndev);
+       struct spi_transfer xfer[] = {
+               {
+                       .tx_buf = spi_priv->cmd_buf,
+                       .len = sizeof(spi_priv->cmd_buf),
+               },
+               {
+                       .tx_buf = buf,
+                       .len = len,
+               },
+       };
+       int ret;
+
+       mutex_lock(&spi_priv->cmd_lock);
+
+       spi_priv->cmd_buf[0] = addr >> 8;
+       spi_priv->cmd_buf[1] = addr;
+       spi_priv->cmd_buf[2] = W5500_SPI_WRITE_CONTROL(addr);
+       ret = spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
+
+       mutex_unlock(&spi_priv->cmd_lock);
+
+       return ret;
+}
+
+static const struct w5100_ops w5500_ops = {
+       .may_sleep = true,
+       .chip_id = W5500,
+       .read = w5500_spi_read,
+       .write = w5500_spi_write,
+       .read16 = w5500_spi_read16,
+       .write16 = w5500_spi_write16,
+       .readbulk = w5500_spi_readbulk,
+       .writebulk = w5500_spi_writebulk,
+       .init = w5500_spi_init,
+};
+
+static int w5100_spi_probe(struct spi_device *spi)
+{
+       const struct spi_device_id *id = spi_get_device_id(spi);
+       const struct w5100_ops *ops;
+       int priv_size;
+
+       switch (id->driver_data) {
+       case W5100:
+               ops = &w5100_spi_ops;
+               priv_size = 0;
+               break;
+       case W5200:
+               ops = &w5200_ops;
+               priv_size = sizeof(struct w5200_spi_priv);
+               break;
+       case W5500:
+               ops = &w5500_ops;
+               priv_size = sizeof(struct w5500_spi_priv);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return w5100_probe(&spi->dev, ops, priv_size, NULL, spi->irq, -EINVAL);
+}
+
+static int w5100_spi_remove(struct spi_device *spi)
+{
+       return w5100_remove(&spi->dev);
+}
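
[Review note: w5100_probe()/w5100_remove() are the shared-core entry points from w5100.h; a sketch of the contract the call above appears to rely on (parameter names are my guesses, not quoted from the header):]

/* sizeof_ops_priv bytes of bus-layer private data are allocated next
 * to the core private data and retrieved via w5100_ops_priv(ndev);
 * mac_addr may be NULL (the core appears to fall back to a generated
 * address), and a negative link_gpio (here -EINVAL) means no
 * link-status GPIO is wired up.
 */
int w5100_probe(struct device *dev, const struct w5100_ops *ops,
                int sizeof_ops_priv, const void *mac_addr, int irq,
                int link_gpio);
int w5100_remove(struct device *dev);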
+
+static const struct spi_device_id w5100_spi_ids[] = {
+       { "w5100", W5100 },
+       { "w5200", W5200 },
+       { "w5500", W5500 },
+       {}
+};
+MODULE_DEVICE_TABLE(spi, w5100_spi_ids);
+
+static struct spi_driver w5100_spi_driver = {
+       .driver         = {
+               .name   = "w5100",
+               .pm     = &w5100_pm_ops,
+       },
+       .probe          = w5100_spi_probe,
+       .remove         = w5100_spi_remove,
+       .id_table       = w5100_spi_ids,
+};
+module_spi_driver(w5100_spi_driver);
+
+MODULE_DESCRIPTION("WIZnet W5100/W5200/W5500 Ethernet driver for SPI mode");
+MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
+MODULE_LICENSE("GPL");
index 8b282d0b169c4a06e82479b215dbb02fcceb4c1e..ec1889ce38a384f1311286c5835f61bc1f26bf40 100644 (file)
@@ -27,6 +27,8 @@
 #include <linux/irq.h>
 #include <linux/gpio.h>
 
+#include "w5100.h"
+
 #define DRV_NAME       "w5100"
 #define DRV_VERSION    "2012-04-04"
 
@@ -36,7 +38,7 @@ MODULE_ALIAS("platform:"DRV_NAME);
 MODULE_LICENSE("GPL");
 
 /*
- * Registers
+ * W5100/W5200/W5500 common registers
  */
 #define W5100_COMMON_REGS      0x0000
 #define W5100_MR               0x0000 /* Mode Register */
@@ -46,55 +48,114 @@ MODULE_LICENSE("GPL");
 #define   MR_IND                 0x01 /* Indirect mode */
 #define W5100_SHAR             0x0009 /* Source MAC address */
 #define W5100_IR               0x0015 /* Interrupt Register */
-#define W5100_IMR              0x0016 /* Interrupt Mask Register */
-#define   IR_S0                          0x01 /* S0 interrupt */
-#define W5100_RTR              0x0017 /* Retry Time-value Register */
-#define   RTR_DEFAULT            2000 /* =0x07d0 (2000) */
-#define W5100_RMSR             0x001a /* Receive Memory Size */
-#define W5100_TMSR             0x001b /* Transmit Memory Size */
 #define W5100_COMMON_REGS_LEN  0x0040
 
-#define W5100_S0_REGS          0x0400
-#define W5100_S0_MR            0x0400 /* S0 Mode Register */
+#define W5100_Sn_MR            0x0000 /* Sn Mode Register */
+#define W5100_Sn_CR            0x0001 /* Sn Command Register */
+#define W5100_Sn_IR            0x0002 /* Sn Interrupt Register */
+#define W5100_Sn_SR            0x0003 /* Sn Status Register */
+#define W5100_Sn_TX_FSR                0x0020 /* Sn Transmit free memory size */
+#define W5100_Sn_TX_RD         0x0022 /* Sn Transmit memory read pointer */
+#define W5100_Sn_TX_WR         0x0024 /* Sn Transmit memory write pointer */
+#define W5100_Sn_RX_RSR                0x0026 /* Sn Receive free memory size */
+#define W5100_Sn_RX_RD         0x0028 /* Sn Receive memory read pointer */
+
+#define S0_REGS(priv)          ((priv)->s0_regs)
+
+#define W5100_S0_MR(priv)      (S0_REGS(priv) + W5100_Sn_MR)
 #define   S0_MR_MACRAW           0x04 /* MAC RAW mode (promiscuous) */
 #define   S0_MR_MACRAW_MF        0x44 /* MAC RAW mode (filtered) */
-#define W5100_S0_CR            0x0401 /* S0 Command Register */
+#define W5100_S0_CR(priv)      (S0_REGS(priv) + W5100_Sn_CR)
 #define   S0_CR_OPEN             0x01 /* OPEN command */
 #define   S0_CR_CLOSE            0x10 /* CLOSE command */
 #define   S0_CR_SEND             0x20 /* SEND command */
 #define   S0_CR_RECV             0x40 /* RECV command */
-#define W5100_S0_IR            0x0402 /* S0 Interrupt Register */
+#define W5100_S0_IR(priv)      (S0_REGS(priv) + W5100_Sn_IR)
 #define   S0_IR_SENDOK           0x10 /* complete sending */
 #define   S0_IR_RECV             0x04 /* receiving data */
-#define W5100_S0_SR            0x0403 /* S0 Status Register */
+#define W5100_S0_SR(priv)      (S0_REGS(priv) + W5100_Sn_SR)
 #define   S0_SR_MACRAW           0x42 /* mac raw mode */
-#define W5100_S0_TX_FSR                0x0420 /* S0 Transmit free memory size */
-#define W5100_S0_TX_RD         0x0422 /* S0 Transmit memory read pointer */
-#define W5100_S0_TX_WR         0x0424 /* S0 Transmit memory write pointer */
-#define W5100_S0_RX_RSR                0x0426 /* S0 Receive free memory size */
-#define W5100_S0_RX_RD         0x0428 /* S0 Receive memory read pointer */
+#define W5100_S0_TX_FSR(priv)  (S0_REGS(priv) + W5100_Sn_TX_FSR)
+#define W5100_S0_TX_RD(priv)   (S0_REGS(priv) + W5100_Sn_TX_RD)
+#define W5100_S0_TX_WR(priv)   (S0_REGS(priv) + W5100_Sn_TX_WR)
+#define W5100_S0_RX_RSR(priv)  (S0_REGS(priv) + W5100_Sn_RX_RSR)
+#define W5100_S0_RX_RD(priv)   (S0_REGS(priv) + W5100_Sn_RX_RD)
+
 #define W5100_S0_REGS_LEN      0x0040
 
+/*
+ * W5100 and W5200 common registers
+ */
+#define W5100_IMR              0x0016 /* Interrupt Mask Register */
+#define   IR_S0                          0x01 /* S0 interrupt */
+#define W5100_RTR              0x0017 /* Retry Time-value Register */
+#define   RTR_DEFAULT            2000 /* =0x07d0 (2000) */
+
+/*
+ * W5100 specific register and memory
+ */
+#define W5100_RMSR             0x001a /* Receive Memory Size */
+#define W5100_TMSR             0x001b /* Transmit Memory Size */
+
+#define W5100_S0_REGS          0x0400
+
 #define W5100_TX_MEM_START     0x4000
-#define W5100_TX_MEM_END       0x5fff
-#define W5100_TX_MEM_MASK      0x1fff
+#define W5100_TX_MEM_SIZE      0x2000
 #define W5100_RX_MEM_START     0x6000
-#define W5100_RX_MEM_END       0x7fff
-#define W5100_RX_MEM_MASK      0x1fff
+#define W5100_RX_MEM_SIZE      0x2000
+
+/*
+ * W5200 specific register and memory
+ */
+#define W5200_S0_REGS          0x4000
+
+#define W5200_Sn_RXMEM_SIZE(n) (0x401e + (n) * 0x0100) /* Sn RX Memory Size */
+#define W5200_Sn_TXMEM_SIZE(n) (0x401f + (n) * 0x0100) /* Sn TX Memory Size */
+
+#define W5200_TX_MEM_START     0x8000
+#define W5200_TX_MEM_SIZE      0x4000
+#define W5200_RX_MEM_START     0xc000
+#define W5200_RX_MEM_SIZE      0x4000
+
+/*
+ * W5500 specific register and memory
+ *
+ * W5500 registers and memory are organized into multiple blocks.  Each
+ * one is selected by a 16-bit offset address and 5 block select bits, so
+ * we encode both into a 32-bit address (the lower 16 bits hold the
+ * offset address and the upper 16 bits hold the block select bits).
+ */
+#define W5500_SIMR             0x0018 /* Socket Interrupt Mask Register */
+#define W5500_RTR              0x0019 /* Retry Time-value Register */
+
+#define W5500_S0_REGS          0x10000
+
+#define W5500_Sn_RXMEM_SIZE(n) \
+               (0x1001e + (n) * 0x40000) /* Sn RX Memory Size */
+#define W5500_Sn_TXMEM_SIZE(n) \
+               (0x1001f + (n) * 0x40000) /* Sn TX Memory Size */
+
+#define W5500_TX_MEM_START     0x20000
+#define W5500_TX_MEM_SIZE      0x04000
+#define W5500_RX_MEM_START     0x30000
+#define W5500_RX_MEM_SIZE      0x04000
 
 /*
  * Device driver private data structure
  */
+
 struct w5100_priv {
-       void __iomem *base;
-       spinlock_t reg_lock;
-       bool indirect;
-       u8   (*read)(struct w5100_priv *priv, u16 addr);
-       void (*write)(struct w5100_priv *priv, u16 addr, u8 data);
-       u16  (*read16)(struct w5100_priv *priv, u16 addr);
-       void (*write16)(struct w5100_priv *priv, u16 addr, u16 data);
-       void (*readbuf)(struct w5100_priv *priv, u16 addr, u8 *buf, int len);
-       void (*writebuf)(struct w5100_priv *priv, u16 addr, u8 *buf, int len);
+       const struct w5100_ops *ops;
+
+       /* Socket 0 register offset address */
+       u32 s0_regs;
+       /* Socket 0 TX buffer offset address and size */
+       u32 s0_tx_buf;
+       u16 s0_tx_buf_size;
+       /* Socket 0 RX buffer offset address and size */
+       u32 s0_rx_buf;
+       u16 s0_rx_buf_size;
+
        int irq;
        int link_irq;
        int link_gpio;
@@ -103,71 +164,142 @@ struct w5100_priv {
        struct net_device *ndev;
        bool promisc;
        u32 msg_enable;
+
+       struct workqueue_struct *xfer_wq;
+       struct work_struct rx_work;
+       struct sk_buff *tx_skb;
+       struct work_struct tx_work;
+       struct work_struct setrx_work;
+       struct work_struct restart_work;
 };
 
+static inline bool is_w5200(struct w5100_priv *priv)
+{
+       return priv->ops->chip_id == W5200;
+}
+
 /************************************************************************
  *
  *  Lowlevel I/O functions
  *
  ***********************************************************************/
 
+struct w5100_mmio_priv {
+       void __iomem *base;
+       /* Serialize access in indirect address mode */
+       spinlock_t reg_lock;
+};
+
+static inline struct w5100_mmio_priv *w5100_mmio_priv(struct net_device *dev)
+{
+       return w5100_ops_priv(dev);
+}
+
+static inline void __iomem *w5100_mmio(struct net_device *ndev)
+{
+       struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
+
+       return mmio_priv->base;
+}
+
 /*
  * In direct address mode the host system can access W5100 registers
  * directly after mapping them into memory-mapped I/O space.
  *
  * 0x8000 bytes of address space are required.
  */
-static inline u8 w5100_read_direct(struct w5100_priv *priv, u16 addr)
+static inline int w5100_read_direct(struct net_device *ndev, u32 addr)
 {
-       return ioread8(priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT));
+       return ioread8(w5100_mmio(ndev) + (addr << CONFIG_WIZNET_BUS_SHIFT));
 }
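
[Review note: CONFIG_WIZNET_BUS_SHIFT spaces the chip's byte-wide registers out to match the board's bus wiring; a worked example (the shift value is board-specific, selected via Kconfig):]

/* With CONFIG_WIZNET_BUS_SHIFT == 1 (16-bit bus, data on the low byte
 * lane), register 0x0015 (W5100_IR) sits at MMIO byte offset
 * 0x0015 << 1 == 0x002a from the mapped base; shift 0 is a 1:1 map.
 */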
 
-static inline void w5100_write_direct(struct w5100_priv *priv,
-                                     u16 addr, u8 data)
+static inline int __w5100_write_direct(struct net_device *ndev, u32 addr,
+                                      u8 data)
 {
-       iowrite8(data, priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT));
+       iowrite8(data, w5100_mmio(ndev) + (addr << CONFIG_WIZNET_BUS_SHIFT));
+
+       return 0;
 }
 
-static u16 w5100_read16_direct(struct w5100_priv *priv, u16 addr)
+static inline int w5100_write_direct(struct net_device *ndev, u32 addr, u8 data)
+{
+       __w5100_write_direct(ndev, addr, data);
+       mmiowb();
+
+       return 0;
+}
+
+static int w5100_read16_direct(struct net_device *ndev, u32 addr)
 {
        u16 data;
-       data  = w5100_read_direct(priv, addr) << 8;
-       data |= w5100_read_direct(priv, addr + 1);
+       data  = w5100_read_direct(ndev, addr) << 8;
+       data |= w5100_read_direct(ndev, addr + 1);
        return data;
 }
 
-static void w5100_write16_direct(struct w5100_priv *priv, u16 addr, u16 data)
+static int w5100_write16_direct(struct net_device *ndev, u32 addr, u16 data)
 {
-       w5100_write_direct(priv, addr, data >> 8);
-       w5100_write_direct(priv, addr + 1, data);
+       __w5100_write_direct(ndev, addr, data >> 8);
+       __w5100_write_direct(ndev, addr + 1, data);
+       mmiowb();
+
+       return 0;
 }
 
-static void w5100_readbuf_direct(struct w5100_priv *priv,
-                                u16 offset, u8 *buf, int len)
+static int w5100_readbulk_direct(struct net_device *ndev, u32 addr, u8 *buf,
+                                int len)
 {
-       u16 addr = W5100_RX_MEM_START + (offset & W5100_RX_MEM_MASK);
        int i;
 
-       for (i = 0; i < len; i++, addr++) {
-               if (unlikely(addr > W5100_RX_MEM_END))
-                       addr = W5100_RX_MEM_START;
-               *buf++ = w5100_read_direct(priv, addr);
-       }
+       for (i = 0; i < len; i++, addr++)
+               *buf++ = w5100_read_direct(ndev, addr);
+
+       return 0;
 }
 
-static void w5100_writebuf_direct(struct w5100_priv *priv,
-                                 u16 offset, u8 *buf, int len)
+static int w5100_writebulk_direct(struct net_device *ndev, u32 addr,
+                                 const u8 *buf, int len)
 {
-       u16 addr = W5100_TX_MEM_START + (offset & W5100_TX_MEM_MASK);
        int i;
 
-       for (i = 0; i < len; i++, addr++) {
-               if (unlikely(addr > W5100_TX_MEM_END))
-                       addr = W5100_TX_MEM_START;
-               w5100_write_direct(priv, addr, *buf++);
-       }
+       for (i = 0; i < len; i++, addr++)
+               __w5100_write_direct(ndev, addr, *buf++);
+
+       mmiowb();
+
+       return 0;
 }
 
+static int w5100_mmio_init(struct net_device *ndev)
+{
+       struct platform_device *pdev = to_platform_device(ndev->dev.parent);
+       struct w5100_priv *priv = netdev_priv(ndev);
+       struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
+       struct resource *mem;
+
+       spin_lock_init(&mmio_priv->reg_lock);
+
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       mmio_priv->base = devm_ioremap_resource(&pdev->dev, mem);
+       if (IS_ERR(mmio_priv->base))
+               return PTR_ERR(mmio_priv->base);
+
+       netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, priv->irq);
+
+       return 0;
+}
+
+static const struct w5100_ops w5100_mmio_direct_ops = {
+       .chip_id = W5100,
+       .read = w5100_read_direct,
+       .write = w5100_write_direct,
+       .read16 = w5100_read16_direct,
+       .write16 = w5100_write16_direct,
+       .readbulk = w5100_readbulk_direct,
+       .writebulk = w5100_writebulk_direct,
+       .init = w5100_mmio_init,
+};
+
 /*
  * In indirect address mode the host system accesses registers indirectly
  * by using the Indirect Mode Address Register (IDM_AR) and Indirect Mode Data
@@ -179,139 +311,290 @@ static void w5100_writebuf_direct(struct w5100_priv *priv,
 #define W5100_IDM_AR           0x01   /* Indirect Mode Address Register */
 #define W5100_IDM_DR           0x03   /* Indirect Mode Data Register */
 
-static u8 w5100_read_indirect(struct w5100_priv *priv, u16 addr)
+static int w5100_read_indirect(struct net_device *ndev, u32 addr)
 {
+       struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
        unsigned long flags;
        u8 data;
 
-       spin_lock_irqsave(&priv->reg_lock, flags);
-       w5100_write16_direct(priv, W5100_IDM_AR, addr);
-       mmiowb();
-       data = w5100_read_direct(priv, W5100_IDM_DR);
-       spin_unlock_irqrestore(&priv->reg_lock, flags);
+       spin_lock_irqsave(&mmio_priv->reg_lock, flags);
+       w5100_write16_direct(ndev, W5100_IDM_AR, addr);
+       data = w5100_read_direct(ndev, W5100_IDM_DR);
+       spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
 
        return data;
 }
 
-static void w5100_write_indirect(struct w5100_priv *priv, u16 addr, u8 data)
+static int w5100_write_indirect(struct net_device *ndev, u32 addr, u8 data)
 {
+       struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
        unsigned long flags;
 
-       spin_lock_irqsave(&priv->reg_lock, flags);
-       w5100_write16_direct(priv, W5100_IDM_AR, addr);
-       mmiowb();
-       w5100_write_direct(priv, W5100_IDM_DR, data);
-       mmiowb();
-       spin_unlock_irqrestore(&priv->reg_lock, flags);
+       spin_lock_irqsave(&mmio_priv->reg_lock, flags);
+       w5100_write16_direct(ndev, W5100_IDM_AR, addr);
+       w5100_write_direct(ndev, W5100_IDM_DR, data);
+       spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
+
+       return 0;
 }
 
-static u16 w5100_read16_indirect(struct w5100_priv *priv, u16 addr)
+static int w5100_read16_indirect(struct net_device *ndev, u32 addr)
 {
+       struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
        unsigned long flags;
        u16 data;
 
-       spin_lock_irqsave(&priv->reg_lock, flags);
-       w5100_write16_direct(priv, W5100_IDM_AR, addr);
-       mmiowb();
-       data  = w5100_read_direct(priv, W5100_IDM_DR) << 8;
-       data |= w5100_read_direct(priv, W5100_IDM_DR);
-       spin_unlock_irqrestore(&priv->reg_lock, flags);
+       spin_lock_irqsave(&mmio_priv->reg_lock, flags);
+       w5100_write16_direct(ndev, W5100_IDM_AR, addr);
+       data  = w5100_read_direct(ndev, W5100_IDM_DR) << 8;
+       data |= w5100_read_direct(ndev, W5100_IDM_DR);
+       spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
 
        return data;
 }
 
-static void w5100_write16_indirect(struct w5100_priv *priv, u16 addr, u16 data)
+static int w5100_write16_indirect(struct net_device *ndev, u32 addr, u16 data)
 {
+       struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
        unsigned long flags;
 
-       spin_lock_irqsave(&priv->reg_lock, flags);
-       w5100_write16_direct(priv, W5100_IDM_AR, addr);
-       mmiowb();
-       w5100_write_direct(priv, W5100_IDM_DR, data >> 8);
-       w5100_write_direct(priv, W5100_IDM_DR, data);
-       mmiowb();
-       spin_unlock_irqrestore(&priv->reg_lock, flags);
+       spin_lock_irqsave(&mmio_priv->reg_lock, flags);
+       w5100_write16_direct(ndev, W5100_IDM_AR, addr);
+       __w5100_write_direct(ndev, W5100_IDM_DR, data >> 8);
+       w5100_write_direct(ndev, W5100_IDM_DR, data);
+       spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
+
+       return 0;
 }
 
-static void w5100_readbuf_indirect(struct w5100_priv *priv,
-                                  u16 offset, u8 *buf, int len)
+static int w5100_readbulk_indirect(struct net_device *ndev, u32 addr, u8 *buf,
+                                  int len)
 {
-       u16 addr = W5100_RX_MEM_START + (offset & W5100_RX_MEM_MASK);
+       struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
        unsigned long flags;
        int i;
 
-       spin_lock_irqsave(&priv->reg_lock, flags);
-       w5100_write16_direct(priv, W5100_IDM_AR, addr);
-       mmiowb();
+       spin_lock_irqsave(&mmio_priv->reg_lock, flags);
+       w5100_write16_direct(ndev, W5100_IDM_AR, addr);
+
+       for (i = 0; i < len; i++)
+               *buf++ = w5100_read_direct(ndev, W5100_IDM_DR);
 
-       for (i = 0; i < len; i++, addr++) {
-               if (unlikely(addr > W5100_RX_MEM_END)) {
-                       addr = W5100_RX_MEM_START;
-                       w5100_write16_direct(priv, W5100_IDM_AR, addr);
-                       mmiowb();
-               }
-               *buf++ = w5100_read_direct(priv, W5100_IDM_DR);
-       }
        mmiowb();
-       spin_unlock_irqrestore(&priv->reg_lock, flags);
+       spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
+
+       return 0;
 }
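
[Review note: these bulk loops can keep hitting W5100_IDM_DR without rewriting W5100_IDM_AR only because indirect mode is entered with address auto-increment enabled (MR_AI, see w5100_reset_indirect() below); the access pattern, sketched:]

/* Indirect-mode bulk access (sketch):
 *
 *   MR     = MR_PB | MR_AI | MR_IND;  // MR_AI: IDM_AR auto-increments
 *   IDM_AR = addr;                    // latch the start address once
 *   for (i = 0; i < len; i++)
 *           buf[i] = IDM_DR;          // each data access bumps IDM_AR
 */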
 
-static void w5100_writebuf_indirect(struct w5100_priv *priv,
-                                   u16 offset, u8 *buf, int len)
+static int w5100_writebulk_indirect(struct net_device *ndev, u32 addr,
+                                   const u8 *buf, int len)
 {
-       u16 addr = W5100_TX_MEM_START + (offset & W5100_TX_MEM_MASK);
+       struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
        unsigned long flags;
        int i;
 
-       spin_lock_irqsave(&priv->reg_lock, flags);
-       w5100_write16_direct(priv, W5100_IDM_AR, addr);
-       mmiowb();
+       spin_lock_irqsave(&mmio_priv->reg_lock, flags);
+       w5100_write16_direct(ndev, W5100_IDM_AR, addr);
+
+       for (i = 0; i < len; i++)
+               __w5100_write_direct(ndev, W5100_IDM_DR, *buf++);
 
-       for (i = 0; i < len; i++, addr++) {
-               if (unlikely(addr > W5100_TX_MEM_END)) {
-                       addr = W5100_TX_MEM_START;
-                       w5100_write16_direct(priv, W5100_IDM_AR, addr);
-                       mmiowb();
-               }
-               w5100_write_direct(priv, W5100_IDM_DR, *buf++);
-       }
        mmiowb();
-       spin_unlock_irqrestore(&priv->reg_lock, flags);
+       spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
+
+       return 0;
+}
+
+static int w5100_reset_indirect(struct net_device *ndev)
+{
+       w5100_write_direct(ndev, W5100_MR, MR_RST);
+       mdelay(5);
+       w5100_write_direct(ndev, W5100_MR, MR_PB | MR_AI | MR_IND);
+
+       return 0;
 }
 
+static const struct w5100_ops w5100_mmio_indirect_ops = {
+       .chip_id = W5100,
+       .read = w5100_read_indirect,
+       .write = w5100_write_indirect,
+       .read16 = w5100_read16_indirect,
+       .write16 = w5100_write16_indirect,
+       .readbulk = w5100_readbulk_indirect,
+       .writebulk = w5100_writebulk_indirect,
+       .init = w5100_mmio_init,
+       .reset = w5100_reset_indirect,
+};
+
 #if defined(CONFIG_WIZNET_BUS_DIRECT)
-#define w5100_read     w5100_read_direct
-#define w5100_write    w5100_write_direct
-#define w5100_read16   w5100_read16_direct
-#define w5100_write16  w5100_write16_direct
-#define w5100_readbuf  w5100_readbuf_direct
-#define w5100_writebuf w5100_writebuf_direct
+
+static int w5100_read(struct w5100_priv *priv, u32 addr)
+{
+       return w5100_read_direct(priv->ndev, addr);
+}
+
+static int w5100_write(struct w5100_priv *priv, u32 addr, u8 data)
+{
+       return w5100_write_direct(priv->ndev, addr, data);
+}
+
+static int w5100_read16(struct w5100_priv *priv, u32 addr)
+{
+       return w5100_read16_direct(priv->ndev, addr);
+}
+
+static int w5100_write16(struct w5100_priv *priv, u32 addr, u16 data)
+{
+       return w5100_write16_direct(priv->ndev, addr, data);
+}
+
+static int w5100_readbulk(struct w5100_priv *priv, u32 addr, u8 *buf, int len)
+{
+       return w5100_readbulk_direct(priv->ndev, addr, buf, len);
+}
+
+static int w5100_writebulk(struct w5100_priv *priv, u32 addr, const u8 *buf,
+                          int len)
+{
+       return w5100_writebulk_direct(priv->ndev, addr, buf, len);
+}
 
 #elif defined(CONFIG_WIZNET_BUS_INDIRECT)
-#define w5100_read     w5100_read_indirect
-#define w5100_write    w5100_write_indirect
-#define w5100_read16   w5100_read16_indirect
-#define w5100_write16  w5100_write16_indirect
-#define w5100_readbuf  w5100_readbuf_indirect
-#define w5100_writebuf w5100_writebuf_indirect
+
+static int w5100_read(struct w5100_priv *priv, u32 addr)
+{
+       return w5100_read_indirect(priv->ndev, addr);
+}
+
+static int w5100_write(struct w5100_priv *priv, u32 addr, u8 data)
+{
+       return w5100_write_indirect(priv->ndev, addr, data);
+}
+
+static int w5100_read16(struct w5100_priv *priv, u32 addr)
+{
+       return w5100_read16_indirect(priv->ndev, addr);
+}
+
+static int w5100_write16(struct w5100_priv *priv, u32 addr, u16 data)
+{
+       return w5100_write16_indirect(priv->ndev, addr, data);
+}
+
+static int w5100_readbulk(struct w5100_priv *priv, u32 addr, u8 *buf, int len)
+{
+       return w5100_readbulk_indirect(priv->ndev, addr, buf, len);
+}
+
+static int w5100_writebulk(struct w5100_priv *priv, u32 addr, const u8 *buf,
+                          int len)
+{
+       return w5100_writebulk_indirect(priv->ndev, addr, buf, len);
+}
 
 #else /* CONFIG_WIZNET_BUS_ANY */
-#define w5100_read     priv->read
-#define w5100_write    priv->write
-#define w5100_read16   priv->read16
-#define w5100_write16  priv->write16
-#define w5100_readbuf  priv->readbuf
-#define w5100_writebuf priv->writebuf
+
+static int w5100_read(struct w5100_priv *priv, u32 addr)
+{
+       return priv->ops->read(priv->ndev, addr);
+}
+
+static int w5100_write(struct w5100_priv *priv, u32 addr, u8 data)
+{
+       return priv->ops->write(priv->ndev, addr, data);
+}
+
+static int w5100_read16(struct w5100_priv *priv, u32 addr)
+{
+       return priv->ops->read16(priv->ndev, addr);
+}
+
+static int w5100_write16(struct w5100_priv *priv, u32 addr, u16 data)
+{
+       return priv->ops->write16(priv->ndev, addr, data);
+}
+
+static int w5100_readbulk(struct w5100_priv *priv, u32 addr, u8 *buf, int len)
+{
+       return priv->ops->readbulk(priv->ndev, addr, buf, len);
+}
+
+static int w5100_writebulk(struct w5100_priv *priv, u32 addr, const u8 *buf,
+                          int len)
+{
+       return priv->ops->writebulk(priv->ndev, addr, buf, len);
+}
+
 #endif
 
+static int w5100_readbuf(struct w5100_priv *priv, u16 offset, u8 *buf, int len)
+{
+       u32 addr;
+       int remain = 0;
+       int ret;
+       const u32 mem_start = priv->s0_rx_buf;
+       const u16 mem_size = priv->s0_rx_buf_size;
+
+       offset %= mem_size;
+       addr = mem_start + offset;
+
+       if (offset + len > mem_size) {
+               remain = (offset + len) % mem_size;
+               len = mem_size - offset;
+       }
+
+       ret = w5100_readbulk(priv, addr, buf, len);
+       if (ret || !remain)
+               return ret;
+
+       return w5100_readbulk(priv, mem_start, buf + len, remain);
+}
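
[Review note: a worked example of the wrap-around arithmetic above, taking the W5100's 0x2000-byte socket 0 RX buffer as the assumed geometry:]

/* offset = 0x1f00, len = 0x0200, mem_size = 0x2000:
 *   offset + len == 0x2100 > mem_size, so
 *   remain = 0x2100 % 0x2000 == 0x0100 and len is clipped to 0x0100;
 * the first readbulk covers mem_start + 0x1f00 .. end of the buffer,
 * the second fetches the remaining 0x0100 bytes from mem_start.
 */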
+
+static int w5100_writebuf(struct w5100_priv *priv, u16 offset, const u8 *buf,
+                         int len)
+{
+       u32 addr;
+       int ret;
+       int remain = 0;
+       const u32 mem_start = priv->s0_tx_buf;
+       const u16 mem_size = priv->s0_tx_buf_size;
+
+       offset %= mem_size;
+       addr = mem_start + offset;
+
+       if (offset + len > mem_size) {
+               remain = (offset + len) % mem_size;
+               len = mem_size - offset;
+       }
+
+       ret = w5100_writebulk(priv, addr, buf, len);
+       if (ret || !remain)
+               return ret;
+
+       return w5100_writebulk(priv, mem_start, buf + len, remain);
+}
+
+static int w5100_reset(struct w5100_priv *priv)
+{
+       if (priv->ops->reset)
+               return priv->ops->reset(priv->ndev);
+
+       w5100_write(priv, W5100_MR, MR_RST);
+       mdelay(5);
+       w5100_write(priv, W5100_MR, MR_PB);
+
+       return 0;
+}
+
 static int w5100_command(struct w5100_priv *priv, u16 cmd)
 {
-       unsigned long timeout = jiffies + msecs_to_jiffies(100);
+       unsigned long timeout;
 
-       w5100_write(priv, W5100_S0_CR, cmd);
-       mmiowb();
+       w5100_write(priv, W5100_S0_CR(priv), cmd);
+
+       timeout = jiffies + msecs_to_jiffies(100);
 
-       while (w5100_read(priv, W5100_S0_CR) != 0) {
+       while (w5100_read(priv, W5100_S0_CR(priv)) != 0) {
                if (time_after(jiffies, timeout))
                        return -EIO;
                cpu_relax();
@@ -323,47 +606,116 @@ static int w5100_command(struct w5100_priv *priv, u16 cmd)
 static void w5100_write_macaddr(struct w5100_priv *priv)
 {
        struct net_device *ndev = priv->ndev;
-       int i;
 
-       for (i = 0; i < ETH_ALEN; i++)
-               w5100_write(priv, W5100_SHAR + i, ndev->dev_addr[i]);
-       mmiowb();
+       w5100_writebulk(priv, W5100_SHAR, ndev->dev_addr, ETH_ALEN);
 }
 
-static void w5100_hw_reset(struct w5100_priv *priv)
+static void w5100_socket_intr_mask(struct w5100_priv *priv, u8 mask)
 {
-       w5100_write_direct(priv, W5100_MR, MR_RST);
-       mmiowb();
-       mdelay(5);
-       w5100_write_direct(priv, W5100_MR, priv->indirect ?
-                                 MR_PB | MR_AI | MR_IND :
-                                 MR_PB);
-       mmiowb();
-       w5100_write(priv, W5100_IMR, 0);
-       w5100_write_macaddr(priv);
+       u32 imr;
 
+       if (priv->ops->chip_id == W5500)
+               imr = W5500_SIMR;
+       else
+               imr = W5100_IMR;
+
+       w5100_write(priv, imr, mask);
+}
+
+static void w5100_enable_intr(struct w5100_priv *priv)
+{
+       w5100_socket_intr_mask(priv, IR_S0);
+}
+
+static void w5100_disable_intr(struct w5100_priv *priv)
+{
+       w5100_socket_intr_mask(priv, 0);
+}
+
+static void w5100_memory_configure(struct w5100_priv *priv)
+{
        /* Configure 16K of internal memory
         * as 8K RX buffer and 8K TX buffer
         */
        w5100_write(priv, W5100_RMSR, 0x03);
        w5100_write(priv, W5100_TMSR, 0x03);
-       mmiowb();
+}
+
+static void w5200_memory_configure(struct w5100_priv *priv)
+{
+       int i;
+
+       /* Configure internal RX memory as 16K RX buffer and
+        * internal TX memory as 16K TX buffer
+        */
+       w5100_write(priv, W5200_Sn_RXMEM_SIZE(0), 0x10);
+       w5100_write(priv, W5200_Sn_TXMEM_SIZE(0), 0x10);
+
+       for (i = 1; i < 8; i++) {
+               w5100_write(priv, W5200_Sn_RXMEM_SIZE(i), 0);
+               w5100_write(priv, W5200_Sn_TXMEM_SIZE(i), 0);
+       }
+}
+
+static void w5500_memory_configure(struct w5100_priv *priv)
+{
+       int i;
+
+       /* Configure internal RX memory as 16K RX buffer and
+        * internal TX memory as 16K TX buffer
+        */
+       w5100_write(priv, W5500_Sn_RXMEM_SIZE(0), 0x10);
+       w5100_write(priv, W5500_Sn_TXMEM_SIZE(0), 0x10);
+
+       for (i = 1; i < 8; i++) {
+               w5100_write(priv, W5500_Sn_RXMEM_SIZE(i), 0);
+               w5100_write(priv, W5500_Sn_TXMEM_SIZE(i), 0);
+       }
+}
+
+static int w5100_hw_reset(struct w5100_priv *priv)
+{
+       u32 rtr;
+
+       w5100_reset(priv);
+
+       w5100_disable_intr(priv);
+       w5100_write_macaddr(priv);
+
+       switch (priv->ops->chip_id) {
+       case W5100:
+               w5100_memory_configure(priv);
+               rtr = W5100_RTR;
+               break;
+       case W5200:
+               w5200_memory_configure(priv);
+               rtr = W5100_RTR;
+               break;
+       case W5500:
+               w5500_memory_configure(priv);
+               rtr = W5500_RTR;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (w5100_read16(priv, rtr) != RTR_DEFAULT)
+               return -ENODEV;
+
+       return 0;
 }
 
 static void w5100_hw_start(struct w5100_priv *priv)
 {
-       w5100_write(priv, W5100_S0_MR, priv->promisc ?
+       w5100_write(priv, W5100_S0_MR(priv), priv->promisc ?
                          S0_MR_MACRAW : S0_MR_MACRAW_MF);
-       mmiowb();
        w5100_command(priv, S0_CR_OPEN);
-       w5100_write(priv, W5100_IMR, IR_S0);
-       mmiowb();
+       w5100_enable_intr(priv);
 }
 
 static void w5100_hw_close(struct w5100_priv *priv)
 {
-       w5100_write(priv, W5100_IMR, 0);
-       mmiowb();
+       w5100_disable_intr(priv);
        w5100_command(priv, S0_CR_CLOSE);
 }
 
@@ -412,20 +764,17 @@ static int w5100_get_regs_len(struct net_device *ndev)
 }
 
 static void w5100_get_regs(struct net_device *ndev,
-                          struct ethtool_regs *regs, void *_buf)
+                          struct ethtool_regs *regs, void *buf)
 {
        struct w5100_priv *priv = netdev_priv(ndev);
-       u8 *buf = _buf;
-       u16 i;
 
        regs->version = 1;
-       for (i = 0; i < W5100_COMMON_REGS_LEN; i++)
-               *buf++ = w5100_read(priv, W5100_COMMON_REGS + i);
-       for (i = 0; i < W5100_S0_REGS_LEN; i++)
-               *buf++ = w5100_read(priv, W5100_S0_REGS + i);
+       w5100_readbulk(priv, W5100_COMMON_REGS, buf, W5100_COMMON_REGS_LEN);
+       buf += W5100_COMMON_REGS_LEN;
+       w5100_readbulk(priv, S0_REGS(priv), buf, W5100_S0_REGS_LEN);
 }
 
-static void w5100_tx_timeout(struct net_device *ndev)
+static void w5100_restart(struct net_device *ndev)
 {
        struct w5100_priv *priv = netdev_priv(ndev);
 
@@ -433,74 +782,138 @@ static void w5100_tx_timeout(struct net_device *ndev)
        w5100_hw_reset(priv);
        w5100_hw_start(priv);
        ndev->stats.tx_errors++;
-       ndev->trans_start = jiffies;
+       netif_trans_update(ndev);
        netif_wake_queue(ndev);
 }
 
-static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
+static void w5100_restart_work(struct work_struct *work)
+{
+       struct w5100_priv *priv = container_of(work, struct w5100_priv,
+                                              restart_work);
+
+       w5100_restart(priv->ndev);
+}
+
+static void w5100_tx_timeout(struct net_device *ndev)
 {
        struct w5100_priv *priv = netdev_priv(ndev);
-       u16 offset;
 
-       netif_stop_queue(ndev);
+       if (priv->ops->may_sleep)
+               schedule_work(&priv->restart_work);
+       else
+               w5100_restart(ndev);
+}
 
-       offset = w5100_read16(priv, W5100_S0_TX_WR);
+static void w5100_tx_skb(struct net_device *ndev, struct sk_buff *skb)
+{
+       struct w5100_priv *priv = netdev_priv(ndev);
+       u16 offset;
+
+       offset = w5100_read16(priv, W5100_S0_TX_WR(priv));
        w5100_writebuf(priv, offset, skb->data, skb->len);
-       w5100_write16(priv, W5100_S0_TX_WR, offset + skb->len);
-       mmiowb();
+       w5100_write16(priv, W5100_S0_TX_WR(priv), offset + skb->len);
        ndev->stats.tx_bytes += skb->len;
        ndev->stats.tx_packets++;
        dev_kfree_skb(skb);
 
        w5100_command(priv, S0_CR_SEND);
+}
+
+static void w5100_tx_work(struct work_struct *work)
+{
+       struct w5100_priv *priv = container_of(work, struct w5100_priv,
+                                              tx_work);
+       struct sk_buff *skb = priv->tx_skb;
+
+       priv->tx_skb = NULL;
+
+       if (WARN_ON(!skb))
+               return;
+       w5100_tx_skb(priv->ndev, skb);
+}
+
+static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
+{
+       struct w5100_priv *priv = netdev_priv(ndev);
+
+       netif_stop_queue(ndev);
+
+       if (priv->ops->may_sleep) {
+               WARN_ON(priv->tx_skb);
+               priv->tx_skb = skb;
+               queue_work(priv->xfer_wq, &priv->tx_work);
+       } else {
+               w5100_tx_skb(ndev, skb);
+       }
 
        return NETDEV_TX_OK;
 }
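
The new may_sleep flag in w5100_ops decides, once per ops table, whether atomic paths (xmit, timeout, set_rx_mode) may drive the bus directly, as MMIO can, or must defer to a workqueue, as a sleeping bus must. The same dispatch recurs in w5100_tx_timeout() and w5100_set_rx_mode() elsewhere in this patch; a condensed sketch of the shape, with placeholder names:

        /* Sketch of the "defer if the bus can sleep" dispatch; submit_or_run()
         * and do_now() are placeholders, not driver API.
         */
        static void submit_or_run(struct w5100_priv *priv,
                                  struct work_struct *work,
                                  void (*do_now)(struct w5100_priv *))
        {
                if (priv->ops->may_sleep)
                        queue_work(priv->xfer_wq, work);        /* run later, process context */
                else
                        do_now(priv);                           /* safe right here */
        }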
 
-static int w5100_napi_poll(struct napi_struct *napi, int budget)
+static struct sk_buff *w5100_rx_skb(struct net_device *ndev)
 {
-       struct w5100_priv *priv = container_of(napi, struct w5100_priv, napi);
-       struct net_device *ndev = priv->ndev;
+       struct w5100_priv *priv = netdev_priv(ndev);
        struct sk_buff *skb;
-       int rx_count;
        u16 rx_len;
        u16 offset;
        u8 header[2];
+       u16 rx_buf_len = w5100_read16(priv, W5100_S0_RX_RSR(priv));
 
-       for (rx_count = 0; rx_count < budget; rx_count++) {
-               u16 rx_buf_len = w5100_read16(priv, W5100_S0_RX_RSR);
-               if (rx_buf_len == 0)
-                       break;
+       if (rx_buf_len == 0)
+               return NULL;
 
-               offset = w5100_read16(priv, W5100_S0_RX_RD);
-               w5100_readbuf(priv, offset, header, 2);
-               rx_len = get_unaligned_be16(header) - 2;
-
-               skb = netdev_alloc_skb_ip_align(ndev, rx_len);
-               if (unlikely(!skb)) {
-                       w5100_write16(priv, W5100_S0_RX_RD,
-                                           offset + rx_buf_len);
-                       w5100_command(priv, S0_CR_RECV);
-                       ndev->stats.rx_dropped++;
-                       return -ENOMEM;
-               }
+       offset = w5100_read16(priv, W5100_S0_RX_RD(priv));
+       w5100_readbuf(priv, offset, header, 2);
+       rx_len = get_unaligned_be16(header) - 2;
 
-               skb_put(skb, rx_len);
-               w5100_readbuf(priv, offset + 2, skb->data, rx_len);
-               w5100_write16(priv, W5100_S0_RX_RD, offset + 2 + rx_len);
-               mmiowb();
+       skb = netdev_alloc_skb_ip_align(ndev, rx_len);
+       if (unlikely(!skb)) {
+               w5100_write16(priv, W5100_S0_RX_RD(priv), offset + rx_buf_len);
                w5100_command(priv, S0_CR_RECV);
-               skb->protocol = eth_type_trans(skb, ndev);
+               ndev->stats.rx_dropped++;
+               return NULL;
+       }
+
+       skb_put(skb, rx_len);
+       w5100_readbuf(priv, offset + 2, skb->data, rx_len);
+       w5100_write16(priv, W5100_S0_RX_RD(priv), offset + 2 + rx_len);
+       w5100_command(priv, S0_CR_RECV);
+       skb->protocol = eth_type_trans(skb, ndev);
+
+       ndev->stats.rx_packets++;
+       ndev->stats.rx_bytes += rx_len;
+
+       return skb;
+}
+
+static void w5100_rx_work(struct work_struct *work)
+{
+       struct w5100_priv *priv = container_of(work, struct w5100_priv,
+                                              rx_work);
+       struct sk_buff *skb;
+
+       while ((skb = w5100_rx_skb(priv->ndev)))
+               netif_rx_ni(skb);
+
+       w5100_enable_intr(priv);
+}
+
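On a sleeping bus the RX path skips NAPI entirely: w5100_rx_work() drains the ring in process context and injects frames with netif_rx_ni(), the non-interrupt-context variant, then re-arms the chip interrupt, while the MMIO path keeps NAPI and netif_receive_skb(). A sketch of the drain shape, with pull_frame() standing in for w5100_rx_skb():

        static void rx_drain(struct net_device *ndev)
        {
                struct sk_buff *skb;

                /* pull_frame() is a placeholder for w5100_rx_skb() */
                while ((skb = pull_frame(ndev)))
                        netif_rx_ni(skb);       /* process-context RX injection */
        }
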
+static int w5100_napi_poll(struct napi_struct *napi, int budget)
+{
+       struct w5100_priv *priv = container_of(napi, struct w5100_priv, napi);
+       int rx_count;
+
+       for (rx_count = 0; rx_count < budget; rx_count++) {
+               struct sk_buff *skb = w5100_rx_skb(priv->ndev);
 
-               netif_receive_skb(skb);
-               ndev->stats.rx_packets++;
-               ndev->stats.rx_bytes += rx_len;
+               if (skb)
+                       netif_receive_skb(skb);
+               else
+                       break;
        }
 
        if (rx_count < budget) {
                napi_complete(napi);
-               w5100_write(priv, W5100_IMR, IR_S0);
-               mmiowb();
+               w5100_enable_intr(priv);
        }
 
        return rx_count;
@@ -511,11 +924,10 @@ static irqreturn_t w5100_interrupt(int irq, void *ndev_instance)
        struct net_device *ndev = ndev_instance;
        struct w5100_priv *priv = netdev_priv(ndev);
 
-       int ir = w5100_read(priv, W5100_S0_IR);
+       int ir = w5100_read(priv, W5100_S0_IR(priv));
        if (!ir)
                return IRQ_NONE;
-       w5100_write(priv, W5100_S0_IR, ir);
-       mmiowb();
+       w5100_write(priv, W5100_S0_IR(priv), ir);
 
        if (ir & S0_IR_SENDOK) {
                netif_dbg(priv, tx_done, ndev, "tx done\n");
@@ -523,11 +935,12 @@ static irqreturn_t w5100_interrupt(int irq, void *ndev_instance)
        }
 
        if (ir & S0_IR_RECV) {
-               if (napi_schedule_prep(&priv->napi)) {
-                       w5100_write(priv, W5100_IMR, 0);
-                       mmiowb();
+               w5100_disable_intr(priv);
+
+               if (priv->ops->may_sleep)
+                       queue_work(priv->xfer_wq, &priv->rx_work);
+               else if (napi_schedule_prep(&priv->napi))
                        __napi_schedule(&priv->napi);
-               }
        }
 
        return IRQ_HANDLED;
@@ -551,6 +964,14 @@ static irqreturn_t w5100_detect_link(int irq, void *ndev_instance)
        return IRQ_HANDLED;
 }
 
+static void w5100_setrx_work(struct work_struct *work)
+{
+       struct w5100_priv *priv = container_of(work, struct w5100_priv,
+                                              setrx_work);
+
+       w5100_hw_start(priv);
+}
+
 static void w5100_set_rx_mode(struct net_device *ndev)
 {
        struct w5100_priv *priv = netdev_priv(ndev);
@@ -558,7 +979,11 @@ static void w5100_set_rx_mode(struct net_device *ndev)
 
        if (priv->promisc != set_promisc) {
                priv->promisc = set_promisc;
-               w5100_hw_start(priv);
+
+               if (priv->ops->may_sleep)
+                       schedule_work(&priv->setrx_work);
+               else
+                       w5100_hw_start(priv);
        }
 }
 
@@ -620,91 +1045,96 @@ static const struct net_device_ops w5100_netdev_ops = {
        .ndo_change_mtu         = eth_change_mtu,
 };
 
-static int w5100_hw_probe(struct platform_device *pdev)
+static int w5100_mmio_probe(struct platform_device *pdev)
 {
        struct wiznet_platform_data *data = dev_get_platdata(&pdev->dev);
-       struct net_device *ndev = platform_get_drvdata(pdev);
-       struct w5100_priv *priv = netdev_priv(ndev);
-       const char *name = netdev_name(ndev);
+       u8 *mac_addr = NULL;
        struct resource *mem;
-       int mem_size;
+       const struct w5100_ops *ops;
        int irq;
-       int ret;
 
-       if (data && is_valid_ether_addr(data->mac_addr)) {
-               memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
-       } else {
-               eth_hw_addr_random(ndev);
-       }
+       if (data && is_valid_ether_addr(data->mac_addr))
+               mac_addr = data->mac_addr;
 
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       priv->base = devm_ioremap_resource(&pdev->dev, mem);
-       if (IS_ERR(priv->base))
-               return PTR_ERR(priv->base);
-
-       mem_size = resource_size(mem);
-
-       spin_lock_init(&priv->reg_lock);
-       priv->indirect = mem_size < W5100_BUS_DIRECT_SIZE;
-       if (priv->indirect) {
-               priv->read     = w5100_read_indirect;
-               priv->write    = w5100_write_indirect;
-               priv->read16   = w5100_read16_indirect;
-               priv->write16  = w5100_write16_indirect;
-               priv->readbuf  = w5100_readbuf_indirect;
-               priv->writebuf = w5100_writebuf_indirect;
-       } else {
-               priv->read     = w5100_read_direct;
-               priv->write    = w5100_write_direct;
-               priv->read16   = w5100_read16_direct;
-               priv->write16  = w5100_write16_direct;
-               priv->readbuf  = w5100_readbuf_direct;
-               priv->writebuf = w5100_writebuf_direct;
-       }
-
-       w5100_hw_reset(priv);
-       if (w5100_read16(priv, W5100_RTR) != RTR_DEFAULT)
-               return -ENODEV;
+       if (resource_size(mem) < W5100_BUS_DIRECT_SIZE)
+               ops = &w5100_mmio_indirect_ops;
+       else
+               ops = &w5100_mmio_direct_ops;
 
        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;
-       ret = request_irq(irq, w5100_interrupt,
-                         IRQ_TYPE_LEVEL_LOW, name, ndev);
-       if (ret < 0)
-               return ret;
-       priv->irq = irq;
 
-       priv->link_gpio = data ? data->link_gpio : -EINVAL;
-       if (gpio_is_valid(priv->link_gpio)) {
-               char *link_name = devm_kzalloc(&pdev->dev, 16, GFP_KERNEL);
-               if (!link_name)
-                       return -ENOMEM;
-               snprintf(link_name, 16, "%s-link", name);
-               priv->link_irq = gpio_to_irq(priv->link_gpio);
-               if (request_any_context_irq(priv->link_irq, w5100_detect_link,
-                               IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-                               link_name, priv->ndev) < 0)
-                       priv->link_gpio = -EINVAL;
-       }
+       return w5100_probe(&pdev->dev, ops, sizeof(struct w5100_mmio_priv),
+                          mac_addr, irq, data ? data->link_gpio : -EINVAL);
+}
 
-       netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, irq);
-       return 0;
+static int w5100_mmio_remove(struct platform_device *pdev)
+{
+       return w5100_remove(&pdev->dev);
+}
+
+void *w5100_ops_priv(const struct net_device *ndev)
+{
+       return netdev_priv(ndev) +
+              ALIGN(sizeof(struct w5100_priv), NETDEV_ALIGN);
 }
+EXPORT_SYMBOL_GPL(w5100_ops_priv);
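
w5100_ops_priv() gives bus glue a pointer just past the NETDEV_ALIGN-aligned core area inside the single netdev allocation, matching the sizing arithmetic in w5100_probe() below. A hedged sketch of how glue code might retrieve its state; struct my_bus_state is invented for illustration:

        /* Layout inside netdev_priv(ndev), as sized by w5100_probe():
         *   | struct w5100_priv | pad to NETDEV_ALIGN | bus-private area |
         */
        struct my_bus_state {                   /* hypothetical glue state */
                struct completion done;
        };

        static struct my_bus_state *my_state(const struct net_device *ndev)
        {
                return w5100_ops_priv(ndev);
        }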
 
-static int w5100_probe(struct platform_device *pdev)
+int w5100_probe(struct device *dev, const struct w5100_ops *ops,
+               int sizeof_ops_priv, u8 *mac_addr, int irq, int link_gpio)
 {
        struct w5100_priv *priv;
        struct net_device *ndev;
        int err;
+       size_t alloc_size;
 
-       ndev = alloc_etherdev(sizeof(*priv));
+       alloc_size = sizeof(*priv);
+       if (sizeof_ops_priv) {
+               alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
+               alloc_size += sizeof_ops_priv;
+       }
+       alloc_size += NETDEV_ALIGN - 1;
+
+       ndev = alloc_etherdev(alloc_size);
        if (!ndev)
                return -ENOMEM;
-       SET_NETDEV_DEV(ndev, &pdev->dev);
-       platform_set_drvdata(pdev, ndev);
+       SET_NETDEV_DEV(ndev, dev);
+       dev_set_drvdata(dev, ndev);
        priv = netdev_priv(ndev);
+
+       switch (ops->chip_id) {
+       case W5100:
+               priv->s0_regs = W5100_S0_REGS;
+               priv->s0_tx_buf = W5100_TX_MEM_START;
+               priv->s0_tx_buf_size = W5100_TX_MEM_SIZE;
+               priv->s0_rx_buf = W5100_RX_MEM_START;
+               priv->s0_rx_buf_size = W5100_RX_MEM_SIZE;
+               break;
+       case W5200:
+               priv->s0_regs = W5200_S0_REGS;
+               priv->s0_tx_buf = W5200_TX_MEM_START;
+               priv->s0_tx_buf_size = W5200_TX_MEM_SIZE;
+               priv->s0_rx_buf = W5200_RX_MEM_START;
+               priv->s0_rx_buf_size = W5200_RX_MEM_SIZE;
+               break;
+       case W5500:
+               priv->s0_regs = W5500_S0_REGS;
+               priv->s0_tx_buf = W5500_TX_MEM_START;
+               priv->s0_tx_buf_size = W5500_TX_MEM_SIZE;
+               priv->s0_rx_buf = W5500_RX_MEM_START;
+               priv->s0_rx_buf_size = W5500_RX_MEM_SIZE;
+               break;
+       default:
+               err = -EINVAL;
+               goto err_register;
+       }
+
        priv->ndev = ndev;
+       priv->ops = ops;
+       priv->irq = irq;
+       priv->link_gpio = link_gpio;
 
        ndev->netdev_ops = &w5100_netdev_ops;
        ndev->ethtool_ops = &w5100_ethtool_ops;
@@ -720,22 +1150,76 @@ static int w5100_probe(struct platform_device *pdev)
        if (err < 0)
                goto err_register;
 
-       err = w5100_hw_probe(pdev);
-       if (err < 0)
-               goto err_hw_probe;
+       priv->xfer_wq = create_workqueue(netdev_name(ndev));
+       if (!priv->xfer_wq) {
+               err = -ENOMEM;
+               goto err_wq;
+       }
+
+       INIT_WORK(&priv->rx_work, w5100_rx_work);
+       INIT_WORK(&priv->tx_work, w5100_tx_work);
+       INIT_WORK(&priv->setrx_work, w5100_setrx_work);
+       INIT_WORK(&priv->restart_work, w5100_restart_work);
+
+       if (mac_addr)
+               memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+       else
+               eth_hw_addr_random(ndev);
+
+       if (priv->ops->init) {
+               err = priv->ops->init(priv->ndev);
+               if (err)
+                       goto err_hw;
+       }
+
+       err = w5100_hw_reset(priv);
+       if (err)
+               goto err_hw;
+
+       if (ops->may_sleep) {
+               err = request_threaded_irq(priv->irq, NULL, w5100_interrupt,
+                                          IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+                                          netdev_name(ndev), ndev);
+       } else {
+               err = request_irq(priv->irq, w5100_interrupt,
+                                 IRQF_TRIGGER_LOW, netdev_name(ndev), ndev);
+       }
+       if (err)
+               goto err_hw;
+
+       if (gpio_is_valid(priv->link_gpio)) {
+               char *link_name = devm_kzalloc(dev, 16, GFP_KERNEL);
+
+               if (!link_name) {
+                       err = -ENOMEM;
+                       goto err_gpio;
+               }
+               snprintf(link_name, 16, "%s-link", netdev_name(ndev));
+               priv->link_irq = gpio_to_irq(priv->link_gpio);
+               if (request_any_context_irq(priv->link_irq, w5100_detect_link,
+                                           IRQF_TRIGGER_RISING |
+                                           IRQF_TRIGGER_FALLING,
+                                           link_name, priv->ndev) < 0)
+                       priv->link_gpio = -EINVAL;
+       }
 
        return 0;
 
-err_hw_probe:
+err_gpio:
+       free_irq(priv->irq, ndev);
+err_hw:
+       destroy_workqueue(priv->xfer_wq);
+err_wq:
        unregister_netdev(ndev);
 err_register:
        free_netdev(ndev);
        return err;
 }
+EXPORT_SYMBOL_GPL(w5100_probe);
 
-static int w5100_remove(struct platform_device *pdev)
+int w5100_remove(struct device *dev)
 {
-       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct net_device *ndev = dev_get_drvdata(dev);
        struct w5100_priv *priv = netdev_priv(ndev);
 
        w5100_hw_reset(priv);
@@ -743,16 +1227,21 @@ static int w5100_remove(struct platform_device *pdev)
        if (gpio_is_valid(priv->link_gpio))
                free_irq(priv->link_irq, ndev);
 
+       flush_work(&priv->setrx_work);
+       flush_work(&priv->restart_work);
+       flush_workqueue(priv->xfer_wq);
+       destroy_workqueue(priv->xfer_wq);
+
        unregister_netdev(ndev);
        free_netdev(ndev);
        return 0;
 }
+EXPORT_SYMBOL_GPL(w5100_remove);
 
 #ifdef CONFIG_PM_SLEEP
 static int w5100_suspend(struct device *dev)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct net_device *ndev = dev_get_drvdata(dev);
        struct w5100_priv *priv = netdev_priv(ndev);
 
        if (netif_running(ndev)) {
@@ -766,8 +1255,7 @@ static int w5100_suspend(struct device *dev)
 
 static int w5100_resume(struct device *dev)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct net_device *ndev = dev_get_drvdata(dev);
        struct w5100_priv *priv = netdev_priv(ndev);
 
        if (netif_running(ndev)) {
@@ -783,15 +1271,15 @@ static int w5100_resume(struct device *dev)
 }
 #endif /* CONFIG_PM_SLEEP */
 
-static SIMPLE_DEV_PM_OPS(w5100_pm_ops, w5100_suspend, w5100_resume);
+SIMPLE_DEV_PM_OPS(w5100_pm_ops, w5100_suspend, w5100_resume);
+EXPORT_SYMBOL_GPL(w5100_pm_ops);
 
-static struct platform_driver w5100_driver = {
+static struct platform_driver w5100_mmio_driver = {
        .driver         = {
                .name   = DRV_NAME,
                .pm     = &w5100_pm_ops,
        },
-       .probe          = w5100_probe,
-       .remove         = w5100_remove,
+       .probe          = w5100_mmio_probe,
+       .remove         = w5100_mmio_remove,
 };
-
-module_platform_driver(w5100_driver);
+module_platform_driver(w5100_mmio_driver);
diff --git a/drivers/net/ethernet/wiznet/w5100.h b/drivers/net/ethernet/wiznet/w5100.h
new file mode 100644
index 0000000..f8a16fa
--- /dev/null
+++ b/drivers/net/ethernet/wiznet/w5100.h
@@ -0,0 +1,36 @@
+/*
+ * Ethernet driver for the WIZnet W5100 chip.
+ *
+ * Copyright (C) 2006-2008 WIZnet Co.,Ltd.
+ * Copyright (C) 2012 Mike Sinkovsky <msink@permonline.ru>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+enum {
+       W5100,
+       W5200,
+       W5500,
+};
+
+struct w5100_ops {
+       bool may_sleep;
+       int chip_id;
+       int (*read)(struct net_device *ndev, u32 addr);
+       int (*write)(struct net_device *ndev, u32 addr, u8 data);
+       int (*read16)(struct net_device *ndev, u32 addr);
+       int (*write16)(struct net_device *ndev, u32 addr, u16 data);
+       int (*readbulk)(struct net_device *ndev, u32 addr, u8 *buf, int len);
+       int (*writebulk)(struct net_device *ndev, u32 addr, const u8 *buf,
+                        int len);
+       int (*reset)(struct net_device *ndev);
+       int (*init)(struct net_device *ndev);
+};
+
+void *w5100_ops_priv(const struct net_device *ndev);
+
+int w5100_probe(struct device *dev, const struct w5100_ops *ops,
+               int sizeof_ops_priv, u8 *mac_addr, int irq, int link_gpio);
+int w5100_remove(struct device *dev);
+
+extern const struct dev_pm_ops w5100_pm_ops;
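
Taken together, this header turns w5100.c into a chip-agnostic core: bus glue fills a w5100_ops table with its register accessors and calls w5100_probe()/w5100_remove() from its own probe/remove paths. A hedged sketch of minimal glue; every my_-prefixed identifier is invented, and .init is omitted because the core treats it as optional:

        struct my_bus_device {                  /* hypothetical bus device */
                struct device dev;
                int irq;
        };

        static const struct w5100_ops my_bus_ops = {
                .may_sleep = true,              /* accessors may sleep on this bus */
                .chip_id   = W5500,
                .read      = my_read,
                .write     = my_write,
                .read16    = my_read16,
                .write16   = my_write16,
                .readbulk  = my_readbulk,
                .writebulk = my_writebulk,
                .reset     = my_reset,
        };

        static int my_bus_probe(struct my_bus_device *bdev)
        {
                /* no ops-private area, random MAC, no link GPIO */
                return w5100_probe(&bdev->dev, &my_bus_ops, 0, NULL,
                                   bdev->irq, -EINVAL);
        }
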
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 8da7b930ff595aa6ed79ddedbd5461b84dad6003..0b37ce9f28f1d4493bef915344d855f296e29953 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -362,7 +362,7 @@ static void w5300_tx_timeout(struct net_device *ndev)
        w5300_hw_reset(priv);
        w5300_hw_start(priv);
        ndev->stats.tx_errors++;
-       ndev->trans_start = jiffies;
+       netif_trans_update(ndev);
        netif_wake_queue(ndev);
 }
 
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 5a1068df7038c5faf5d67742139ea1d0d53f0845..73970871202283368766f764a56fb7503a31eb0e 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -584,7 +584,7 @@ static void temac_device_reset(struct net_device *ndev)
                dev_err(&ndev->dev, "Error setting TEMAC options\n");
 
        /* Init Driver variable */
-       ndev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(ndev); /* prevent tx timeout */
 }
 
 static void temac_adjust_link(struct net_device *ndev)
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 4684644703ccee1d4ad2c68439d48ac618b5c3b0..8c7f5be51e620be8e33954746f2174c046d5bccc 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -508,7 +508,7 @@ static void axienet_device_reset(struct net_device *ndev)
        axienet_set_multicast_list(ndev);
        axienet_setoptions(ndev, lp->options);
 
-       ndev->trans_start = jiffies;
+       netif_trans_update(ndev);
 }
 
 /**
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index e324b30923800f260a87a5f32e6089eacdbea1a1..3cee84a24815d7fec3f089b4c4c3d65519b6e1ae 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -531,7 +531,7 @@ static void xemaclite_tx_timeout(struct net_device *dev)
        }
 
        /* To exclude tx timeout */
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
 
        /* We're all ready to go. Start the queue */
        netif_wake_queue(dev);
@@ -563,7 +563,7 @@ static void xemaclite_tx_handler(struct net_device *dev)
                        dev->stats.tx_bytes += lp->deferred_skb->len;
                        dev_kfree_skb_irq(lp->deferred_skb);
                        lp->deferred_skb = NULL;
-                       dev->trans_start = jiffies; /* prevent tx timeout */
+                       netif_trans_update(dev); /* prevent tx timeout */
                        netif_wake_queue(dev);
                }
        }
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index d56f8693202bb45ea79e6eedf5141cf40d97b53f..7b44968e02e6c2f48709e888e75018d350005155 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -1199,7 +1199,7 @@ xirc2ps_tx_timeout_task(struct work_struct *work)
        struct net_device *dev = local->dev;
     /* reset the card */
     do_reset(dev,1);
-    dev->trans_start = jiffies; /* prevent tx timeout */
+    netif_trans_update(dev); /* prevent tx timeout */
     netif_wake_queue(dev);
 }
 
diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c
index b103adb8d62ecfa9c1b21647d5c2498fa9586c96..0dbafedc0a347efac359a9bddc125448dbf62a27 100644
--- a/drivers/net/fjes/fjes_hw.c
+++ b/drivers/net/fjes/fjes_hw.c
@@ -179,6 +179,8 @@ void fjes_hw_setup_epbuf(struct epbuf_handler *epbh, u8 *mac_addr, u32 mtu)
 
        for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++)
                info->v1i.vlan_id[i] = vlan_id[i];
+
+       info->v1i.rx_status |= FJES_RX_MTU_CHANGING_DONE;
 }
 
 void
@@ -214,6 +216,7 @@ static int fjes_hw_setup(struct fjes_hw *hw)
        u8 mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
        struct fjes_device_command_param param;
        struct ep_share_mem_info *buf_pair;
+       unsigned long flags;
        size_t mem_size;
        int result;
        int epidx;
@@ -262,10 +265,12 @@ static int fjes_hw_setup(struct fjes_hw *hw)
                        if (result)
                                return result;
 
+                       spin_lock_irqsave(&hw->rx_status_lock, flags);
                        fjes_hw_setup_epbuf(&buf_pair->tx, mac,
                                            fjes_support_mtu[0]);
                        fjes_hw_setup_epbuf(&buf_pair->rx, mac,
                                            fjes_support_mtu[0]);
+                       spin_unlock_irqrestore(&hw->rx_status_lock, flags);
                }
        }
 
@@ -327,6 +332,7 @@ int fjes_hw_init(struct fjes_hw *hw)
        INIT_WORK(&hw->epstop_task, fjes_hw_epstop_task);
 
        mutex_init(&hw->hw_info.lock);
+       spin_lock_init(&hw->rx_status_lock);
 
        hw->max_epid = fjes_hw_get_max_epid(hw);
        hw->my_epid = fjes_hw_get_my_epid(hw);
@@ -734,6 +740,7 @@ fjes_hw_get_partner_ep_status(struct fjes_hw *hw, int epid)
 void fjes_hw_raise_epstop(struct fjes_hw *hw)
 {
        enum ep_partner_status status;
+       unsigned long flags;
        int epidx;
 
        for (epidx = 0; epidx < hw->max_epid; epidx++) {
@@ -753,8 +760,10 @@ void fjes_hw_raise_epstop(struct fjes_hw *hw)
                set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
                set_bit(epidx, &hw->txrx_stop_req_bit);
 
+               spin_lock_irqsave(&hw->rx_status_lock, flags);
                hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
                                FJES_RX_STOP_REQ_REQUEST;
+               spin_unlock_irqrestore(&hw->rx_status_lock, flags);
        }
 }
 
@@ -810,7 +819,8 @@ bool fjes_hw_check_mtu(struct epbuf_handler *epbh, u32 mtu)
 {
        union ep_buffer_info *info = epbh->info;
 
-       return (info->v1i.frame_max == FJES_MTU_TO_FRAME_SIZE(mtu));
+       return ((info->v1i.frame_max == FJES_MTU_TO_FRAME_SIZE(mtu)) &&
+               info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE);
 }
 
 bool fjes_hw_check_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
@@ -863,6 +873,9 @@ bool fjes_hw_epbuf_rx_is_empty(struct epbuf_handler *epbh)
 {
        union ep_buffer_info *info = epbh->info;
 
+       if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
+               return true;
+
        if (info->v1i.count_max == 0)
                return true;
 
@@ -932,6 +945,7 @@ static void fjes_hw_update_zone_task(struct work_struct *work)
 
        struct fjes_adapter *adapter;
        struct net_device *netdev;
+       unsigned long flags;
 
        ulong unshare_bit = 0;
        ulong share_bit = 0;
@@ -1024,8 +1038,10 @@ static void fjes_hw_update_zone_task(struct work_struct *work)
                        continue;
 
                if (test_bit(epidx, &share_bit)) {
+                       spin_lock_irqsave(&hw->rx_status_lock, flags);
                        fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
                                            netdev->dev_addr, netdev->mtu);
+                       spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 
                        mutex_lock(&hw->hw_info.lock);
 
@@ -1069,10 +1085,14 @@ static void fjes_hw_update_zone_task(struct work_struct *work)
 
                        mutex_unlock(&hw->hw_info.lock);
 
-                       if (ret == 0)
+                       if (ret == 0) {
+                               spin_lock_irqsave(&hw->rx_status_lock, flags);
                                fjes_hw_setup_epbuf(
                                        &hw->ep_shm_info[epidx].tx,
                                        netdev->dev_addr, netdev->mtu);
+                               spin_unlock_irqrestore(&hw->rx_status_lock,
+                                                      flags);
+                       }
                }
 
                if (test_bit(epidx, &irq_bit)) {
@@ -1080,9 +1100,11 @@ static void fjes_hw_update_zone_task(struct work_struct *work)
                                                REG_ICTL_MASK_TXRX_STOP_REQ);
 
                        set_bit(epidx, &hw->txrx_stop_req_bit);
+                       spin_lock_irqsave(&hw->rx_status_lock, flags);
                        hw->ep_shm_info[epidx].tx.
                                info->v1i.rx_status |=
                                        FJES_RX_STOP_REQ_REQUEST;
+                       spin_unlock_irqrestore(&hw->rx_status_lock, flags);
                        set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
                }
        }
@@ -1098,6 +1120,7 @@ static void fjes_hw_epstop_task(struct work_struct *work)
 {
        struct fjes_hw *hw = container_of(work, struct fjes_hw, epstop_task);
        struct fjes_adapter *adapter = (struct fjes_adapter *)hw->back;
+       unsigned long flags;
 
        ulong remain_bit;
        int epid_bit;
@@ -1105,9 +1128,12 @@ static void fjes_hw_epstop_task(struct work_struct *work)
        while ((remain_bit = hw->epstop_req_bit)) {
                for (epid_bit = 0; remain_bit; remain_bit >>= 1, epid_bit++) {
                        if (remain_bit & 1) {
+                               spin_lock_irqsave(&hw->rx_status_lock, flags);
                                hw->ep_shm_info[epid_bit].
                                        tx.info->v1i.rx_status |=
                                                FJES_RX_STOP_REQ_DONE;
+                               spin_unlock_irqrestore(&hw->rx_status_lock,
+                                                      flags);
 
                                clear_bit(epid_bit, &hw->epstop_req_bit);
                                set_bit(epid_bit,
diff --git a/drivers/net/fjes/fjes_hw.h b/drivers/net/fjes/fjes_hw.h
index 6d57b89a0ee84aa28c6ffecc728dbc73a75fd779..1445ac99d6e316da52f3534b67be34f53f0e30d7 100644
--- a/drivers/net/fjes/fjes_hw.h
+++ b/drivers/net/fjes/fjes_hw.h
@@ -33,9 +33,9 @@ struct fjes_hw;
 #define EP_BUFFER_SUPPORT_VLAN_MAX 4
 #define EP_BUFFER_INFO_SIZE 4096
 
-#define FJES_DEVICE_RESET_TIMEOUT  ((17 + 1) * 3) /* sec */
-#define FJES_COMMAND_REQ_TIMEOUT  (5 + 1) /* sec */
-#define FJES_COMMAND_REQ_BUFF_TIMEOUT  (8 * 3) /* sec */
+#define FJES_DEVICE_RESET_TIMEOUT  ((17 + 1) * 3 * 8) /* sec */
+#define FJES_COMMAND_REQ_TIMEOUT  ((5 + 1) * 3 * 8) /* sec */
+#define FJES_COMMAND_REQ_BUFF_TIMEOUT  (60 * 3) /* sec */
 #define FJES_COMMAND_EPSTOP_WAIT_TIMEOUT       (1) /* sec */
 
 #define FJES_CMD_REQ_ERR_INFO_PARAM  (0x0001)
@@ -57,6 +57,7 @@ struct fjes_hw;
 #define FJES_RX_STOP_REQ_DONE          (0x1)
 #define FJES_RX_STOP_REQ_REQUEST       (0x2)
 #define FJES_RX_POLL_WORK              (0x4)
+#define FJES_RX_MTU_CHANGING_DONE      (0x8)
 
 #define EP_BUFFER_SIZE \
        (((sizeof(union ep_buffer_info) + (128 * (64 * 1024))) \
@@ -299,6 +300,8 @@ struct fjes_hw {
        u8 *base;
 
        struct fjes_hw_info hw_info;
+
+       spinlock_t rx_status_lock; /* spinlock for rx_status */
 };
 
 int fjes_hw_init(struct fjes_hw *);
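
The new rx_status_lock closes a race between the many process-context writers of tx.info->v1i.rx_status seen throughout this patch and fjes_poll(), which flips FJES_RX_POLL_WORK from softirq context under a plain spin_lock(). The writer-side pattern the hunks repeat, as a sketch (the helper itself is illustrative, not driver API):

        static void set_rx_status_bits(struct fjes_hw *hw, int epidx, u8 bits)
        {
                unsigned long flags;

                spin_lock_irqsave(&hw->rx_status_lock, flags);
                hw->ep_shm_info[epidx].tx.info->v1i.rx_status |= bits;
                spin_unlock_irqrestore(&hw->rx_status_lock, flags);
        }
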
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index 0ddb54fe3d91b44c961d03a2238d39abf109ef01..86c331bb5eb38ffbb9833740fd9fd05ea3bf237c 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -29,7 +29,7 @@
 #include "fjes.h"
 
 #define MAJ 1
-#define MIN 0
+#define MIN 1
 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN)
 #define DRV_NAME       "fjes"
 char fjes_driver_name[] = DRV_NAME;
@@ -290,6 +290,7 @@ static int fjes_close(struct net_device *netdev)
 {
        struct fjes_adapter *adapter = netdev_priv(netdev);
        struct fjes_hw *hw = &adapter->hw;
+       unsigned long flags;
        int epidx;
 
        netif_tx_stop_all_queues(netdev);
@@ -299,13 +300,18 @@ static int fjes_close(struct net_device *netdev)
 
        napi_disable(&adapter->napi);
 
+       spin_lock_irqsave(&hw->rx_status_lock, flags);
        for (epidx = 0; epidx < hw->max_epid; epidx++) {
                if (epidx == hw->my_epid)
                        continue;
 
-               adapter->hw.ep_shm_info[epidx].tx.info->v1i.rx_status &=
-                       ~FJES_RX_POLL_WORK;
+               if (fjes_hw_get_partner_ep_status(hw, epidx) ==
+                   EP_PARTNER_SHARED)
+                       adapter->hw.ep_shm_info[epidx]
+                                  .tx.info->v1i.rx_status &=
+                               ~FJES_RX_POLL_WORK;
        }
+       spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 
        fjes_free_irq(adapter);
 
@@ -330,6 +336,7 @@ static int fjes_setup_resources(struct fjes_adapter *adapter)
        struct net_device *netdev = adapter->netdev;
        struct ep_share_mem_info *buf_pair;
        struct fjes_hw *hw = &adapter->hw;
+       unsigned long flags;
        int result;
        int epidx;
 
@@ -371,8 +378,10 @@ static int fjes_setup_resources(struct fjes_adapter *adapter)
 
                buf_pair = &hw->ep_shm_info[epidx];
 
+               spin_lock_irqsave(&hw->rx_status_lock, flags);
                fjes_hw_setup_epbuf(&buf_pair->tx, netdev->dev_addr,
                                    netdev->mtu);
+               spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 
                if (fjes_hw_epid_is_same_zone(hw, epidx)) {
                        mutex_lock(&hw->hw_info.lock);
@@ -402,6 +411,7 @@ static void fjes_free_resources(struct fjes_adapter *adapter)
        struct ep_share_mem_info *buf_pair;
        struct fjes_hw *hw = &adapter->hw;
        bool reset_flag = false;
+       unsigned long flags;
        int result;
        int epidx;
 
@@ -418,8 +428,10 @@ static void fjes_free_resources(struct fjes_adapter *adapter)
 
                buf_pair = &hw->ep_shm_info[epidx];
 
+               spin_lock_irqsave(&hw->rx_status_lock, flags);
                fjes_hw_setup_epbuf(&buf_pair->tx,
                                    netdev->dev_addr, netdev->mtu);
+               spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 
                clear_bit(epidx, &hw->txrx_stop_req_bit);
        }
@@ -459,7 +471,7 @@ static void fjes_tx_stall_task(struct work_struct *work)
        int i;
 
        if (((long)jiffies -
-               (long)(netdev->trans_start)) > FJES_TX_TX_STALL_TIMEOUT) {
+               dev_trans_start(netdev)) > FJES_TX_TX_STALL_TIMEOUT) {
                netif_wake_queue(netdev);
                return;
        }
@@ -481,6 +493,9 @@ static void fjes_tx_stall_task(struct work_struct *work)
 
                        info = adapter->hw.ep_shm_info[epid].tx.info;
 
+                       if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
+                               return;
+
                        if (EP_RING_FULL(info->v1i.head, info->v1i.tail,
                                         info->v1i.count_max)) {
                                all_queue_available = 0;
@@ -549,7 +564,8 @@ static void fjes_raise_intr_rxdata_task(struct work_struct *work)
                if ((hw->ep_shm_info[epid].tx_status_work ==
                     FJES_TX_DELAY_SEND_PENDING) &&
                    (pstatus == EP_PARTNER_SHARED) &&
-                   !(hw->ep_shm_info[epid].rx.info->v1i.rx_status)) {
+                   !(hw->ep_shm_info[epid].rx.info->v1i.rx_status &
+                     FJES_RX_POLL_WORK)) {
                        fjes_hw_raise_interrupt(hw, epid,
                                                REG_ICTL_MASK_RX_DATA);
                }
@@ -653,7 +669,7 @@ fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                                &adapter->hw.ep_shm_info[dest_epid].rx, 0)) {
                        /* version is NOT 0 */
                        adapter->stats64.tx_carrier_errors += 1;
-                       hw->ep_shm_info[my_epid].net_stats
+                       hw->ep_shm_info[dest_epid].net_stats
                                                .tx_carrier_errors += 1;
 
                        ret = NETDEV_TX_OK;
@@ -661,9 +677,9 @@ fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                                &adapter->hw.ep_shm_info[dest_epid].rx,
                                netdev->mtu)) {
                        adapter->stats64.tx_dropped += 1;
-                       hw->ep_shm_info[my_epid].net_stats.tx_dropped += 1;
+                       hw->ep_shm_info[dest_epid].net_stats.tx_dropped += 1;
                        adapter->stats64.tx_errors += 1;
-                       hw->ep_shm_info[my_epid].net_stats.tx_errors += 1;
+                       hw->ep_shm_info[dest_epid].net_stats.tx_errors += 1;
 
                        ret = NETDEV_TX_OK;
                } else if (vlan &&
@@ -694,15 +710,15 @@ fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                                            (long)adapter->tx_start_jiffies) >=
                                            FJES_TX_RETRY_TIMEOUT) {
                                        adapter->stats64.tx_fifo_errors += 1;
-                                       hw->ep_shm_info[my_epid].net_stats
+                                       hw->ep_shm_info[dest_epid].net_stats
                                                                .tx_fifo_errors += 1;
                                        adapter->stats64.tx_errors += 1;
-                                       hw->ep_shm_info[my_epid].net_stats
+                                       hw->ep_shm_info[dest_epid].net_stats
                                                                .tx_errors += 1;
 
                                        ret = NETDEV_TX_OK;
                                } else {
-                                       netdev->trans_start = jiffies;
+                                       netif_trans_update(netdev);
                                        netif_tx_stop_queue(cur_queue);
 
                                        if (!work_pending(&adapter->tx_stall_task))
@@ -714,10 +730,10 @@ fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                        } else {
                                if (!is_multi) {
                                        adapter->stats64.tx_packets += 1;
-                                       hw->ep_shm_info[my_epid].net_stats
+                                       hw->ep_shm_info[dest_epid].net_stats
                                                                .tx_packets += 1;
                                        adapter->stats64.tx_bytes += len;
-                                       hw->ep_shm_info[my_epid].net_stats
+                                       hw->ep_shm_info[dest_epid].net_stats
                                                                .tx_bytes += len;
                                }
 
@@ -759,9 +775,12 @@ fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
 
 static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
 {
+       struct fjes_adapter *adapter = netdev_priv(netdev);
        bool running = netif_running(netdev);
-       int ret = 0;
-       int idx;
+       struct fjes_hw *hw = &adapter->hw;
+       unsigned long flags;
+       int ret = -EINVAL;
+       int idx, epidx;
 
        for (idx = 0; fjes_support_mtu[idx] != 0; idx++) {
                if (new_mtu <= fjes_support_mtu[idx]) {
@@ -769,19 +788,58 @@ static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
                        if (new_mtu == netdev->mtu)
                                return 0;
 
-                       if (running)
-                               fjes_close(netdev);
+                       ret = 0;
+                       break;
+               }
+       }
+
+       if (ret)
+               return ret;
+
+       if (running) {
+               spin_lock_irqsave(&hw->rx_status_lock, flags);
+               for (epidx = 0; epidx < hw->max_epid; epidx++) {
+                       if (epidx == hw->my_epid)
+                               continue;
+                       hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
+                               ~FJES_RX_MTU_CHANGING_DONE;
+               }
+               spin_unlock_irqrestore(&hw->rx_status_lock, flags);
+
+               netif_tx_stop_all_queues(netdev);
+               netif_carrier_off(netdev);
+               cancel_work_sync(&adapter->tx_stall_task);
+               napi_disable(&adapter->napi);
+
+               msleep(1000);
+
+               netif_tx_stop_all_queues(netdev);
+       }
+
+       netdev->mtu = new_mtu;
 
-                       netdev->mtu = new_mtu;
+       if (running) {
+               for (epidx = 0; epidx < hw->max_epid; epidx++) {
+                       if (epidx == hw->my_epid)
+                               continue;
 
-                       if (running)
-                               ret = fjes_open(netdev);
+                       spin_lock_irqsave(&hw->rx_status_lock, flags);
+                       fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
+                                           netdev->dev_addr,
+                                           netdev->mtu);
 
-                       return ret;
+                       hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
+                               FJES_RX_MTU_CHANGING_DONE;
+                       spin_unlock_irqrestore(&hw->rx_status_lock, flags);
                }
+
+               netif_tx_wake_all_queues(netdev);
+               netif_carrier_on(netdev);
+               napi_enable(&adapter->napi);
+               napi_schedule(&adapter->napi);
        }
 
-       return -EINVAL;
+       return ret;
 }
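
The rewritten fjes_change_mtu() replaces a full close/open cycle with an explicit ordering contract between peers, enforced through FJES_RX_MTU_CHANGING_DONE; the flag checks added to fjes_hw_epbuf_rx_is_empty() and fjes_tx_stall_task() are the other half of that contract. The sequence, in outline (comments only, mirroring the code above):

        /*
         * 1. clear FJES_RX_MTU_CHANGING_DONE on every other endpoint
         *    (peers then treat our RX buffer as empty and stop filling it)
         * 2. stop TX queues, drop carrier, disable NAPI, sleep ~1 s to drain
         * 3. update netdev->mtu
         * 4. fjes_hw_setup_epbuf() each endpoint for the new frame size
         * 5. set FJES_RX_MTU_CHANGING_DONE again, wake queues, re-enable NAPI
         */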
 
 static int fjes_vlan_rx_add_vid(struct net_device *netdev,
@@ -825,6 +883,7 @@ static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
 {
        struct fjes_hw *hw = &adapter->hw;
        enum ep_partner_status status;
+       unsigned long flags;
 
        status = fjes_hw_get_partner_ep_status(hw, src_epid);
        switch (status) {
@@ -834,8 +893,10 @@ static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
                break;
        case EP_PARTNER_WAITING:
                if (src_epid < hw->my_epid) {
+                       spin_lock_irqsave(&hw->rx_status_lock, flags);
                        hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
                                FJES_RX_STOP_REQ_DONE;
+                       spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 
                        clear_bit(src_epid, &hw->txrx_stop_req_bit);
                        set_bit(src_epid, &adapter->unshare_watch_bitmask);
@@ -861,14 +922,17 @@ static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid)
 {
        struct fjes_hw *hw = &adapter->hw;
        enum ep_partner_status status;
+       unsigned long flags;
 
        set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit);
 
        status = fjes_hw_get_partner_ep_status(hw, src_epid);
        switch (status) {
        case EP_PARTNER_WAITING:
+               spin_lock_irqsave(&hw->rx_status_lock, flags);
                hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
                                FJES_RX_STOP_REQ_DONE;
+               spin_unlock_irqrestore(&hw->rx_status_lock, flags);
                clear_bit(src_epid, &hw->txrx_stop_req_bit);
                /* fall through */
        case EP_PARTNER_UNSHARE:
@@ -1001,13 +1065,17 @@ static int fjes_poll(struct napi_struct *napi, int budget)
        size_t frame_len;
        void *frame;
 
+       spin_lock(&hw->rx_status_lock);
        for (epidx = 0; epidx < hw->max_epid; epidx++) {
                if (epidx == hw->my_epid)
                        continue;
 
-               adapter->hw.ep_shm_info[epidx].tx.info->v1i.rx_status |=
-                       FJES_RX_POLL_WORK;
+               if (fjes_hw_get_partner_ep_status(hw, epidx) ==
+                   EP_PARTNER_SHARED)
+                       adapter->hw.ep_shm_info[epidx]
+                                  .tx.info->v1i.rx_status |= FJES_RX_POLL_WORK;
        }
+       spin_unlock(&hw->rx_status_lock);
 
        while (work_done < budget) {
                prefetch(&adapter->hw);
@@ -1065,13 +1133,17 @@ static int fjes_poll(struct napi_struct *napi, int budget)
                if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) {
                        napi_reschedule(napi);
                } else {
+                       spin_lock(&hw->rx_status_lock);
                        for (epidx = 0; epidx < hw->max_epid; epidx++) {
                                if (epidx == hw->my_epid)
                                        continue;
-                               adapter->hw.ep_shm_info[epidx]
-                                          .tx.info->v1i.rx_status &=
+                               if (fjes_hw_get_partner_ep_status(hw, epidx) ==
+                                   EP_PARTNER_SHARED)
+                                       adapter->hw.ep_shm_info[epidx].tx
+                                                  .info->v1i.rx_status &=
                                                ~FJES_RX_POLL_WORK;
                        }
+                       spin_unlock(&hw->rx_status_lock);
 
                        fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, false);
                }
@@ -1129,7 +1201,7 @@ static int fjes_probe(struct platform_device *plat_dev)
 
        res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
        hw->hw_res.start = res->start;
-       hw->hw_res.size = res->end - res->start + 1;
+       hw->hw_res.size = resource_size(res);
        hw->hw_res.irq = platform_get_irq(plat_dev, 0);
        err = fjes_hw_init(&adapter->hw);
        if (err)
@@ -1203,7 +1275,7 @@ static void fjes_netdev_setup(struct net_device *netdev)
        netdev->watchdog_timeo = FJES_TX_RETRY_INTERVAL;
        netdev->netdev_ops = &fjes_netdev_ops;
        fjes_set_ethtool_ops(netdev);
-       netdev->mtu = fjes_support_mtu[0];
+       netdev->mtu = fjes_support_mtu[3];
        netdev->flags |= IFF_BROADCAST;
        netdev->features |= NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_FILTER;
 }
@@ -1240,6 +1312,7 @@ static void fjes_watch_unshare_task(struct work_struct *work)
        int max_epid, my_epid, epidx;
        int stop_req, stop_req_done;
        ulong unshare_watch_bitmask;
+       unsigned long flags;
        int wait_time = 0;
        int is_shared;
        int ret;
@@ -1292,8 +1365,10 @@ static void fjes_watch_unshare_task(struct work_struct *work)
                        }
                        mutex_unlock(&hw->hw_info.lock);
 
+                       spin_lock_irqsave(&hw->rx_status_lock, flags);
                        fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
                                            netdev->dev_addr, netdev->mtu);
+                       spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 
                        clear_bit(epidx, &hw->txrx_stop_req_bit);
                        clear_bit(epidx, &unshare_watch_bitmask);
@@ -1331,9 +1406,12 @@ static void fjes_watch_unshare_task(struct work_struct *work)
                                }
                                mutex_unlock(&hw->hw_info.lock);
 
+                               spin_lock_irqsave(&hw->rx_status_lock, flags);
                                fjes_hw_setup_epbuf(
                                        &hw->ep_shm_info[epidx].tx,
                                        netdev->dev_addr, netdev->mtu);
+                               spin_unlock_irqrestore(&hw->rx_status_lock,
+                                                      flags);
 
                                clear_bit(epidx, &hw->txrx_stop_req_bit);
                                clear_bit(epidx, &unshare_watch_bitmask);
@@ -1341,8 +1419,11 @@ static void fjes_watch_unshare_task(struct work_struct *work)
                        }
 
                        if (test_bit(epidx, &unshare_watch_bitmask)) {
+                               spin_lock_irqsave(&hw->rx_status_lock, flags);
                                hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
                                                ~FJES_RX_STOP_REQ_DONE;
+                               spin_unlock_irqrestore(&hw->rx_status_lock,
+                                                      flags);
                        }
                }
        }
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index a9fbf17eb256f8b8c648087625286fd5fc7173c0..a6dc11ce497f5c328581ee3db460eb645e3a8fa5 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -495,8 +495,6 @@ static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb,
        int gh_len;
        int err = -ENOSYS;
 
-       udp_tunnel_gro_complete(skb, nhoff);
-
        gh = (struct genevehdr *)(skb->data + nhoff);
        gh_len = geneve_hlen(gh);
        type = gh->proto_type;
@@ -507,6 +505,9 @@ static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb,
                err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);
 
        rcu_read_unlock();
+
+       skb_set_inner_mac_header(skb, nhoff + gh_len);
+
        return err;
 }
 
@@ -696,16 +697,12 @@ static int geneve_build_skb(struct rtable *rt, struct sk_buff *skb,
        min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
                        + GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr);
        err = skb_cow_head(skb, min_headroom);
-       if (unlikely(err)) {
-               kfree_skb(skb);
+       if (unlikely(err))
                goto free_rt;
-       }
 
-       skb = udp_tunnel_handle_offloads(skb, udp_sum);
-       if (IS_ERR(skb)) {
-               err = PTR_ERR(skb);
+       err = udp_tunnel_handle_offloads(skb, udp_sum);
+       if (err)
                goto free_rt;
-       }
 
        gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
        geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
@@ -733,16 +730,12 @@ static int geneve6_build_skb(struct dst_entry *dst, struct sk_buff *skb,
        min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
                        + GENEVE_BASE_HLEN + opt_len + sizeof(struct ipv6hdr);
        err = skb_cow_head(skb, min_headroom);
-       if (unlikely(err)) {
-               kfree_skb(skb);
+       if (unlikely(err))
                goto free_dst;
-       }
 
-       skb = udp_tunnel_handle_offloads(skb, udp_sum);
-       if (IS_ERR(skb)) {
-               err = PTR_ERR(skb);
+       err = udp_tunnel_handle_offloads(skb, udp_sum);
+       if (err)
                goto free_dst;
-       }
 
        gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
        geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
@@ -937,7 +930,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
                err = geneve_build_skb(rt, skb, key->tun_flags, vni,
                                       info->options_len, opts, flags, xnet);
                if (unlikely(err))
-                       goto err;
+                       goto tx_error;
 
                tos = ip_tunnel_ecn_encap(key->tos, iip, skb);
                ttl = key->ttl;
@@ -946,7 +939,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
                err = geneve_build_skb(rt, skb, 0, geneve->vni,
                                       0, NULL, flags, xnet);
                if (unlikely(err))
-                       goto err;
+                       goto tx_error;
 
                tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb);
                ttl = geneve->ttl;
@@ -964,7 +957,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 
 tx_error:
        dev_kfree_skb(skb);
-err:
+
        if (err == -ELOOP)
                dev->stats.collisions++;
        else if (err == -ENETUNREACH)
@@ -1026,7 +1019,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
                                        info->options_len, opts,
                                        flags, xnet);
                if (unlikely(err))
-                       goto err;
+                       goto tx_error;
 
                prio = ip_tunnel_ecn_encap(key->tos, iip, skb);
                ttl = key->ttl;
@@ -1035,7 +1028,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
                err = geneve6_build_skb(dst, skb, 0, geneve->vni,
                                        0, NULL, flags, xnet);
                if (unlikely(err))
-                       goto err;
+                       goto tx_error;
 
                prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel),
                                           iip, skb);
@@ -1054,7 +1047,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 
 tx_error:
        dev_kfree_skb(skb);
-err:
+
        if (err == -ELOOP)
                dev->stats.collisions++;
        else if (err == -ENETUNREACH)
@@ -1180,7 +1173,7 @@ static struct device_type geneve_type = {
  * supply the listening GENEVE udp ports. Callers are expected
  * to implement the ndo_add_geneve_port.
  */
-void geneve_get_rx_port(struct net_device *dev)
+static void geneve_push_rx_ports(struct net_device *dev)
 {
        struct net *net = dev_net(dev);
        struct geneve_net *gn = net_generic(net, geneve_net_id);
@@ -1189,6 +1182,9 @@ void geneve_get_rx_port(struct net_device *dev)
        struct sock *sk;
        __be16 port;
 
+       if (!dev->netdev_ops->ndo_add_geneve_port)
+               return;
+
        rcu_read_lock();
        list_for_each_entry_rcu(gs, &gn->sock_list, list) {
                sk = gs->sock->sk;
@@ -1198,7 +1194,6 @@ void geneve_get_rx_port(struct net_device *dev)
        }
        rcu_read_unlock();
 }
-EXPORT_SYMBOL_GPL(geneve_get_rx_port);
 
 /* Initialize the device structure. */
 static void geneve_setup(struct net_device *dev)
@@ -1546,6 +1541,21 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
 }
 EXPORT_SYMBOL_GPL(geneve_dev_create_fb);
 
+static int geneve_netdevice_event(struct notifier_block *unused,
+                                 unsigned long event, void *ptr)
+{
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+       if (event == NETDEV_OFFLOAD_PUSH_GENEVE)
+               geneve_push_rx_ports(dev);
+
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block geneve_notifier_block __read_mostly = {
+       .notifier_call = geneve_netdevice_event,
+};
+
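With geneve_get_rx_port() no longer exported, NIC drivers stop pulling ports; instead the geneve module registers this netdevice notifier and replays every listening port into ndo_add_geneve_port when a NETDEV_OFFLOAD_PUSH_GENEVE event fires. A hedged sketch of the driver side, assuming the driver requests a replay by raising the event on itself (for example after a firmware reset wipes its offload tables):

        static void my_nic_request_geneve_ports(struct net_device *dev)
        {
                /* assumed trigger: raise the push event on ourselves so
                 * geneve_netdevice_event() above replays every port
                 */
                rtnl_lock();
                call_netdevice_notifiers(NETDEV_OFFLOAD_PUSH_GENEVE, dev);
                rtnl_unlock();
        }
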
 static __net_init int geneve_init_net(struct net *net)
 {
        struct geneve_net *gn = net_generic(net, geneve_net_id);
@@ -1598,11 +1608,18 @@ static int __init geneve_init_module(void)
        if (rc)
                goto out1;
 
-       rc = rtnl_link_register(&geneve_link_ops);
+       rc = register_netdevice_notifier(&geneve_notifier_block);
        if (rc)
                goto out2;
 
+       rc = rtnl_link_register(&geneve_link_ops);
+       if (rc)
+               goto out3;
+
        return 0;
+
+out3:
+       unregister_netdevice_notifier(&geneve_notifier_block);
 out2:
        unregister_pernet_subsys(&geneve_net_ops);
 out1:
@@ -1613,6 +1630,7 @@ late_initcall(geneve_init_module);
 static void __exit geneve_cleanup_module(void)
 {
        rtnl_link_unregister(&geneve_link_ops);
+       unregister_netdevice_notifier(&geneve_notifier_block);
        unregister_pernet_subsys(&geneve_net_ops);
 }
 module_exit(geneve_cleanup_module);
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
new file mode 100644
index 0000000..f7caf1e
--- /dev/null
+++ b/drivers/net/gtp.c
@@ -0,0 +1,1366 @@
+/* GTP according to GSM TS 09.60 / 3GPP TS 29.060
+ *
+ * (C) 2012-2014 by sysmocom - s.f.m.c. GmbH
+ * (C) 2016 by Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * Author: Harald Welte <hwelte@sysmocom.de>
+ *        Pablo Neira Ayuso <pablo@netfilter.org>
+ *        Andreas Schultz <aschultz@travelping.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/skbuff.h>
+#include <linux/udp.h>
+#include <linux/rculist.h>
+#include <linux/jhash.h>
+#include <linux/if_tunnel.h>
+#include <linux/net.h>
+#include <linux/file.h>
+#include <linux/gtp.h>
+
+#include <net/net_namespace.h>
+#include <net/protocol.h>
+#include <net/ip.h>
+#include <net/udp.h>
+#include <net/udp_tunnel.h>
+#include <net/icmp.h>
+#include <net/xfrm.h>
+#include <net/genetlink.h>
+#include <net/netns/generic.h>
+#include <net/gtp.h>
+
+/* An active session for the subscriber. */
+struct pdp_ctx {
+       struct hlist_node       hlist_tid;
+       struct hlist_node       hlist_addr;
+
+       union {
+               u64             tid;
+               struct {
+                       u64     tid;
+                       u16     flow;
+               } v0;
+               struct {
+                       u32     i_tei;
+                       u32     o_tei;
+               } v1;
+       } u;
+       u8                      gtp_version;
+       u16                     af;
+
+       struct in_addr          ms_addr_ip4;
+       struct in_addr          sgsn_addr_ip4;
+
+       atomic_t                tx_seq;
+       struct rcu_head         rcu_head;
+};
+
+/* One instance of the GTP device. */
+struct gtp_dev {
+       struct list_head        list;
+
+       struct socket           *sock0;
+       struct socket           *sock1u;
+
+       struct net              *net;
+       struct net_device       *dev;
+
+       unsigned int            hash_size;
+       struct hlist_head       *tid_hash;
+       struct hlist_head       *addr_hash;
+};
+
+static int gtp_net_id __read_mostly;
+
+struct gtp_net {
+       struct list_head gtp_dev_list;
+};
+
+static u32 gtp_h_initval;
+
+static inline u32 gtp0_hashfn(u64 tid)
+{
+       u32 *tid32 = (u32 *) &tid;
+       return jhash_2words(tid32[0], tid32[1], gtp_h_initval);
+}
+
+static inline u32 gtp1u_hashfn(u32 tid)
+{
+       return jhash_1word(tid, gtp_h_initval);
+}
+
+static inline u32 ipv4_hashfn(__be32 ip)
+{
+       return jhash_1word((__force u32)ip, gtp_h_initval);
+}
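
All three hash functions feed a per-boot random seed (gtp_h_initval) into jhash, and the callers reduce the result modulo hash_size to pick a bucket. A standalone sketch of the bucket selection, with a simple mixer standing in for jhash_2words() and the 64-bit TID split into two 32-bit words as in gtp0_hashfn():

#include <stdint.h>
#include <stdio.h>

/* Stand-in for jhash_2words(); any decent 32-bit mixer works here. */
static uint32_t mix2(uint32_t a, uint32_t b, uint32_t seed)
{
        uint32_t h = seed ^ a;

        h = (h ^ (h >> 16)) * 0x45d9f3b;
        h ^= b;
        h = (h ^ (h >> 16)) * 0x45d9f3b;
        return h ^ (h >> 16);
}

static uint32_t gtp0_bucket(uint64_t tid, uint32_t seed, unsigned int hsize)
{
        /* Split the 64-bit TID into two 32-bit words, as gtp0_hashfn() does. */
        return mix2((uint32_t)tid, (uint32_t)(tid >> 32), seed) % hsize;
}

int main(void)
{
        printf("bucket=%u\n", gtp0_bucket(0x123456789abcdef0ULL, 42, 1024));
        return 0;
}
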
+
+/* Resolve a PDP context structure based on the 64bit TID. */
+static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid)
+{
+       struct hlist_head *head;
+       struct pdp_ctx *pdp;
+
+       head = &gtp->tid_hash[gtp0_hashfn(tid) % gtp->hash_size];
+
+       hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
+               if (pdp->gtp_version == GTP_V0 &&
+                   pdp->u.v0.tid == tid)
+                       return pdp;
+       }
+       return NULL;
+}
+
+/* Resolve a PDP context structure based on the 32bit TEI. */
+static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid)
+{
+       struct hlist_head *head;
+       struct pdp_ctx *pdp;
+
+       head = &gtp->tid_hash[gtp1u_hashfn(tid) % gtp->hash_size];
+
+       hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
+               if (pdp->gtp_version == GTP_V1 &&
+                   pdp->u.v1.i_tei == tid)
+                       return pdp;
+       }
+       return NULL;
+}
+
+/* Resolve a PDP context based on IPv4 address of MS. */
+static struct pdp_ctx *ipv4_pdp_find(struct gtp_dev *gtp, __be32 ms_addr)
+{
+       struct hlist_head *head;
+       struct pdp_ctx *pdp;
+
+       head = &gtp->addr_hash[ipv4_hashfn(ms_addr) % gtp->hash_size];
+
+       hlist_for_each_entry_rcu(pdp, head, hlist_addr) {
+               if (pdp->af == AF_INET &&
+                   pdp->ms_addr_ip4.s_addr == ms_addr)
+                       return pdp;
+       }
+
+       return NULL;
+}
+
+static bool gtp_check_src_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
+                                 unsigned int hdrlen)
+{
+       struct iphdr *iph;
+
+       if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr)))
+               return false;
+
+       iph = (struct iphdr *)(skb->data + hdrlen + sizeof(struct iphdr));
+
+       return iph->saddr != pctx->ms_addr_ip4.s_addr;
+}
+
+/* Check if the inner IP source address in this packet is assigned to any
+ * existing mobile subscriber.
+ */
+static bool gtp_check_src_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
+                            unsigned int hdrlen)
+{
+       switch (ntohs(skb->protocol)) {
+       case ETH_P_IP:
+               return gtp_check_src_ms_ipv4(skb, pctx, hdrlen);
+       }
+       return false;
+}
+
+/* 1 means pass up to the stack, -1 means drop, and 0 means decapsulated. */
+static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
+                              bool xnet)
+{
+       unsigned int hdrlen = sizeof(struct udphdr) +
+                             sizeof(struct gtp0_header);
+       struct gtp0_header *gtp0;
+       struct pdp_ctx *pctx;
+       int ret = 0;
+
+       if (!pskb_may_pull(skb, hdrlen))
+               return -1;
+
+       gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));
+
+       if ((gtp0->flags >> 5) != GTP_V0)
+               return 1;
+
+       if (gtp0->type != GTP_TPDU)
+               return 1;
+
+       rcu_read_lock();
+       pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid));
+       if (!pctx) {
+               netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
+               ret = -1;
+               goto out_rcu;
+       }
+
+       if (!gtp_check_src_ms(skb, pctx, hdrlen)) {
+               netdev_dbg(gtp->dev, "No PDP ctx for this MS\n");
+               ret = -1;
+               goto out_rcu;
+       }
+       rcu_read_unlock();
+
+       /* Get rid of the GTP + UDP headers. */
+       return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet);
+out_rcu:
+       rcu_read_unlock();
+       return ret;
+}
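
The first header octet carries the protocol version in its top three bits, which is what the (gtp0->flags >> 5) test extracts; packets of the wrong version are handed up to the userspace socket rather than dropped. A quick standalone check of that extraction, using the flag bytes the push functions later in this file write:

#include <stdio.h>
#include <stdint.h>

#define GTP_V0 0
#define GTP_V1 1

int main(void)
{
        uint8_t v0_flags = 0x1e;        /* as written by gtp0_push_header() */
        uint8_t v1_flags = 0x38;        /* as written by gtp1_push_header() */

        printf("v0 version field = %u (want %u)\n", v0_flags >> 5, GTP_V0);
        printf("v1 version field = %u (want %u)\n", v1_flags >> 5, GTP_V1);
        return 0;
}
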
+
+static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
+                               bool xnet)
+{
+       unsigned int hdrlen = sizeof(struct udphdr) +
+                             sizeof(struct gtp1_header);
+       struct gtp1_header *gtp1;
+       struct pdp_ctx *pctx;
+       int ret = 0;
+
+       if (!pskb_may_pull(skb, hdrlen))
+               return -1;
+
+       gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
+
+       if ((gtp1->flags >> 5) != GTP_V1)
+               return 1;
+
+       if (gtp1->type != GTP_TPDU)
+               return 1;
+
+       /* From 29.060: "This field shall be present if and only if any one or
+        * more of the S, PN and E flags are set.".
+        *
+        * If any one of these bits is set, the whole optional 4-byte field
+        * (sequence number, N-PDU number and next extension header type)
+        * is present.
+        */
+       if (gtp1->flags & GTP1_F_MASK)
+               hdrlen += 4;
+
+       /* Make sure the header is large enough, including extensions. */
+       if (!pskb_may_pull(skb, hdrlen))
+               return -1;
+
+       gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
+
+       rcu_read_lock();
+       pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid));
+       if (!pctx) {
+               netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
+               ret = -1;
+               goto out_rcu;
+       }
+
+       if (!gtp_check_src_ms(skb, pctx, hdrlen)) {
+               netdev_dbg(gtp->dev, "No PDP ctx for this MS\n");
+               ret = -1;
+               goto out_rcu;
+       }
+       rcu_read_unlock();
+
+       /* Get rid of the GTP + UDP headers. */
+       return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet);
+out_rcu:
+       rcu_read_unlock();
+       return ret;
+}
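
The S, PN and E flags share one optional 4-byte trailer, which is why a second pskb_may_pull() (and a re-read of the header pointer, since pulling may reallocate skb->data) is needed. A standalone sketch of the length computation, assuming GTP1_F_MASK covers those three flag bits:

#include <stdio.h>
#include <stdint.h>

#define GTP1_F_MASK 0x07        /* E | S | PN; assumed value for this sketch */

static unsigned int gtp1_hdrlen(uint8_t flags)
{
        unsigned int hdrlen = 8;        /* mandatory GTPv1 header */

        /* One or more of E/S/PN set => the whole 4-byte trailer is present. */
        if (flags & GTP1_F_MASK)
                hdrlen += 4;
        return hdrlen;
}

int main(void)
{
        printf("plain:    %u bytes\n", gtp1_hdrlen(0x30));      /* 8 */
        printf("with seq: %u bytes\n", gtp1_hdrlen(0x32));      /* 12, S set */
        return 0;
}
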
+
+static void gtp_encap_disable(struct gtp_dev *gtp)
+{
+       if (gtp->sock0 && gtp->sock0->sk) {
+               udp_sk(gtp->sock0->sk)->encap_type = 0;
+               rcu_assign_sk_user_data(gtp->sock0->sk, NULL);
+       }
+       if (gtp->sock1u && gtp->sock1u->sk) {
+               udp_sk(gtp->sock1u->sk)->encap_type = 0;
+               rcu_assign_sk_user_data(gtp->sock1u->sk, NULL);
+       }
+
+       gtp->sock0 = NULL;
+       gtp->sock1u = NULL;
+}
+
+static void gtp_encap_destroy(struct sock *sk)
+{
+       struct gtp_dev *gtp;
+
+       gtp = rcu_dereference_sk_user_data(sk);
+       if (gtp)
+               gtp_encap_disable(gtp);
+}
+
+/* UDP encapsulation receive handler. See net/ipv4/udp.c.
+ * Return codes: 0: success, <0: error, >0: pass up to userspace UDP socket.
+ */
+static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
+{
+       struct pcpu_sw_netstats *stats;
+       struct gtp_dev *gtp;
+       bool xnet;
+       int ret;
+
+       gtp = rcu_dereference_sk_user_data(sk);
+       if (!gtp)
+               return 1;
+
+       netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
+
+       xnet = !net_eq(gtp->net, dev_net(gtp->dev));
+
+       switch (udp_sk(sk)->encap_type) {
+       case UDP_ENCAP_GTP0:
+               netdev_dbg(gtp->dev, "received GTP0 packet\n");
+               ret = gtp0_udp_encap_recv(gtp, skb, xnet);
+               break;
+       case UDP_ENCAP_GTP1U:
+               netdev_dbg(gtp->dev, "received GTP1U packet\n");
+               ret = gtp1u_udp_encap_recv(gtp, skb, xnet);
+               break;
+       default:
+               ret = -1; /* Shouldn't happen. */
+       }
+
+       switch (ret) {
+       case 1:
+               netdev_dbg(gtp->dev, "pass up to the process\n");
+               return 1;
+       case 0:
+               netdev_dbg(gtp->dev, "forwarding packet from GGSN to uplink\n");
+               break;
+       case -1:
+               netdev_dbg(gtp->dev, "GTP packet has been dropped\n");
+               kfree_skb(skb);
+               return 0;
+       }
+
+       /* Now that the UDP and the GTP header have been removed, set up the
+        * new network header. This is required by the upper layer to
+        * calculate the transport header.
+        */
+       skb_reset_network_header(skb);
+
+       skb->dev = gtp->dev;
+
+       stats = this_cpu_ptr(gtp->dev->tstats);
+       u64_stats_update_begin(&stats->syncp);
+       stats->rx_packets++;
+       stats->rx_bytes += skb->len;
+       u64_stats_update_end(&stats->syncp);
+
+       netif_rx(skb);
+
+       return 0;
+}
+
+static int gtp_dev_init(struct net_device *dev)
+{
+       struct gtp_dev *gtp = netdev_priv(dev);
+
+       gtp->dev = dev;
+
+       dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+       if (!dev->tstats)
+               return -ENOMEM;
+
+       return 0;
+}
+
+static void gtp_dev_uninit(struct net_device *dev)
+{
+       struct gtp_dev *gtp = netdev_priv(dev);
+
+       gtp_encap_disable(gtp);
+       free_percpu(dev->tstats);
+}
+
+static struct rtable *ip4_route_output_gtp(struct net *net, struct flowi4 *fl4,
+                                          const struct sock *sk, __be32 daddr)
+{
+       memset(fl4, 0, sizeof(*fl4));
+       fl4->flowi4_oif         = sk->sk_bound_dev_if;
+       fl4->daddr              = daddr;
+       fl4->saddr              = inet_sk(sk)->inet_saddr;
+       fl4->flowi4_tos         = RT_CONN_FLAGS(sk);
+       fl4->flowi4_proto       = sk->sk_protocol;
+
+       return ip_route_output_key(net, fl4);
+}
+
+static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
+{
+       int payload_len = skb->len;
+       struct gtp0_header *gtp0;
+
+       gtp0 = (struct gtp0_header *) skb_push(skb, sizeof(*gtp0));
+
+       gtp0->flags     = 0x1e; /* v0, GTP-non-prime. */
+       gtp0->type      = GTP_TPDU;
+       gtp0->length    = htons(payload_len);
+       gtp0->seq       = htons((atomic_inc_return(&pctx->tx_seq) - 1) % 0xffff);
+       gtp0->flow      = htons(pctx->u.v0.flow);
+       gtp0->number    = 0xff;
+       gtp0->spare[0]  = gtp0->spare[1] = gtp0->spare[2] = 0xff;
+       gtp0->tid       = cpu_to_be64(pctx->u.v0.tid);
+}
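
gtp0_push_header() derives the on-wire sequence number from a shared atomic counter; atomic_inc_return() returns the post-increment value, so subtracting 1 yields the pre-increment value, taken modulo 0xffff. A standalone rendition of that arithmetic:

#include <stdio.h>

static int tx_seq;      /* stands in for atomic_t pctx->tx_seq */

static unsigned int next_seq(void)
{
        /* atomic_inc_return() returns the new value; -1 gives the old one. */
        return (unsigned int)(++tx_seq - 1) % 0xffff;
}

int main(void)
{
        tx_seq = 0xfffd;
        for (int i = 0; i < 4; i++)
                printf("seq=%u\n", next_seq());
        /* prints 65533, 65534, 0, 1: the counter wraps at 0xffff */
        return 0;
}
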
+
+static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
+{
+       int payload_len = skb->len;
+       struct gtp1_header *gtp1;
+
+       gtp1 = (struct gtp1_header *) skb_push(skb, sizeof(*gtp1));
+
+       /* Bits    8  7  6  5  4  3  2  1
+        *        +--+--+--+--+--+--+--+--+
+        *        |version |PT| 1| E| S|PN|
+        *        +--+--+--+--+--+--+--+--+
+        *          0  0  1  1  1  0  0  0
+        */
+       gtp1->flags     = 0x38; /* v1, GTP-non-prime. */
+       gtp1->type      = GTP_TPDU;
+       gtp1->length    = htons(payload_len);
+       gtp1->tid       = htonl(pctx->u.v1.o_tei);
+
+       /* TODO: Support for extension header, sequence number and N-PDU.
+        *       Update the length field if any of them is available.
+        */
+}
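
The 0x38 constant is just the bit diagram above folded into one byte; composing it from named fields makes that explicit. A small standalone sketch (field positions per the diagram; the variable names are mine):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint8_t version = 1;            /* GTPv1, top three bits */
        uint8_t pt = 1;                 /* protocol type: GTP, not GTP' */
        uint8_t reserved = 1;           /* the fixed '1' bit in the diagram */
        uint8_t e = 0, s = 0, pn = 0;   /* no optional trailer */

        uint8_t flags = (uint8_t)(version << 5 | pt << 4 | reserved << 3 |
                                  e << 2 | s << 1 | pn);

        printf("flags = 0x%02x\n", flags);      /* 0x38 */
        return 0;
}
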
+
+struct gtp_pktinfo {
+       struct sock             *sk;
+       struct iphdr            *iph;
+       struct flowi4           fl4;
+       struct rtable           *rt;
+       struct pdp_ctx          *pctx;
+       struct net_device       *dev;
+       __be16                  gtph_port;
+};
+
+static void gtp_push_header(struct sk_buff *skb, struct gtp_pktinfo *pktinfo)
+{
+       switch (pktinfo->pctx->gtp_version) {
+       case GTP_V0:
+               pktinfo->gtph_port = htons(GTP0_PORT);
+               gtp0_push_header(skb, pktinfo->pctx);
+               break;
+       case GTP_V1:
+               pktinfo->gtph_port = htons(GTP1U_PORT);
+               gtp1_push_header(skb, pktinfo->pctx);
+               break;
+       }
+}
+
+static inline void gtp_set_pktinfo_ipv4(struct gtp_pktinfo *pktinfo,
+                                       struct sock *sk, struct iphdr *iph,
+                                       struct pdp_ctx *pctx, struct rtable *rt,
+                                       struct flowi4 *fl4,
+                                       struct net_device *dev)
+{
+       pktinfo->sk     = sk;
+       pktinfo->iph    = iph;
+       pktinfo->pctx   = pctx;
+       pktinfo->rt     = rt;
+       pktinfo->fl4    = *fl4;
+       pktinfo->dev    = dev;
+}
+
+static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
+                            struct gtp_pktinfo *pktinfo)
+{
+       struct gtp_dev *gtp = netdev_priv(dev);
+       struct pdp_ctx *pctx;
+       struct rtable *rt;
+       struct flowi4 fl4;
+       struct iphdr *iph;
+       struct sock *sk;
+       __be16 df;
+       int mtu;
+
+       /* Read the IP destination address and resolve the PDP context.
+        * Prepend PDP header with TEI/TID from PDP ctx.
+        */
+       iph = ip_hdr(skb);
+       pctx = ipv4_pdp_find(gtp, iph->daddr);
+       if (!pctx) {
+               netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n",
+                          &iph->daddr);
+               return -ENOENT;
+       }
+       netdev_dbg(dev, "found PDP context %p\n", pctx);
+
+       switch (pctx->gtp_version) {
+       case GTP_V0:
+               if (gtp->sock0)
+                       sk = gtp->sock0->sk;
+               else
+                       sk = NULL;
+               break;
+       case GTP_V1:
+               if (gtp->sock1u)
+                       sk = gtp->sock1u->sk;
+               else
+                       sk = NULL;
+               break;
+       default:
+               return -ENOENT;
+       }
+
+       if (!sk) {
+               netdev_dbg(dev, "no userspace socket is available, skip\n");
+               return -ENOENT;
+       }
+
+       rt = ip4_route_output_gtp(sock_net(sk), &fl4, gtp->sock0->sk,
+                                 pctx->sgsn_addr_ip4.s_addr);
+       if (IS_ERR(rt)) {
+               netdev_dbg(dev, "no route to SSGN %pI4\n",
+                          &pctx->sgsn_addr_ip4.s_addr);
+               dev->stats.tx_carrier_errors++;
+               goto err;
+       }
+
+       if (rt->dst.dev == dev) {
+               netdev_dbg(dev, "circular route to SSGN %pI4\n",
+                          &pctx->sgsn_addr_ip4.s_addr);
+               dev->stats.collisions++;
+               goto err_rt;
+       }
+
+       skb_dst_drop(skb);
+
+       /* This is similar to tnl_update_pmtu(). */
+       df = iph->frag_off;
+       if (df) {
+               mtu = dst_mtu(&rt->dst) - dev->hard_header_len -
+                       sizeof(struct iphdr) - sizeof(struct udphdr);
+               switch (pctx->gtp_version) {
+               case GTP_V0:
+                       mtu -= sizeof(struct gtp0_header);
+                       break;
+               case GTP_V1:
+                       mtu -= sizeof(struct gtp1_header);
+                       break;
+               }
+       } else {
+               mtu = dst_mtu(&rt->dst);
+       }
+
+       rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu);
+
+       if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
+           mtu < ntohs(iph->tot_len)) {
+               netdev_dbg(dev, "packet too big, fragmentation needed\n");
+               memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+               icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+                         htonl(mtu));
+               goto err_rt;
+       }
+
+       gtp_set_pktinfo_ipv4(pktinfo, sk, iph, pctx, rt, &fl4, dev);
+       gtp_push_header(skb, pktinfo);
+
+       return 0;
+err_rt:
+       ip_rt_put(rt);
+err:
+       return -EBADMSG;
+}
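
When DF is set, the usable inner MTU is the route MTU minus every layer of encapsulation the device adds, which mirrors the df branch above. A standalone rendition of that subtraction with typical sizes (20-byte IPv4 header, 8-byte UDP header; GTP header sizes as assumed below):

#include <stdio.h>

int main(void)
{
        int dst_mtu = 1500;
        int hard_header_len = 0;        /* gtp_link_setup() sets it to 0 */
        int iphdr = 20, udphdr = 8;
        int gtp0_hdr = 20, gtp1_hdr = 8;        /* assumed sizeof() values */

        int mtu_v0 = dst_mtu - hard_header_len - iphdr - udphdr - gtp0_hdr;
        int mtu_v1 = dst_mtu - hard_header_len - iphdr - udphdr - gtp1_hdr;

        printf("inner MTU, GTPv0: %d\n", mtu_v0);       /* 1452 */
        printf("inner MTU, GTPv1: %d\n", mtu_v1);       /* 1464 */
        return 0;
}
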
+
+static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       unsigned int proto = ntohs(skb->protocol);
+       struct gtp_pktinfo pktinfo;
+       int err;
+
+       /* Ensure there is sufficient headroom. */
+       if (skb_cow_head(skb, dev->needed_headroom))
+               goto tx_err;
+
+       skb_reset_inner_headers(skb);
+
+       /* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */
+       rcu_read_lock();
+       switch (proto) {
+       case ETH_P_IP:
+               err = gtp_build_skb_ip4(skb, dev, &pktinfo);
+               break;
+       default:
+               err = -EOPNOTSUPP;
+               break;
+       }
+       rcu_read_unlock();
+
+       if (err < 0)
+               goto tx_err;
+
+       switch (proto) {
+       case ETH_P_IP:
+               netdev_dbg(pktinfo.dev, "gtp -> IP src: %pI4 dst: %pI4\n",
+                          &pktinfo.iph->saddr, &pktinfo.iph->daddr);
+               udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb,
+                                   pktinfo.fl4.saddr, pktinfo.fl4.daddr,
+                                   pktinfo.iph->tos,
+                                   ip4_dst_hoplimit(&pktinfo.rt->dst),
+                                   htons(IP_DF),
+                                   pktinfo.gtph_port, pktinfo.gtph_port,
+                                   true, false);
+               break;
+       }
+
+       return NETDEV_TX_OK;
+tx_err:
+       dev->stats.tx_errors++;
+       dev_kfree_skb(skb);
+       return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops gtp_netdev_ops = {
+       .ndo_init               = gtp_dev_init,
+       .ndo_uninit             = gtp_dev_uninit,
+       .ndo_start_xmit         = gtp_dev_xmit,
+       .ndo_get_stats64        = ip_tunnel_get_stats64,
+};
+
+static void gtp_link_setup(struct net_device *dev)
+{
+       dev->netdev_ops         = &gtp_netdev_ops;
+       dev->destructor         = free_netdev;
+
+       dev->hard_header_len = 0;
+       dev->addr_len = 0;
+
+       /* Zero header length. */
+       dev->type = ARPHRD_NONE;
+       dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+
+       dev->priv_flags |= IFF_NO_QUEUE;
+       dev->features   |= NETIF_F_LLTX;
+       netif_keep_dst(dev);
+
+       /* Assume largest header, ie. GTPv0. */
+       dev->needed_headroom    = LL_MAX_HEADER +
+                                 sizeof(struct iphdr) +
+                                 sizeof(struct udphdr) +
+                                 sizeof(struct gtp0_header);
+}
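
needed_headroom reserves room for the worst case, GTPv0. A sketch that reconstructs the two headers as packed structs to show where the 20- and 8-byte figures come from; the layouts are inferred from the push functions above, so treat them as assumptions rather than the definitive net/gtp.h definitions:

#include <stdio.h>
#include <stdint.h>

struct gtp0_header {    /* reconstructed from gtp0_push_header() */
        uint8_t  flags;
        uint8_t  type;
        uint16_t length;
        uint16_t seq;
        uint16_t flow;
        uint8_t  number;
        uint8_t  spare[3];
        uint64_t tid;
} __attribute__((packed));

struct gtp1_header {    /* reconstructed from gtp1_push_header() */
        uint8_t  flags;
        uint8_t  type;
        uint16_t length;
        uint32_t tid;
} __attribute__((packed));

int main(void)
{
        printf("gtp0_header: %zu bytes\n", sizeof(struct gtp0_header)); /* 20 */
        printf("gtp1_header: %zu bytes\n", sizeof(struct gtp1_header)); /* 8 */
        return 0;
}
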
+
+static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
+static void gtp_hashtable_free(struct gtp_dev *gtp);
+static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
+                           int fd_gtp0, int fd_gtp1, struct net *src_net);
+
+static int gtp_newlink(struct net *src_net, struct net_device *dev,
+                       struct nlattr *tb[], struct nlattr *data[])
+{
+       int hashsize, err, fd0, fd1;
+       struct gtp_dev *gtp;
+       struct gtp_net *gn;
+
+       if (!data[IFLA_GTP_FD0] || !data[IFLA_GTP_FD1])
+               return -EINVAL;
+
+       gtp = netdev_priv(dev);
+
+       fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
+       fd1 = nla_get_u32(data[IFLA_GTP_FD1]);
+
+       err = gtp_encap_enable(dev, gtp, fd0, fd1, src_net);
+       if (err < 0)
+               goto out_err;
+
+       if (!data[IFLA_GTP_PDP_HASHSIZE])
+               hashsize = 1024;
+       else
+               hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]);
+
+       err = gtp_hashtable_new(gtp, hashsize);
+       if (err < 0)
+               goto out_encap;
+
+       err = register_netdevice(dev);
+       if (err < 0) {
+               netdev_dbg(dev, "failed to register new netdev %d\n", err);
+               goto out_hashtable;
+       }
+
+       gn = net_generic(dev_net(dev), gtp_net_id);
+       list_add_rcu(&gtp->list, &gn->gtp_dev_list);
+
+       netdev_dbg(dev, "registered new GTP interface\n");
+
+       return 0;
+
+out_hashtable:
+       gtp_hashtable_free(gtp);
+out_encap:
+       gtp_encap_disable(gtp);
+out_err:
+       return err;
+}
+
+static void gtp_dellink(struct net_device *dev, struct list_head *head)
+{
+       struct gtp_dev *gtp = netdev_priv(dev);
+
+       gtp_encap_disable(gtp);
+       gtp_hashtable_free(gtp);
+       list_del_rcu(&gtp->list);
+       unregister_netdevice_queue(dev, head);
+}
+
+static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = {
+       [IFLA_GTP_FD0]                  = { .type = NLA_U32 },
+       [IFLA_GTP_FD1]                  = { .type = NLA_U32 },
+       [IFLA_GTP_PDP_HASHSIZE]         = { .type = NLA_U32 },
+};
+
+static int gtp_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+       if (!data)
+               return -EINVAL;
+
+       return 0;
+}
+
+static size_t gtp_get_size(const struct net_device *dev)
+{
+       return nla_total_size(sizeof(__u32));   /* IFLA_GTP_PDP_HASHSIZE */
+}
+
+static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+       struct gtp_dev *gtp = netdev_priv(dev);
+
+       if (nla_put_u32(skb, IFLA_GTP_PDP_HASHSIZE, gtp->hash_size))
+               goto nla_put_failure;
+
+       return 0;
+
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+static struct rtnl_link_ops gtp_link_ops __read_mostly = {
+       .kind           = "gtp",
+       .maxtype        = IFLA_GTP_MAX,
+       .policy         = gtp_policy,
+       .priv_size      = sizeof(struct gtp_dev),
+       .setup          = gtp_link_setup,
+       .validate       = gtp_validate,
+       .newlink        = gtp_newlink,
+       .dellink        = gtp_dellink,
+       .get_size       = gtp_get_size,
+       .fill_info      = gtp_fill_info,
+};
+
+static struct net *gtp_genl_get_net(struct net *src_net, struct nlattr *tb[])
+{
+       struct net *net;
+
+       /* Examine the link attributes and figure out which network namespace
+        * we are talking about.
+        */
+       if (tb[GTPA_NET_NS_FD])
+               net = get_net_ns_by_fd(nla_get_u32(tb[GTPA_NET_NS_FD]));
+       else
+               net = get_net(src_net);
+
+       return net;
+}
+
+static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize)
+{
+       int i;
+
+       gtp->addr_hash = kmalloc(sizeof(struct hlist_head) * hsize, GFP_KERNEL);
+       if (gtp->addr_hash == NULL)
+               return -ENOMEM;
+
+       gtp->tid_hash = kmalloc(sizeof(struct hlist_head) * hsize, GFP_KERNEL);
+       if (gtp->tid_hash == NULL)
+               goto err1;
+
+       gtp->hash_size = hsize;
+
+       for (i = 0; i < hsize; i++) {
+               INIT_HLIST_HEAD(&gtp->addr_hash[i]);
+               INIT_HLIST_HEAD(&gtp->tid_hash[i]);
+       }
+       return 0;
+err1:
+       kfree(gtp->addr_hash);
+       return -ENOMEM;
+}
+
+static void gtp_hashtable_free(struct gtp_dev *gtp)
+{
+       struct pdp_ctx *pctx;
+       int i;
+
+       for (i = 0; i < gtp->hash_size; i++) {
+               hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) {
+                       hlist_del_rcu(&pctx->hlist_tid);
+                       hlist_del_rcu(&pctx->hlist_addr);
+                       kfree_rcu(pctx, rcu_head);
+               }
+       }
+       synchronize_rcu();
+       kfree(gtp->addr_hash);
+       kfree(gtp->tid_hash);
+}
+
+static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
+                           int fd_gtp0, int fd_gtp1, struct net *src_net)
+{
+       struct udp_tunnel_sock_cfg tuncfg = {NULL};
+       struct socket *sock0, *sock1u;
+       int err;
+
+       netdev_dbg(dev, "enable gtp on %d, %d\n", fd_gtp0, fd_gtp1);
+
+       sock0 = sockfd_lookup(fd_gtp0, &err);
+       if (sock0 == NULL) {
+               netdev_dbg(dev, "socket fd=%d not found (gtp0)\n", fd_gtp0);
+               return -ENOENT;
+       }
+
+       if (sock0->sk->sk_protocol != IPPROTO_UDP) {
+               netdev_dbg(dev, "socket fd=%d not UDP\n", fd_gtp0);
+               err = -EINVAL;
+               goto err1;
+       }
+
+       sock1u = sockfd_lookup(fd_gtp1, &err);
+       if (sock1u == NULL) {
+               netdev_dbg(dev, "socket fd=%d not found (gtp1u)\n", fd_gtp1);
+               err = -ENOENT;
+               goto err1;
+       }
+
+       if (sock1u->sk->sk_protocol != IPPROTO_UDP) {
+               netdev_dbg(dev, "socket fd=%d not UDP\n", fd_gtp1);
+               err = -EINVAL;
+               goto err2;
+       }
+
+       netdev_dbg(dev, "enable gtp on %p, %p\n", sock0, sock1u);
+
+       gtp->sock0 = sock0;
+       gtp->sock1u = sock1u;
+       gtp->net = src_net;
+
+       tuncfg.sk_user_data = gtp;
+       tuncfg.encap_rcv = gtp_encap_recv;
+       tuncfg.encap_destroy = gtp_encap_destroy;
+
+       tuncfg.encap_type = UDP_ENCAP_GTP0;
+       setup_udp_tunnel_sock(sock_net(gtp->sock0->sk), gtp->sock0, &tuncfg);
+
+       tuncfg.encap_type = UDP_ENCAP_GTP1U;
+       setup_udp_tunnel_sock(sock_net(gtp->sock1u->sk), gtp->sock1u, &tuncfg);
+
+       err = 0;
+err2:
+       sockfd_put(sock1u);
+err1:
+       sockfd_put(sock0);
+       return err;
+}
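
The two file descriptors handed over as IFLA_GTP_FD0/IFLA_GTP_FD1 are plain UDP sockets that a control-plane daemon creates and binds before creating the link; the ports are assumed here to be the conventional GTP values, 3386 for GTPv0 and 2152 for GTP-U. A minimal userspace sketch of that part (error handling trimmed; the rtnetlink link creation itself is left to a library such as libgtpnl):

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>
#include <sys/socket.h>

static int gtp_sock(uint16_t port)
{
        struct sockaddr_in addr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_ANY);
        addr.sin_port = htons(port);
        if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
                return -1;
        return fd;
}

int main(void)
{
        int fd0 = gtp_sock(3386);       /* GTPv0 socket, IFLA_GTP_FD0 */
        int fd1 = gtp_sock(2152);       /* GTP-U socket, IFLA_GTP_FD1 */

        printf("fd0=%d fd1=%d\n", fd0, fd1);
        /* These fds would now be passed as IFLA_GTP_FD0/FD1 attributes. */
        return 0;
}
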
+
+static struct net_device *gtp_find_dev(struct net *net, int ifindex)
+{
+       struct gtp_net *gn = net_generic(net, gtp_net_id);
+       struct gtp_dev *gtp;
+
+       list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
+               if (ifindex == gtp->dev->ifindex)
+                       return gtp->dev;
+       }
+       return NULL;
+}
+
+static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
+{
+       pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
+       pctx->af = AF_INET;
+       pctx->sgsn_addr_ip4.s_addr =
+               nla_get_be32(info->attrs[GTPA_SGSN_ADDRESS]);
+       pctx->ms_addr_ip4.s_addr =
+               nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
+
+       switch (pctx->gtp_version) {
+       case GTP_V0:
+               /* According to TS 09.60, sections 7.5.1 and 7.5.2, the flow
+                * label needs to be the same for uplink and downlink packets,
+                * so let's annotate this.
+                */
+               pctx->u.v0.tid = nla_get_u64(info->attrs[GTPA_TID]);
+               pctx->u.v0.flow = nla_get_u16(info->attrs[GTPA_FLOW]);
+               break;
+       case GTP_V1:
+               pctx->u.v1.i_tei = nla_get_u32(info->attrs[GTPA_I_TEI]);
+               pctx->u.v1.o_tei = nla_get_u32(info->attrs[GTPA_O_TEI]);
+               break;
+       default:
+               break;
+       }
+}
+
+static int ipv4_pdp_add(struct net_device *dev, struct genl_info *info)
+{
+       struct gtp_dev *gtp = netdev_priv(dev);
+       u32 hash_ms, hash_tid = 0;
+       struct pdp_ctx *pctx;
+       bool found = false;
+       __be32 ms_addr;
+
+       ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
+       hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size;
+
+       hlist_for_each_entry_rcu(pctx, &gtp->addr_hash[hash_ms], hlist_addr) {
+               if (pctx->ms_addr_ip4.s_addr == ms_addr) {
+                       found = true;
+                       break;
+               }
+       }
+
+       if (found) {
+               if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
+                       return -EEXIST;
+               if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE)
+                       return -EOPNOTSUPP;
+
+               ipv4_pdp_fill(pctx, info);
+
+               if (pctx->gtp_version == GTP_V0)
+                       netdev_dbg(dev, "GTPv0-U: update tunnel id = %llx (pdp %p)\n",
+                                  pctx->u.v0.tid, pctx);
+               else if (pctx->gtp_version == GTP_V1)
+                       netdev_dbg(dev, "GTPv1-U: update tunnel id = %x/%x (pdp %p)\n",
+                                  pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
+
+               return 0;
+
+       }
+
+       pctx = kmalloc(sizeof(struct pdp_ctx), GFP_KERNEL);
+       if (pctx == NULL)
+               return -ENOMEM;
+
+       ipv4_pdp_fill(pctx, info);
+       atomic_set(&pctx->tx_seq, 0);
+
+       switch (pctx->gtp_version) {
+       case GTP_V0:
+               /* TS 09.60: "The flow label identifies unambiguously a GTP
+                * flow.". We use the tid for this instead; I cannot find a
+                * situation in which this doesn't unambiguously identify the
+                * PDP context.
+                */
+               hash_tid = gtp0_hashfn(pctx->u.v0.tid) % gtp->hash_size;
+               break;
+       case GTP_V1:
+               hash_tid = gtp1u_hashfn(pctx->u.v1.i_tei) % gtp->hash_size;
+               break;
+       }
+
+       hlist_add_head_rcu(&pctx->hlist_addr, &gtp->addr_hash[hash_ms]);
+       hlist_add_head_rcu(&pctx->hlist_tid, &gtp->tid_hash[hash_tid]);
+
+       switch (pctx->gtp_version) {
+       case GTP_V0:
+               netdev_dbg(dev, "GTPv0-U: new PDP ctx id=%llx ssgn=%pI4 ms=%pI4 (pdp=%p)\n",
+                          pctx->u.v0.tid, &pctx->sgsn_addr_ip4,
+                          &pctx->ms_addr_ip4, pctx);
+               break;
+       case GTP_V1:
+               netdev_dbg(dev, "GTPv1-U: new PDP ctx id=%x/%x ssgn=%pI4 ms=%pI4 (pdp=%p)\n",
+                          pctx->u.v1.i_tei, pctx->u.v1.o_tei,
+                          &pctx->sgsn_addr_ip4, &pctx->ms_addr_ip4, pctx);
+               break;
+       }
+
+       return 0;
+}
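
The netlink message flags give the add operation its create/update semantics: NLM_F_EXCL makes an existing context an error, NLM_F_REPLACE is refused outright, and a bare add for an already-hashed MS address becomes an in-place update. The same decision table as a standalone function (flag values per include/uapi/linux/netlink.h):

#include <stdio.h>

#define EEXIST        17
#define EOPNOTSUPP    95
#define NLM_F_REPLACE 0x100
#define NLM_F_EXCL    0x200

static int pdp_add_semantics(int exists, unsigned int nlmsg_flags)
{
        if (exists) {
                if (nlmsg_flags & NLM_F_EXCL)
                        return -EEXIST;
                if (nlmsg_flags & NLM_F_REPLACE)
                        return -EOPNOTSUPP;
                return 0;       /* update the existing context in place */
        }
        return 1;               /* allocate and hash a new context */
}

int main(void)
{
        printf("%d\n", pdp_add_semantics(1, NLM_F_EXCL));       /* -17 */
        printf("%d\n", pdp_add_semantics(1, 0));                /* 0 */
        printf("%d\n", pdp_add_semantics(0, NLM_F_EXCL));       /* 1 */
        return 0;
}
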
+
+static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
+{
+       struct net_device *dev;
+       struct net *net;
+
+       if (!info->attrs[GTPA_VERSION] ||
+           !info->attrs[GTPA_LINK] ||
+           !info->attrs[GTPA_SGSN_ADDRESS] ||
+           !info->attrs[GTPA_MS_ADDRESS])
+               return -EINVAL;
+
+       switch (nla_get_u32(info->attrs[GTPA_VERSION])) {
+       case GTP_V0:
+               if (!info->attrs[GTPA_TID] ||
+                   !info->attrs[GTPA_FLOW])
+                       return -EINVAL;
+               break;
+       case GTP_V1:
+               if (!info->attrs[GTPA_I_TEI] ||
+                   !info->attrs[GTPA_O_TEI])
+                       return -EINVAL;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       net = gtp_genl_get_net(sock_net(skb->sk), info->attrs);
+       if (IS_ERR(net))
+               return PTR_ERR(net);
+
+       /* Check if there's an existing gtpX device to configure */
+       dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK]));
+       if (dev == NULL)
+               return -ENODEV;
+
+       return ipv4_pdp_add(dev, info);
+}
+
+static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info)
+{
+       struct net_device *dev;
+       struct pdp_ctx *pctx;
+       struct gtp_dev *gtp;
+       struct net *net;
+
+       if (!info->attrs[GTPA_VERSION] ||
+           !info->attrs[GTPA_LINK])
+               return -EINVAL;
+
+       net = gtp_genl_get_net(sock_net(skb->sk), info->attrs);
+       if (IS_ERR(net))
+               return PTR_ERR(net);
+
+       /* Check if there's an existing gtpX device to configure */
+       dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK]));
+       if (dev == NULL)
+               return -ENODEV;
+
+       gtp = netdev_priv(dev);
+
+       switch (nla_get_u32(info->attrs[GTPA_VERSION])) {
+       case GTP_V0:
+               if (!info->attrs[GTPA_TID])
+                       return -EINVAL;
+               pctx = gtp0_pdp_find(gtp, nla_get_u64(info->attrs[GTPA_TID]));
+               break;
+       case GTP_V1:
+               if (!info->attrs[GTPA_I_TEI])
+                       return -EINVAL;
+               pctx = gtp1_pdp_find(gtp, nla_get_u32(info->attrs[GTPA_I_TEI]));
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       if (pctx == NULL)
+               return -ENOENT;
+
+       if (pctx->gtp_version == GTP_V0)
+               netdev_dbg(dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n",
+                          pctx->u.v0.tid, pctx);
+       else if (pctx->gtp_version == GTP_V1)
+               netdev_dbg(dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n",
+                          pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
+
+       hlist_del_rcu(&pctx->hlist_tid);
+       hlist_del_rcu(&pctx->hlist_addr);
+       kfree_rcu(pctx, rcu_head);
+
+       return 0;
+}
+
+static struct genl_family gtp_genl_family = {
+       .id             = GENL_ID_GENERATE,
+       .name           = "gtp",
+       .version        = 0,
+       .hdrsize        = 0,
+       .maxattr        = GTPA_MAX,
+       .netnsok        = true,
+};
+
+static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
+                             u32 type, struct pdp_ctx *pctx)
+{
+       void *genlh;
+
+       genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, 0,
+                           type);
+       if (genlh == NULL)
+               goto nlmsg_failure;
+
+       if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||
+           nla_put_be32(skb, GTPA_SGSN_ADDRESS, pctx->sgsn_addr_ip4.s_addr) ||
+           nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr))
+               goto nla_put_failure;
+
+       switch (pctx->gtp_version) {
+       case GTP_V0:
+               if (nla_put_u64_64bit(skb, GTPA_TID, pctx->u.v0.tid, GTPA_PAD) ||
+                   nla_put_u16(skb, GTPA_FLOW, pctx->u.v0.flow))
+                       goto nla_put_failure;
+               break;
+       case GTP_V1:
+               if (nla_put_u32(skb, GTPA_I_TEI, pctx->u.v1.i_tei) ||
+                   nla_put_u32(skb, GTPA_O_TEI, pctx->u.v1.o_tei))
+                       goto nla_put_failure;
+               break;
+       }
+       genlmsg_end(skb, genlh);
+       return 0;
+
+nlmsg_failure:
+nla_put_failure:
+       genlmsg_cancel(skb, genlh);
+       return -EMSGSIZE;
+}
+
+static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
+{
+       struct pdp_ctx *pctx = NULL;
+       struct net_device *dev;
+       struct sk_buff *skb2;
+       struct gtp_dev *gtp;
+       u32 gtp_version;
+       struct net *net;
+       int err;
+
+       if (!info->attrs[GTPA_VERSION] ||
+           !info->attrs[GTPA_LINK])
+               return -EINVAL;
+
+       gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
+       switch (gtp_version) {
+       case GTP_V0:
+       case GTP_V1:
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       net = gtp_genl_get_net(sock_net(skb->sk), info->attrs);
+       if (IS_ERR(net))
+               return PTR_ERR(net);
+
+       /* Check if there's an existing gtpX device to configure */
+       dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK]));
+       if (dev == NULL)
+               return -ENODEV;
+
+       gtp = netdev_priv(dev);
+
+       rcu_read_lock();
+       if (gtp_version == GTP_V0 &&
+           info->attrs[GTPA_TID]) {
+               u64 tid = nla_get_u64(info->attrs[GTPA_TID]);
+
+               pctx = gtp0_pdp_find(gtp, tid);
+       } else if (gtp_version == GTP_V1 &&
+                info->attrs[GTPA_I_TEI]) {
+               u32 tid = nla_get_u32(info->attrs[GTPA_I_TEI]);
+
+               pctx = gtp1_pdp_find(gtp, tid);
+       } else if (info->attrs[GTPA_MS_ADDRESS]) {
+               __be32 ip = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
+
+               pctx = ipv4_pdp_find(gtp, ip);
+       }
+
+       if (pctx == NULL) {
+               err = -ENOENT;
+               goto err_unlock;
+       }
+
+       skb2 = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+       if (skb2 == NULL) {
+               err = -ENOMEM;
+               goto err_unlock;
+       }
+
+       err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid,
+                                info->snd_seq, info->nlhdr->nlmsg_type, pctx);
+       if (err < 0)
+               goto err_unlock_free;
+
+       rcu_read_unlock();
+       return genlmsg_unicast(genl_info_net(info), skb2, info->snd_portid);
+
+err_unlock_free:
+       kfree_skb(skb2);
+err_unlock:
+       rcu_read_unlock();
+       return err;
+}
+
+static int gtp_genl_dump_pdp(struct sk_buff *skb,
+                               struct netlink_callback *cb)
+{
+       struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
+       struct net *net = sock_net(skb->sk);
+       struct gtp_net *gn = net_generic(net, gtp_net_id);
+       unsigned long tid = cb->args[1];
+       int i, k = cb->args[0], ret;
+       struct pdp_ctx *pctx;
+
+       if (cb->args[4])
+               return 0;
+
+       list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
+               if (last_gtp && last_gtp != gtp)
+                       continue;
+               else
+                       last_gtp = NULL;
+
+               for (i = k; i < gtp->hash_size; i++) {
+                       hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) {
+                               if (tid && tid != pctx->u.tid)
+                                       continue;
+                               else
+                                       tid = 0;
+
+                               ret = gtp_genl_fill_info(skb,
+                                                        NETLINK_CB(cb->skb).portid,
+                                                        cb->nlh->nlmsg_seq,
+                                                        cb->nlh->nlmsg_type, pctx);
+                               if (ret < 0) {
+                                       cb->args[0] = i;
+                                       cb->args[1] = pctx->u.tid;
+                                       cb->args[2] = (unsigned long)gtp;
+                                       goto out;
+                               }
+                       }
+               }
+       }
+       cb->args[4] = 1;
+out:
+       return skb->len;
+}
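
Netlink dumps can be interrupted whenever the output skb fills up, so the function parks its position (bucket index, TID, device pointer) in cb->args and resumes from there on the next invocation. A standalone sketch of that cursor pattern over a plain array, assuming fill() fails when the buffer is full:

#include <stdio.h>

#define N 10

/* Pretend the output buffer holds three entries per dump pass. */
static int fill(int *budget, int value)
{
        if (*budget == 0)
                return -1;      /* buffer full: stop and save the cursor */
        (*budget)--;
        printf("dumped %d\n", value);
        return 0;
}

static void dump(long args[2])
{
        int budget = 3;

        if (args[1])            /* args[1] != 0 => dump already finished */
                return;
        for (int i = (int)args[0]; i < N; i++) {
                if (fill(&budget, i) < 0) {
                        args[0] = i;    /* resume here next time */
                        return;
                }
        }
        args[1] = 1;            /* done marker, like cb->args[4] */
}

int main(void)
{
        long args[2] = { 0, 0 };

        dump(args);             /* 0, 1, 2 */
        dump(args);             /* 3, 4, 5 */
        dump(args);             /* 6, 7, 8 */
        dump(args);             /* 9 */
        dump(args);             /* nothing: finished */
        return 0;
}
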
+
+static struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
+       [GTPA_LINK]             = { .type = NLA_U32, },
+       [GTPA_VERSION]          = { .type = NLA_U32, },
+       [GTPA_TID]              = { .type = NLA_U64, },
+       [GTPA_SGSN_ADDRESS]     = { .type = NLA_U32, },
+       [GTPA_MS_ADDRESS]       = { .type = NLA_U32, },
+       [GTPA_FLOW]             = { .type = NLA_U16, },
+       [GTPA_NET_NS_FD]        = { .type = NLA_U32, },
+       [GTPA_I_TEI]            = { .type = NLA_U32, },
+       [GTPA_O_TEI]            = { .type = NLA_U32, },
+};
+
+static const struct genl_ops gtp_genl_ops[] = {
+       {
+               .cmd = GTP_CMD_NEWPDP,
+               .doit = gtp_genl_new_pdp,
+               .policy = gtp_genl_policy,
+               .flags = GENL_ADMIN_PERM,
+       },
+       {
+               .cmd = GTP_CMD_DELPDP,
+               .doit = gtp_genl_del_pdp,
+               .policy = gtp_genl_policy,
+               .flags = GENL_ADMIN_PERM,
+       },
+       {
+               .cmd = GTP_CMD_GETPDP,
+               .doit = gtp_genl_get_pdp,
+               .dumpit = gtp_genl_dump_pdp,
+               .policy = gtp_genl_policy,
+               .flags = GENL_ADMIN_PERM,
+       },
+};
+
+static int __net_init gtp_net_init(struct net *net)
+{
+       struct gtp_net *gn = net_generic(net, gtp_net_id);
+
+       INIT_LIST_HEAD(&gn->gtp_dev_list);
+       return 0;
+}
+
+static void __net_exit gtp_net_exit(struct net *net)
+{
+       struct gtp_net *gn = net_generic(net, gtp_net_id);
+       struct gtp_dev *gtp;
+       LIST_HEAD(list);
+
+       rtnl_lock();
+       list_for_each_entry(gtp, &gn->gtp_dev_list, list)
+               gtp_dellink(gtp->dev, &list);
+
+       unregister_netdevice_many(&list);
+       rtnl_unlock();
+}
+
+static struct pernet_operations gtp_net_ops = {
+       .init   = gtp_net_init,
+       .exit   = gtp_net_exit,
+       .id     = &gtp_net_id,
+       .size   = sizeof(struct gtp_net),
+};
+
+static int __init gtp_init(void)
+{
+       int err;
+
+       get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval));
+
+       err = rtnl_link_register(&gtp_link_ops);
+       if (err < 0)
+               goto error_out;
+
+       err = genl_register_family_with_ops(&gtp_genl_family, gtp_genl_ops);
+       if (err < 0)
+               goto unreg_rtnl_link;
+
+       err = register_pernet_subsys(&gtp_net_ops);
+       if (err < 0)
+               goto unreg_genl_family;
+
+       pr_info("GTP module loaded (pdp ctx size %Zd bytes)\n",
+               sizeof(struct pdp_ctx));
+       return 0;
+
+unreg_genl_family:
+       genl_unregister_family(&gtp_genl_family);
+unreg_rtnl_link:
+       rtnl_link_unregister(&gtp_link_ops);
+error_out:
+       pr_err("error loading GTP module\n");
+       return err;
+}
+late_initcall(gtp_init);
+
+static void __exit gtp_fini(void)
+{
+       unregister_pernet_subsys(&gtp_net_ops);
+       genl_unregister_family(&gtp_genl_family);
+       rtnl_link_unregister(&gtp_link_ops);
+
+       pr_info("GTP module unloaded\n");
+}
+module_exit(gtp_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>");
+MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic");
+MODULE_ALIAS_RTNL_LINK("gtp");
index 72c9f1f352b4ec686a073b48f8df52d6245f771f..eb6663866c9f5795e3089d67514a4eb5e8ed698f 100644 (file)
@@ -780,8 +780,10 @@ static int baycom_send_packet(struct sk_buff *skb, struct net_device *dev)
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }
-       if (bc->skb)
-               return NETDEV_TX_LOCKED;
+       if (bc->skb) {
+               dev_kfree_skb(skb);
+               return NETDEV_TX_OK;
+       }
        /* strip KISS byte */
        if (skb->len >= HDLCDRV_MAXFLEN+1 || skb->len < 3) {
                dev_kfree_skb(skb);
index 49fe59b180a8619f554d5852202df933fee54871..4bad0b894e9c9fccbe89ae0427a01a4d07f226e4 100644 (file)
@@ -412,8 +412,10 @@ static netdev_tx_t hdlcdrv_send_packet(struct sk_buff *skb,
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }
-       if (sm->skb)
-               return NETDEV_TX_LOCKED;
+       if (sm->skb) {
+               dev_kfree_skb(skb);
+               return NETDEV_TX_OK;
+       }
        netif_stop_queue(dev);
        sm->skb = skb;
        return NETDEV_TX_OK;
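
Both hamradio hunks retire NETDEV_TX_LOCKED: instead of asking the core to retry, a busy transmitter now consumes and drops the skb and still reports success. A toy one-slot queue showing the resulting contract (kernel types are stubbed; this is a sketch, not the driver):

#include <stdio.h>
#include <stdlib.h>

#define NETDEV_TX_OK 0

struct sk_buff { int len; };

static struct sk_buff *pending;         /* one-packet TX slot, like bc->skb */

static int send_packet(struct sk_buff *skb)
{
        if (pending) {
                /* Slot busy: drop instead of returning NETDEV_TX_LOCKED. */
                free(skb);
                return NETDEV_TX_OK;
        }
        pending = skb;                  /* hand to the transmitter */
        return NETDEV_TX_OK;
}

int main(void)
{
        struct sk_buff *a = calloc(1, sizeof(*a));
        struct sk_buff *b = calloc(1, sizeof(*b));

        send_packet(a);                 /* queued */
        send_packet(b);                 /* dropped: slot still busy */
        printf("pending=%p\n", (void *)pending);
        free(pending);
        return 0;
}
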
index 85828f1534454dbfd4020e6407e6ad2a136af93c..1dfe2304daa76431d32a153bae66a82ac0e96c6e 100644 (file)
@@ -519,7 +519,7 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += actual;
 
-       ax->dev->trans_start = jiffies;
+       netif_trans_update(ax->dev);
        ax->xleft = count - actual;
        ax->xhead = ax->xbuff + actual;
 }
@@ -542,7 +542,7 @@ static netdev_tx_t ax_xmit(struct sk_buff *skb, struct net_device *dev)
                 * May be we must check transmitter timeout here ?
                 *      14 Oct 1994 Dmitry Gorodchanin.
                 */
-               if (time_before(jiffies, dev->trans_start + 20 * HZ)) {
+               if (time_before(jiffies, dev_trans_start(dev) + 20 * HZ)) {
                        /* 20 sec timeout not reached */
                        return NETDEV_TX_BUSY;
                }
index ce88df33fe17b20237b3f4f2e0c9e1ca7b4b63a3..b8083161ef461d87c613323c2893cac4ba2550b6 100644 (file)
@@ -1669,7 +1669,7 @@ static netdev_tx_t scc_net_tx(struct sk_buff *skb, struct net_device *dev)
                dev_kfree_skb(skb_del);
        }
        skb_queue_tail(&scc->tx_queue, skb);
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
        
 
        /*
index 1a4729c36aa49d93f8e536cd98d4731df704621d..aaff07c10058520fdba0b9e419bc95d8219a381c 100644 (file)
@@ -601,7 +601,7 @@ static netdev_tx_t yam_send_packet(struct sk_buff *skb,
                return ax25_ip_xmit(skb);
 
        skb_queue_tail(&yp->send_queue, skb);
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
        return NETDEV_TX_OK;
 }
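
These hunks stop poking dev->trans_start directly and use the netif_trans_update()/dev_trans_start() helpers instead, which matters once the timestamp lives per TX queue. A toy rendition of the timeout test in ax_xmit(), with an integer standing in for jiffies:

#include <stdio.h>

static unsigned long jiffies;           /* stand-in for the kernel tick */
static unsigned long trans_start;       /* what netif_trans_update() records */

#define HZ 100
#define time_before(a, b) ((long)((a) - (b)) < 0)

static void netif_trans_update_stub(void)
{
        trans_start = jiffies;          /* last TX attempt, per queue */
}

int main(void)
{
        netif_trans_update_stub();      /* TX at t=0 */

        jiffies = 19 * HZ;
        printf("t=19s: %s\n", time_before(jiffies, trans_start + 20 * HZ) ?
               "still waiting (NETDEV_TX_BUSY)" : "timed out, restart TX");

        jiffies = 21 * HZ;
        printf("t=21s: %s\n", time_before(jiffies, trans_start + 20 * HZ) ?
               "still waiting (NETDEV_TX_BUSY)" : "timed out, restart TX");
        return 0;
}
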
 
index 8b3bd8ecd1c469ee5c2d05fe47716b13dbed7d5b..6700a4dca7c82c55340b645a94e1a86d6590d41e 100644 (file)
@@ -202,6 +202,8 @@ int rndis_filter_receive(struct hv_device *dev,
 int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter);
 int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac);
 
+void netvsc_switch_datapath(struct netvsc_device *nv_dev, bool vf);
+
 #define NVSP_INVALID_PROTOCOL_VERSION  ((u32)0xFFFFFFFF)
 
 #define NVSP_PROTOCOL_VERSION_1                2
@@ -641,6 +643,12 @@ struct netvsc_reconfig {
        u32 event;
 };
 
+struct garp_wrk {
+       struct work_struct dwrk;
+       struct net_device *netdev;
+       struct netvsc_device *netvsc_dev;
+};
+
 /* The context of the netvsc device  */
 struct net_device_context {
        /* point back to our device context */
@@ -656,6 +664,7 @@ struct net_device_context {
 
        struct work_struct work;
        u32 msg_enable; /* debug level */
+       struct garp_wrk gwrk;
 
        struct netvsc_stats __percpu *tx_stats;
        struct netvsc_stats __percpu *rx_stats;
@@ -730,6 +739,11 @@ struct netvsc_device {
        u32 vf_alloc;
        /* Serial number of the VF to team with */
        u32 vf_serial;
+       atomic_t open_cnt;
+       /* State to manage the associated VF interface. */
+       bool vf_inject;
+       struct net_device *vf_netdev;
+       atomic_t vf_use_cnt;
 };
 
 /* NdisInitialize message */
index ec313fc08d82a3b6a3d8d79fd7a6527caa30733a..eddce3cdafa8b5cdc7f0421cad77b55dec762068 100644 (file)
 
 #include "hyperv_net.h"
 
+/*
+ * Switch the data path from the synthetic interface to the VF
+ * interface.
+ */
+void netvsc_switch_datapath(struct netvsc_device *nv_dev, bool vf)
+{
+       struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;
+       struct hv_device *dev = nv_dev->dev;
+
+       memset(init_pkt, 0, sizeof(struct nvsp_message));
+       init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
+       if (vf)
+               init_pkt->msg.v4_msg.active_dp.active_datapath =
+                       NVSP_DATAPATH_VF;
+       else
+               init_pkt->msg.v4_msg.active_dp.active_datapath =
+                       NVSP_DATAPATH_SYNTHETIC;
+
+       vmbus_sendpacket(dev->channel, init_pkt,
+                              sizeof(struct nvsp_message),
+                              (unsigned long)init_pkt,
+                              VM_PKT_DATA_INBAND, 0);
+}
+
 
 static struct netvsc_device *alloc_net_device(struct hv_device *device)
 {
@@ -52,11 +76,16 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device)
        init_waitqueue_head(&net_device->wait_drain);
        net_device->start_remove = false;
        net_device->destroy = false;
+       atomic_set(&net_device->open_cnt, 0);
+       atomic_set(&net_device->vf_use_cnt, 0);
        net_device->dev = device;
        net_device->ndev = ndev;
        net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
        net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
 
+       net_device->vf_netdev = NULL;
+       net_device->vf_inject = false;
+
        hv_set_drvdata(device, net_device);
        return net_device;
 }
index b8121eba33ff6614fc226104da6c8b75096b46c8..ba3f3f3d48efcb27b34b1aa6be1786d6839751c3 100644 (file)
@@ -610,42 +610,24 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
        schedule_delayed_work(&ndev_ctx->dwork, 0);
 }
 
-/*
- * netvsc_recv_callback -  Callback when we receive a packet from the
- * "wire" on the specified device.
- */
-int netvsc_recv_callback(struct hv_device *device_obj,
+
+static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
                                struct hv_netvsc_packet *packet,
-                               void **data,
                                struct ndis_tcp_ip_checksum_info *csum_info,
-                               struct vmbus_channel *channel,
-                               u16 vlan_tci)
+                               void *data, u16 vlan_tci)
 {
-       struct net_device *net;
-       struct net_device_context *net_device_ctx;
        struct sk_buff *skb;
-       struct netvsc_stats *rx_stats;
 
-       net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
-       if (!net || net->reg_state != NETREG_REGISTERED) {
-               return NVSP_STAT_FAIL;
-       }
-       net_device_ctx = netdev_priv(net);
-       rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);
-
-       /* Allocate a skb - TODO direct I/O to pages? */
        skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
-       if (unlikely(!skb)) {
-               ++net->stats.rx_dropped;
-               return NVSP_STAT_FAIL;
-       }
+       if (!skb)
+               return skb;
 
        /*
         * Copy to skb. This copy is needed here since the memory pointed by
         * hv_netvsc_packet cannot be deallocated
         */
-       memcpy(skb_put(skb, packet->total_data_buflen), *data,
-               packet->total_data_buflen);
+       memcpy(skb_put(skb, packet->total_data_buflen), data,
+              packet->total_data_buflen);
 
        skb->protocol = eth_type_trans(skb, net);
        if (csum_info) {
@@ -663,6 +645,75 @@ int netvsc_recv_callback(struct hv_device *device_obj,
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                       vlan_tci);
 
+       return skb;
+}
+
+/*
+ * netvsc_recv_callback -  Callback when we receive a packet from the
+ * "wire" on the specified device.
+ */
+int netvsc_recv_callback(struct hv_device *device_obj,
+                               struct hv_netvsc_packet *packet,
+                               void **data,
+                               struct ndis_tcp_ip_checksum_info *csum_info,
+                               struct vmbus_channel *channel,
+                               u16 vlan_tci)
+{
+       struct net_device *net;
+       struct net_device_context *net_device_ctx;
+       struct sk_buff *skb;
+       struct sk_buff *vf_skb;
+       struct netvsc_stats *rx_stats;
+       struct netvsc_device *netvsc_dev = hv_get_drvdata(device_obj);
+       u32 bytes_recvd = packet->total_data_buflen;
+       int ret = 0;
+
+       net = netvsc_dev->ndev;
+       if (!net || net->reg_state != NETREG_REGISTERED)
+               return NVSP_STAT_FAIL;
+
+       if (READ_ONCE(netvsc_dev->vf_inject)) {
+               atomic_inc(&netvsc_dev->vf_use_cnt);
+               if (!READ_ONCE(netvsc_dev->vf_inject)) {
+                       /*
+                        * We raced; just move on.
+                        */
+                       atomic_dec(&netvsc_dev->vf_use_cnt);
+                       goto vf_injection_done;
+               }
+
+               /*
+                * Inject this packet into the VF interface.
+                * On Hyper-V, multicast and broadcast packets
+                * are only delivered on the synthetic interface
+                * (after subjecting these to policy filters on
+                * the host). Deliver these via the VF interface
+                * in the guest.
+                */
+               vf_skb = netvsc_alloc_recv_skb(netvsc_dev->vf_netdev, packet,
+                                              csum_info, *data, vlan_tci);
+               if (vf_skb != NULL) {
+                       ++netvsc_dev->vf_netdev->stats.rx_packets;
+                       netvsc_dev->vf_netdev->stats.rx_bytes += bytes_recvd;
+                       netif_receive_skb(vf_skb);
+               } else {
+                       ++net->stats.rx_dropped;
+                       ret = NVSP_STAT_FAIL;
+               }
+               atomic_dec(&netvsc_dev->vf_use_cnt);
+               return ret;
+       }
+
+vf_injection_done:
+       net_device_ctx = netdev_priv(net);
+       rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);
+
+       /* Allocate a skb - TODO direct I/O to pages? */
+       skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci);
+       if (unlikely(!skb)) {
+               ++net->stats.rx_dropped;
+               return NVSP_STAT_FAIL;
+       }
        skb_record_rx_queue(skb, channel->
                            offermsg.offer.sub_channel_index);
 
@@ -1074,7 +1125,7 @@ static void netvsc_link_change(struct work_struct *w)
                        netif_tx_stop_all_queues(net);
                        event->event = RNDIS_STATUS_MEDIA_CONNECT;
                        spin_lock_irqsave(&ndev_ctx->lock, flags);
-                       list_add_tail(&event->list, &ndev_ctx->reconfig_events);
+                       list_add(&event->list, &ndev_ctx->reconfig_events);
                        spin_unlock_irqrestore(&ndev_ctx->lock, flags);
                        reschedule = true;
                }
@@ -1102,6 +1153,175 @@ static void netvsc_free_netdev(struct net_device *netdev)
        free_netdev(netdev);
 }
 
+static void netvsc_notify_peers(struct work_struct *wrk)
+{
+       struct garp_wrk *gwrk;
+
+       gwrk = container_of(wrk, struct garp_wrk, dwrk);
+
+       netdev_notify_peers(gwrk->netdev);
+
+       atomic_dec(&gwrk->netvsc_dev->vf_use_cnt);
+}
+
+static struct netvsc_device *get_netvsc_device(char *mac)
+{
+       struct net_device *dev;
+       struct net_device_context *netvsc_ctx = NULL;
+       int rtnl_locked;
+
+       rtnl_locked = rtnl_trylock();
+
+       for_each_netdev(&init_net, dev) {
+               if (memcmp(dev->dev_addr, mac, ETH_ALEN) == 0) {
+                       if (dev->netdev_ops != &device_ops)
+                               continue;
+                       netvsc_ctx = netdev_priv(dev);
+                       break;
+               }
+       }
+       if (rtnl_locked)
+               rtnl_unlock();
+
+       if (netvsc_ctx == NULL)
+               return NULL;
+
+       return hv_get_drvdata(netvsc_ctx->device_ctx);
+}
+
+static int netvsc_register_vf(struct net_device *vf_netdev)
+{
+       struct netvsc_device *netvsc_dev;
+       const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
+
+       if (eth_ops == NULL || eth_ops == &ethtool_ops)
+               return NOTIFY_DONE;
+
+       /*
+        * We will use the MAC address to locate the synthetic interface to
+        * associate with the VF interface. If we don't find a matching
+        * synthetic interface, move on.
+        */
+       netvsc_dev = get_netvsc_device(vf_netdev->dev_addr);
+       if (netvsc_dev == NULL)
+               return NOTIFY_DONE;
+
+       netdev_info(netvsc_dev->ndev, "VF registering: %s\n", vf_netdev->name);
+       /*
+        * Take a reference on the module.
+        */
+       try_module_get(THIS_MODULE);
+       netvsc_dev->vf_netdev = vf_netdev;
+       return NOTIFY_OK;
+}
+
+
+static int netvsc_vf_up(struct net_device *vf_netdev)
+{
+       struct netvsc_device *netvsc_dev;
+       const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
+       struct net_device_context *net_device_ctx;
+
+       if (eth_ops == &ethtool_ops)
+               return NOTIFY_DONE;
+
+       netvsc_dev = get_netvsc_device(vf_netdev->dev_addr);
+
+       if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL))
+               return NOTIFY_DONE;
+
+       netdev_info(netvsc_dev->ndev, "VF up: %s\n", vf_netdev->name);
+       net_device_ctx = netdev_priv(netvsc_dev->ndev);
+       netvsc_dev->vf_inject = true;
+
+       /*
+        * Open the device before switching data path.
+        */
+       rndis_filter_open(net_device_ctx->device_ctx);
+
+       /*
+        * notify the host to switch the data path.
+        */
+       netvsc_switch_datapath(netvsc_dev, true);
+       netdev_info(netvsc_dev->ndev, "Data path switched to VF: %s\n",
+                   vf_netdev->name);
+
+       netif_carrier_off(netvsc_dev->ndev);
+
+       /*
+        * Now notify peers; the notification runs from a work item,
+        * so take a reference to prevent the VF interface from
+        * vanishing while it is pending.
+        */
+       atomic_inc(&netvsc_dev->vf_use_cnt);
+       net_device_ctx->gwrk.netdev = vf_netdev;
+       net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
+       schedule_work(&net_device_ctx->gwrk.dwrk);
+
+       return NOTIFY_OK;
+}
+
+
+static int netvsc_vf_down(struct net_device *vf_netdev)
+{
+       struct netvsc_device *netvsc_dev;
+       struct net_device_context *net_device_ctx;
+       const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
+
+       if (eth_ops == &ethtool_ops)
+               return NOTIFY_DONE;
+
+       netvsc_dev = get_netvsc_device(vf_netdev->dev_addr);
+
+       if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL))
+               return NOTIFY_DONE;
+
+       netdev_info(netvsc_dev->ndev, "VF down: %s\n", vf_netdev->name);
+       net_device_ctx = netdev_priv(netvsc_dev->ndev);
+       netvsc_dev->vf_inject = false;
+
+       /*
+        * Wait for currently active users to
+        * drain out.
+        */
+       while (atomic_read(&netvsc_dev->vf_use_cnt) != 0)
+               udelay(50);
+       netvsc_switch_datapath(netvsc_dev, false);
+       netdev_info(netvsc_dev->ndev, "Data path switched from VF: %s\n",
+                   vf_netdev->name);
+       rndis_filter_close(net_device_ctx->device_ctx);
+       netif_carrier_on(netvsc_dev->ndev);
+       /*
+        * Notify peers.
+        */
+       atomic_inc(&netvsc_dev->vf_use_cnt);
+       net_device_ctx->gwrk.netdev = netvsc_dev->ndev;
+       net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
+       schedule_work(&net_device_ctx->gwrk.dwrk);
+
+       return NOTIFY_OK;
+}
+
+
+static int netvsc_unregister_vf(struct net_device *vf_netdev)
+{
+       struct netvsc_device *netvsc_dev;
+       const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
+
+       if (eth_ops == &ethtool_ops)
+               return NOTIFY_DONE;
+
+       netvsc_dev = get_netvsc_device(vf_netdev->dev_addr);
+       if (netvsc_dev == NULL)
+               return NOTIFY_DONE;
+       netdev_info(netvsc_dev->ndev, "VF unregistering: %s\n",
+                   vf_netdev->name);
+
+       netvsc_dev->vf_netdev = NULL;
+       module_put(THIS_MODULE);
+       return NOTIFY_OK;
+}
+
 static int netvsc_probe(struct hv_device *dev,
                        const struct hv_vmbus_device_id *dev_id)
 {
@@ -1140,6 +1360,7 @@ static int netvsc_probe(struct hv_device *dev,
        hv_set_drvdata(dev, net);
        INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
        INIT_WORK(&net_device_ctx->work, do_set_multicast);
+       INIT_WORK(&net_device_ctx->gwrk.dwrk, netvsc_notify_peers);
 
        spin_lock_init(&net_device_ctx->lock);
        INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
@@ -1235,19 +1456,58 @@ static struct  hv_driver netvsc_drv = {
        .remove = netvsc_remove,
 };
 
+
+/*
+ * On Hyper-V, every VF interface is matched with a corresponding
+ * synthetic interface. The synthetic interface is presented first
+ * to the guest. When the corresponding VF instance is registered,
+ * we will take care of switching the data path.
+ */
+static int netvsc_netdev_event(struct notifier_block *this,
+                              unsigned long event, void *ptr)
+{
+       struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
+
+       switch (event) {
+       case NETDEV_REGISTER:
+               return netvsc_register_vf(event_dev);
+       case NETDEV_UNREGISTER:
+               return netvsc_unregister_vf(event_dev);
+       case NETDEV_UP:
+               return netvsc_vf_up(event_dev);
+       case NETDEV_DOWN:
+               return netvsc_vf_down(event_dev);
+       default:
+               return NOTIFY_DONE;
+       }
+}
+
+static struct notifier_block netvsc_netdev_notifier = {
+       .notifier_call = netvsc_netdev_event,
+};
+
 static void __exit netvsc_drv_exit(void)
 {
+       unregister_netdevice_notifier(&netvsc_netdev_notifier);
        vmbus_driver_unregister(&netvsc_drv);
 }
 
 static int __init netvsc_drv_init(void)
 {
+       int ret;
+
        if (ring_size < RING_SIZE_MIN) {
                ring_size = RING_SIZE_MIN;
                pr_info("Increased ring_size to %d (min allowed)\n",
                        ring_size);
        }
-       return vmbus_driver_register(&netvsc_drv);
+       ret = vmbus_driver_register(&netvsc_drv);
+
+       if (ret)
+               return ret;
+
+       register_netdevice_notifier(&netvsc_netdev_notifier);
+       return 0;
 }
 
 MODULE_LICENSE("GPL");
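
A stripped-down sketch of the netdevice-notifier shape the netvsc changes above rely on; the demo_* names are illustrative only, not part of the driver:

    #include <linux/netdevice.h>
    #include <linux/notifier.h>

    /* Dispatch on netdev events, as netvsc_netdev_event() does above. */
    static int demo_netdev_event(struct notifier_block *nb,
                                 unsigned long event, void *ptr)
    {
            struct net_device *dev = netdev_notifier_info_to_dev(ptr);

            switch (event) {
            case NETDEV_REGISTER:
            case NETDEV_UNREGISTER:
            case NETDEV_UP:
            case NETDEV_DOWN:
                    pr_info("event %lu on %s\n", event, dev->name);
                    return NOTIFY_OK;
            default:
                    return NOTIFY_DONE;     /* not interested */
            }
    }

    static struct notifier_block demo_nb = {
            .notifier_call = demo_netdev_event,
    };

    /* register_netdevice_notifier(&demo_nb) in module init and
     * unregister_netdevice_notifier(&demo_nb) in module exit, in the
     * same order netvsc_drv_init()/netvsc_drv_exit() use above. */
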
index c4e1e040843385418c926b0f4c62cb1d22f39c12..a59cdebc9b4b32c116326d66b2de0d475247e543 100644 (file)
@@ -1229,6 +1229,9 @@ int rndis_filter_open(struct hv_device *dev)
        if (!net_device)
                return -EINVAL;
 
+       if (atomic_inc_return(&net_device->open_cnt) != 1)
+               return 0;
+
        return rndis_filter_open_device(net_device->extension);
 }
 
@@ -1239,5 +1242,8 @@ int rndis_filter_close(struct hv_device *dev)
        if (!nvdev)
                return -EINVAL;
 
+       if (atomic_dec_return(&nvdev->open_cnt) != 0)
+               return 0;
+
        return rndis_filter_close_device(nvdev->extension);
 }
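
The new open_cnt guard makes rndis_filter_open()/rndis_filter_close() idempotent, which the VF plumbing above depends on since both the synthetic path and the data-path switch may open the device. The counting pattern in isolation (demo_* names are placeholders):

    #include <linux/atomic.h>

    struct demo_dev {
            atomic_t open_cnt;
    };

    static int demo_hw_open(struct demo_dev *d)  { return 0; } /* stand-in */
    static int demo_hw_close(struct demo_dev *d) { return 0; } /* stand-in */

    static int demo_open(struct demo_dev *d)
    {
            /* Only the 0 -> 1 transition touches the hardware. */
            if (atomic_inc_return(&d->open_cnt) != 1)
                    return 0;
            return demo_hw_open(d);
    }

    static int demo_close(struct demo_dev *d)
    {
            /* Only the 1 -> 0 transition touches the hardware. */
            if (atomic_dec_return(&d->open_cnt) != 0)
                    return 0;
            return demo_hw_close(d);
    }
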
index 89154c0797888694a082141e07d8f9c8c973c37e..b82e39d2439495641fb9edc8559efbb2fcf7b2a8 100644 (file)
@@ -1030,6 +1030,7 @@ static int adf7242_hw_init(struct adf7242_local *lp)
        if (ret) {
                dev_err(&lp->spi->dev,
                        "upload firmware failed with %d\n", ret);
+               release_firmware(fw);
                return ret;
        }
 
@@ -1037,6 +1038,7 @@ static int adf7242_hw_init(struct adf7242_local *lp)
        if (ret) {
                dev_err(&lp->spi->dev,
                        "verify firmware failed with %d\n", ret);
+               release_firmware(fw);
                return ret;
        }
 
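
Both adf7242 error paths above previously returned without releasing the firmware blob. The canonical request/use/release shape the fix restores ("demo-fw.bin" and demo_upload() are stand-ins):

    #include <linux/device.h>
    #include <linux/firmware.h>

    static int demo_upload(const u8 *data, size_t size)
    {
            return 0;       /* stand-in for the SPI upload */
    }

    static int demo_load_firmware(struct device *dev)
    {
            const struct firmware *fw;
            int ret;

            ret = request_firmware(&fw, "demo-fw.bin", dev);
            if (ret)
                    return ret;

            ret = demo_upload(fw->data, fw->size);
            release_firmware(fw);   /* on success and on every error path */
            return ret;
    }
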
index cb9e9fe6d77a0b75a8d0f88f523b4f6059830dcc..9f10da60e02d59af881f8194247719419ae78cd9 100644 (file)
@@ -1340,7 +1340,7 @@ static struct at86rf2xx_chip_data at86rf233_data = {
        .t_off_to_aack = 80,
        .t_off_to_tx_on = 80,
        .t_off_to_sleep = 35,
-       .t_sleep_to_off = 210,
+       .t_sleep_to_off = 1000,
        .t_frame = 4096,
        .t_p_ack = 545,
        .rssi_base_val = -91,
@@ -1355,7 +1355,7 @@ static struct at86rf2xx_chip_data at86rf231_data = {
        .t_off_to_aack = 110,
        .t_off_to_tx_on = 110,
        .t_off_to_sleep = 35,
-       .t_sleep_to_off = 380,
+       .t_sleep_to_off = 1000,
        .t_frame = 4096,
        .t_p_ack = 545,
        .rssi_base_val = -91,
@@ -1370,7 +1370,7 @@ static struct at86rf2xx_chip_data at86rf212_data = {
        .t_off_to_aack = 200,
        .t_off_to_tx_on = 200,
        .t_off_to_sleep = 35,
-       .t_sleep_to_off = 380,
+       .t_sleep_to_off = 1000,
        .t_frame = 4096,
        .t_p_ack = 545,
        .rssi_base_val = -100,
index b1cd865ade2e051fe7bea84b552bca086e8f4fef..52c9051f3b955ab374baee215b098462a1e0b96a 100644 (file)
@@ -3,6 +3,8 @@
  *
  * Written 2013 by Werner Almesberger <werner@almesberger.net>
  *
+ * Copyright (c) 2015 - 2016 Stefan Schmidt <stefan@datenfreihafen.org>
+ *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
  * published by the Free Software Foundation, version 2
@@ -472,6 +474,76 @@ atusb_set_txpower(struct ieee802154_hw *hw, s32 mbm)
        return -EINVAL;
 }
 
+#define ATUSB_MAX_ED_LEVELS 0xF
+static const s32 atusb_ed_levels[ATUSB_MAX_ED_LEVELS + 1] = {
+       -9100, -8900, -8700, -8500, -8300, -8100, -7900, -7700, -7500, -7300,
+       -7100, -6900, -6700, -6500, -6300, -6100,
+};
+
+static int
+atusb_set_cca_mode(struct ieee802154_hw *hw, const struct wpan_phy_cca *cca)
+{
+       struct atusb *atusb = hw->priv;
+       u8 val;
+
+       /* mapping 802.15.4 to driver spec */
+       switch (cca->mode) {
+       case NL802154_CCA_ENERGY:
+               val = 1;
+               break;
+       case NL802154_CCA_CARRIER:
+               val = 2;
+               break;
+       case NL802154_CCA_ENERGY_CARRIER:
+               switch (cca->opt) {
+               case NL802154_CCA_OPT_ENERGY_CARRIER_AND:
+                       val = 3;
+                       break;
+               case NL802154_CCA_OPT_ENERGY_CARRIER_OR:
+                       val = 0;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return atusb_write_subreg(atusb, SR_CCA_MODE, val);
+}
+
+static int
+atusb_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm)
+{
+       struct atusb *atusb = hw->priv;
+       u32 i;
+
+       for (i = 0; i < hw->phy->supported.cca_ed_levels_size; i++) {
+               if (hw->phy->supported.cca_ed_levels[i] == mbm)
+                       return atusb_write_subreg(atusb, SR_CCA_ED_THRES, i);
+       }
+
+       return -EINVAL;
+}
+
+static int
+atusb_set_csma_params(struct ieee802154_hw *hw, u8 min_be, u8 max_be, u8 retries)
+{
+       struct atusb *atusb = hw->priv;
+       int ret;
+
+       ret = atusb_write_subreg(atusb, SR_MIN_BE, min_be);
+       if (ret)
+               return ret;
+
+       ret = atusb_write_subreg(atusb, SR_MAX_BE, max_be);
+       if (ret)
+               return ret;
+
+       return atusb_write_subreg(atusb, SR_MAX_CSMA_RETRIES, retries);
+}
+
 static int
 atusb_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on)
 {
@@ -508,6 +580,9 @@ static struct ieee802154_ops atusb_ops = {
        .stop                   = atusb_stop,
        .set_hw_addr_filt       = atusb_set_hw_addr_filt,
        .set_txpower            = atusb_set_txpower,
+       .set_cca_mode           = atusb_set_cca_mode,
+       .set_cca_ed_level       = atusb_set_cca_ed_level,
+       .set_csma_params        = atusb_set_csma_params,
        .set_promiscuous_mode   = atusb_set_promiscuous_mode,
 };
 
@@ -636,9 +711,20 @@ static int atusb_probe(struct usb_interface *interface,
 
        hw->parent = &usb_dev->dev;
        hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT |
-                   IEEE802154_HW_PROMISCUOUS;
+                   IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS;
+
+       hw->phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL |
+                        WPAN_PHY_FLAG_CCA_MODE;
+
+       hw->phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) |
+               BIT(NL802154_CCA_CARRIER) | BIT(NL802154_CCA_ENERGY_CARRIER);
+       hw->phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND) |
+               BIT(NL802154_CCA_OPT_ENERGY_CARRIER_OR);
+
+       hw->phy->supported.cca_ed_levels = atusb_ed_levels;
+       hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(atusb_ed_levels);
 
-       hw->phy->flags = WPAN_PHY_FLAG_TXPOWER;
+       hw->phy->cca.mode = NL802154_CCA_ENERGY;
 
        hw->phy->current_page = 0;
        hw->phy->current_channel = 11;  /* reset default */
@@ -647,6 +733,7 @@ static int atusb_probe(struct usb_interface *interface,
        hw->phy->supported.tx_powers_size = ARRAY_SIZE(atusb_powers);
        hw->phy->transmit_power = hw->phy->supported.tx_powers[0];
        ieee802154_random_extended_addr(&hw->phy->perm_extended_addr);
+       hw->phy->cca_ed_level = hw->phy->supported.cca_ed_levels[7];
 
        atusb_command(atusb, ATUSB_RF_RESET, 0);
        atusb_get_and_show_chip(atusb);
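
The CCA energy-detect handling above is a pure table lookup: the transceiver register takes an index into the advertised threshold table rather than a dBm value, so atusb_set_cca_ed_level() searches for the requested level and writes its index. The lookup, reduced (demo_* names illustrative):

    #include <linux/errno.h>
    #include <linux/kernel.h>

    static const int demo_ed_levels[] = {
            -9100, -8900, -8700, -8500, -8300, -8100, -7900, -7700,
            -7500, -7300, -7100, -6900, -6700, -6500, -6300, -6100,
    };

    static int demo_ed_level_to_index(int mbm)
    {
            unsigned int i;

            for (i = 0; i < ARRAY_SIZE(demo_ed_levels); i++)
                    if (demo_ed_levels[i] == mbm)
                            return i;   /* register value == table index */
            return -EINVAL;
    }
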
index 764a2bddfaee390e936fed48eab0487b8ac9766a..f446db82856128f5b462ace90e9fcb83f9f7304f 100644 (file)
@@ -61,6 +61,7 @@
 #define REG_TXBCON0    0x1A
 #define REG_TXNCON     0x1B  /* Transmit Normal FIFO Control */
 #define BIT_TXNTRIG    BIT(0)
+#define BIT_TXNSECEN   BIT(1)
 #define BIT_TXNACKREQ  BIT(2)
 
 #define REG_TXG1CON    0x1C
 #define REG_INTSTAT    0x31  /* Interrupt Status */
 #define BIT_TXNIF      BIT(0)
 #define BIT_RXIF       BIT(3)
+#define BIT_SECIF      BIT(4)
+#define BIT_SECIGNORE  BIT(7)
 
 #define REG_INTCON     0x32  /* Interrupt Control */
 #define BIT_TXNIE      BIT(0)
 #define BIT_RXIE       BIT(3)
+#define BIT_SECIE      BIT(4)
 
 #define REG_GPIO       0x33  /* GPIO */
 #define REG_TRISGPIO   0x34  /* GPIO direction */
@@ -548,6 +552,9 @@ static void write_tx_buf_complete(void *context)
        u8 val = BIT_TXNTRIG;
        int ret;
 
+       if (ieee802154_is_secen(fc))
+               val |= BIT_TXNSECEN;
+
        if (ieee802154_is_ackreq(fc))
                val |= BIT_TXNACKREQ;
 
@@ -616,7 +623,7 @@ static int mrf24j40_start(struct ieee802154_hw *hw)
 
        /* Clear TXNIE and RXIE. Enable interrupts */
        return regmap_update_bits(devrec->regmap_short, REG_INTCON,
-                                 BIT_TXNIE | BIT_RXIE, 0);
+                                 BIT_TXNIE | BIT_RXIE | BIT_SECIE, 0);
 }
 
 static void mrf24j40_stop(struct ieee802154_hw *hw)
@@ -1025,6 +1032,11 @@ static void mrf24j40_intstat_complete(void *context)
 
        enable_irq(devrec->spi->irq);
 
+       /* Ignore Rx security decryption */
+       if (intstat & BIT_SECIF)
+               regmap_write_async(devrec->regmap_short, REG_SECCON0,
+                                  BIT_SECIGNORE);
+
        /* Check for TX complete */
        if (intstat & BIT_TXNIF)
                ieee802154_xmit_complete(devrec->hw, devrec->tx_skb, false);
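
BIT_TXNSECEN is set only when the outgoing frame actually requests security; ieee802154_is_secen() tests the Security Enabled flag of the 802.15.4 frame control field (bit 3 in the standard layout). A self-contained version of that test, under that assumption (demo names):

    #include <linux/bits.h>
    #include <linux/kernel.h>

    #define DEMO_FC_SECEN   BIT(3)  /* Security Enabled, IEEE 802.15.4 FCF */

    static bool demo_frame_wants_security(__le16 fc)
    {
            return le16_to_cpu(fc) & DEMO_FC_SECEN;
    }
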
index cc56fac3c3f83ef51e4f42f67f9e1c4e549f7725..66c0eeafcb5d7c66d7f7d88614a2e5393cf48f7b 100644 (file)
@@ -196,6 +196,7 @@ static const struct net_device_ops ifb_netdev_ops = {
 
 #define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG  | NETIF_F_FRAGLIST | \
                      NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6      | \
+                     NETIF_F_GSO_ENCAP_ALL                             | \
                      NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX         | \
                      NETIF_F_HW_VLAN_STAG_TX)
 
@@ -224,6 +225,8 @@ static void ifb_setup(struct net_device *dev)
        dev->tx_queue_len = TX_Q_LIMIT;
 
        dev->features |= IFB_FEATURES;
+       dev->hw_features |= dev->features;
+       dev->hw_enc_features |= dev->features;
        dev->vlan_features |= IFB_FEATURES & ~(NETIF_F_HW_VLAN_CTAG_TX |
                                               NETIF_F_HW_VLAN_STAG_TX);
 
index 57941d3f42278407884ca4e442b5bbdf1c954ba4..1c4d395fbd494cdc158427cb90f22820332421d7 100644 (file)
@@ -113,6 +113,7 @@ static int ipvlan_init(struct net_device *dev)
 {
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        const struct net_device *phy_dev = ipvlan->phy_dev;
+       struct ipvl_port *port = ipvlan->port;
 
        dev->state = (dev->state & ~IPVLAN_STATE_MASK) |
                     (phy_dev->state & IPVLAN_STATE_MASK);
@@ -128,6 +129,8 @@ static int ipvlan_init(struct net_device *dev)
        if (!ipvlan->pcpu_stats)
                return -ENOMEM;
 
+       port->count += 1;
+
        return 0;
 }
 
@@ -481,27 +484,21 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
 
        dev->priv_flags |= IFF_IPVLAN_SLAVE;
 
-       port->count += 1;
        err = register_netdevice(dev);
        if (err < 0)
-               goto ipvlan_destroy_port;
+               return err;
 
        err = netdev_upper_dev_link(phy_dev, dev);
-       if (err)
-               goto ipvlan_destroy_port;
+       if (err) {
+               unregister_netdevice(dev);
+               return err;
+       }
 
        list_add_tail_rcu(&ipvlan->pnode, &port->ipvlans);
        ipvlan_set_port_mode(port, mode);
 
        netif_stacked_transfer_operstate(phy_dev, dev);
        return 0;
-
-ipvlan_destroy_port:
-       port->count -= 1;
-       if (!port->count)
-               ipvlan_port_destroy(phy_dev);
-
-       return err;
 }
 
 static void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
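
Moving port->count into ndo_init (the same fix is applied to macvlan further down) straightens out the error unwinding: once ndo_init() has succeeded, a failing register_netdevice() invokes ndo_uninit() itself, so increment and decrement now live on the same pair of lifecycle hooks and the hand-rolled unwind in the newlink path can go away. The invariant, as a sketch (demo names):

    #include <linux/netdevice.h>

    struct demo_port {
            int count;
    };

    struct demo_priv {
            struct demo_port *port;
    };

    static int demo_init(struct net_device *dev)
    {
            struct demo_priv *p = netdev_priv(dev);

            p->port->count += 1;    /* paired with demo_uninit() */
            return 0;
    }

    static void demo_uninit(struct net_device *dev)
    {
            struct demo_priv *p = netdev_priv(dev);

            p->port->count -= 1;
    }
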
index 64bb44d5d8672a2f078a69074a1c9e6e9d4fda27..c285eafd3f1c11d726daf67aaff37b9abf7da6fb 100644 (file)
@@ -1427,7 +1427,7 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb,
                /* Check for empty frame */
                if (!skb->len) {
                        ali_ircc_change_speed(self, speed); 
-                       dev->trans_start = jiffies;
+                       netif_trans_update(dev);
                        spin_unlock_irqrestore(&self->lock, flags);
                        dev_kfree_skb(skb);
                        return NETDEV_TX_OK;
@@ -1533,7 +1533,7 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb,
        /* Restore bank register */
        switch_bank(iobase, BANK0);
 
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
        spin_unlock_irqrestore(&self->lock, flags);
        dev_kfree_skb(skb);
 
@@ -1946,7 +1946,7 @@ static netdev_tx_t ali_ircc_sir_hard_xmit(struct sk_buff *skb,
                /* Check for empty frame */
                if (!skb->len) {
                        ali_ircc_change_speed(self, speed); 
-                       dev->trans_start = jiffies;
+                       netif_trans_update(dev);
                        spin_unlock_irqrestore(&self->lock, flags);
                        dev_kfree_skb(skb);
                        return NETDEV_TX_OK;
@@ -1966,7 +1966,7 @@ static netdev_tx_t ali_ircc_sir_hard_xmit(struct sk_buff *skb,
        /* Turn on transmit finished interrupt. Will fire immediately!  */
        outb(UART_IER_THRI, iobase+UART_IER); 
 
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
        spin_unlock_irqrestore(&self->lock, flags);
 
        dev_kfree_skb(skb);
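
This and the remaining IrDA hunks are a mechanical conversion from open-coded `dev->trans_start = jiffies` to netif_trans_update(). As of this series the helper is approximately the following (trans_start has moved into the per-queue structure, and the comparison avoids dirtying the cacheline when the value is unchanged):

    #include <linux/jiffies.h>
    #include <linux/netdevice.h>

    /* Approximate body of netif_trans_update(); illustration only. */
    static inline void demo_trans_update(struct net_device *dev)
    {
            struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

            if (txq->trans_start != jiffies)
                    txq->trans_start = jiffies;
    }
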
index 303c4bd26e17953de1693475b042266eac8dca90..be5bb0b7f29ca5f90d622523ff97baaad9921b53 100644 (file)
@@ -531,7 +531,7 @@ static void bfin_sir_send_work(struct work_struct *work)
        bfin_sir_dma_tx_chars(dev);
 #endif
        bfin_sir_enable_tx(port);
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
 }
 
 static int bfin_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
index 25f21968fa5c61506af3ed49fc0d9a7fa4ad15c4..a198946bc54fda6ce8df93c2d4c12b22d32eb117 100644 (file)
@@ -429,7 +429,7 @@ static netdev_tx_t irda_usb_hard_xmit(struct sk_buff *skb,
                         * do an extra memcpy and increment packet counters...
                         * Jean II */
                        irda_usb_change_speed_xbofs(self);
-                       netdev->trans_start = jiffies;
+                       netif_trans_update(netdev);
                        /* Will netif_wake_queue() in callback */
                        goto drop;
                }
@@ -526,7 +526,7 @@ static netdev_tx_t irda_usb_hard_xmit(struct sk_buff *skb,
                netdev->stats.tx_packets++;
                 netdev->stats.tx_bytes += skb->len;
                
-               netdev->trans_start = jiffies;
+               netif_trans_update(netdev);
        }
        spin_unlock_irqrestore(&self->lock, flags);
        
index dc0dbd8dd0b5b293d05c1bfd121410a0be35dc42..9ef13d8ed813f243511c9a11993dc73f7989b075 100644 (file)
@@ -1399,7 +1399,7 @@ static netdev_tx_t nsc_ircc_hard_xmit_sir(struct sk_buff *skb,
                                 * to make sure packets gets through the
                                 * proper xmit handler - Jean II */
                        }
-                       dev->trans_start = jiffies;
+                       netif_trans_update(dev);
                        spin_unlock_irqrestore(&self->lock, flags);
                        dev_kfree_skb(skb);
                        return NETDEV_TX_OK;
@@ -1424,7 +1424,7 @@ static netdev_tx_t nsc_ircc_hard_xmit_sir(struct sk_buff *skb,
        /* Restore bank register */
        outb(bank, iobase+BSR);
 
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
        spin_unlock_irqrestore(&self->lock, flags);
 
        dev_kfree_skb(skb);
@@ -1470,7 +1470,7 @@ static netdev_tx_t nsc_ircc_hard_xmit_fir(struct sk_buff *skb,
                                 * the speed change has been done.
                                 * Jean II */
                        }
-                       dev->trans_start = jiffies;
+                       netif_trans_update(dev);
                        spin_unlock_irqrestore(&self->lock, flags);
                        dev_kfree_skb(skb);
                        return NETDEV_TX_OK;
@@ -1553,7 +1553,7 @@ static netdev_tx_t nsc_ircc_hard_xmit_fir(struct sk_buff *skb,
        /* Restore bank register */
        outb(bank, iobase+BSR);
 
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
        spin_unlock_irqrestore(&self->lock, flags);
        dev_kfree_skb(skb);
 
index b455ffe8850ceda13255ad13e5a52d236b3ae8b2..dcf92ba8087257d3ffe8c0a8dab4d5475cb43ea4 100644 (file)
@@ -862,7 +862,7 @@ static void smsc_ircc_timeout(struct net_device *dev)
        spin_lock_irqsave(&self->lock, flags);
        smsc_ircc_sir_start(self);
        smsc_ircc_change_speed(self, self->io.speed);
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
        netif_wake_queue(dev);
        spin_unlock_irqrestore(&self->lock, flags);
 }
index 83cc48a01802b76fe2a53923e13263cea80e484f..42da094b68ddfade0c7b43f527d2c8af56b17ec5 100644 (file)
@@ -718,7 +718,7 @@ static void stir_send(struct stir_cb *stir, struct sk_buff *skb)
 
        stir->netdev->stats.tx_packets++;
        stir->netdev->stats.tx_bytes += skb->len;
-       stir->netdev->trans_start = jiffies;
+       netif_trans_update(stir->netdev);
        pr_debug("send %d (%d)\n", skb->len, wraplen);
 
        if (usb_bulk_msg(stir->usbdev, usb_sndbulkpipe(stir->usbdev, 1),
index 6960d4cd3caed83e1e11dc661c1faf492031b20f..ca4442a9d63109820aad26ed9aa448847b1e5da2 100644 (file)
@@ -774,7 +774,7 @@ static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
                /* Check for empty frame */
                if (!skb->len) {
                        via_ircc_change_speed(self, speed);
-                       dev->trans_start = jiffies;
+                       netif_trans_update(dev);
                        dev_kfree_skb(skb);
                        return NETDEV_TX_OK;
                } else
@@ -821,7 +821,7 @@ static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
        RXStart(iobase, OFF);
        TXStart(iobase, ON);
 
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
        spin_unlock_irqrestore(&self->lock, flags);
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
@@ -849,7 +849,7 @@ static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
        if ((speed != self->io.speed) && (speed != -1)) {
                if (!skb->len) {
                        via_ircc_change_speed(self, speed);
-                       dev->trans_start = jiffies;
+                       netif_trans_update(dev);
                        dev_kfree_skb(skb);
                        return NETDEV_TX_OK;
                } else
@@ -869,7 +869,7 @@ static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
        via_ircc_dma_xmit(self, iobase);
 //F01   }
 //F01   if (self->tx_fifo.free < (MAX_TX_WINDOW -1 )) netif_wake_queue(self->netdev);
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
        dev_kfree_skb(skb);
        spin_unlock_irqrestore(&self->lock, flags);
        return NETDEV_TX_OK;
index 84d3e5ca8817197a15165d2c61f8a0f58550c5b6..460740ccc238dfe86125b1c21642ddec46937fb6 100644 (file)
@@ -85,7 +85,7 @@ struct gcm_iv {
  * @tfm: crypto struct, key storage
  */
 struct macsec_key {
-       u64 id;
+       u8 id[MACSEC_KEYID_LEN];
        struct crypto_aead *tfm;
 };
 
@@ -880,12 +880,12 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
        macsec_skb_cb(skb)->valid = false;
        skb = skb_share_check(skb, GFP_ATOMIC);
        if (!skb)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        req = aead_request_alloc(rx_sa->key.tfm, GFP_ATOMIC);
        if (!req) {
                kfree_skb(skb);
-               return NULL;
+               return ERR_PTR(-ENOMEM);
        }
 
        hdr = (struct macsec_eth_header *)skb->data;
@@ -905,7 +905,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
                skb = skb_unshare(skb, GFP_ATOMIC);
                if (!skb) {
                        aead_request_free(req);
-                       return NULL;
+                       return ERR_PTR(-ENOMEM);
                }
        } else {
                /* integrity only: all headers + data authenticated */
@@ -921,14 +921,14 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
        dev_hold(dev);
        ret = crypto_aead_decrypt(req);
        if (ret == -EINPROGRESS) {
-               return NULL;
+               return ERR_PTR(ret);
        } else if (ret != 0) {
                /* decryption/authentication failed
                 * 10.6 if validateFrames is disabled, deliver anyway
                 */
                if (ret != -EBADMSG) {
                        kfree_skb(skb);
-                       skb = NULL;
+                       skb = ERR_PTR(ret);
                }
        } else {
                macsec_skb_cb(skb)->valid = true;
@@ -1146,8 +1146,10 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
            secy->validate_frames != MACSEC_VALIDATE_DISABLED)
                skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);
 
-       if (!skb) {
-               macsec_rxsa_put(rx_sa);
+       if (IS_ERR(skb)) {
+               /* the decrypt callback needs the reference */
+               if (PTR_ERR(skb) != -EINPROGRESS)
+                       macsec_rxsa_put(rx_sa);
                rcu_read_unlock();
                *pskb = NULL;
                return RX_HANDLER_CONSUMED;
@@ -1161,7 +1163,8 @@ deliver:
                            macsec_extra_len(macsec_skb_cb(skb)->has_sci));
        macsec_reset_skb(skb, secy->netdev);
 
-       macsec_rxsa_put(rx_sa);
+       if (rx_sa)
+               macsec_rxsa_put(rx_sa);
        count_rx(dev, skb->len);
 
        rcu_read_unlock();
@@ -1405,9 +1408,10 @@ static sci_t nla_get_sci(const struct nlattr *nla)
        return (__force sci_t)nla_get_u64(nla);
 }
 
-static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value)
+static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
+                      int padattr)
 {
-       return nla_put_u64(skb, attrtype, (__force u64)value);
+       return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
 }
 
 static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
@@ -1526,7 +1530,8 @@ static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
        [MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
        [MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
        [MACSEC_SA_ATTR_PN] = { .type = NLA_U32 },
-       [MACSEC_SA_ATTR_KEYID] = { .type = NLA_U64 },
+       [MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
+                                  .len = MACSEC_KEYID_LEN, },
        [MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
                                 .len = MACSEC_MAX_KEY_LEN, },
 };
@@ -1573,6 +1578,9 @@ static bool validate_add_rxsa(struct nlattr **attrs)
                        return false;
        }
 
+       if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
+               return false;
+
        return true;
 }
 
@@ -1622,8 +1630,9 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
        }
 
        rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
-       if (init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len,
-                      secy->icv_len)) {
+       if (!rx_sa || init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
+                                secy->key_len, secy->icv_len)) {
+               kfree(rx_sa);
                rtnl_unlock();
                return -ENOMEM;
        }
@@ -1637,7 +1646,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
        if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
                rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
 
-       rx_sa->key.id = nla_get_u64(tb_sa[MACSEC_SA_ATTR_KEYID]);
+       nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEY], MACSEC_KEYID_LEN);
        rx_sa->sc = rx_sc;
        rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);
 
@@ -1718,6 +1727,9 @@ static bool validate_add_txsa(struct nlattr **attrs)
                        return false;
        }
 
+       if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
+               return false;
+
        return true;
 }
 
@@ -1768,11 +1780,12 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
        tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
        if (!tx_sa || init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
                                 secy->key_len, secy->icv_len)) {
+               kfree(tx_sa);
                rtnl_unlock();
                return -ENOMEM;
        }
 
-       tx_sa->key.id = nla_get_u64(tb_sa[MACSEC_SA_ATTR_KEYID]);
+       nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEY], MACSEC_KEYID_LEN);
 
        spin_lock_bh(&tx_sa->lock);
        tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
@@ -2131,16 +2144,36 @@ static int copy_rx_sc_stats(struct sk_buff *skb,
                sum.InPktsUnusedSA    += tmp.InPktsUnusedSA;
        }
 
-       if (nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED, sum.InOctetsValidated) ||
-           nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED, sum.InOctetsDecrypted) ||
-           nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED, sum.InPktsUnchecked) ||
-           nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED, sum.InPktsDelayed) ||
-           nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) ||
-           nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) ||
-           nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE, sum.InPktsLate) ||
-           nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) ||
-           nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) ||
-           nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA))
+       if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
+                             sum.InOctetsValidated,
+                             MACSEC_RXSC_STATS_ATTR_PAD) ||
+           nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
+                             sum.InOctetsDecrypted,
+                             MACSEC_RXSC_STATS_ATTR_PAD) ||
+           nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
+                             sum.InPktsUnchecked,
+                             MACSEC_RXSC_STATS_ATTR_PAD) ||
+           nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
+                             sum.InPktsDelayed,
+                             MACSEC_RXSC_STATS_ATTR_PAD) ||
+           nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
+                             sum.InPktsOK,
+                             MACSEC_RXSC_STATS_ATTR_PAD) ||
+           nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
+                             sum.InPktsInvalid,
+                             MACSEC_RXSC_STATS_ATTR_PAD) ||
+           nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
+                             sum.InPktsLate,
+                             MACSEC_RXSC_STATS_ATTR_PAD) ||
+           nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
+                             sum.InPktsNotValid,
+                             MACSEC_RXSC_STATS_ATTR_PAD) ||
+           nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
+                             sum.InPktsNotUsingSA,
+                             MACSEC_RXSC_STATS_ATTR_PAD) ||
+           nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
+                             sum.InPktsUnusedSA,
+                             MACSEC_RXSC_STATS_ATTR_PAD))
                return -EMSGSIZE;
 
        return 0;
@@ -2169,10 +2202,18 @@ static int copy_tx_sc_stats(struct sk_buff *skb,
                sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted;
        }
 
-       if (nla_put_u64(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) ||
-           nla_put_u64(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted) ||
-           nla_put_u64(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED, sum.OutOctetsProtected) ||
-           nla_put_u64(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED, sum.OutOctetsEncrypted))
+       if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
+                             sum.OutPktsProtected,
+                             MACSEC_TXSC_STATS_ATTR_PAD) ||
+           nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
+                             sum.OutPktsEncrypted,
+                             MACSEC_TXSC_STATS_ATTR_PAD) ||
+           nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
+                             sum.OutOctetsProtected,
+                             MACSEC_TXSC_STATS_ATTR_PAD) ||
+           nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
+                             sum.OutOctetsEncrypted,
+                             MACSEC_TXSC_STATS_ATTR_PAD))
                return -EMSGSIZE;
 
        return 0;
@@ -2205,14 +2246,30 @@ static int copy_secy_stats(struct sk_buff *skb,
                sum.InPktsOverrun    += tmp.InPktsOverrun;
        }
 
-       if (nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED, sum.OutPktsUntagged) ||
-           nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED, sum.InPktsUntagged) ||
-           nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG, sum.OutPktsTooLong) ||
-           nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG, sum.InPktsNoTag) ||
-           nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG, sum.InPktsBadTag) ||
-           nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI, sum.InPktsUnknownSCI) ||
-           nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI, sum.InPktsNoSCI) ||
-           nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN, sum.InPktsOverrun))
+       if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
+                             sum.OutPktsUntagged,
+                             MACSEC_SECY_STATS_ATTR_PAD) ||
+           nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
+                             sum.InPktsUntagged,
+                             MACSEC_SECY_STATS_ATTR_PAD) ||
+           nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
+                             sum.OutPktsTooLong,
+                             MACSEC_SECY_STATS_ATTR_PAD) ||
+           nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
+                             sum.InPktsNoTag,
+                             MACSEC_SECY_STATS_ATTR_PAD) ||
+           nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
+                             sum.InPktsBadTag,
+                             MACSEC_SECY_STATS_ATTR_PAD) ||
+           nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
+                             sum.InPktsUnknownSCI,
+                             MACSEC_SECY_STATS_ATTR_PAD) ||
+           nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
+                             sum.InPktsNoSCI,
+                             MACSEC_SECY_STATS_ATTR_PAD) ||
+           nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
+                             sum.InPktsOverrun,
+                             MACSEC_SECY_STATS_ATTR_PAD))
                return -EMSGSIZE;
 
        return 0;
@@ -2226,8 +2283,11 @@ static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
        if (!secy_nest)
                return 1;
 
-       if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci) ||
-           nla_put_u64(skb, MACSEC_SECY_ATTR_CIPHER_SUITE, DEFAULT_CIPHER_ID) ||
+       if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
+                       MACSEC_SECY_ATTR_PAD) ||
+           nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
+                             MACSEC_DEFAULT_CIPHER_ID,
+                             MACSEC_SECY_ATTR_PAD) ||
            nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
            nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
            nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
@@ -2268,7 +2328,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
        if (!hdr)
                return -EMSGSIZE;
 
-       rtnl_lock();
+       genl_dump_check_consistent(cb, hdr, &macsec_fam);
 
        if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
                goto nla_put_failure;
@@ -2312,7 +2372,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
 
                if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
                    nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) ||
-                   nla_put_u64(skb, MACSEC_SA_ATTR_KEYID, tx_sa->key.id) ||
+                   nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
                    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
                        nla_nest_cancel(skb, txsa_nest);
                        nla_nest_cancel(skb, txsa_list);
@@ -2353,7 +2413,8 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
                }
 
                if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
-                   nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci)) {
+                   nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
+                               MACSEC_RXSC_ATTR_PAD)) {
                        nla_nest_cancel(skb, rxsc_nest);
                        nla_nest_cancel(skb, rxsc_list);
                        goto nla_put_failure;
@@ -2413,7 +2474,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
 
                        if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
                            nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) ||
-                           nla_put_u64(skb, MACSEC_SA_ATTR_KEYID, rx_sa->key.id) ||
+                           nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
                            nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
                                nla_nest_cancel(skb, rxsa_nest);
                                nla_nest_cancel(skb, rxsc_nest);
@@ -2429,18 +2490,17 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
 
        nla_nest_end(skb, rxsc_list);
 
-       rtnl_unlock();
-
        genlmsg_end(skb, hdr);
 
        return 0;
 
 nla_put_failure:
-       rtnl_unlock();
        genlmsg_cancel(skb, hdr);
        return -EMSGSIZE;
 }
 
+static int macsec_generation = 1; /* protected by RTNL */
+
 static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
 {
        struct net *net = sock_net(skb->sk);
@@ -2450,6 +2510,10 @@ static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
        dev_idx = cb->args[0];
 
        d = 0;
+       rtnl_lock();
+
+       cb->seq = macsec_generation;
+
        for_each_netdev(net, dev) {
                struct macsec_secy *secy;
 
@@ -2467,6 +2531,7 @@ next:
        }
 
 done:
+       rtnl_unlock();
        cb->args[0] = d;
        return skb->len;
 }
@@ -2826,7 +2891,7 @@ static void macsec_free_netdev(struct net_device *dev)
 static void macsec_setup(struct net_device *dev)
 {
        ether_setup(dev);
-       dev->tx_queue_len = 0;
+       dev->priv_flags |= IFF_NO_QUEUE;
        dev->netdev_ops = &macsec_netdev_ops;
        dev->destructor = macsec_free_netdev;
 
@@ -2920,10 +2985,14 @@ static void macsec_dellink(struct net_device *dev, struct list_head *head)
        struct net_device *real_dev = macsec->real_dev;
        struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
 
+       macsec_generation++;
+
        unregister_netdevice_queue(dev, head);
        list_del_rcu(&macsec->secys);
-       if (list_empty(&rxd->secys))
+       if (list_empty(&rxd->secys)) {
                netdev_rx_handler_unregister(real_dev);
+               kfree(rxd);
+       }
 
        macsec_del_dev(macsec);
 }
@@ -2945,8 +3014,10 @@ static int register_macsec_dev(struct net_device *real_dev,
 
                err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
                                                 rxd);
-               if (err < 0)
+               if (err < 0) {
+                       kfree(rxd);
                        return err;
+               }
        }
 
        list_add_tail_rcu(&macsec->secys, &rxd->secys);
@@ -3066,6 +3137,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
        if (err < 0)
                goto del_dev;
 
+       macsec_generation++;
+
        dev_hold(real_dev);
 
        return 0;
@@ -3079,7 +3152,7 @@ unregister:
 
 static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
 {
-       u64 csid = DEFAULT_CIPHER_ID;
+       u64 csid = MACSEC_DEFAULT_CIPHER_ID;
        u8 icv_len = DEFAULT_ICV_LEN;
        int flag;
        bool es, scb, sci;
@@ -3094,8 +3167,8 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
                icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
 
        switch (csid) {
-       case DEFAULT_CIPHER_ID:
-       case DEFAULT_CIPHER_ALT:
+       case MACSEC_DEFAULT_CIPHER_ID:
+       case MACSEC_DEFAULT_CIPHER_ALT:
                if (icv_len < MACSEC_MIN_ICV_LEN ||
                    icv_len > MACSEC_MAX_ICV_LEN)
                        return -EINVAL;
@@ -3129,8 +3202,8 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
            nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
                return -EINVAL;
 
-       if ((data[IFLA_MACSEC_PROTECT] &&
-            nla_get_u8(data[IFLA_MACSEC_PROTECT])) &&
+       if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
+            nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
            !data[IFLA_MACSEC_WINDOW])
                return -EINVAL;
 
@@ -3145,9 +3218,9 @@ static struct net *macsec_get_link_net(const struct net_device *dev)
 static size_t macsec_get_size(const struct net_device *dev)
 {
        return 0 +
-               nla_total_size(8) + /* SCI */
+               nla_total_size_64bit(8) + /* SCI */
                nla_total_size(1) + /* ICV_LEN */
-               nla_total_size(8) + /* CIPHER_SUITE */
+               nla_total_size_64bit(8) + /* CIPHER_SUITE */
                nla_total_size(4) + /* WINDOW */
                nla_total_size(1) + /* ENCODING_SA */
                nla_total_size(1) + /* ENCRYPT */
@@ -3166,9 +3239,11 @@ static int macsec_fill_info(struct sk_buff *skb,
        struct macsec_secy *secy = &macsec_priv(dev)->secy;
        struct macsec_tx_sc *tx_sc = &secy->tx_sc;
 
-       if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci) ||
+       if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
+                       IFLA_MACSEC_PAD) ||
            nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
-           nla_put_u64(skb, IFLA_MACSEC_CIPHER_SUITE, DEFAULT_CIPHER_ID) ||
+           nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
+                             MACSEC_DEFAULT_CIPHER_ID, IFLA_MACSEC_PAD) ||
            nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
            nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
            nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
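
The nla_put_u64 to nla_put_u64_64bit conversions throughout the macsec file attach an explicit PAD attribute, giving the netlink core a slot to insert an alignment gap so 64-bit payloads land 8-byte aligned on the wire. The pattern in isolation (DEMO_* attribute IDs are placeholders):

    #include <net/netlink.h>

    enum {
            DEMO_ATTR_UNSPEC,
            DEMO_ATTR_COUNTER,
            DEMO_ATTR_PAD,          /* reserved purely for alignment */
            __DEMO_ATTR_MAX,
    };

    static int demo_put_counter(struct sk_buff *skb, u64 val)
    {
            return nla_put_u64_64bit(skb, DEMO_ATTR_COUNTER, val,
                                     DEMO_ATTR_PAD);
    }
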
index 2bcf1f321beaebbe028b7922a2c0495f59b12e70..cb01023eab412ee67e59bbf5412777b8da19aa29 100644 (file)
@@ -795,6 +795,7 @@ static int macvlan_init(struct net_device *dev)
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
        const struct net_device *lowerdev = vlan->lowerdev;
+       struct macvlan_port *port = vlan->port;
 
        dev->state              = (dev->state & ~MACVLAN_STATE_MASK) |
                                  (lowerdev->state & MACVLAN_STATE_MASK);
@@ -812,6 +813,8 @@ static int macvlan_init(struct net_device *dev)
        if (!vlan->pcpu_stats)
                return -ENOMEM;
 
+       port->count += 1;
+
        return 0;
 }
 
@@ -1312,10 +1315,9 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
                        return err;
        }
 
-       port->count += 1;
        err = register_netdevice(dev);
        if (err < 0)
-               goto destroy_port;
+               return err;
 
        dev->priv_flags |= IFF_MACVLAN;
        err = netdev_upper_dev_link(lowerdev, dev);
@@ -1330,10 +1332,6 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
 
 unregister_netdev:
        unregister_netdevice(dev);
-destroy_port:
-       port->count -= 1;
-       if (!port->count)
-               macvlan_port_destroy(lowerdev);
 
        return err;
 }
index 95394edd1ed528fc9cf94fb39aadeaa28c47fb26..bd6720962b1fc4325cf4ca848b73bb633bc4d98c 100644 (file)
@@ -129,7 +129,18 @@ static DEFINE_MUTEX(minor_lock);
 static DEFINE_IDR(minor_idr);
 
 #define GOODCOPY_LEN 128
-static struct class *macvtap_class;
+static const void *macvtap_net_namespace(struct device *d)
+{
+       struct net_device *dev = to_net_dev(d->parent);
+       return dev_net(dev);
+}
+
+static struct class macvtap_class = {
+       .name = "macvtap",
+       .owner = THIS_MODULE,
+       .ns_type = &net_ns_type_operations,
+       .namespace = macvtap_net_namespace,
+};
 static struct cdev macvtap_cdev;
 
 static const struct proto_ops macvtap_socket_ops;
@@ -373,7 +384,7 @@ static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
                        goto wake_up;
                }
 
-               kfree_skb(skb);
+               consume_skb(skb);
                while (segs) {
                        struct sk_buff *nskb = segs->next;
 
@@ -1278,10 +1289,12 @@ static int macvtap_device_event(struct notifier_block *unused,
        struct device *classdev;
        dev_t devt;
        int err;
+       char tap_name[IFNAMSIZ];
 
        if (dev->rtnl_link_ops != &macvtap_link_ops)
                return NOTIFY_DONE;
 
+       snprintf(tap_name, IFNAMSIZ, "tap%d", dev->ifindex);
        vlan = netdev_priv(dev);
 
        switch (event) {
@@ -1295,16 +1308,24 @@ static int macvtap_device_event(struct notifier_block *unused,
                        return notifier_from_errno(err);
 
                devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
-               classdev = device_create(macvtap_class, &dev->dev, devt,
-                                        dev, "tap%d", dev->ifindex);
+               classdev = device_create(&macvtap_class, &dev->dev, devt,
+                                        dev, tap_name);
                if (IS_ERR(classdev)) {
                        macvtap_free_minor(vlan);
                        return notifier_from_errno(PTR_ERR(classdev));
                }
+               err = sysfs_create_link(&dev->dev.kobj, &classdev->kobj,
+                                       tap_name);
+               if (err)
+                       return notifier_from_errno(err);
                break;
        case NETDEV_UNREGISTER:
+               /* vlan->minor == 0 if NETDEV_REGISTER above failed */
+               if (vlan->minor == 0)
+                       break;
+               sysfs_remove_link(&dev->dev.kobj, tap_name);
                devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
-               device_destroy(macvtap_class, devt);
+               device_destroy(&macvtap_class, devt);
                macvtap_free_minor(vlan);
                break;
        }
@@ -1330,11 +1351,9 @@ static int macvtap_init(void)
        if (err)
                goto out2;
 
-       macvtap_class = class_create(THIS_MODULE, "macvtap");
-       if (IS_ERR(macvtap_class)) {
-               err = PTR_ERR(macvtap_class);
+       err = class_register(&macvtap_class);
+       if (err)
                goto out3;
-       }
 
        err = register_netdevice_notifier(&macvtap_notifier_block);
        if (err)
@@ -1349,7 +1368,7 @@ static int macvtap_init(void)
 out5:
        unregister_netdevice_notifier(&macvtap_notifier_block);
 out4:
-       class_unregister(macvtap_class);
+       class_unregister(&macvtap_class);
 out3:
        cdev_del(&macvtap_cdev);
 out2:
@@ -1363,7 +1382,7 @@ static void macvtap_exit(void)
 {
        rtnl_link_unregister(&macvtap_link_ops);
        unregister_netdevice_notifier(&macvtap_notifier_block);
-       class_unregister(macvtap_class);
+       class_unregister(&macvtap_class);
        cdev_del(&macvtap_cdev);
        unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
        idr_destroy(&minor_idr);
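
macvtap_class becomes a statically defined, network-namespace-aware class, so each netns only sees its own tapNN device nodes in sysfs. The same shape, stripped down (demo names):

    #include <linux/device.h>
    #include <linux/module.h>
    #include <linux/netdevice.h>
    #include <net/net_namespace.h>

    /* The namespace callback returns the net of the parent netdev. */
    static const void *demo_class_namespace(struct device *d)
    {
            return dev_net(to_net_dev(d->parent));
    }

    static struct class demo_class = {
            .name           = "demo",
            .owner          = THIS_MODULE,
            .ns_type        = &net_ns_type_operations,
            .namespace      = demo_class_namespace,
    };

    /* class_register(&demo_class) replaces the old class_create(). */
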
index b3ffaee3085885e02a2556f5d34517320a875f1b..f279a897a5c7fe0e875fb8b058f4c00ae3059f62 100644 (file)
@@ -359,27 +359,25 @@ static void at803x_link_change_notify(struct phy_device *phydev)
         * in the FIFO. In such cases, the FIFO enters an error mode it
         * cannot recover from by software.
         */
-       if (phydev->drv->phy_id == ATH8030_PHY_ID) {
-               if (phydev->state == PHY_NOLINK) {
-                       if (priv->gpiod_reset && !priv->phy_reset) {
-                               struct at803x_context context;
-
-                               at803x_context_save(phydev, &context);
-
-                               gpiod_set_value(priv->gpiod_reset, 1);
-                               msleep(1);
-                               gpiod_set_value(priv->gpiod_reset, 0);
-                               msleep(1);
-
-                               at803x_context_restore(phydev, &context);
-
-                               phydev_dbg(phydev, "%s(): phy was reset\n",
-                                          __func__);
-                               priv->phy_reset = true;
-                       }
-               } else {
-                       priv->phy_reset = false;
+       if (phydev->state == PHY_NOLINK) {
+               if (priv->gpiod_reset && !priv->phy_reset) {
+                       struct at803x_context context;
+
+                       at803x_context_save(phydev, &context);
+
+                       gpiod_set_value(priv->gpiod_reset, 1);
+                       msleep(1);
+                       gpiod_set_value(priv->gpiod_reset, 0);
+                       msleep(1);
+
+                       at803x_context_restore(phydev, &context);
+
+                       phydev_dbg(phydev, "%s(): phy was reset\n",
+                                  __func__);
+                       priv->phy_reset = true;
                }
+       } else {
+               priv->phy_reset = false;
        }
 }
 
@@ -391,7 +389,6 @@ static struct phy_driver at803x_driver[] = {
        .phy_id_mask            = 0xffffffef,
        .probe                  = at803x_probe,
        .config_init            = at803x_config_init,
-       .link_change_notify     = at803x_link_change_notify,
        .set_wol                = at803x_set_wol,
        .get_wol                = at803x_get_wol,
        .suspend                = at803x_suspend,
@@ -427,7 +424,6 @@ static struct phy_driver at803x_driver[] = {
        .phy_id_mask            = 0xffffffef,
        .probe                  = at803x_probe,
        .config_init            = at803x_config_init,
-       .link_change_notify     = at803x_link_change_notify,
        .set_wol                = at803x_set_wol,
        .get_wol                = at803x_get_wol,
        .suspend                = at803x_suspend,
index fc07a886602042392c2316943e8396deaadf63f9..9050f21e6f337e5934e1a639e63ad13607574b2d 100644 (file)
@@ -328,7 +328,7 @@ struct phy_device *fixed_phy_register(unsigned int irq,
                return ERR_PTR(ret);
 
        phy = get_phy_device(fmb->mii_bus, phy_addr, false);
-       if (!phy || IS_ERR(phy)) {
+       if (IS_ERR(phy)) {
                fixed_phy_del(phy_addr);
                return ERR_PTR(-EINVAL);
        }
index 308ade0eb1b6985d1a7a871607ed964a6b9501eb..5c81d6faf304f5772907972afa363932212526f7 100644 (file)
@@ -45,13 +45,7 @@ static int mdio_mux_read(struct mii_bus *bus, int phy_id, int regnum)
        struct mdio_mux_parent_bus *pb = cb->parent;
        int r;
 
-       /* In theory multiple mdio_mux could be stacked, thus creating
-        * more than a single level of nesting.  But in practice,
-        * SINGLE_DEPTH_NESTING will cover the vast majority of use
-        * cases.  We use it, instead of trying to handle the general
-        * case.
-        */
-       mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
+       mutex_lock_nested(&pb->mii_bus->mdio_lock, MDIO_MUTEX_MUX);
        r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
        if (r)
                goto out;
@@ -76,7 +70,7 @@ static int mdio_mux_write(struct mii_bus *bus, int phy_id,
 
        int r;
 
-       mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
+       mutex_lock_nested(&pb->mii_bus->mdio_lock, MDIO_MUTEX_MUX);
        r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
        if (r)
                goto out;
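
MDIO_MUTEX_MUX here and MDIO_MUTEX_NESTED in the mdiobus hunks below replace the generic SINGLE_DEPTH_NESTING with named lockdep subclasses, so a mux's parent-bus acquisition and an ordinary nested access are tracked as distinct classes. The idea, sketched (the demo enum mirrors, but is not, the one this series adds):

    #include <linux/mutex.h>

    enum demo_mdio_mutex {
            DEMO_MDIO_MUTEX_NORMAL,
            DEMO_MDIO_MUTEX_MUX,
            DEMO_MDIO_MUTEX_NESTED,
    };

    static DEFINE_MUTEX(demo_parent_lock);

    static void demo_mux_access(void)
    {
            /* A named subclass keeps lockdep from conflating this
             * acquisition with other nested mdio_lock users. */
            mutex_lock_nested(&demo_parent_lock, DEMO_MDIO_MUTEX_MUX);
            mutex_unlock(&demo_parent_lock);
    }
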
index 0cba64f1ecf4089c9b2f95f0c35fa94a50d1cb55..09deef4bed097dd2a6231c9cfd4916df86fe1e99 100644 (file)
@@ -333,7 +333,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
                        struct phy_device *phydev;
 
                        phydev = mdiobus_scan(bus, i);
-                       if (IS_ERR(phydev)) {
+                       if (IS_ERR(phydev) && (PTR_ERR(phydev) != -ENODEV)) {
                                err = PTR_ERR(phydev);
                                goto error;
                        }
@@ -419,7 +419,7 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
        int err;
 
        phydev = get_phy_device(bus, addr, false);
-       if (IS_ERR(phydev) || phydev == NULL)
+       if (IS_ERR(phydev))
                return phydev;
 
        /*
@@ -431,7 +431,7 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
        err = phy_device_register(phydev);
        if (err) {
                phy_device_free(phydev);
-               return NULL;
+               return ERR_PTR(-ENODEV);
        }
 
        return phydev;
@@ -457,7 +457,7 @@ int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum)
 
        BUG_ON(in_interrupt());
 
-       mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
+       mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
        retval = bus->read(bus, addr, regnum);
        mutex_unlock(&bus->mdio_lock);
 
@@ -509,7 +509,7 @@ int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val)
 
        BUG_ON(in_interrupt());
 
-       mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
+       mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
        err = bus->write(bus, addr, regnum, val);
        mutex_unlock(&bus->mdio_lock);
 
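
With this conversion every failure out of get_phy_device()/mdiobus_scan() travels as an ERR_PTR(), and -ENODEV is reserved for "no PHY at this address", which __mdiobus_register() above skips rather than aborting the whole bus. The caller-side shape:

    #include <linux/err.h>
    #include <linux/phy.h>

    static int demo_scan_one(struct mii_bus *bus, int addr)
    {
            struct phy_device *phydev = mdiobus_scan(bus, addr);

            /* -ENODEV just means the address is empty; keep scanning. */
            if (IS_ERR(phydev))
                    return PTR_ERR(phydev) == -ENODEV ? 0 : PTR_ERR(phydev);

            return 0;       /* a PHY was found and registered */
    }
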
index 5590b9c182c967d80a186256162f30690c707506..603e8db50162d5c40fc7dfcb6ca93309f2eb1202 100644 (file)
@@ -362,6 +362,60 @@ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
 }
 EXPORT_SYMBOL(phy_ethtool_sset);
 
+int phy_ethtool_ksettings_set(struct phy_device *phydev,
+                             const struct ethtool_link_ksettings *cmd)
+{
+       u8 autoneg = cmd->base.autoneg;
+       u8 duplex = cmd->base.duplex;
+       u32 speed = cmd->base.speed;
+       u32 advertising;
+
+       if (cmd->base.phy_address != phydev->mdio.addr)
+               return -EINVAL;
+
+       ethtool_convert_link_mode_to_legacy_u32(&advertising,
+                                               cmd->link_modes.advertising);
+
+       /* We make sure that we don't pass unsupported values in to the PHY */
+       advertising &= phydev->supported;
+
+       /* Verify the settings we care about. */
+       if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
+               return -EINVAL;
+
+       if (autoneg == AUTONEG_ENABLE && advertising == 0)
+               return -EINVAL;
+
+       if (autoneg == AUTONEG_DISABLE &&
+           ((speed != SPEED_1000 &&
+             speed != SPEED_100 &&
+             speed != SPEED_10) ||
+            (duplex != DUPLEX_HALF &&
+             duplex != DUPLEX_FULL)))
+               return -EINVAL;
+
+       phydev->autoneg = autoneg;
+
+       phydev->speed = speed;
+
+       phydev->advertising = advertising;
+
+       if (autoneg == AUTONEG_ENABLE)
+               phydev->advertising |= ADVERTISED_Autoneg;
+       else
+               phydev->advertising &= ~ADVERTISED_Autoneg;
+
+       phydev->duplex = duplex;
+
+       phydev->mdix = cmd->base.eth_tp_mdix_ctrl;
+
+       /* Restart the PHY */
+       phy_start_aneg(phydev);
+
+       return 0;
+}
+EXPORT_SYMBOL(phy_ethtool_ksettings_set);
+
 int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
 {
        cmd->supported = phydev->supported;
@@ -385,6 +439,33 @@ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
 }
 EXPORT_SYMBOL(phy_ethtool_gset);
 
+int phy_ethtool_ksettings_get(struct phy_device *phydev,
+                             struct ethtool_link_ksettings *cmd)
+{
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+                                               phydev->supported);
+
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+                                               phydev->advertising);
+
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
+                                               phydev->lp_advertising);
+
+       cmd->base.speed = phydev->speed;
+       cmd->base.duplex = phydev->duplex;
+       if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
+               cmd->base.port = PORT_BNC;
+       else
+               cmd->base.port = PORT_MII;
+
+       cmd->base.phy_address = phydev->mdio.addr;
+       cmd->base.autoneg = phydev->autoneg;
+       cmd->base.eth_tp_mdix_ctrl = phydev->mdix;
+
+       return 0;
+}
+EXPORT_SYMBOL(phy_ethtool_ksettings_get);
+
 /**
  * phy_mii_ioctl - generic PHY MII ioctl interface
  * @phydev: the phy_device struct
@@ -1266,3 +1347,27 @@ void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
                phydev->drv->get_wol(phydev, wol);
 }
 EXPORT_SYMBOL(phy_ethtool_get_wol);
+
+int phy_ethtool_get_link_ksettings(struct net_device *ndev,
+                                  struct ethtool_link_ksettings *cmd)
+{
+       struct phy_device *phydev = ndev->phydev;
+
+       if (!phydev)
+               return -ENODEV;
+
+       return phy_ethtool_ksettings_get(phydev, cmd);
+}
+EXPORT_SYMBOL(phy_ethtool_get_link_ksettings);
+
+int phy_ethtool_set_link_ksettings(struct net_device *ndev,
+                                  const struct ethtool_link_ksettings *cmd)
+{
+       struct phy_device *phydev = ndev->phydev;
+
+       if (!phydev)
+               return -ENODEV;
+
+       return phy_ethtool_ksettings_set(phydev, cmd);
+}
+EXPORT_SYMBOL(phy_ethtool_set_link_ksettings);
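
With these helpers exported, a driver whose link state is entirely managed by phylib no longer needs per-driver ksettings plumbing; a minimal sketch for a hypothetical driver (the foo_* names are illustrative):

static const struct ethtool_ops foo_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

/* in foo_probe(), once ndev->phydev is connected: */
ndev->ethtool_ops = &foo_ethtool_ops;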
index e551f3a89cfd0212e381ec274c59f558eb07d2b0..e977ba931878e77cb149fd5a66c5618f723500a3 100644 (file)
@@ -529,7 +529,7 @@ struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
 
        /* If the phy_id is mostly Fs, there is no device there */
        if ((phy_id & 0x1fffffff) == 0x1fffffff)
-               return NULL;
+               return ERR_PTR(-ENODEV);
 
        return phy_device_create(bus, addr, phy_id, is_c45, &c45_ids);
 }
@@ -1123,8 +1123,9 @@ static int genphy_config_advert(struct phy_device *phydev)
  */
 int genphy_setup_forced(struct phy_device *phydev)
 {
-       int ctl = 0;
+       int ctl = phy_read(phydev, MII_BMCR);
 
+       ctl &= BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN;
        phydev->pause = 0;
        phydev->asym_pause = 0;
 
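
Reading BMCR back and masking, rather than starting from zero, preserves any loopback/isolate/power-down state a MAC driver has deliberately set. The unchanged remainder of genphy_setup_forced() then ORs in the forced speed and duplex, roughly:

	if (SPEED_1000 == phydev->speed)
		ctl |= BMCR_SPEED1000;
	else if (SPEED_100 == phydev->speed)
		ctl |= BMCR_SPEED100;

	if (DUPLEX_FULL == phydev->duplex)
		ctl |= BMCR_FULLDPLX;

	return phy_write(phydev, MII_BMCR, ctl);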
index b5d50d45872893ffcbdef86f8317d0512b2e2a32..93ffedfa299412f78af2c72fedc991101a52a451 100644 (file)
@@ -441,7 +441,7 @@ static int ks8995_probe(struct spi_device *spi)
                return -ENOMEM;
 
        mutex_init(&ks->lock);
-       ks->spi = spi_dev_get(spi);
+       ks->spi = spi;
        ks->chip = &ks8995_chip[variant];
 
        if (ks->spi->dev.of_node) {
index f572b31a2b20cef55e560b8db932c419f14bd8b3..8dedafa1a95d0b2f8e1db526cc64770876488f46 100644 (file)
@@ -46,6 +46,7 @@
 #include <linux/device.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
+#include <linux/file.h>
 #include <asm/unaligned.h>
 #include <net/slhc_vj.h>
 #include <linux/atomic.h>
@@ -183,6 +184,12 @@ struct channel {
 #endif /* CONFIG_PPP_MULTILINK */
 };
 
+struct ppp_config {
+       struct file *file;
+       s32 unit;
+       bool ifname_is_set;
+};
+
 /*
  * SMP locking issues:
  * Both the ppp.rlock and ppp.wlock locks protect the ppp.channels
@@ -269,8 +276,7 @@ static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
 static void ppp_ccp_closed(struct ppp *ppp);
 static struct compressor *find_compressor(int type);
 static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
-static struct ppp *ppp_create_interface(struct net *net, int unit,
-                                       struct file *file, int *retp);
+static int ppp_create_interface(struct net *net, struct file *file, int *unit);
 static void init_ppp_file(struct ppp_file *pf, int kind);
 static void ppp_destroy_interface(struct ppp *ppp);
 static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
@@ -282,6 +288,7 @@ static int unit_get(struct idr *p, void *ptr);
 static int unit_set(struct idr *p, void *ptr, int n);
 static void unit_put(struct idr *p, int n);
 static void *unit_find(struct idr *p, int n);
+static void ppp_setup(struct net_device *dev);
 
 static const struct net_device_ops ppp_netdev_ops;
 
@@ -853,12 +860,12 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
                /* Create a new ppp unit */
                if (get_user(unit, p))
                        break;
-               ppp = ppp_create_interface(net, unit, file, &err);
-               if (!ppp)
+               err = ppp_create_interface(net, file, &unit);
+               if (err < 0)
                        break;
-               file->private_data = &ppp->file;
+
                err = -EFAULT;
-               if (put_user(ppp->file.index, p))
+               if (put_user(unit, p))
                        break;
                err = 0;
                break;
@@ -960,6 +967,188 @@ static struct pernet_operations ppp_net_ops = {
        .size = sizeof(struct ppp_net),
 };
 
+static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
+{
+       struct ppp_net *pn = ppp_pernet(ppp->ppp_net);
+       int ret;
+
+       mutex_lock(&pn->all_ppp_mutex);
+
+       if (unit < 0) {
+               ret = unit_get(&pn->units_idr, ppp);
+               if (ret < 0)
+                       goto err;
+       } else {
+               /* Caller asked for a specific unit number. Fail with -EEXIST
+                * if unavailable. For backward compatibility, return -EEXIST
+                * too if idr allocation fails; this makes pppd retry without
+                * requesting a specific unit number.
+                */
+               if (unit_find(&pn->units_idr, unit)) {
+                       ret = -EEXIST;
+                       goto err;
+               }
+               ret = unit_set(&pn->units_idr, ppp, unit);
+               if (ret < 0) {
+                       /* Rewrite error for backward compatibility */
+                       ret = -EEXIST;
+                       goto err;
+               }
+       }
+       ppp->file.index = ret;
+
+       if (!ifname_is_set)
+               snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index);
+
+       ret = register_netdevice(ppp->dev);
+       if (ret < 0)
+               goto err_unit;
+
+       atomic_inc(&ppp_unit_count);
+
+       mutex_unlock(&pn->all_ppp_mutex);
+
+       return 0;
+
+err_unit:
+       unit_put(&pn->units_idr, ppp->file.index);
+err:
+       mutex_unlock(&pn->all_ppp_mutex);
+
+       return ret;
+}
+
+static int ppp_dev_configure(struct net *src_net, struct net_device *dev,
+                            const struct ppp_config *conf)
+{
+       struct ppp *ppp = netdev_priv(dev);
+       int indx;
+       int err;
+
+       ppp->dev = dev;
+       ppp->ppp_net = src_net;
+       ppp->mru = PPP_MRU;
+       ppp->owner = conf->file;
+
+       init_ppp_file(&ppp->file, INTERFACE);
+       ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */
+
+       for (indx = 0; indx < NUM_NP; ++indx)
+               ppp->npmode[indx] = NPMODE_PASS;
+       INIT_LIST_HEAD(&ppp->channels);
+       spin_lock_init(&ppp->rlock);
+       spin_lock_init(&ppp->wlock);
+#ifdef CONFIG_PPP_MULTILINK
+       ppp->minseq = -1;
+       skb_queue_head_init(&ppp->mrq);
+#endif /* CONFIG_PPP_MULTILINK */
+#ifdef CONFIG_PPP_FILTER
+       ppp->pass_filter = NULL;
+       ppp->active_filter = NULL;
+#endif /* CONFIG_PPP_FILTER */
+
+       err = ppp_unit_register(ppp, conf->unit, conf->ifname_is_set);
+       if (err < 0)
+               return err;
+
+       conf->file->private_data = &ppp->file;
+
+       return 0;
+}
+
+static const struct nla_policy ppp_nl_policy[IFLA_PPP_MAX + 1] = {
+       [IFLA_PPP_DEV_FD]       = { .type = NLA_S32 },
+};
+
+static int ppp_nl_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+       if (!data)
+               return -EINVAL;
+
+       if (!data[IFLA_PPP_DEV_FD])
+               return -EINVAL;
+       if (nla_get_s32(data[IFLA_PPP_DEV_FD]) < 0)
+               return -EBADF;
+
+       return 0;
+}
+
+static int ppp_nl_newlink(struct net *src_net, struct net_device *dev,
+                         struct nlattr *tb[], struct nlattr *data[])
+{
+       struct ppp_config conf = {
+               .unit = -1,
+               .ifname_is_set = true,
+       };
+       struct file *file;
+       int err;
+
+       file = fget(nla_get_s32(data[IFLA_PPP_DEV_FD]));
+       if (!file)
+               return -EBADF;
+
+       /* rtnl_lock is already held here, but ppp_create_interface() locks
+        * ppp_mutex before holding rtnl_lock. Using mutex_trylock() avoids
+        * possible deadlock due to lock order inversion, at the cost of
+        * pushing the problem back to userspace.
+        */
+       if (!mutex_trylock(&ppp_mutex)) {
+               err = -EBUSY;
+               goto out;
+       }
+
+       if (file->f_op != &ppp_device_fops || file->private_data) {
+               err = -EBADF;
+               goto out_unlock;
+       }
+
+       conf.file = file;
+       err = ppp_dev_configure(src_net, dev, &conf);
+
+out_unlock:
+       mutex_unlock(&ppp_mutex);
+out:
+       fput(file);
+
+       return err;
+}
+
+static void ppp_nl_dellink(struct net_device *dev, struct list_head *head)
+{
+       unregister_netdevice_queue(dev, head);
+}
+
+static size_t ppp_nl_get_size(const struct net_device *dev)
+{
+       return 0;
+}
+
+static int ppp_nl_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+       return 0;
+}
+
+static struct net *ppp_nl_get_link_net(const struct net_device *dev)
+{
+       struct ppp *ppp = netdev_priv(dev);
+
+       return ppp->ppp_net;
+}
+
+static struct rtnl_link_ops ppp_link_ops __read_mostly = {
+       .kind           = "ppp",
+       .maxtype        = IFLA_PPP_MAX,
+       .policy         = ppp_nl_policy,
+       .priv_size      = sizeof(struct ppp),
+       .setup          = ppp_setup,
+       .validate       = ppp_nl_validate,
+       .newlink        = ppp_nl_newlink,
+       .dellink        = ppp_nl_dellink,
+       .get_size       = ppp_nl_get_size,
+       .fill_info      = ppp_nl_fill_info,
+       .get_link_net   = ppp_nl_get_link_net,
+};
+
 #define PPP_MAJOR      108
 
 /* Called at boot time if ppp is compiled into the kernel,
@@ -988,11 +1177,19 @@ static int __init ppp_init(void)
                goto out_chrdev;
        }
 
+       err = rtnl_link_register(&ppp_link_ops);
+       if (err) {
+               pr_err("failed to register rtnetlink PPP handler\n");
+               goto out_class;
+       }
+
        /* not a big deal if we fail here :-) */
        device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");
 
        return 0;
 
+out_class:
+       class_destroy(ppp_class);
 out_chrdev:
        unregister_chrdev(PPP_MAJOR, "ppp");
 out_net:
@@ -2732,102 +2929,42 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
  * or if there is already a unit with the requested number.
  * unit == -1 means allocate a new number.
  */
-static struct ppp *ppp_create_interface(struct net *net, int unit,
-                                       struct file *file, int *retp)
+static int ppp_create_interface(struct net *net, struct file *file, int *unit)
 {
+       struct ppp_config conf = {
+               .file = file,
+               .unit = *unit,
+               .ifname_is_set = false,
+       };
+       struct net_device *dev;
        struct ppp *ppp;
-       struct ppp_net *pn;
-       struct net_device *dev = NULL;
-       int ret = -ENOMEM;
-       int i;
+       int err;
 
        dev = alloc_netdev(sizeof(struct ppp), "", NET_NAME_ENUM, ppp_setup);
-       if (!dev)
-               goto out1;
-
-       pn = ppp_pernet(net);
-
-       ppp = netdev_priv(dev);
-       ppp->dev = dev;
-       ppp->mru = PPP_MRU;
-       init_ppp_file(&ppp->file, INTERFACE);
-       ppp->file.hdrlen = PPP_HDRLEN - 2;      /* don't count proto bytes */
-       ppp->owner = file;
-       for (i = 0; i < NUM_NP; ++i)
-               ppp->npmode[i] = NPMODE_PASS;
-       INIT_LIST_HEAD(&ppp->channels);
-       spin_lock_init(&ppp->rlock);
-       spin_lock_init(&ppp->wlock);
-#ifdef CONFIG_PPP_MULTILINK
-       ppp->minseq = -1;
-       skb_queue_head_init(&ppp->mrq);
-#endif /* CONFIG_PPP_MULTILINK */
-#ifdef CONFIG_PPP_FILTER
-       ppp->pass_filter = NULL;
-       ppp->active_filter = NULL;
-#endif /* CONFIG_PPP_FILTER */
-
-       /*
-        * drum roll: don't forget to set
-        * the net device is belong to
-        */
+       if (!dev) {
+               err = -ENOMEM;
+               goto err;
+       }
        dev_net_set(dev, net);
+       dev->rtnl_link_ops = &ppp_link_ops;
 
        rtnl_lock();
-       mutex_lock(&pn->all_ppp_mutex);
 
-       if (unit < 0) {
-               unit = unit_get(&pn->units_idr, ppp);
-               if (unit < 0) {
-                       ret = unit;
-                       goto out2;
-               }
-       } else {
-               ret = -EEXIST;
-               if (unit_find(&pn->units_idr, unit))
-                       goto out2; /* unit already exists */
-               /*
-                * if caller need a specified unit number
-                * lets try to satisfy him, otherwise --
-                * he should better ask us for new unit number
-                *
-                * NOTE: yes I know that returning EEXIST it's not
-                * fair but at least pppd will ask us to allocate
-                * new unit in this case so user is happy :)
-                */
-               unit = unit_set(&pn->units_idr, ppp, unit);
-               if (unit < 0)
-                       goto out2;
-       }
-
-       /* Initialize the new ppp unit */
-       ppp->file.index = unit;
-       sprintf(dev->name, "ppp%d", unit);
-
-       ret = register_netdevice(dev);
-       if (ret != 0) {
-               unit_put(&pn->units_idr, unit);
-               netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n",
-                          dev->name, ret);
-               goto out2;
-       }
-
-       ppp->ppp_net = net;
+       err = ppp_dev_configure(net, dev, &conf);
+       if (err < 0)
+               goto err_dev;
+       ppp = netdev_priv(dev);
+       *unit = ppp->file.index;
 
-       atomic_inc(&ppp_unit_count);
-       mutex_unlock(&pn->all_ppp_mutex);
        rtnl_unlock();
 
-       *retp = 0;
-       return ppp;
+       return 0;
 
-out2:
-       mutex_unlock(&pn->all_ppp_mutex);
+err_dev:
        rtnl_unlock();
        free_netdev(dev);
-out1:
-       *retp = ret;
-       return NULL;
+err:
+       return err;
 }
 
 /*
@@ -3016,6 +3153,7 @@ static void __exit ppp_cleanup(void)
        /* should never happen */
        if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
                pr_err("PPP: removing module but units remain!\n");
+       rtnl_link_unregister(&ppp_link_ops);
        unregister_chrdev(PPP_MAJOR, "ppp");
        device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
        class_destroy(ppp_class);
@@ -3074,4 +3212,5 @@ EXPORT_SYMBOL(ppp_register_compressor);
 EXPORT_SYMBOL(ppp_unregister_compressor);
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0);
+MODULE_ALIAS_RTNL_LINK("ppp");
 MODULE_ALIAS("devname:ppp");
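
MODULE_ALIAS_RTNL_LINK("ppp") lets RTM_NEWLINK requests with kind "ppp" autoload the module. A hypothetical userspace sketch of the new creation path, passing an open /dev/ppp descriptor via IFLA_PPP_DEV_FD (error handling elided; the attribute layout mirrors ppp_nl_policy and ppp_nl_validate() above):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_link.h>	/* IFLA_PPP_DEV_FD, added by this series */

static struct rtattr *nest_begin(struct nlmsghdr *nh, unsigned short type)
{
	struct rtattr *rta = (struct rtattr *)((char *)nh + NLMSG_ALIGN(nh->nlmsg_len));

	rta->rta_type = type;
	rta->rta_len = RTA_LENGTH(0);
	nh->nlmsg_len = NLMSG_ALIGN(nh->nlmsg_len) + RTA_ALIGN(rta->rta_len);
	return rta;
}

static void nest_end(struct nlmsghdr *nh, struct rtattr *nest)
{
	nest->rta_len = (char *)nh + NLMSG_ALIGN(nh->nlmsg_len) - (char *)nest;
}

static void put_attr(struct nlmsghdr *nh, unsigned short type,
		     const void *data, unsigned short len)
{
	struct rtattr *rta = (struct rtattr *)((char *)nh + NLMSG_ALIGN(nh->nlmsg_len));

	rta->rta_type = type;
	rta->rta_len = RTA_LENGTH(len);
	memcpy(RTA_DATA(rta), data, len);
	nh->nlmsg_len = NLMSG_ALIGN(nh->nlmsg_len) + RTA_ALIGN(rta->rta_len);
}

int main(void)
{
	struct {
		struct nlmsghdr nh;
		struct ifinfomsg ifm;
		char buf[256];
	} req = {
		.nh.nlmsg_len	= NLMSG_LENGTH(sizeof(struct ifinfomsg)),
		.nh.nlmsg_type	= RTM_NEWLINK,
		.nh.nlmsg_flags	= NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL | NLM_F_ACK,
		.ifm.ifi_family	= AF_UNSPEC,
	};
	struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
	int ppp_fd = open("/dev/ppp", O_RDWR);
	int nl = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	struct rtattr *linkinfo, *data;

	put_attr(&req.nh, IFLA_IFNAME, "ppp-test", sizeof("ppp-test"));
	linkinfo = nest_begin(&req.nh, IFLA_LINKINFO);
	put_attr(&req.nh, IFLA_INFO_KIND, "ppp", sizeof("ppp"));
	data = nest_begin(&req.nh, IFLA_INFO_DATA);
	put_attr(&req.nh, IFLA_PPP_DEV_FD, &ppp_fd, sizeof(ppp_fd));
	nest_end(&req.nh, data);
	nest_end(&req.nh, linkinfo);

	sendto(nl, &req, req.nh.nlmsg_len, 0,
	       (struct sockaddr *)&sa, sizeof(sa));

	/* the new unit lives as long as ppp_fd stays open */
	pause();
	return 0;
}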
index 9cfe6aeac84e12837c99dff5af80a92a37ec8c88..a31f4610b4936b698724597d66164c1470cdeac9 100644 (file)
@@ -179,11 +179,7 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        unsigned long flags;
        int add_num = 1;
 
-       local_irq_save(flags);
-       if (!spin_trylock(&rnet->tx_lock)) {
-               local_irq_restore(flags);
-               return NETDEV_TX_LOCKED;
-       }
+       spin_lock_irqsave(&rnet->tx_lock, flags);
 
        if (is_multicast_ether_addr(eth->h_dest))
                add_num = nets[rnet->mport->id].nact;
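
The local_irq_save() + spin_trylock() dance existed only so the driver could return NETDEV_TX_LOCKED, a status the core is retiring; taking the lock unconditionally is the standard pattern. A minimal sketch of the resulting xmit shape:

	spin_lock_irqsave(&rnet->tx_lock, flags);
	/* ... enqueue the skb to the outbound ring ... */
	spin_unlock_irqrestore(&rnet->tx_lock, flags);
	return NETDEV_TX_OK;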
index a17d86a5773477fba7059600aa9a9291ef0106b6..9ed6d1c1ee45f15b2686b7c4102045b3acead55c 100644 (file)
@@ -407,7 +407,7 @@ static void sl_encaps(struct slip *sl, unsigned char *icp, int len)
        set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
        actual = sl->tty->ops->write(sl->tty, sl->xbuff, count);
 #ifdef SL_CHECK_TRANSMIT
-       sl->dev->trans_start = jiffies;
+       netif_trans_update(sl->dev);
 #endif
        sl->xleft = count - actual;
        sl->xhead = sl->xbuff + actual;
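
netif_trans_update() replaces open-coded writes to dev->trans_start throughout this series, in preparation for moving the timestamp into the per-queue structure. Its 4.7-era definition is approximately:

/* include/linux/netdevice.h (approximate) */
static inline void netif_trans_update(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	if (txq->trans_start != jiffies)
		txq->trans_start = jiffies;
}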
index 64bc143eddd9830bc470b35c8c1b9c427ba979c3..425e983bab93069c42b5a32e876a41c70226d232 100644 (file)
@@ -131,6 +131,17 @@ struct tap_filter {
 
 #define TUN_FLOW_EXPIRE (3 * HZ)
 
+struct tun_pcpu_stats {
+       u64 rx_packets;
+       u64 rx_bytes;
+       u64 tx_packets;
+       u64 tx_bytes;
+       struct u64_stats_sync syncp;
+       u32 rx_dropped;
+       u32 tx_dropped;
+       u32 rx_frame_errors;
+};
+
 /* A tun_file connects an open character device to a tuntap netdevice. It
  * also contains all socket related structures (except sock_fprog and tap_filter)
  * to serve as one transmit queue for tuntap device. The sock_fprog and
@@ -205,6 +216,7 @@ struct tun_struct {
        struct list_head disabled;
        void *security;
        u32 flow_count;
+       struct tun_pcpu_stats __percpu *pcpu_stats;
 };
 
 #ifdef CONFIG_TUN_VNET_CROSS_LE
@@ -821,7 +833,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
        if (txq >= numqueues)
                goto drop;
 
-       if (numqueues == 1) {
+#ifdef CONFIG_RPS
+       if (numqueues == 1 && static_key_false(&rps_needed)) {
                /* Select queue was not called for the skbuff, so we extract the
                 * RPS hash and save it into the flow_table here.
                 */
@@ -836,6 +849,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
                                tun_flow_save_rps_rxhash(e, rxhash);
                }
        }
+#endif
 
        tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
 
@@ -886,7 +900,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
        return NETDEV_TX_OK;
 
 drop:
-       dev->stats.tx_dropped++;
+       this_cpu_inc(tun->pcpu_stats->tx_dropped);
        skb_tx_error(skb);
        kfree_skb(skb);
        rcu_read_unlock();
@@ -949,6 +963,43 @@ static void tun_set_headroom(struct net_device *dev, int new_hr)
        tun->align = new_hr;
 }
 
+static struct rtnl_link_stats64 *
+tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+       u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
+       struct tun_struct *tun = netdev_priv(dev);
+       struct tun_pcpu_stats *p;
+       int i;
+
+       for_each_possible_cpu(i) {
+               u64 rxpackets, rxbytes, txpackets, txbytes;
+               unsigned int start;
+
+               p = per_cpu_ptr(tun->pcpu_stats, i);
+               do {
+                       start = u64_stats_fetch_begin(&p->syncp);
+                       rxpackets       = p->rx_packets;
+                       rxbytes         = p->rx_bytes;
+                       txpackets       = p->tx_packets;
+                       txbytes         = p->tx_bytes;
+               } while (u64_stats_fetch_retry(&p->syncp, start));
+
+               stats->rx_packets       += rxpackets;
+               stats->rx_bytes         += rxbytes;
+               stats->tx_packets       += txpackets;
+               stats->tx_bytes         += txbytes;
+
+               /* u32 counters */
+               rx_dropped      += p->rx_dropped;
+               rx_frame_errors += p->rx_frame_errors;
+               tx_dropped      += p->tx_dropped;
+       }
+       stats->rx_dropped  = rx_dropped;
+       stats->rx_frame_errors = rx_frame_errors;
+       stats->tx_dropped = tx_dropped;
+       return stats;
+}
+
 static const struct net_device_ops tun_netdev_ops = {
        .ndo_uninit             = tun_net_uninit,
        .ndo_open               = tun_net_open,
@@ -961,6 +1012,7 @@ static const struct net_device_ops tun_netdev_ops = {
        .ndo_poll_controller    = tun_poll_controller,
 #endif
        .ndo_set_rx_headroom    = tun_set_headroom,
+       .ndo_get_stats64        = tun_net_get_stats64,
 };
 
 static const struct net_device_ops tap_netdev_ops = {
@@ -979,6 +1031,7 @@ static const struct net_device_ops tap_netdev_ops = {
 #endif
        .ndo_features_check     = passthru_features_check,
        .ndo_set_rx_headroom    = tun_set_headroom,
+       .ndo_get_stats64        = tun_net_get_stats64,
 };
 
 static void tun_flow_init(struct tun_struct *tun)
@@ -1017,7 +1070,6 @@ static void tun_net_init(struct net_device *dev)
                /* Zero header length */
                dev->type = ARPHRD_NONE;
                dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
-               dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
                break;
 
        case IFF_TAP:
@@ -1029,7 +1081,6 @@ static void tun_net_init(struct net_device *dev)
 
                eth_hw_addr_random(dev);
 
-               dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
                break;
        }
 }
@@ -1105,6 +1156,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
        size_t total_len = iov_iter_count(from);
        size_t len = total_len, align = tun->align, linear;
        struct virtio_net_hdr gso = { 0 };
+       struct tun_pcpu_stats *stats;
        int good_linear;
        int copylen;
        bool zerocopy = false;
@@ -1179,7 +1231,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
        skb = tun_alloc_skb(tfile, align, copylen, linear, noblock);
        if (IS_ERR(skb)) {
                if (PTR_ERR(skb) != -EAGAIN)
-                       tun->dev->stats.rx_dropped++;
+                       this_cpu_inc(tun->pcpu_stats->rx_dropped);
                return PTR_ERR(skb);
        }
 
@@ -1194,7 +1246,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
        }
 
        if (err) {
-               tun->dev->stats.rx_dropped++;
+               this_cpu_inc(tun->pcpu_stats->rx_dropped);
                kfree_skb(skb);
                return -EFAULT;
        }
@@ -1202,7 +1254,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
        if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
                if (!skb_partial_csum_set(skb, tun16_to_cpu(tun, gso.csum_start),
                                          tun16_to_cpu(tun, gso.csum_offset))) {
-                       tun->dev->stats.rx_frame_errors++;
+                       this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
                        kfree_skb(skb);
                        return -EINVAL;
                }
@@ -1219,7 +1271,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                                pi.proto = htons(ETH_P_IPV6);
                                break;
                        default:
-                               tun->dev->stats.rx_dropped++;
+                               this_cpu_inc(tun->pcpu_stats->rx_dropped);
                                kfree_skb(skb);
                                return -EINVAL;
                        }
@@ -1247,7 +1299,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                        skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
                        break;
                default:
-                       tun->dev->stats.rx_frame_errors++;
+                       this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
                        kfree_skb(skb);
                        return -EINVAL;
                }
@@ -1257,7 +1309,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 
                skb_shinfo(skb)->gso_size = tun16_to_cpu(tun, gso.gso_size);
                if (skb_shinfo(skb)->gso_size == 0) {
-                       tun->dev->stats.rx_frame_errors++;
+                       this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
                        kfree_skb(skb);
                        return -EINVAL;
                }
@@ -1280,8 +1332,12 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
        rxhash = skb_get_hash(skb);
        netif_rx_ni(skb);
 
-       tun->dev->stats.rx_packets++;
-       tun->dev->stats.rx_bytes += len;
+       stats = get_cpu_ptr(tun->pcpu_stats);
+       u64_stats_update_begin(&stats->syncp);
+       stats->rx_packets++;
+       stats->rx_bytes += len;
+       u64_stats_update_end(&stats->syncp);
+       put_cpu_ptr(stats);
 
        tun_flow_update(tun, rxhash, tfile);
        return total_len;
@@ -1310,6 +1366,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
                            struct iov_iter *iter)
 {
        struct tun_pi pi = { 0, skb->protocol };
+       struct tun_pcpu_stats *stats;
        ssize_t total;
        int vlan_offset = 0;
        int vlan_hlen = 0;
@@ -1410,8 +1467,13 @@ static ssize_t tun_put_user(struct tun_struct *tun,
        skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);
 
 done:
-       tun->dev->stats.tx_packets++;
-       tun->dev->stats.tx_bytes += skb->len + vlan_hlen;
+       /* caller is in process context */
+       stats = get_cpu_ptr(tun->pcpu_stats);
+       u64_stats_update_begin(&stats->syncp);
+       stats->tx_packets++;
+       stats->tx_bytes += skb->len + vlan_hlen;
+       u64_stats_update_end(&stats->syncp);
+       put_cpu_ptr(tun->pcpu_stats);
 
        return total;
 }
@@ -1469,6 +1531,7 @@ static void tun_free_netdev(struct net_device *dev)
        struct tun_struct *tun = netdev_priv(dev);
 
        BUG_ON(!(list_empty(&tun->disabled)));
+       free_percpu(tun->pcpu_stats);
        tun_flow_uninit(tun);
        security_tun_dev_free_security(tun->security);
        free_netdev(dev);
@@ -1483,6 +1546,8 @@ static void tun_setup(struct net_device *dev)
 
        dev->ethtool_ops = &tun_ethtool_ops;
        dev->destructor = tun_free_netdev;
+       /* We prefer our own queue length */
+       dev->tx_queue_len = TUN_READQ_SIZE;
 }
 
 /* Trivial set of netlink ops to allow deleting tun or tap
@@ -1715,11 +1780,17 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                tun->filter_attached = false;
                tun->sndbuf = tfile->socket.sk->sk_sndbuf;
 
+               tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
+               if (!tun->pcpu_stats) {
+                       err = -ENOMEM;
+                       goto err_free_dev;
+               }
+
                spin_lock_init(&tun->lock);
 
                err = security_tun_dev_alloc_security(&tun->security);
                if (err < 0)
-                       goto err_free_dev;
+                       goto err_free_stat;
 
                tun_net_init(dev);
                tun_flow_init(tun);
@@ -1727,7 +1798,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
                                   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
                                   NETIF_F_HW_VLAN_STAG_TX;
-               dev->features = dev->hw_features;
+               dev->features = dev->hw_features | NETIF_F_LLTX;
                dev->vlan_features = dev->features &
                                     ~(NETIF_F_HW_VLAN_CTAG_TX |
                                       NETIF_F_HW_VLAN_STAG_TX);
@@ -1763,6 +1834,8 @@ err_detach:
 err_free_flow:
        tun_flow_uninit(tun);
        security_tun_dev_free_security(tun->security);
+err_free_stat:
+       free_percpu(tun->pcpu_stats);
 err_free_dev:
        free_netdev(dev);
        return err;
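
netdev_alloc_pcpu_stats() allocates one stats instance per possible CPU and initializes each embedded syncp, which is why struct tun_pcpu_stats must carry a member with exactly that name. A rough expansion of the helper:

	/* roughly what netdev_alloc_pcpu_stats(struct tun_pcpu_stats) does */
	struct tun_pcpu_stats __percpu *stats;
	int cpu;

	stats = alloc_percpu(struct tun_pcpu_stats);
	if (stats)
		for_each_possible_cpu(cpu)
			u64_stats_init(&per_cpu_ptr(stats, cpu)->syncp);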
index 4e2b26a88b15f03ba8302d5d7a83d1a5b1a4d4ea..d9ca05d3ac8eadb17a0ec12d539c50dde7fb151b 100644 (file)
@@ -376,7 +376,7 @@ static int catc_tx_run(struct catc *catc)
        catc->tx_idx = !catc->tx_idx;
        catc->tx_ptr = 0;
 
-       catc->netdev->trans_start = jiffies;
+       netif_trans_update(catc->netdev);
        return status;
 }
 
@@ -389,7 +389,7 @@ static void catc_tx_done(struct urb *urb)
        if (status == -ECONNRESET) {
                dev_dbg(&urb->dev->dev, "Tx Reset.\n");
                urb->status = 0;
-               catc->netdev->trans_start = jiffies;
+               netif_trans_update(catc->netdev);
                catc->netdev->stats.tx_errors++;
                clear_bit(TX_RUNNING, &catc->flags);
                netif_wake_queue(catc->netdev);
index bdd83d95ec0aa677b8443476ef2ee66018aff701..96a5028621c8b320c2d6feca5911f40cde10771e 100644 (file)
@@ -617,8 +617,13 @@ static const struct usb_device_id mbim_devs[] = {
        { USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
          .driver_info = (unsigned long)&cdc_mbim_info,
        },
-       /* Huawei E3372 fails unless NDP comes after the IP packets */
-       { USB_DEVICE_AND_INTERFACE_INFO(0x12d1, 0x157d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+
+       /* Some Huawei devices, ME906s-158 (12d1:15c1) and E3372
+        * (12d1:157d), are known to fail unless the NDP is placed
+        * after the IP packets.  Applying the quirk to all Huawei
+        * devices is broader than necessary, but harmless.
+        */
+       { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
          .driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end,
        },
        /* default entry */
index f64b25c221e83b63704db0d856132ca96b7e606c..770212baaf05ddb032967730acc6b0ff46ec8f40 100644 (file)
@@ -938,7 +938,7 @@ static void kaweth_tx_timeout(struct net_device *net)
 
        dev_warn(&net->dev, "%s: Tx timed out. Resetting.\n", net->name);
        kaweth->stats.tx_errors++;
-       net->trans_start = jiffies;
+       netif_trans_update(net);
 
        usb_unlink_urb(kaweth->tx_urb);
 }
index f20890ee03f33368fd68c6b5fb82f8fd76fa4310..6a9d474b08b21cb7e9489906f44c17a63cc3bd39 100644 (file)
@@ -269,6 +269,7 @@ struct skb_data {           /* skb->cb is one of these */
        struct lan78xx_net *dev;
        enum skb_state state;
        size_t length;
+       int num_of_packet;
 };
 
 struct usb_context {
@@ -1803,7 +1804,34 @@ static void lan78xx_remove_mdio(struct lan78xx_net *dev)
 
 static void lan78xx_link_status_change(struct net_device *net)
 {
-       /* nothing to do */
+       struct phy_device *phydev = net->phydev;
+       int ret, temp;
+
+       /* In forced 100 Mbit/s full/half-duplex mode, the chip may fail
+        * to set the mode correctly when the cable is switched between
+        * a long (~50+ m) one and a short one. As a workaround, force
+        * the speed to 10 before setting it back to 100.
+        */
+       if (!phydev->autoneg && (phydev->speed == 100)) {
+               /* disable phy interrupt */
+               temp = phy_read(phydev, LAN88XX_INT_MASK);
+               temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
+               ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
+
+               temp = phy_read(phydev, MII_BMCR);
+               temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
+               phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
+               temp |= BMCR_SPEED100;
+               phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
+
+               /* clear any interrupt pending from the workaround */
+               temp = phy_read(phydev, LAN88XX_INT_STS);
+
+               /* re-enable the phy interrupt */
+               temp = phy_read(phydev, LAN88XX_INT_MASK);
+               temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
+               ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
+       }
 }
 
 static int lan78xx_phy_init(struct lan78xx_net *dev)
@@ -2464,7 +2492,7 @@ static void tx_complete(struct urb *urb)
        struct lan78xx_net *dev = entry->dev;
 
        if (urb->status == 0) {
-               dev->net->stats.tx_packets++;
+               dev->net->stats.tx_packets += entry->num_of_packet;
                dev->net->stats.tx_bytes += entry->length;
        } else {
                dev->net->stats.tx_errors++;
@@ -2681,10 +2709,11 @@ void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
                return;
        }
 
-       skb->protocol = eth_type_trans(skb, dev->net);
        dev->net->stats.rx_packets++;
        dev->net->stats.rx_bytes += skb->len;
 
+       skb->protocol = eth_type_trans(skb, dev->net);
+
        netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
                  skb->len + sizeof(struct ethhdr), skb->protocol);
        memset(skb->cb, 0, sizeof(struct skb_data));
@@ -2934,13 +2963,16 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
 
        skb_totallen = 0;
        pkt_cnt = 0;
+       count = 0;
+       length = 0;
        for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
                if (skb_is_gso(skb)) {
                        if (pkt_cnt) {
                                /* handle previous packets first */
                                break;
                        }
-                       length = skb->len;
+                       count = 1;
+                       length = skb->len - TX_OVERHEAD;
                        skb2 = skb_dequeue(tqp);
                        goto gso_skb;
                }
@@ -2961,14 +2993,13 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
        for (count = pos = 0; count < pkt_cnt; count++) {
                skb2 = skb_dequeue(tqp);
                if (skb2) {
+                       length += (skb2->len - TX_OVERHEAD);
                        memcpy(skb->data + pos, skb2->data, skb2->len);
                        pos += roundup(skb2->len, sizeof(u32));
                        dev_kfree_skb(skb2);
                }
        }
 
-       length = skb_totallen;
-
 gso_skb:
        urb = usb_alloc_urb(0, GFP_ATOMIC);
        if (!urb) {
@@ -2980,6 +3011,7 @@ gso_skb:
        entry->urb = urb;
        entry->dev = dev;
        entry->length = length;
+       entry->num_of_packet = count;
 
        spin_lock_irqsave(&dev->txq.lock, flags);
        ret = usb_autopm_get_interface_async(dev->intf);
@@ -3013,7 +3045,7 @@ gso_skb:
        ret = usb_submit_urb(urb, GFP_ATOMIC);
        switch (ret) {
        case 0:
-               dev->net->trans_start = jiffies;
+               netif_trans_update(dev->net);
                lan78xx_queue_skb(&dev->txq, skb, tx_start);
                if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
                        netif_stop_queue(dev->net);
@@ -3697,7 +3729,7 @@ int lan78xx_resume(struct usb_interface *intf)
                                usb_free_urb(res);
                                usb_autopm_put_interface_async(dev->intf);
                        } else {
-                               dev->net->trans_start = jiffies;
+                               netif_trans_update(dev->net);
                                lan78xx_queue_skb(&dev->txq, skb, tx_start);
                        }
                }
index f840802159158b56a5b840d7abb83152fcd0c83f..36cd7f016a8de63490dfcbe89e38f6d9e4e4b7a6 100644 (file)
@@ -411,7 +411,7 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
        int ret;
 
        read_mii_word(pegasus, pegasus->phy, MII_LPA, &linkpart);
-       data[0] = 0xc9;
+       data[0] = 0xc8; /* TX & RX enable, append status, no CRC */
        data[1] = 0;
        if (linkpart & (ADVERTISE_100FULL | ADVERTISE_10FULL))
                data[1] |= 0x20;        /* set full duplex */
@@ -497,7 +497,7 @@ static void read_bulk_callback(struct urb *urb)
                pkt_len = buf[count - 3] << 8;
                pkt_len += buf[count - 4];
                pkt_len &= 0xfff;
-               pkt_len -= 8;
+               pkt_len -= 4;
        }
 
        /*
@@ -528,7 +528,7 @@ static void read_bulk_callback(struct urb *urb)
 goon:
        usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
                          usb_rcvbulkpipe(pegasus->usb, 1),
-                         pegasus->rx_skb->data, PEGASUS_MTU + 8,
+                         pegasus->rx_skb->data, PEGASUS_MTU,
                          read_bulk_callback, pegasus);
        rx_status = usb_submit_urb(pegasus->rx_urb, GFP_ATOMIC);
        if (rx_status == -ENODEV)
@@ -569,7 +569,7 @@ static void rx_fixup(unsigned long data)
        }
        usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
                          usb_rcvbulkpipe(pegasus->usb, 1),
-                         pegasus->rx_skb->data, PEGASUS_MTU + 8,
+                         pegasus->rx_skb->data, PEGASUS_MTU,
                          read_bulk_callback, pegasus);
 try_again:
        status = usb_submit_urb(pegasus->rx_urb, GFP_ATOMIC);
@@ -615,7 +615,7 @@ static void write_bulk_callback(struct urb *urb)
                break;
        }
 
-       net->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(net); /* prevent tx timeout */
        netif_wake_queue(net);
 }
 
@@ -823,7 +823,7 @@ static int pegasus_open(struct net_device *net)
 
        usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
                          usb_rcvbulkpipe(pegasus->usb, 1),
-                         pegasus->rx_skb->data, PEGASUS_MTU + 8,
+                         pegasus->rx_skb->data, PEGASUS_MTU,
                          read_bulk_callback, pegasus);
        if ((res = usb_submit_urb(pegasus->rx_urb, GFP_KERNEL))) {
                if (res == -ENODEV)
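
The three pegasus hunks are one logical change: with 0xc8 in the first control register the adapter presumably stops appending the 4-byte FCS, so received frames end in the 4 status bytes alone. Under that assumption the buffer layout works out as:

/* old (0xc9): [frame][4-byte CRC][4-byte status] -> pkt_len -= 8,
 *             rx buffer sized PEGASUS_MTU + 8
 * new (0xc8): [frame][4-byte status]             -> pkt_len -= 4,
 *             rx buffer sized PEGASUS_MTU
 */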
index d1f78c2c97aaf9bcfded27cd0fb826608579a6d2..3f9f6ed3eec4de5c14e43039e08d53b209e52cb4 100644 (file)
@@ -3366,7 +3366,7 @@ static void r8153_init(struct r8152 *tp)
        ocp_write_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE, ocp_data);
 
        ocp_data = FIFO_EMPTY_1FB | ROK_EXIT_LPM;
-       if (tp->version == RTL_VER_04 && tp->udev->speed != USB_SPEED_SUPER)
+       if (tp->version == RTL_VER_04 && tp->udev->speed < USB_SPEED_SUPER)
                ocp_data |= LPM_TIMER_500MS;
        else
                ocp_data |= LPM_TIMER_500US;
@@ -4211,6 +4211,7 @@ static int rtl8152_probe(struct usb_interface *intf,
 
        switch (udev->speed) {
        case USB_SPEED_SUPER:
+       case USB_SPEED_SUPER_PLUS:
                tp->coalesce = COALESCE_SUPER;
                break;
        case USB_SPEED_HIGH:
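
The switch from != USB_SPEED_SUPER to < USB_SPEED_SUPER works because usb_device_speed is an ordered enum, so the longer LPM timer now also stays off for SuperSpeed+ devices. For reference (include/uapi/linux/usb/ch9.h of this era):

enum usb_device_speed {
	USB_SPEED_UNKNOWN = 0,
	USB_SPEED_LOW, USB_SPEED_FULL,	/* usb 1.1 */
	USB_SPEED_HIGH,			/* usb 2.0 */
	USB_SPEED_WIRELESS,		/* wireless (usb 2.5) */
	USB_SPEED_SUPER,		/* usb 3.0 */
	USB_SPEED_SUPER_PLUS,		/* usb 3.1 */
};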
index d37b7dce2d405af079e8067ad032f0acdbdbbf9b..7c72bfac89d08df2f6c873ffe0a11e51364e9547 100644 (file)
@@ -451,7 +451,7 @@ static void write_bulk_callback(struct urb *urb)
        if (status)
                dev_info(&urb->dev->dev, "%s: Tx status %d\n",
                         dev->netdev->name, status);
-       dev->netdev->trans_start = jiffies;
+       netif_trans_update(dev->netdev);
        netif_wake_queue(dev->netdev);
 }
 
@@ -694,7 +694,7 @@ static netdev_tx_t rtl8150_start_xmit(struct sk_buff *skb,
        } else {
                netdev->stats.tx_packets++;
                netdev->stats.tx_bytes += skb->len;
-               netdev->trans_start = jiffies;
+               netif_trans_update(netdev);
        }
 
        return NETDEV_TX_OK;
index 30033dbe666263f12ed05cbe7d2fe1e924b156e4..9af9799935dbbd4baed06eb03402803e0ce8d9c8 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/crc32.h>
 #include <linux/usb/usbnet.h>
 #include <linux/slab.h>
+#include <linux/of_net.h>
 #include "smsc75xx.h"
 
 #define SMSC_CHIPNAME                  "smsc75xx"
@@ -98,9 +99,11 @@ static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index,
        ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
                 | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
                 0, index, &buf, 4);
-       if (unlikely(ret < 0))
+       if (unlikely(ret < 0)) {
                netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
                            index, ret);
+               return ret;
+       }
 
        le32_to_cpus(&buf);
        *data = buf;
@@ -761,6 +764,15 @@ static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
 
 static void smsc75xx_init_mac_address(struct usbnet *dev)
 {
+       const u8 *mac_addr;
+
+       /* maybe the boot loader passed the MAC address in devicetree */
+       mac_addr = of_get_mac_address(dev->udev->dev.of_node);
+       if (mac_addr) {
+               memcpy(dev->net->dev_addr, mac_addr, ETH_ALEN);
+               return;
+       }
+
        /* try reading mac address from EEPROM */
        if (smsc75xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
                        dev->net->dev_addr) == 0) {
@@ -772,7 +784,7 @@ static void smsc75xx_init_mac_address(struct usbnet *dev)
                }
        }
 
-       /* no eeprom, or eeprom values are invalid. generate random MAC */
+       /* no useful static MAC address found. generate a random one */
        eth_hw_addr_random(dev->net);
        netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n");
 }
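
of_get_mac_address() returns NULL unless the node carries a valid ETH_ALEN-sized address property, so the EEPROM and random-address fallbacks are still reached when devicetree has nothing useful. Its core check is roughly (drivers/of/of_net.c):

static const void *of_get_mac_addr(struct device_node *np, const char *name)
{
	struct property *pp = of_find_property(np, name, NULL);

	if (pp && pp->length == ETH_ALEN && is_valid_ether_addr(pp->value))
		return pp->value;
	return NULL;
}

/* of_get_mac_address() tries "mac-address", then "local-mac-address",
 * then "address", in that order. */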
index 66b3ab9f614eb07edb05757ee333e998f7841f7d..d9d2806a47b12aa0e7a7e7838e31c100fe22f1d7 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/crc32.h>
 #include <linux/usb/usbnet.h>
 #include <linux/slab.h>
+#include <linux/of_net.h>
 #include "smsc95xx.h"
 
 #define SMSC_CHIPNAME                  "smsc95xx"
@@ -91,9 +92,11 @@ static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index,
        ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
                 | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
                 0, index, &buf, 4);
-       if (unlikely(ret < 0))
+       if (unlikely(ret < 0)) {
                netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
                            index, ret);
+               return ret;
+       }
 
        le32_to_cpus(&buf);
        *data = buf;
@@ -765,6 +768,15 @@ static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
 
 static void smsc95xx_init_mac_address(struct usbnet *dev)
 {
+       const u8 *mac_addr;
+
+       /* maybe the boot loader passed the MAC address in devicetree */
+       mac_addr = of_get_mac_address(dev->udev->dev.of_node);
+       if (mac_addr) {
+               memcpy(dev->net->dev_addr, mac_addr, ETH_ALEN);
+               return;
+       }
+
        /* try reading mac address from EEPROM */
        if (smsc95xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
                        dev->net->dev_addr) == 0) {
@@ -775,7 +787,7 @@ static void smsc95xx_init_mac_address(struct usbnet *dev)
                }
        }
 
-       /* no eeprom, or eeprom values are invalid. generate random MAC */
+       /* no useful static MAC address found. generate a random one */
        eth_hw_addr_random(dev->net);
        netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n");
 }
index 10798128c03fc64881c5a437df3510f016461b7c..61ba464049374593316e1c0b869774861ee79b5e 100644 (file)
@@ -356,6 +356,7 @@ void usbnet_update_max_qlen(struct usbnet *dev)
                dev->tx_qlen = MAX_QUEUE_MEMORY / dev->hard_mtu;
                break;
        case USB_SPEED_SUPER:
+       case USB_SPEED_SUPER_PLUS:
                /*
                 * Don't take the default 5ms qlen for super speed HC, to
                 * save memory; iperf tests show a 2.5ms qlen can
@@ -1415,7 +1416,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
                          "tx: submit urb err %d\n", retval);
                break;
        case 0:
-               net->trans_start = jiffies;
+               netif_trans_update(net);
                __usbnet_queue_skb(&dev->txq, skb, tx_start);
                if (dev->txq.qlen >= TX_QLEN (dev))
                        netif_stop_queue (net);
@@ -1844,7 +1845,7 @@ int usbnet_resume (struct usb_interface *intf)
                                usb_free_urb(res);
                                usb_autopm_put_interface_async(dev->intf);
                        } else {
-                               dev->net->trans_start = jiffies;
+                               netif_trans_update(dev->net);
                                __skb_queue_tail(&dev->txq, skb);
                        }
                }
index 4f30a6ae50d029f8ce0bb8105490b13489a5f090..f37a6e61d4ad53ecc4248b1dad8861f6574d7ab4 100644 (file)
@@ -312,10 +312,9 @@ static const struct net_device_ops veth_netdev_ops = {
        .ndo_set_rx_headroom    = veth_set_rx_headroom,
 };
 
-#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO |    \
-                      NETIF_F_HW_CSUM | NETIF_F_RXCSUM | NETIF_F_HIGHDMA | \
-                      NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |           \
-                      NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT | NETIF_F_UFO |   \
+#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
+                      NETIF_F_RXCSUM | NETIF_F_HIGHDMA | \
+                      NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
                       NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
                       NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX )
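
Replacing the hand-maintained list with the two umbrella masks means veth automatically picks up GSO types added later. At this point the masks expand to approximately (include/linux/netdev_features.h):

#define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | \
				 NETIF_F_TSO6 | NETIF_F_UFO)
#define NETIF_F_GSO_ENCAP_ALL	(NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | \
				 NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT | \
				 NETIF_F_GSO_UDP_TUNNEL | \
				 NETIF_F_GSO_UDP_TUNNEL_CSUM)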
 
index b2348f67b00a7044189f6650000eea470a2ae386..db8022ae415bd234c19c8348dc6f90697d6ad840 100644 (file)
@@ -1152,12 +1152,16 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
                union Vmxnet3_GenericDesc *gdesc)
 {
        if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
-               /* typical case: TCP/UDP over IP and both csums are correct */
-               if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
-                                                       VMXNET3_RCD_CSUM_OK) {
+               if (gdesc->rcd.v4 &&
+                   (le32_to_cpu(gdesc->dword[3]) &
+                    VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+                       BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
+                       BUG_ON(gdesc->rcd.frg);
+               } else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
+                                            (1 << VMXNET3_RCD_TUC_SHIFT))) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
-                       BUG_ON(!(gdesc->rcd.v4  || gdesc->rcd.v6));
                        BUG_ON(gdesc->rcd.frg);
                } else {
                        if (gdesc->rcd.csum) {
index 729c344e677499b6f737aa622d6925c07d647a17..c4825392d64b6c8de17822532d113689f88d8bb7 100644 (file)
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.4.6.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.4.7.0-k"
 
 /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01040600
+#define VMXNET3_DRIVER_VERSION_NUM      0x01040700
 
 #if defined(CONFIG_PCI_MSI)
        /* RSS only makes sense if MSI-X is supported. */
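
The string and the packed number must be bumped together; each byte encodes one component. A sanity sketch of the decoding:

/* 0x01040700 <-> "1.4.7.0" */
static void vmxnet3_decode_version(u32 num, u8 v[4])
{
	v[0] = (num >> 24) & 0xff;	/* 1 */
	v[1] = (num >> 16) & 0xff;	/* 4 */
	v[2] = (num >>  8) & 0xff;	/* 7 */
	v[3] = num & 0xff;		/* 0 */
}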
index 9a9fabb900c19e5f7dc483a63f1b71fcd1a96e5a..0ea29345eb2e92cd2fb4be55584d9f0410444ac1 100644 (file)
@@ -42,9 +42,6 @@
 #define DRV_NAME       "vrf"
 #define DRV_VERSION    "1.0"
 
-#define vrf_master_get_rcu(dev) \
-       ((struct net_device *)rcu_dereference(dev->rx_handler_data))
-
 struct net_vrf {
        struct rtable           *rth;
        struct rt6_info         *rt6;
@@ -60,125 +57,12 @@ struct pcpu_dstats {
        struct u64_stats_sync   syncp;
 };
 
-static struct dst_entry *vrf_ip_check(struct dst_entry *dst, u32 cookie)
-{
-       return dst;
-}
-
-static int vrf_ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
-{
-       return ip_local_out(net, sk, skb);
-}
-
-static unsigned int vrf_v4_mtu(const struct dst_entry *dst)
-{
-       /* TO-DO: return max ethernet size? */
-       return dst->dev->mtu;
-}
-
-static void vrf_dst_destroy(struct dst_entry *dst)
-{
-       /* our dst lives forever - or until the device is closed */
-}
-
-static unsigned int vrf_default_advmss(const struct dst_entry *dst)
-{
-       return 65535 - 40;
-}
-
-static struct dst_ops vrf_dst_ops = {
-       .family         = AF_INET,
-       .local_out      = vrf_ip_local_out,
-       .check          = vrf_ip_check,
-       .mtu            = vrf_v4_mtu,
-       .destroy        = vrf_dst_destroy,
-       .default_advmss = vrf_default_advmss,
-};
-
-/* neighbor handling is done with actual device; do not want
- * to flip skb->dev for those ndisc packets. This really fails
- * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
- * a start.
- */
-#if IS_ENABLED(CONFIG_IPV6)
-static bool check_ipv6_frame(const struct sk_buff *skb)
-{
-       const struct ipv6hdr *ipv6h;
-       struct ipv6hdr _ipv6h;
-       bool rc = true;
-
-       ipv6h = skb_header_pointer(skb, 0, sizeof(_ipv6h), &_ipv6h);
-       if (!ipv6h)
-               goto out;
-
-       if (ipv6h->nexthdr == NEXTHDR_ICMP) {
-               const struct icmp6hdr *icmph;
-               struct icmp6hdr _icmph;
-
-               icmph = skb_header_pointer(skb, sizeof(_ipv6h),
-                                          sizeof(_icmph), &_icmph);
-               if (!icmph)
-                       goto out;
-
-               switch (icmph->icmp6_type) {
-               case NDISC_ROUTER_SOLICITATION:
-               case NDISC_ROUTER_ADVERTISEMENT:
-               case NDISC_NEIGHBOUR_SOLICITATION:
-               case NDISC_NEIGHBOUR_ADVERTISEMENT:
-               case NDISC_REDIRECT:
-                       rc = false;
-                       break;
-               }
-       }
-
-out:
-       return rc;
-}
-#else
-static bool check_ipv6_frame(const struct sk_buff *skb)
-{
-       return false;
-}
-#endif
-
-static bool is_ip_rx_frame(struct sk_buff *skb)
-{
-       switch (skb->protocol) {
-       case htons(ETH_P_IP):
-               return true;
-       case htons(ETH_P_IPV6):
-               return check_ipv6_frame(skb);
-       }
-       return false;
-}
-
 static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
 {
        vrf_dev->stats.tx_errors++;
        kfree_skb(skb);
 }
 
-/* note: already called with rcu_read_lock */
-static rx_handler_result_t vrf_handle_frame(struct sk_buff **pskb)
-{
-       struct sk_buff *skb = *pskb;
-
-       if (is_ip_rx_frame(skb)) {
-               struct net_device *dev = vrf_master_get_rcu(skb->dev);
-               struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
-
-               u64_stats_update_begin(&dstats->syncp);
-               dstats->rx_pkts++;
-               dstats->rx_bytes += skb->len;
-               u64_stats_update_end(&dstats->syncp);
-
-               skb->dev = dev;
-
-               return RX_HANDLER_ANOTHER;
-       }
-       return RX_HANDLER_PASS;
-}
-
 static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
                                                 struct rtnl_link_stats64 *stats)
 {
@@ -349,46 +233,6 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
-static struct dst_entry *vrf_ip6_check(struct dst_entry *dst, u32 cookie)
-{
-       return dst;
-}
-
-static struct dst_ops vrf_dst_ops6 = {
-       .family         = AF_INET6,
-       .local_out      = ip6_local_out,
-       .check          = vrf_ip6_check,
-       .mtu            = vrf_v4_mtu,
-       .destroy        = vrf_dst_destroy,
-       .default_advmss = vrf_default_advmss,
-};
-
-static int init_dst_ops6_kmem_cachep(void)
-{
-       vrf_dst_ops6.kmem_cachep = kmem_cache_create("vrf_ip6_dst_cache",
-                                                    sizeof(struct rt6_info),
-                                                    0,
-                                                    SLAB_HWCACHE_ALIGN,
-                                                    NULL);
-
-       if (!vrf_dst_ops6.kmem_cachep)
-               return -ENOMEM;
-
-       return 0;
-}
-
-static void free_dst_ops6_kmem_cachep(void)
-{
-       kmem_cache_destroy(vrf_dst_ops6.kmem_cachep);
-}
-
-static int vrf_input6(struct sk_buff *skb)
-{
-       skb->dev->stats.rx_errors++;
-       kfree_skb(skb);
-       return 0;
-}
-
 /* modelled after ip6_finish_output2 */
 static int vrf_finish_output6(struct net *net, struct sock *sk,
                              struct sk_buff *skb)
@@ -429,67 +273,40 @@ static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
                            !(IP6CB(skb)->flags & IP6SKB_REROUTED));
 }
 
-static void vrf_rt6_destroy(struct net_vrf *vrf)
+static void vrf_rt6_release(struct net_vrf *vrf)
 {
-       dst_destroy(&vrf->rt6->dst);
-       free_percpu(vrf->rt6->rt6i_pcpu);
+       dst_release(&vrf->rt6->dst);
        vrf->rt6 = NULL;
 }
 
 static int vrf_rt6_create(struct net_device *dev)
 {
        struct net_vrf *vrf = netdev_priv(dev);
-       struct dst_entry *dst;
+       struct net *net = dev_net(dev);
+       struct fib6_table *rt6i_table;
        struct rt6_info *rt6;
-       int cpu;
        int rc = -ENOMEM;
 
-       rt6 = dst_alloc(&vrf_dst_ops6, dev, 0,
-                       DST_OBSOLETE_NONE,
-                       (DST_HOST | DST_NOPOLICY | DST_NOXFRM));
-       if (!rt6)
+       rt6i_table = fib6_new_table(net, vrf->tb_id);
+       if (!rt6i_table)
                goto out;
 
-       dst = &rt6->dst;
-
-       rt6->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_KERNEL);
-       if (!rt6->rt6i_pcpu) {
-               dst_destroy(dst);
+       rt6 = ip6_dst_alloc(net, dev,
+                           DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE);
+       if (!rt6)
                goto out;
-       }
-       for_each_possible_cpu(cpu) {
-               struct rt6_info **p = per_cpu_ptr(rt6->rt6i_pcpu, cpu);
-               *p =  NULL;
-       }
-
-       memset(dst + 1, 0, sizeof(*rt6) - sizeof(*dst));
 
-       INIT_LIST_HEAD(&rt6->rt6i_siblings);
-       INIT_LIST_HEAD(&rt6->rt6i_uncached);
+       dst_hold(&rt6->dst);
 
-       rt6->dst.input  = vrf_input6;
+       rt6->rt6i_table = rt6i_table;
        rt6->dst.output = vrf_output6;
-
-       rt6->rt6i_table = fib6_get_table(dev_net(dev), vrf->tb_id);
-
-       atomic_set(&rt6->dst.__refcnt, 2);
-
        vrf->rt6 = rt6;
        rc = 0;
 out:
        return rc;
 }
 #else
-static int init_dst_ops6_kmem_cachep(void)
-{
-       return 0;
-}
-
-static void free_dst_ops6_kmem_cachep(void)
-{
-}
-
-static void vrf_rt6_destroy(struct net_vrf *vrf)
+static void vrf_rt6_release(struct net_vrf *vrf)
 {
 }
 
@@ -557,11 +374,11 @@ static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
 
-static void vrf_rtable_destroy(struct net_vrf *vrf)
+static void vrf_rtable_release(struct net_vrf *vrf)
 {
        struct dst_entry *dst = (struct dst_entry *)vrf->rth;
 
-       dst_destroy(dst);
+       dst_release(dst);
        vrf->rth = NULL;
 }
 
@@ -570,22 +387,13 @@ static struct rtable *vrf_rtable_create(struct net_device *dev)
        struct net_vrf *vrf = netdev_priv(dev);
        struct rtable *rth;
 
-       rth = dst_alloc(&vrf_dst_ops, dev, 2,
-                       DST_OBSOLETE_NONE,
-                       (DST_HOST | DST_NOPOLICY | DST_NOXFRM));
+       if (!fib_new_table(dev_net(dev), vrf->tb_id))
+               return NULL;
+
+       rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
        if (rth) {
                rth->dst.output = vrf_output;
-               rth->rt_genid   = rt_genid_ipv4(dev_net(dev));
-               rth->rt_flags   = 0;
-               rth->rt_type    = RTN_UNICAST;
-               rth->rt_is_input = 0;
-               rth->rt_iif     = 0;
-               rth->rt_pmtu    = 0;
-               rth->rt_gateway = 0;
-               rth->rt_uses_gateway = 0;
                rth->rt_table_id = vrf->tb_id;
-               INIT_LIST_HEAD(&rth->rt_uncached);
-               rth->rt_uncached_list = NULL;
        }
 
        return rth;
@@ -617,28 +425,14 @@ static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
 {
        int ret;
 
-       /* register the packet handler for slave ports */
-       ret = netdev_rx_handler_register(port_dev, vrf_handle_frame, dev);
-       if (ret) {
-               netdev_err(port_dev,
-                          "Device %s failed to register rx_handler\n",
-                          port_dev->name);
-               goto out_fail;
-       }
-
        ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL);
        if (ret < 0)
-               goto out_unregister;
+               return ret;
 
        port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
        cycle_netdev(port_dev);
 
        return 0;
-
-out_unregister:
-       netdev_rx_handler_unregister(port_dev);
-out_fail:
-       return ret;
 }
 
 static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
@@ -655,8 +449,6 @@ static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
        netdev_upper_dev_unlink(port_dev, dev);
        port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;
 
-       netdev_rx_handler_unregister(port_dev);
-
        cycle_netdev(port_dev);
 
        return 0;
@@ -673,8 +465,8 @@ static void vrf_dev_uninit(struct net_device *dev)
        struct net_device *port_dev;
        struct list_head *iter;
 
-       vrf_rtable_destroy(vrf);
-       vrf_rt6_destroy(vrf);
+       vrf_rtable_release(vrf);
+       vrf_rt6_release(vrf);
 
        netdev_for_each_lower_dev(dev, port_dev, iter)
                vrf_del_slave(dev, port_dev);
@@ -704,7 +496,7 @@ static int vrf_dev_init(struct net_device *dev)
        return 0;
 
 out_rth:
-       vrf_rtable_destroy(vrf);
+       vrf_rtable_release(vrf);
 out_stats:
        free_percpu(dev->dstats);
        dev->dstats = NULL;
@@ -737,7 +529,7 @@ static struct rtable *vrf_get_rtable(const struct net_device *dev,
                struct net_vrf *vrf = netdev_priv(dev);
 
                rth = vrf->rth;
-               atomic_inc(&rth->dst.__refcnt);
+               dst_hold(&rth->dst);
        }
 
        return rth;
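/* Editor's sketch of the refcounted dst lifecycle implied by the
 * dst_hold()/dst_release() conversions in this patch (generic dst
 * semantics, not code from the patch): the VRF device keeps one
 * reference from creation, each vrf_get_rtable()/vrf_get_rt6_dst()
 * caller takes its own, and teardown merely drops the driver's
 * reference; the dst core frees the entry when the last one goes.
 */
static void vrf_dst_refs_sketch(struct dst_entry *dst)
{
        dst_hold(dst);          /* caller's reference, as in vrf_get_rtable() */
        /* ... caller uses the dst ... */
        dst_release(dst);       /* freed once __refcnt drops to zero */
}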
@@ -759,6 +551,8 @@ static int vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4)
 
        fl4->flowi4_flags |= FLOWI_FLAG_SKIP_NH_OIF;
        fl4->flowi4_iif = LOOPBACK_IFINDEX;
+       /* make sure oif is set to VRF device for lookup */
+       fl4->flowi4_oif = dev->ifindex;
        fl4->flowi4_tos = tos & IPTOS_RT_MASK;
        fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
                             RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
@@ -778,6 +572,95 @@ static int vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4)
        return rc;
 }
 
+#if IS_ENABLED(CONFIG_IPV6)
+/* neighbor handling is done with the actual device, so we do not
+ * want to flip skb->dev for ndisc packets. This check misses NDISC
+ * behind extension headers (e.g., NEXTHDR_HOP), but it is a start
+ * (see the extension-header-aware sketch below).
+ */
+static bool ipv6_ndisc_frame(const struct sk_buff *skb)
+{
+       const struct ipv6hdr *iph = ipv6_hdr(skb);
+       bool rc = false;
+
+       if (iph->nexthdr == NEXTHDR_ICMP) {
+               const struct icmp6hdr *icmph;
+               struct icmp6hdr _icmph;
+
+               icmph = skb_header_pointer(skb, sizeof(*iph),
+                                          sizeof(_icmph), &_icmph);
+               if (!icmph)
+                       goto out;
+
+               switch (icmph->icmp6_type) {
+               case NDISC_ROUTER_SOLICITATION:
+               case NDISC_ROUTER_ADVERTISEMENT:
+               case NDISC_NEIGHBOUR_SOLICITATION:
+               case NDISC_NEIGHBOUR_ADVERTISEMENT:
+               case NDISC_REDIRECT:
+                       rc = true;
+                       break;
+               }
+       }
+
+out:
+       return rc;
+}
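/* Editor's sketch of a hypothetical extension-header-aware variant
 * (not part of this patch): ipv6_skip_exthdr() walks past hop-by-hop
 * and other extension headers, so NDISC behind e.g. NEXTHDR_HOP would
 * also be recognized.
 */
static bool ipv6_ndisc_frame_exthdr_sketch(const struct sk_buff *skb)
{
        const struct ipv6hdr *iph = ipv6_hdr(skb);
        u8 nexthdr = iph->nexthdr;
        __be16 frag_off;
        int offset;

        offset = ipv6_skip_exthdr(skb, sizeof(*iph), &nexthdr, &frag_off);
        if (offset < 0 || nexthdr != NEXTHDR_ICMP)
                return false;

        /* then check icmp6_type at 'offset' exactly as above */
        return true;
}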
+
+static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
+                                  struct sk_buff *skb)
+{
+       /* if packet is NDISC keep the ingress interface */
+       if (!ipv6_ndisc_frame(skb)) {
+               skb->dev = vrf_dev;
+               skb->skb_iif = vrf_dev->ifindex;
+
+               skb_push(skb, skb->mac_len);
+               dev_queue_xmit_nit(skb, vrf_dev);
+               skb_pull(skb, skb->mac_len);
+
+               IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
+       }
+
+       return skb;
+}
+
+#else
+static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
+                                  struct sk_buff *skb)
+{
+       return skb;
+}
+#endif
+
+static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
+                                 struct sk_buff *skb)
+{
+       skb->dev = vrf_dev;
+       skb->skb_iif = vrf_dev->ifindex;
+
+       skb_push(skb, skb->mac_len);
+       dev_queue_xmit_nit(skb, vrf_dev);
+       skb_pull(skb, skb->mac_len);
+
+       return skb;
+}
+
+/* called with rcu lock held */
+static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev,
+                                 struct sk_buff *skb,
+                                 u16 proto)
+{
+       switch (proto) {
+       case AF_INET:
+               return vrf_ip_rcv(vrf_dev, skb);
+       case AF_INET6:
+               return vrf_ip6_rcv(vrf_dev, skb);
+       }
+
+       return skb;
+}
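/* Editor's sketch of how the stack invokes the new hook (assumed shape
 * of the l3mdev helper; it replaces the per-port rx_handler removed in
 * do_vrf_add_slave() above with a single call on the master device):
 */
static struct sk_buff *l3mdev_l3_rcv_sketch(struct sk_buff *skb, u16 proto)
{
        struct net_device *master = NULL;

        if (netif_is_l3_slave(skb->dev))
                master = netdev_master_upper_dev_get_rcu(skb->dev);
        else if (netif_is_l3_master(skb->dev))
                master = skb->dev;

        if (master && master->l3mdev_ops->l3mdev_l3_rcv)
                skb = master->l3mdev_ops->l3mdev_l3_rcv(master, skb, proto);

        return skb;
}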
+
 #if IS_ENABLED(CONFIG_IPV6)
 static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
                                         const struct flowi6 *fl6)
@@ -788,7 +671,7 @@ static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
                struct net_vrf *vrf = netdev_priv(dev);
 
                rt = vrf->rt6;
-               atomic_inc(&rt->dst.__refcnt);
+               dst_hold(&rt->dst);
        }
 
        return (struct dst_entry *)rt;
@@ -799,6 +682,7 @@ static const struct l3mdev_ops vrf_l3mdev_ops = {
        .l3mdev_fib_table       = vrf_fib_table,
        .l3mdev_get_rtable      = vrf_get_rtable,
        .l3mdev_get_saddr       = vrf_get_saddr,
+       .l3mdev_l3_rcv          = vrf_l3_rcv,
 #if IS_ENABLED(CONFIG_IPV6)
        .l3mdev_get_rt6_dst     = vrf_get_rt6_dst,
 #endif
@@ -946,19 +830,6 @@ static int __init vrf_init_module(void)
 {
        int rc;
 
-       vrf_dst_ops.kmem_cachep =
-               kmem_cache_create("vrf_ip_dst_cache",
-                                 sizeof(struct rtable), 0,
-                                 SLAB_HWCACHE_ALIGN,
-                                 NULL);
-
-       if (!vrf_dst_ops.kmem_cachep)
-               return -ENOMEM;
-
-       rc = init_dst_ops6_kmem_cachep();
-       if (rc != 0)
-               goto error2;
-
        register_netdevice_notifier(&vrf_notifier_block);
 
        rc = rtnl_link_register(&vrf_link_ops);
@@ -969,22 +840,10 @@ static int __init vrf_init_module(void)
 
 error:
        unregister_netdevice_notifier(&vrf_notifier_block);
-       free_dst_ops6_kmem_cachep();
-error2:
-       kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
        return rc;
 }
 
-static void __exit vrf_cleanup_module(void)
-{
-       rtnl_link_unregister(&vrf_link_ops);
-       unregister_netdevice_notifier(&vrf_notifier_block);
-       kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
-       free_dst_ops6_kmem_cachep();
-}
-
 module_init(vrf_init_module);
-module_exit(vrf_cleanup_module);
 MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
 MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
 MODULE_LICENSE("GPL");
index 9f3634064c921fbd212d022705a5d21b355da316..2f29d20aa08f661c81630e7a3b4b922ed1b61966 100644 (file)
@@ -98,7 +98,6 @@ struct vxlan_fdb {
 
 /* salt for hash table */
 static u32 vxlan_salt __read_mostly;
-static struct workqueue_struct *vxlan_wq;
 
 static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
 {
@@ -614,8 +613,9 @@ out:
 
 static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
 {
-       udp_tunnel_gro_complete(skb, nhoff);
-
+       /* Sets 'skb->inner_mac_header' since we are always called with
+        * 'skb->encapsulation' set.
+        */
        return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
 }
 
@@ -1038,14 +1038,14 @@ static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
        return false;
 }
 
-static void __vxlan_sock_release(struct vxlan_sock *vs)
+static bool __vxlan_sock_release_prep(struct vxlan_sock *vs)
 {
        struct vxlan_net *vn;
 
        if (!vs)
-               return;
+               return false;
        if (!atomic_dec_and_test(&vs->refcnt))
-               return;
+               return false;
 
        vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id);
        spin_lock(&vn->sock_lock);
@@ -1053,14 +1053,28 @@ static void __vxlan_sock_release(struct vxlan_sock *vs)
        vxlan_notify_del_rx_port(vs);
        spin_unlock(&vn->sock_lock);
 
-       queue_work(vxlan_wq, &vs->del_work);
+       return true;
 }
 
 static void vxlan_sock_release(struct vxlan_dev *vxlan)
 {
-       __vxlan_sock_release(vxlan->vn4_sock);
+       bool ipv4 = __vxlan_sock_release_prep(vxlan->vn4_sock);
 #if IS_ENABLED(CONFIG_IPV6)
-       __vxlan_sock_release(vxlan->vn6_sock);
+       bool ipv6 = __vxlan_sock_release_prep(vxlan->vn6_sock);
+#endif
+
+       synchronize_net();
+
+       if (ipv4) {
+               udp_tunnel_sock_release(vxlan->vn4_sock->sock);
+               kfree(vxlan->vn4_sock);
+       }
+
+#if IS_ENABLED(CONFIG_IPV6)
+       if (ipv6) {
+               udp_tunnel_sock_release(vxlan->vn6_sock->sock);
+               kfree(vxlan->vn6_sock);
+       }
 #endif
 }
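/* Editor's sketch of the teardown pattern used above (assumed
 * semantics): unpublish the socket from the RCU-visible hash, wait for
 * in-flight receivers with synchronize_net(), and only then free --
 * which is why the del_work workqueue and kfree_rcu() can go away.
 */
static void vxlan_sock_teardown_sketch(struct vxlan_net *vn,
                                       struct vxlan_sock *vs)
{
        spin_lock(&vn->sock_lock);
        hlist_del_init_rcu(&vs->hlist);     /* 1) unpublish */
        spin_unlock(&vn->sock_lock);

        synchronize_net();                  /* 2) wait for RCU readers */

        udp_tunnel_sock_release(vs->sock);  /* 3) now safe to free */
        kfree(vs);
}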
 
@@ -1181,7 +1195,7 @@ out:
 }
 
 static bool vxlan_parse_gpe_hdr(struct vxlanhdr *unparsed,
-                               __be32 *protocol,
+                               __be16 *protocol,
                                struct sk_buff *skb, u32 vxflags)
 {
        struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)unparsed;
@@ -1284,7 +1298,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
        struct vxlanhdr unparsed;
        struct vxlan_metadata _md;
        struct vxlan_metadata *md = &_md;
-       __be32 protocol = htons(ETH_P_TEB);
+       __be16 protocol = htons(ETH_P_TEB);
        bool raw_proto = false;
        void *oiph;
 
@@ -1784,9 +1798,9 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
        if (WARN_ON(!skb))
                return -ENOMEM;
 
-       skb = iptunnel_handle_offloads(skb, type);
-       if (IS_ERR(skb))
-               return PTR_ERR(skb);
+       err = iptunnel_handle_offloads(skb, type);
+       if (err)
+               goto out_free;
 
        vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
        vxh->vx_flags = VXLAN_HF_VNI;
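/* Editor's sketch of the new calling convention (as used above):
 * iptunnel_handle_offloads() now returns 0 or a negative errno instead
 * of a possibly-reallocated skb, so callers keep their skb pointer and
 * stay responsible for freeing it on error.
 */
static int offloads_call_sketch(struct sk_buff *skb, int type)
{
        int err = iptunnel_handle_offloads(skb, type);

        if (err)
                kfree_skb(skb);         /* caller still owns the skb */
        return err;
}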
@@ -2514,7 +2528,7 @@ static struct device_type vxlan_type = {
  * supply the listening VXLAN udp ports. Callers are expected
  * to implement the ndo_add_vxlan_port.
  */
-void vxlan_get_rx_port(struct net_device *dev)
+static void vxlan_push_rx_ports(struct net_device *dev)
 {
        struct vxlan_sock *vs;
        struct net *net = dev_net(dev);
@@ -2523,6 +2537,9 @@ void vxlan_get_rx_port(struct net_device *dev)
        __be16 port;
        unsigned int i;
 
+       if (!dev->netdev_ops->ndo_add_vxlan_port)
+               return;
+
        spin_lock(&vn->sock_lock);
        for (i = 0; i < PORT_HASH_SIZE; ++i) {
                hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
@@ -2534,7 +2551,6 @@ void vxlan_get_rx_port(struct net_device *dev)
        }
        spin_unlock(&vn->sock_lock);
 }
-EXPORT_SYMBOL_GPL(vxlan_get_rx_port);
 
 /* Initialize the device structure. */
 static void vxlan_setup(struct net_device *dev)
@@ -2542,6 +2558,9 @@ static void vxlan_setup(struct net_device *dev)
        struct vxlan_dev *vxlan = netdev_priv(dev);
        unsigned int h;
 
+       eth_hw_addr_random(dev);
+       ether_setup(dev);
+
        dev->destructor = free_netdev;
        SET_NETDEV_DEVTYPE(dev, &vxlan_type);
 
@@ -2577,8 +2596,6 @@ static void vxlan_setup(struct net_device *dev)
 
 static void vxlan_ether_setup(struct net_device *dev)
 {
-       eth_hw_addr_random(dev);
-       ether_setup(dev);
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        dev->netdev_ops = &vxlan_netdev_ether_ops;
@@ -2586,11 +2603,10 @@ static void vxlan_ether_setup(struct net_device *dev)
 
 static void vxlan_raw_setup(struct net_device *dev)
 {
+       dev->header_ops = NULL;
        dev->type = ARPHRD_NONE;
        dev->hard_header_len = 0;
        dev->addr_len = 0;
-       dev->mtu = ETH_DATA_LEN;
-       dev->tx_queue_len = 1000;
        dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
        dev->netdev_ops = &vxlan_netdev_raw_ops;
 }
@@ -2674,13 +2690,6 @@ static const struct ethtool_ops vxlan_ethtool_ops = {
        .get_link       = ethtool_op_get_link,
 };
 
-static void vxlan_del_work(struct work_struct *work)
-{
-       struct vxlan_sock *vs = container_of(work, struct vxlan_sock, del_work);
-       udp_tunnel_sock_release(vs->sock);
-       kfree_rcu(vs, rcu);
-}
-
 static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
                                        __be16 port, u32 flags)
 {
@@ -2726,8 +2735,6 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
        for (h = 0; h < VNI_HASH_SIZE; ++h)
                INIT_HLIST_HEAD(&vs->vni_list[h]);
 
-       INIT_WORK(&vs->del_work, vxlan_del_work);
-
        sock = vxlan_create_sock(net, ipv6, port, flags);
        if (IS_ERR(sock)) {
                pr_info("Cannot bind port %d, err=%ld\n", ntohs(port),
@@ -3279,20 +3286,22 @@ static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
        unregister_netdevice_many(&list_kill);
 }
 
-static int vxlan_lowerdev_event(struct notifier_block *unused,
-                               unsigned long event, void *ptr)
+static int vxlan_netdevice_event(struct notifier_block *unused,
+                                unsigned long event, void *ptr)
 {
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
 
        if (event == NETDEV_UNREGISTER)
                vxlan_handle_lowerdev_unregister(vn, dev);
+       else if (event == NETDEV_OFFLOAD_PUSH_VXLAN)
+               vxlan_push_rx_ports(dev);
 
        return NOTIFY_DONE;
 }
 
 static struct notifier_block vxlan_notifier_block __read_mostly = {
-       .notifier_call = vxlan_lowerdev_event,
+       .notifier_call = vxlan_netdevice_event,
 };
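/* Editor's sketch of the driver side (hypothetical; the trigger helper
 * lives outside this hunk): instead of calling the now-unexported
 * vxlan_get_rx_port(), a NIC driver requests a port replay by firing
 * the new netdev event, which lands in vxlan_netdevice_event() above:
 */
static void nic_request_vxlan_ports_sketch(struct net_device *dev)
{
        call_netdevice_notifiers(NETDEV_OFFLOAD_PUSH_VXLAN, dev);
}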
 
 static __net_init int vxlan_init_net(struct net *net)
@@ -3346,10 +3355,6 @@ static int __init vxlan_init_module(void)
 {
        int rc;
 
-       vxlan_wq = alloc_workqueue("vxlan", 0, 0);
-       if (!vxlan_wq)
-               return -ENOMEM;
-
        get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));
 
        rc = register_pernet_subsys(&vxlan_net_ops);
@@ -3370,7 +3375,6 @@ out3:
 out2:
        unregister_pernet_subsys(&vxlan_net_ops);
 out1:
-       destroy_workqueue(vxlan_wq);
        return rc;
 }
 late_initcall(vxlan_init_module);
@@ -3379,7 +3383,6 @@ static void __exit vxlan_cleanup_module(void)
 {
        rtnl_link_unregister(&vxlan_link_ops);
        unregister_netdevice_notifier(&vxlan_notifier_block);
-       destroy_workqueue(vxlan_wq);
        unregister_pernet_subsys(&vxlan_net_ops);
        /* rcu_barrier() is called by netns */
 }
index 848ea6a399f236b14cc5d9a79dd38038e0331aec..b87fe0a01c69ff3bb53b609632f5016eb8806073 100644 (file)
@@ -739,7 +739,7 @@ static char *cosa_net_setup_rx(struct channel_data *chan, int size)
                chan->netdev->stats.rx_dropped++;
                return NULL;
        }
-       chan->netdev->trans_start = jiffies;
+       netif_trans_update(chan->netdev);
        return skb_put(chan->rx_skb, size);
 }
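/* Editor's note on the repeated conversion below (assumed shape of the
 * helper): netif_trans_update() replaces open-coded writes to the
 * net_device trans_start field, recording the timestamp on the
 * per-queue structure instead. Roughly:
 */
static inline void netif_trans_update_sketch(struct net_device *dev)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

        if (txq->trans_start != jiffies)
                txq->trans_start = jiffies;     /* avoid dirtying a hot line */
}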
 
index 69b994f3b8c528406ef3f28c02147b0070a074e3..3c9cbf908ec73cd95a75d59a289eef5e68e5f414 100644 (file)
@@ -831,7 +831,7 @@ fst_tx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
                DMA_OWN | TX_STP | TX_ENP);
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += len;
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
 }
 
 /*
@@ -1389,7 +1389,7 @@ do_bottom_half_tx(struct fst_card_info *card)
                                                DMA_OWN | TX_STP | TX_ENP);
                                        dev->stats.tx_packets++;
                                        dev->stats.tx_bytes += skb->len;
-                                       dev->trans_start = jiffies;
+                                       netif_trans_update(dev);
                                } else {
                                        /* Or do it through dma */
                                        memcpy(card->tx_dma_handle_host,
@@ -2258,7 +2258,7 @@ fst_tx_timeout(struct net_device *dev)
            card->card_no, port->index);
        fst_issue_cmd(port, ABORTTX);
 
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
        netif_wake_queue(dev);
        port->start = 0;
 }
index bb33b242ab48d66cd354c5626c8fb0de81be125e..299140c04556979b9fc75b2a43211b325c81243a 100644 (file)
@@ -2105,7 +2105,7 @@ static void lmc_driver_timeout(struct net_device *dev)
     sc->lmc_device->stats.tx_errors++;
     sc->extra_stats.tx_ProcTimeout++; /* -baz */
 
-    dev->trans_start = jiffies; /* prevent tx timeout */
+    netif_trans_update(dev); /* prevent tx timeout */
 
 bug_out:
 
index 8fef8d83436ddef9682df26e8ed2af6f2ae13377..d98c7e57137d22edd8d2fa8f20d8632ca621ab04 100644 (file)
@@ -860,9 +860,9 @@ prepare_to_send( struct sk_buff  *skb,  struct net_device  *dev )
 
        outb( inb( dev->base_addr + CSR0 ) | TR_REQ,  dev->base_addr + CSR0 );
 #ifdef CONFIG_SBNI_MULTILINE
-       nl->master->trans_start = jiffies;
+       netif_trans_update(nl->master);
 #else
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
 #endif
 }
 
@@ -889,10 +889,10 @@ drop_xmit_queue( struct net_device  *dev )
        nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
 #ifdef CONFIG_SBNI_MULTILINE
        netif_start_queue( nl->master );
-       nl->master->trans_start = jiffies;
+       netif_trans_update(nl->master);
 #else
        netif_start_queue( dev );
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
 #endif
 }
 
index a9970f1af976a5b0c34d41be8df61ed47365713b..bb74f4b9a02fb5dd3fe1ff1ad0ec112430b46d99 100644 (file)
@@ -334,7 +334,7 @@ int i2400m_net_tx(struct i2400m *i2400m, struct net_device *net_dev,
        d_fnstart(3, dev, "(i2400m %p net_dev %p skb %p)\n",
                  i2400m, net_dev, skb);
        /* FIXME: check eth hdr, only IPv4 is routed by the device as of now */
-       net_dev->trans_start = jiffies;
+       netif_trans_update(net_dev);
        i2400m_tx_prep_header(skb);
        d_printf(3, dev, "NETTX: skb %p sending %d bytes to radio\n",
                 skb, skb->len);
index 15f057ed41adffd2bc85e6761b762fd37ff190da..70ecd82d674dcee50aef2cbf64b321959f09544a 100644 (file)
@@ -440,7 +440,7 @@ static void adm8211_interrupt_rci(struct ieee80211_hw *dev)
                        rx_status.rate_idx = rate;
 
                        rx_status.freq = adm8211_channels[priv->channel - 1].center_freq;
-                       rx_status.band = IEEE80211_BAND_2GHZ;
+                       rx_status.band = NL80211_BAND_2GHZ;
 
                        memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
                        ieee80211_rx_irqsafe(dev, skb);
@@ -1894,7 +1894,7 @@ static int adm8211_probe(struct pci_dev *pdev,
 
        priv->channel = 1;
 
-       dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+       dev->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
 
        err = ieee80211_register_hw(dev);
        if (err) {
index 3b343c63aa5235afbcb05eed54b7e945f8b50d80..8aded24bcdf49e30db4f43be74f92b9c47139fed 100644 (file)
@@ -1471,12 +1471,12 @@ static int ar5523_init_modes(struct ar5523 *ar)
        memcpy(ar->channels, ar5523_channels, sizeof(ar5523_channels));
        memcpy(ar->rates, ar5523_rates, sizeof(ar5523_rates));
 
-       ar->band.band = IEEE80211_BAND_2GHZ;
+       ar->band.band = NL80211_BAND_2GHZ;
        ar->band.channels = ar->channels;
        ar->band.n_channels = ARRAY_SIZE(ar5523_channels);
        ar->band.bitrates = ar->rates;
        ar->band.n_bitrates = ARRAY_SIZE(ar5523_rates);
-       ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &ar->band;
+       ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = &ar->band;
        return 0;
 }
 
index 65ef483ebf5053a8f1b79ba0e41dfad19702d530..da7a7c8dafb262f579594466d9a7423c42605059 100644 (file)
@@ -185,7 +185,7 @@ struct ath_common {
        bool bt_ant_diversity;
 
        int last_rssi;
-       struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+       struct ieee80211_supported_band sbands[NUM_NL80211_BANDS];
 };
 
 static inline const struct ath_ps_ops *ath_ps_ops(struct ath_common *common)
index edf3629288bc49cb1832b41028efdd79b8b32546..9fb8d7472d18631b8aaac1cafbfb1c61078cfa2d 100644 (file)
@@ -411,7 +411,8 @@ int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
 
        lockdep_assert_held(&ar_pci->ce_lock);
 
-       if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
+       if ((pipe->id != 5) &&
+           CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
                return -ENOSPC;
 
        desc->addr = __cpu_to_le32(paddr);
@@ -425,6 +426,19 @@ int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
        return 0;
 }
 
+void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
+{
+       struct ath10k *ar = pipe->ar;
+       struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
+       unsigned int nentries_mask = dest_ring->nentries_mask;
+       unsigned int write_index = dest_ring->write_index;
+       u32 ctrl_addr = pipe->ctrl_addr;
+
+       write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries);
+       ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
+       dest_ring->write_index = write_index;
+}
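/* Editor's sketch of the intended usage (hypothetical caller; the real
 * refill path lives in the HTT rx code outside this hunk): after
 * re-filling 'n' descriptors in place, the hardware write index is
 * advanced once for the whole batch rather than once per buffer:
 */
static void ce_rx_refill_done_sketch(struct ath10k_ce_pipe *pipe, u32 n)
{
        ath10k_ce_rx_update_write_idx(pipe, n); /* single doorbell write */
}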
+
 int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
 {
        struct ath10k *ar = pipe->ar;
@@ -444,14 +458,10 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
  */
 int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
                                         void **per_transfer_contextp,
-                                        u32 *bufferp,
-                                        unsigned int *nbytesp,
-                                        unsigned int *transfer_idp,
-                                        unsigned int *flagsp)
+                                        unsigned int *nbytesp)
 {
        struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
        unsigned int nentries_mask = dest_ring->nentries_mask;
-       struct ath10k *ar = ce_state->ar;
        unsigned int sw_index = dest_ring->sw_index;
 
        struct ce_desc *base = dest_ring->base_addr_owner_space;
@@ -476,21 +486,17 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
        desc->nbytes = 0;
 
        /* Return data from completed destination descriptor */
-       *bufferp = __le32_to_cpu(sdesc.addr);
        *nbytesp = nbytes;
-       *transfer_idp = MS(__le16_to_cpu(sdesc.flags), CE_DESC_FLAGS_META_DATA);
-
-       if (__le16_to_cpu(sdesc.flags) & CE_DESC_FLAGS_BYTE_SWAP)
-               *flagsp = CE_RECV_FLAG_SWAPPED;
-       else
-               *flagsp = 0;
 
        if (per_transfer_contextp)
                *per_transfer_contextp =
                        dest_ring->per_transfer_context[sw_index];
 
-       /* sanity */
-       dest_ring->per_transfer_context[sw_index] = NULL;
+       /* Copy engine 5 (HTT Rx) will reuse the same transfer context.
+        * So update the transfer context for all CEs except CE5.
+        */
+       if (ce_state->id != 5)
+               dest_ring->per_transfer_context[sw_index] = NULL;
 
        /* Update sw_index */
        sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
@@ -501,10 +507,7 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
 
 int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
                                  void **per_transfer_contextp,
-                                 u32 *bufferp,
-                                 unsigned int *nbytesp,
-                                 unsigned int *transfer_idp,
-                                 unsigned int *flagsp)
+                                 unsigned int *nbytesp)
 {
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -513,8 +516,7 @@ int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
        spin_lock_bh(&ar_pci->ce_lock);
        ret = ath10k_ce_completed_recv_next_nolock(ce_state,
                                                   per_transfer_contextp,
-                                                  bufferp, nbytesp,
-                                                  transfer_idp, flagsp);
+                                                  nbytesp);
        spin_unlock_bh(&ar_pci->ce_lock);
 
        return ret;
@@ -1048,11 +1050,11 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
         *
         * For the lack of a better place do the check here.
         */
-       BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC >
+       BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
                     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
-       BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC >
+       BUILD_BUG_ON(2 * TARGET_10X_NUM_MSDU_DESC >
                     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
-       BUILD_BUG_ON(2*TARGET_TLV_NUM_MSDU_DESC >
+       BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
                     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
 
        ce_state->ar = ar;
index 47b734ce7ecf42112a582eaa2756ec3d6aca3868..dfc098606bee16be8e6e724b2aeae9a70ffd7a90 100644 (file)
@@ -22,7 +22,7 @@
 
 /* Maximum number of Copy Engine's supported */
 #define CE_COUNT_MAX 12
-#define CE_HTT_H2T_MSG_SRC_NENTRIES 4096
+#define CE_HTT_H2T_MSG_SRC_NENTRIES 8192
 
 /* Descriptor rings must be aligned to this boundary */
 #define CE_DESC_RING_ALIGN     8
@@ -166,6 +166,7 @@ int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe);
 int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe);
 int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
 int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
+void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries);
 
 /* recv flags */
 /* Data is byte-swapped */
@@ -177,10 +178,7 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
  */
 int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
                                  void **per_transfer_contextp,
-                                 u32 *bufferp,
-                                 unsigned int *nbytesp,
-                                 unsigned int *transfer_idp,
-                                 unsigned int *flagsp);
+                                 unsigned int *nbytesp);
 /*
  * Supply data for the next completed unprocessed send descriptor.
  * Pops 1 completed send buffer from Source ring.
@@ -212,10 +210,7 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
 
 int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
                                         void **per_transfer_contextp,
-                                        u32 *bufferp,
-                                        unsigned int *nbytesp,
-                                        unsigned int *transfer_idp,
-                                        unsigned int *flagsp);
+                                        unsigned int *nbytesp);
 
 /*
  * Support clean shutdown by allowing the caller to cancel
@@ -413,9 +408,11 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
 
 /* Ring arithmetic (modulus number of entries in ring, which is a pwr of 2). */
 #define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
-       (((int)(toidx)-(int)(fromidx)) & (nentries_mask))
+       (((int)(toidx) - (int)(fromidx)) & (nentries_mask))
 
 #define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
+#define CE_RING_IDX_ADD(nentries_mask, idx, num) \
+               (((idx) + (num)) & (nentries_mask))
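/* Editor's worked example for a power-of-two ring with nentries = 8
 * (mask 0x7): CE_RING_IDX_ADD(0x7, 6, 4) == (6 + 4) & 0x7 == 2,
 * i.e. advancing index 6 by 4 entries wraps around to 2.
 */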
 
 #define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB \
                                ar->regs->ce_wrap_intr_sum_host_msi_lsb
index c84c2d30ef1f3f8c127eaca4baafee0d748f07d6..e94cb87380d280c246b517fff9f2f8b28b75bcd2 100644 (file)
@@ -60,10 +60,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .channel_counters_freq_hz = 88000,
                .max_probe_resp_desc_thres = 0,
                .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
+               .cal_data_len = 2116,
                .fw = {
                        .dir = QCA988X_HW_2_0_FW_DIR,
-                       .fw = QCA988X_HW_2_0_FW_FILE,
-                       .otp = QCA988X_HW_2_0_OTP_FILE,
                        .board = QCA988X_HW_2_0_BOARD_DATA_FILE,
                        .board_size = QCA988X_BOARD_DATA_SZ,
                        .board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
@@ -78,10 +77,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .otp_exe_param = 0,
                .channel_counters_freq_hz = 88000,
                .max_probe_resp_desc_thres = 0,
+               .cal_data_len = 8124,
                .fw = {
                        .dir = QCA6174_HW_2_1_FW_DIR,
-                       .fw = QCA6174_HW_2_1_FW_FILE,
-                       .otp = QCA6174_HW_2_1_OTP_FILE,
                        .board = QCA6174_HW_2_1_BOARD_DATA_FILE,
                        .board_size = QCA6174_BOARD_DATA_SZ,
                        .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
@@ -97,10 +95,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .channel_counters_freq_hz = 88000,
                .max_probe_resp_desc_thres = 0,
                .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
+               .cal_data_len = 8124,
                .fw = {
                        .dir = QCA6174_HW_2_1_FW_DIR,
-                       .fw = QCA6174_HW_2_1_FW_FILE,
-                       .otp = QCA6174_HW_2_1_OTP_FILE,
                        .board = QCA6174_HW_2_1_BOARD_DATA_FILE,
                        .board_size = QCA6174_BOARD_DATA_SZ,
                        .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
@@ -116,10 +113,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .channel_counters_freq_hz = 88000,
                .max_probe_resp_desc_thres = 0,
                .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
+               .cal_data_len = 8124,
                .fw = {
                        .dir = QCA6174_HW_3_0_FW_DIR,
-                       .fw = QCA6174_HW_3_0_FW_FILE,
-                       .otp = QCA6174_HW_3_0_OTP_FILE,
                        .board = QCA6174_HW_3_0_BOARD_DATA_FILE,
                        .board_size = QCA6174_BOARD_DATA_SZ,
                        .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
@@ -135,11 +131,10 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .channel_counters_freq_hz = 88000,
                .max_probe_resp_desc_thres = 0,
                .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
+               .cal_data_len = 8124,
                .fw = {
                        /* uses same binaries as hw3.0 */
                        .dir = QCA6174_HW_3_0_FW_DIR,
-                       .fw = QCA6174_HW_3_0_FW_FILE,
-                       .otp = QCA6174_HW_3_0_OTP_FILE,
                        .board = QCA6174_HW_3_0_BOARD_DATA_FILE,
                        .board_size = QCA6174_BOARD_DATA_SZ,
                        .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
@@ -156,15 +151,12 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .channel_counters_freq_hz = 150000,
                .max_probe_resp_desc_thres = 24,
                .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
-               .num_msdu_desc = 1424,
-               .qcache_active_peers = 50,
                .tx_chain_mask = 0xf,
                .rx_chain_mask = 0xf,
                .max_spatial_stream = 4,
+               .cal_data_len = 12064,
                .fw = {
                        .dir = QCA99X0_HW_2_0_FW_DIR,
-                       .fw = QCA99X0_HW_2_0_FW_FILE,
-                       .otp = QCA99X0_HW_2_0_OTP_FILE,
                        .board = QCA99X0_HW_2_0_BOARD_DATA_FILE,
                        .board_size = QCA99X0_BOARD_DATA_SZ,
                        .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
@@ -179,10 +171,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .otp_exe_param = 0,
                .channel_counters_freq_hz = 88000,
                .max_probe_resp_desc_thres = 0,
+               .cal_data_len = 8124,
                .fw = {
                        .dir = QCA9377_HW_1_0_FW_DIR,
-                       .fw = QCA9377_HW_1_0_FW_FILE,
-                       .otp = QCA9377_HW_1_0_OTP_FILE,
                        .board = QCA9377_HW_1_0_BOARD_DATA_FILE,
                        .board_size = QCA9377_BOARD_DATA_SZ,
                        .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
@@ -197,10 +188,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .otp_exe_param = 0,
                .channel_counters_freq_hz = 88000,
                .max_probe_resp_desc_thres = 0,
+               .cal_data_len = 8124,
                .fw = {
                        .dir = QCA9377_HW_1_0_FW_DIR,
-                       .fw = QCA9377_HW_1_0_FW_FILE,
-                       .otp = QCA9377_HW_1_0_OTP_FILE,
                        .board = QCA9377_HW_1_0_BOARD_DATA_FILE,
                        .board_size = QCA9377_BOARD_DATA_SZ,
                        .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
@@ -217,15 +207,12 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .channel_counters_freq_hz = 125000,
                .max_probe_resp_desc_thres = 24,
                .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
-               .num_msdu_desc = 2500,
-               .qcache_active_peers = 35,
                .tx_chain_mask = 0x3,
                .rx_chain_mask = 0x3,
                .max_spatial_stream = 2,
+               .cal_data_len = 12064,
                .fw = {
                        .dir = QCA4019_HW_1_0_FW_DIR,
-                       .fw = QCA4019_HW_1_0_FW_FILE,
-                       .otp = QCA4019_HW_1_0_OTP_FILE,
                        .board = QCA4019_HW_1_0_BOARD_DATA_FILE,
                        .board_size = QCA4019_BOARD_DATA_SZ,
                        .board_ext_size = QCA4019_BOARD_EXT_DATA_SZ,
@@ -274,7 +261,7 @@ void ath10k_core_get_fw_features_str(struct ath10k *ar,
        int i;
 
        for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) {
-               if (test_bit(i, ar->fw_features)) {
+               if (test_bit(i, ar->normal_mode_fw.fw_file.fw_features)) {
                        if (len > 0)
                                len += scnprintf(buf + len, buf_len - len, ",");
 
@@ -466,18 +453,18 @@ exit:
        return ret;
 }
 
-static int ath10k_download_cal_file(struct ath10k *ar)
+static int ath10k_download_cal_file(struct ath10k *ar,
+                                   const struct firmware *file)
 {
        int ret;
 
-       if (!ar->cal_file)
+       if (!file)
                return -ENOENT;
 
-       if (IS_ERR(ar->cal_file))
-               return PTR_ERR(ar->cal_file);
+       if (IS_ERR(file))
+               return PTR_ERR(file);
 
-       ret = ath10k_download_board_data(ar, ar->cal_file->data,
-                                        ar->cal_file->size);
+       ret = ath10k_download_board_data(ar, file->data, file->size);
        if (ret) {
                ath10k_err(ar, "failed to download cal_file data: %d\n", ret);
                return ret;
@@ -488,7 +475,7 @@ static int ath10k_download_cal_file(struct ath10k *ar)
        return 0;
 }
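/* Editor's sketch of the resulting call sites (shapes assumed; both
 * blobs appear elsewhere in this patch): the same helper now serves
 * the pre-calibration and calibration files:
 *
 *      ath10k_download_cal_file(ar, ar->pre_cal_file);  // pre-cal
 *      ath10k_download_cal_file(ar, ar->cal_file);      // normal cal
 */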
 
-static int ath10k_download_cal_dt(struct ath10k *ar)
+static int ath10k_download_cal_dt(struct ath10k *ar, const char *dt_name)
 {
        struct device_node *node;
        int data_len;
@@ -502,13 +489,12 @@ static int ath10k_download_cal_dt(struct ath10k *ar)
                 */
                return -ENOENT;
 
-       if (!of_get_property(node, "qcom,ath10k-calibration-data",
-                            &data_len)) {
+       if (!of_get_property(node, dt_name, &data_len)) {
                /* The calibration data node is optional */
                return -ENOENT;
        }
 
-       if (data_len != QCA988X_CAL_DATA_LEN) {
+       if (data_len != ar->hw_params.cal_data_len) {
                ath10k_warn(ar, "invalid calibration data length in DT: %d\n",
                            data_len);
                ret = -EMSGSIZE;
@@ -521,8 +507,7 @@ static int ath10k_download_cal_dt(struct ath10k *ar)
                goto out;
        }
 
-       ret = of_property_read_u8_array(node, "qcom,ath10k-calibration-data",
-                                       data, data_len);
+       ret = of_property_read_u8_array(node, dt_name, data, data_len);
        if (ret) {
                ath10k_warn(ar, "failed to read calibration data from DT: %d\n",
                            ret);
@@ -553,7 +538,8 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
 
        address = ar->hw_params.patch_load_addr;
 
-       if (!ar->otp_data || !ar->otp_len) {
+       if (!ar->normal_mode_fw.fw_file.otp_data ||
+           !ar->normal_mode_fw.fw_file.otp_len) {
                ath10k_warn(ar,
                            "failed to retrieve board id because of invalid otp\n");
                return -ENODATA;
@@ -561,9 +547,11 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
 
        ath10k_dbg(ar, ATH10K_DBG_BOOT,
                   "boot upload otp to 0x%x len %zd for board id\n",
-                  address, ar->otp_len);
+                  address, ar->normal_mode_fw.fw_file.otp_len);
 
-       ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
+       ret = ath10k_bmi_fast_download(ar, address,
+                                      ar->normal_mode_fw.fw_file.otp_data,
+                                      ar->normal_mode_fw.fw_file.otp_len);
        if (ret) {
                ath10k_err(ar, "could not write otp for board id check: %d\n",
                           ret);
@@ -601,7 +589,9 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
        u32 bmi_otp_exe_param = ar->hw_params.otp_exe_param;
        int ret;
 
-       ret = ath10k_download_board_data(ar, ar->board_data, ar->board_len);
+       ret = ath10k_download_board_data(ar,
+                                        ar->running_fw->board_data,
+                                        ar->running_fw->board_len);
        if (ret) {
                ath10k_err(ar, "failed to download board data: %d\n", ret);
                return ret;
@@ -609,16 +599,20 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
 
        /* OTP is optional */
 
-       if (!ar->otp_data || !ar->otp_len) {
+       if (!ar->running_fw->fw_file.otp_data ||
+           !ar->running_fw->fw_file.otp_len) {
                ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n",
-                           ar->otp_data, ar->otp_len);
+                           ar->running_fw->fw_file.otp_data,
+                           ar->running_fw->fw_file.otp_len);
                return 0;
        }
 
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n",
-                  address, ar->otp_len);
+                  address, ar->running_fw->fw_file.otp_len);
 
-       ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
+       ret = ath10k_bmi_fast_download(ar, address,
+                                      ar->running_fw->fw_file.otp_data,
+                                      ar->running_fw->fw_file.otp_len);
        if (ret) {
                ath10k_err(ar, "could not write otp (%d)\n", ret);
                return ret;
@@ -633,7 +627,7 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
 
        if (!(skip_otp || test_bit(ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
-                                  ar->fw_features)) &&
+                                  ar->running_fw->fw_file.fw_features)) &&
            result != 0) {
                ath10k_err(ar, "otp calibration failed: %d", result);
                return -EINVAL;
@@ -642,46 +636,32 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
        return 0;
 }
 
-static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode)
+static int ath10k_download_fw(struct ath10k *ar)
 {
        u32 address, data_len;
-       const char *mode_name;
        const void *data;
        int ret;
 
        address = ar->hw_params.patch_load_addr;
 
-       switch (mode) {
-       case ATH10K_FIRMWARE_MODE_NORMAL:
-               data = ar->firmware_data;
-               data_len = ar->firmware_len;
-               mode_name = "normal";
-               ret = ath10k_swap_code_seg_configure(ar,
-                                                    ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW);
-               if (ret) {
-                       ath10k_err(ar, "failed to configure fw code swap: %d\n",
-                                  ret);
-                       return ret;
-               }
-               break;
-       case ATH10K_FIRMWARE_MODE_UTF:
-               data = ar->testmode.utf_firmware_data;
-               data_len = ar->testmode.utf_firmware_len;
-               mode_name = "utf";
-               break;
-       default:
-               ath10k_err(ar, "unknown firmware mode: %d\n", mode);
-               return -EINVAL;
+       data = ar->running_fw->fw_file.firmware_data;
+       data_len = ar->running_fw->fw_file.firmware_len;
+
+       ret = ath10k_swap_code_seg_configure(ar);
+       if (ret) {
+               ath10k_err(ar, "failed to configure fw code swap: %d\n",
+                          ret);
+               return ret;
        }
 
        ath10k_dbg(ar, ATH10K_DBG_BOOT,
-                  "boot uploading firmware image %p len %d mode %s\n",
-                  data, data_len, mode_name);
+                  "boot uploading firmware image %p len %d\n",
+                  data, data_len);
 
        ret = ath10k_bmi_fast_download(ar, address, data, data_len);
        if (ret) {
-               ath10k_err(ar, "failed to download %s firmware: %d\n",
-                          mode_name, ret);
+               ath10k_err(ar, "failed to download firmware: %d\n",
+                          ret);
                return ret;
        }
 
@@ -690,34 +670,30 @@ static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode)
 
 static void ath10k_core_free_board_files(struct ath10k *ar)
 {
-       if (!IS_ERR(ar->board))
-               release_firmware(ar->board);
+       if (!IS_ERR(ar->normal_mode_fw.board))
+               release_firmware(ar->normal_mode_fw.board);
 
-       ar->board = NULL;
-       ar->board_data = NULL;
-       ar->board_len = 0;
+       ar->normal_mode_fw.board = NULL;
+       ar->normal_mode_fw.board_data = NULL;
+       ar->normal_mode_fw.board_len = 0;
 }
 
 static void ath10k_core_free_firmware_files(struct ath10k *ar)
 {
-       if (!IS_ERR(ar->otp))
-               release_firmware(ar->otp);
-
-       if (!IS_ERR(ar->firmware))
-               release_firmware(ar->firmware);
+       if (!IS_ERR(ar->normal_mode_fw.fw_file.firmware))
+               release_firmware(ar->normal_mode_fw.fw_file.firmware);
 
        if (!IS_ERR(ar->cal_file))
                release_firmware(ar->cal_file);
 
        ath10k_swap_code_seg_release(ar);
 
-       ar->otp = NULL;
-       ar->otp_data = NULL;
-       ar->otp_len = 0;
+       ar->normal_mode_fw.fw_file.otp_data = NULL;
+       ar->normal_mode_fw.fw_file.otp_len = 0;
 
-       ar->firmware = NULL;
-       ar->firmware_data = NULL;
-       ar->firmware_len = 0;
+       ar->normal_mode_fw.fw_file.firmware = NULL;
+       ar->normal_mode_fw.fw_file.firmware_data = NULL;
+       ar->normal_mode_fw.fw_file.firmware_len = 0;
 
        ar->cal_file = NULL;
 }
@@ -726,6 +702,14 @@ static int ath10k_fetch_cal_file(struct ath10k *ar)
 {
        char filename[100];
 
+       /* pre-cal-<bus>-<id>.bin */
+       scnprintf(filename, sizeof(filename), "pre-cal-%s-%s.bin",
+                 ath10k_bus_str(ar->hif.bus), dev_name(ar->dev));
+
+       ar->pre_cal_file = ath10k_fetch_fw_file(ar, ATH10K_FW_DIR, filename);
+       if (!IS_ERR(ar->pre_cal_file))
+               goto success;
+
        /* cal-<bus>-<id>.bin */
        scnprintf(filename, sizeof(filename), "cal-%s-%s.bin",
                  ath10k_bus_str(ar->hif.bus), dev_name(ar->dev));
@@ -734,7 +718,7 @@ static int ath10k_fetch_cal_file(struct ath10k *ar)
        if (IS_ERR(ar->cal_file))
                /* calibration file is optional, don't print any warnings */
                return PTR_ERR(ar->cal_file);
-
+success:
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "found calibration file %s/%s\n",
                   ATH10K_FW_DIR, filename);
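/* Editor's example of the names tried (assuming a PCI device at
 * 0000:01:00.0): "pre-cal-pci-0000:01:00.0.bin" is fetched first,
 * then "cal-pci-0000:01:00.0.bin", both under ATH10K_FW_DIR.
 */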
 
@@ -748,14 +732,14 @@ static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar)
                return -EINVAL;
        }
 
-       ar->board = ath10k_fetch_fw_file(ar,
-                                        ar->hw_params.fw.dir,
-                                        ar->hw_params.fw.board);
-       if (IS_ERR(ar->board))
-               return PTR_ERR(ar->board);
+       ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar,
+                                                       ar->hw_params.fw.dir,
+                                                       ar->hw_params.fw.board);
+       if (IS_ERR(ar->normal_mode_fw.board))
+               return PTR_ERR(ar->normal_mode_fw.board);
 
-       ar->board_data = ar->board->data;
-       ar->board_len = ar->board->size;
+       ar->normal_mode_fw.board_data = ar->normal_mode_fw.board->data;
+       ar->normal_mode_fw.board_len = ar->normal_mode_fw.board->size;
 
        return 0;
 }
@@ -815,8 +799,8 @@ static int ath10k_core_parse_bd_ie_board(struct ath10k *ar,
                                   "boot found board data for '%s'",
                                   boardname);
 
-                       ar->board_data = board_ie_data;
-                       ar->board_len = board_ie_len;
+                       ar->normal_mode_fw.board_data = board_ie_data;
+                       ar->normal_mode_fw.board_len = board_ie_len;
 
                        ret = 0;
                        goto out;
@@ -849,12 +833,14 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar,
        const u8 *data;
        int ret, ie_id;
 
-       ar->board = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, filename);
-       if (IS_ERR(ar->board))
-               return PTR_ERR(ar->board);
+       ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar,
+                                                       ar->hw_params.fw.dir,
+                                                       filename);
+       if (IS_ERR(ar->normal_mode_fw.board))
+               return PTR_ERR(ar->normal_mode_fw.board);
 
-       data = ar->board->data;
-       len = ar->board->size;
+       data = ar->normal_mode_fw.board->data;
+       len = ar->normal_mode_fw.board->size;
 
        /* magic has extra null byte padded */
        magic_len = strlen(ATH10K_BOARD_MAGIC) + 1;
@@ -921,7 +907,7 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar,
        }
 
 out:
-       if (!ar->board_data || !ar->board_len) {
+       if (!ar->normal_mode_fw.board_data || !ar->normal_mode_fw.board_len) {
                ath10k_err(ar,
                           "failed to fetch board data for %s from %s/%s\n",
                           boardname, ar->hw_params.fw.dir, filename);
@@ -989,51 +975,8 @@ success:
        return 0;
 }
 
-static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
-{
-       int ret = 0;
-
-       if (ar->hw_params.fw.fw == NULL) {
-               ath10k_err(ar, "firmware file not defined\n");
-               return -EINVAL;
-       }
-
-       ar->firmware = ath10k_fetch_fw_file(ar,
-                                           ar->hw_params.fw.dir,
-                                           ar->hw_params.fw.fw);
-       if (IS_ERR(ar->firmware)) {
-               ret = PTR_ERR(ar->firmware);
-               ath10k_err(ar, "could not fetch firmware (%d)\n", ret);
-               goto err;
-       }
-
-       ar->firmware_data = ar->firmware->data;
-       ar->firmware_len = ar->firmware->size;
-
-       /* OTP may be undefined. If so, don't fetch it at all */
-       if (ar->hw_params.fw.otp == NULL)
-               return 0;
-
-       ar->otp = ath10k_fetch_fw_file(ar,
-                                      ar->hw_params.fw.dir,
-                                      ar->hw_params.fw.otp);
-       if (IS_ERR(ar->otp)) {
-               ret = PTR_ERR(ar->otp);
-               ath10k_err(ar, "could not fetch otp (%d)\n", ret);
-               goto err;
-       }
-
-       ar->otp_data = ar->otp->data;
-       ar->otp_len = ar->otp->size;
-
-       return 0;
-
-err:
-       ath10k_core_free_firmware_files(ar);
-       return ret;
-}
-
-static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
+int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
+                                    struct ath10k_fw_file *fw_file)
 {
        size_t magic_len, len, ie_len;
        int ie_id, i, index, bit, ret;
@@ -1042,15 +985,17 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
        __le32 *timestamp, *version;
 
        /* first fetch the firmware file (firmware-*.bin) */
-       ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name);
-       if (IS_ERR(ar->firmware)) {
+       fw_file->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
+                                                name);
+       if (IS_ERR(fw_file->firmware)) {
                ath10k_err(ar, "could not fetch firmware file '%s/%s': %ld\n",
-                          ar->hw_params.fw.dir, name, PTR_ERR(ar->firmware));
-               return PTR_ERR(ar->firmware);
+                          ar->hw_params.fw.dir, name,
+                          PTR_ERR(fw_file->firmware));
+               return PTR_ERR(fw_file->firmware);
        }
 
-       data = ar->firmware->data;
-       len = ar->firmware->size;
+       data = fw_file->firmware->data;
+       len = fw_file->firmware->size;
 
        /* magic also includes the null byte, check that as well */
        magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
@@ -1093,15 +1038,15 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
 
                switch (ie_id) {
                case ATH10K_FW_IE_FW_VERSION:
-                       if (ie_len > sizeof(ar->hw->wiphy->fw_version) - 1)
+                       if (ie_len > sizeof(fw_file->fw_version) - 1)
                                break;
 
-                       memcpy(ar->hw->wiphy->fw_version, data, ie_len);
-                       ar->hw->wiphy->fw_version[ie_len] = '\0';
+                       memcpy(fw_file->fw_version, data, ie_len);
+                       fw_file->fw_version[ie_len] = '\0';
 
                        ath10k_dbg(ar, ATH10K_DBG_BOOT,
                                   "found fw version %s\n",
-                                   ar->hw->wiphy->fw_version);
+                                   fw_file->fw_version);
                        break;
                case ATH10K_FW_IE_TIMESTAMP:
                        if (ie_len != sizeof(u32))
@@ -1128,21 +1073,21 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
                                        ath10k_dbg(ar, ATH10K_DBG_BOOT,
                                                   "Enabling feature bit: %i\n",
                                                   i);
-                                       __set_bit(i, ar->fw_features);
+                                       __set_bit(i, fw_file->fw_features);
                                }
                        }
 
                        ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "features", "",
-                                       ar->fw_features,
-                                       sizeof(ar->fw_features));
+                                       ar->running_fw->fw_file.fw_features,
+                                       sizeof(fw_file->fw_features));
                        break;
                case ATH10K_FW_IE_FW_IMAGE:
                        ath10k_dbg(ar, ATH10K_DBG_BOOT,
                                   "found fw image ie (%zd B)\n",
                                   ie_len);
 
-                       ar->firmware_data = data;
-                       ar->firmware_len = ie_len;
+                       fw_file->firmware_data = data;
+                       fw_file->firmware_len = ie_len;
 
                        break;
                case ATH10K_FW_IE_OTP_IMAGE:
@@ -1150,8 +1095,8 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
                                   "found otp image ie (%zd B)\n",
                                   ie_len);
 
-                       ar->otp_data = data;
-                       ar->otp_len = ie_len;
+                       fw_file->otp_data = data;
+                       fw_file->otp_len = ie_len;
 
                        break;
                case ATH10K_FW_IE_WMI_OP_VERSION:
@@ -1160,10 +1105,10 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
 
                        version = (__le32 *)data;
 
-                       ar->wmi.op_version = le32_to_cpup(version);
+                       fw_file->wmi_op_version = le32_to_cpup(version);
 
                        ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie wmi op version %d\n",
-                                  ar->wmi.op_version);
+                                  fw_file->wmi_op_version);
                        break;
                case ATH10K_FW_IE_HTT_OP_VERSION:
                        if (ie_len != sizeof(u32))
@@ -1171,17 +1116,17 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
 
                        version = (__le32 *)data;
 
-                       ar->htt.op_version = le32_to_cpup(version);
+                       fw_file->htt_op_version = le32_to_cpup(version);
 
                        ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie htt op version %d\n",
-                                  ar->htt.op_version);
+                                  fw_file->htt_op_version);
                        break;
                case ATH10K_FW_IE_FW_CODE_SWAP_IMAGE:
                        ath10k_dbg(ar, ATH10K_DBG_BOOT,
                                   "found fw code swap image ie (%zd B)\n",
                                   ie_len);
-                       ar->swap.firmware_codeswap_data = data;
-                       ar->swap.firmware_codeswap_len = ie_len;
+                       fw_file->codeswap_data = data;
+                       fw_file->codeswap_len = ie_len;
                        break;
                default:
                        ath10k_warn(ar, "Unknown FW IE: %u\n",
@@ -1196,7 +1141,8 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
                data += ie_len;
        }
 
-       if (!ar->firmware_data || !ar->firmware_len) {
+       if (!fw_file->firmware_data ||
+           !fw_file->firmware_len) {
                ath10k_warn(ar, "No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
                            ar->hw_params.fw.dir, name);
                ret = -ENOMEDIUM;
@@ -1220,40 +1166,95 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
        ar->fw_api = 5;
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
 
-       ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API5_FILE);
+       ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API5_FILE,
+                                              &ar->normal_mode_fw.fw_file);
        if (ret == 0)
                goto success;
 
        ar->fw_api = 4;
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
 
-       ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API4_FILE);
+       ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API4_FILE,
+                                              &ar->normal_mode_fw.fw_file);
        if (ret == 0)
                goto success;
 
        ar->fw_api = 3;
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
 
-       ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API3_FILE);
+       ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API3_FILE,
+                                              &ar->normal_mode_fw.fw_file);
        if (ret == 0)
                goto success;
 
        ar->fw_api = 2;
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
 
-       ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE);
-       if (ret == 0)
+       ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE,
+                                              &ar->normal_mode_fw.fw_file);
+       if (ret)
+               return ret;
+
+success:
+       ath10k_dbg(ar, ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
+
+       return 0;
+}
+
+static int ath10k_core_pre_cal_download(struct ath10k *ar)
+{
+       int ret;
+
+       ret = ath10k_download_cal_file(ar, ar->pre_cal_file);
+       if (ret == 0) {
+               ar->cal_mode = ATH10K_PRE_CAL_MODE_FILE;
                goto success;
+       }
 
-       ar->fw_api = 1;
-       ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
+       ath10k_dbg(ar, ATH10K_DBG_BOOT,
+                  "boot did not find a pre calibration file, try DT next: %d\n",
+                  ret);
 
-       ret = ath10k_core_fetch_firmware_api_1(ar);
-       if (ret)
+       ret = ath10k_download_cal_dt(ar, "qcom,ath10k-pre-calibration-data");
+       if (ret) {
+               ath10k_dbg(ar, ATH10K_DBG_BOOT,
+                          "unable to load pre cal data from DT: %d\n", ret);
                return ret;
+       }
+       ar->cal_mode = ATH10K_PRE_CAL_MODE_DT;
 
 success:
-       ath10k_dbg(ar, ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
+       ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using calibration mode %s\n",
+                  ath10k_cal_mode_str(ar->cal_mode));
+
+       return 0;
+}
+
+static int ath10k_core_pre_cal_config(struct ath10k *ar)
+{
+       int ret;
+
+       ret = ath10k_core_pre_cal_download(ar);
+       if (ret) {
+               ath10k_dbg(ar, ATH10K_DBG_BOOT,
+                          "failed to load pre cal data: %d\n", ret);
+               return ret;
+       }
+
+       ret = ath10k_core_get_board_id_from_otp(ar);
+       if (ret) {
+               ath10k_err(ar, "failed to get board id: %d\n", ret);
+               return ret;
+       }
+
+       ret = ath10k_download_and_run_otp(ar);
+       if (ret) {
+               ath10k_err(ar, "failed to run otp: %d\n", ret);
+               return ret;
+       }
+
+       ath10k_dbg(ar, ATH10K_DBG_BOOT,
+                  "pre cal configuration done successfully\n");
 
        return 0;
 }
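
The two helpers above try a pre-calibration file first and fall back to the
"qcom,ath10k-pre-calibration-data" device tree property. As a rough sketch of
what a DT-backed blob lookup looks like in general (of_get_property() is
standard kernel API; ath10k_download_cal_dt()'s internals are not part of
this hunk):

    #include <linux/of.h>

    /* Hypothetical helper, illustration only: return the calibration blob
     * stored in a DT property, or NULL when the property is absent. */
    static const void *demo_get_cal_blob(struct device_node *node,
                                         const char *prop, int *len)
    {
            return of_get_property(node, prop, len);
    }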
@@ -1262,7 +1263,15 @@ static int ath10k_download_cal_data(struct ath10k *ar)
 {
        int ret;
 
-       ret = ath10k_download_cal_file(ar);
+       ret = ath10k_core_pre_cal_config(ar);
+       if (ret == 0)
+               return 0;
+
+       ath10k_dbg(ar, ATH10K_DBG_BOOT,
+                  "pre cal download procedure failed, try cal file: %d\n",
+                  ret);
+
+       ret = ath10k_download_cal_file(ar, ar->cal_file);
        if (ret == 0) {
                ar->cal_mode = ATH10K_CAL_MODE_FILE;
                goto done;
@@ -1272,7 +1281,7 @@ static int ath10k_download_cal_data(struct ath10k *ar)
                   "boot did not find a calibration file, try DT next: %d\n",
                   ret);
 
-       ret = ath10k_download_cal_dt(ar);
+       ret = ath10k_download_cal_dt(ar, "qcom,ath10k-calibration-data");
        if (ret == 0) {
                ar->cal_mode = ATH10K_CAL_MODE_DT;
                goto done;
@@ -1420,15 +1429,17 @@ static void ath10k_core_restart(struct work_struct *work)
 
 static int ath10k_core_init_firmware_features(struct ath10k *ar)
 {
-       if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features) &&
-           !test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+       struct ath10k_fw_file *fw_file = &ar->normal_mode_fw.fw_file;
+
+       if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, fw_file->fw_features) &&
+           !test_bit(ATH10K_FW_FEATURE_WMI_10X, fw_file->fw_features)) {
                ath10k_err(ar, "feature bits corrupted: 10.2 feature requires 10.x feature to be set as well");
                return -EINVAL;
        }
 
-       if (ar->wmi.op_version >= ATH10K_FW_WMI_OP_VERSION_MAX) {
+       if (fw_file->wmi_op_version >= ATH10K_FW_WMI_OP_VERSION_MAX) {
                ath10k_err(ar, "unsupported WMI OP version (max %d): %d\n",
-                          ATH10K_FW_WMI_OP_VERSION_MAX, ar->wmi.op_version);
+                          ATH10K_FW_WMI_OP_VERSION_MAX, fw_file->wmi_op_version);
                return -EINVAL;
        }
 
@@ -1440,7 +1451,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
                break;
        case ATH10K_CRYPT_MODE_SW:
                if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT,
-                             ar->fw_features)) {
+                             fw_file->fw_features)) {
                        ath10k_err(ar, "cryptmode > 0 requires raw mode support from firmware");
                        return -EINVAL;
                }
@@ -1459,7 +1470,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
 
        if (rawmode) {
                if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT,
-                             ar->fw_features)) {
+                             fw_file->fw_features)) {
                        ath10k_err(ar, "rawmode = 1 requires support from firmware");
                        return -EINVAL;
                }
@@ -1484,19 +1495,19 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
        /* Backwards compatibility for firmwares without
         * ATH10K_FW_IE_WMI_OP_VERSION.
         */
-       if (ar->wmi.op_version == ATH10K_FW_WMI_OP_VERSION_UNSET) {
-               if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+       if (fw_file->wmi_op_version == ATH10K_FW_WMI_OP_VERSION_UNSET) {
+               if (test_bit(ATH10K_FW_FEATURE_WMI_10X, fw_file->fw_features)) {
                        if (test_bit(ATH10K_FW_FEATURE_WMI_10_2,
-                                    ar->fw_features))
-                               ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_2;
+                                    fw_file->fw_features))
+                               fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_2;
                        else
-                               ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
+                               fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
                } else {
-                       ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_MAIN;
+                       fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_MAIN;
                }
        }
 
-       switch (ar->wmi.op_version) {
+       switch (fw_file->wmi_op_version) {
        case ATH10K_FW_WMI_OP_VERSION_MAIN:
                ar->max_num_peers = TARGET_NUM_PEERS;
                ar->max_num_stations = TARGET_NUM_STATIONS;
@@ -1509,7 +1520,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
        case ATH10K_FW_WMI_OP_VERSION_10_1:
        case ATH10K_FW_WMI_OP_VERSION_10_2:
        case ATH10K_FW_WMI_OP_VERSION_10_2_4:
-               if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) {
+               if (ath10k_peer_stats_enabled(ar)) {
                        ar->max_num_peers = TARGET_10X_TX_STATS_NUM_PEERS;
                        ar->max_num_stations = TARGET_10X_TX_STATS_NUM_STATIONS;
                } else {
@@ -1538,9 +1549,15 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
                ar->num_active_peers = TARGET_10_4_ACTIVE_PEERS;
                ar->max_num_vdevs = TARGET_10_4_NUM_VDEVS;
                ar->num_tids = TARGET_10_4_TGT_NUM_TIDS;
-               ar->htt.max_num_pending_tx = ar->hw_params.num_msdu_desc;
-               ar->fw_stats_req_mask = WMI_STAT_PEER;
+               ar->fw_stats_req_mask = WMI_10_4_STAT_PEER |
+                                       WMI_10_4_STAT_PEER_EXTD;
                ar->max_spatial_stream = ar->hw_params.max_spatial_stream;
+
+               if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+                            fw_file->fw_features))
+                       ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC_PFC;
+               else
+                       ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC;
                break;
        case ATH10K_FW_WMI_OP_VERSION_UNSET:
        case ATH10K_FW_WMI_OP_VERSION_MAX:
@@ -1551,18 +1568,18 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
        /* Backwards compatibility for firmwares without
         * ATH10K_FW_IE_HTT_OP_VERSION.
         */
-       if (ar->htt.op_version == ATH10K_FW_HTT_OP_VERSION_UNSET) {
-               switch (ar->wmi.op_version) {
+       if (fw_file->htt_op_version == ATH10K_FW_HTT_OP_VERSION_UNSET) {
+               switch (fw_file->wmi_op_version) {
                case ATH10K_FW_WMI_OP_VERSION_MAIN:
-                       ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_MAIN;
+                       fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_MAIN;
                        break;
                case ATH10K_FW_WMI_OP_VERSION_10_1:
                case ATH10K_FW_WMI_OP_VERSION_10_2:
                case ATH10K_FW_WMI_OP_VERSION_10_2_4:
-                       ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_10_1;
+                       fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_10_1;
                        break;
                case ATH10K_FW_WMI_OP_VERSION_TLV:
-                       ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_TLV;
+                       fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_TLV;
                        break;
                case ATH10K_FW_WMI_OP_VERSION_10_4:
                case ATH10K_FW_WMI_OP_VERSION_UNSET:
@@ -1575,14 +1592,18 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
        return 0;
 }
 
-int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
+int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
+                     const struct ath10k_fw_components *fw)
 {
        int status;
+       u32 val;
 
        lockdep_assert_held(&ar->conf_mutex);
 
        clear_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
 
+       ar->running_fw = fw;
+
        ath10k_bmi_start(ar);
 
        if (ath10k_init_configure_target(ar)) {
@@ -1601,7 +1622,7 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
         * to set the clock source once the target is initialized.
         */
        if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT,
-                    ar->fw_features)) {
+                    ar->running_fw->fw_file.fw_features)) {
                status = ath10k_bmi_write32(ar, hi_skip_clock_init, 1);
                if (status) {
                        ath10k_err(ar, "could not write to skip_clock_init: %d\n",
@@ -1610,7 +1631,7 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
                }
        }
 
-       status = ath10k_download_fw(ar, mode);
+       status = ath10k_download_fw(ar);
        if (status)
                goto err;
 
@@ -1698,6 +1719,20 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "firmware %s booted\n",
                   ar->hw->wiphy->fw_version);
 
+       if (test_bit(WMI_SERVICE_EXT_RES_CFG_SUPPORT, ar->wmi.svc_map)) {
+               val = 0;
+               if (ath10k_peer_stats_enabled(ar))
+                       val = WMI_10_4_PEER_STATS;
+
+               status = ath10k_mac_ext_resource_config(ar, val);
+               if (status) {
+                       ath10k_err(ar,
+                                  "failed to send ext resource cfg command: %d\n",
+                                  status);
+                       goto err_hif_stop;
+               }
+       }
+
        status = ath10k_wmi_cmd_init(ar);
        if (status) {
                ath10k_err(ar, "could not send WMI init command (%d)\n",
@@ -1832,13 +1867,27 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
                goto err_power_down;
        }
 
+       BUILD_BUG_ON(sizeof(ar->hw->wiphy->fw_version) !=
+                    sizeof(ar->normal_mode_fw.fw_file.fw_version));
+       memcpy(ar->hw->wiphy->fw_version, ar->normal_mode_fw.fw_file.fw_version,
+              sizeof(ar->hw->wiphy->fw_version));
+
        ath10k_debug_print_hwfw_info(ar);
 
+       ret = ath10k_core_pre_cal_download(ar);
+       if (ret) {
+               /* Pre-calibration data download is not necessary
+                * for all chipsets. Ignore failures and continue.
+                */
+               ath10k_dbg(ar, ATH10K_DBG_BOOT,
+                          "could not load pre cal data: %d\n", ret);
+       }
+
        ret = ath10k_core_get_board_id_from_otp(ar);
        if (ret && ret != -EOPNOTSUPP) {
                ath10k_err(ar, "failed to get board id from otp: %d\n",
                           ret);
-               return ret;
+               goto err_free_firmware_files;
        }
 
        ret = ath10k_core_fetch_board_file(ar);
@@ -1865,7 +1914,8 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
 
        mutex_lock(&ar->conf_mutex);
 
-       ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL);
+       ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL,
+                               &ar->normal_mode_fw);
        if (ret) {
                ath10k_err(ar, "could not init core (%d)\n", ret);
                goto err_unlock;
@@ -2048,7 +2098,9 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
 
        mutex_init(&ar->conf_mutex);
        spin_lock_init(&ar->data_lock);
+       spin_lock_init(&ar->txqs_lock);
 
+       INIT_LIST_HEAD(&ar->txqs);
        INIT_LIST_HEAD(&ar->peers);
        init_waitqueue_head(&ar->peer_mapping_wq);
        init_waitqueue_head(&ar->htt.empty_tx_wq);
index a62b62a6226687e4f1ee09b8ba721588b79cbb9f..1379054000f92cdebab4589c5bed0d47790610d4 100644 (file)
@@ -44,8 +44,8 @@
 
 #define ATH10K_SCAN_ID 0
 #define WMI_READY_TIMEOUT (5 * HZ)
-#define ATH10K_FLUSH_TIMEOUT_HZ (5*HZ)
-#define ATH10K_CONNECTION_LOSS_HZ (3*HZ)
+#define ATH10K_FLUSH_TIMEOUT_HZ (5 * HZ)
+#define ATH10K_CONNECTION_LOSS_HZ (3 * HZ)
 #define ATH10K_NUM_CHANS 39
 
 /* Antenna noise floor */
@@ -98,6 +98,7 @@ struct ath10k_skb_cb {
        u8 eid;
        u16 msdu_id;
        struct ieee80211_vif *vif;
+       struct ieee80211_txq *txq;
 } __packed;
 
 struct ath10k_skb_rxcb {
@@ -138,7 +139,6 @@ struct ath10k_mem_chunk {
 };
 
 struct ath10k_wmi {
-       enum ath10k_fw_wmi_op_version op_version;
        enum ath10k_htc_ep_id eid;
        struct completion service_ready;
        struct completion unified_ready;
@@ -297,6 +297,9 @@ struct ath10k_dfs_stats {
 
 struct ath10k_peer {
        struct list_head list;
+       struct ieee80211_vif *vif;
+       struct ieee80211_sta *sta;
+
        int vdev_id;
        u8 addr[ETH_ALEN];
        DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS);
@@ -305,6 +308,12 @@ struct ath10k_peer {
        struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
 };
 
+struct ath10k_txq {
+       struct list_head list;
+       unsigned long num_fw_queued;
+       unsigned long num_push_allowed;
+};
+
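+
+Each ieee80211_txq gets one of these in its drv_priv; the list member chains
+it into ar->txqs under the new txqs_lock (both added further down). A hedged
+sketch of the enqueue side, mirroring the names in this patch (the actual
+wake-tx-queue handler lives in mac.c and is not part of this hunk):
+
+    /* Illustration only: schedule a txq for the push/pull tx path. */
+    static void demo_wake_tx_queue(struct ath10k *ar,
+                                   struct ieee80211_txq *txq)
+    {
+            struct ath10k_txq *artxq = (void *)txq->drv_priv;
+
+            spin_lock_bh(&ar->txqs_lock);
+            if (list_empty(&artxq->list))
+                    list_add_tail(&artxq->list, &ar->txqs);
+            spin_unlock_bh(&ar->txqs_lock);
+    }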
 struct ath10k_sta {
        struct ath10k_vif *arvif;
 
@@ -313,6 +322,7 @@ struct ath10k_sta {
        u32 bw;
        u32 nss;
        u32 smps;
+       u16 peer_id;
 
        struct work_struct update_wk;
 
@@ -323,7 +333,7 @@ struct ath10k_sta {
 #endif
 };
 
-#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
+#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5 * HZ)
 
 enum ath10k_beacon_state {
        ATH10K_BEACON_SCHEDULED = 0,
@@ -335,6 +345,7 @@ struct ath10k_vif {
        struct list_head list;
 
        u32 vdev_id;
+       u16 peer_id;
        enum wmi_vdev_type vdev_type;
        enum wmi_vdev_subtype vdev_subtype;
        u32 beacon_interval;
@@ -549,12 +560,17 @@ enum ath10k_dev_flags {
 
        /* Bluetooth coexistence enabled */
        ATH10K_FLAG_BTCOEX,
+
+       /* Per-station statistics service */
+       ATH10K_FLAG_PEER_STATS,
 };
 
 enum ath10k_cal_mode {
        ATH10K_CAL_MODE_FILE,
        ATH10K_CAL_MODE_OTP,
        ATH10K_CAL_MODE_DT,
+       ATH10K_PRE_CAL_MODE_FILE,
+       ATH10K_PRE_CAL_MODE_DT,
 };
 
 enum ath10k_crypt_mode {
@@ -573,6 +589,10 @@ static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode)
                return "otp";
        case ATH10K_CAL_MODE_DT:
                return "dt";
+       case ATH10K_PRE_CAL_MODE_FILE:
+               return "pre-cal-file";
+       case ATH10K_PRE_CAL_MODE_DT:
+               return "pre-cal-dt";
        }
 
        return "unknown";
@@ -606,6 +626,34 @@ enum ath10k_tx_pause_reason {
        ATH10K_TX_PAUSE_MAX,
 };
 
+struct ath10k_fw_file {
+       const struct firmware *firmware;
+
+       char fw_version[ETHTOOL_FWVERS_LEN];
+
+       DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
+
+       enum ath10k_fw_wmi_op_version wmi_op_version;
+       enum ath10k_fw_htt_op_version htt_op_version;
+
+       const void *firmware_data;
+       size_t firmware_len;
+
+       const void *otp_data;
+       size_t otp_len;
+
+       const void *codeswap_data;
+       size_t codeswap_len;
+};
+
+struct ath10k_fw_components {
+       const struct firmware *board;
+       const void *board_data;
+       size_t board_len;
+
+       struct ath10k_fw_file fw_file;
+};
+
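+
+Collecting the parsed image pointers in ath10k_fw_file means one fetch
+routine can fill either the normal set or the testmode (UTF) set, and
+running_fw below records which set is active. A simplified sketch of the
+pattern under that assumption (demo_fetch() is hypothetical; the real work
+is done by ath10k_core_fetch_firmware_api_n() above):
+
+    static int demo_fetch(struct ath10k *ar, const char *name,
+                          struct ath10k_fw_file *fw_file)
+    {
+            /* request_firmware(), walk the IEs, then fill
+             * fw_file->firmware_data, fw_file->otp_data,
+             * fw_file->wmi_op_version and friends. */
+            return 0;
+    }
+
+    /* Normal boot:  demo_fetch(ar, name, &ar->normal_mode_fw.fw_file);
+     * Testmode UTF: demo_fetch(ar, name, &ar->testmode.utf_mode_fw.fw_file);
+     */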
 struct ath10k {
        struct ath_common ath_common;
        struct ieee80211_hw *hw;
@@ -631,8 +679,6 @@ struct ath10k {
        /* protected by conf_mutex */
        bool ani_enabled;
 
-       DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
-
        bool p2p;
 
        struct {
@@ -680,39 +726,31 @@ struct ath10k {
                /* The padding bytes' location differs between chips */
                enum ath10k_hw_4addr_pad hw_4addr_pad;
 
-               u32 num_msdu_desc;
-               u32 qcache_active_peers;
                u32 tx_chain_mask;
                u32 rx_chain_mask;
                u32 max_spatial_stream;
+               u32 cal_data_len;
 
                struct ath10k_hw_params_fw {
                        const char *dir;
-                       const char *fw;
-                       const char *otp;
                        const char *board;
                        size_t board_size;
                        size_t board_ext_size;
                } fw;
        } hw_params;
 
-       const struct firmware *board;
-       const void *board_data;
-       size_t board_len;
-
-       const struct firmware *otp;
-       const void *otp_data;
-       size_t otp_len;
+       /* contains the firmware images used with ATH10K_FIRMWARE_MODE_NORMAL */
+       struct ath10k_fw_components normal_mode_fw;
 
-       const struct firmware *firmware;
-       const void *firmware_data;
-       size_t firmware_len;
+       /* READ-ONLY images of the running firmware, which can be either
+        * normal or UTF. Do not modify or release them!
+        */
+       const struct ath10k_fw_components *running_fw;
 
+       const struct firmware *pre_cal_file;
        const struct firmware *cal_file;
 
        struct {
-               const void *firmware_codeswap_data;
-               size_t firmware_codeswap_len;
                struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info;
        } swap;
 
@@ -744,7 +782,7 @@ struct ath10k {
        } scan;
 
        struct {
-               struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+               struct ieee80211_supported_band sbands[NUM_NL80211_BANDS];
        } mac;
 
        /* should never be NULL; needed for regular htt rx */
@@ -756,6 +794,9 @@ struct ath10k {
        /* current operating channel definition */
        struct cfg80211_chan_def chandef;
 
+       /* currently configured operating channel in firmware */
+       struct ieee80211_channel *tgt_oper_chan;
+
        unsigned long long free_vdev_map;
        struct ath10k_vif *monitor_arvif;
        bool monitor;
@@ -786,9 +827,13 @@ struct ath10k {
 
        /* protects shared structure data */
        spinlock_t data_lock;
+       /* protects: ar->txqs, artxq->list */
+       spinlock_t txqs_lock;
 
+       struct list_head txqs;
        struct list_head arvifs;
        struct list_head peers;
+       struct ath10k_peer *peer_map[ATH10K_MAX_NUM_PEER_IDS];
        wait_queue_head_t peer_mapping_wq;
 
        /* protected by conf_mutex */
@@ -851,13 +896,8 @@ struct ath10k {
 
        struct {
                /* protected by conf_mutex */
-               const struct firmware *utf;
-               char utf_version[32];
-               const void *utf_firmware_data;
-               size_t utf_firmware_len;
-               DECLARE_BITMAP(orig_fw_features, ATH10K_FW_FEATURE_COUNT);
-               enum ath10k_fw_wmi_op_version orig_wmi_op_version;
-               enum ath10k_fw_wmi_op_version op_version;
+               struct ath10k_fw_components utf_mode_fw;
+
                /* protected by data_lock */
                bool utf_monitor;
        } testmode;
@@ -876,6 +916,15 @@ struct ath10k {
        u8 drv_priv[0] __aligned(sizeof(void *));
 };
 
+static inline bool ath10k_peer_stats_enabled(struct ath10k *ar)
+{
+       if (test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags) &&
+           test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
+               return true;
+
+       return false;
+}
+
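+
+The helper ANDs the firmware-advertised WMI_SERVICE_PEER_STATS bit with the
+user-controlled ATH10K_FLAG_PEER_STATS flag, so advertising the service
+alone no longer enables per-peer stats. A call site then reads as in the
+core.c change earlier in this diff:
+
+    if (ath10k_peer_stats_enabled(ar))
+            ar->max_num_peers = TARGET_10X_TX_STATS_NUM_PEERS;
+    else
+            ar->max_num_peers = TARGET_10X_NUM_PEERS;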
 struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
                                  enum ath10k_bus bus,
                                  enum ath10k_hw_rev hw_rev,
@@ -884,8 +933,11 @@ void ath10k_core_destroy(struct ath10k *ar);
 void ath10k_core_get_fw_features_str(struct ath10k *ar,
                                     char *buf,
                                     size_t max_len);
+int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
+                                    struct ath10k_fw_file *fw_file);
 
-int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode);
+int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
+                     const struct ath10k_fw_components *fw_components);
 int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt);
 void ath10k_core_stop(struct ath10k *ar);
 int ath10k_core_register(struct ath10k *ar, u32 chip_id);
index 076d29b53ddff67a5b687dc6b722632db20d111c..e2511550fbb86c5fcafb88d0e6c1504bd0bd8337 100644 (file)
@@ -126,7 +126,9 @@ EXPORT_SYMBOL(ath10k_info);
 
 void ath10k_debug_print_hwfw_info(struct ath10k *ar)
 {
+       const struct firmware *firmware;
        char fw_features[128] = {};
+       u32 crc = 0;
 
        ath10k_core_get_fw_features_str(ar, fw_features, sizeof(fw_features));
 
@@ -143,11 +145,15 @@ void ath10k_debug_print_hwfw_info(struct ath10k *ar)
                    config_enabled(CONFIG_ATH10K_DFS_CERTIFIED),
                    config_enabled(CONFIG_NL80211_TESTMODE));
 
+       firmware = ar->normal_mode_fw.fw_file.firmware;
+       if (firmware)
+               crc = crc32_le(0, firmware->data, firmware->size);
+
        ath10k_info(ar, "firmware ver %s api %d features %s crc32 %08x\n",
                    ar->hw->wiphy->fw_version,
                    ar->fw_api,
                    fw_features,
-                   crc32_le(0, ar->firmware->data, ar->firmware->size));
+                   crc);
 }
 
 void ath10k_debug_print_board_info(struct ath10k *ar)
@@ -163,7 +169,8 @@ void ath10k_debug_print_board_info(struct ath10k *ar)
        ath10k_info(ar, "board_file api %d bmi_id %s crc32 %08x",
                    ar->bd_api,
                    boardinfo,
-                   crc32_le(0, ar->board->data, ar->board->size));
+                   crc32_le(0, ar->normal_mode_fw.board->data,
+                            ar->normal_mode_fw.board->size));
 }
 
 void ath10k_debug_print_boot_info(struct ath10k *ar)
@@ -171,8 +178,8 @@ void ath10k_debug_print_boot_info(struct ath10k *ar)
        ath10k_info(ar, "htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d\n",
                    ar->htt.target_version_major,
                    ar->htt.target_version_minor,
-                   ar->wmi.op_version,
-                   ar->htt.op_version,
+                   ar->normal_mode_fw.fw_file.wmi_op_version,
+                   ar->normal_mode_fw.fw_file.htt_op_version,
                    ath10k_cal_mode_str(ar->cal_mode),
                    ar->max_num_stations,
                    test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags),
@@ -319,7 +326,7 @@ static void ath10k_debug_fw_stats_reset(struct ath10k *ar)
 void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
 {
        struct ath10k_fw_stats stats = {};
-       bool is_start, is_started, is_end, peer_stats_svc;
+       bool is_start, is_started, is_end;
        size_t num_peers;
        size_t num_vdevs;
        int ret;
@@ -346,13 +353,11 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
         *  b) consume stat update events until another one with pdev stats is
         *     delivered which is treated as end-of-data and is itself discarded
         */
-
-       peer_stats_svc = test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map);
-       if (peer_stats_svc)
+       if (ath10k_peer_stats_enabled(ar))
                ath10k_sta_update_rx_duration(ar, &stats.peers);
 
        if (ar->debug.fw_stats_done) {
-               if (!peer_stats_svc)
+               if (!ath10k_peer_stats_enabled(ar))
                        ath10k_warn(ar, "received unsolicited stats update event\n");
 
                goto free;
@@ -1447,7 +1452,7 @@ static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
                goto err;
        }
 
-       buf = vmalloc(QCA988X_CAL_DATA_LEN);
+       buf = vmalloc(ar->hw_params.cal_data_len);
        if (!buf) {
                ret = -ENOMEM;
                goto err;
@@ -1462,7 +1467,7 @@ static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
        }
 
        ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), buf,
-                                  QCA988X_CAL_DATA_LEN);
+                                  ar->hw_params.cal_data_len);
        if (ret) {
                ath10k_warn(ar, "failed to read calibration data: %d\n", ret);
                goto err_vfree;
@@ -1487,10 +1492,11 @@ static ssize_t ath10k_debug_cal_data_read(struct file *file,
                                          char __user *user_buf,
                                          size_t count, loff_t *ppos)
 {
+       struct ath10k *ar = file_inode(file)->i_private;
        void *buf = file->private_data;
 
        return simple_read_from_buffer(user_buf, count, ppos,
-                                      buf, QCA988X_CAL_DATA_LEN);
+                                      buf, ar->hw_params.cal_data_len);
 }
 
 static int ath10k_debug_cal_data_release(struct inode *inode,
@@ -2019,7 +2025,12 @@ static ssize_t ath10k_write_pktlog_filter(struct file *file,
                goto out;
        }
 
-       if (filter && (filter != ar->debug.pktlog_filter)) {
+       if (filter == ar->debug.pktlog_filter) {
+               ret = count;
+               goto out;
+       }
+
+       if (filter) {
                ret = ath10k_wmi_pdev_pktlog_enable(ar, filter);
                if (ret) {
                        ath10k_warn(ar, "failed to enable pktlog filter %x: %d\n",
@@ -2114,7 +2125,7 @@ static ssize_t ath10k_write_btcoex(struct file *file,
        struct ath10k *ar = file->private_data;
        char buf[32];
        size_t buf_size;
-       int ret = 0;
+       int ret;
        bool val;
 
        buf_size = min(count, (sizeof(buf) - 1));
@@ -2134,8 +2145,10 @@ static ssize_t ath10k_write_btcoex(struct file *file,
                goto exit;
        }
 
-       if (!(test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) ^ val))
+       if (!(test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) ^ val)) {
+               ret = count;
                goto exit;
+       }
 
        if (val)
                set_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
@@ -2174,6 +2187,75 @@ static const struct file_operations fops_btcoex = {
        .open = simple_open
 };
 
+static ssize_t ath10k_write_peer_stats(struct file *file,
+                                      const char __user *ubuf,
+                                      size_t count, loff_t *ppos)
+{
+       struct ath10k *ar = file->private_data;
+       char buf[32];
+       size_t buf_size;
+       int ret;
+       bool val;
+
+       buf_size = min(count, (sizeof(buf) - 1));
+       if (copy_from_user(buf, ubuf, buf_size))
+               return -EFAULT;
+
+       buf[buf_size] = '\0';
+
+       if (strtobool(buf, &val) != 0)
+               return -EINVAL;
+
+       mutex_lock(&ar->conf_mutex);
+
+       if (ar->state != ATH10K_STATE_ON &&
+           ar->state != ATH10K_STATE_RESTARTED) {
+               ret = -ENETDOWN;
+               goto exit;
+       }
+
+       if (!(test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags) ^ val)) {
+               ret = count;
+               goto exit;
+       }
+
+       if (val)
+               set_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags);
+       else
+               clear_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags);
+
+       ath10k_info(ar, "restarting firmware due to peer stats change\n");
+
+       queue_work(ar->workqueue, &ar->restart_work);
+       ret = count;
+
+exit:
+       mutex_unlock(&ar->conf_mutex);
+       return ret;
+}
+
+static ssize_t ath10k_read_peer_stats(struct file *file, char __user *ubuf,
+                                     size_t count, loff_t *ppos)
+{
+       char buf[32];
+       struct ath10k *ar = file->private_data;
+       int len = 0;
+
+       mutex_lock(&ar->conf_mutex);
+       len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+                       test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags));
+       mutex_unlock(&ar->conf_mutex);
+
+       return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_peer_stats = {
+       .read = ath10k_read_peer_stats,
+       .write = ath10k_write_peer_stats,
+       .open = simple_open
+};
+
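+
+Once registered via debugfs_create_file() (see further down), the knob is a
+plain boolean file. A hypothetical userspace sketch, assuming the usual
+ieee80211 debugfs layout and phy0; writing "1" restarts the firmware with
+peer stats enabled:
+
+    #include <fcntl.h>
+    #include <unistd.h>
+
+    static int demo_enable_peer_stats(void)
+    {
+            int fd = open("/sys/kernel/debug/ieee80211/phy0/ath10k/peer_stats",
+                          O_WRONLY);
+
+            if (fd < 0)
+                    return -1;
+            if (write(fd, "1", 1) != 1) {
+                    close(fd);
+                    return -1;
+            }
+            return close(fd);
+    }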
 static ssize_t ath10k_debug_fw_checksums_read(struct file *file,
                                              char __user *user_buf,
                                              size_t count, loff_t *ppos)
@@ -2191,23 +2273,28 @@ static ssize_t ath10k_debug_fw_checksums_read(struct file *file,
 
        len += scnprintf(buf + len, buf_len - len,
                         "firmware-N.bin\t\t%08x\n",
-                        crc32_le(0, ar->firmware->data, ar->firmware->size));
+                        crc32_le(0, ar->normal_mode_fw.fw_file.firmware->data,
+                                 ar->normal_mode_fw.fw_file.firmware->size));
        len += scnprintf(buf + len, buf_len - len,
                         "athwlan\t\t\t%08x\n",
-                        crc32_le(0, ar->firmware_data, ar->firmware_len));
+                        crc32_le(0, ar->normal_mode_fw.fw_file.firmware_data,
+                                 ar->normal_mode_fw.fw_file.firmware_len));
        len += scnprintf(buf + len, buf_len - len,
                         "otp\t\t\t%08x\n",
-                        crc32_le(0, ar->otp_data, ar->otp_len));
+                        crc32_le(0, ar->normal_mode_fw.fw_file.otp_data,
+                                 ar->normal_mode_fw.fw_file.otp_len));
        len += scnprintf(buf + len, buf_len - len,
                         "codeswap\t\t%08x\n",
-                        crc32_le(0, ar->swap.firmware_codeswap_data,
-                                 ar->swap.firmware_codeswap_len));
+                        crc32_le(0, ar->normal_mode_fw.fw_file.codeswap_data,
+                                 ar->normal_mode_fw.fw_file.codeswap_len));
        len += scnprintf(buf + len, buf_len - len,
                         "board-N.bin\t\t%08x\n",
-                        crc32_le(0, ar->board->data, ar->board->size));
+                        crc32_le(0, ar->normal_mode_fw.board->data,
+                                 ar->normal_mode_fw.board->size));
        len += scnprintf(buf + len, buf_len - len,
                         "board\t\t\t%08x\n",
-                        crc32_le(0, ar->board_data, ar->board_len));
+                        crc32_le(0, ar->normal_mode_fw.board_data,
+                                 ar->normal_mode_fw.board_len));
 
        ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
 
@@ -2337,6 +2424,11 @@ int ath10k_debug_register(struct ath10k *ar)
                debugfs_create_file("btcoex", S_IRUGO | S_IWUSR,
                                    ar->debug.debugfs_phy, ar, &fops_btcoex);
 
+       if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
+               debugfs_create_file("peer_stats", S_IRUGO | S_IWUSR,
+                                   ar->debug.debugfs_phy, ar,
+                                   &fops_peer_stats);
+
        debugfs_create_file("fw_checksums", S_IRUSR,
                            ar->debug.debugfs_phy, ar, &fops_fw_checksums);
 
index 6206edd7c49f0a6a0ff85f09c608118f38923729..75c89e3625ef6ffb1a926e5df9acba34d342fb9a 100644 (file)
@@ -57,7 +57,7 @@ enum ath10k_dbg_aggr_mode {
 };
 
 /* FIXME: How to calculate the buffer size sanely? */
-#define ATH10K_FW_STATS_BUF_SIZE (1024*1024)
+#define ATH10K_FW_STATS_BUF_SIZE (1024 * 1024)
 
 extern unsigned int ath10k_debug_mask;
 
index e70aa38e6e05dbb1611981d5c26b332d05ac0ce5..cc827185d3e9145a82b087bf235d0e57c2adbea7 100644 (file)
@@ -297,10 +297,10 @@ struct ath10k_htc_svc_conn_resp {
 #define ATH10K_NUM_CONTROL_TX_BUFFERS 2
 #define ATH10K_HTC_MAX_LEN 4096
 #define ATH10K_HTC_MAX_CTRL_MSG_LEN 256
-#define ATH10K_HTC_WAIT_TIMEOUT_HZ (1*HZ)
+#define ATH10K_HTC_WAIT_TIMEOUT_HZ (1 * HZ)
 #define ATH10K_HTC_CONTROL_BUFFER_SIZE (ATH10K_HTC_MAX_CTRL_MSG_LEN + \
                                        sizeof(struct ath10k_htc_hdr))
-#define ATH10K_HTC_CONN_SVC_TIMEOUT_HZ (1*HZ)
+#define ATH10K_HTC_CONN_SVC_TIMEOUT_HZ (1 * HZ)
 
 struct ath10k_htc_ep {
        struct ath10k_htc *htc;
index 7561f22f10f9c92266e4e73d6826f465acd0f361..130cd9502021d0204f54e294a8e55f81481a0610 100644 (file)
@@ -149,7 +149,7 @@ int ath10k_htt_connect(struct ath10k_htt *htt)
        memset(&conn_resp, 0, sizeof(conn_resp));
 
        conn_req.ep_ops.ep_tx_complete = ath10k_htt_htc_tx_complete;
-       conn_req.ep_ops.ep_rx_complete = ath10k_htt_t2h_msg_handler;
+       conn_req.ep_ops.ep_rx_complete = ath10k_htt_htc_t2h_msg_handler;
 
        /* connect to control service */
        conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_DATA_MSG;
@@ -183,7 +183,7 @@ int ath10k_htt_init(struct ath10k *ar)
                8 + /* llc snap */
                2; /* ip4 dscp or ip6 priority */
 
-       switch (ar->htt.op_version) {
+       switch (ar->running_fw->fw_file.htt_op_version) {
        case ATH10K_FW_HTT_OP_VERSION_10_4:
                ar->htt.t2h_msg_types = htt_10_4_t2h_msg_types;
                ar->htt.t2h_msg_types_max = HTT_10_4_T2H_NUM_MSGS;
@@ -208,7 +208,7 @@ int ath10k_htt_init(struct ath10k *ar)
        return 0;
 }
 
-#define HTT_TARGET_VERSION_TIMEOUT_HZ (3*HZ)
+#define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ)
 
 static int ath10k_htt_verify_version(struct ath10k_htt *htt)
 {
index 13391ea4422d3f5acf0adf3d0e10ed35a3b535b1..911c535d08634e53a58ac7c0cf16c23b269859ce 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/interrupt.h>
 #include <linux/dmapool.h>
 #include <linux/hashtable.h>
+#include <linux/kfifo.h>
 #include <net/mac80211.h>
 
 #include "htc.h"
@@ -1461,15 +1462,23 @@ struct htt_tx_mode_switch_ind {
        struct htt_tx_mode_switch_record records[0];
 } __packed;
 
+struct htt_channel_change {
+       u8 pad[3];
+       __le32 freq;
+       __le32 center_freq1;
+       __le32 center_freq2;
+       __le32 phymode;
+} __packed;
+
 union htt_rx_pn_t {
        /* WEP: 24-bit PN */
        u32 pn24;
 
        /* TKIP or CCMP: 48-bit PN */
-       u_int64_t pn48;
+       u64 pn48;
 
        /* WAPI: 128-bit PN */
-       u_int64_t pn128[2];
+       u64 pn128[2];
 };
 
 struct htt_cmd {
@@ -1511,16 +1520,22 @@ struct htt_resp {
                struct htt_tx_fetch_ind tx_fetch_ind;
                struct htt_tx_fetch_confirm tx_fetch_confirm;
                struct htt_tx_mode_switch_ind tx_mode_switch_ind;
+               struct htt_channel_change chan_change;
        };
 } __packed;
 
 /*** host side structures follow ***/
 
 struct htt_tx_done {
-       u32 msdu_id;
-       bool discard;
-       bool no_ack;
-       bool success;
+       u16 msdu_id;
+       u16 status;
+};
+
+enum htt_tx_compl_state {
+       HTT_TX_COMPL_STATE_NONE,
+       HTT_TX_COMPL_STATE_ACK,
+       HTT_TX_COMPL_STATE_NOACK,
+       HTT_TX_COMPL_STATE_DISCARD,
 };
 
 struct htt_peer_map_event {
@@ -1547,7 +1562,6 @@ struct ath10k_htt {
        u8 target_version_major;
        u8 target_version_minor;
        struct completion target_version_received;
-       enum ath10k_fw_htt_op_version op_version;
        u8 max_num_amsdu;
        u8 max_num_ampdu;
 
@@ -1641,17 +1655,20 @@ struct ath10k_htt {
        struct idr pending_tx;
        wait_queue_head_t empty_tx_wq;
 
+       /* FIFO for storing tx done status {ack, no-ack, discard} and msdu id */
+       DECLARE_KFIFO_PTR(txdone_fifo, struct htt_tx_done);
+
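+
+With htt_tx_done reduced to two u16 fields (see the htt.h hunk above), each
+completion record is four bytes and can be staged here from the tx
+completion indication and drained in the txrx tasklet. A standalone sketch
+of the idea, assuming a statically sized fifo instead of the _PTR variant
+used by the driver:
+
+    #include <linux/kfifo.h>
+
+    struct demo_tx_done { u16 msdu_id; u16 status; };
+    static DEFINE_KFIFO(demo_txdone_fifo, struct demo_tx_done, 64);
+
+    static void demo_produce(u16 id, u16 status)
+    {
+            struct demo_tx_done rec = { .msdu_id = id, .status = status };
+
+            kfifo_put(&demo_txdone_fifo, rec);      /* returns 0 when full */
+    }
+
+    static void demo_consume(void)
+    {
+            struct demo_tx_done rec;
+
+            while (kfifo_get(&demo_txdone_fifo, &rec))
+                    ; /* complete rec.msdu_id with rec.status */
+    }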
        /* set if host-fw communication goes haywire
         * used to avoid further failures */
        bool rx_confused;
-       struct tasklet_struct rx_replenish_task;
+       atomic_t num_mpdus_ready;
 
        /* This is used to group tx/rx completions separately and process them
         * in batches to reduce cache stalls */
        struct tasklet_struct txrx_compl_task;
-       struct sk_buff_head tx_compl_q;
        struct sk_buff_head rx_compl_q;
        struct sk_buff_head rx_in_ord_compl_q;
+       struct sk_buff_head tx_fetch_ind_q;
 
        /* rx_status template */
        struct ieee80211_rx_status rx_status;
@@ -1667,10 +1684,13 @@ struct ath10k_htt {
        } txbuf;
 
        struct {
+               bool enabled;
                struct htt_q_state *vaddr;
                dma_addr_t paddr;
+               u16 num_push_allowed;
                u16 num_peers;
                u16 num_tids;
+               enum htt_tx_mode_switch_mode mode;
                enum htt_q_depth_type type;
        } tx_q_state;
 };
@@ -1715,7 +1735,7 @@ struct htt_rx_desc {
 
 /* Refill a bunch of RX buffers for each refill round so that FW/HW can handle
  * aggregated traffic more nicely. */
-#define ATH10K_HTT_MAX_NUM_REFILL 16
+#define ATH10K_HTT_MAX_NUM_REFILL 100
 
 /*
  * DMA_MAP expects the buffer to be an integral number of cache lines.
@@ -1743,7 +1763,8 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar);
 void ath10k_htt_rx_free(struct ath10k_htt *htt);
 
 void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
-void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
+bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
 int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
 int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
 int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt);
@@ -1752,8 +1773,23 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
                                u8 max_subfrms_ampdu,
                                u8 max_subfrms_amsdu);
 void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
+                            __le32 token,
+                            __le16 fetch_seq_num,
+                            struct htt_tx_fetch_record *records,
+                            size_t num_records);
+
+void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
+                             struct ieee80211_txq *txq);
+void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
+                             struct ieee80211_txq *txq);
+void ath10k_htt_tx_txq_sync(struct ath10k *ar);
+void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
+int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt);
+void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt);
+int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
+                                  bool is_presp);
 
-void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc);
 int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
 void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
 int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
index feab80a5b6eb2177bb02de25870e301e545a8651..cc979a4faeb02621b230842dc8cef71a55783bdf 100644 (file)
@@ -31,6 +31,8 @@
 /* when under memory pressure rx ring refill may fail and needs a retry */
 #define HTT_RX_RING_REFILL_RETRY_MS 50
 
+#define HTT_RX_RING_REFILL_RESCHED_MS 5
+
 static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
 static void ath10k_htt_txrx_compl_task(unsigned long ptr);
 
@@ -192,7 +194,8 @@ static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
                mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
                          msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
        } else if (num_deficit > 0) {
-               tasklet_schedule(&htt->rx_replenish_task);
+               mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
+                         msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
        }
        spin_unlock_bh(&htt->rx_ring.lock);
 }
@@ -223,12 +226,11 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar)
 void ath10k_htt_rx_free(struct ath10k_htt *htt)
 {
        del_timer_sync(&htt->rx_ring.refill_retry_timer);
-       tasklet_kill(&htt->rx_replenish_task);
        tasklet_kill(&htt->txrx_compl_task);
 
-       skb_queue_purge(&htt->tx_compl_q);
        skb_queue_purge(&htt->rx_compl_q);
        skb_queue_purge(&htt->rx_in_ord_compl_q);
+       skb_queue_purge(&htt->tx_fetch_ind_q);
 
        ath10k_htt_rx_ring_free(htt);
 
@@ -281,7 +283,6 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
 
 /* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
 static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
-                                  u8 **fw_desc, int *fw_desc_len,
                                   struct sk_buff_head *amsdu)
 {
        struct ath10k *ar = htt->ar;
@@ -323,48 +324,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
                        return -EIO;
                }
 
-               /*
-                * Copy the FW rx descriptor for this MSDU from the rx
-                * indication message into the MSDU's netbuf. HL uses the
-                * same rx indication message definition as LL, and simply
-                * appends new info (fields from the HW rx desc, and the
-                * MSDU payload itself). So, the offset into the rx
-                * indication message only has to account for the standard
-                * offset of the per-MSDU FW rx desc info within the
-                * message, and how many bytes of the per-MSDU FW rx desc
-                * info have already been consumed. (And the endianness of
-                * the host, since for a big-endian host, the rx ind
-                * message contents, including the per-MSDU rx desc bytes,
-                * were byteswapped during upload.)
-                */
-               if (*fw_desc_len > 0) {
-                       rx_desc->fw_desc.info0 = **fw_desc;
-                       /*
-                        * The target is expected to only provide the basic
-                        * per-MSDU rx descriptors. Just to be sure, verify
-                        * that the target has not attached extension data
-                        * (e.g. LRO flow ID).
-                        */
-
-                       /* or more, if there's extension data */
-                       (*fw_desc)++;
-                       (*fw_desc_len)--;
-               } else {
-                       /*
-                        * When an oversized AMSDU happened, FW will lost
-                        * some of MSDU status - in this case, the FW
-                        * descriptors provided will be less than the
-                        * actual MSDUs inside this MPDU. Mark the FW
-                        * descriptors so that it will still deliver to
-                        * upper stack, if no CRC error for this MPDU.
-                        *
-                        * FIX THIS - the FW descriptors are actually for
-                        * MSDUs in the end of this A-MSDU instead of the
-                        * beginning.
-                        */
-                       rx_desc->fw_desc.info0 = 0;
-               }
-
                msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
                                        & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
                                           RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
@@ -423,13 +382,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
        return msdu_chaining;
 }
 
-static void ath10k_htt_rx_replenish_task(unsigned long ptr)
-{
-       struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
-
-       ath10k_htt_rx_msdu_buff_replenish(htt);
-}
-
 static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
                                               u32 paddr)
 {
@@ -563,12 +515,10 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
        htt->rx_ring.sw_rd_idx.msdu_payld = 0;
        hash_init(htt->rx_ring.skb_table);
 
-       tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
-                    (unsigned long)htt);
-
-       skb_queue_head_init(&htt->tx_compl_q);
        skb_queue_head_init(&htt->rx_compl_q);
        skb_queue_head_init(&htt->rx_in_ord_compl_q);
+       skb_queue_head_init(&htt->tx_fetch_ind_q);
+       atomic_set(&htt->num_mpdus_ready, 0);
 
        tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
                     (unsigned long)htt);
@@ -860,6 +810,8 @@ static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
                ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
        if (!ch)
                ch = ath10k_htt_rx_h_any_channel(ar);
+       if (!ch)
+               ch = ar->tgt_oper_chan;
        spin_unlock_bh(&ar->data_lock);
 
        if (!ch)
@@ -1014,7 +966,7 @@ static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
        int len = ieee80211_hdrlen(hdr->frame_control);
 
        if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
-                     ar->fw_features))
+                     ar->running_fw->fw_file.fw_features))
                len = round_up(len, 4);
 
        return len;
@@ -1076,20 +1028,25 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
        hdr = (void *)msdu->data;
 
        /* Tail */
-       skb_trim(msdu, msdu->len - ath10k_htt_rx_crypto_tail_len(ar, enctype));
+       if (status->flag & RX_FLAG_IV_STRIPPED)
+               skb_trim(msdu, msdu->len -
+                        ath10k_htt_rx_crypto_tail_len(ar, enctype));
 
        /* MMIC */
-       if (!ieee80211_has_morefrags(hdr->frame_control) &&
+       if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
+           !ieee80211_has_morefrags(hdr->frame_control) &&
            enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
                skb_trim(msdu, msdu->len - 8);
 
        /* Head */
-       hdr_len = ieee80211_hdrlen(hdr->frame_control);
-       crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
+       if (status->flag & RX_FLAG_IV_STRIPPED) {
+               hdr_len = ieee80211_hdrlen(hdr->frame_control);
+               crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
 
-       memmove((void *)msdu->data + crypto_len,
-               (void *)msdu->data, hdr_len);
-       skb_pull(msdu, crypto_len);
+               memmove((void *)msdu->data + crypto_len,
+                       (void *)msdu->data, hdr_len);
+               skb_pull(msdu, crypto_len);
+       }
 }
 
 static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
@@ -1343,6 +1300,7 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
        bool has_tkip_err;
        bool has_peer_idx_invalid;
        bool is_decrypted;
+       bool is_mgmt;
        u32 attention;
 
        if (skb_queue_empty(amsdu))
@@ -1351,6 +1309,9 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
        first = skb_peek(amsdu);
        rxd = (void *)first->data - sizeof(*rxd);
 
+       is_mgmt = !!(rxd->attention.flags &
+                    __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
+
        enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
                     RX_MPDU_START_INFO0_ENCRYPT_TYPE);
 
@@ -1392,6 +1353,7 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
                          RX_FLAG_MMIC_ERROR |
                          RX_FLAG_DECRYPTED |
                          RX_FLAG_IV_STRIPPED |
+                         RX_FLAG_ONLY_MONITOR |
                          RX_FLAG_MMIC_STRIPPED);
 
        if (has_fcs_err)
@@ -1400,10 +1362,21 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
        if (has_tkip_err)
                status->flag |= RX_FLAG_MMIC_ERROR;
 
-       if (is_decrypted)
-               status->flag |= RX_FLAG_DECRYPTED |
-                               RX_FLAG_IV_STRIPPED |
-                               RX_FLAG_MMIC_STRIPPED;
+       /* Firmware reports all necessary management frames via WMI already.
+        * They are not reported to monitor interfaces at all so pass the ones
+        * coming via HTT to monitor interfaces instead. This simplifies
+        * matters a lot.
+        */
+       if (is_mgmt)
+               status->flag |= RX_FLAG_ONLY_MONITOR;
+
+       if (is_decrypted) {
+               status->flag |= RX_FLAG_DECRYPTED;
+
+               if (likely(!is_mgmt))
+                       status->flag |= RX_FLAG_IV_STRIPPED |
+                                       RX_FLAG_MMIC_STRIPPED;
+       }
 
        skb_queue_walk(amsdu, msdu) {
                ath10k_htt_rx_h_csum_offload(msdu);
@@ -1416,6 +1389,8 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
                 */
                if (!is_decrypted)
                        continue;
+               if (is_mgmt)
+                       continue;
 
                hdr = (void *)msdu->data;
                hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
@@ -1516,14 +1491,6 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
                                        struct sk_buff_head *amsdu,
                                        struct ieee80211_rx_status *rx_status)
 {
-       struct sk_buff *msdu;
-       struct htt_rx_desc *rxd;
-       bool is_mgmt;
-       bool has_fcs_err;
-
-       msdu = skb_peek(amsdu);
-       rxd = (void *)msdu->data - sizeof(*rxd);
-
        /* FIXME: It might be a good idea to do some fuzzy-testing to drop
         * invalid/dangerous frames.
         */
@@ -1533,23 +1500,6 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
                return false;
        }
 
-       is_mgmt = !!(rxd->attention.flags &
-                    __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
-       has_fcs_err = !!(rxd->attention.flags &
-                        __cpu_to_le32(RX_ATTENTION_FLAGS_FCS_ERR));
-
-       /* Management frames are handled via WMI events. The pros of such
-        * approach is that channel is explicitly provided in WMI events
-        * whereas HTT doesn't provide channel information for Rxed frames.
-        *
-        * However some firmware revisions don't report corrupted frames via
-        * WMI so don't drop them.
-        */
-       if (is_mgmt && !has_fcs_err) {
-               ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
-               return false;
-       }
-
        if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
                ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
                return false;
@@ -1571,25 +1521,49 @@ static void ath10k_htt_rx_h_filter(struct ath10k *ar,
        __skb_queue_purge(amsdu);
 }
 
-static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
-                                 struct htt_rx_indication *rx)
+static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
 {
        struct ath10k *ar = htt->ar;
-       struct ieee80211_rx_status *rx_status = &htt->rx_status;
-       struct htt_rx_indication_mpdu_range *mpdu_ranges;
+       static struct ieee80211_rx_status rx_status;
        struct sk_buff_head amsdu;
-       int num_mpdu_ranges;
-       int fw_desc_len;
-       u8 *fw_desc;
-       int i, ret, mpdu_count = 0;
+       int ret;
 
-       lockdep_assert_held(&htt->rx_ring.lock);
+       __skb_queue_head_init(&amsdu);
 
-       if (htt->rx_confused)
-               return;
+       spin_lock_bh(&htt->rx_ring.lock);
+       if (htt->rx_confused) {
+               spin_unlock_bh(&htt->rx_ring.lock);
+               return -EIO;
+       }
+       ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
+       spin_unlock_bh(&htt->rx_ring.lock);
 
-       fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
-       fw_desc = (u8 *)&rx->fw_desc;
+       if (ret < 0) {
+               ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
+               __skb_queue_purge(&amsdu);
+               /* FIXME: It's probably a good idea to reboot the
+                * device instead of leaving it inoperable.
+                */
+               htt->rx_confused = true;
+               return ret;
+       }
+
+       ath10k_htt_rx_h_ppdu(ar, &amsdu, &rx_status, 0xffff);
+       ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
+       ath10k_htt_rx_h_filter(ar, &amsdu, &rx_status);
+       ath10k_htt_rx_h_mpdu(ar, &amsdu, &rx_status);
+       ath10k_htt_rx_h_deliver(ar, &amsdu, &rx_status);
+
+       return 0;
+}
+
+static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
+                                     struct htt_rx_indication *rx)
+{
+       struct ath10k *ar = htt->ar;
+       struct htt_rx_indication_mpdu_range *mpdu_ranges;
+       int num_mpdu_ranges;
+       int i, mpdu_count = 0;
 
        num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
                             HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
@@ -1603,80 +1577,19 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
        for (i = 0; i < num_mpdu_ranges; i++)
                mpdu_count += mpdu_ranges[i].mpdu_count;
 
-       while (mpdu_count--) {
-               __skb_queue_head_init(&amsdu);
-               ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc,
-                                             &fw_desc_len, &amsdu);
-               if (ret < 0) {
-                       ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
-                       __skb_queue_purge(&amsdu);
-                       /* FIXME: It's probably a good idea to reboot the
-                        * device instead of leaving it inoperable.
-                        */
-                       htt->rx_confused = true;
-                       break;
-               }
+       atomic_add(mpdu_count, &htt->num_mpdus_ready);
 
-               ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
-               ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
-               ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
-               ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
-               ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
-       }
-
-       tasklet_schedule(&htt->rx_replenish_task);
+       tasklet_schedule(&htt->txrx_compl_task);
 }
 
-static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
-                                      struct htt_rx_fragment_indication *frag)
+static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt)
 {
-       struct ath10k *ar = htt->ar;
-       struct ieee80211_rx_status *rx_status = &htt->rx_status;
-       struct sk_buff_head amsdu;
-       int ret;
-       u8 *fw_desc;
-       int fw_desc_len;
-
-       fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
-       fw_desc = (u8 *)frag->fw_msdu_rx_desc;
-
-       __skb_queue_head_init(&amsdu);
+       atomic_inc(&htt->num_mpdus_ready);
 
-       spin_lock_bh(&htt->rx_ring.lock);
-       ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
-                                     &amsdu);
-       spin_unlock_bh(&htt->rx_ring.lock);
-
-       tasklet_schedule(&htt->rx_replenish_task);
-
-       ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
-
-       if (ret) {
-               ath10k_warn(ar, "failed to pop amsdu from httr rx ring for fragmented rx %d\n",
-                           ret);
-               __skb_queue_purge(&amsdu);
-               return;
-       }
-
-       if (skb_queue_len(&amsdu) != 1) {
-               ath10k_warn(ar, "failed to pop frag amsdu: too many msdus\n");
-               __skb_queue_purge(&amsdu);
-               return;
-       }
-
-       ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
-       ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
-       ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
-       ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
-
-       if (fw_desc_len > 0) {
-               ath10k_dbg(ar, ATH10K_DBG_HTT,
-                          "expecting more fragmented rx in one indication %d\n",
-                          fw_desc_len);
-       }
+       tasklet_schedule(&htt->txrx_compl_task);
 }
 
-static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
+static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
                                       struct sk_buff *skb)
 {
        struct ath10k_htt *htt = &ar->htt;
@@ -1688,19 +1601,19 @@ static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
 
        switch (status) {
        case HTT_DATA_TX_STATUS_NO_ACK:
-               tx_done.no_ack = true;
+               tx_done.status = HTT_TX_COMPL_STATE_NOACK;
                break;
        case HTT_DATA_TX_STATUS_OK:
-               tx_done.success = true;
+               tx_done.status = HTT_TX_COMPL_STATE_ACK;
                break;
        case HTT_DATA_TX_STATUS_DISCARD:
        case HTT_DATA_TX_STATUS_POSTPONE:
        case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
-               tx_done.discard = true;
+               tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
                break;
        default:
                ath10k_warn(ar, "unhandled tx completion status %d\n", status);
-               tx_done.discard = true;
+               tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
                break;
        }
 
@@ -1710,7 +1623,20 @@ static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
        for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
                msdu_id = resp->data_tx_completion.msdus[i];
                tx_done.msdu_id = __le16_to_cpu(msdu_id);
-               ath10k_txrx_tx_unref(htt, &tx_done);
+
+               /* kfifo_put: In practice firmware shouldn't fire off per-CE
+                * interrupt and main interrupt (MSI/-X) for the same
+                * HTC service so it should be safe to use kfifo_put w/o lock.
+                *
+                * From kfifo_put() documentation:
+                *  Note that with only one concurrent reader and one concurrent
+                *  writer, you don't need extra locking to use these macro.
+                */
+               if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
+                       ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
+                                   tx_done.msdu_id, tx_done.status);
+                       ath10k_txrx_tx_unref(htt, &tx_done);
+               }
        }
 }
 
@@ -1978,11 +1904,324 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
                        return;
                }
        }
+       ath10k_htt_rx_msdu_buff_replenish(htt);
+}
+
+static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
+                                                  const __le32 *resp_ids,
+                                                  int num_resp_ids)
+{
+       int i;
+       u32 resp_id;
+
+       ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
+                  num_resp_ids);
+
+       for (i = 0; i < num_resp_ids; i++) {
+               resp_id = le32_to_cpu(resp_ids[i]);
+
+               ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
+                          resp_id);
+
+               /* TODO: free resp_id */
+       }
+}
+
+static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
+{
+       struct ieee80211_hw *hw = ar->hw;
+       struct ieee80211_txq *txq;
+       struct htt_resp *resp = (struct htt_resp *)skb->data;
+       struct htt_tx_fetch_record *record;
+       size_t len;
+       size_t max_num_bytes;
+       size_t max_num_msdus;
+       size_t num_bytes;
+       size_t num_msdus;
+       const __le32 *resp_ids;
+       u16 num_records;
+       u16 num_resp_ids;
+       u16 peer_id;
+       u8 tid;
+       int ret;
+       int i;
+
+       ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");
+
+       len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
+       if (unlikely(skb->len < len)) {
+               ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
+               return;
+       }
+
+       num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
+       num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);
+
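+       /* The record and resp_id arrays are variable length; re-validate
+        * the buffer length once their counts are known.
+        */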
+       len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
+       len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;
+
+       if (unlikely(skb->len < len)) {
+               ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
+               return;
+       }
+
+       ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
+                  num_records, num_resp_ids,
+                  le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));
+
+       if (!ar->htt.tx_q_state.enabled) {
+               ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
+               return;
+       }
+
+       if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
+               ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
+               return;
+       }
+
+       rcu_read_lock();
+
+       for (i = 0; i < num_records; i++) {
+               record = &resp->tx_fetch_ind.records[i];
+               peer_id = MS(le16_to_cpu(record->info),
+                            HTT_TX_FETCH_RECORD_INFO_PEER_ID);
+               tid = MS(le16_to_cpu(record->info),
+                        HTT_TX_FETCH_RECORD_INFO_TID);
+               max_num_msdus = le16_to_cpu(record->num_msdus);
+               max_num_bytes = le32_to_cpu(record->num_bytes);
+
+               ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
+                          i, peer_id, tid, max_num_msdus, max_num_bytes);
+
+               if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
+                   unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
+                       ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
+                                   peer_id, tid);
+                       continue;
+               }
+
+               spin_lock_bh(&ar->data_lock);
+               txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
+               spin_unlock_bh(&ar->data_lock);
+
+               /* It is okay to release the lock and use txq because RCU read
+                * lock is held.
+                */
+
+               if (unlikely(!txq)) {
+                       ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
+                                   peer_id, tid);
+                       continue;
+               }
+
+               num_msdus = 0;
+               num_bytes = 0;
+
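+               /* Pull mode: push queued frames for this peer/tid until
+                * either the fw-provided msdu or byte quota is exhausted,
+                * then report how much was actually pushed back to the fw
+                * via the fetch record.
+                */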
+               while (num_msdus < max_num_msdus &&
+                      num_bytes < max_num_bytes) {
+                       ret = ath10k_mac_tx_push_txq(hw, txq);
+                       if (ret < 0)
+                               break;
+
+                       num_msdus++;
+                       num_bytes += ret;
+               }
+
+               record->num_msdus = cpu_to_le16(num_msdus);
+               record->num_bytes = cpu_to_le32(num_bytes);
+
+               ath10k_htt_tx_txq_recalc(hw, txq);
+       }
+
+       rcu_read_unlock();
+
+       resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
+       ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);
+
+       ret = ath10k_htt_tx_fetch_resp(ar,
+                                      resp->tx_fetch_ind.token,
+                                      resp->tx_fetch_ind.fetch_seq_num,
+                                      resp->tx_fetch_ind.records,
+                                      num_records);
+       if (unlikely(ret)) {
+               ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
+                           le32_to_cpu(resp->tx_fetch_ind.token), ret);
+               /* FIXME: request fw restart */
+       }
 
-       tasklet_schedule(&htt->rx_replenish_task);
+       ath10k_htt_tx_txq_sync(ar);
 }
 
-void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
+static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
+                                          struct sk_buff *skb)
+{
+       const struct htt_resp *resp = (void *)skb->data;
+       size_t len;
+       int num_resp_ids;
+
+       ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");
+
+       len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
+       if (unlikely(skb->len < len)) {
+               ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
+               return;
+       }
+
+       num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
+       len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;
+
+       if (unlikely(skb->len < len)) {
+               ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
+               return;
+       }
+
+       ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
+                                              resp->tx_fetch_confirm.resp_ids,
+                                              num_resp_ids);
+}
+
+static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
+                                            struct sk_buff *skb)
+{
+       const struct htt_resp *resp = (void *)skb->data;
+       const struct htt_tx_mode_switch_record *record;
+       struct ieee80211_txq *txq;
+       struct ath10k_txq *artxq;
+       size_t len;
+       size_t num_records;
+       enum htt_tx_mode_switch_mode mode;
+       bool enable;
+       u16 info0;
+       u16 info1;
+       u16 threshold;
+       u16 peer_id;
+       u8 tid;
+       int i;
+
+       ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");
+
+       len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
+       if (unlikely(skb->len < len)) {
+               ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
+               return;
+       }
+
+       info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
+       info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);
+
+       enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
+       num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
+       mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
+       threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
+
+       ath10k_dbg(ar, ATH10K_DBG_HTT,
+                  "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
+                  info0, info1, enable, num_records, mode, threshold);
+
+       len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;
+
+       if (unlikely(skb->len < len)) {
+               ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: too many records\n");
+               return;
+       }
+
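+       /* Only push and push/pull modes are defined; treat anything else
+        * as a malformed indication.
+        */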
+       switch (mode) {
+       case HTT_TX_MODE_SWITCH_PUSH:
+       case HTT_TX_MODE_SWITCH_PUSH_PULL:
+               break;
+       default:
+               ath10k_warn(ar, "received invalid tx_mode_switch_ind mode %d, ignoring\n",
+                           mode);
+               return;
+       }
+
+       if (!enable)
+               return;
+
+       ar->htt.tx_q_state.enabled = enable;
+       ar->htt.tx_q_state.mode = mode;
+       ar->htt.tx_q_state.num_push_allowed = threshold;
+
+       rcu_read_lock();
+
+       for (i = 0; i < num_records; i++) {
+               record = &resp->tx_mode_switch_ind.records[i];
+               info0 = le16_to_cpu(record->info0);
+               peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
+               tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);
+
+               if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
+                   unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
+                       ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
+                                   peer_id, tid);
+                       continue;
+               }
+
+               spin_lock_bh(&ar->data_lock);
+               txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
+               spin_unlock_bh(&ar->data_lock);
+
+               /* It is okay to release the lock and use txq because RCU read
+                * lock is held.
+                */
+
+               if (unlikely(!txq)) {
+                       ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
+                                   peer_id, tid);
+                       continue;
+               }
+
+               spin_lock_bh(&ar->htt.tx_lock);
+               artxq = (void *)txq->drv_priv;
+               artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
+               spin_unlock_bh(&ar->htt.tx_lock);
+       }
+
+       rcu_read_unlock();
+
+       ath10k_mac_tx_push_pending(ar);
+}
+
+static inline enum nl80211_band phy_mode_to_band(u32 phy_mode)
+{
+       enum nl80211_band band;
+
+       switch (phy_mode) {
+       case MODE_11A:
+       case MODE_11NA_HT20:
+       case MODE_11NA_HT40:
+       case MODE_11AC_VHT20:
+       case MODE_11AC_VHT40:
+       case MODE_11AC_VHT80:
+               band = NL80211_BAND_5GHZ;
+               break;
+       case MODE_11G:
+       case MODE_11B:
+       case MODE_11GONLY:
+       case MODE_11NG_HT20:
+       case MODE_11NG_HT40:
+       case MODE_11AC_VHT20_2G:
+       case MODE_11AC_VHT40_2G:
+       case MODE_11AC_VHT80_2G:
+       default:
+               band = NL80211_BAND_2GHZ;
+       }
+
+       return band;
+}
+
+void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
+{
+       bool release;
+
+       release = ath10k_htt_t2h_msg_handler(ar, skb);
+
+       /* Free the indication buffer */
+       if (release)
+               dev_kfree_skb_any(skb);
+}
+
+bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
 {
        struct ath10k_htt *htt = &ar->htt;
        struct htt_resp *resp = (struct htt_resp *)skb->data;
@@ -1998,8 +2237,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
        if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
                ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X",
                           resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
-               dev_kfree_skb_any(skb);
-               return;
+               return true;
        }
        type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
 
@@ -2011,9 +2249,8 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
                break;
        }
        case HTT_T2H_MSG_TYPE_RX_IND:
-               skb_queue_tail(&htt->rx_compl_q, skb);
-               tasklet_schedule(&htt->txrx_compl_task);
-               return;
+               ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind);
+               break;
        case HTT_T2H_MSG_TYPE_PEER_MAP: {
                struct htt_peer_map_event ev = {
                        .vdev_id = resp->peer_map.vdev_id,
@@ -2034,28 +2271,33 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
                struct htt_tx_done tx_done = {};
                int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
 
-               tx_done.msdu_id =
-                       __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
+               tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
 
                switch (status) {
                case HTT_MGMT_TX_STATUS_OK:
-                       tx_done.success = true;
+                       tx_done.status = HTT_TX_COMPL_STATE_ACK;
                        break;
                case HTT_MGMT_TX_STATUS_RETRY:
-                       tx_done.no_ack = true;
+                       tx_done.status = HTT_TX_COMPL_STATE_NOACK;
                        break;
                case HTT_MGMT_TX_STATUS_DROP:
-                       tx_done.discard = true;
+                       tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
                        break;
                }
 
-               ath10k_txrx_tx_unref(htt, &tx_done);
+               status = ath10k_txrx_tx_unref(htt, &tx_done);
+               if (!status) {
+                       spin_lock_bh(&htt->tx_lock);
+                       ath10k_htt_tx_mgmt_dec_pending(htt);
+                       spin_unlock_bh(&htt->tx_lock);
+               }
+               ath10k_mac_tx_push_pending(ar);
                break;
        }
        case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
-               skb_queue_tail(&htt->tx_compl_q, skb);
+               ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
                tasklet_schedule(&htt->txrx_compl_task);
-               return;
+               break;
        case HTT_T2H_MSG_TYPE_SEC_IND: {
                struct ath10k *ar = htt->ar;
                struct htt_security_indication *ev = &resp->security_indication;
@@ -2071,7 +2313,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
        case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
                ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
                                skb->data, skb->len);
-               ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
+               ath10k_htt_rx_frag_handler(htt);
                break;
        }
        case HTT_T2H_MSG_TYPE_TEST:
@@ -2111,18 +2353,39 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
        case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
                skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
                tasklet_schedule(&htt->txrx_compl_task);
-               return;
+               return false;
        }
        case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
                break;
-       case HTT_T2H_MSG_TYPE_CHAN_CHANGE:
+       case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
+               u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
+               u32 freq = __le32_to_cpu(resp->chan_change.freq);
+
+               ar->tgt_oper_chan =
+                       __ieee80211_get_channel(ar->hw->wiphy, freq);
+               ath10k_dbg(ar, ATH10K_DBG_HTT,
+                          "htt chan change freq %u phymode %s\n",
+                          freq, ath10k_wmi_phymode_str(phymode));
                break;
+       }
        case HTT_T2H_MSG_TYPE_AGGR_CONF:
                break;
-       case HTT_T2H_MSG_TYPE_TX_FETCH_IND:
+       case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
+               struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);
+
+               if (!tx_fetch_ind) {
+                       ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
+                       break;
+               }
+               skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
+               tasklet_schedule(&htt->txrx_compl_task);
+               break;
+       }
        case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
+               ath10k_htt_rx_tx_fetch_confirm(ar, skb);
+               break;
        case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
-               /* TODO: Implement pull-push logic */
+               ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
                break;
        case HTT_T2H_MSG_TYPE_EN_STATS:
        default:
@@ -2132,9 +2395,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
                                skb->data, skb->len);
                break;
        };
-
-       /* Free the indication buffer */
-       dev_kfree_skb_any(skb);
+       return true;
 }
 EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
 
@@ -2150,40 +2411,47 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr)
 {
        struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
        struct ath10k *ar = htt->ar;
-       struct sk_buff_head tx_q;
-       struct sk_buff_head rx_q;
+       struct htt_tx_done tx_done = {};
        struct sk_buff_head rx_ind_q;
-       struct htt_resp *resp;
+       struct sk_buff_head tx_ind_q;
        struct sk_buff *skb;
        unsigned long flags;
+       int num_mpdus;
 
-       __skb_queue_head_init(&tx_q);
-       __skb_queue_head_init(&rx_q);
        __skb_queue_head_init(&rx_ind_q);
-
-       spin_lock_irqsave(&htt->tx_compl_q.lock, flags);
-       skb_queue_splice_init(&htt->tx_compl_q, &tx_q);
-       spin_unlock_irqrestore(&htt->tx_compl_q.lock, flags);
-
-       spin_lock_irqsave(&htt->rx_compl_q.lock, flags);
-       skb_queue_splice_init(&htt->rx_compl_q, &rx_q);
-       spin_unlock_irqrestore(&htt->rx_compl_q.lock, flags);
+       __skb_queue_head_init(&tx_ind_q);
 
        spin_lock_irqsave(&htt->rx_in_ord_compl_q.lock, flags);
        skb_queue_splice_init(&htt->rx_in_ord_compl_q, &rx_ind_q);
        spin_unlock_irqrestore(&htt->rx_in_ord_compl_q.lock, flags);
 
-       while ((skb = __skb_dequeue(&tx_q))) {
-               ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
+       spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
+       skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
+       spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
+
+       /* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
+        * From kfifo_get() documentation:
+        *  Note that with only one concurrent reader and one concurrent writer,
+        *  you don't need extra locking to use these macro.
+        */
+       while (kfifo_get(&htt->txdone_fifo, &tx_done))
+               ath10k_txrx_tx_unref(htt, &tx_done);
+
+       while ((skb = __skb_dequeue(&tx_ind_q))) {
+               ath10k_htt_rx_tx_fetch_ind(ar, skb);
                dev_kfree_skb_any(skb);
        }
 
-       while ((skb = __skb_dequeue(&rx_q))) {
-               resp = (struct htt_resp *)skb->data;
-               spin_lock_bh(&htt->rx_ring.lock);
-               ath10k_htt_rx_handler(htt, &resp->rx_ind);
-               spin_unlock_bh(&htt->rx_ring.lock);
-               dev_kfree_skb_any(skb);
+       ath10k_mac_tx_push_pending(ar);
+
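+       /* Drain as many MPDUs as the rx/frag indications have accounted
+        * for, bailing out early if the rx ring turns out to be corrupted.
+        */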
+       num_mpdus = atomic_read(&htt->num_mpdus_ready);
+
+       while (num_mpdus) {
+               if (ath10k_htt_rx_handle_amsdu(htt))
+                       break;
+
+               num_mpdus--;
+               atomic_dec(&htt->num_mpdus_ready);
        }
 
        while ((skb = __skb_dequeue(&rx_ind_q))) {
@@ -2192,4 +2460,6 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr)
                spin_unlock_bh(&htt->rx_ring.lock);
                dev_kfree_skb_any(skb);
        }
+
+       ath10k_htt_rx_msdu_buff_replenish(htt);
 }
index 95acb727c068f242f1324a39f04c540291071126..6269c610b0a3cbb295a2122c056c230a1822ba53 100644 (file)
 #include "txrx.h"
 #include "debug.h"
 
-void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc)
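+/* Encode a queue depth in bytes into the 8-bit factor/exponent format used
+ * by the shared host/fw tx queue state: bytes ~= factor * 128 * 8^exp,
+ * saturating at 0xff. Illustrative example: count = 20000 gives factor =
+ * 156 -> 19 with exp = 1, i.e. roughly 19 * 128 * 8 = 19456 bytes.
+ */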
+static u8 ath10k_htt_tx_txq_calc_size(size_t count)
 {
-       if (limit_mgmt_desc)
-               htt->num_pending_mgmt_tx--;
+       int exp;
+       int factor;
+
+       exp = 0;
+       factor = count >> 7;
+
+       while (factor >= 64 && exp < 4) {
+               factor >>= 3;
+               exp++;
+       }
+
+       if (exp == 4)
+               return 0xff;
+
+       if (count > 0)
+               factor = max(1, factor);
+
+       return SM(exp, HTT_TX_Q_STATE_ENTRY_EXP) |
+              SM(factor, HTT_TX_Q_STATE_ENTRY_FACTOR);
+}
+
+static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
+                                      struct ieee80211_txq *txq)
+{
+       struct ath10k *ar = hw->priv;
+       struct ath10k_sta *arsta = (void *)txq->sta->drv_priv;
+       struct ath10k_vif *arvif = (void *)txq->vif->drv_priv;
+       unsigned long frame_cnt;
+       unsigned long byte_cnt;
+       int idx;
+       u32 bit;
+       u16 peer_id;
+       u8 tid;
+       u8 count;
+
+       lockdep_assert_held(&ar->htt.tx_lock);
+
+       if (!ar->htt.tx_q_state.enabled)
+               return;
+
+       if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
+               return;
+
+       if (txq->sta)
+               peer_id = arsta->peer_id;
+       else
+               peer_id = arvif->peer_id;
+
+       tid = txq->tid;
+       bit = BIT(peer_id % 32);
+       idx = peer_id / 32;
+
+       ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
+       count = ath10k_htt_tx_txq_calc_size(byte_cnt);
+
+       if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
+           unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
+               ath10k_warn(ar, "refusing to update txq for out of range peer_id %hu tid %hhu\n",
+                           peer_id, tid);
+               return;
+       }
+
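+       /* Publish the new depth and set the peer/tid bit in the lookup map
+        * only when traffic is pending, so the fw knows which queues are
+        * worth pulling from.
+        */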
+       ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count;
+       ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit;
+       ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0;
+
+       ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %hu tid %hhu count %hhu\n",
+                  peer_id, tid, count);
+}
+
+static void __ath10k_htt_tx_txq_sync(struct ath10k *ar)
+{
+       u32 seq;
+       size_t size;
+
+       lockdep_assert_held(&ar->htt.tx_lock);
+
+       if (!ar->htt.tx_q_state.enabled)
+               return;
+
+       if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
+               return;
+
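+       /* Bump the sequence number and flush the queue state to the device
+        * so the fw picks up a consistent snapshot.
+        */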
+       seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq);
+       seq++;
+       ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq);
+
+       ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update commit seq %u\n",
+                  seq);
+
+       size = sizeof(*ar->htt.tx_q_state.vaddr);
+       dma_sync_single_for_device(ar->dev,
+                                  ar->htt.tx_q_state.paddr,
+                                  size,
+                                  DMA_TO_DEVICE);
+}
+
+void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
+                             struct ieee80211_txq *txq)
+{
+       struct ath10k *ar = hw->priv;
+
+       spin_lock_bh(&ar->htt.tx_lock);
+       __ath10k_htt_tx_txq_recalc(hw, txq);
+       spin_unlock_bh(&ar->htt.tx_lock);
+}
+
+void ath10k_htt_tx_txq_sync(struct ath10k *ar)
+{
+       spin_lock_bh(&ar->htt.tx_lock);
+       __ath10k_htt_tx_txq_sync(ar);
+       spin_unlock_bh(&ar->htt.tx_lock);
+}
+
+void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
+                             struct ieee80211_txq *txq)
+{
+       struct ath10k *ar = hw->priv;
+
+       spin_lock_bh(&ar->htt.tx_lock);
+       __ath10k_htt_tx_txq_recalc(hw, txq);
+       __ath10k_htt_tx_txq_sync(ar);
+       spin_unlock_bh(&ar->htt.tx_lock);
+}
+
+void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
+{
+       lockdep_assert_held(&htt->tx_lock);
 
        htt->num_pending_tx--;
        if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
                ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
 }
 
-static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt,
-                                     bool limit_mgmt_desc)
+int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
 {
-       spin_lock_bh(&htt->tx_lock);
-       __ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
-       spin_unlock_bh(&htt->tx_lock);
+       lockdep_assert_held(&htt->tx_lock);
+
+       if (htt->num_pending_tx >= htt->max_num_pending_tx)
+               return -EBUSY;
+
+       htt->num_pending_tx++;
+       if (htt->num_pending_tx == htt->max_num_pending_tx)
+               ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
+
+       return 0;
 }
 
-static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt,
-                                    bool limit_mgmt_desc, bool is_probe_resp)
+int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
+                                  bool is_presp)
 {
        struct ath10k *ar = htt->ar;
-       int ret = 0;
 
-       spin_lock_bh(&htt->tx_lock);
+       lockdep_assert_held(&htt->tx_lock);
 
-       if (htt->num_pending_tx >= htt->max_num_pending_tx) {
-               ret = -EBUSY;
-               goto exit;
-       }
+       if (!is_mgmt || !ar->hw_params.max_probe_resp_desc_thres)
+               return 0;
 
-       if (limit_mgmt_desc) {
-               if (is_probe_resp && (htt->num_pending_mgmt_tx >
-                   ar->hw_params.max_probe_resp_desc_thres)) {
-                       ret = -EBUSY;
-                       goto exit;
-               }
-               htt->num_pending_mgmt_tx++;
-       }
+       if (is_presp &&
+           ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx)
+               return -EBUSY;
 
-       htt->num_pending_tx++;
-       if (htt->num_pending_tx == htt->max_num_pending_tx)
-               ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
+       htt->num_pending_mgmt_tx++;
 
-exit:
-       spin_unlock_bh(&htt->tx_lock);
-       return ret;
+       return 0;
+}
+
+void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt)
+{
+       lockdep_assert_held(&htt->tx_lock);
+
+       if (!htt->ar->hw_params.max_probe_resp_desc_thres)
+               return;
+
+       htt->num_pending_mgmt_tx--;
 }
 
 int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
@@ -137,7 +267,8 @@ static void ath10k_htt_tx_free_txq(struct ath10k_htt *htt)
        struct ath10k *ar = htt->ar;
        size_t size;
 
-       if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, ar->fw_features))
+       if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+                     ar->running_fw->fw_file.fw_features))
                return;
 
        size = sizeof(*htt->tx_q_state.vaddr);
@@ -152,7 +283,8 @@ static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt)
        size_t size;
        int ret;
 
-       if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, ar->fw_features))
+       if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+                     ar->running_fw->fw_file.fw_features))
                return 0;
 
        htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS;
@@ -209,8 +341,18 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
                goto free_frag_desc;
        }
 
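+       /* kfifo capacities are powers of two; round up so the fifo has a
+        * slot for every possible in-flight tx completion.
+        */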
+       size = roundup_pow_of_two(htt->max_num_pending_tx);
+       ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL);
+       if (ret) {
+               ath10k_err(ar, "failed to alloc txdone fifo: %d\n", ret);
+               goto free_txq;
+       }
+
        return 0;
 
+free_txq:
+       ath10k_htt_tx_free_txq(htt);
+
 free_frag_desc:
        ath10k_htt_tx_free_cont_frag_desc(htt);
 
@@ -234,8 +376,8 @@ static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
 
        ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);
 
-       tx_done.discard = 1;
        tx_done.msdu_id = msdu_id;
+       tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
 
        ath10k_txrx_tx_unref(htt, &tx_done);
 
@@ -258,6 +400,8 @@ void ath10k_htt_tx_free(struct ath10k_htt *htt)
 
        ath10k_htt_tx_free_txq(htt);
        ath10k_htt_tx_free_cont_frag_desc(htt);
+       WARN_ON(!kfifo_is_empty(&htt->txdone_fifo));
+       kfifo_free(&htt->txdone_fifo);
 }
 
 void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
@@ -371,7 +515,8 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
        info |= SM(htt->tx_q_state.type,
                   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);
 
-       if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, ar->fw_features))
+       if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+                    ar->running_fw->fw_file.fw_features))
                info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;
 
        cfg = &cmd->frag_desc_bank_cfg;
@@ -535,6 +680,55 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
        return 0;
 }
 
+int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
+                            __le32 token,
+                            __le16 fetch_seq_num,
+                            struct htt_tx_fetch_record *records,
+                            size_t num_records)
+{
+       struct sk_buff *skb;
+       struct htt_cmd *cmd;
+       const u16 resp_id = 0;
+       int len = 0;
+       int ret;
+
+       /* Response IDs are echoed back only for host driver convenience
+        * purposes. They aren't used for anything in the driver yet, so use 0.
+        */
+
+       len += sizeof(cmd->hdr);
+       len += sizeof(cmd->tx_fetch_resp);
+       len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records;
+
+       skb = ath10k_htc_alloc_skb(ar, len);
+       if (!skb)
+               return -ENOMEM;
+
+       skb_put(skb, len);
+       cmd = (struct htt_cmd *)skb->data;
+       cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP;
+       cmd->tx_fetch_resp.resp_id = cpu_to_le16(resp_id);
+       cmd->tx_fetch_resp.fetch_seq_num = fetch_seq_num;
+       cmd->tx_fetch_resp.num_records = cpu_to_le16(num_records);
+       cmd->tx_fetch_resp.token = token;
+
+       memcpy(cmd->tx_fetch_resp.records, records,
+              sizeof(records[0]) * num_records);
+
+       ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb);
+       if (ret) {
+               ath10k_warn(ar, "failed to submit htc command: %d\n", ret);
+               goto err_free_skb;
+       }
+
+       return 0;
+
+err_free_skb:
+       dev_kfree_skb_any(skb);
+
+       return ret;
+}
+
 static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb)
 {
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -576,20 +770,6 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
        int msdu_id = -1;
        int res;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
-       bool limit_mgmt_desc = false;
-       bool is_probe_resp = false;
-
-       if (ar->hw_params.max_probe_resp_desc_thres) {
-               limit_mgmt_desc = true;
-
-               if (ieee80211_is_probe_resp(hdr->frame_control))
-                       is_probe_resp = true;
-       }
-
-       res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
-
-       if (res)
-               goto err;
 
        len += sizeof(cmd->hdr);
        len += sizeof(cmd->mgmt_tx);
@@ -598,7 +778,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
        res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
        spin_unlock_bh(&htt->tx_lock);
        if (res < 0)
-               goto err_tx_dec;
+               goto err;
 
        msdu_id = res;
 
@@ -649,8 +829,6 @@ err_free_msdu_id:
        spin_lock_bh(&htt->tx_lock);
        ath10k_htt_tx_free_msdu_id(htt, msdu_id);
        spin_unlock_bh(&htt->tx_lock);
-err_tx_dec:
-       ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
 err:
        return res;
 }
@@ -677,26 +855,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
        u32 frags_paddr = 0;
        u32 txbuf_paddr;
        struct htt_msdu_ext_desc *ext_desc = NULL;
-       bool limit_mgmt_desc = false;
-       bool is_probe_resp = false;
-
-       if (unlikely(ieee80211_is_mgmt(hdr->frame_control)) &&
-           ar->hw_params.max_probe_resp_desc_thres) {
-               limit_mgmt_desc = true;
-
-               if (ieee80211_is_probe_resp(hdr->frame_control))
-                       is_probe_resp = true;
-       }
-
-       res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
-       if (res)
-               goto err;
 
        spin_lock_bh(&htt->tx_lock);
        res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
        spin_unlock_bh(&htt->tx_lock);
        if (res < 0)
-               goto err_tx_dec;
+               goto err;
 
        msdu_id = res;
 
@@ -862,11 +1026,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
 err_unmap_msdu:
        dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
 err_free_msdu_id:
-       spin_lock_bh(&htt->tx_lock);
        ath10k_htt_tx_free_msdu_id(htt, msdu_id);
-       spin_unlock_bh(&htt->tx_lock);
-err_tx_dec:
-       ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
 err:
        return res;
 }
index f0cfbc745c97dea91d368e5de4ef72bdb00702e2..aedd8987040b655db665064f5c846208b6eee89c 100644 (file)
@@ -35,8 +35,6 @@
 #define QCA988X_HW_2_0_VERSION         0x4100016c
 #define QCA988X_HW_2_0_CHIP_ID_REV     0x2
 #define QCA988X_HW_2_0_FW_DIR          ATH10K_FW_DIR "/QCA988X/hw2.0"
-#define QCA988X_HW_2_0_FW_FILE         "firmware.bin"
-#define QCA988X_HW_2_0_OTP_FILE                "otp.bin"
 #define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
 #define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
 
@@ -76,14 +74,10 @@ enum qca9377_chip_id_rev {
 };
 
 #define QCA6174_HW_2_1_FW_DIR          "ath10k/QCA6174/hw2.1"
-#define QCA6174_HW_2_1_FW_FILE         "firmware.bin"
-#define QCA6174_HW_2_1_OTP_FILE                "otp.bin"
 #define QCA6174_HW_2_1_BOARD_DATA_FILE "board.bin"
 #define QCA6174_HW_2_1_PATCH_LOAD_ADDR 0x1234
 
 #define QCA6174_HW_3_0_FW_DIR          "ath10k/QCA6174/hw3.0"
-#define QCA6174_HW_3_0_FW_FILE         "firmware.bin"
-#define QCA6174_HW_3_0_OTP_FILE                "otp.bin"
 #define QCA6174_HW_3_0_BOARD_DATA_FILE "board.bin"
 #define QCA6174_HW_3_0_PATCH_LOAD_ADDR 0x1234
 
@@ -94,23 +88,17 @@ enum qca9377_chip_id_rev {
 #define QCA99X0_HW_2_0_DEV_VERSION     0x01000000
 #define QCA99X0_HW_2_0_CHIP_ID_REV     0x1
 #define QCA99X0_HW_2_0_FW_DIR          ATH10K_FW_DIR "/QCA99X0/hw2.0"
-#define QCA99X0_HW_2_0_FW_FILE         "firmware.bin"
-#define QCA99X0_HW_2_0_OTP_FILE        "otp.bin"
 #define QCA99X0_HW_2_0_BOARD_DATA_FILE "board.bin"
 #define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234
 
 /* QCA9377 1.0 definitions */
 #define QCA9377_HW_1_0_FW_DIR          ATH10K_FW_DIR "/QCA9377/hw1.0"
-#define QCA9377_HW_1_0_FW_FILE         "firmware.bin"
-#define QCA9377_HW_1_0_OTP_FILE        "otp.bin"
 #define QCA9377_HW_1_0_BOARD_DATA_FILE "board.bin"
 #define QCA9377_HW_1_0_PATCH_LOAD_ADDR 0x1234
 
 /* QCA4019 1.0 definitions */
 #define QCA4019_HW_1_0_DEV_VERSION     0x01000000
 #define QCA4019_HW_1_0_FW_DIR          ATH10K_FW_DIR "/QCA4019/hw1.0"
-#define QCA4019_HW_1_0_FW_FILE         "firmware.bin"
-#define QCA4019_HW_1_0_OTP_FILE        "otp.bin"
 #define QCA4019_HW_1_0_BOARD_DATA_FILE "board.bin"
 #define QCA4019_HW_1_0_PATCH_LOAD_ADDR  0x1234
 
@@ -134,8 +122,6 @@ enum qca9377_chip_id_rev {
 
 #define REG_DUMP_COUNT_QCA988X 60
 
-#define QCA988X_CAL_DATA_LEN           2116
-
 struct ath10k_fw_ie {
        __le32 id;
        __le32 len;
@@ -431,10 +417,14 @@ enum ath10k_hw_4addr_pad {
 #define TARGET_10_4_ACTIVE_PEERS               0
 
 #define TARGET_10_4_NUM_QCACHE_PEERS_MAX       512
+#define TARGET_10_4_QCACHE_ACTIVE_PEERS                50
+#define TARGET_10_4_QCACHE_ACTIVE_PEERS_PFC    35
 #define TARGET_10_4_NUM_OFFLOAD_PEERS          0
 #define TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS  0
 #define TARGET_10_4_NUM_PEER_KEYS              2
 #define TARGET_10_4_TGT_NUM_TIDS               ((TARGET_10_4_NUM_PEERS) * 2)
+#define TARGET_10_4_NUM_MSDU_DESC              (1024 + 400)
+#define TARGET_10_4_NUM_MSDU_DESC_PFC          2500
 #define TARGET_10_4_AST_SKID_LIMIT             32
 
 /* 100 ms for video, best-effort, and background */
index 78999c9de23b350a7cc871ee446792a2825b77f2..0e24f9ee8bffdd98850a23088bab80a111f3a95e 100644 (file)
@@ -157,6 +157,26 @@ ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
        return 1;
 }
 
+int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val)
+{
+       enum wmi_host_platform_type platform_type;
+       int ret;
+
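+       /* Assumption: fw that supports dynamic tx mode switching runs on a
+        * low-performance host platform; everything else is advertised as
+        * high-performance.
+        */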
+       if (test_bit(WMI_SERVICE_TX_MODE_DYNAMIC, ar->wmi.svc_map))
+               platform_type = WMI_HOST_PLATFORM_LOW_PERF;
+       else
+               platform_type = WMI_HOST_PLATFORM_HIGH_PERF;
+
+       ret = ath10k_wmi_ext_resource_config(ar, platform_type, val);
+
+       if (ret && ret != -EOPNOTSUPP) {
+               ath10k_warn(ar, "failed to configure ext resource: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
 /**********/
 /* Crypto */
 /**********/
@@ -449,10 +469,10 @@ static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif,
        lockdep_assert_held(&ar->conf_mutex);
 
        list_for_each_entry(peer, &ar->peers, list) {
-               if (!memcmp(peer->addr, arvif->vif->addr, ETH_ALEN))
+               if (ether_addr_equal(peer->addr, arvif->vif->addr))
                        continue;
 
-               if (!memcmp(peer->addr, arvif->bssid, ETH_ALEN))
+               if (ether_addr_equal(peer->addr, arvif->bssid))
                        continue;
 
                if (peer->keys[key->keyidx] == key)
@@ -482,7 +502,7 @@ chan_to_phymode(const struct cfg80211_chan_def *chandef)
        enum wmi_phy_mode phymode = MODE_UNKNOWN;
 
        switch (chandef->chan->band) {
-       case IEEE80211_BAND_2GHZ:
+       case NL80211_BAND_2GHZ:
                switch (chandef->width) {
                case NL80211_CHAN_WIDTH_20_NOHT:
                        if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM)
@@ -505,7 +525,7 @@ chan_to_phymode(const struct cfg80211_chan_def *chandef)
                        break;
                }
                break;
-       case IEEE80211_BAND_5GHZ:
+       case NL80211_BAND_5GHZ:
                switch (chandef->width) {
                case NL80211_CHAN_WIDTH_20_NOHT:
                        phymode = MODE_11A;
@@ -618,10 +638,15 @@ ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
        *def = &conf->def;
 }
 
-static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr,
+static int ath10k_peer_create(struct ath10k *ar,
+                             struct ieee80211_vif *vif,
+                             struct ieee80211_sta *sta,
+                             u32 vdev_id,
+                             const u8 *addr,
                              enum wmi_peer_type peer_type)
 {
        struct ath10k_vif *arvif;
+       struct ath10k_peer *peer;
        int num_peers = 0;
        int ret;
 
@@ -650,6 +675,22 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr,
                return ret;
        }
 
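+       /* Bind the mac80211 vif/sta to the new peer so rx/tx paths can map
+        * peer ids back to stations; if the peer cannot be found after the
+        * WMI command, roll the creation back.
+        */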
+       spin_lock_bh(&ar->data_lock);
+
+       peer = ath10k_peer_find(ar, vdev_id, addr);
+       if (!peer) {
+               ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n",
+                           addr, vdev_id);
+               ath10k_wmi_peer_delete(ar, vdev_id, addr);
+               spin_unlock_bh(&ar->data_lock);
+               return -ENOENT;
+       }
+
+       peer->vif = vif;
+       peer->sta = sta;
+
+       spin_unlock_bh(&ar->data_lock);
+
        ar->num_peers++;
 
        return 0;
@@ -731,6 +772,7 @@ static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
 static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
 {
        struct ath10k_peer *peer, *tmp;
+       int peer_id;
 
        lockdep_assert_held(&ar->conf_mutex);
 
@@ -742,6 +784,11 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
                ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n",
                            peer->addr, vdev_id);
 
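+               /* A peer may own several peer ids; clear every peer_map
+                * entry still pointing at it before freeing.
+                */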
+               for_each_set_bit(peer_id, peer->peer_ids,
+                                ATH10K_MAX_NUM_PEER_IDS) {
+                       ar->peer_map[peer_id] = NULL;
+               }
+
                list_del(&peer->list);
                kfree(peer);
                ar->num_peers--;
@@ -1725,7 +1772,7 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
 
        if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 &&
            !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
-                     ar->fw_features)) {
+                     ar->running_fw->fw_file.fw_features)) {
                ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
                            arvif->vdev_id);
                enable_ps = false;
@@ -2013,7 +2060,8 @@ static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
        }
 
        if (sta->mfp &&
-           test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT, ar->fw_features)) {
+           test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT,
+                    ar->running_fw->fw_file.fw_features)) {
                arg->peer_flags |= ar->wmi.peer_flags->pmf;
        }
 }
@@ -2028,7 +2076,7 @@ static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
        struct cfg80211_chan_def def;
        const struct ieee80211_supported_band *sband;
        const struct ieee80211_rate *rates;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        u32 ratemask;
        u8 rate;
        int i;
@@ -2088,7 +2136,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
        const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
        struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
        struct cfg80211_chan_def def;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        const u8 *ht_mcs_mask;
        const u16 *vht_mcs_mask;
        int i, n;
@@ -2312,7 +2360,7 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
        const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
        struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
        struct cfg80211_chan_def def;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        const u16 *vht_mcs_mask;
        u8 ampdu_factor;
 
@@ -2330,7 +2378,7 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
 
        arg->peer_flags |= ar->wmi.peer_flags->vht;
 
-       if (def.chan->band == IEEE80211_BAND_2GHZ)
+       if (def.chan->band == NL80211_BAND_2GHZ)
                arg->peer_flags |= ar->wmi.peer_flags->vht_2g;
 
        arg->peer_vht_caps = vht_cap->cap;
@@ -2399,7 +2447,7 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
 
 static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
 {
-       return sta->supp_rates[IEEE80211_BAND_2GHZ] >>
+       return sta->supp_rates[NL80211_BAND_2GHZ] >>
               ATH10K_MAC_FIRST_OFDM_RATE_IDX;
 }
 
@@ -2410,7 +2458,7 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
 {
        struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
        struct cfg80211_chan_def def;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        const u8 *ht_mcs_mask;
        const u16 *vht_mcs_mask;
        enum wmi_phy_mode phymode = MODE_UNKNOWN;
@@ -2423,7 +2471,7 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
        vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
 
        switch (band) {
-       case IEEE80211_BAND_2GHZ:
+       case NL80211_BAND_2GHZ:
                if (sta->vht_cap.vht_supported &&
                    !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
                        if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
@@ -2443,7 +2491,7 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
                }
 
                break;
-       case IEEE80211_BAND_5GHZ:
+       case NL80211_BAND_5GHZ:
                /*
                 * Check VHT first.
                 */
@@ -2821,7 +2869,7 @@ static int ath10k_update_channel_list(struct ath10k *ar)
 {
        struct ieee80211_hw *hw = ar->hw;
        struct ieee80211_supported_band **bands;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        struct ieee80211_channel *channel;
        struct wmi_scan_chan_list_arg arg = {0};
        struct wmi_channel_arg *ch;
@@ -2833,7 +2881,7 @@ static int ath10k_update_channel_list(struct ath10k *ar)
        lockdep_assert_held(&ar->conf_mutex);
 
        bands = hw->wiphy->bands;
-       for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+       for (band = 0; band < NUM_NL80211_BANDS; band++) {
                if (!bands[band])
                        continue;
 
@@ -2852,7 +2900,7 @@ static int ath10k_update_channel_list(struct ath10k *ar)
                return -ENOMEM;
 
        ch = arg.channels;
-       for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+       for (band = 0; band < NUM_NL80211_BANDS; band++) {
                if (!bands[band])
                        continue;
 
@@ -2890,7 +2938,7 @@ static int ath10k_update_channel_list(struct ath10k *ar)
                        /* FIXME: why use only legacy modes, why not any
                         * HT/VHT modes? Would that even make any
                         * difference? */
-                       if (channel->band == IEEE80211_BAND_2GHZ)
+                       if (channel->band == NL80211_BAND_2GHZ)
                                ch->mode = MODE_11G;
                        else
                                ch->mode = MODE_11A;
@@ -2994,6 +3042,13 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,
 /* TX handlers */
 /***************/
 
+enum ath10k_mac_tx_path {
+       ATH10K_MAC_TX_HTT,
+       ATH10K_MAC_TX_HTT_MGMT,
+       ATH10K_MAC_TX_WMI_MGMT,
+       ATH10K_MAC_TX_UNKNOWN,
+};
+
 void ath10k_mac_tx_lock(struct ath10k *ar, int reason)
 {
        lockdep_assert_held(&ar->htt.tx_lock);
@@ -3153,7 +3208,8 @@ ath10k_mac_tx_h_get_txmode(struct ath10k *ar,
         */
        if (ar->htt.target_version_major < 3 &&
            (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
-           !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, ar->fw_features))
+           !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
+                     ar->running_fw->fw_file.fw_features))
                return ATH10K_HW_TXRX_MGMT;
 
        /* Workaround:
@@ -3271,6 +3327,28 @@ static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
        }
 }
 
+static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
+                                   struct ieee80211_vif *vif,
+                                   struct ieee80211_txq *txq,
+                                   struct sk_buff *skb)
+{
+       struct ieee80211_hdr *hdr = (void *)skb->data;
+       struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
+
+       cb->flags = 0;
+       if (!ath10k_tx_h_use_hwcrypto(vif, skb))
+               cb->flags |= ATH10K_SKB_F_NO_HWCRYPT;
+
+       if (ieee80211_is_mgmt(hdr->frame_control))
+               cb->flags |= ATH10K_SKB_F_MGMT;
+
+       if (ieee80211_is_data_qos(hdr->frame_control))
+               cb->flags |= ATH10K_SKB_F_QOS;
+
+       cb->vif = vif;
+       cb->txq = txq;
+}
+
 bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar)
 {
        /* FIXME: Not really sure since when the behaviour changed. At some
@@ -3281,7 +3359,7 @@ bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar)
         */
        return (ar->htt.target_version_major >= 3 &&
                ar->htt.target_version_minor >= 4 &&
-               ar->htt.op_version == ATH10K_FW_HTT_OP_VERSION_TLV);
+               ar->running_fw->fw_file.htt_op_version == ATH10K_FW_HTT_OP_VERSION_TLV);
 }
 
 static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb)
@@ -3306,26 +3384,50 @@ unlock:
        return ret;
 }
 
-static void ath10k_mac_tx(struct ath10k *ar, enum ath10k_hw_txrx_mode txmode,
-                         struct sk_buff *skb)
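+/* Map a txmode onto a delivery path: data frames always use HTT, while
+ * management frames go over WMI when the fw supports it, over the regular
+ * HTT data path on HTT >= 3.0 targets, and over the HTT mgmt command
+ * otherwise.
+ */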
+static enum ath10k_mac_tx_path
+ath10k_mac_tx_h_get_txpath(struct ath10k *ar,
+                          struct sk_buff *skb,
+                          enum ath10k_hw_txrx_mode txmode)
 {
-       struct ath10k_htt *htt = &ar->htt;
-       int ret = 0;
-
        switch (txmode) {
        case ATH10K_HW_TXRX_RAW:
        case ATH10K_HW_TXRX_NATIVE_WIFI:
        case ATH10K_HW_TXRX_ETHERNET:
-               ret = ath10k_htt_tx(htt, txmode, skb);
-               break;
+               return ATH10K_MAC_TX_HTT;
        case ATH10K_HW_TXRX_MGMT:
                if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
-                            ar->fw_features))
-                       ret = ath10k_mac_tx_wmi_mgmt(ar, skb);
+                            ar->running_fw->fw_file.fw_features))
+                       return ATH10K_MAC_TX_WMI_MGMT;
                else if (ar->htt.target_version_major >= 3)
-                       ret = ath10k_htt_tx(htt, txmode, skb);
+                       return ATH10K_MAC_TX_HTT;
                else
-                       ret = ath10k_htt_mgmt_tx(htt, skb);
+                       return ATH10K_MAC_TX_HTT_MGMT;
+       }
+
+       return ATH10K_MAC_TX_UNKNOWN;
+}
+
+static int ath10k_mac_tx_submit(struct ath10k *ar,
+                               enum ath10k_hw_txrx_mode txmode,
+                               enum ath10k_mac_tx_path txpath,
+                               struct sk_buff *skb)
+{
+       struct ath10k_htt *htt = &ar->htt;
+       int ret = -EINVAL;
+
+       switch (txpath) {
+       case ATH10K_MAC_TX_HTT:
+               ret = ath10k_htt_tx(htt, txmode, skb);
+               break;
+       case ATH10K_MAC_TX_HTT_MGMT:
+               ret = ath10k_htt_mgmt_tx(htt, skb);
+               break;
+       case ATH10K_MAC_TX_WMI_MGMT:
+               ret = ath10k_mac_tx_wmi_mgmt(ar, skb);
+               break;
+       case ATH10K_MAC_TX_UNKNOWN:
+               WARN_ON_ONCE(1);
+               ret = -EINVAL;
                break;
        }
 
@@ -3334,6 +3436,64 @@ static void ath10k_mac_tx(struct ath10k *ar, enum ath10k_hw_txrx_mode txmode,
                            ret);
                ieee80211_free_txskb(ar->hw, skb);
        }
+
+       return ret;
+}
+
+/* This function consumes the sk_buff regardless of return value as far as
+ * caller is concerned so no freeing is necessary afterwards.
+ */
+static int ath10k_mac_tx(struct ath10k *ar,
+                        struct ieee80211_vif *vif,
+                        struct ieee80211_sta *sta,
+                        enum ath10k_hw_txrx_mode txmode,
+                        enum ath10k_mac_tx_path txpath,
+                        struct sk_buff *skb)
+{
+       struct ieee80211_hw *hw = ar->hw;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       int ret;
+
+       /* We should disable CCK RATE due to P2P */
+       if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
+               ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
+
+       switch (txmode) {
+       case ATH10K_HW_TXRX_MGMT:
+       case ATH10K_HW_TXRX_NATIVE_WIFI:
+               ath10k_tx_h_nwifi(hw, skb);
+               ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
+               ath10k_tx_h_seq_no(vif, skb);
+               break;
+       case ATH10K_HW_TXRX_ETHERNET:
+               ath10k_tx_h_8023(skb);
+               break;
+       case ATH10K_HW_TXRX_RAW:
+               if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+                       WARN_ON_ONCE(1);
+                       ieee80211_free_txskb(hw, skb);
+                       return -ENOTSUPP;
+               }
+       }
+
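+       /* Offchannel frames are queued to the offchannel worker unless the
+        * fw can be handed the target frequency per frame.
+        */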
+       if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
+               if (!ath10k_mac_tx_frm_has_freq(ar)) {
+                       ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n",
+                                  skb);
+
+                       skb_queue_tail(&ar->offchan_tx_queue, skb);
+                       ieee80211_queue_work(hw, &ar->offchan_tx_work);
+                       return 0;
+               }
+       }
+
+       ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb);
+       if (ret) {
+               ath10k_warn(ar, "failed to submit frame: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
 }
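
Note the ownership rule stated above: ath10k_mac_tx() always consumes the skb
(it submits it, queues it for offchannel tx, or frees it on error), so callers
must not free it themselves. A hedged sketch of the resulting caller pattern:

	ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
	if (ret)
		ath10k_warn(ar, "failed to transmit frame: %d\n", ret);
	/* no ieee80211_free_txskb() here: ath10k_mac_tx() owns the skb */
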
 
 void ath10k_offchan_tx_purge(struct ath10k *ar)
@@ -3354,12 +3514,13 @@ void ath10k_offchan_tx_work(struct work_struct *work)
        struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work);
        struct ath10k_peer *peer;
        struct ath10k_vif *arvif;
+       enum ath10k_hw_txrx_mode txmode;
+       enum ath10k_mac_tx_path txpath;
        struct ieee80211_hdr *hdr;
        struct ieee80211_vif *vif;
        struct ieee80211_sta *sta;
        struct sk_buff *skb;
        const u8 *peer_addr;
-       enum ath10k_hw_txrx_mode txmode;
        int vdev_id;
        int ret;
        unsigned long time_left;
@@ -3396,7 +3557,8 @@ void ath10k_offchan_tx_work(struct work_struct *work)
                                   peer_addr, vdev_id);
 
                if (!peer) {
-                       ret = ath10k_peer_create(ar, vdev_id, peer_addr,
+                       ret = ath10k_peer_create(ar, NULL, NULL, vdev_id,
+                                                peer_addr,
                                                 WMI_PEER_TYPE_DEFAULT);
                        if (ret)
                                ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
@@ -3423,8 +3585,14 @@ void ath10k_offchan_tx_work(struct work_struct *work)
                }
 
                txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
+               txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
 
-               ath10k_mac_tx(ar, txmode, skb);
+               ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
+               if (ret) {
+                       ath10k_warn(ar, "failed to transmit offchannel frame: %d\n",
+                                   ret);
+                       /* not serious */
+               }
 
                time_left =
                wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
@@ -3476,6 +3644,175 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
        }
 }
 
+static void ath10k_mac_txq_init(struct ieee80211_txq *txq)
+{
+       struct ath10k_txq *artxq = (void *)txq->drv_priv;
+
+       if (!txq)
+               return;
+
+       INIT_LIST_HEAD(&artxq->list);
+}
+
+static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq)
+{
+       struct ath10k_txq *artxq = (void *)txq->drv_priv;
+       struct ath10k_skb_cb *cb;
+       struct sk_buff *msdu;
+       int msdu_id;
+
+       if (!txq)
+               return;
+
+       spin_lock_bh(&ar->txqs_lock);
+       if (!list_empty(&artxq->list))
+               list_del_init(&artxq->list);
+       spin_unlock_bh(&ar->txqs_lock);
+
+       spin_lock_bh(&ar->htt.tx_lock);
+       idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) {
+               cb = ATH10K_SKB_CB(msdu);
+               if (cb->txq == txq)
+                       cb->txq = NULL;
+       }
+       spin_unlock_bh(&ar->htt.tx_lock);
+}
+
+struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
+                                           u16 peer_id,
+                                           u8 tid)
+{
+       struct ath10k_peer *peer;
+
+       lockdep_assert_held(&ar->data_lock);
+
+       peer = ar->peer_map[peer_id];
+       if (!peer)
+               return NULL;
+
+       if (peer->sta)
+               return peer->sta->txq[tid];
+       else if (peer->vif)
+               return peer->vif->txq;
+       else
+               return NULL;
+}
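
ath10k_mac_txq_lookup() maps a firmware peer_id/tid pair back to the mac80211
software queue, preferring the per-tid station queue and falling back to the
vif queue for station-less peers. As the lockdep assertion documents, it must
run under ar->data_lock; a hypothetical caller (illustration only, not part of
this patch) would look like:

	struct ieee80211_txq *txq;

	spin_lock_bh(&ar->data_lock);
	txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
	spin_unlock_bh(&ar->data_lock);

	if (txq)
		ath10k_htt_tx_txq_update(hw, txq);
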
+
+static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw,
+                                  struct ieee80211_txq *txq)
+{
+       struct ath10k *ar = hw->priv;
+       struct ath10k_txq *artxq = (void *)txq->drv_priv;
+
+       /* No need to get locks */
+
+       if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH)
+               return true;
+
+       if (ar->htt.num_pending_tx < ar->htt.tx_q_state.num_push_allowed)
+               return true;
+
+       if (artxq->num_fw_queued < artxq->num_push_allowed)
+               return true;
+
+       return false;
+}
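
ath10k_mac_tx_can_push() is the admission check for pushing frames to the
firmware: everything is pushable while the HTT queue state is in plain push
mode, otherwise both the global (num_pending_tx vs num_push_allowed) and the
per-queue (num_fw_queued vs num_push_allowed) credit limits must have
headroom. Callers pair it with ath10k_mac_tx_push_txq(), as in this sketch
mirroring the loop in ath10k_mac_tx_push_pending() below:

	while (ath10k_mac_tx_can_push(hw, txq)) {
		ret = ath10k_mac_tx_push_txq(hw, txq);
		if (ret < 0)
			break;
	}
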
+
+int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
+                          struct ieee80211_txq *txq)
+{
+       struct ath10k *ar = hw->priv;
+       struct ath10k_htt *htt = &ar->htt;
+       struct ath10k_txq *artxq = (void *)txq->drv_priv;
+       struct ieee80211_vif *vif = txq->vif;
+       struct ieee80211_sta *sta = txq->sta;
+       enum ath10k_hw_txrx_mode txmode;
+       enum ath10k_mac_tx_path txpath;
+       struct sk_buff *skb;
+       size_t skb_len;
+       int ret;
+
+       spin_lock_bh(&ar->htt.tx_lock);
+       ret = ath10k_htt_tx_inc_pending(htt);
+       spin_unlock_bh(&ar->htt.tx_lock);
+
+       if (ret)
+               return ret;
+
+       skb = ieee80211_tx_dequeue(hw, txq);
+       if (!skb) {
+               spin_lock_bh(&ar->htt.tx_lock);
+               ath10k_htt_tx_dec_pending(htt);
+               spin_unlock_bh(&ar->htt.tx_lock);
+
+               return -ENOENT;
+       }
+
+       ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
+
+       skb_len = skb->len;
+       txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
+       txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
+
+       ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
+       if (unlikely(ret)) {
+               ath10k_warn(ar, "failed to push frame: %d\n", ret);
+
+               spin_lock_bh(&ar->htt.tx_lock);
+               ath10k_htt_tx_dec_pending(htt);
+               spin_unlock_bh(&ar->htt.tx_lock);
+
+               return ret;
+       }
+
+       spin_lock_bh(&ar->htt.tx_lock);
+       artxq->num_fw_queued++;
+       spin_unlock_bh(&ar->htt.tx_lock);
+
+       return skb_len;
+}
+
+void ath10k_mac_tx_push_pending(struct ath10k *ar)
+{
+       struct ieee80211_hw *hw = ar->hw;
+       struct ieee80211_txq *txq;
+       struct ath10k_txq *artxq;
+       struct ath10k_txq *last;
+       int ret;
+       int max;
+
+       spin_lock_bh(&ar->txqs_lock);
+       rcu_read_lock();
+
+       last = list_last_entry(&ar->txqs, struct ath10k_txq, list);
+       while (!list_empty(&ar->txqs)) {
+               artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
+               txq = container_of((void *)artxq, struct ieee80211_txq,
+                                  drv_priv);
+
+               /* Prevent an aggressive sta/tid from taking over the tx queue */
+               max = 16;
+               ret = 0;
+               while (ath10k_mac_tx_can_push(hw, txq) && max--) {
+                       ret = ath10k_mac_tx_push_txq(hw, txq);
+                       if (ret < 0)
+                               break;
+               }
+
+               list_del_init(&artxq->list);
+               if (ret != -ENOENT)
+                       list_add_tail(&artxq->list, &ar->txqs);
+
+               ath10k_htt_tx_txq_update(hw, txq);
+
+               if (artxq == last || (ret < 0 && ret != -ENOENT))
+                       break;
+       }
+
+       rcu_read_unlock();
+       spin_unlock_bh(&ar->txqs_lock);
+}
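
The drain loop above is a simple round-robin scheduler: each queue may push at
most 16 frames per pass so a single aggressive sta/tid cannot starve the rest,
an empty queue (ret == -ENOENT) drops out of the rotation, and any other queue
is re-appended to the tail of ar->txqs. Consumers just kick the scheduler once
credits free up, for example (assumed call site, for illustration):

	/* e.g. at the end of a tx completion handler */
	ath10k_mac_tx_push_pending(ar);
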
+
 /************/
 /* Scanning */
 /************/
@@ -3531,7 +3868,7 @@ static int ath10k_scan_stop(struct ath10k *ar)
                goto out;
        }
 
-       ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ);
+       ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ);
        if (ret == 0) {
                ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n");
                ret = -ETIMEDOUT;
@@ -3611,7 +3948,7 @@ static int ath10k_start_scan(struct ath10k *ar,
        if (ret)
                return ret;
 
-       ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ);
+       ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ);
        if (ret == 0) {
                ret = ath10k_scan_stop(ar);
                if (ret)
@@ -3638,66 +3975,86 @@ static int ath10k_start_scan(struct ath10k *ar,
 /* mac80211 callbacks */
 /**********************/
 
-static void ath10k_tx(struct ieee80211_hw *hw,
-                     struct ieee80211_tx_control *control,
-                     struct sk_buff *skb)
+static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
+                            struct ieee80211_tx_control *control,
+                            struct sk_buff *skb)
 {
        struct ath10k *ar = hw->priv;
-       struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+       struct ath10k_htt *htt = &ar->htt;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_vif *vif = info->control.vif;
        struct ieee80211_sta *sta = control->sta;
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct ieee80211_txq *txq = NULL;
+       struct ieee80211_hdr *hdr = (void *)skb->data;
        enum ath10k_hw_txrx_mode txmode;
+       enum ath10k_mac_tx_path txpath;
+       bool is_htt;
+       bool is_mgmt;
+       bool is_presp;
+       int ret;
 
-       /* We should disable CCK RATE due to P2P */
-       if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
-               ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
+       ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
 
        txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
+       txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
+       is_htt = (txpath == ATH10K_MAC_TX_HTT ||
+                 txpath == ATH10K_MAC_TX_HTT_MGMT);
+       is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
 
-       skb_cb->flags = 0;
-       if (!ath10k_tx_h_use_hwcrypto(vif, skb))
-               skb_cb->flags |= ATH10K_SKB_F_NO_HWCRYPT;
-
-       if (ieee80211_is_mgmt(hdr->frame_control))
-               skb_cb->flags |= ATH10K_SKB_F_MGMT;
+       if (is_htt) {
+               spin_lock_bh(&ar->htt.tx_lock);
+               is_presp = ieee80211_is_probe_resp(hdr->frame_control);
 
-       if (ieee80211_is_data_qos(hdr->frame_control))
-               skb_cb->flags |= ATH10K_SKB_F_QOS;
-
-       skb_cb->vif = vif;
+               ret = ath10k_htt_tx_inc_pending(htt);
+               if (ret) {
+                       ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n",
+                                   ret);
+                       spin_unlock_bh(&ar->htt.tx_lock);
+                       ieee80211_free_txskb(ar->hw, skb);
+                       return;
+               }
 
-       switch (txmode) {
-       case ATH10K_HW_TXRX_MGMT:
-       case ATH10K_HW_TXRX_NATIVE_WIFI:
-               ath10k_tx_h_nwifi(hw, skb);
-               ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
-               ath10k_tx_h_seq_no(vif, skb);
-               break;
-       case ATH10K_HW_TXRX_ETHERNET:
-               ath10k_tx_h_8023(skb);
-               break;
-       case ATH10K_HW_TXRX_RAW:
-               if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
-                       WARN_ON_ONCE(1);
-                       ieee80211_free_txskb(hw, skb);
+               ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
+               if (ret) {
+                       ath10k_dbg(ar, ATH10K_DBG_MAC, "failed to increase tx mgmt pending count: %d, dropping\n",
+                                  ret);
+                       ath10k_htt_tx_dec_pending(htt);
+                       spin_unlock_bh(&ar->htt.tx_lock);
+                       ieee80211_free_txskb(ar->hw, skb);
                        return;
                }
+               spin_unlock_bh(&ar->htt.tx_lock);
        }
 
-       if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
-               if (!ath10k_mac_tx_frm_has_freq(ar)) {
-                       ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n",
-                                  skb);
-
-                       skb_queue_tail(&ar->offchan_tx_queue, skb);
-                       ieee80211_queue_work(hw, &ar->offchan_tx_work);
-                       return;
+       ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
+       if (ret) {
+               ath10k_warn(ar, "failed to transmit frame: %d\n", ret);
+               if (is_htt) {
+                       spin_lock_bh(&ar->htt.tx_lock);
+                       ath10k_htt_tx_dec_pending(htt);
+                       if (is_mgmt)
+                               ath10k_htt_tx_mgmt_dec_pending(htt);
+                       spin_unlock_bh(&ar->htt.tx_lock);
                }
+               return;
        }
+}
+
+static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
+                                       struct ieee80211_txq *txq)
+{
+       struct ath10k *ar = hw->priv;
+       struct ath10k_txq *artxq = (void *)txq->drv_priv;
+
+       spin_lock_bh(&ar->txqs_lock);
+       if (list_empty(&artxq->list))
+               list_add_tail(&artxq->list, &ar->txqs);
+       spin_unlock_bh(&ar->txqs_lock);
 
-       ath10k_mac_tx(ar, txmode, skb);
+       if (ath10k_mac_tx_can_push(hw, txq))
+               tasklet_schedule(&ar->htt.txrx_compl_task);
+
+       ath10k_htt_tx_txq_update(hw, txq);
 }
 
 /* Must not be called with conf_mutex held as workers can use that also. */
@@ -3919,14 +4276,14 @@ static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar)
        vht_cap = ath10k_create_vht_cap(ar);
 
        if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
-               band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
+               band = &ar->mac.sbands[NL80211_BAND_2GHZ];
                band->ht_cap = ht_cap;
 
                /* Enable the VHT support at 2.4 GHz */
                band->vht_cap = vht_cap;
        }
        if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
-               band = &ar->mac.sbands[IEEE80211_BAND_5GHZ];
+               band = &ar->mac.sbands[NL80211_BAND_5GHZ];
                band->ht_cap = ht_cap;
                band->vht_cap = vht_cap;
        }
@@ -4021,7 +4378,8 @@ static int ath10k_start(struct ieee80211_hw *hw)
                goto err_off;
        }
 
-       ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL);
+       ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL,
+                               &ar->normal_mode_fw);
        if (ret) {
                ath10k_err(ar, "Could not init core: %d\n", ret);
                goto err_power_down;
@@ -4079,7 +4437,7 @@ static int ath10k_start(struct ieee80211_hw *hw)
        }
 
        if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA,
-                    ar->fw_features)) {
+                    ar->running_fw->fw_file.fw_features)) {
                ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, 1,
                                                          WMI_CCA_DETECT_LEVEL_AUTO,
                                                          WMI_CCA_DETECT_MARGIN_AUTO);
@@ -4100,7 +4458,7 @@ static int ath10k_start(struct ieee80211_hw *hw)
 
        ar->ani_enabled = true;
 
-       if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) {
+       if (ath10k_peer_stats_enabled(ar)) {
                param = ar->wmi.pdev_param->peer_stats_update_period;
                ret = ath10k_wmi_pdev_set_param(ar, param,
                                                PEER_DEFAULT_STATS_UPDATE_PERIOD);
@@ -4313,6 +4671,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
 {
        struct ath10k *ar = hw->priv;
        struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_peer *peer;
        enum wmi_sta_powersave_param param;
        int ret = 0;
        u32 value;
@@ -4325,6 +4684,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
        mutex_lock(&ar->conf_mutex);
 
        memset(arvif, 0, sizeof(*arvif));
+       ath10k_mac_txq_init(vif->txq);
 
        arvif->ar = ar;
        arvif->vif = vif;
@@ -4489,7 +4849,10 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
                goto err_vdev_delete;
        }
 
-       if (ar->cfg_tx_chainmask) {
+       /* Configuring the number of spatial streams for a monitor interface
+        * causes a target assert in qca9888 and qca6174.
+        */
+       if (ar->cfg_tx_chainmask && (vif->type != NL80211_IFTYPE_MONITOR)) {
                u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask);
 
                vdev_param = ar->wmi.vdev_param->nss;
@@ -4505,13 +4868,31 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
 
        if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
            arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
-               ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr,
-                                        WMI_PEER_TYPE_DEFAULT);
+               ret = ath10k_peer_create(ar, vif, NULL, arvif->vdev_id,
+                                        vif->addr, WMI_PEER_TYPE_DEFAULT);
                if (ret) {
                        ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n",
                                    arvif->vdev_id, ret);
                        goto err_vdev_delete;
                }
+
+               spin_lock_bh(&ar->data_lock);
+
+               peer = ath10k_peer_find(ar, arvif->vdev_id, vif->addr);
+               if (!peer) {
+                       ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
+                                   vif->addr, arvif->vdev_id);
+                       spin_unlock_bh(&ar->data_lock);
+                       ret = -ENOENT;
+                       goto err_peer_delete;
+               }
+
+               arvif->peer_id = find_first_bit(peer->peer_ids,
+                                               ATH10K_MAX_NUM_PEER_IDS);
+
+               spin_unlock_bh(&ar->data_lock);
+       } else {
+               arvif->peer_id = HTT_INVALID_PEERID;
        }
 
        if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
@@ -4622,7 +5003,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
 {
        struct ath10k *ar = hw->priv;
        struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_peer *peer;
        int ret;
+       int i;
 
        cancel_work_sync(&arvif->ap_csa_work);
        cancel_delayed_work_sync(&arvif->connection_loss_work);
@@ -4676,7 +5059,22 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
                spin_unlock_bh(&ar->data_lock);
        }
 
+       spin_lock_bh(&ar->data_lock);
+       for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
+               peer = ar->peer_map[i];
+               if (!peer)
+                       continue;
+
+               if (peer->vif == vif) {
+                       ath10k_warn(ar, "found vif peer %pM entry on vdev %i after it was supposedly removed\n",
+                                   vif->addr, arvif->vdev_id);
+                       peer->vif = NULL;
+               }
+       }
+       spin_unlock_bh(&ar->data_lock);
+
        ath10k_peer_cleanup(ar, arvif->vdev_id);
+       ath10k_mac_txq_unref(ar, vif->txq);
 
        if (vif->type == NL80211_IFTYPE_MONITOR) {
                ar->monitor_arvif = NULL;
@@ -4689,6 +5087,8 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
        ath10k_mac_vif_tx_unlock_all(arvif);
        spin_unlock_bh(&ar->htt.tx_lock);
 
+       ath10k_mac_txq_unref(ar, vif->txq);
+
        mutex_unlock(&ar->conf_mutex);
 }
 
@@ -5218,7 +5618,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
        struct ath10k_sta *arsta;
        struct ieee80211_sta *sta;
        struct cfg80211_chan_def def;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        const u8 *ht_mcs_mask;
        const u16 *vht_mcs_mask;
        u32 changed, bw, nss, smps;
@@ -5393,13 +5793,18 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
        struct ath10k *ar = hw->priv;
        struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
        struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+       struct ath10k_peer *peer;
        int ret = 0;
+       int i;
 
        if (old_state == IEEE80211_STA_NOTEXIST &&
            new_state == IEEE80211_STA_NONE) {
                memset(arsta, 0, sizeof(*arsta));
                arsta->arvif = arvif;
                INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
+
+               for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+                       ath10k_mac_txq_init(sta->txq[i]);
        }
 
        /* cancel must be done outside the mutex to avoid deadlock */
@@ -5434,8 +5839,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
                if (sta->tdls)
                        peer_type = WMI_PEER_TYPE_TDLS;
 
-               ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr,
-                                        peer_type);
+               ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id,
+                                        sta->addr, peer_type);
                if (ret) {
                        ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
                                    sta->addr, arvif->vdev_id, ret);
@@ -5443,6 +5848,24 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
                        goto exit;
                }
 
+               spin_lock_bh(&ar->data_lock);
+
+               peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
+               if (!peer) {
+                       ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
+                                   sta->addr, arvif->vdev_id);
+                       spin_unlock_bh(&ar->data_lock);
+                       ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
+                       ath10k_mac_dec_num_stations(arvif, sta);
+                       ret = -ENOENT;
+                       goto exit;
+               }
+
+               arsta->peer_id = find_first_bit(peer->peer_ids,
+                                               ATH10K_MAX_NUM_PEER_IDS);
+
+               spin_unlock_bh(&ar->data_lock);
+
                if (!sta->tdls)
                        goto exit;
 
@@ -5505,6 +5928,23 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
 
                ath10k_mac_dec_num_stations(arvif, sta);
 
+               spin_lock_bh(&ar->data_lock);
+               for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
+                       peer = ar->peer_map[i];
+                       if (!peer)
+                               continue;
+
+                       if (peer->sta == sta) {
+                               ath10k_warn(ar, "found sta peer %pM entry on vdev %i after it was supposedly removed\n",
+                                           sta->addr, arvif->vdev_id);
+                               peer->sta = NULL;
+                       }
+               }
+               spin_unlock_bh(&ar->data_lock);
+
+               for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+                       ath10k_mac_txq_unref(ar, sta->txq[i]);
+
                if (!sta->tdls)
                        goto exit;
 
@@ -5751,7 +6191,7 @@ exit:
        return ret;
 }
 
-#define ATH10K_ROC_TIMEOUT_HZ (2*HZ)
+#define ATH10K_ROC_TIMEOUT_HZ (2 * HZ)
 
 static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
                                    struct ieee80211_vif *vif,
@@ -5815,7 +6255,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
                goto exit;
        }
 
-       ret = wait_for_completion_timeout(&ar->scan.on_channel, 3*HZ);
+       ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ);
        if (ret == 0) {
                ath10k_warn(ar, "failed to switch to channel for roc scan\n");
 
@@ -5977,14 +6417,14 @@ static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
 
        mutex_lock(&ar->conf_mutex);
 
-       sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+       sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
        if (sband && idx >= sband->n_channels) {
                idx -= sband->n_channels;
                sband = NULL;
        }
 
        if (!sband)
-               sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];
+               sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
 
        if (!sband || idx >= sband->n_channels) {
                ret = -ENOENT;
@@ -6007,7 +6447,7 @@ exit:
 
 static bool
 ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
-                                       enum ieee80211_band band,
+                                       enum nl80211_band band,
                                        const struct cfg80211_bitrate_mask *mask)
 {
        int num_rates = 0;
@@ -6026,7 +6466,7 @@ ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
 
 static bool
 ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
-                                      enum ieee80211_band band,
+                                      enum nl80211_band band,
                                       const struct cfg80211_bitrate_mask *mask,
                                       int *nss)
 {
@@ -6075,7 +6515,7 @@ ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
 
 static int
 ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
-                                       enum ieee80211_band band,
+                                       enum nl80211_band band,
                                        const struct cfg80211_bitrate_mask *mask,
                                        u8 *rate, u8 *nss)
 {
@@ -6176,7 +6616,7 @@ static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif,
 
 static bool
 ath10k_mac_can_set_bitrate_mask(struct ath10k *ar,
-                               enum ieee80211_band band,
+                               enum nl80211_band band,
                                const struct cfg80211_bitrate_mask *mask)
 {
        int i;
@@ -6228,7 +6668,7 @@ static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
        struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
        struct cfg80211_chan_def def;
        struct ath10k *ar = arvif->ar;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        const u8 *ht_mcs_mask;
        const u16 *vht_mcs_mask;
        u8 rate;
@@ -6379,6 +6819,32 @@ static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
        return 0;
 }
 
+static void ath10k_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                          u64 tsf)
+{
+       struct ath10k *ar = hw->priv;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       u32 tsf_offset, vdev_param = ar->wmi.vdev_param->set_tsf;
+       int ret;
+
+       /* Workaround:
+        *
+        * The given tsf argument is the entire TSF value, but the firmware
+        * accepts only a TSF offset relative to the current TSF.
+        *
+        * get_tsf is used to obtain the offset value; however, since
+        * ath10k_get_tsf is not implemented properly, it always returns 0.
+        * Luckily all callers of set_tsf, as of now, also rely on get_tsf
+        * to build the entire tsf value as get_tsf() + tsf_delta, so the
+        * final tsf offset sent to the firmware is arithmetically correct.
+        */
+       tsf_offset = tsf - ath10k_get_tsf(hw, vif);
+       ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+                                       vdev_param, tsf_offset);
+       if (ret && ret != -EOPNOTSUPP)
+               ath10k_warn(ar, "failed to set tsf offset: %d\n", ret);
+}
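
The workaround holds together by cancellation: because ath10k_get_tsf()
currently always returns 0, a caller that builds an absolute TSF as
get_tsf() + delta effectively passes the delta itself, and the subtraction
above hands that same delta to the firmware. A worked example with
hypothetical numbers:

	/* caller wants to advance the TSF by 1000 us */
	u64 tsf = ath10k_get_tsf(hw, vif) + 1000;	/* 0 + 1000 = 1000 */
	ath10k_set_tsf(hw, vif, tsf);			/* offset = 1000 - 0 = 1000 */
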
+
 static int ath10k_ampdu_action(struct ieee80211_hw *hw,
                               struct ieee80211_vif *vif,
                               struct ieee80211_ampdu_params *params)
@@ -6450,7 +6916,13 @@ ath10k_mac_update_rx_channel(struct ath10k *ar,
                        def = &vifs[0].new_ctx->def;
 
                ar->rx_channel = def->chan;
-       } else if (ctx && ath10k_mac_num_chanctxs(ar) == 0) {
+       } else if ((ctx && ath10k_mac_num_chanctxs(ar) == 0) ||
+                  (ctx && (ar->state == ATH10K_STATE_RESTARTED))) {
+               /* During a driver restart due to a firmware assert, mac80211
+                * already has a valid channel context for the given radio, so
+                * the channel context iteration returns num_chanctx > 0. Fix
+                * up rx_channel accordingly while the restart is in progress.
+                */
                ar->rx_channel = ctx->def.chan;
        } else {
                ar->rx_channel = NULL;
@@ -6807,7 +7279,8 @@ ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
 }
 
 static const struct ieee80211_ops ath10k_ops = {
-       .tx                             = ath10k_tx,
+       .tx                             = ath10k_mac_op_tx,
+       .wake_tx_queue                  = ath10k_mac_op_wake_tx_queue,
        .start                          = ath10k_start,
        .stop                           = ath10k_stop,
        .config                         = ath10k_config,
@@ -6834,6 +7307,7 @@ static const struct ieee80211_ops ath10k_ops = {
        .set_bitrate_mask               = ath10k_mac_op_set_bitrate_mask,
        .sta_rc_update                  = ath10k_sta_rc_update,
        .get_tsf                        = ath10k_get_tsf,
+       .set_tsf                        = ath10k_set_tsf,
        .ampdu_action                   = ath10k_ampdu_action,
        .get_et_sset_count              = ath10k_debug_get_et_sset_count,
        .get_et_stats                   = ath10k_debug_get_et_stats,
@@ -6857,7 +7331,7 @@ static const struct ieee80211_ops ath10k_ops = {
 };
 
 #define CHAN2G(_channel, _freq, _flags) { \
-       .band                   = IEEE80211_BAND_2GHZ, \
+       .band                   = NL80211_BAND_2GHZ, \
        .hw_value               = (_channel), \
        .center_freq            = (_freq), \
        .flags                  = (_flags), \
@@ -6866,7 +7340,7 @@ static const struct ieee80211_ops ath10k_ops = {
 }
 
 #define CHAN5G(_channel, _freq, _flags) { \
-       .band                   = IEEE80211_BAND_5GHZ, \
+       .band                   = NL80211_BAND_5GHZ, \
        .hw_value               = (_channel), \
        .center_freq            = (_freq), \
        .flags                  = (_flags), \
@@ -7186,13 +7660,13 @@ int ath10k_mac_register(struct ath10k *ar)
                        goto err_free;
                }
 
-               band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
+               band = &ar->mac.sbands[NL80211_BAND_2GHZ];
                band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
                band->channels = channels;
                band->n_bitrates = ath10k_g_rates_size;
                band->bitrates = ath10k_g_rates;
 
-               ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band;
+               ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
        }
 
        if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
@@ -7204,12 +7678,12 @@ int ath10k_mac_register(struct ath10k *ar)
                        goto err_free;
                }
 
-               band = &ar->mac.sbands[IEEE80211_BAND_5GHZ];
+               band = &ar->mac.sbands[NL80211_BAND_5GHZ];
                band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels);
                band->channels = channels;
                band->n_bitrates = ath10k_a_rates_size;
                band->bitrates = ath10k_a_rates;
-               ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = band;
+               ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
        }
 
        ath10k_mac_setup_ht_vht_cap(ar);
@@ -7222,7 +7696,7 @@ int ath10k_mac_register(struct ath10k *ar)
        ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask;
        ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask;
 
-       if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->fw_features))
+       if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->normal_mode_fw.fw_file.fw_features))
                ar->hw->wiphy->interface_modes |=
                        BIT(NL80211_IFTYPE_P2P_DEVICE) |
                        BIT(NL80211_IFTYPE_P2P_CLIENT) |
@@ -7262,6 +7736,7 @@ int ath10k_mac_register(struct ath10k *ar)
 
        ar->hw->vif_data_size = sizeof(struct ath10k_vif);
        ar->hw->sta_data_size = sizeof(struct ath10k_sta);
+       ar->hw->txq_data_size = sizeof(struct ath10k_txq);
 
        ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
 
@@ -7286,7 +7761,8 @@ int ath10k_mac_register(struct ath10k *ar)
        ar->hw->wiphy->max_remain_on_channel_duration = 5000;
 
        ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
-       ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
+       ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
+                                  NL80211_FEATURE_AP_SCAN;
 
        ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
 
@@ -7310,7 +7786,7 @@ int ath10k_mac_register(struct ath10k *ar)
         */
        ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;
 
-       switch (ar->wmi.op_version) {
+       switch (ar->running_fw->fw_file.wmi_op_version) {
        case ATH10K_FW_WMI_OP_VERSION_MAIN:
                ar->hw->wiphy->iface_combinations = ath10k_if_comb;
                ar->hw->wiphy->n_iface_combinations =
@@ -7395,8 +7871,8 @@ err_dfs_detector_exit:
                ar->dfs_detector->exit(ar->dfs_detector);
 
 err_free:
-       kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels);
-       kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels);
+       kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+       kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
 
        SET_IEEE80211_DEV(ar->hw, NULL);
        return ret;
@@ -7409,8 +7885,8 @@ void ath10k_mac_unregister(struct ath10k *ar)
        if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
                ar->dfs_detector->exit(ar->dfs_detector);
 
-       kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels);
-       kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels);
+       kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+       kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
 
        SET_IEEE80211_DEV(ar->hw, NULL);
 }
index 53091588090d0c5645989a347c1d5a95a5033ecb..1bd29ecfcdcc913ff8d3e447eb0d85c4d3c56ec2 100644 (file)
@@ -75,6 +75,13 @@ void ath10k_mac_tx_unlock(struct ath10k *ar, int reason);
 void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason);
 void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason);
 bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar);
+void ath10k_mac_tx_push_pending(struct ath10k *ar);
+int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
+                          struct ieee80211_txq *txq);
+struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
+                                           u16 peer_id,
+                                           u8 tid);
+int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val);
 
 static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
 {
index b3cff1d3364aad85151b4abd54d05bee0d2e02a3..8133d7b5b95647a43ab6bd5513dffe796cd0c2f9 100644 (file)
 #include "ce.h"
 #include "pci.h"
 
-enum ath10k_pci_irq_mode {
-       ATH10K_PCI_IRQ_AUTO = 0,
-       ATH10K_PCI_IRQ_LEGACY = 1,
-       ATH10K_PCI_IRQ_MSI = 2,
-};
-
 enum ath10k_pci_reset_mode {
        ATH10K_PCI_RESET_AUTO = 0,
        ATH10K_PCI_RESET_WARM_ONLY = 1,
@@ -745,10 +739,7 @@ static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 
-       if (ar_pci->num_msi_intrs > 1)
-               return "msi-x";
-
-       if (ar_pci->num_msi_intrs == 1)
+       if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI)
                return "msi";
 
        return "legacy";
@@ -809,7 +800,8 @@ static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
        spin_lock_bh(&ar_pci->ce_lock);
        num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
        spin_unlock_bh(&ar_pci->ce_lock);
-       while (num--) {
+
+       while (num >= 0) {
                ret = __ath10k_pci_rx_post_buf(pipe);
                if (ret) {
                        if (ret == -ENOSPC)
@@ -819,6 +811,7 @@ static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
                                  ATH10K_PCI_RX_POST_RETRY_MS);
                        break;
                }
+               num--;
        }
 }
 
@@ -870,10 +863,8 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
-       u32 buf;
+       u32 *buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
-       unsigned int id;
-       unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        /* Host buffer address in CE space */
        u32 ce_data;
@@ -909,7 +900,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
                nbytes = min_t(unsigned int, remaining_bytes,
                               DIAG_TRANSFER_LIMIT);
 
-               ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
+               ret = __ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
                if (ret != 0)
                        goto done;
 
@@ -940,9 +931,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
                }
 
                i = 0;
-               while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
-                                                           &completed_nbytes,
-                                                           &id, &flags) != 0) {
+               while (ath10k_ce_completed_recv_next_nolock(ce_diag,
+                                                           (void **)&buf,
+                                                           &completed_nbytes)
+                                                               != 0) {
                        mdelay(1);
 
                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
@@ -956,7 +948,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
                        goto done;
                }
 
-               if (buf != ce_data) {
+               if (*buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }
@@ -1026,10 +1018,8 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
-       u32 buf;
+       u32 *buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
-       unsigned int id;
-       unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        void *data_buf = NULL;
        u32 ce_data;    /* Host buffer address in CE space */
@@ -1078,7 +1068,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
                nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
 
                /* Set up to receive directly into Target(!) address */
-               ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address);
+               ret = __ath10k_ce_rx_post_buf(ce_diag, &address, address);
                if (ret != 0)
                        goto done;
 
@@ -1103,9 +1093,10 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
                }
 
                i = 0;
-               while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
-                                                           &completed_nbytes,
-                                                           &id, &flags) != 0) {
+               while (ath10k_ce_completed_recv_next_nolock(ce_diag,
+                                                           (void **)&buf,
+                                                           &completed_nbytes)
+                                                               != 0) {
                        mdelay(1);
 
                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
@@ -1119,7 +1110,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
                        goto done;
                }
 
-               if (buf != address) {
+               if (*buf != address) {
                        ret = -EIO;
                        goto done;
                }
@@ -1181,15 +1172,11 @@ static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
        struct sk_buff *skb;
        struct sk_buff_head list;
        void *transfer_context;
-       u32 ce_data;
        unsigned int nbytes, max_nbytes;
-       unsigned int transfer_id;
-       unsigned int flags;
 
        __skb_queue_head_init(&list);
        while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
-                                            &ce_data, &nbytes, &transfer_id,
-                                            &flags) == 0) {
+                                            &nbytes) == 0) {
                skb = transfer_context;
                max_nbytes = skb->len + skb_tailroom(skb);
                dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
@@ -1218,6 +1205,63 @@ static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
        ath10k_pci_rx_post_pipe(pipe_info);
 }
 
+static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
+                                        void (*callback)(struct ath10k *ar,
+                                                         struct sk_buff *skb))
+{
+       struct ath10k *ar = ce_state->ar;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
+       struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl;
+       struct sk_buff *skb;
+       struct sk_buff_head list;
+       void *transfer_context;
+       unsigned int nbytes, max_nbytes, nentries;
+       int orig_len;
+
+       /* No need to acquire ce_lock for CE5, since this is the only place CE5
+        * is processed other than init and deinit. Before releasing CE5
+        * buffers, interrupts are disabled. Thus CE5 access is serialized.
+        */
+       __skb_queue_head_init(&list);
+       while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context,
+                                                   &nbytes) == 0) {
+               skb = transfer_context;
+               max_nbytes = skb->len + skb_tailroom(skb);
+
+               if (unlikely(max_nbytes < nbytes)) {
+                       ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
+                                   nbytes, max_nbytes);
+                       continue;
+               }
+
+               dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+                                       max_nbytes, DMA_FROM_DEVICE);
+               skb_put(skb, nbytes);
+               __skb_queue_tail(&list, skb);
+       }
+
+       nentries = skb_queue_len(&list);
+       while ((skb = __skb_dequeue(&list))) {
+               ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
+                          ce_state->id, skb->len);
+               ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
+                               skb->data, skb->len);
+
+               orig_len = skb->len;
+               callback(ar, skb);
+               skb_push(skb, orig_len - skb->len);
+               skb_reset_tail_pointer(skb);
+               skb_trim(skb, 0);
+
+               /* let the device gain the buffer again */
+               dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+                                          skb->len + skb_tailroom(skb),
+                                          DMA_FROM_DEVICE);
+       }
+       ath10k_ce_rx_update_write_idx(ce_pipe, nentries);
+}
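
The CE5 fast path above recycles its DMA buffers in place instead of unmapping
them and posting fresh skbs: each buffer is synced for the CPU, delivered
synchronously through the callback, restored to an empty mapped state, synced
back for the device, and finally the ring write index is bumped once for all
processed entries. An annotated restatement of the recycle sequence (the
comments are editorial, the calls match the hunk above):

	orig_len = skb->len;
	callback(ar, skb);			/* consumes the data in place */
	skb_push(skb, orig_len - skb->len);	/* undo any header pulls */
	skb_reset_tail_pointer(skb);
	skb_trim(skb, 0);			/* len = 0, buffer reusable */
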
+
 /* Called by lower (CE) layer when data is received from the Target. */
 static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
 {
@@ -1274,7 +1318,7 @@ static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
         */
        ath10k_ce_per_engine_service(ce_state->ar, 4);
 
-       ath10k_pci_process_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
+       ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
 }
 
 int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
@@ -1449,13 +1493,8 @@ void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
 void ath10k_pci_kill_tasklet(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       int i;
 
        tasklet_kill(&ar_pci->intr_tq);
-       tasklet_kill(&ar_pci->msi_fw_err);
-
-       for (i = 0; i < CE_COUNT; i++)
-               tasklet_kill(&ar_pci->pipe_info[i].intr);
 
        del_timer_sync(&ar_pci->rx_post_retry);
 }
@@ -1571,10 +1610,8 @@ static void ath10k_pci_irq_disable(struct ath10k *ar)
 static void ath10k_pci_irq_sync(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       int i;
 
-       for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
-               synchronize_irq(ar_pci->pdev->irq + i);
+       synchronize_irq(ar_pci->pdev->irq);
 }
 
 static void ath10k_pci_irq_enable(struct ath10k *ar)
@@ -1835,13 +1872,10 @@ static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
 {
        struct ath10k *ar = ce_state->ar;
        struct bmi_xfer *xfer;
-       u32 ce_data;
        unsigned int nbytes;
-       unsigned int transfer_id;
-       unsigned int flags;
 
-       if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
-                                         &nbytes, &transfer_id, &flags))
+       if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer,
+                                         &nbytes))
                return;
 
        if (WARN_ON_ONCE(!xfer))
@@ -2546,65 +2580,6 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
 #endif
 };
 
-static void ath10k_pci_ce_tasklet(unsigned long ptr)
-{
-       struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
-       struct ath10k_pci *ar_pci = pipe->ar_pci;
-
-       ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
-}
-
-static void ath10k_msi_err_tasklet(unsigned long data)
-{
-       struct ath10k *ar = (struct ath10k *)data;
-
-       if (!ath10k_pci_has_fw_crashed(ar)) {
-               ath10k_warn(ar, "received unsolicited fw crash interrupt\n");
-               return;
-       }
-
-       ath10k_pci_irq_disable(ar);
-       ath10k_pci_fw_crashed_clear(ar);
-       ath10k_pci_fw_crashed_dump(ar);
-}
-
-/*
- * Handler for a per-engine interrupt on a PARTICULAR CE.
- * This is used in cases where each CE has a private MSI interrupt.
- */
-static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
-{
-       struct ath10k *ar = arg;
-       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
-
-       if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
-               ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
-                           ce_id);
-               return IRQ_HANDLED;
-       }
-
-       /*
-        * NOTE: We are able to derive ce_id from irq because we
-        * use a one-to-one mapping for CE's 0..5.
-        * CE's 6 & 7 do not use interrupts at all.
-        *
-        * This mapping must be kept in sync with the mapping
-        * used by firmware.
-        */
-       tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
-       return IRQ_HANDLED;
-}
-
-static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
-{
-       struct ath10k *ar = arg;
-       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-
-       tasklet_schedule(&ar_pci->msi_fw_err);
-       return IRQ_HANDLED;
-}
-
 /*
  * Top-level interrupt handler for all PCI interrupts from a Target.
  * When a block of MSI interrupts is allocated, this top-level handler
@@ -2622,7 +2597,7 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
                return IRQ_NONE;
        }
 
-       if (ar_pci->num_msi_intrs == 0) {
+       if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) {
                if (!ath10k_pci_irq_pending(ar))
                        return IRQ_NONE;
 
@@ -2649,43 +2624,10 @@ static void ath10k_pci_tasklet(unsigned long data)
        ath10k_ce_per_engine_service_any(ar);
 
        /* Re-enable legacy irq that was disabled in the irq handler */
-       if (ar_pci->num_msi_intrs == 0)
+       if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
                ath10k_pci_enable_legacy_irq(ar);
 }
 
-static int ath10k_pci_request_irq_msix(struct ath10k *ar)
-{
-       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       int ret, i;
-
-       ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
-                         ath10k_pci_msi_fw_handler,
-                         IRQF_SHARED, "ath10k_pci", ar);
-       if (ret) {
-               ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n",
-                           ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
-               return ret;
-       }
-
-       for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
-               ret = request_irq(ar_pci->pdev->irq + i,
-                                 ath10k_pci_per_engine_handler,
-                                 IRQF_SHARED, "ath10k_pci", ar);
-               if (ret) {
-                       ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n",
-                                   ar_pci->pdev->irq + i, ret);
-
-                       for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
-                               free_irq(ar_pci->pdev->irq + i, ar);
-
-                       free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
-                       return ret;
-               }
-       }
-
-       return 0;
-}
-
 static int ath10k_pci_request_irq_msi(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -2724,41 +2666,28 @@ static int ath10k_pci_request_irq(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 
-       switch (ar_pci->num_msi_intrs) {
-       case 0:
+       switch (ar_pci->oper_irq_mode) {
+       case ATH10K_PCI_IRQ_LEGACY:
                return ath10k_pci_request_irq_legacy(ar);
-       case 1:
+       case ATH10K_PCI_IRQ_MSI:
                return ath10k_pci_request_irq_msi(ar);
        default:
-               return ath10k_pci_request_irq_msix(ar);
+               return -EINVAL;
        }
 }
 
 static void ath10k_pci_free_irq(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       int i;
 
-       /* There's at least one interrupt irregardless whether its legacy INTR
-        * or MSI or MSI-X */
-       for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
-               free_irq(ar_pci->pdev->irq + i, ar);
+       free_irq(ar_pci->pdev->irq, ar);
 }
 
 void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       int i;
 
        tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
-       tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
-                    (unsigned long)ar);
-
-       for (i = 0; i < CE_COUNT; i++) {
-               ar_pci->pipe_info[i].ar_pci = ar_pci;
-               tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
-                            (unsigned long)&ar_pci->pipe_info[i]);
-       }
 }
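
With MSI-X support removed there is only ever one interrupt vector, so the
per-CE tasklets and the MSI firmware-error tasklet are gone and the single
intr_tq tasklet services every copy engine. Conceptually the interrupt path
now reduces to this sketch (paraphrasing the existing handler, not new code):

	/* any interrupt, MSI or legacy, funnels into one tasklet ... */
	tasklet_schedule(&ar_pci->intr_tq);
	/* ... which runs ath10k_pci_tasklet() and thus
	 * ath10k_ce_per_engine_service_any(ar) for all CEs
	 */
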
 
 static int ath10k_pci_init_irq(struct ath10k *ar)
@@ -2772,20 +2701,9 @@ static int ath10k_pci_init_irq(struct ath10k *ar)
                ath10k_info(ar, "limiting irq mode to: %d\n",
                            ath10k_pci_irq_mode);
 
-       /* Try MSI-X */
-       if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) {
-               ar_pci->num_msi_intrs = MSI_ASSIGN_CE_MAX + 1;
-               ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
-                                          ar_pci->num_msi_intrs);
-               if (ret > 0)
-                       return 0;
-
-               /* fall-through */
-       }
-
        /* Try MSI */
        if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
-               ar_pci->num_msi_intrs = 1;
+               ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI;
                ret = pci_enable_msi(ar_pci->pdev);
                if (ret == 0)
                        return 0;
@@ -2801,7 +2719,7 @@ static int ath10k_pci_init_irq(struct ath10k *ar)
         * This write might get lost if target has NOT written BAR.
         * For now, fix the race by repeating the write in below
         * synchronization checking. */
-       ar_pci->num_msi_intrs = 0;
+       ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;
 
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
                           PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
@@ -2819,8 +2737,8 @@ static int ath10k_pci_deinit_irq(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 
-       switch (ar_pci->num_msi_intrs) {
-       case 0:
+       switch (ar_pci->oper_irq_mode) {
+       case ATH10K_PCI_IRQ_LEGACY:
                ath10k_pci_deinit_irq_legacy(ar);
                break;
        default:
@@ -2858,7 +2776,7 @@ int ath10k_pci_wait_for_target_init(struct ath10k *ar)
                if (val & FW_IND_INITIALIZED)
                        break;
 
-               if (ar_pci->num_msi_intrs == 0)
+               if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
                        /* Fix potential race by repeating CORE_BASE writes */
                        ath10k_pci_enable_legacy_irq(ar);
 
@@ -3136,8 +3054,8 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
                goto err_sleep;
        }
 
-       ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n",
-                   ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs,
+       ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n",
+                   ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode,
                    ath10k_pci_irq_mode, ath10k_pci_reset_mode);
 
        ret = ath10k_pci_request_irq(ar);
@@ -3255,7 +3173,6 @@ MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
 MODULE_LICENSE("Dual BSD/GPL");
 
 /* QCA988x 2.0 firmware files */
-MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
index 249c73a6980088408afaed9530d68701dcd2fa06..959dc321b75ec0739d9549b7d7b43aecf90ea93d 100644 (file)
@@ -148,9 +148,6 @@ struct ath10k_pci_pipe {
 
        /* protects compl_free and num_send_allowed */
        spinlock_t pipe_lock;
-
-       struct ath10k_pci *ar_pci;
-       struct tasklet_struct intr;
 };
 
 struct ath10k_pci_supp_chip {
@@ -164,6 +161,12 @@ struct ath10k_bus_ops {
        int (*get_num_banks)(struct ath10k *ar);
 };
 
+enum ath10k_pci_irq_mode {
+       ATH10K_PCI_IRQ_AUTO = 0,
+       ATH10K_PCI_IRQ_LEGACY = 1,
+       ATH10K_PCI_IRQ_MSI = 2,
+};
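
The enum moves into the header so struct ath10k_pci below can record its
operating mode directly, replacing the old num_msi_intrs counter. The mapping
implied by this patch, as an editorial summary:

	/* num_msi_intrs == 0  ->  ATH10K_PCI_IRQ_LEGACY
	 * num_msi_intrs == 1  ->  ATH10K_PCI_IRQ_MSI
	 * num_msi_intrs  > 1  ->  MSI-X (support removed by this patch)
	 */
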
+
 struct ath10k_pci {
        struct pci_dev *pdev;
        struct device *dev;
@@ -171,14 +174,10 @@ struct ath10k_pci {
        void __iomem *mem;
        size_t mem_len;
 
-       /*
-        * Number of MSI interrupts granted, 0 --> using legacy PCI line
-        * interrupts.
-        */
-       int num_msi_intrs;
+       /* Operating interrupt mode */
+       enum ath10k_pci_irq_mode oper_irq_mode;
 
        struct tasklet_struct intr_tq;
-       struct tasklet_struct msi_fw_err;
 
        struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
 
index 3ca3fae408a771971afe55a010ee7491d4c8f3a3..0c5f5863dac878b38ba5467b75762bf59e6b9d72 100644 (file)
@@ -134,27 +134,17 @@ ath10k_swap_code_seg_alloc(struct ath10k *ar, size_t swap_bin_len)
        return seg_info;
 }
 
-int ath10k_swap_code_seg_configure(struct ath10k *ar,
-                                  enum ath10k_swap_code_seg_bin_type type)
+int ath10k_swap_code_seg_configure(struct ath10k *ar)
 {
        int ret;
        struct ath10k_swap_code_seg_info *seg_info = NULL;
 
-       switch (type) {
-       case ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW:
-               if (!ar->swap.firmware_swap_code_seg_info)
-                       return 0;
-
-               ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n");
-               seg_info = ar->swap.firmware_swap_code_seg_info;
-               break;
-       default:
-       case ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP:
-       case ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF:
-               ath10k_warn(ar, "ignoring unknown code swap binary type %d\n",
-                           type);
+       if (!ar->swap.firmware_swap_code_seg_info)
                return 0;
-       }
+
+       ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n");
+
+       seg_info = ar->swap.firmware_swap_code_seg_info;
 
        ret = ath10k_bmi_write_memory(ar, seg_info->target_addr,
                                      &seg_info->seg_hw_info,
@@ -171,8 +161,13 @@ int ath10k_swap_code_seg_configure(struct ath10k *ar,
 void ath10k_swap_code_seg_release(struct ath10k *ar)
 {
        ath10k_swap_code_seg_free(ar, ar->swap.firmware_swap_code_seg_info);
-       ar->swap.firmware_codeswap_data = NULL;
-       ar->swap.firmware_codeswap_len = 0;
+
+       /* FIXME: these two assignments look to be in the wrong place! Shouldn't
+        * they be in ath10k_core_free_firmware_files() like the rest?
+        */
+       ar->normal_mode_fw.fw_file.codeswap_data = NULL;
+       ar->normal_mode_fw.fw_file.codeswap_len = 0;
+
        ar->swap.firmware_swap_code_seg_info = NULL;
 }
 
@@ -180,20 +175,23 @@ int ath10k_swap_code_seg_init(struct ath10k *ar)
 {
        int ret;
        struct ath10k_swap_code_seg_info *seg_info;
+       const void *codeswap_data;
+       size_t codeswap_len;
+
+       codeswap_data = ar->normal_mode_fw.fw_file.codeswap_data;
+       codeswap_len = ar->normal_mode_fw.fw_file.codeswap_len;
 
-       if (!ar->swap.firmware_codeswap_len || !ar->swap.firmware_codeswap_data)
+       if (!codeswap_len || !codeswap_data)
                return 0;
 
-       seg_info = ath10k_swap_code_seg_alloc(ar,
-                                             ar->swap.firmware_codeswap_len);
+       seg_info = ath10k_swap_code_seg_alloc(ar, codeswap_len);
        if (!seg_info) {
                ath10k_err(ar, "failed to allocate fw code swap segment\n");
                return -ENOMEM;
        }
 
        ret = ath10k_swap_code_seg_fill(ar, seg_info,
-                                       ar->swap.firmware_codeswap_data,
-                                       ar->swap.firmware_codeswap_len);
+                                       codeswap_data, codeswap_len);
 
        if (ret) {
                ath10k_warn(ar, "failed to initialize fw code swap segment: %d\n",
index 5c89952dd20f196e57e7a8008c1c5bf8b0b11a15..36991c7b07a04f08339ce9eaa19e4a8ca8940fc1 100644 (file)
@@ -39,12 +39,6 @@ union ath10k_swap_code_seg_item {
        struct ath10k_swap_code_seg_tail tail;
 } __packed;
 
-enum ath10k_swap_code_seg_bin_type {
-        ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP,
-        ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW,
-        ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF,
-};
-
 struct ath10k_swap_code_seg_hw_info {
        /* Swap binary image size */
        __le32 swap_size;
@@ -64,8 +58,7 @@ struct ath10k_swap_code_seg_info {
        dma_addr_t paddr[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED];
 };
 
-int ath10k_swap_code_seg_configure(struct ath10k *ar,
-                                  enum ath10k_swap_code_seg_bin_type type);
+int ath10k_swap_code_seg_configure(struct ath10k *ar);
 void ath10k_swap_code_seg_release(struct ath10k *ar);
 int ath10k_swap_code_seg_init(struct ath10k *ar);
 
index 361f143b019c1a1c1c0c7c53cb7d442bd5dce772..8e24099fa936b72292b15e06b1f5b6a5d0fcd8cf 100644 (file)
@@ -438,7 +438,7 @@ Fw Mode/SubMode Mask
        ((HOST_INTEREST->hi_pwr_save_flags & HI_PWR_SAVE_LPL_ENABLED))
 #define HI_DEV_LPL_TYPE_GET(_devix) \
        (HOST_INTEREST->hi_pwr_save_flags & ((HI_PWR_SAVE_LPL_DEV_MASK) << \
-        (HI_PWR_SAVE_LPL_DEV0_LSB + (_devix)*2)))
+        (HI_PWR_SAVE_LPL_DEV0_LSB + (_devix) * 2)))
 
 #define HOST_INTEREST_SMPS_IS_ALLOWED() \
        ((HOST_INTEREST->hi_smps_options & HI_SMPS_ALLOW_MASK))
index 1d5a2fdcbf56f1c9beacd08d6a5b201b2b97d752..120f4234d3b0b5f023d00a940e11e03ac1498e39 100644 (file)
@@ -139,127 +139,8 @@ static int ath10k_tm_cmd_get_version(struct ath10k *ar, struct nlattr *tb[])
        return cfg80211_testmode_reply(skb);
 }
 
-static int ath10k_tm_fetch_utf_firmware_api_2(struct ath10k *ar)
-{
-       size_t len, magic_len, ie_len;
-       struct ath10k_fw_ie *hdr;
-       char filename[100];
-       __le32 *version;
-       const u8 *data;
-       int ie_id, ret;
-
-       snprintf(filename, sizeof(filename), "%s/%s",
-                ar->hw_params.fw.dir, ATH10K_FW_UTF_API2_FILE);
-
-       /* load utf firmware image */
-       ret = request_firmware(&ar->testmode.utf, filename, ar->dev);
-       if (ret) {
-               ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n",
-                           filename, ret);
-               return ret;
-       }
-
-       data = ar->testmode.utf->data;
-       len = ar->testmode.utf->size;
-
-       /* FIXME: call release_firmware() in error cases */
-
-       /* magic also includes the null byte, check that as well */
-       magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
-
-       if (len < magic_len) {
-               ath10k_err(ar, "utf firmware file is too small to contain magic\n");
-               ret = -EINVAL;
-               goto err;
-       }
-
-       if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) {
-               ath10k_err(ar, "invalid firmware magic\n");
-               ret = -EINVAL;
-               goto err;
-       }
-
-       /* jump over the padding */
-       magic_len = ALIGN(magic_len, 4);
-
-       len -= magic_len;
-       data += magic_len;
-
-       /* loop elements */
-       while (len > sizeof(struct ath10k_fw_ie)) {
-               hdr = (struct ath10k_fw_ie *)data;
-
-               ie_id = le32_to_cpu(hdr->id);
-               ie_len = le32_to_cpu(hdr->len);
-
-               len -= sizeof(*hdr);
-               data += sizeof(*hdr);
-
-               if (len < ie_len) {
-                       ath10k_err(ar, "invalid length for FW IE %d (%zu < %zu)\n",
-                                  ie_id, len, ie_len);
-                       ret = -EINVAL;
-                       goto err;
-               }
-
-               switch (ie_id) {
-               case ATH10K_FW_IE_FW_VERSION:
-                       if (ie_len > sizeof(ar->testmode.utf_version) - 1)
-                               break;
-
-                       memcpy(ar->testmode.utf_version, data, ie_len);
-                       ar->testmode.utf_version[ie_len] = '\0';
-
-                       ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
-                                  "testmode found fw utf version %s\n",
-                                  ar->testmode.utf_version);
-                       break;
-               case ATH10K_FW_IE_TIMESTAMP:
-                       /* ignore timestamp, but don't warn about it either */
-                       break;
-               case ATH10K_FW_IE_FW_IMAGE:
-                       ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
-                                  "testmode found fw image ie (%zd B)\n",
-                                  ie_len);
-
-                       ar->testmode.utf_firmware_data = data;
-                       ar->testmode.utf_firmware_len = ie_len;
-                       break;
-               case ATH10K_FW_IE_WMI_OP_VERSION:
-                       if (ie_len != sizeof(u32))
-                               break;
-                       version = (__le32 *)data;
-                       ar->testmode.op_version = le32_to_cpup(version);
-                       ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode found fw ie wmi op version %d\n",
-                                  ar->testmode.op_version);
-                       break;
-               default:
-                       ath10k_warn(ar, "Unknown testmode FW IE: %u\n",
-                                   le32_to_cpu(hdr->id));
-                       break;
-               }
-               /* jump over the padding */
-               ie_len = ALIGN(ie_len, 4);
-
-               len -= ie_len;
-               data += ie_len;
-       }
-
-       if (!ar->testmode.utf_firmware_data || !ar->testmode.utf_firmware_len) {
-               ath10k_err(ar, "No ATH10K_FW_IE_FW_IMAGE found\n");
-               ret = -EINVAL;
-               goto err;
-       }
-
-       return 0;
-
-err:
-       release_firmware(ar->testmode.utf);
-
-       return ret;
-}
-
-static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar)
+static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar,
+                                             struct ath10k_fw_file *fw_file)
 {
        char filename[100];
        int ret;
@@ -268,7 +149,7 @@ static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar)
                 ar->hw_params.fw.dir, ATH10K_FW_UTF_FILE);
 
        /* load utf firmware image */
-       ret = request_firmware(&ar->testmode.utf, filename, ar->dev);
+       ret = request_firmware(&fw_file->firmware, filename, ar->dev);
        if (ret) {
                ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n",
                            filename, ret);
@@ -281,24 +162,27 @@ static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar)
         * correct WMI interface.
         */
 
-       ar->testmode.op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
-       ar->testmode.utf_firmware_data = ar->testmode.utf->data;
-       ar->testmode.utf_firmware_len = ar->testmode.utf->size;
+       fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
+       fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_10_1;
+       fw_file->firmware_data = fw_file->firmware->data;
+       fw_file->firmware_len = fw_file->firmware->size;
 
        return 0;
 }
 
 static int ath10k_tm_fetch_firmware(struct ath10k *ar)
 {
+       struct ath10k_fw_components *utf_mode_fw;
        int ret;
 
-       ret = ath10k_tm_fetch_utf_firmware_api_2(ar);
+       ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_UTF_API2_FILE,
+                                              &ar->testmode.utf_mode_fw.fw_file);
        if (ret == 0) {
                ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode using fw utf api 2");
-               return 0;
+               goto out;
        }
 
-       ret = ath10k_tm_fetch_utf_firmware_api_1(ar);
+       ret = ath10k_tm_fetch_utf_firmware_api_1(ar, &ar->testmode.utf_mode_fw.fw_file);
        if (ret) {
                ath10k_err(ar, "failed to fetch utf firmware binary: %d", ret);
                return ret;
@@ -306,6 +190,21 @@ static int ath10k_tm_fetch_firmware(struct ath10k *ar)
 
        ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode using utf api 1");
 
+out:
+       utf_mode_fw = &ar->testmode.utf_mode_fw;
+
+       /* Use the same board data file as the normal firmware uses (but
+        * it's still "owned" by normal_mode_fw, so we shouldn't free it).
+        */
+       utf_mode_fw->board_data = ar->normal_mode_fw.board_data;
+       utf_mode_fw->board_len = ar->normal_mode_fw.board_len;
+
+       if (!utf_mode_fw->fw_file.otp_data) {
+               ath10k_info(ar, "utf.bin didn't contain otp binary, taking it from the normal mode firmware");
+               utf_mode_fw->fw_file.otp_data = ar->normal_mode_fw.fw_file.otp_data;
+               utf_mode_fw->fw_file.otp_len = ar->normal_mode_fw.fw_file.otp_len;
+       }
+
        return 0;
 }
 
@@ -329,7 +228,7 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
                goto err;
        }
 
-       if (WARN_ON(ar->testmode.utf != NULL)) {
+       if (WARN_ON(ar->testmode.utf_mode_fw.fw_file.firmware != NULL)) {
                /* utf image is already downloaded, it shouldn't be */
                ret = -EEXIST;
                goto err;
@@ -344,27 +243,19 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
        spin_lock_bh(&ar->data_lock);
        ar->testmode.utf_monitor = true;
        spin_unlock_bh(&ar->data_lock);
-       BUILD_BUG_ON(sizeof(ar->fw_features) !=
-                    sizeof(ar->testmode.orig_fw_features));
-
-       memcpy(ar->testmode.orig_fw_features, ar->fw_features,
-              sizeof(ar->fw_features));
-       ar->testmode.orig_wmi_op_version = ar->wmi.op_version;
-       memset(ar->fw_features, 0, sizeof(ar->fw_features));
-
-       ar->wmi.op_version = ar->testmode.op_version;
 
        ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode wmi version %d\n",
-                  ar->wmi.op_version);
+                  ar->testmode.utf_mode_fw.fw_file.wmi_op_version);
 
        ret = ath10k_hif_power_up(ar);
        if (ret) {
                ath10k_err(ar, "failed to power up hif (testmode): %d\n", ret);
                ar->state = ATH10K_STATE_OFF;
-               goto err_fw_features;
+               goto err_release_utf_mode_fw;
        }
 
-       ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_UTF);
+       ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_UTF,
+                               &ar->testmode.utf_mode_fw);
        if (ret) {
                ath10k_err(ar, "failed to start core (testmode): %d\n", ret);
                ar->state = ATH10K_STATE_OFF;
@@ -373,8 +264,8 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
 
        ar->state = ATH10K_STATE_UTF;
 
-       if (strlen(ar->testmode.utf_version) > 0)
-               ver = ar->testmode.utf_version;
+       if (strlen(ar->testmode.utf_mode_fw.fw_file.fw_version) > 0)
+               ver = ar->testmode.utf_mode_fw.fw_file.fw_version;
        else
                ver = "API 1";
 
@@ -387,14 +278,9 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
 err_power_down:
        ath10k_hif_power_down(ar);
 
-err_fw_features:
-       /* return the original firmware features */
-       memcpy(ar->fw_features, ar->testmode.orig_fw_features,
-              sizeof(ar->fw_features));
-       ar->wmi.op_version = ar->testmode.orig_wmi_op_version;
-
-       release_firmware(ar->testmode.utf);
-       ar->testmode.utf = NULL;
+err_release_utf_mode_fw:
+       release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware);
+       ar->testmode.utf_mode_fw.fw_file.firmware = NULL;
 
 err:
        mutex_unlock(&ar->conf_mutex);
@@ -415,13 +301,8 @@ static void __ath10k_tm_cmd_utf_stop(struct ath10k *ar)
 
        spin_unlock_bh(&ar->data_lock);
 
-       /* return the original firmware features */
-       memcpy(ar->fw_features, ar->testmode.orig_fw_features,
-              sizeof(ar->fw_features));
-       ar->wmi.op_version = ar->testmode.orig_wmi_op_version;
-
-       release_firmware(ar->testmode.utf);
-       ar->testmode.utf = NULL;
+       release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware);
+       ar->testmode.utf_mode_fw.fw_file.firmware = NULL;
 
        ar->state = ATH10K_STATE_OFF;
 }
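
The large removal above drops ath10k_tm_fetch_utf_firmware_api_2(), whose FW IE parsing is now delegated to the shared ath10k_core_fetch_firmware_api_n(). The container it walked is a simple sequence of (id, len) headers with payloads padded to 4 bytes. A self-contained sketch of that walk, assuming a well-formed buffer and a little-endian host (names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    struct fw_ie {
            uint32_t id;   /* little-endian on the wire; demo assumes an LE host */
            uint32_t len;
            uint8_t data[];
    };

    #define ALIGN4(x) (((x) + 3u) & ~3u)

    static void walk_fw_ies(const uint8_t *data, size_t len)
    {
            while (len > sizeof(struct fw_ie)) {
                    const struct fw_ie *hdr = (const void *)data;
                    uint32_t ie_len = hdr->len;

                    len -= sizeof(*hdr);
                    data += sizeof(*hdr);

                    if (ie_len > len) {
                            fprintf(stderr, "truncated IE %u\n", hdr->id);
                            return;
                    }
                    printf("IE id %u, %u bytes\n", hdr->id, ie_len);

                    ie_len = ALIGN4(ie_len);   /* payloads are padded to 4 bytes */
                    if (ie_len > len)
                            return;
                    len -= ie_len;
                    data += ie_len;
            }
    }

    int main(void)
    {
            /* a single IE: id 1, 3 payload bytes, padded to 4 */
            uint8_t buf[12] = { 1, 0, 0, 0, 3, 0, 0, 0, 'a', 'b', 'c', 0 };

            walk_fw_ies(buf, sizeof(buf));
            return 0;
    }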
index c9223e9e962f1caea4cfe72b05a227efed0ab74a..3abb97f63b1edf6ef72b34bc0f684f1fcb996eb0 100644 (file)
@@ -20,7 +20,7 @@
 #define ATH10K_QUIET_PERIOD_MIN         25
 #define ATH10K_QUIET_START_OFFSET       10
 #define ATH10K_HWMON_NAME_LEN           15
-#define ATH10K_THERMAL_SYNC_TIMEOUT_HZ (5*HZ)
+#define ATH10K_THERMAL_SYNC_TIMEOUT_HZ (5 * HZ)
 #define ATH10K_THERMAL_THROTTLE_MAX     100
 
 struct ath10k_thermal {
index fbfb608e48abe3dce52ccd1868068f9db86593bc..576e7c42ed657549d90dbd31ea74f3fbac5ee47e 100644 (file)
@@ -49,25 +49,25 @@ out:
        spin_unlock_bh(&ar->data_lock);
 }
 
-void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
-                         const struct htt_tx_done *tx_done)
+int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+                        const struct htt_tx_done *tx_done)
 {
        struct ath10k *ar = htt->ar;
        struct device *dev = ar->dev;
        struct ieee80211_tx_info *info;
+       struct ieee80211_txq *txq;
        struct ath10k_skb_cb *skb_cb;
+       struct ath10k_txq *artxq;
        struct sk_buff *msdu;
-       bool limit_mgmt_desc = false;
 
        ath10k_dbg(ar, ATH10K_DBG_HTT,
-                  "htt tx completion msdu_id %u discard %d no_ack %d success %d\n",
-                  tx_done->msdu_id, !!tx_done->discard,
-                  !!tx_done->no_ack, !!tx_done->success);
+                  "htt tx completion msdu_id %u status %d\n",
+                  tx_done->msdu_id, tx_done->status);
 
        if (tx_done->msdu_id >= htt->max_num_pending_tx) {
                ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n",
                            tx_done->msdu_id);
-               return;
+               return -EINVAL;
        }
 
        spin_lock_bh(&htt->tx_lock);
@@ -76,17 +76,18 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
                ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n",
                            tx_done->msdu_id);
                spin_unlock_bh(&htt->tx_lock);
-               return;
+               return -ENOENT;
        }
 
        skb_cb = ATH10K_SKB_CB(msdu);
+       txq = skb_cb->txq;
+       artxq = txq ? (void *)txq->drv_priv : NULL;
 
-       if (unlikely(skb_cb->flags & ATH10K_SKB_F_MGMT) &&
-           ar->hw_params.max_probe_resp_desc_thres)
-               limit_mgmt_desc = true;
+       if (artxq)
+               artxq->num_fw_queued--;
 
        ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
-       __ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
+       ath10k_htt_tx_dec_pending(htt);
        if (htt->num_pending_tx == 0)
                wake_up(&htt->empty_tx_wq);
        spin_unlock_bh(&htt->tx_lock);
@@ -99,22 +100,24 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
        memset(&info->status, 0, sizeof(info->status));
        trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id);
 
-       if (tx_done->discard) {
+       if (tx_done->status == HTT_TX_COMPL_STATE_DISCARD) {
                ieee80211_free_txskb(htt->ar->hw, msdu);
-               return;
+               return 0;
        }
 
        if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
                info->flags |= IEEE80211_TX_STAT_ACK;
 
-       if (tx_done->no_ack)
+       if (tx_done->status == HTT_TX_COMPL_STATE_NOACK)
                info->flags &= ~IEEE80211_TX_STAT_ACK;
 
-       if (tx_done->success && (info->flags & IEEE80211_TX_CTL_NO_ACK))
+       if ((tx_done->status == HTT_TX_COMPL_STATE_ACK) &&
+           (info->flags & IEEE80211_TX_CTL_NO_ACK))
                info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
 
        ieee80211_tx_status(htt->ar->hw, msdu);
        /* we do not own the msdu anymore */
+       return 0;
 }
 
 struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
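
The tx-completion hunks above fold the old discard/no_ack/success booleans into a single HTT_TX_COMPL_STATE_* status and give ath10k_txrx_tx_unref() an int return so callers can see invalid msdu ids. A hedged sketch of the shape of that contract, with hypothetical names (tx_compl_state, tx_unref):

    #include <errno.h>
    #include <stdio.h>

    enum tx_compl_state { TX_DISCARD, TX_NOACK, TX_ACK };

    /* hypothetical: 0 on success, negative errno on an invalid msdu id */
    static int tx_unref(unsigned int msdu_id, unsigned int max_pending,
                        enum tx_compl_state state)
    {
            if (msdu_id >= max_pending)
                    return -EINVAL;

            switch (state) {
            case TX_DISCARD:
                    printf("msdu %u freed, no status reported\n", msdu_id);
                    break;
            case TX_NOACK:
                    printf("msdu %u reported, ACK flag cleared\n", msdu_id);
                    break;
            case TX_ACK:
                    printf("msdu %u reported as acked\n", msdu_id);
                    break;
            }
            return 0;
    }

    int main(void)
    {
            return tx_unref(3, 8, TX_ACK);
    }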
@@ -127,7 +130,7 @@ struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
        list_for_each_entry(peer, &ar->peers, list) {
                if (peer->vdev_id != vdev_id)
                        continue;
-               if (memcmp(peer->addr, addr, ETH_ALEN))
+               if (!ether_addr_equal(peer->addr, addr))
                        continue;
 
                return peer;
@@ -163,7 +166,7 @@ static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
 
                        (mapped == expect_mapped ||
                         test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags));
-               }), 3*HZ);
+               }), 3 * HZ);
 
        if (time_left == 0)
                return -ETIMEDOUT;
@@ -187,6 +190,13 @@ void ath10k_peer_map_event(struct ath10k_htt *htt,
        struct ath10k *ar = htt->ar;
        struct ath10k_peer *peer;
 
+       if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) {
+               ath10k_warn(ar,
+                           "received htt peer map event with idx out of bounds: %hu\n",
+                           ev->peer_id);
+               return;
+       }
+
        spin_lock_bh(&ar->data_lock);
        peer = ath10k_peer_find(ar, ev->vdev_id, ev->addr);
        if (!peer) {
@@ -203,6 +213,7 @@ void ath10k_peer_map_event(struct ath10k_htt *htt,
        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
                   ev->vdev_id, ev->addr, ev->peer_id);
 
+       ar->peer_map[ev->peer_id] = peer;
        set_bit(ev->peer_id, peer->peer_ids);
 exit:
        spin_unlock_bh(&ar->data_lock);
@@ -214,6 +225,13 @@ void ath10k_peer_unmap_event(struct ath10k_htt *htt,
        struct ath10k *ar = htt->ar;
        struct ath10k_peer *peer;
 
+       if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) {
+               ath10k_warn(ar,
+                           "received htt peer unmap event with idx out of bounds: %hu\n",
+                           ev->peer_id);
+               return;
+       }
+
        spin_lock_bh(&ar->data_lock);
        peer = ath10k_peer_find_by_id(ar, ev->peer_id);
        if (!peer) {
@@ -225,6 +243,7 @@ void ath10k_peer_unmap_event(struct ath10k_htt *htt,
        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
                   peer->vdev_id, peer->addr, ev->peer_id);
 
+       ar->peer_map[ev->peer_id] = NULL;
        clear_bit(ev->peer_id, peer->peer_ids);
 
        if (bitmap_empty(peer->peer_ids, ATH10K_MAX_NUM_PEER_IDS)) {
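
Both the map and unmap handlers above gain a bounds check before ev->peer_id is used to index the new ar->peer_map table, since the id arrives from firmware. The pattern in miniature, with demo names only:

    #include <stdio.h>

    #define MAX_PEERS 8

    static const char *peer_map[MAX_PEERS];   /* demo stand-in for ar->peer_map */

    static int map_peer(unsigned int id, const char *mac)
    {
            if (id >= MAX_PEERS) {
                    /* ids come from firmware, so never trust them as indexes */
                    fprintf(stderr, "peer id %u out of bounds\n", id);
                    return -1;
            }
            peer_map[id] = mac;
            return 0;
    }

    int main(void)
    {
            map_peer(3, "aa:bb:cc:dd:ee:ff");
            map_peer(99, "de:ad:be:ef:00:00");   /* rejected by the check */
            return 0;
    }
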
index a90e09f5c7f2f180370e118175991bea68018d32..e7ea1ae1c438de06331d2fc0f3db86d4329d31fe 100644 (file)
@@ -19,8 +19,8 @@
 
 #include "htt.h"
 
-void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
-                         const struct htt_tx_done *tx_done);
+int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+                        const struct htt_tx_done *tx_done);
 
 struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
                                     const u8 *addr);
index 32ab34edceb54927fa4f668ac7c37ec4f728226a..7fb00dcc03b81eb07117f8c61fe1fc5ea8791aba 100644 (file)
@@ -186,6 +186,9 @@ struct wmi_ops {
                                                        u8 enable,
                                                        u32 detect_level,
                                                        u32 detect_margin);
+       struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
+                                              enum wmi_host_platform_type type,
+                                              u32 fw_feature_bitmap);
        int (*get_vdev_subtype)(struct ath10k *ar,
                                enum wmi_vdev_subtype subtype);
 };
@@ -1329,6 +1332,26 @@ ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
                                   ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
 }
 
+static inline int
+ath10k_wmi_ext_resource_config(struct ath10k *ar,
+                              enum wmi_host_platform_type type,
+                              u32 fw_feature_bitmap)
+{
+       struct sk_buff *skb;
+
+       if (!ar->wmi.ops->ext_resource_config)
+               return -EOPNOTSUPP;
+
+       skb = ar->wmi.ops->ext_resource_config(ar, type,
+                                              fw_feature_bitmap);
+
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
+
+       return ath10k_wmi_cmd_send(ar, skb,
+                                  ar->wmi.cmd->ext_resource_cfg_cmdid);
+}
+
 static inline int
 ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
 {
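
The new ext_resource_config op follows the wmi_ops convention visible above: the inline wrapper probes the function pointer and returns -EOPNOTSUPP when a WMI revision does not provide the op. A compact userspace sketch of that optional-op pattern (EOPNOTSUPP taken from POSIX errno.h; names are illustrative):

    #include <errno.h>
    #include <stdio.h>

    struct ops {
            int (*ext_resource_config)(int type, unsigned int bitmap); /* may be NULL */
    };

    static int do_ext_resource_config(const struct ops *ops, int type,
                                      unsigned int bitmap)
    {
            if (!ops->ext_resource_config)
                    return -EOPNOTSUPP;   /* older op tables lack this entry */
            return ops->ext_resource_config(type, bitmap);
    }

    static int wmi_10_4_ext_cfg(int type, unsigned int bitmap)
    {
            printf("cfg type %d bitmap %#x\n", type, bitmap);
            return 0;
    }

    int main(void)
    {
            struct ops tlv = { 0 }, v10_4 = { wmi_10_4_ext_cfg };

            printf("tlv: %d\n", do_ext_resource_config(&tlv, 0, 0x20));
            printf("10.4: %d\n", do_ext_resource_config(&v10_4, 0, 0x20));
            return 0;
    }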
index 108593202052fb86fe43060ec6f19ef113417d0f..e09337ee7c9664cccb3368faf59dc460845f5f80 100644 (file)
@@ -3409,6 +3409,7 @@ static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
        .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
        .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
        .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+       .set_tsf = WMI_VDEV_PARAM_UNSUPPORTED,
 };
 
 static const struct wmi_ops wmi_tlv_ops = {
index dd678590531ae18d9ae195f4a8f10fcb362e4993..b8aa6000573cd1abfa2f2c79fa53310a85b13faf 100644 (file)
@@ -968,8 +968,8 @@ enum wmi_tlv_service {
 
 #define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
        ((svc_id) < (len) && \
-        __le32_to_cpu((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \
-        BIT((svc_id)%(sizeof(u32))))
+        __le32_to_cpu((wmi_svc_bmap)[(svc_id) / (sizeof(u32))]) & \
+        BIT((svc_id) % (sizeof(u32))))
 
 #define SVCMAP(x, y, len) \
        do { \
index 70261387d1a5733a6069e9274222d49ec9a8cb99..621019f435316c767b7ff1474bf47d23462b4a41 100644 (file)
@@ -705,6 +705,7 @@ static struct wmi_cmd_map wmi_10_4_cmd_map = {
        .set_cca_params_cmdid = WMI_10_4_SET_CCA_PARAMS_CMDID,
        .pdev_bss_chan_info_request_cmdid =
                        WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
+       .ext_resource_cfg_cmdid = WMI_10_4_EXT_RESOURCE_CFG_CMDID,
 };
 
 /* MAIN WMI VDEV param map */
@@ -780,6 +781,7 @@ static struct wmi_vdev_param_map wmi_vdev_param_map = {
        .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
        .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
        .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+       .set_tsf = WMI_VDEV_PARAM_UNSUPPORTED,
 };
 
 /* 10.X WMI VDEV param map */
@@ -855,6 +857,7 @@ static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
        .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
        .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
        .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+       .set_tsf = WMI_VDEV_PARAM_UNSUPPORTED,
 };
 
 static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
@@ -929,6 +932,7 @@ static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
        .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
        .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
        .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+       .set_tsf = WMI_10X_VDEV_PARAM_TSF_INCREMENT,
 };
 
 static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
@@ -1004,6 +1008,7 @@ static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
        .meru_vc = WMI_10_4_VDEV_PARAM_MERU_VC,
        .rx_decap_type = WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
        .bw_nss_ratemask = WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
+       .set_tsf = WMI_10_4_VDEV_PARAM_TSF_INCREMENT,
 };
 
 static struct wmi_pdev_param_map wmi_pdev_param_map = {
@@ -1803,7 +1808,7 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
                        ret = -ESHUTDOWN;
 
                (ret != -EAGAIN);
-       }), 3*HZ);
+       }), 3 * HZ);
 
        if (ret)
                dev_kfree_skb_any(skb);
@@ -2099,34 +2104,6 @@ int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
        return 0;
 }
 
-static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode)
-{
-       enum ieee80211_band band;
-
-       switch (phy_mode) {
-       case MODE_11A:
-       case MODE_11NA_HT20:
-       case MODE_11NA_HT40:
-       case MODE_11AC_VHT20:
-       case MODE_11AC_VHT40:
-       case MODE_11AC_VHT80:
-               band = IEEE80211_BAND_5GHZ;
-               break;
-       case MODE_11G:
-       case MODE_11B:
-       case MODE_11GONLY:
-       case MODE_11NG_HT20:
-       case MODE_11NG_HT40:
-       case MODE_11AC_VHT20_2G:
-       case MODE_11AC_VHT40_2G:
-       case MODE_11AC_VHT80_2G:
-       default:
-               band = IEEE80211_BAND_2GHZ;
-       }
-
-       return band;
-}
-
 /* If keys are configured, HW decrypts all frames
  * with protected bit set. Mark such frames as decrypted.
  */
@@ -2167,10 +2144,13 @@ static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb,
        struct wmi_mgmt_rx_event_v1 *ev_v1;
        struct wmi_mgmt_rx_event_v2 *ev_v2;
        struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
+       struct wmi_mgmt_rx_ext_info *ext_info;
        size_t pull_len;
        u32 msdu_len;
+       u32 len;
 
-       if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) {
+       if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX,
+                    ar->running_fw->fw_file.fw_features)) {
                ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
                ev_hdr = &ev_v2->hdr.v1;
                pull_len = sizeof(*ev_v2);
@@ -2195,6 +2175,12 @@ static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb,
        if (skb->len < msdu_len)
                return -EPROTO;
 
+       if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
+               len = ALIGN(le32_to_cpu(arg->buf_len), 4);
+               ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
+               memcpy(&arg->ext_info, ext_info,
+                      sizeof(struct wmi_mgmt_rx_ext_info));
+       }
        /* the WMI buffer might've ended up being padded to 4 bytes due to HTC
         * trailer with credit update. Trim the excess garbage.
         */
@@ -2211,6 +2197,8 @@ static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar,
        struct wmi_10_4_mgmt_rx_hdr *ev_hdr;
        size_t pull_len;
        u32 msdu_len;
+       struct wmi_mgmt_rx_ext_info *ext_info;
+       u32 len;
 
        ev = (struct wmi_10_4_mgmt_rx_event *)skb->data;
        ev_hdr = &ev->hdr;
@@ -2231,6 +2219,13 @@ static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar,
        if (skb->len < msdu_len)
                return -EPROTO;
 
+       if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
+               len = ALIGN(le32_to_cpu(arg->buf_len), 4);
+               ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
+               memcpy(&arg->ext_info, ext_info,
+                      sizeof(struct wmi_mgmt_rx_ext_info));
+       }
+
        /* Make sure bytes added for padding are removed. */
        skb_trim(skb, msdu_len);
 
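Both pull routines above copy an optional wmi_mgmt_rx_ext_info trailer that, when WMI_RX_STATUS_EXT_INFO is set, sits after the 4-byte-aligned management payload. A sketch of that offset arithmetic, assuming a little-endian host:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define RX_STATUS_EXT_INFO 0x40
    #define ALIGN4(x) (((x) + 3u) & ~3u)

    struct mgmt_rx_ext_info {
            uint64_t rx_mac_timestamp;   /* little-endian on the wire */
    };

    int main(void)
    {
            /* demo buffer: 6 payload bytes, padded to 8, then the trailer */
            uint8_t buf[8 + sizeof(struct mgmt_rx_ext_info)] = { 0 };
            uint32_t status = RX_STATUS_EXT_INFO;
            uint32_t buf_len = 6;
            struct mgmt_rx_ext_info ext;
            uint64_t ts = 0x1122334455667788ull;

            memcpy(buf + ALIGN4(buf_len), &ts, sizeof(ts)); /* host == LE assumed */

            if (status & RX_STATUS_EXT_INFO) {
                    /* the trailer sits after the 4-byte-aligned payload */
                    memcpy(&ext, buf + ALIGN4(buf_len), sizeof(ext));
                    printf("mactime %#llx\n",
                           (unsigned long long)ext.rx_mac_timestamp);
            }
            return 0;
    }
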
@@ -2281,14 +2276,19 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
        if (rx_status & WMI_RX_STATUS_ERR_MIC)
                status->flag |= RX_FLAG_MMIC_ERROR;
 
+       if (rx_status & WMI_RX_STATUS_EXT_INFO) {
+               status->mactime =
+                       __le64_to_cpu(arg.ext_info.rx_mac_timestamp);
+               status->flag |= RX_FLAG_MACTIME_END;
+       }
        /* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to
         * MODE_11B. This means phy_mode is not a reliable source for the band
         * of mgmt rx.
         */
        if (channel >= 1 && channel <= 14) {
-               status->band = IEEE80211_BAND_2GHZ;
+               status->band = NL80211_BAND_2GHZ;
        } else if (channel >= 36 && channel <= 165) {
-               status->band = IEEE80211_BAND_5GHZ;
+               status->band = NL80211_BAND_5GHZ;
        } else {
                /* Shouldn't happen unless list of advertised channels to
                 * mac80211 has been changed.
@@ -2298,7 +2298,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
                return 0;
        }
 
-       if (phy_mode == MODE_11B && status->band == IEEE80211_BAND_5GHZ)
+       if (phy_mode == MODE_11B && status->band == NL80211_BAND_5GHZ)
                ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
 
        sband = &ar->mac.sbands[status->band];
@@ -2310,6 +2310,12 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
        hdr = (struct ieee80211_hdr *)skb->data;
        fc = le16_to_cpu(hdr->frame_control);
 
+       /* Firmware is guaranteed to report all essential management frames via
+        * WMI while it can deliver some extra via HTT. Since there can be
+        * duplicates, split the reporting wrt monitor/sniffing.
+        */
+       status->flag |= RX_FLAG_SKIP_MONITOR;
+
        ath10k_wmi_handle_wep_reauth(ar, skb, status);
 
        /* FW delivers WEP Shared Auth frame with Protected Bit set and
@@ -2351,7 +2357,7 @@ static int freq_to_idx(struct ath10k *ar, int freq)
        struct ieee80211_supported_band *sband;
        int band, ch, idx = 0;
 
-       for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
+       for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
                sband = ar->hw->wiphy->bands[band];
                if (!sband)
                        continue;
@@ -2612,6 +2618,16 @@ void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
        dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
 }
 
+static void
+ath10k_wmi_10_4_pull_peer_stats(const struct wmi_10_4_peer_stats *src,
+                               struct ath10k_fw_stats_peer *dst)
+{
+       ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
+       dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
+       dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
+       dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+}
+
 static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar,
                                            struct sk_buff *skb,
                                            struct ath10k_fw_stats *stats)
@@ -2865,11 +2881,8 @@ static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar,
                const struct wmi_10_2_4_ext_peer_stats *src;
                struct ath10k_fw_stats_peer *dst;
                int stats_len;
-               bool ext_peer_stats_support;
 
-               ext_peer_stats_support = test_bit(WMI_SERVICE_PEER_STATS,
-                                                 ar->wmi.svc_map);
-               if (ext_peer_stats_support)
+               if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
                        stats_len = sizeof(struct wmi_10_2_4_ext_peer_stats);
                else
                        stats_len = sizeof(struct wmi_10_2_4_peer_stats);
@@ -2886,7 +2899,7 @@ static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar,
 
                dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate);
 
-               if (ext_peer_stats_support)
+               if (ath10k_peer_stats_enabled(ar))
                        dst->rx_duration = __le32_to_cpu(src->rx_duration);
                /* FIXME: expose 10.2 specific values */
 
@@ -2905,6 +2918,7 @@ static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar,
        u32 num_pdev_ext_stats;
        u32 num_vdev_stats;
        u32 num_peer_stats;
+       u32 stats_id;
        int i;
 
        if (!skb_pull(skb, sizeof(*ev)))
@@ -2914,6 +2928,7 @@ static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar,
        num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
        num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
        num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+       stats_id = __le32_to_cpu(ev->stats_id);
 
        for (i = 0; i < num_pdev_stats; i++) {
                const struct wmi_10_4_pdev_stats *src;
@@ -2953,22 +2968,28 @@ static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar,
        /* fw doesn't implement vdev stats */
 
        for (i = 0; i < num_peer_stats; i++) {
-               const struct wmi_10_4_peer_stats *src;
+               const struct wmi_10_4_peer_extd_stats *src;
                struct ath10k_fw_stats_peer *dst;
+               int stats_len;
+               bool extd_peer_stats = !!(stats_id & WMI_10_4_STAT_PEER_EXTD);
+
+               if (extd_peer_stats)
+                       stats_len = sizeof(struct wmi_10_4_peer_extd_stats);
+               else
+                       stats_len = sizeof(struct wmi_10_4_peer_stats);
 
                src = (void *)skb->data;
-               if (!skb_pull(skb, sizeof(*src)))
+               if (!skb_pull(skb, stats_len))
                        return -EPROTO;
 
                dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
                if (!dst)
                        continue;
 
-               ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
-               dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
-               dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
-               dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+               ath10k_wmi_10_4_pull_peer_stats(&src->common, dst);
                /* FIXME: expose 10.4 specific values */
+               if (extd_peer_stats)
+                       dst->rx_duration = __le32_to_cpu(src->rx_duration);
 
                list_add_tail(&dst->list, &stats->peers);
        }
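
The 10.4 peer-stats hunk above picks the record size from the event's stats_id flags before walking the packed array, so extended records (carrying rx_duration) and plain ones share one loop. The technique in a standalone form, with illustrative structures:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define STAT_PEER_EXTD 0x8   /* mirrors the role of WMI_10_4_STAT_PEER_EXTD */

    struct peer_common { uint32_t rssi; uint32_t tx_rate; };
    struct peer_extd   { struct peer_common common; uint32_t rx_duration; };

    int main(void)
    {
            uint8_t buf[2 * sizeof(struct peer_extd)] = { 0 };
            uint32_t stats_id = STAT_PEER_EXTD;
            int extd = !!(stats_id & STAT_PEER_EXTD);
            size_t stride = extd ? sizeof(struct peer_extd)
                                 : sizeof(struct peer_common);
            size_t off;

            for (off = 0; off + stride <= sizeof(buf); off += stride) {
                    struct peer_extd rec;

                    /* copy one record's worth of the chosen stride */
                    memcpy(&rec, buf + off, stride);
                    printf("rssi %u", rec.common.rssi);
                    if (extd)
                            printf(" rx_duration %u", rec.rx_duration);
                    printf("\n");
            }
            return 0;
    }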
@@ -4584,10 +4605,6 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
        ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
                        arg.service_map, arg.service_map_len);
 
-       /* only manually set fw features when not using FW IE format */
-       if (ar->fw_api == 1 && ar->fw_version_build > 636)
-               set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);
-
        if (ar->num_rf_chains > ar->max_spatial_stream) {
                ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
                            ar->num_rf_chains, ar->max_spatial_stream);
@@ -4617,10 +4634,16 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
        }
 
        if (test_bit(WMI_SERVICE_PEER_CACHING, ar->wmi.svc_map)) {
+               if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+                            ar->running_fw->fw_file.fw_features))
+                       ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS_PFC +
+                                              ar->max_num_vdevs;
+               else
+                       ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS +
+                                              ar->max_num_vdevs;
+
                ar->max_num_peers = TARGET_10_4_NUM_QCACHE_PEERS_MAX +
                                    ar->max_num_vdevs;
-               ar->num_active_peers = ar->hw_params.qcache_active_peers +
-                                      ar->max_num_vdevs;
                ar->num_tids = ar->num_active_peers * 2;
                ar->max_num_stations = TARGET_10_4_NUM_QCACHE_PEERS_MAX;
        }
@@ -5517,7 +5540,8 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
 
        config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
        config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
-       if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) {
+
+       if (ath10k_peer_stats_enabled(ar)) {
                config.num_peers = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_PEERS);
                config.num_tids = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_TIDS);
        } else {
@@ -5579,7 +5603,7 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
            test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map))
                features |= WMI_10_2_COEX_GPIO;
 
-       if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
+       if (ath10k_peer_stats_enabled(ar))
                features |= WMI_10_2_PEER_STATS;
 
        cmd->resource_config.feature_mask = __cpu_to_le32(features);
@@ -5800,9 +5824,8 @@ ath10k_wmi_put_start_scan_tlvs(struct wmi_start_scan_tlvs *tlvs,
                bssids->num_bssid = __cpu_to_le32(arg->n_bssids);
 
                for (i = 0; i < arg->n_bssids; i++)
-                       memcpy(&bssids->bssid_list[i],
-                              arg->bssids[i].bssid,
-                              ETH_ALEN);
+                       ether_addr_copy(bssids->bssid_list[i].addr,
+                                       arg->bssids[i].bssid);
 
                ptr += sizeof(*bssids);
                ptr += sizeof(struct wmi_mac_addr) * arg->n_bssids;
@@ -7484,6 +7507,28 @@ static int ath10k_wmi_10_4_op_get_vdev_subtype(struct ath10k *ar,
        return -ENOTSUPP;
 }
 
+static struct sk_buff *
+ath10k_wmi_10_4_ext_resource_config(struct ath10k *ar,
+                                   enum wmi_host_platform_type type,
+                                   u32 fw_feature_bitmap)
+{
+       struct wmi_ext_resource_config_10_4_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+       if (!skb)
+               return ERR_PTR(-ENOMEM);
+
+       cmd = (struct wmi_ext_resource_config_10_4_cmd *)skb->data;
+       cmd->host_platform_config = __cpu_to_le32(type);
+       cmd->fw_feature_bitmap = __cpu_to_le32(fw_feature_bitmap);
+
+       ath10k_dbg(ar, ATH10K_DBG_WMI,
+                  "wmi ext resource config host type %d firmware feature bitmap %08x\n",
+                  type, fw_feature_bitmap);
+       return skb;
+}
+
 static const struct wmi_ops wmi_ops = {
        .rx = ath10k_wmi_op_rx,
        .map_svc = wmi_main_svc_map,
@@ -7810,6 +7855,7 @@ static const struct wmi_ops wmi_10_4_ops = {
        .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
        .gen_delba_send = ath10k_wmi_op_gen_delba_send,
        .fw_stats_fill = ath10k_wmi_10_4_op_fw_stats_fill,
+       .ext_resource_config = ath10k_wmi_10_4_ext_resource_config,
 
        /* shared with 10.2 */
        .gen_request_stats = ath10k_wmi_op_gen_request_stats,
@@ -7819,7 +7865,7 @@ static const struct wmi_ops wmi_10_4_ops = {
 
 int ath10k_wmi_attach(struct ath10k *ar)
 {
-       switch (ar->wmi.op_version) {
+       switch (ar->running_fw->fw_file.wmi_op_version) {
        case ATH10K_FW_WMI_OP_VERSION_10_4:
                ar->wmi.ops = &wmi_10_4_ops;
                ar->wmi.cmd = &wmi_10_4_cmd_map;
@@ -7861,7 +7907,7 @@ int ath10k_wmi_attach(struct ath10k *ar)
        case ATH10K_FW_WMI_OP_VERSION_UNSET:
        case ATH10K_FW_WMI_OP_VERSION_MAX:
                ath10k_err(ar, "unsupported WMI op version: %d\n",
-                          ar->wmi.op_version);
+                          ar->running_fw->fw_file.wmi_op_version);
                return -EINVAL;
        }
 
index 4d3cbc44fcd27851ec46a7178e42d04f69ed3701..db2553522d8b73ab5b8b5d2f4405ce7dc4b89655 100644 (file)
@@ -180,6 +180,9 @@ enum wmi_service {
        WMI_SERVICE_MESH_NON_11S,
        WMI_SERVICE_PEER_STATS,
        WMI_SERVICE_RESTRT_CHNL_SUPPORT,
+       WMI_SERVICE_TX_MODE_PUSH_ONLY,
+       WMI_SERVICE_TX_MODE_PUSH_PULL,
+       WMI_SERVICE_TX_MODE_DYNAMIC,
 
        /* keep last */
        WMI_SERVICE_MAX,
@@ -302,6 +305,9 @@ enum wmi_10_4_service {
        WMI_10_4_SERVICE_RESTRT_CHNL_SUPPORT,
        WMI_10_4_SERVICE_PEER_STATS,
        WMI_10_4_SERVICE_MESH_11S,
+       WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY,
+       WMI_10_4_SERVICE_TX_MODE_PUSH_PULL,
+       WMI_10_4_SERVICE_TX_MODE_DYNAMIC,
 };
 
 static inline char *wmi_service_name(int service_id)
@@ -396,6 +402,9 @@ static inline char *wmi_service_name(int service_id)
        SVCSTR(WMI_SERVICE_MESH_NON_11S);
        SVCSTR(WMI_SERVICE_PEER_STATS);
        SVCSTR(WMI_SERVICE_RESTRT_CHNL_SUPPORT);
+       SVCSTR(WMI_SERVICE_TX_MODE_PUSH_ONLY);
+       SVCSTR(WMI_SERVICE_TX_MODE_PUSH_PULL);
+       SVCSTR(WMI_SERVICE_TX_MODE_DYNAMIC);
        default:
                return NULL;
        }
@@ -405,8 +414,8 @@ static inline char *wmi_service_name(int service_id)
 
 #define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
        ((svc_id) < (len) && \
-        __le32_to_cpu((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \
-        BIT((svc_id)%(sizeof(u32))))
+        __le32_to_cpu((wmi_svc_bmap)[(svc_id) / (sizeof(u32))]) & \
+        BIT((svc_id) % (sizeof(u32))))
 
 #define SVCMAP(x, y, len) \
        do { \
@@ -643,6 +652,12 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
               WMI_SERVICE_PEER_STATS, len);
        SVCMAP(WMI_10_4_SERVICE_MESH_11S,
               WMI_SERVICE_MESH_11S, len);
+       SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY,
+              WMI_SERVICE_TX_MODE_PUSH_ONLY, len);
+       SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_PULL,
+              WMI_SERVICE_TX_MODE_PUSH_PULL, len);
+       SVCMAP(WMI_10_4_SERVICE_TX_MODE_DYNAMIC,
+              WMI_SERVICE_TX_MODE_DYNAMIC, len);
 }
 
 #undef SVCMAP
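
WMI_SERVICE_IS_ENABLED() above is a bitmap membership test over an array of little-endian 32-bit words; note that its historical index arithmetic divides by sizeof(u32), i.e. 4, so the generic sketch below (32 bits per word) illustrates the technique rather than the driver's exact layout:

    #include <stdint.h>
    #include <stdio.h>

    /* generic layout: 32 service bits per 32-bit word */
    static int svc_is_enabled(const uint32_t *bmap, unsigned int id,
                              unsigned int nwords)
    {
            if (id >= nwords * 32)
                    return 0;
            return !!(bmap[id / 32] & (1u << (id % 32)));
    }

    int main(void)
    {
            uint32_t bmap[4] = { 0 };

            bmap[37 / 32] |= 1u << (37 % 32);   /* enable service 37 */

            printf("svc 37: %d\n", svc_is_enabled(bmap, 37, 4));
            printf("svc 38: %d\n", svc_is_enabled(bmap, 38, 4));
            return 0;
    }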
@@ -816,6 +831,7 @@ struct wmi_cmd_map {
        u32 set_cca_params_cmdid;
        u32 pdev_bss_chan_info_request_cmdid;
        u32 pdev_enable_adaptive_cca_cmdid;
+       u32 ext_resource_cfg_cmdid;
 };
 
 /*
@@ -1308,7 +1324,7 @@ enum wmi_10x_event_id {
        WMI_10X_PDEV_TPC_CONFIG_EVENTID,
 
        WMI_10X_GPIO_INPUT_EVENTID,
-       WMI_10X_PDEV_UTF_EVENTID = WMI_10X_END_EVENTID-1,
+       WMI_10X_PDEV_UTF_EVENTID = WMI_10X_END_EVENTID - 1,
 };
 
 enum wmi_10_2_cmd_id {
@@ -2041,8 +2057,8 @@ struct wmi_10x_service_ready_event {
        struct wlan_host_mem_req mem_reqs[0];
 } __packed;
 
-#define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ)
-#define WMI_UNIFIED_READY_TIMEOUT_HZ (5*HZ)
+#define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ)
+#define WMI_UNIFIED_READY_TIMEOUT_HZ (5 * HZ)
 
 struct wmi_ready_event {
        __le32 sw_version;
@@ -2660,13 +2676,43 @@ struct wmi_resource_config_10_4 {
         */
        __le32 iphdr_pad_config;
 
-       /* qwrap configuration
+       /* qwrap configuration (bits 15-0)
         * 1  - This is qwrap configuration
         * 0  - This is not qwrap
+        *
+        * Bits 31-16 are alloc_frag_desc_for_data_pkt (1 enables, 0 disables)
+        * In order to get ack-RSSI reporting and to specify the tx-rate for
+        * individual frames, this option must be enabled.  This uses an extra
+        * 4 bytes per tx-msdu descriptor, so don't enable it unless you need it.
         */
        __le32 qwrap_config;
 } __packed;
 
+/**
+ * enum wmi_10_4_feature_mask - WMI 10.4 feature enable/disable flags
+ * @WMI_10_4_LTEU_SUPPORT: LTEU config
+ * @WMI_10_4_COEX_GPIO_SUPPORT: COEX GPIO config
+ * @WMI_10_4_AUX_RADIO_SPECTRAL_INTF: AUX Radio Enhancement for spectral scan
+ * @WMI_10_4_AUX_RADIO_CHAN_LOAD_INTF: AUX Radio Enhancement for chan load scan
+ * @WMI_10_4_BSS_CHANNEL_INFO_64: BSS channel info stats
+ * @WMI_10_4_PEER_STATS: Per station stats
+ */
+enum wmi_10_4_feature_mask {
+       WMI_10_4_LTEU_SUPPORT                   = BIT(0),
+       WMI_10_4_COEX_GPIO_SUPPORT              = BIT(1),
+       WMI_10_4_AUX_RADIO_SPECTRAL_INTF        = BIT(2),
+       WMI_10_4_AUX_RADIO_CHAN_LOAD_INTF       = BIT(3),
+       WMI_10_4_BSS_CHANNEL_INFO_64            = BIT(4),
+       WMI_10_4_PEER_STATS                     = BIT(5),
+};
+
+struct wmi_ext_resource_config_10_4_cmd {
+       /* contains enum wmi_host_platform_type */
+       __le32 host_platform_config;
+       /* see enum wmi_10_4_feature_mask */
+       __le32 fw_feature_bitmap;
+};
+
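
wmi_ext_resource_config_10_4_cmd above keeps both fields as __le32, so the sender converts with __cpu_to_le32() before handing the skb to ath10k_wmi_cmd_send(), as ath10k_wmi_10_4_ext_resource_config() does earlier in this patch. A userspace approximation of that packing (cpu_to_le32_demo stands in for the kernel helper and is an identity on a little-endian host):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t cpu_to_le32_demo(uint32_t v) { return v; }

    enum host_platform_type { HOST_PLATFORM_HIGH_PERF, HOST_PLATFORM_LOW_PERF };

    struct ext_resource_config_cmd {
            uint32_t host_platform_config;  /* enum host_platform_type */
            uint32_t fw_feature_bitmap;     /* feature enable bits */
    };

    int main(void)
    {
            struct ext_resource_config_cmd cmd = {
                    .host_platform_config =
                            cpu_to_le32_demo(HOST_PLATFORM_HIGH_PERF),
                    .fw_feature_bitmap =
                            cpu_to_le32_demo(1u << 5),  /* e.g. a PEER_STATS bit */
            };

            printf("platform %u bitmap %#x\n",
                   (unsigned int)cmd.host_platform_config,
                   (unsigned int)cmd.fw_feature_bitmap);
            return 0;
    }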
 /* structure describing a host memory chunk. */
 struct host_memory_chunk {
        /* id of the request that is passed up in service ready */
@@ -3037,11 +3083,17 @@ struct wmi_10_4_mgmt_rx_event {
        u8 buf[0];
 } __packed;
 
+struct wmi_mgmt_rx_ext_info {
+       __le64 rx_mac_timestamp;
+} __packed __aligned(4);
+
 #define WMI_RX_STATUS_OK                       0x00
 #define WMI_RX_STATUS_ERR_CRC                  0x01
 #define WMI_RX_STATUS_ERR_DECRYPT              0x08
 #define WMI_RX_STATUS_ERR_MIC                  0x10
 #define WMI_RX_STATUS_ERR_KEY_CACHE_MISS       0x20
+/* Extension data at the end of mgmt frame */
+#define WMI_RX_STATUS_EXT_INFO         0x40
 
 #define PHY_ERROR_GEN_SPECTRAL_SCAN            0x26
 #define PHY_ERROR_GEN_FALSE_RADAR_EXT          0x24
@@ -4072,6 +4124,13 @@ enum wmi_stats_id {
        WMI_STAT_VDEV_RATE = BIT(5),
 };
 
+enum wmi_10_4_stats_id {
+       WMI_10_4_STAT_PEER              = BIT(0),
+       WMI_10_4_STAT_AP                = BIT(1),
+       WMI_10_4_STAT_INST              = BIT(2),
+       WMI_10_4_STAT_PEER_EXTD         = BIT(3),
+};
+
 struct wlan_inst_rssi_args {
        __le16 cfg_retry_count;
        __le16 retry_count;
@@ -4271,6 +4330,15 @@ struct wmi_10_4_peer_stats {
        __le32 peer_rssi_changed;
 } __packed;
 
+struct wmi_10_4_peer_extd_stats {
+       struct wmi_10_4_peer_stats common;
+       struct wmi_mac_addr peer_macaddr;
+       __le32 inactive_time;
+       __le32 peer_chain_rssi;
+       __le32 rx_duration;
+       __le32 reserved[10];
+} __packed;
+
 struct wmi_10_2_pdev_ext_stats {
        __le32 rx_rssi_comb;
        __le32 rx_rssi[4];
@@ -4336,14 +4404,14 @@ enum wmi_vdev_subtype_10_4 {
 /*
  * Indicates that the AP VDEV uses a hidden SSID. Only valid for
  *  AP/GO */
-#define WMI_VDEV_START_HIDDEN_SSID  (1<<0)
+#define WMI_VDEV_START_HIDDEN_SSID  (1 << 0)
 /*
  * Indicates if robust management frame/management frame
  *  protection is enabled. For GO/AP vdevs, it indicates that
  *  it may support station/client associations with RMF enabled.
  *  For STA/client vdevs, it indicates that the STA will
  *  associate with an AP with RMF enabled. */
-#define WMI_VDEV_START_PMF_ENABLED  (1<<1)
+#define WMI_VDEV_START_PMF_ENABLED  (1 << 1)
 
 struct wmi_p2p_noa_descriptor {
        __le32 type_count; /* 255: continuous schedule, 0: reserved */
@@ -4582,6 +4650,7 @@ struct wmi_vdev_param_map {
        u32 meru_vc;
        u32 rx_decap_type;
        u32 bw_nss_ratemask;
+       u32 set_tsf;
 };
 
 #define WMI_VDEV_PARAM_UNSUPPORTED 0
@@ -4838,6 +4907,7 @@ enum wmi_10x_vdev_param {
        WMI_10X_VDEV_PARAM_RTS_FIXED_RATE,
        WMI_10X_VDEV_PARAM_VHT_SGIMASK,
        WMI_10X_VDEV_PARAM_VHT80_RATEMASK,
+       WMI_10X_VDEV_PARAM_TSF_INCREMENT,
 };
 
 enum wmi_10_4_vdev_param {
@@ -4907,6 +4977,12 @@ enum wmi_10_4_vdev_param {
        WMI_10_4_VDEV_PARAM_MERU_VC,
        WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
        WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
+       WMI_10_4_VDEV_PARAM_SENSOR_AP,
+       WMI_10_4_VDEV_PARAM_BEACON_RATE,
+       WMI_10_4_VDEV_PARAM_DTIM_ENABLE_CTS,
+       WMI_10_4_VDEV_PARAM_STA_KICKOUT,
+       WMI_10_4_VDEV_PARAM_CAPABILITIES,
+       WMI_10_4_VDEV_PARAM_TSF_INCREMENT,
 };
 
 #define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0)
@@ -5281,7 +5357,7 @@ enum wmi_sta_ps_param_pspoll_count {
 #define WMI_UAPSD_AC_TYPE_TRIG 1
 
 #define WMI_UAPSD_AC_BIT_MASK(ac, type) \
-       ((type ==  WMI_UAPSD_AC_TYPE_DELI) ? (1<<(ac<<1)) : (1<<((ac<<1)+1)))
+       ((type ==  WMI_UAPSD_AC_TYPE_DELI) ? (1 << (ac << 1)) : (1 << ((ac << 1) + 1)))
 
 enum wmi_sta_ps_param_uapsd {
        WMI_STA_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
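WMI_UAPSD_AC_BIT_MASK() above packs each access class as a (delivery, trigger) bit pair, which is why AC0_DELIVERY_EN starts at bit 0 and the trigger bit is its odd neighbour. A sketch of the same arithmetic (this version also parenthesizes the ac and type arguments, which the original leaves to its call sites):

    #include <stdio.h>

    #define UAPSD_AC_TYPE_DELI 0
    #define UAPSD_AC_TYPE_TRIG 1

    #define UAPSD_AC_BIT_MASK(ac, type) \
            ((type) == UAPSD_AC_TYPE_DELI ? \
             (1 << ((ac) << 1)) : (1 << (((ac) << 1) + 1)))

    int main(void)
    {
            int ac;

            for (ac = 0; ac < 4; ac++)
                    printf("AC%d: delivery %#x trigger %#x\n", ac,
                           UAPSD_AC_BIT_MASK(ac, UAPSD_AC_TYPE_DELI),
                           UAPSD_AC_BIT_MASK(ac, UAPSD_AC_TYPE_TRIG));
            return 0;
    }
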
@@ -5696,7 +5772,7 @@ struct wmi_rate_set {
         * the rates are filled from least significant byte to most
         * significant byte.
         */
-       __le32 rates[(MAX_SUPPORTED_RATES/4)+1];
+       __le32 rates[(MAX_SUPPORTED_RATES / 4) + 1];
 } __packed;
 
 struct wmi_rate_set_arg {
@@ -6116,6 +6192,7 @@ struct wmi_mgmt_rx_ev_arg {
        __le32 phy_mode;
        __le32 buf_len;
        __le32 status; /* %WMI_RX_STATUS_ */
+       struct wmi_mgmt_rx_ext_info ext_info;
 };
 
 struct wmi_ch_info_ev_arg {
@@ -6401,6 +6478,11 @@ struct wmi_pdev_set_adaptive_cca_params {
        __le32 cca_detect_margin;
 } __packed;
 
+enum wmi_host_platform_type {
+       WMI_HOST_PLATFORM_HIGH_PERF,
+       WMI_HOST_PLATFORM_LOW_PERF,
+};
+
 struct ath10k;
 struct ath10k_vif;
 struct ath10k_fw_stats_pdev;
index 8e02b381990f138c7e18c70b4d2789f6460d868e..77100d42f401f16c37a46afcf70e71007f5c7a11 100644 (file)
@@ -233,7 +233,7 @@ int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
        mutex_lock(&ar->conf_mutex);
 
        if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
-                             ar->fw_features))) {
+                             ar->running_fw->fw_file.fw_features))) {
                ret = 1;
                goto exit;
        }
@@ -285,7 +285,7 @@ int ath10k_wow_op_resume(struct ieee80211_hw *hw)
        mutex_lock(&ar->conf_mutex);
 
        if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
-                             ar->fw_features))) {
+                             ar->running_fw->fw_file.fw_features))) {
                ret = 1;
                goto exit;
        }
@@ -325,7 +325,8 @@ exit:
 
 int ath10k_wow_init(struct ath10k *ar)
 {
-       if (!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, ar->fw_features))
+       if (!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
+                     ar->running_fw->fw_file.fw_features))
                return 0;
 
        if (WARN_ON(!test_bit(WMI_SERVICE_WOW, ar->wmi.svc_map)))
index 38be2702c0e23a485f886315a3d5966c13ba3848..0624333f5430d96ef95b62356c4d160146611911 100644 (file)
@@ -279,7 +279,7 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
                if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL)
                        ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
                return;
-       } else if (ah->ah_current_channel->band == IEEE80211_BAND_2GHZ) {
+       } else if (ah->ah_current_channel->band == NL80211_BAND_2GHZ) {
                /* beacon RSSI is low. In B/G mode turn off OFDM weak signal
                 * detection and zero firstep level to maximize CCK sensitivity */
                ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
index ba12f7f4061dd0f481c2b228a0ecebd2f7518cd3..67fedb61fcc02dd3603658ad769ecbca928ccdaa 100644 (file)
@@ -1265,10 +1265,10 @@ struct ath5k_hw {
        void __iomem            *iobase;        /* address of the device */
        struct mutex            lock;           /* dev-level lock */
        struct ieee80211_hw     *hw;            /* IEEE 802.11 common */
-       struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+       struct ieee80211_supported_band sbands[NUM_NL80211_BANDS];
        struct ieee80211_channel channels[ATH_CHAN_MAX];
-       struct ieee80211_rate   rates[IEEE80211_NUM_BANDS][AR5K_MAX_RATES];
-       s8                      rate_idx[IEEE80211_NUM_BANDS][AR5K_MAX_RATES];
+       struct ieee80211_rate   rates[NUM_NL80211_BANDS][AR5K_MAX_RATES];
+       s8                      rate_idx[NUM_NL80211_BANDS][AR5K_MAX_RATES];
        enum nl80211_iftype     opmode;
 
 #ifdef CONFIG_ATH5K_DEBUG
@@ -1532,7 +1532,7 @@ int ath5k_eeprom_mode_from_channel(struct ath5k_hw *ah,
 
 /* Protocol Control Unit Functions */
 /* Helpers */
-int ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum ieee80211_band band,
+int ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum nl80211_band band,
                int len, struct ieee80211_rate *rate, bool shortpre);
 unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah);
 unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah);
@@ -1611,7 +1611,7 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel);
 
 /* PHY functions */
 /* Misc PHY functions */
-u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band);
+u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum nl80211_band band);
 int ath5k_hw_phy_disable(struct ath5k_hw *ah);
 /* Gain_F optimization */
 enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah);
index 66b6366158b90a92054dbb9ce5cd97a665abc65c..233054bd6b52c4b4c852d995602422d6ccbbe472 100644 (file)
@@ -152,7 +152,7 @@ int ath5k_hw_init(struct ath5k_hw *ah)
        ah->ah_phy_revision = ath5k_hw_reg_read(ah, AR5K_PHY_CHIP_ID) &
                        0xffffffff;
        ah->ah_radio_5ghz_revision = ath5k_hw_radio_revision(ah,
-                       IEEE80211_BAND_5GHZ);
+                       NL80211_BAND_5GHZ);
 
        /* Try to identify radio chip based on its srev */
        switch (ah->ah_radio_5ghz_revision & 0xf0) {
@@ -160,14 +160,14 @@ int ath5k_hw_init(struct ath5k_hw *ah)
                ah->ah_radio = AR5K_RF5111;
                ah->ah_single_chip = false;
                ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
-                                                       IEEE80211_BAND_2GHZ);
+                                                       NL80211_BAND_2GHZ);
                break;
        case AR5K_SREV_RAD_5112:
        case AR5K_SREV_RAD_2112:
                ah->ah_radio = AR5K_RF5112;
                ah->ah_single_chip = false;
                ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
-                                                       IEEE80211_BAND_2GHZ);
+                                                       NL80211_BAND_2GHZ);
                break;
        case AR5K_SREV_RAD_2413:
                ah->ah_radio = AR5K_RF2413;
@@ -204,7 +204,7 @@ int ath5k_hw_init(struct ath5k_hw *ah)
                        ah->ah_radio = AR5K_RF5111;
                        ah->ah_single_chip = false;
                        ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
-                                                       IEEE80211_BAND_2GHZ);
+                                                       NL80211_BAND_2GHZ);
                } else if (ah->ah_mac_version == (AR5K_SREV_AR2425 >> 4) ||
                           ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4) ||
                           ah->ah_phy_revision == AR5K_SREV_PHY_2425) {
index 3d946d8b2db2441222d4ea349fe4909de8e6dcf2..d98fd421c7ec58c3c815947a5fd8a0597eada4d5 100644 (file)
@@ -268,15 +268,15 @@ static void ath5k_reg_notifier(struct wiphy *wiphy,
  * Returns true for the channel numbers used.
  */
 #ifdef CONFIG_ATH5K_TEST_CHANNELS
-static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
+static bool ath5k_is_standard_channel(short chan, enum nl80211_band band)
 {
        return true;
 }
 
 #else
-static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
+static bool ath5k_is_standard_channel(short chan, enum nl80211_band band)
 {
-       if (band == IEEE80211_BAND_2GHZ && chan <= 14)
+       if (band == NL80211_BAND_2GHZ && chan <= 14)
                return true;
 
        return  /* UNII 1,2 */
@@ -297,18 +297,18 @@ ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
                unsigned int mode, unsigned int max)
 {
        unsigned int count, size, freq, ch;
-       enum ieee80211_band band;
+       enum nl80211_band band;
 
        switch (mode) {
        case AR5K_MODE_11A:
                /* 1..220, but 2GHz frequencies are filtered by check_channel */
                size = 220;
-               band = IEEE80211_BAND_5GHZ;
+               band = NL80211_BAND_5GHZ;
                break;
        case AR5K_MODE_11B:
        case AR5K_MODE_11G:
                size = 26;
-               band = IEEE80211_BAND_2GHZ;
+               band = NL80211_BAND_2GHZ;
                break;
        default:
                ATH5K_WARN(ah, "bad mode, not copying channels\n");
@@ -363,13 +363,13 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
        int max_c, count_c = 0;
        int i;
 
-       BUILD_BUG_ON(ARRAY_SIZE(ah->sbands) < IEEE80211_NUM_BANDS);
+       BUILD_BUG_ON(ARRAY_SIZE(ah->sbands) < NUM_NL80211_BANDS);
        max_c = ARRAY_SIZE(ah->channels);
 
        /* 2GHz band */
-       sband = &ah->sbands[IEEE80211_BAND_2GHZ];
-       sband->band = IEEE80211_BAND_2GHZ;
-       sband->bitrates = &ah->rates[IEEE80211_BAND_2GHZ][0];
+       sband = &ah->sbands[NL80211_BAND_2GHZ];
+       sband->band = NL80211_BAND_2GHZ;
+       sband->bitrates = &ah->rates[NL80211_BAND_2GHZ][0];
 
        if (test_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode)) {
                /* G mode */
@@ -381,7 +381,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
                sband->n_channels = ath5k_setup_channels(ah, sband->channels,
                                        AR5K_MODE_11G, max_c);
 
-               hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
+               hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
                count_c = sband->n_channels;
                max_c -= count_c;
        } else if (test_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode)) {
@@ -407,7 +407,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
                sband->n_channels = ath5k_setup_channels(ah, sband->channels,
                                        AR5K_MODE_11B, max_c);
 
-               hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
+               hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
                count_c = sband->n_channels;
                max_c -= count_c;
        }
@@ -415,9 +415,9 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
 
        /* 5GHz band, A mode */
        if (test_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode)) {
-               sband = &ah->sbands[IEEE80211_BAND_5GHZ];
-               sband->band = IEEE80211_BAND_5GHZ;
-               sband->bitrates = &ah->rates[IEEE80211_BAND_5GHZ][0];
+               sband = &ah->sbands[NL80211_BAND_5GHZ];
+               sband->band = NL80211_BAND_5GHZ;
+               sband->bitrates = &ah->rates[NL80211_BAND_5GHZ][0];
 
                memcpy(sband->bitrates, &ath5k_rates[4],
                       sizeof(struct ieee80211_rate) * 8);
@@ -427,7 +427,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
                sband->n_channels = ath5k_setup_channels(ah, sband->channels,
                                        AR5K_MODE_11A, max_c);
 
-               hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
+               hw->wiphy->bands[NL80211_BAND_5GHZ] = sband;
        }
        ath5k_setup_rate_idx(ah, sband);
 
index 654a1e33f8278743fdb6034e1681b2a3e4aa8515..929d7ccc031c2197d55d172f8a633c18425df35e 100644 (file)
@@ -1043,14 +1043,14 @@ ath5k_debug_dump_bands(struct ath5k_hw *ah)
 
        BUG_ON(!ah->sbands);
 
-       for (b = 0; b < IEEE80211_NUM_BANDS; b++) {
+       for (b = 0; b < NUM_NL80211_BANDS; b++) {
                struct ieee80211_supported_band *band = &ah->sbands[b];
                char bname[6];
                switch (band->band) {
-               case IEEE80211_BAND_2GHZ:
+               case NL80211_BAND_2GHZ:
                        strcpy(bname, "2 GHz");
                        break;
-               case IEEE80211_BAND_5GHZ:
+               case NL80211_BAND_5GHZ:
                        strcpy(bname, "5 GHz");
                        break;
                default:
index 803030fd17d3b9203dd74535da40fe1de08284d6..6a2a168567630fdbf8ea47818c3a336876b04df4 100644 (file)
@@ -77,7 +77,7 @@ static const struct pci_device_id ath5k_led_devices[] = {
        /* HP Compaq CQ60-206US (ddreggors@jumptv.com) */
        { ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137a), ATH_LED(3, 1) },
        /* HP Compaq C700 (nitrousnrg@gmail.com) */
-       { ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 1) },
+       { ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 0) },
        /* LiteOn AR5BXB63 (magooz@salug.it) */
        { ATH_SDEVICE(PCI_VENDOR_ID_ATHEROS, 0x3067), ATH_LED(3, 0) },
        /* IBM-specific AR5212 (all others) */
index bf29da5e90da05e7d75a18818d5f086b000e4229..fc47b70988b1692938300e29c3dd20293efaff21 100644 (file)
@@ -110,7 +110,7 @@ static const unsigned int ack_rates_high[] =
  * bwmodes.
  */
 int
-ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum ieee80211_band band,
+ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum nl80211_band band,
                int len, struct ieee80211_rate *rate, bool shortpre)
 {
        int sifs, preamble, plcp_bits, sym_time;
@@ -221,7 +221,7 @@ ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
        case AR5K_BWMODE_DEFAULT:
                sifs = AR5K_INIT_SIFS_DEFAULT_BG;
        default:
-               if (channel->band == IEEE80211_BAND_5GHZ)
+               if (channel->band == NL80211_BAND_5GHZ)
                        sifs = AR5K_INIT_SIFS_DEFAULT_A;
                break;
        }
@@ -279,7 +279,7 @@ ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
        struct ieee80211_rate *rate;
        unsigned int i;
        /* 802.11g covers both OFDM and CCK */
-       u8 band = IEEE80211_BAND_2GHZ;
+       u8 band = NL80211_BAND_2GHZ;
 
        /* Write rate duration table */
        for (i = 0; i < ah->sbands[band].n_bitrates; i++) {
index 0fce1c76638e9c9ae103e01ac6752dced8b1670d..641b13a279e18a0a681f87ee1965184cc97a1070 100644 (file)
 /**
  * ath5k_hw_radio_revision() - Get the PHY Chip revision
  * @ah: The &struct ath5k_hw
- * @band: One of enum ieee80211_band
+ * @band: One of enum nl80211_band
  *
  * Returns the revision number of a 2GHz, 5GHz or single chip
  * radio.
  */
 u16
-ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
+ath5k_hw_radio_revision(struct ath5k_hw *ah, enum nl80211_band band)
 {
        unsigned int i;
        u32 srev;
@@ -91,10 +91,10 @@ ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
         * Set the radio chip access register
         */
        switch (band) {
-       case IEEE80211_BAND_2GHZ:
+       case NL80211_BAND_2GHZ:
                ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_2GHZ, AR5K_PHY(0));
                break;
-       case IEEE80211_BAND_5GHZ:
+       case NL80211_BAND_5GHZ:
                ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0));
                break;
        default:
@@ -138,11 +138,11 @@ ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel)
        u16 freq = channel->center_freq;
 
        /* Check if the channel is in our supported range */
-       if (channel->band == IEEE80211_BAND_2GHZ) {
+       if (channel->band == NL80211_BAND_2GHZ) {
                if ((freq >= ah->ah_capabilities.cap_range.range_2ghz_min) &&
                    (freq <= ah->ah_capabilities.cap_range.range_2ghz_max))
                        return true;
-       } else if (channel->band == IEEE80211_BAND_5GHZ)
+       } else if (channel->band == NL80211_BAND_5GHZ)
                if ((freq >= ah->ah_capabilities.cap_range.range_5ghz_min) &&
                    (freq <= ah->ah_capabilities.cap_range.range_5ghz_max))
                        return true;
@@ -743,7 +743,7 @@ done:
 /**
  * ath5k_hw_rfgain_init() - Write initial RF gain settings to hw
  * @ah: The &struct ath5k_hw
- * @band: One of enum ieee80211_band
+ * @band: One of enum nl80211_band
  *
  * Write initial RF gain table to set the RF sensitivity.
  *
@@ -751,7 +751,7 @@ done:
  * with Gain_F calibration
  */
 static int
-ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band)
+ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum nl80211_band band)
 {
        const struct ath5k_ini_rfgain *ath5k_rfg;
        unsigned int i, size, index;
@@ -786,7 +786,7 @@ ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band)
                return -EINVAL;
        }
 
-       index = (band == IEEE80211_BAND_2GHZ) ? 1 : 0;
+       index = (band == NL80211_BAND_2GHZ) ? 1 : 0;
 
        for (i = 0; i < size; i++) {
                AR5K_REG_WAIT(i);
@@ -917,7 +917,7 @@ ath5k_hw_rfregs_init(struct ath5k_hw *ah,
        }
 
        /* Set Output and Driver bias current (OB/DB) */
-       if (channel->band == IEEE80211_BAND_2GHZ) {
+       if (channel->band == NL80211_BAND_2GHZ) {
 
                if (channel->hw_value == AR5K_MODE_11B)
                        ee_mode = AR5K_EEPROM_MODE_11B;
@@ -944,7 +944,7 @@ ath5k_hw_rfregs_init(struct ath5k_hw *ah,
                                                AR5K_RF_DB_2GHZ, true);
 
        /* RF5111 always needs OB/DB for 5GHz, even if we use 2GHz */
-       } else if ((channel->band == IEEE80211_BAND_5GHZ) ||
+       } else if ((channel->band == NL80211_BAND_5GHZ) ||
                        (ah->ah_radio == AR5K_RF5111)) {
 
                /* For 11a, Turbo and XR we need to choose
@@ -1145,7 +1145,7 @@ ath5k_hw_rfregs_init(struct ath5k_hw *ah,
        }
 
        if (ah->ah_radio == AR5K_RF5413 &&
-       channel->band == IEEE80211_BAND_2GHZ) {
+       channel->band == NL80211_BAND_2GHZ) {
 
                ath5k_hw_rfb_op(ah, rf_regs, 1, AR5K_RF_DERBY_CHAN_SEL_MODE,
                                                                        true);
@@ -1270,7 +1270,7 @@ ath5k_hw_rf5111_channel(struct ath5k_hw *ah,
         */
        data0 = data1 = 0;
 
-       if (channel->band == IEEE80211_BAND_2GHZ) {
+       if (channel->band == NL80211_BAND_2GHZ) {
                /* Map 2GHz channel to 5GHz Atheros channel ID */
                ret = ath5k_hw_rf5111_chan2athchan(
                        ieee80211_frequency_to_channel(channel->center_freq),
@@ -1446,7 +1446,7 @@ ath5k_hw_channel(struct ath5k_hw *ah,
                        "channel frequency (%u MHz) out of supported "
                        "band range\n",
                        channel->center_freq);
-                       return -EINVAL;
+               return -EINVAL;
        }
 
        /*
@@ -1919,7 +1919,7 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
        /* Convert current frequency to fbin value (the same way channels
         * are stored on EEPROM, check out ath5k_eeprom_bin2freq) and scale
         * up by 2 so we can compare it later */
-       if (channel->band == IEEE80211_BAND_2GHZ) {
+       if (channel->band == NL80211_BAND_2GHZ) {
                chan_fbin = (channel->center_freq - 2300) * 10;
                freq_band = AR5K_EEPROM_BAND_2GHZ;
        } else {
@@ -1983,7 +1983,7 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
                        symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz / 4;
                        break;
                default:
-                       if (channel->band == IEEE80211_BAND_5GHZ) {
+                       if (channel->band == NL80211_BAND_5GHZ) {
                                /* Both sample_freq and chip_freq are 40MHz */
                                spur_delta_phase = (spur_offset << 17) / 25;
                                spur_freq_sigma_delta =
index ddaad712c59a814b52370f4b1a46ad8309b58c48..beda11ce34a737a6fba3269c36ae2d47b4cd1369 100644 (file)
@@ -559,7 +559,7 @@ ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
 int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
 {
        struct ieee80211_channel *channel = ah->ah_current_channel;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        struct ieee80211_supported_band *sband;
        struct ieee80211_rate *rate;
        u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock;
@@ -596,10 +596,10 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
         *
         * Also we have different lowest rate for 802.11a
         */
-       if (channel->band == IEEE80211_BAND_5GHZ)
-               band = IEEE80211_BAND_5GHZ;
+       if (channel->band == NL80211_BAND_5GHZ)
+               band = NL80211_BAND_5GHZ;
        else
-               band = IEEE80211_BAND_2GHZ;
+               band = NL80211_BAND_2GHZ;
 
        switch (ah->ah_bwmode) {
        case AR5K_BWMODE_5MHZ:
index 99e62f99a182db5c205a7dca60492b4e312f6f84..56d7925a0c2cdadc30f88c3125abe58610bbd8bb 100644 (file)
@@ -634,7 +634,7 @@ ath5k_hw_on_hold(struct ath5k_hw *ah)
                ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
                        AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA |
                        AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
-                       usleep_range(2000, 2500);
+               usleep_range(2000, 2500);
        } else {
                ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
                        AR5K_RESET_CTL_BASEBAND | bus_flags);
@@ -699,7 +699,7 @@ ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
                ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
                        AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA |
                        AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
-                       usleep_range(2000, 2500);
+               usleep_range(2000, 2500);
        } else {
                if (ath5k_get_bus_type(ah) == ATH_AHB)
                        ret = ath5k_hw_wisoc_reset(ah, AR5K_RESET_CTL_PCU |
@@ -752,7 +752,7 @@ ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
                        clock = AR5K_PHY_PLL_RF5111;            /*Zero*/
                }
 
-               if (channel->band == IEEE80211_BAND_2GHZ) {
+               if (channel->band == NL80211_BAND_2GHZ) {
                        mode |= AR5K_PHY_MODE_FREQ_2GHZ;
                        clock |= AR5K_PHY_PLL_44MHZ;
 
@@ -771,7 +771,7 @@ ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
                                else
                                        mode |= AR5K_PHY_MODE_MOD_DYN;
                        }
-               } else if (channel->band == IEEE80211_BAND_5GHZ) {
+               } else if (channel->band == NL80211_BAND_5GHZ) {
                        mode |= (AR5K_PHY_MODE_FREQ_5GHZ |
                                 AR5K_PHY_MODE_MOD_OFDM);
 
@@ -906,7 +906,7 @@ ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
                u32 data;
                ath5k_hw_reg_write(ah, AR5K_PHY_CCKTXCTL_WORLD,
                                AR5K_PHY_CCKTXCTL);
-               if (channel->band == IEEE80211_BAND_5GHZ)
+               if (channel->band == NL80211_BAND_5GHZ)
                        data = 0xffb81020;
                else
                        data = 0xffb80d20;
index 7f3f94fbf1571612623e1214153f236faf1128af..4e11ba06f08981d544e05ce25a73d26cb3f00922 100644 (file)
@@ -34,7 +34,7 @@
 }
 
 #define CHAN2G(_channel, _freq, _flags) {   \
-       .band           = IEEE80211_BAND_2GHZ,  \
+       .band           = NL80211_BAND_2GHZ,  \
        .hw_value       = (_channel),           \
        .center_freq    = (_freq),              \
        .flags          = (_flags),             \
@@ -43,7 +43,7 @@
 }
 
 #define CHAN5G(_channel, _flags) {                 \
-       .band           = IEEE80211_BAND_5GHZ,      \
+       .band           = NL80211_BAND_5GHZ,      \
        .hw_value       = (_channel),               \
        .center_freq    = 5000 + (5 * (_channel)),  \
        .flags          = (_flags),                 \
@@ -2583,7 +2583,7 @@ void ath6kl_check_wow_status(struct ath6kl *ar)
 }
 #endif
 
-static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum ieee80211_band band,
+static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum nl80211_band band,
                            bool ht_enable)
 {
        struct ath6kl_htcap *htcap = &vif->htcap[band];
@@ -2594,7 +2594,7 @@ static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum ieee80211_band band,
        if (ht_enable) {
                /* Set default ht capabilities */
                htcap->ht_enable = true;
-               htcap->cap_info = (band == IEEE80211_BAND_2GHZ) ?
+               htcap->cap_info = (band == NL80211_BAND_2GHZ) ?
                                   ath6kl_g_htcap : ath6kl_a_htcap;
                htcap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K;
        } else /* Disable ht */
@@ -2609,7 +2609,7 @@ static int ath6kl_restore_htcap(struct ath6kl_vif *vif)
        struct wiphy *wiphy = vif->ar->wiphy;
        int band, ret = 0;
 
-       for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+       for (band = 0; band < NUM_NL80211_BANDS; band++) {
                if (!wiphy->bands[band])
                        continue;
 
@@ -3530,7 +3530,7 @@ static void ath6kl_cfg80211_reg_notify(struct wiphy *wiphy,
                                       struct regulatory_request *request)
 {
        struct ath6kl *ar = wiphy_priv(wiphy);
-       u32 rates[IEEE80211_NUM_BANDS];
+       u32 rates[NUM_NL80211_BANDS];
        int ret, i;
 
        ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
@@ -3555,7 +3555,7 @@ static void ath6kl_cfg80211_reg_notify(struct wiphy *wiphy,
         * changed.
         */
 
-       for (i = 0; i < IEEE80211_NUM_BANDS; i++)
+       for (i = 0; i < NUM_NL80211_BANDS; i++)
                if (wiphy->bands[i])
                        rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1;
 
@@ -3791,8 +3791,8 @@ struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
        vif->listen_intvl_t = ATH6KL_DEFAULT_LISTEN_INTVAL;
        vif->bmiss_time_t = ATH6KL_DEFAULT_BMISS_TIME;
        vif->bg_scan_period = 0;
-       vif->htcap[IEEE80211_BAND_2GHZ].ht_enable = true;
-       vif->htcap[IEEE80211_BAND_5GHZ].ht_enable = true;
+       vif->htcap[NL80211_BAND_2GHZ].ht_enable = true;
+       vif->htcap[NL80211_BAND_5GHZ].ht_enable = true;
 
        memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
        if (fw_vif_idx != 0) {
@@ -3943,9 +3943,9 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
        wiphy->available_antennas_rx = ar->hw.rx_ant;
 
        if (band_2gig)
-               wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
+               wiphy->bands[NL80211_BAND_2GHZ] = &ath6kl_band_2ghz;
        if (band_5gig)
-               wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz;
+               wiphy->bands[NL80211_BAND_5GHZ] = &ath6kl_band_5ghz;
 
        wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
 
index 5f3acfe6015e55b3f4b5f9e8ade1329b68384602..713a571a27ce3b0dc8c02d8d0f6a4599d74f6190 100644 (file)
@@ -623,7 +623,7 @@ struct ath6kl_vif {
        struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1];
        struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1];
        struct aggr_info *aggr_cntxt;
-       struct ath6kl_htcap htcap[IEEE80211_NUM_BANDS];
+       struct ath6kl_htcap htcap[NUM_NL80211_BANDS];
 
        struct timer_list disconnect_timer;
        struct timer_list sched_scan_timer;
index a5e1de75a4a3d1517ef25e9ac77591c8adee31f3..631c3a0c572b698c14a096312ed212eb11d95a2c 100644 (file)
@@ -1584,6 +1584,11 @@ static int ath6kl_wmi_txe_notify_event_rx(struct wmi *wmi, u8 *datap, int len,
        if (len < sizeof(*ev))
                return -EINVAL;
 
+       if (vif->nw_type != INFRA_NETWORK ||
+           !test_bit(ATH6KL_FW_CAPABILITY_TX_ERR_NOTIFY,
+                     vif->ar->fw_capabilities))
+               return -EOPNOTSUPP;
+
        if (vif->sme_state != SME_CONNECTED)
                return -ENOTCONN;
 
@@ -2043,7 +2048,7 @@ int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
        sc->no_cck = cpu_to_le32(no_cck);
        sc->num_ch = num_chan;
 
-       for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+       for (band = 0; band < NUM_NL80211_BANDS; band++) {
                sband = ar->wiphy->bands[band];
 
                if (!sband)
@@ -2765,10 +2770,10 @@ static int ath6kl_set_bitrate_mask64(struct wmi *wmi, u8 if_idx,
        memset(&ratemask, 0, sizeof(ratemask));
 
        /* only check 2.4 and 5 GHz bands, skip the rest */
-       for (band = 0; band <= IEEE80211_BAND_5GHZ; band++) {
+       for (band = 0; band <= NL80211_BAND_5GHZ; band++) {
                /* copy legacy rate mask */
                ratemask[band] = mask->control[band].legacy;
-               if (band == IEEE80211_BAND_5GHZ)
+               if (band == NL80211_BAND_5GHZ)
                        ratemask[band] =
                                mask->control[band].legacy << 4;
 
@@ -2794,9 +2799,9 @@ static int ath6kl_set_bitrate_mask64(struct wmi *wmi, u8 if_idx,
                if (mode == WMI_RATES_MODE_11A ||
                    mode == WMI_RATES_MODE_11A_HT20 ||
                    mode == WMI_RATES_MODE_11A_HT40)
-                       band = IEEE80211_BAND_5GHZ;
+                       band = NL80211_BAND_5GHZ;
                else
-                       band = IEEE80211_BAND_2GHZ;
+                       band = NL80211_BAND_2GHZ;
                cmd->ratemask[mode] = cpu_to_le64(ratemask[band]);
        }
 
@@ -2817,10 +2822,10 @@ static int ath6kl_set_bitrate_mask32(struct wmi *wmi, u8 if_idx,
        memset(&ratemask, 0, sizeof(ratemask));
 
        /* only check 2.4 and 5 GHz bands, skip the rest */
-       for (band = 0; band <= IEEE80211_BAND_5GHZ; band++) {
+       for (band = 0; band <= NL80211_BAND_5GHZ; band++) {
                /* copy legacy rate mask */
                ratemask[band] = mask->control[band].legacy;
-               if (band == IEEE80211_BAND_5GHZ)
+               if (band == NL80211_BAND_5GHZ)
                        ratemask[band] =
                                mask->control[band].legacy << 4;
 
@@ -2844,9 +2849,9 @@ static int ath6kl_set_bitrate_mask32(struct wmi *wmi, u8 if_idx,
                if (mode == WMI_RATES_MODE_11A ||
                    mode == WMI_RATES_MODE_11A_HT20 ||
                    mode == WMI_RATES_MODE_11A_HT40)
-                       band = IEEE80211_BAND_5GHZ;
+                       band = NL80211_BAND_5GHZ;
                else
-                       band = IEEE80211_BAND_2GHZ;
+                       band = NL80211_BAND_2GHZ;
                cmd->ratemask[mode] = cpu_to_le32(ratemask[band]);
        }
 
@@ -3169,7 +3174,7 @@ int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx,
 }
 
 int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx,
-                            enum ieee80211_band band,
+                            enum nl80211_band band,
                             struct ath6kl_htcap *htcap)
 {
        struct sk_buff *skb;
@@ -3182,7 +3187,7 @@ int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx,
        cmd = (struct wmi_set_htcap_cmd *) skb->data;
 
        /*
-        * NOTE: Band in firmware matches enum ieee80211_band, it is unlikely
+        * NOTE: Band in firmware matches enum nl80211_band, it is unlikely
         * this will be changed in firmware. If at all there is any change in
         * band value, the host needs to be fixed.
         */
index 05d25a94c781139a0097676f32817126d72f5e5c..3af464a73b58ca00759b21fba892ad5f64ac81ac 100644 (file)
@@ -2628,7 +2628,7 @@ int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, u8 if_idx, enum wmi_txop_cfg cfg);
 int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx,
                                 u8 keep_alive_intvl);
 int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx,
-                            enum ieee80211_band band,
+                            enum nl80211_band band,
                             struct ath6kl_htcap *htcap);
 int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len);
 
index 8f8793004b9f021c7f689e13dabcf32f10e2c3a0..1b271b99c49eee91a96639ee019d370f61d0cda0 100644 (file)
@@ -274,6 +274,9 @@ void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
        };
        static const int inc[4] = { 0, 100, 0, 0 };
 
+       memset(&mask_m, 0, sizeof(int8_t) * 123);
+       memset(&mask_p, 0, sizeof(int8_t) * 123);
+
        cur_bin = -6000;
        upper = bin + 100;
        lower = bin - 100;
@@ -424,14 +427,9 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
        int tmp, new;
        int i;
 
-       int8_t mask_m[123];
-       int8_t mask_p[123];
        int cur_bb_spur;
        bool is2GHz = IS_CHAN_2GHZ(chan);
 
-       memset(&mask_m, 0, sizeof(int8_t) * 123);
-       memset(&mask_p, 0, sizeof(int8_t) * 123);
-
        for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
                cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
                if (AR_NO_SPUR == cur_bb_spur)
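
The two hunks above (and the matching ar9002 hunk below) hoist the zeroing of the mask_m/mask_p spur masks out of the per-chip callers and into the shared ar5008_hw_cmn_spur_mitigate(), so the scratch arrays are initialised where they are actually used. A sketch of the resulting shape, assuming the arrays are locals of the common helper as the diff implies:

    #include <stdint.h>
    #include <string.h>

    #define SPUR_MASK_LEN 123

    /* Sketch: the shared helper owns and zeroes its scratch masks,
     * so callers no longer carry their own copies or memsets. */
    static void cmn_spur_mitigate_sketch(void)
    {
            int8_t mask_m[SPUR_MASK_LEN];
            int8_t mask_p[SPUR_MASK_LEN];

            memset(mask_m, 0, sizeof(mask_m));
            memset(mask_p, 0, sizeof(mask_p));

            /* ... compute and apply the spur masks ... */
    }
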
index db6624527d9959d3ffd10a32f3fa76cb0b6dff83..53d7445a5d12c1edaaf012e4e853560ec1f75e39 100644 (file)
@@ -178,14 +178,9 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
        int i;
        struct chan_centers centers;
 
-       int8_t mask_m[123];
-       int8_t mask_p[123];
        int cur_bb_spur;
        bool is2GHz = IS_CHAN_2GHZ(chan);
 
-       memset(&mask_m, 0, sizeof(int8_t) * 123);
-       memset(&mask_p, 0, sizeof(int8_t) * 123);
-
        ath9k_hw_get_channel_centers(ah, chan, &centers);
        freq = centers.synth_center;
 
index c38399bc9aa96e84fce4929319599e1d4db694c7..c07866a2fdf96bb9dd8f77d6bc0f9e8c3e44edfc 100644 (file)
@@ -331,7 +331,7 @@ static const u32 ar9300_2p2_baseband_postamble[][5] = {
        {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
        {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
        {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
-       {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
+       {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220},
        {0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
        {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
        {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -351,7 +351,7 @@ static const u32 ar9300_2p2_baseband_postamble[][5] = {
        {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
        {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
        {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
-       {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+       {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
        {0x0000a2d0, 0x00041983, 0x00041983, 0x00041981, 0x00041982},
        {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
        {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
index 0c391997a2f77709cf5f7531dbcdeadc338b4aed..518e649ecff383898288f3f154f9fe7af32897f5 100644 (file)
@@ -1203,12 +1203,12 @@ static void ar9003_hw_tx_iq_cal_reload(struct ath_hw *ah)
 static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
 {
        int offset[8] = {0}, total = 0, test;
-       int agc_out, i, peak_detect_threshold;
+       int agc_out, i, peak_detect_threshold = 0;
 
        if (AR_SREV_9550(ah) || AR_SREV_9531(ah))
                peak_detect_threshold = 8;
-       else
-               peak_detect_threshold = 0;
+       else if (AR_SREV_9561(ah))
+               peak_detect_threshold = 11;
 
        /*
         * Turn off LNA/SW.
@@ -1249,17 +1249,14 @@ static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
                REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
                              AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR, 0x0);
 
-       if (AR_SREV_9003_PCOEM(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
-           AR_SREV_9561(ah)) {
-               if (is_2g)
-                       REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
-                                     AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR,
-                                     peak_detect_threshold);
-               else
-                       REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
-                                     AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR,
-                                     peak_detect_threshold);
-       }
+       if (is_2g)
+               REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+                             AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR,
+                             peak_detect_threshold);
+       else
+               REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+                             AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR,
+                             peak_detect_threshold);
 
        for (i = 6; i > 0; i--) {
                offset[i] = BIT(i - 1);
@@ -1311,9 +1308,6 @@ static void ar9003_hw_do_pcoem_manual_peak_cal(struct ath_hw *ah,
        struct ath9k_hw_cal_data *caldata = ah->caldata;
        int i;
 
-       if (!AR_SREV_9462(ah) && !AR_SREV_9565(ah) && !AR_SREV_9485(ah))
-               return;
-
        if ((ah->caps.hw_caps & ATH9K_HW_CAP_RTT) && !run_rtt_cal)
                return;
 
@@ -1641,14 +1635,12 @@ static bool ar9003_hw_init_cal_soc(struct ath_hw *ah,
 
 skip_tx_iqcal:
        if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) {
-               if (AR_SREV_9330_11(ah) || AR_SREV_9531(ah) || AR_SREV_9550(ah) ||
-                   AR_SREV_9561(ah)) {
-                       for (i = 0; i < AR9300_MAX_CHAINS; i++) {
-                               if (!(ah->rxchainmask & (1 << i)))
-                                       continue;
-                               ar9003_hw_manual_peak_cal(ah, i,
-                                                         IS_CHAN_2GHZ(chan));
-                       }
+               for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+                       if (!(ah->rxchainmask & (1 << i)))
+                               continue;
+
+                       ar9003_hw_manual_peak_cal(ah, i,
+                                                 IS_CHAN_2GHZ(chan));
                }
 
                /*
@@ -1709,7 +1701,7 @@ void ar9003_hw_attach_calib_ops(struct ath_hw *ah)
        struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
        struct ath_hw_ops *ops = ath9k_hw_ops(ah);
 
-       if (AR_SREV_9485(ah) || AR_SREV_9462(ah) || AR_SREV_9565(ah))
+       if (AR_SREV_9003_PCOEM(ah))
                priv_ops->init_cal = ar9003_hw_init_cal_pcoem;
        else
                priv_ops->init_cal = ar9003_hw_init_cal_soc;
index 54ed2f72d35eb27c4a8998a83463afcaae0c8cc1..f68098284c43be41562b3f126cbe1409134a1868 100644 (file)
@@ -3590,8 +3590,8 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
                else
                        gpio = AR9300_EXT_LNA_CTL_GPIO_AR9485;
 
-               ath9k_hw_cfg_output(ah, gpio,
-                                   AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED);
+               ath9k_hw_gpio_request_out(ah, gpio, NULL,
+                                         AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED);
        }
 
        value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
@@ -4097,16 +4097,16 @@ static void ar9003_hw_thermometer_apply(struct ath_hw *ah)
                REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4,
                              AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on);
 
-       therm_on = (thermometer < 0) ? 0 : (thermometer == 0);
+       therm_on = thermometer == 0;
        REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX4,
                      AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
        if (pCap->chip_chainmask & BIT(1)) {
-               therm_on = (thermometer < 0) ? 0 : (thermometer == 1);
+               therm_on = thermometer == 1;
                REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX4,
                              AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
        }
        if (pCap->chip_chainmask & BIT(2)) {
-               therm_on = (thermometer < 0) ? 0 : (thermometer == 2);
+               therm_on = thermometer == 2;
                REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4,
                              AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
        }
index af5ee416a560dab726805c58850cddfe65291e03..0fe9c8378249ff62ecef303251c29091e0b1c655 100644 (file)
@@ -427,21 +427,34 @@ static void ar9003_mci_observation_set_up(struct ath_hw *ah)
        struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
 
        if (mci->config & ATH_MCI_CONFIG_MCI_OBS_MCI) {
-               ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA);
-               ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK);
-               ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
-               ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
+               ath9k_hw_gpio_request_out(ah, 3, NULL,
+                                         AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA);
+               ath9k_hw_gpio_request_out(ah, 2, NULL,
+                                         AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK);
+               ath9k_hw_gpio_request_out(ah, 1, NULL,
+                                         AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
+               ath9k_hw_gpio_request_out(ah, 0, NULL,
+                                         AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
        } else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_TXRX) {
-               ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX);
-               ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX);
-               ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
-               ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
-               ath9k_hw_cfg_output(ah, 5, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+               ath9k_hw_gpio_request_out(ah, 3, NULL,
+                                         AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX);
+               ath9k_hw_gpio_request_out(ah, 2, NULL,
+                                         AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX);
+               ath9k_hw_gpio_request_out(ah, 1, NULL,
+                                         AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
+               ath9k_hw_gpio_request_out(ah, 0, NULL,
+                                         AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
+               ath9k_hw_gpio_request_out(ah, 5, NULL,
+                                         AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
        } else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_BT) {
-               ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
-               ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
-               ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
-               ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
+               ath9k_hw_gpio_request_out(ah, 3, NULL,
+                                         AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
+               ath9k_hw_gpio_request_out(ah, 2, NULL,
+                                         AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
+               ath9k_hw_gpio_request_out(ah, 1, NULL,
+                                         AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
+               ath9k_hw_gpio_request_out(ah, 0, NULL,
+                                         AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
        } else
                return;
 
index 06c1ca6e829053efbd2d8c8620649792324f394d..be14a8e019167b9e21e6b7c98dfab4b9ab52c418 100644 (file)
@@ -1337,11 +1337,11 @@ skip_ws_det:
                                chan->channel,
                                aniState->mrcCCK ? "on" : "off",
                                is_on ? "on" : "off");
-               if (is_on)
-                       ah->stats.ast_ani_ccklow++;
-               else
-                       ah->stats.ast_ani_cckhigh++;
-               aniState->mrcCCK = is_on;
+                       if (is_on)
+                               ah->stats.ast_ani_ccklow++;
+                       else
+                               ah->stats.ast_ani_cckhigh++;
+                       aniState->mrcCCK = is_on;
                }
        break;
        }
index 2c42ff05efa38f507cdd0f54905d8b79c0c77d32..29479afbc4f10fd2da4822bc16bb254d2daadce5 100644 (file)
@@ -40,7 +40,7 @@ static const u32 ar9331_1p1_baseband_postamble[][5] = {
        {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
        {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
        {0x00009e2c, 0x0000001c, 0x0000001c, 0x00003221, 0x00003221},
-       {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222},
+       {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220},
        {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
        {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
        {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -59,7 +59,7 @@ static const u32 ar9331_1p1_baseband_postamble[][5] = {
        {0x0000a284, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-       {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18},
+       {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
        {0x0000a2d0, 0x00071982, 0x00071982, 0x00071982, 0x00071982},
        {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
        {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
index 2154efcd3900514af944619a174db08d2514a140..c4a6ffa55e8cbb76a5dcc4b00571180137cfd546 100644 (file)
@@ -345,7 +345,7 @@ static const u32 ar9331_1p2_baseband_postamble[][5] = {
        {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
        {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
        {0x00009e2c, 0x0000001c, 0x0000001c, 0x00003221, 0x00003221},
-       {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222},
+       {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220},
        {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
        {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
        {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -364,7 +364,7 @@ static const u32 ar9331_1p2_baseband_postamble[][5] = {
        {0x0000a284, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-       {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+       {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
        {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071981},
        {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
        {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
index b995ffe88b33bb8af6ca6a7aaa47e23adee34857..2eb163fc1c18f23fb7c7f54056653fcd92c7fabf 100644 (file)
@@ -245,7 +245,7 @@ static const u32 ar9340_1p0_baseband_postamble[][5] = {
        {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
        {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
        {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
-       {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
+       {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220},
        {0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
        {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
        {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -265,7 +265,7 @@ static const u32 ar9340_1p0_baseband_postamble[][5] = {
        {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
        {0x0000a288, 0x00000220, 0x00000220, 0x00000110, 0x00000110},
        {0x0000a28c, 0x00011111, 0x00011111, 0x00022222, 0x00022222},
-       {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+       {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
        {0x0000a2d0, 0x00041983, 0x00041983, 0x00041982, 0x00041982},
        {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
        {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
index 1b6b4d0cfa97a5df8c2a7e0d631c417377ba17b2..b00dd649453dfd97080d4c2b5f9e1e33f8e8e996 100644 (file)
@@ -59,7 +59,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
        {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
        {0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5},
        {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
-       {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
+       {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c780, 0xcfd5c280},
        {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
        {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
        {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -79,7 +79,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
        {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
        {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
        {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
-       {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+       {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
        {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
        {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
        {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
index dc3adda46e8b92700ee1c274ba734fc2c4ec7a4f..0f8745ec73b1ddabdc223bdc95d24de7de16be29 100644 (file)
@@ -239,7 +239,7 @@ static const u32 ar9462_2p1_baseband_postamble[][5] = {
        {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
        {0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5},
        {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
-       {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
+       {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c780, 0xcfd5c280},
        {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
        {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
        {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -259,7 +259,7 @@ static const u32 ar9462_2p1_baseband_postamble[][5] = {
        {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
        {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
        {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
-       {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+       {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
        {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
        {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
        {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
index ce83ce47a1ca9b7b28dc68e6b414f21236392117..bdf6f107f6f1e25082377e203e0d1a60c046625f 100644 (file)
@@ -1026,7 +1026,7 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = {
        {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e},
        {0x00009e14, 0x31395d53, 0x31396053, 0x312e6053, 0x312e5d53},
        {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
-       {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
+       {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220},
        {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
        {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
        {0x0000a204, 0x01303fc0, 0x01303fc4, 0x01303fc4, 0x01303fc0},
@@ -1044,7 +1044,7 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = {
        {0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0},
        {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-       {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+       {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
        {0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982},
        {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
        {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
index c0b90daa3e3d8646f2e064e0f9bbe561d1f5a7d7..924ae6bde7f1200bbeea3b6798b680831fca70da 100644 (file)
@@ -988,7 +988,7 @@ static const u32 qca953x_2p0_baseband_postamble[][5] = {
        {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
        {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
        {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
-       {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcf946222, 0xcf946222},
+       {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcf946220, 0xcf946220},
        {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
        {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
        {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -1008,7 +1008,7 @@ static const u32 qca953x_2p0_baseband_postamble[][5] = {
        {0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
        {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
        {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
-       {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+       {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18},
        {0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33},
        {0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982},
        {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
index 148562addd38fd03e5884ffaa37be7b9503e9c4b..67edf344b4273280f7b3f3a4dd64556647b30bd5 100644 (file)
@@ -83,7 +83,7 @@ static const u32 ar955x_1p0_baseband_postamble[][5] = {
        {0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
        {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
        {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
-       {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18},
+       {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
        {0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33},
        {0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982},
        {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
index 10d4a6cb1c3b96730d3056a70aa27124ebd8c5b0..35c1bbb2fa8a273be6594031ad78f4dfec984839 100644 (file)
@@ -347,7 +347,7 @@ static const u32 ar9565_1p0_baseband_postamble[][5] = {
        {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
        {0x00009e20, 0x000003b5, 0x000003b5, 0x000003a4, 0x000003a4},
        {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
-       {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946220, 0xcf946220},
+       {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220},
        {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
        {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
        {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
index c3a47eaaf0c08062da110be5c745495437b57dfc..db051071c6765a466f9c6ab08689d83684f50591 100644 (file)
@@ -220,7 +220,7 @@ static const u32 qca956x_1p0_baseband_postamble[][5] = {
        {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
        {0x00009e20, 0x000003b5, 0x000003b5, 0x000003a6, 0x000003a6},
        {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
-       {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcf946222, 0xcf946222},
+       {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcf946220, 0xcf946220},
        {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
        {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
        {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
index 5d4629f96c15ab582d359f78105d17fc0258316b..f4c9befb39493da8d80c6be68c945782968196d3 100644 (file)
@@ -1290,7 +1290,7 @@ static const u32 ar9580_1p0_baseband_postamble[][5] = {
        {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
        {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
        {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
-       {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
+       {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220},
        {0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
        {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
        {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -1310,7 +1310,7 @@ static const u32 ar9580_1p0_baseband_postamble[][5] = {
        {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
        {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
        {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
-       {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+       {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
        {0x0000a2d0, 0x00041983, 0x00041983, 0x00041981, 0x00041982},
        {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
        {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
index 5294595da5a7eb45a63e14a0bbceeae1b2ef343a..93b3793cce2f9a865b5898de8e5a28b6302c6b43 100644 (file)
@@ -813,7 +813,6 @@ static inline int ath9k_dump_btcoex(struct ath_softc *sc, u8 *buf, u32 size)
 #ifdef CONFIG_MAC80211_LEDS
 void ath_init_leds(struct ath_softc *sc);
 void ath_deinit_leds(struct ath_softc *sc);
-void ath_fill_led_pin(struct ath_softc *sc);
 #else
 static inline void ath_init_leds(struct ath_softc *sc)
 {
@@ -822,9 +821,6 @@ static inline void ath_init_leds(struct ath_softc *sc)
 static inline void ath_deinit_leds(struct ath_softc *sc)
 {
 }
-static inline void ath_fill_led_pin(struct ath_softc *sc)
-{
-}
 #endif
 
 /************************/
index 5a084d94ed90793f22d5964bee6114a63a26f605..618c9df35fc1f30eb4ef1a3f9b4aaaa4b6aaab87 100644 (file)
@@ -15,6 +15,8 @@
  */
 
 #include <linux/export.h>
+#include <linux/types.h>
+#include <linux/ath9k_platform.h>
 #include "hw.h"
 
 enum ath_bt_mode {
@@ -34,6 +36,8 @@ struct ath_btcoex_config {
        u8 bt_priority_time;
        u8 bt_first_slot_time;
        bool bt_hold_rx_clear;
+       u8 wl_active_time;
+       u8 wl_qc_time;
 };
 
 static const u32 ar9003_wlan_weights[ATH_BTCOEX_STOMP_MAX]
@@ -65,31 +69,71 @@ void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
                .bt_priority_time = 2,
                .bt_first_slot_time = 5,
                .bt_hold_rx_clear = true,
+               .wl_active_time = 0x20,
+               .wl_qc_time = 0x20,
        };
        bool rxclear_polarity = ath_bt_config.bt_rxclear_polarity;
+       u8 time_extend = ath_bt_config.bt_time_extend;
+       u8 first_slot_time = ath_bt_config.bt_first_slot_time;
 
        if (AR_SREV_9300_20_OR_LATER(ah))
                rxclear_polarity = !ath_bt_config.bt_rxclear_polarity;
 
+       if (AR_SREV_SOC(ah)) {
+               first_slot_time = 0x1d;
+               time_extend = 0xa;
+
+               btcoex_hw->bt_coex_mode3 =
+                       SM(ath_bt_config.wl_active_time, AR_BT_WL_ACTIVE_TIME) |
+                       SM(ath_bt_config.wl_qc_time, AR_BT_WL_QC_TIME);
+
+               btcoex_hw->bt_coex_mode2 =
+                       AR_BT_PROTECT_BT_AFTER_WAKEUP |
+                       AR_BT_PHY_ERR_BT_COLL_ENABLE;
+       }
+
        btcoex_hw->bt_coex_mode =
                (btcoex_hw->bt_coex_mode & AR_BT_QCU_THRESH) |
-               SM(ath_bt_config.bt_time_extend, AR_BT_TIME_EXTEND) |
+               SM(time_extend, AR_BT_TIME_EXTEND) |
                SM(ath_bt_config.bt_txstate_extend, AR_BT_TXSTATE_EXTEND) |
                SM(ath_bt_config.bt_txframe_extend, AR_BT_TX_FRAME_EXTEND) |
                SM(ath_bt_config.bt_mode, AR_BT_MODE) |
                SM(ath_bt_config.bt_quiet_collision, AR_BT_QUIET) |
                SM(rxclear_polarity, AR_BT_RX_CLEAR_POLARITY) |
                SM(ath_bt_config.bt_priority_time, AR_BT_PRIORITY_TIME) |
-               SM(ath_bt_config.bt_first_slot_time, AR_BT_FIRST_SLOT_TIME) |
+               SM(first_slot_time, AR_BT_FIRST_SLOT_TIME) |
                SM(qnum, AR_BT_QCU_THRESH);
 
-       btcoex_hw->bt_coex_mode2 =
+       btcoex_hw->bt_coex_mode2 |=
                SM(ath_bt_config.bt_hold_rx_clear, AR_BT_HOLD_RX_CLEAR) |
                SM(ATH_BTCOEX_BMISS_THRESH, AR_BT_BCN_MISS_THRESH) |
                AR_BT_DISABLE_BT_ANT;
 }
 EXPORT_SYMBOL(ath9k_hw_init_btcoex_hw);
 
+static void ath9k_hw_btcoex_pin_init(struct ath_hw *ah, u8 wlanactive_gpio,
+                                    u8 btactive_gpio, u8 btpriority_gpio)
+{
+       struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+       struct ath9k_platform_data *pdata = ah->dev->platform_data;
+
+       if (btcoex_hw->scheme != ATH_BTCOEX_CFG_2WIRE &&
+           btcoex_hw->scheme != ATH_BTCOEX_CFG_3WIRE)
+               return;
+
+       /* bt priority GPIO will be ignored by 2 wire scheme */
+       if (pdata && (pdata->bt_active_pin || pdata->bt_priority_pin ||
+                     pdata->wlan_active_pin)) {
+               btcoex_hw->btactive_gpio = pdata->bt_active_pin;
+               btcoex_hw->wlanactive_gpio = pdata->wlan_active_pin;
+               btcoex_hw->btpriority_gpio = pdata->bt_priority_pin;
+       } else {
+               btcoex_hw->btactive_gpio = btactive_gpio;
+               btcoex_hw->wlanactive_gpio = wlanactive_gpio;
+               btcoex_hw->btpriority_gpio = btpriority_gpio;
+       }
+}
+
 void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah)
 {
        struct ath_common *common = ath9k_hw_common(ah);
@@ -107,19 +151,19 @@ void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah)
                btcoex_hw->scheme = ATH_BTCOEX_CFG_MCI;
        } else if (AR_SREV_9300_20_OR_LATER(ah)) {
                btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
-               btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9300;
-               btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9300;
-               btcoex_hw->btpriority_gpio = ATH_BTPRIORITY_GPIO_9300;
-       } else if (AR_SREV_9280_20_OR_LATER(ah)) {
-               btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9280;
-               btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9280;
 
-               if (AR_SREV_9285(ah)) {
+               ath9k_hw_btcoex_pin_init(ah, ATH_WLANACTIVE_GPIO_9300,
+                                        ATH_BTACTIVE_GPIO_9300,
+                                        ATH_BTPRIORITY_GPIO_9300);
+       } else if (AR_SREV_9280_20_OR_LATER(ah)) {
+               if (AR_SREV_9285(ah))
                        btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
-                       btcoex_hw->btpriority_gpio = ATH_BTPRIORITY_GPIO_9285;
-               } else {
+               else
                        btcoex_hw->scheme = ATH_BTCOEX_CFG_2WIRE;
-               }
+
+               ath9k_hw_btcoex_pin_init(ah, ATH_WLANACTIVE_GPIO_9280,
+                                        ATH_BTACTIVE_GPIO_9280,
+                                        ATH_BTPRIORITY_GPIO_9285);
        }
 }
 EXPORT_SYMBOL(ath9k_hw_btcoex_init_scheme);
@@ -137,12 +181,14 @@ void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah)
                    AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB);
 
        /* Set input mux for bt_active to gpio pin */
-       REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
-                     AR_GPIO_INPUT_MUX1_BT_ACTIVE,
-                     btcoex_hw->btactive_gpio);
+       if (!AR_SREV_SOC(ah))
+               REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
+                             AR_GPIO_INPUT_MUX1_BT_ACTIVE,
+                             btcoex_hw->btactive_gpio);
 
        /* Configure the desired gpio port for input */
-       ath9k_hw_cfg_gpio_input(ah, btcoex_hw->btactive_gpio);
+       ath9k_hw_gpio_request_in(ah, btcoex_hw->btactive_gpio,
+                                "ath9k-btactive");
 }
 EXPORT_SYMBOL(ath9k_hw_btcoex_init_2wire);
 
@@ -157,21 +203,33 @@ void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah)
 
        /* Set input mux for bt_prority_async and
         *                  bt_active_async to GPIO pins */
-       REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
-                       AR_GPIO_INPUT_MUX1_BT_ACTIVE,
-                       btcoex_hw->btactive_gpio);
-
-       REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
-                       AR_GPIO_INPUT_MUX1_BT_PRIORITY,
-                       btcoex_hw->btpriority_gpio);
+       if (!AR_SREV_SOC(ah)) {
+               REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
+                             AR_GPIO_INPUT_MUX1_BT_ACTIVE,
+                             btcoex_hw->btactive_gpio);
+               REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
+                             AR_GPIO_INPUT_MUX1_BT_PRIORITY,
+                             btcoex_hw->btpriority_gpio);
+       }
 
        /* Configure the desired GPIO ports for input */
-
-       ath9k_hw_cfg_gpio_input(ah, btcoex_hw->btactive_gpio);
-       ath9k_hw_cfg_gpio_input(ah, btcoex_hw->btpriority_gpio);
+       ath9k_hw_gpio_request_in(ah, btcoex_hw->btactive_gpio,
+                                "ath9k-btactive");
+       ath9k_hw_gpio_request_in(ah, btcoex_hw->btpriority_gpio,
+                                "ath9k-btpriority");
 }
 EXPORT_SYMBOL(ath9k_hw_btcoex_init_3wire);
 
+void ath9k_hw_btcoex_deinit(struct ath_hw *ah)
+{
+       struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+
+       ath9k_hw_gpio_free(ah, btcoex_hw->btactive_gpio);
+       ath9k_hw_gpio_free(ah, btcoex_hw->btpriority_gpio);
+       ath9k_hw_gpio_free(ah, btcoex_hw->wlanactive_gpio);
+}
+EXPORT_SYMBOL(ath9k_hw_btcoex_deinit);
+
 void ath9k_hw_btcoex_init_mci(struct ath_hw *ah)
 {
        ah->btcoex_hw.mci.ready = false;
@@ -201,8 +259,9 @@ static void ath9k_hw_btcoex_enable_2wire(struct ath_hw *ah)
        struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
 
        /* Configure the desired GPIO port for TX_FRAME output */
-       ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
-                           AR_GPIO_OUTPUT_MUX_AS_TX_FRAME);
+       ath9k_hw_gpio_request_out(ah, btcoex_hw->wlanactive_gpio,
+                                 "ath9k-wlanactive",
+                                 AR_GPIO_OUTPUT_MUX_AS_TX_FRAME);
 }
 
 /*
@@ -247,13 +306,13 @@ void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
                                 txprio_shift[i-1]);
                }
        }
+
        /* Last WLAN weight has to be adjusted wrt tx priority */
        if (concur_tx) {
                btcoex_hw->wlan_weight[i-1] &= ~(0xff << txprio_shift[i-1]);
                btcoex_hw->wlan_weight[i-1] |= (btcoex_hw->tx_prio[stomp_type]
                                                      << txprio_shift[i-1]);
        }
-
 }
 EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight);
 
@@ -268,9 +327,14 @@ static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
         * Program coex mode and weight registers to
         * enable coex 3-wire
         */
+       if (AR_SREV_SOC(ah))
+               REG_CLR_BIT(ah, AR_BT_COEX_MODE2, AR_BT_PHY_ERR_BT_COLL_ENABLE);
+
        REG_WRITE(ah, AR_BT_COEX_MODE, btcoex->bt_coex_mode);
        REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex->bt_coex_mode2);
 
+       if (AR_SREV_SOC(ah))
+               REG_WRITE(ah, AR_BT_COEX_MODE3, btcoex->bt_coex_mode3);
 
        if (AR_SREV_9300_20_OR_LATER(ah)) {
                REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, btcoex->wlan_weight[0]);
@@ -281,8 +345,6 @@ static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
        } else
                REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex->bt_coex_weights);
 
-
-
        if (AR_SREV_9271(ah)) {
                val = REG_READ(ah, 0x50040);
                val &= 0xFFFFFEFF;
@@ -292,8 +354,9 @@ static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
        REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1);
        REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0);
 
-       ath9k_hw_cfg_output(ah, btcoex->wlanactive_gpio,
-                           AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL);
+       ath9k_hw_gpio_request_out(ah, btcoex->wlanactive_gpio,
+                                 "ath9k-wlanactive",
+                                 AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL);
 }
 
 static void ath9k_hw_btcoex_enable_mci(struct ath_hw *ah)
@@ -339,7 +402,8 @@ void ath9k_hw_btcoex_enable(struct ath_hw *ah)
                break;
        }
 
-       if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_MCI) {
+       if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_MCI &&
+           !AR_SREV_SOC(ah)) {
                REG_RMW(ah, AR_GPIO_PDPU,
                        (0x2 << (btcoex_hw->btactive_gpio * 2)),
                        (0x3 << (btcoex_hw->btactive_gpio * 2)));
@@ -364,8 +428,8 @@ void ath9k_hw_btcoex_disable(struct ath_hw *ah)
        if (!AR_SREV_9300_20_OR_LATER(ah))
                ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0);
 
-       ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
-                       AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+       ath9k_hw_gpio_request_out(ah, btcoex_hw->wlanactive_gpio,
+                                 NULL, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
 
        if (btcoex_hw->scheme == ATH_BTCOEX_CFG_3WIRE) {
                REG_WRITE(ah, AR_BT_COEX_MODE, AR_BT_QUIET | AR_BT_MODE);
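
The new ath9k_hw_btcoex_pin_init() helper above lets board platform data
override the per-chip default coex pins: only when none of bt_active_pin,
bt_priority_pin and wlan_active_pin are set does it fall back to the
hard-coded GPIO numbers. A minimal sketch of the platform-data side, with
purely illustrative pin numbers:

	/* Hypothetical board file: pin numbers are examples, not defaults. */
	static struct ath9k_platform_data example_ath9k_pdata = {
		.bt_active_pin   = 4,	/* wired to BT_ACTIVE */
		.bt_priority_pin = 5,	/* wired to BT_PRIORITY */
		.wlan_active_pin = 9,	/* wired to WLAN_ACTIVE */
	};
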
index cd2f0a2373cb92f7eeb47f0fa0ab21e5a78c3a9e..1bdfa8465b9260df85345431d29711793f5722e5 100644 (file)
@@ -115,6 +115,7 @@ struct ath_btcoex_hw {
        u32 bt_coex_mode;       /* Register setting for AR_BT_COEX_MODE */
        u32 bt_coex_weights;    /* Register setting for AR_BT_COEX_WEIGHT */
        u32 bt_coex_mode2;      /* Register setting for AR_BT_COEX_MODE2 */
+       u32 bt_coex_mode3;      /* Register setting for AR_BT_COEX_MODE3 */
        u32 bt_weight[AR9300_NUM_BT_WEIGHTS];
        u32 wlan_weight[AR9300_NUM_WLAN_WEIGHTS];
        u8 tx_prio[ATH_BTCOEX_STOMP_MAX];
@@ -123,6 +124,7 @@ struct ath_btcoex_hw {
 void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah);
 void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah);
 void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah);
+void ath9k_hw_btcoex_deinit(struct ath_hw *ah);
 void ath9k_hw_btcoex_init_mci(struct ath_hw *ah);
 void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum);
 void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
index 37f6d66d16715932771ef24a74a70382e11270ec..0f71146b781d12dfcb5907fdecb3843b4260ad4b 100644 (file)
@@ -145,14 +145,14 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
 }
 
 static bool ath9k_hw_get_nf_thresh(struct ath_hw *ah,
-                                  enum ieee80211_band band,
+                                  enum nl80211_band band,
                                   int16_t *nft)
 {
        switch (band) {
-       case IEEE80211_BAND_5GHZ:
+       case NL80211_BAND_5GHZ:
                *nft = (int8_t)ah->eep_ops->get_eeprom(ah, EEP_NFTHRESH_5);
                break;
-       case IEEE80211_BAND_2GHZ:
+       case NL80211_BAND_2GHZ:
                *nft = (int8_t)ah->eep_ops->get_eeprom(ah, EEP_NFTHRESH_2);
                break;
        default:
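
This hunk and the many like it below are part of the tree-wide rename of
enum ieee80211_band to enum nl80211_band (and IEEE80211_BAND_* to
NL80211_BAND_*) that went in during the 4.7 cycle when cfg80211 dropped its
duplicate band enum; the constants keep their values, so the conversion is
purely mechanical.
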
index 319cb5f25f58d83fbdaab226698c8f021a3e897b..e56bafcf58640367b02a95042c4739a46a15915c 100644 (file)
@@ -107,9 +107,9 @@ void ath_chanctx_init(struct ath_softc *sc)
        struct ieee80211_channel *chan;
        int i, j;
 
-       sband = &common->sbands[IEEE80211_BAND_2GHZ];
+       sband = &common->sbands[NL80211_BAND_2GHZ];
        if (!sband->n_channels)
-               sband = &common->sbands[IEEE80211_BAND_5GHZ];
+               sband = &common->sbands[NL80211_BAND_5GHZ];
 
        chan = &sband->channels[0];
        for (i = 0; i < ATH9K_NUM_CHANCTX; i++) {
@@ -1333,9 +1333,9 @@ void ath9k_offchannel_init(struct ath_softc *sc)
        struct ieee80211_channel *chan;
        int i;
 
-       sband = &common->sbands[IEEE80211_BAND_2GHZ];
+       sband = &common->sbands[NL80211_BAND_2GHZ];
        if (!sband->n_channels)
-               sband = &common->sbands[IEEE80211_BAND_5GHZ];
+               sband = &common->sbands[NL80211_BAND_5GHZ];
 
        chan = &sband->channels[0];
 
index a006c1499728b080cd6fdf8f00433efe31809dc9..8b4f7fdabf5803ee8edbf45b6c18a0a24a0082de 100644 (file)
 #include "common.h"
 
 #define CHAN2G(_freq, _idx)  { \
-       .band = IEEE80211_BAND_2GHZ, \
+       .band = NL80211_BAND_2GHZ, \
        .center_freq = (_freq), \
        .hw_value = (_idx), \
        .max_power = 20, \
 }
 
 #define CHAN5G(_freq, _idx) { \
-       .band = IEEE80211_BAND_5GHZ, \
+       .band = NL80211_BAND_5GHZ, \
        .center_freq = (_freq), \
        .hw_value = (_idx), \
        .max_power = 20, \
@@ -139,12 +139,12 @@ int ath9k_cmn_init_channels_rates(struct ath_common *common)
 
                memcpy(channels, ath9k_2ghz_chantable,
                       sizeof(ath9k_2ghz_chantable));
-               common->sbands[IEEE80211_BAND_2GHZ].channels = channels;
-               common->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
-               common->sbands[IEEE80211_BAND_2GHZ].n_channels =
+               common->sbands[NL80211_BAND_2GHZ].channels = channels;
+               common->sbands[NL80211_BAND_2GHZ].band = NL80211_BAND_2GHZ;
+               common->sbands[NL80211_BAND_2GHZ].n_channels =
                        ARRAY_SIZE(ath9k_2ghz_chantable);
-               common->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
-               common->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
+               common->sbands[NL80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
+               common->sbands[NL80211_BAND_2GHZ].n_bitrates =
                        ARRAY_SIZE(ath9k_legacy_rates);
        }
 
@@ -156,13 +156,13 @@ int ath9k_cmn_init_channels_rates(struct ath_common *common)
 
                memcpy(channels, ath9k_5ghz_chantable,
                       sizeof(ath9k_5ghz_chantable));
-               common->sbands[IEEE80211_BAND_5GHZ].channels = channels;
-               common->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
-               common->sbands[IEEE80211_BAND_5GHZ].n_channels =
+               common->sbands[NL80211_BAND_5GHZ].channels = channels;
+               common->sbands[NL80211_BAND_5GHZ].band = NL80211_BAND_5GHZ;
+               common->sbands[NL80211_BAND_5GHZ].n_channels =
                        ARRAY_SIZE(ath9k_5ghz_chantable);
-               common->sbands[IEEE80211_BAND_5GHZ].bitrates =
+               common->sbands[NL80211_BAND_5GHZ].bitrates =
                        ath9k_legacy_rates + 4;
-               common->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
+               common->sbands[NL80211_BAND_5GHZ].n_bitrates =
                        ARRAY_SIZE(ath9k_legacy_rates) - 4;
        }
        return 0;
@@ -236,9 +236,9 @@ void ath9k_cmn_reload_chainmask(struct ath_hw *ah)
 
        if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
                ath9k_cmn_setup_ht_cap(ah,
-                       &common->sbands[IEEE80211_BAND_2GHZ].ht_cap);
+                       &common->sbands[NL80211_BAND_2GHZ].ht_cap);
        if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
                ath9k_cmn_setup_ht_cap(ah,
-                       &common->sbands[IEEE80211_BAND_5GHZ].ht_cap);
+                       &common->sbands[NL80211_BAND_5GHZ].ht_cap);
 }
 EXPORT_SYMBOL(ath9k_cmn_reload_chainmask);
index e8c699446470cb9e33dc5f407ebfc81b5197af1b..b80e08b13b7439bac045f3242174bbd83e09e4a9 100644 (file)
@@ -173,7 +173,7 @@ int ath9k_cmn_process_rate(struct ath_common *common,
                           struct ieee80211_rx_status *rxs)
 {
        struct ieee80211_supported_band *sband;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        unsigned int i = 0;
        struct ath_hw *ah = common->ah;
 
@@ -305,7 +305,7 @@ static void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
        ichan->channel = chan->center_freq;
        ichan->chan = chan;
 
-       if (chan->band == IEEE80211_BAND_5GHZ)
+       if (chan->band == NL80211_BAND_5GHZ)
                flags |= CHANNEL_5GHZ;
 
        switch (chandef->width) {
index 6de64cface3c26d3ff4dba5c9b0b99305a8dd348..c56e40ff35e5fafcaa58433ede46bc53de9d4c69 100644 (file)
@@ -916,10 +916,21 @@ static int open_file_regdump(struct inode *inode, struct file *file)
        struct ath_softc *sc = inode->i_private;
        unsigned int len = 0;
        u8 *buf;
-       int i;
+       int i, j = 0;
        unsigned long num_regs, regdump_len, max_reg_offset;
+       const struct reg_hole {
+               u32 start;
+               u32 end;
+       } reg_hole_list[] = {
+               {0x0200, 0x07fc},
+               {0x0c00, 0x0ffc},
+               {0x2000, 0x3ffc},
+               {0x4100, 0x6ffc},
+               {0x705c, 0x7ffc},
+               {0x0000, 0x0000}
+       };
 
-       max_reg_offset = AR_SREV_9300_20_OR_LATER(sc->sc_ah) ? 0x16bd4 : 0xb500;
+       max_reg_offset = AR_SREV_9300_20_OR_LATER(sc->sc_ah) ? 0x8800 : 0xb500;
        num_regs = max_reg_offset / 4 + 1;
        regdump_len = num_regs * REGDUMP_LINE_SIZE + 1;
        buf = vmalloc(regdump_len);
@@ -927,9 +938,16 @@ static int open_file_regdump(struct inode *inode, struct file *file)
                return -ENOMEM;
 
        ath9k_ps_wakeup(sc);
-       for (i = 0; i < num_regs; i++)
+       for (i = 0; i < num_regs; i++) {
+               if (reg_hole_list[j].start == i << 2) {
+                       i = reg_hole_list[j].end >> 2;
+                       j++;
+                       continue;
+               }
+
                len += scnprintf(buf + len, regdump_len - len,
                        "0x%06x 0x%08x\n", i << 2, REG_READ(sc->sc_ah, i << 2));
+       }
        ath9k_ps_restore(sc);
 
        file->private_data = buf;
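
Worked example of the hole skipping above: with the first entry
{0x0200, 0x07fc}, once i << 2 reaches 0x0200 the loop sets i = 0x07fc >> 2,
bumps j to the next hole and continues, so after the loop's i++ the next
offset read is 0x0800; the inclusive range 0x0200..0x07fc is never touched.
The trailing {0x0000, 0x0000} entry acts as a terminator, since no later
offset can match a start of 0.
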
index c2ca57a2ed09d0f14ee10520b8a8c27f5d5543c2..b66cfa91364f101ddde22f2b75ecbd67d5041a38 100644 (file)
@@ -139,7 +139,7 @@ void ath_debug_rate_stats(struct ath_softc *sc,
        }
 
        if (IS_OFDM_RATE(rs->rs_rate)) {
-               if (ah->curchan->chan->band == IEEE80211_BAND_2GHZ)
+               if (ah->curchan->chan->band == NL80211_BAND_2GHZ)
                        rstats->ofdm_stats[rxs->rate_idx - 4].ofdm_cnt++;
                else
                        rstats->ofdm_stats[rxs->rate_idx].ofdm_cnt++;
@@ -173,7 +173,7 @@ static ssize_t read_file_node_recv(struct file *file, char __user *user_buf,
        struct ath_hw *ah = sc->sc_ah;
        struct ath_rx_rate_stats *rstats;
        struct ieee80211_sta *sta = an->sta;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        u32 len = 0, size = 4096;
        char *buf;
        size_t retval;
@@ -206,7 +206,7 @@ static ssize_t read_file_node_recv(struct file *file, char __user *user_buf,
        len += scnprintf(buf + len, size - len, "\n");
 
 legacy:
-       if (band == IEEE80211_BAND_2GHZ) {
+       if (band == NL80211_BAND_2GHZ) {
                PRINT_CCK_RATE("CCK-1M/LP", 0, false);
                PRINT_CCK_RATE("CCK-2M/LP", 1, false);
                PRINT_CCK_RATE("CCK-5.5M/LP", 2, false);
index 22b3cc4c27cda58f83a2a6deb69957fdcfa502ed..d2ff0fc0484cc4ca44afd46313dd75e2973dc409 100644 (file)
@@ -212,7 +212,7 @@ void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
                struct ieee80211_tx_rate *rates = info->status.rates;
 
                rate = &common->sbands[info->band].bitrates[rates[ridx].idx];
-               if (info->band == IEEE80211_BAND_2GHZ &&
+               if (info->band == NL80211_BAND_2GHZ &&
                    !(rate->flags & IEEE80211_RATE_ERP_G))
                        phy = WLAN_RC_PHY_CCK;
                else
index 284706798c71deda9ecc8a400bcff6e26b8999c5..490f74d9ddf0a62c39cdd65b6f053248979f03fe 100644 (file)
 /********************************/
 
 #ifdef CONFIG_MAC80211_LEDS
+
+void ath_fill_led_pin(struct ath_softc *sc)
+{
+       struct ath_hw *ah = sc->sc_ah;
+
+       /* Set default led pin if invalid */
+       if (ah->led_pin < 0) {
+               if (AR_SREV_9287(ah))
+                       ah->led_pin = ATH_LED_PIN_9287;
+               else if (AR_SREV_9485(ah))
+                       ah->led_pin = ATH_LED_PIN_9485;
+               else if (AR_SREV_9300(ah))
+                       ah->led_pin = ATH_LED_PIN_9300;
+               else if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
+                       ah->led_pin = ATH_LED_PIN_9462;
+               else
+                       ah->led_pin = ATH_LED_PIN_DEF;
+       }
+
+       /* Configure the LED gpio for output */
+       ath9k_hw_gpio_request_out(ah, ah->led_pin, "ath9k-led",
+                                 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+
+       /* LED off, active low */
+       ath9k_hw_set_gpio(ah, ah->led_pin, ah->config.led_active_high ? 0 : 1);
+}
+
 static void ath_led_brightness(struct led_classdev *led_cdev,
                               enum led_brightness brightness)
 {
@@ -40,6 +67,8 @@ void ath_deinit_leds(struct ath_softc *sc)
 
        ath_led_brightness(&sc->led_cdev, LED_OFF);
        led_classdev_unregister(&sc->led_cdev);
+
+       ath9k_hw_gpio_free(sc->sc_ah, sc->sc_ah->led_pin);
 }
 
 void ath_init_leds(struct ath_softc *sc)
@@ -49,6 +78,8 @@ void ath_init_leds(struct ath_softc *sc)
        if (AR_SREV_9100(sc->sc_ah))
                return;
 
+       ath_fill_led_pin(sc);
+
        if (!ath9k_led_blink)
                sc->led_cdev.default_trigger =
                        ieee80211_get_radio_led_name(sc->hw);
@@ -64,37 +95,6 @@ void ath_init_leds(struct ath_softc *sc)
 
        sc->led_registered = true;
 }
-
-void ath_fill_led_pin(struct ath_softc *sc)
-{
-       struct ath_hw *ah = sc->sc_ah;
-
-       if (AR_SREV_9100(ah))
-               return;
-
-       if (ah->led_pin >= 0) {
-               if (!((1 << ah->led_pin) & AR_GPIO_OE_OUT_MASK))
-                       ath9k_hw_request_gpio(ah, ah->led_pin, "ath9k-led");
-               return;
-       }
-
-       if (AR_SREV_9287(ah))
-               ah->led_pin = ATH_LED_PIN_9287;
-       else if (AR_SREV_9485(sc->sc_ah))
-               ah->led_pin = ATH_LED_PIN_9485;
-       else if (AR_SREV_9300(sc->sc_ah))
-               ah->led_pin = ATH_LED_PIN_9300;
-       else if (AR_SREV_9462(sc->sc_ah) || AR_SREV_9565(sc->sc_ah))
-               ah->led_pin = ATH_LED_PIN_9462;
-       else
-               ah->led_pin = ATH_LED_PIN_DEF;
-
-       /* Configure gpio 1 for output */
-       ath9k_hw_cfg_output(ah, ah->led_pin, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
-
-       /* LED off, active low */
-       ath9k_hw_set_gpio(ah, ah->led_pin, (ah->config.led_active_high) ? 0 : 1);
-}
 #endif
 
 /*******************/
@@ -402,6 +402,13 @@ void ath9k_deinit_btcoex(struct ath_softc *sc)
 
        if (ath9k_hw_mci_is_enabled(ah))
                ath_mci_cleanup(sc);
+       else {
+               enum ath_btcoex_scheme scheme = ath9k_hw_get_btcoex_scheme(ah);
+
+               if (scheme == ATH_BTCOEX_CFG_2WIRE ||
+                   scheme == ATH_BTCOEX_CFG_3WIRE)
+                       ath9k_hw_btcoex_deinit(sc->sc_ah);
+       }
 }
 
 int ath9k_init_btcoex(struct ath_softc *sc)
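
Because the coex pins are now requested through the new GPIO helpers (and
on SoC chips end up owned via gpiolib), teardown has to release them: the
else branch added above calls ath9k_hw_btcoex_deinit() for the 2-wire and
3-wire schemes, mirroring the requests made in
ath9k_hw_btcoex_init_2wire()/ath9k_hw_btcoex_init_3wire().
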
index 8cbf4904db7b282b1f73dea228179e75b230cc25..e1c338cb9cb52db354df0f3ae417fe69da72b6b1 100644 (file)
@@ -527,7 +527,7 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
                                    struct sk_buff *skb)
 {
        struct sk_buff *nskb, *skb_pool[MAX_PKT_NUM_IN_TRANSFER];
-       int index = 0, i = 0, len = skb->len;
+       int index = 0, i, len = skb->len;
        int rx_remain_len, rx_pkt_len;
        u16 pool_index = 0;
        u8 *ptr;
index 2aabcbdaba4e15f93cd4f8924c922e480fa8d8dd..ecb848b60725461a2cfd810b25aa226ec74b69c8 100644 (file)
@@ -253,17 +253,19 @@ void ath9k_deinit_leds(struct ath9k_htc_priv *priv)
        ath9k_led_brightness(&priv->led_cdev, LED_OFF);
        led_classdev_unregister(&priv->led_cdev);
        cancel_work_sync(&priv->led_work);
+
+       ath9k_hw_gpio_free(priv->ah, priv->ah->led_pin);
 }
 
 
 void ath9k_configure_leds(struct ath9k_htc_priv *priv)
 {
        /* Configure the LED gpio for output */
-       ath9k_hw_cfg_output(priv->ah, priv->ah->led_pin,
-                           AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+       ath9k_hw_gpio_request_out(priv->ah, priv->ah->led_pin,
+                                 "ath9k-led",
+                                 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
        /* LED off, active low */
        ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 1);
-
 }
 
 void ath9k_init_leds(struct ath9k_htc_priv *priv)
index 8647ab77c019a30e7b8044b6147a62493626529b..c148c6c504f722976392823cf8046bebbad8d662 100644 (file)
@@ -262,11 +262,11 @@ static void ath9k_multi_regread(void *hw_priv, u32 *addr,
        __be32 tmpval[8];
        int i, ret;
 
-       for (i = 0; i < count; i++) {
-              tmpaddr[i] = cpu_to_be32(addr[i]);
-       }
+       for (i = 0; i < count; i++) {
+               tmpaddr[i] = cpu_to_be32(addr[i]);
+       }
 
-       ret = ath9k_wmi_cmd(priv->wmi, WMI_REG_READ_CMDID,
+       ret = ath9k_wmi_cmd(priv->wmi, WMI_REG_READ_CMDID,
                           (u8 *)tmpaddr , sizeof(u32) * count,
                           (u8 *)tmpval, sizeof(u32) * count,
                           100);
@@ -275,9 +275,9 @@ static void ath9k_multi_regread(void *hw_priv, u32 *addr,
                        "Multiple REGISTER READ FAILED (count: %d)\n", count);
        }
 
-       for (i = 0; i < count; i++) {
-              val[i] = be32_to_cpu(tmpval[i]);
-       }
+       for (i = 0; i < count; i++) {
+               val[i] = be32_to_cpu(tmpval[i]);
+       }
 }
 
 static void ath9k_regwrite_multi(struct ath_common *common)
@@ -765,11 +765,11 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
                sizeof(struct htc_frame_hdr) + 4;
 
        if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
-               hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
-                       &common->sbands[IEEE80211_BAND_2GHZ];
+               hw->wiphy->bands[NL80211_BAND_2GHZ] =
+                       &common->sbands[NL80211_BAND_2GHZ];
        if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
-               hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
-                       &common->sbands[IEEE80211_BAND_5GHZ];
+               hw->wiphy->bands[NL80211_BAND_5GHZ] =
+                       &common->sbands[NL80211_BAND_5GHZ];
 
        ath9k_cmn_reload_chainmask(ah);
 
index 639294a9e34df6b2461b4c4c8052af75cbf2b258..a553c91d41a143c90c374e6fff78131e835fb3a4 100644 (file)
@@ -246,7 +246,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
        struct ieee80211_conf *conf = &common->hw->conf;
        bool fastcc;
        struct ieee80211_channel *channel = hw->conf.chandef.chan;
-       struct ath9k_hw_cal_data *caldata = NULL;
+       struct ath9k_hw_cal_data *caldata;
        enum htc_phymode mode;
        __be16 htc_mode;
        u8 cmd_rsp;
@@ -274,10 +274,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
                priv->ah->curchan->channel,
                channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf),
                fastcc);
-
-       if (!fastcc)
-               caldata = &priv->caldata;
-
+       caldata = fastcc ? NULL : &priv->caldata;
        ret = ath9k_hw_reset(ah, hchan, caldata, fastcc);
        if (ret) {
                ath_err(common,
@@ -1770,8 +1767,8 @@ static int ath9k_htc_set_bitrate_mask(struct ieee80211_hw *hw,
        memset(&tmask, 0, sizeof(struct ath9k_htc_target_rate_mask));
 
        tmask.vif_index = avp->index;
-       tmask.band = IEEE80211_BAND_2GHZ;
-       tmask.mask = cpu_to_be32(mask->control[IEEE80211_BAND_2GHZ].legacy);
+       tmask.band = NL80211_BAND_2GHZ;
+       tmask.mask = cpu_to_be32(mask->control[NL80211_BAND_2GHZ].legacy);
 
        WMI_CMD_BUF(WMI_BITRATE_MASK_CMDID, &tmask);
        if (ret) {
@@ -1781,8 +1778,8 @@ static int ath9k_htc_set_bitrate_mask(struct ieee80211_hw *hw,
                goto out;
        }
 
-       tmask.band = IEEE80211_BAND_5GHZ;
-       tmask.mask = cpu_to_be32(mask->control[IEEE80211_BAND_5GHZ].legacy);
+       tmask.band = NL80211_BAND_5GHZ;
+       tmask.mask = cpu_to_be32(mask->control[NL80211_BAND_5GHZ].legacy);
 
        WMI_CMD_BUF(WMI_BITRATE_MASK_CMDID, &tmask);
        if (ret) {
@@ -1793,8 +1790,8 @@ static int ath9k_htc_set_bitrate_mask(struct ieee80211_hw *hw,
        }
 
        ath_dbg(common, CONFIG, "Set bitrate masks: 0x%x, 0x%x\n",
-               mask->control[IEEE80211_BAND_2GHZ].legacy,
-               mask->control[IEEE80211_BAND_5GHZ].legacy);
+               mask->control[NL80211_BAND_2GHZ].legacy,
+               mask->control[NL80211_BAND_5GHZ].legacy);
 out:
        return ret;
 }
index cc9648f844aeffef25c8d36eb33776b7c152d90c..f333ef1e3e7b4171cbe0ff37a21c43f87f4937b3 100644 (file)
@@ -494,7 +494,7 @@ static void ath9k_htc_tx_process(struct ath9k_htc_priv *priv,
                if (txs->ts_flags & ATH9K_HTC_TXSTAT_SGI)
                        rate->flags |= IEEE80211_TX_RC_SHORT_GI;
        } else {
-               if (cur_conf->chandef.chan->band == IEEE80211_BAND_5GHZ)
+               if (cur_conf->chandef.chan->band == NL80211_BAND_5GHZ)
                        rate->idx += 4; /* No CCK rates */
        }
 
index e7a31016f370ade654f6e4fcd05e3adc4595a13e..8b2895f9ac7a4039018547c53c30cd6ffa091aee 100644 (file)
@@ -1582,8 +1582,10 @@ static void ath9k_hw_apply_gpio_override(struct ath_hw *ah)
                if (!(gpio_mask & 1))
                        continue;
 
-               ath9k_hw_cfg_output(ah, i, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+               ath9k_hw_gpio_request_out(ah, i, NULL,
+                                         AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
                ath9k_hw_set_gpio(ah, i, !!(ah->gpio_val & BIT(i)));
+               ath9k_hw_gpio_free(ah, i);
        }
 }
 
@@ -1958,7 +1960,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
        ath9k_hw_init_qos(ah);
 
        if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
-               ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);
+               ath9k_hw_gpio_request_in(ah, ah->rfkill_gpio, "ath9k-rfkill");
 
        ath9k_hw_init_global_settings(ah);
 
@@ -2385,6 +2387,61 @@ static bool ath9k_hw_dfs_tested(struct ath_hw *ah)
        }
 }
 
+static void ath9k_gpio_cap_init(struct ath_hw *ah)
+{
+       struct ath9k_hw_capabilities *pCap = &ah->caps;
+
+       if (AR_SREV_9271(ah)) {
+               pCap->num_gpio_pins = AR9271_NUM_GPIO;
+               pCap->gpio_mask = AR9271_GPIO_MASK;
+       } else if (AR_DEVID_7010(ah)) {
+               pCap->num_gpio_pins = AR7010_NUM_GPIO;
+               pCap->gpio_mask = AR7010_GPIO_MASK;
+       } else if (AR_SREV_9287(ah)) {
+               pCap->num_gpio_pins = AR9287_NUM_GPIO;
+               pCap->gpio_mask = AR9287_GPIO_MASK;
+       } else if (AR_SREV_9285(ah)) {
+               pCap->num_gpio_pins = AR9285_NUM_GPIO;
+               pCap->gpio_mask = AR9285_GPIO_MASK;
+       } else if (AR_SREV_9280(ah)) {
+               pCap->num_gpio_pins = AR9280_NUM_GPIO;
+               pCap->gpio_mask = AR9280_GPIO_MASK;
+       } else if (AR_SREV_9300(ah)) {
+               pCap->num_gpio_pins = AR9300_NUM_GPIO;
+               pCap->gpio_mask = AR9300_GPIO_MASK;
+       } else if (AR_SREV_9330(ah)) {
+               pCap->num_gpio_pins = AR9330_NUM_GPIO;
+               pCap->gpio_mask = AR9330_GPIO_MASK;
+       } else if (AR_SREV_9340(ah)) {
+               pCap->num_gpio_pins = AR9340_NUM_GPIO;
+               pCap->gpio_mask = AR9340_GPIO_MASK;
+       } else if (AR_SREV_9462(ah)) {
+               pCap->num_gpio_pins = AR9462_NUM_GPIO;
+               pCap->gpio_mask = AR9462_GPIO_MASK;
+       } else if (AR_SREV_9485(ah)) {
+               pCap->num_gpio_pins = AR9485_NUM_GPIO;
+               pCap->gpio_mask = AR9485_GPIO_MASK;
+       } else if (AR_SREV_9531(ah)) {
+               pCap->num_gpio_pins = AR9531_NUM_GPIO;
+               pCap->gpio_mask = AR9531_GPIO_MASK;
+       } else if (AR_SREV_9550(ah)) {
+               pCap->num_gpio_pins = AR9550_NUM_GPIO;
+               pCap->gpio_mask = AR9550_GPIO_MASK;
+       } else if (AR_SREV_9561(ah)) {
+               pCap->num_gpio_pins = AR9561_NUM_GPIO;
+               pCap->gpio_mask = AR9561_GPIO_MASK;
+       } else if (AR_SREV_9565(ah)) {
+               pCap->num_gpio_pins = AR9565_NUM_GPIO;
+               pCap->gpio_mask = AR9565_GPIO_MASK;
+       } else if (AR_SREV_9580(ah)) {
+               pCap->num_gpio_pins = AR9580_NUM_GPIO;
+               pCap->gpio_mask = AR9580_GPIO_MASK;
+       } else {
+               pCap->num_gpio_pins = AR_NUM_GPIO;
+               pCap->gpio_mask = AR_GPIO_MASK;
+       }
+}
+
 int ath9k_hw_fill_cap_info(struct ath_hw *ah)
 {
        struct ath9k_hw_capabilities *pCap = &ah->caps;
@@ -2478,20 +2535,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
        else
                pCap->hw_caps &= ~ATH9K_HW_CAP_HT;
 
-       if (AR_SREV_9271(ah))
-               pCap->num_gpio_pins = AR9271_NUM_GPIO;
-       else if (AR_DEVID_7010(ah))
-               pCap->num_gpio_pins = AR7010_NUM_GPIO;
-       else if (AR_SREV_9300_20_OR_LATER(ah))
-               pCap->num_gpio_pins = AR9300_NUM_GPIO;
-       else if (AR_SREV_9287_11_OR_LATER(ah))
-               pCap->num_gpio_pins = AR9287_NUM_GPIO;
-       else if (AR_SREV_9285_12_OR_LATER(ah))
-               pCap->num_gpio_pins = AR9285_NUM_GPIO;
-       else if (AR_SREV_9280_20_OR_LATER(ah))
-               pCap->num_gpio_pins = AR928X_NUM_GPIO;
-       else
-               pCap->num_gpio_pins = AR_NUM_GPIO;
+       ath9k_gpio_cap_init(ah);
 
        if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah))
                pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
@@ -2612,8 +2656,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
 /* GPIO / RFKILL / Antennae */
 /****************************/
 
-static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah,
-                                        u32 gpio, u32 type)
+static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah, u32 gpio, u32 type)
 {
        int addr;
        u32 gpio_shift, tmp;
@@ -2627,8 +2670,8 @@ static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah,
 
        gpio_shift = (gpio % 6) * 5;
 
-       if (AR_SREV_9280_20_OR_LATER(ah)
-           || (addr != AR_GPIO_OUTPUT_MUX1)) {
+       if (AR_SREV_9280_20_OR_LATER(ah) ||
+           (addr != AR_GPIO_OUTPUT_MUX1)) {
                REG_RMW(ah, addr, (type << gpio_shift),
                        (0x1f << gpio_shift));
        } else {
@@ -2640,106 +2683,144 @@ static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah,
        }
 }
 
-void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio)
+/* BSP should set the corresponding MUX register correctly. */
+static void ath9k_hw_gpio_cfg_soc(struct ath_hw *ah, u32 gpio, bool out,
+                                 const char *label)
 {
-       u32 gpio_shift;
+       if (ah->caps.gpio_requested & BIT(gpio))
+               return;
 
-       BUG_ON(gpio >= ah->caps.num_gpio_pins);
+       /* may have been requested by the BSP; free it anyway */
+       gpio_free(gpio);
 
-       if (AR_DEVID_7010(ah)) {
-               gpio_shift = gpio;
-               REG_RMW(ah, AR7010_GPIO_OE,
-                       (AR7010_GPIO_OE_AS_INPUT << gpio_shift),
-                       (AR7010_GPIO_OE_MASK << gpio_shift));
+       if (gpio_request_one(gpio, out ? GPIOF_OUT_INIT_LOW : GPIOF_IN, label))
                return;
-       }
 
-       gpio_shift = gpio << 1;
-       REG_RMW(ah,
-               AR_GPIO_OE_OUT,
-               (AR_GPIO_OE_OUT_DRV_NO << gpio_shift),
-               (AR_GPIO_OE_OUT_DRV << gpio_shift));
+       ah->caps.gpio_requested |= BIT(gpio);
 }
-EXPORT_SYMBOL(ath9k_hw_cfg_gpio_input);
 
-u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
+static void ath9k_hw_gpio_cfg_wmac(struct ath_hw *ah, u32 gpio, bool out,
+                                  u32 ah_signal_type)
 {
-#define MS_REG_READ(x, y) \
-       (MS(REG_READ(ah, AR_GPIO_IN_OUT), x##_GPIO_IN_VAL) & (AR_GPIO_BIT(y)))
-
-       if (gpio >= ah->caps.num_gpio_pins)
-               return 0xffffffff;
+       u32 gpio_set, gpio_shift = gpio;
 
        if (AR_DEVID_7010(ah)) {
-               u32 val;
-               val = REG_READ(ah, AR7010_GPIO_IN);
-               return (MS(val, AR7010_GPIO_IN_VAL) & AR_GPIO_BIT(gpio)) == 0;
-       } else if (AR_SREV_9300_20_OR_LATER(ah))
-               return (MS(REG_READ(ah, AR_GPIO_IN), AR9300_GPIO_IN_VAL) &
-                       AR_GPIO_BIT(gpio)) != 0;
-       else if (AR_SREV_9271(ah))
-               return MS_REG_READ(AR9271, gpio) != 0;
-       else if (AR_SREV_9287_11_OR_LATER(ah))
-               return MS_REG_READ(AR9287, gpio) != 0;
-       else if (AR_SREV_9285_12_OR_LATER(ah))
-               return MS_REG_READ(AR9285, gpio) != 0;
-       else if (AR_SREV_9280_20_OR_LATER(ah))
-               return MS_REG_READ(AR928X, gpio) != 0;
-       else
-               return MS_REG_READ(AR, gpio) != 0;
+               gpio_set = out ?
+                       AR7010_GPIO_OE_AS_OUTPUT : AR7010_GPIO_OE_AS_INPUT;
+               REG_RMW(ah, AR7010_GPIO_OE, gpio_set << gpio_shift,
+                       AR7010_GPIO_OE_MASK << gpio_shift);
+       } else if (AR_SREV_SOC(ah)) {
+               gpio_set = out ? 1 : 0;
+               REG_RMW(ah, AR_GPIO_OE_OUT, gpio_set << gpio_shift,
+                       gpio_set << gpio_shift);
+       } else {
+               gpio_shift = gpio << 1;
+               gpio_set = out ?
+                       AR_GPIO_OE_OUT_DRV_ALL : AR_GPIO_OE_OUT_DRV_NO;
+               REG_RMW(ah, AR_GPIO_OE_OUT, gpio_set << gpio_shift,
+                       AR_GPIO_OE_OUT_DRV << gpio_shift);
+
+               if (out)
+                       ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type);
+       }
 }
-EXPORT_SYMBOL(ath9k_hw_gpio_get);
 
-void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio,
-                        u32 ah_signal_type)
+static void ath9k_hw_gpio_request(struct ath_hw *ah, u32 gpio, bool out,
+                                 const char *label, u32 ah_signal_type)
 {
-       u32 gpio_shift;
+       WARN_ON(gpio >= ah->caps.num_gpio_pins);
 
-       if (AR_DEVID_7010(ah)) {
-               gpio_shift = gpio;
-               REG_RMW(ah, AR7010_GPIO_OE,
-                       (AR7010_GPIO_OE_AS_OUTPUT << gpio_shift),
-                       (AR7010_GPIO_OE_MASK << gpio_shift));
-               return;
-       }
+       if (BIT(gpio) & ah->caps.gpio_mask)
+               ath9k_hw_gpio_cfg_wmac(ah, gpio, out, ah_signal_type);
+       else if (AR_SREV_SOC(ah))
+               ath9k_hw_gpio_cfg_soc(ah, gpio, out, label);
+       else
+               WARN_ON(1);
+}
 
-       ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type);
-       gpio_shift = 2 * gpio;
-       REG_RMW(ah,
-               AR_GPIO_OE_OUT,
-               (AR_GPIO_OE_OUT_DRV_ALL << gpio_shift),
-               (AR_GPIO_OE_OUT_DRV << gpio_shift));
+void ath9k_hw_gpio_request_in(struct ath_hw *ah, u32 gpio, const char *label)
+{
+       ath9k_hw_gpio_request(ah, gpio, false, label, 0);
 }
-EXPORT_SYMBOL(ath9k_hw_cfg_output);
+EXPORT_SYMBOL(ath9k_hw_gpio_request_in);
 
-void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val)
+void ath9k_hw_gpio_request_out(struct ath_hw *ah, u32 gpio, const char *label,
+                              u32 ah_signal_type)
 {
-       if (AR_DEVID_7010(ah)) {
-               val = val ? 0 : 1;
-               REG_RMW(ah, AR7010_GPIO_OUT, ((val&1) << gpio),
-                       AR_GPIO_BIT(gpio));
+       ath9k_hw_gpio_request(ah, gpio, true, label, ah_signal_type);
+}
+EXPORT_SYMBOL(ath9k_hw_gpio_request_out);
+
+void ath9k_hw_gpio_free(struct ath_hw *ah, u32 gpio)
+{
+       if (!AR_SREV_SOC(ah))
                return;
+
+       WARN_ON(gpio >= ah->caps.num_gpio_pins);
+
+       if (ah->caps.gpio_requested & BIT(gpio)) {
+               gpio_free(gpio);
+               ah->caps.gpio_requested &= ~BIT(gpio);
        }
+}
+EXPORT_SYMBOL(ath9k_hw_gpio_free);
 
-       if (AR_SREV_9271(ah))
-               val = ~val;
+u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
+{
+       u32 val = 0xffffffff;
 
-       if ((1 << gpio) & AR_GPIO_OE_OUT_MASK)
-               REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio),
-                       AR_GPIO_BIT(gpio));
-       else
-               gpio_set_value(gpio, val & 1);
+#define MS_REG_READ(x, y) \
+       (MS(REG_READ(ah, AR_GPIO_IN_OUT), x##_GPIO_IN_VAL) & BIT(y))
+
+       WARN_ON(gpio >= ah->caps.num_gpio_pins);
+
+       if (BIT(gpio) & ah->caps.gpio_mask) {
+               if (AR_SREV_9271(ah))
+                       val = MS_REG_READ(AR9271, gpio);
+               else if (AR_SREV_9287(ah))
+                       val = MS_REG_READ(AR9287, gpio);
+               else if (AR_SREV_9285(ah))
+                       val = MS_REG_READ(AR9285, gpio);
+               else if (AR_SREV_9280(ah))
+                       val = MS_REG_READ(AR928X, gpio);
+               else if (AR_DEVID_7010(ah))
+                       val = REG_READ(ah, AR7010_GPIO_IN) & BIT(gpio);
+               else if (AR_SREV_9300_20_OR_LATER(ah))
+                       val = REG_READ(ah, AR_GPIO_IN) & BIT(gpio);
+               else
+                       val = MS_REG_READ(AR, gpio);
+       } else if (BIT(gpio) & ah->caps.gpio_requested) {
+               val = gpio_get_value(gpio) & BIT(gpio);
+       } else {
+               WARN_ON(1);
+       }
+
+       return val;
 }
-EXPORT_SYMBOL(ath9k_hw_set_gpio);
+EXPORT_SYMBOL(ath9k_hw_gpio_get);
 
-void ath9k_hw_request_gpio(struct ath_hw *ah, u32 gpio, const char *label)
+void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val)
 {
-       if (gpio >= ah->caps.num_gpio_pins)
-               return;
+       WARN_ON(gpio >= ah->caps.num_gpio_pins);
+
+       if (AR_DEVID_7010(ah) || AR_SREV_9271(ah))
+               val = !val;
+       else
+               val = !!val;
+
+       if (BIT(gpio) & ah->caps.gpio_mask) {
+               u32 out_addr = AR_DEVID_7010(ah) ?
+                       AR7010_GPIO_OUT : AR_GPIO_IN_OUT;
 
-       gpio_request_one(gpio, GPIOF_DIR_OUT | GPIOF_INIT_LOW, label);
+               REG_RMW(ah, out_addr, val << gpio, BIT(gpio));
+       } else if (BIT(gpio) & ah->caps.gpio_requested) {
+               gpio_set_value(gpio, val);
+       } else {
+               WARN_ON(1);
+       }
 }
-EXPORT_SYMBOL(ath9k_hw_request_gpio);
+EXPORT_SYMBOL(ath9k_hw_set_gpio);
 
 void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna)
 {
@@ -2833,8 +2914,7 @@ void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan,
 {
        struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
        struct ieee80211_channel *channel;
-       int chan_pwr, new_pwr, max_gain;
-       int ant_gain, ant_reduction = 0;
+       int chan_pwr, new_pwr;
 
        if (!chan)
                return;
@@ -2842,15 +2922,10 @@ void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan,
        channel = chan->chan;
        chan_pwr = min_t(int, channel->max_power * 2, MAX_RATE_POWER);
        new_pwr = min_t(int, chan_pwr, reg->power_limit);
-       max_gain = chan_pwr - new_pwr + channel->max_antenna_gain * 2;
-
-       ant_gain = get_antenna_gain(ah, chan);
-       if (ant_gain > max_gain)
-               ant_reduction = ant_gain - max_gain;
 
        ah->eep_ops->set_txpower(ah, chan,
                                 ath9k_regd_get_ctl(reg, chan),
-                                ant_reduction, new_pwr, test);
+                                get_antenna_gain(ah, chan), new_pwr, test);
 }
 
 void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test)
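
Taken together, the reworked helpers dispatch on the new capability fields:
a pin whose bit is set in ah->caps.gpio_mask is programmed through the WMAC
registers, otherwise an SoC chip (AR_SREV_SOC) hands it to gpiolib and
tracks it in gpio_requested, and anything else trips a WARN_ON. A minimal
usage sketch, assuming a valid pin number and a label of the caller's
choosing:

	/* Sketch only: request an output pin, toggle it, release it. */
	ath9k_hw_gpio_request_out(ah, gpio, "ath9k-example",
				  AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
	ath9k_hw_set_gpio(ah, gpio, 1);	/* logical high; AR9271/AR7010 invert */
	ath9k_hw_set_gpio(ah, gpio, 0);	/* logical low */
	ath9k_hw_gpio_free(ah, gpio);	/* releases SoC pins, no-op elsewhere */
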
index 831a54415a2512fe890832dcd6ec688d160b1a7c..9cbca1229bac02862211c14d14048f08197dd39d 100644 (file)
 #define AR_GPIO_OUTPUT_MUX_AS_RUCKUS_DATA        0x1e
 
 #define AR_GPIOD_MASK               0x00001FFF
-#define AR_GPIO_BIT(_gpio)          (1 << (_gpio))
 
 #define BASE_ACTIVATE_DELAY         100
 #define RTC_PLL_SETTLE_DELAY        (AR_SREV_9340(ah) ? 1000 : 100)
@@ -301,6 +300,8 @@ struct ath9k_hw_capabilities {
        u8 max_txchains;
        u8 max_rxchains;
        u8 num_gpio_pins;
+       u32 gpio_mask;
+       u32 gpio_requested;
        u8 rx_hp_qdepth;
        u8 rx_lp_qdepth;
        u8 rx_status_len;
@@ -1019,12 +1020,12 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah);
 u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan);
 
 /* GPIO / RFKILL / Antennae */
-void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio);
+void ath9k_hw_gpio_request_in(struct ath_hw *ah, u32 gpio, const char *label);
+void ath9k_hw_gpio_request_out(struct ath_hw *ah, u32 gpio, const char *label,
+                              u32 ah_signal_type);
+void ath9k_hw_gpio_free(struct ath_hw *ah, u32 gpio);
 u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio);
-void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio,
-                        u32 ah_signal_type);
 void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val);
-void ath9k_hw_request_gpio(struct ath_hw *ah, u32 gpio, const char *label);
 void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna);
 
 /* General Operation */
index 1c226d63bb0362d44e3420758909592f08287a12..2ee8624755f7f767bb859d8aa1300330dc092bfd 100644 (file)
@@ -49,6 +49,10 @@ int ath9k_led_blink;
 module_param_named(blink, ath9k_led_blink, int, 0444);
 MODULE_PARM_DESC(blink, "Enable LED blink on activity");
 
+static int ath9k_led_active_high = -1;
+module_param_named(led_active_high, ath9k_led_active_high, int, 0444);
+MODULE_PARM_DESC(led_active_high, "Force LED polarity: -1=default, 0=active low, 1=active high");
+
 static int ath9k_btcoex_enable;
 module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
 MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
@@ -477,7 +481,7 @@ static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob,
 static int ath9k_eeprom_request(struct ath_softc *sc, const char *name)
 {
        struct ath9k_eeprom_ctx ec;
-       struct ath_hw *ah = ah = sc->sc_ah;
+       struct ath_hw *ah = sc->sc_ah;
        int err;
 
        /* try to load the EEPROM content asynchronously */
@@ -600,6 +604,9 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
        if (ret)
                return ret;
 
+       if (ath9k_led_active_high != -1)
+               ah->config.led_active_high = ath9k_led_active_high == 1;
+
        /*
         * Enable WLAN/BT RX Antenna diversity only when:
         *
@@ -660,7 +667,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
 
        ath9k_cmn_init_crypto(sc->sc_ah);
        ath9k_init_misc(sc);
-       ath_fill_led_pin(sc);
        ath_chanctx_init(sc);
        ath9k_offchannel_init(sc);
 
@@ -706,9 +712,9 @@ static void ath9k_init_txpower_limits(struct ath_softc *sc)
        struct ath9k_channel *curchan = ah->curchan;
 
        if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
-               ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
+               ath9k_init_band_txpower(sc, NL80211_BAND_2GHZ);
        if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
-               ath9k_init_band_txpower(sc, IEEE80211_BAND_5GHZ);
+               ath9k_init_band_txpower(sc, NL80211_BAND_5GHZ);
 
        ah->curchan = curchan;
 }
@@ -880,11 +886,11 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
        sc->ant_tx = hw->wiphy->available_antennas_tx;
 
        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
-               hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
-                       &common->sbands[IEEE80211_BAND_2GHZ];
+               hw->wiphy->bands[NL80211_BAND_2GHZ] =
+                       &common->sbands[NL80211_BAND_2GHZ];
        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
-               hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
-                       &common->sbands[IEEE80211_BAND_5GHZ];
+               hw->wiphy->bands[NL80211_BAND_5GHZ] =
+                       &common->sbands[NL80211_BAND_5GHZ];
 
 #ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
        ath9k_set_mcc_capab(sc, hw);
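
The new led_active_high parameter makes the polarity overridable at load
time: -1 (the default) leaves the per-device setting alone, 1 forces
active-high, and any other value forces active-low, e.g.

	modprobe ath9k led_active_high=1
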
index 3aed43a63f946d0fa2d218b15f4621be2ad49977..8b6398850657f014dd47252e1f124e8d823f99b8 100644 (file)
@@ -718,12 +718,9 @@ static int ath9k_start(struct ieee80211_hw *hw)
        if (!ath_complete_reset(sc, false))
                ah->reset_power_on = false;
 
-       if (ah->led_pin >= 0) {
-               ath9k_hw_cfg_output(ah, ah->led_pin,
-                                   AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+       if (ah->led_pin >= 0)
                ath9k_hw_set_gpio(ah, ah->led_pin,
                                  (ah->config.led_active_high) ? 1 : 0);
-       }
 
        /*
         * Reset key cache to sane defaults (all entries cleared) instead of
@@ -867,11 +864,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
 
        spin_lock_bh(&sc->sc_pcu_lock);
 
-       if (ah->led_pin >= 0) {
+       if (ah->led_pin >= 0)
                ath9k_hw_set_gpio(ah, ah->led_pin,
                                  (ah->config.led_active_high) ? 0 : 1);
-               ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
-       }
 
        ath_prepare_reset(sc);
 
@@ -1938,14 +1933,14 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
        if (idx == 0)
                ath_update_survey_stats(sc);
 
-       sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+       sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
        if (sband && idx >= sband->n_channels) {
                idx -= sband->n_channels;
                sband = NULL;
        }
 
        if (!sband)
-               sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];
+               sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
 
        if (!sband || idx >= sband->n_channels) {
                spin_unlock_bh(&common->cc_lock);
index e6fef1be9977d5251762af703a019bb25d71240d..7cdaf40c3057d788dbfa01865c75178871655833 100644 (file)
@@ -28,6 +28,16 @@ static const struct pci_device_id ath_pci_id_table[] = {
        { PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */
        { PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI   */
        { PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI   */
+
+#ifdef CONFIG_ATH9K_PCOEM
+       /* Mini PCI AR9220 MB92 cards: Compex WLM200NX, Wistron DNMA-92 */
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0029,
+                        PCI_VENDOR_ID_ATHEROS,
+                        0x2096),
+         .driver_data = ATH9K_PCI_LED_ACT_HI },
+#endif
+
        { PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
 
 #ifdef CONFIG_ATH9K_PCOEM
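
The new quirk entry matches Mini PCI AR9220 boards by subsystem ID
(168c:2096, e.g. Compex WLM200NX and Wistron DNMA-92) and sets driver_data
to ATH9K_PCI_LED_ACT_HI, so their LED polarity is fixed up automatically
without the module parameter added above.
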
index c8d35febaf0f7e7ffb2bdc462f2987ab5f1a3896..9272ca90632b983b5eec1a57612ac29307f15973 100644 (file)
 #define AR_SREV_9561(_ah) \
        (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9561))
 
+#define AR_SREV_SOC(_ah) \
+       (AR_SREV_9340(_ah) || AR_SREV_9531(_ah) || AR_SREV_9550(ah) || \
+        AR_SREV_9561(ah))
+
 /* NOTE: When adding chips newer than Peacock, add chip check here */
 #define AR_SREV_9580_10_OR_LATER(_ah) \
        (AR_SREV_9580(_ah))
@@ -1104,14 +1108,46 @@ enum {
 
 #define AR_PCIE_PHY_REG3                        0x18c08
 
+/* Define the correct GPIO numbers and mask bits to indicate the WMAC
+ * GPIO resource.
+ * The SoC chips (AR9340, AR9531, AR9550, AR9561) rely on the gpiolib
+ * framework and may access all GPIOs, while the SoC AR9330 is restricted
+ * to the WMAC GPIOs, which follow the same design as the older chips.
+ */
 #define AR_NUM_GPIO                              14
-#define AR928X_NUM_GPIO                          10
+#define AR9280_NUM_GPIO                          10
 #define AR9285_NUM_GPIO                          12
-#define AR9287_NUM_GPIO                          11
+#define AR9287_NUM_GPIO                          10
 #define AR9271_NUM_GPIO                          16
-#define AR9300_NUM_GPIO                          17
+#define AR9300_NUM_GPIO                          16
+#define AR9330_NUM_GPIO                          16
+#define AR9340_NUM_GPIO                          23
+#define AR9462_NUM_GPIO                          10
+#define AR9485_NUM_GPIO                          12
+#define AR9531_NUM_GPIO                          18
+#define AR9550_NUM_GPIO                          24
+#define AR9561_NUM_GPIO                          23
+#define AR9565_NUM_GPIO                          12
+#define AR9580_NUM_GPIO                          16
 #define AR7010_NUM_GPIO                          16
 
+#define AR_GPIO_MASK                            0x00003FFF
+#define AR9271_GPIO_MASK                        0x0000FFFF
+#define AR9280_GPIO_MASK                        0x000003FF
+#define AR9285_GPIO_MASK                        0x00000FFF
+#define AR9287_GPIO_MASK                        0x000003FF
+#define AR9300_GPIO_MASK                        0x0000F4FF
+#define AR9330_GPIO_MASK                        0x0000F4FF
+#define AR9340_GPIO_MASK                        0x0000000F
+#define AR9462_GPIO_MASK                        0x000003FF
+#define AR9485_GPIO_MASK                        0x00000FFF
+#define AR9531_GPIO_MASK                        0x0000000F
+#define AR9550_GPIO_MASK                        0x0000000F
+#define AR9561_GPIO_MASK                        0x0000000F
+#define AR9565_GPIO_MASK                        0x00000FFF
+#define AR9580_GPIO_MASK                        0x0000F4FF
+#define AR7010_GPIO_MASK                        0x0000FFFF
+
 #define AR_GPIO_IN_OUT                           (AR_SREV_9340(ah) ? 0x4028 : 0x4048)
 #define AR_GPIO_IN_VAL                           0x0FFFC000
 #define AR_GPIO_IN_VAL_S                         14
@@ -1132,8 +1168,6 @@ enum {
 
 #define AR_GPIO_OE_OUT                           (AR_SREV_9340(ah) ? 0x4030 : \
                                                  (AR_SREV_9300_20_OR_LATER(ah) ? 0x4050 : 0x404c))
-#define AR_GPIO_OE_OUT_MASK                     (AR_SREV_9550_OR_LATER(ah) ? \
-                                                 0x0000000F : 0xFFFFFFFF)
 #define AR_GPIO_OE_OUT_DRV                       0x3
 #define AR_GPIO_OE_OUT_DRV_NO                    0x0
 #define AR_GPIO_OE_OUT_DRV_LOW                   0x1
@@ -1858,15 +1892,33 @@ enum {
 
 #define AR9300_BT_WGHT             0xcccc4444
 
-#define AR_BT_COEX_MODE2           0x817c
-#define AR_BT_BCN_MISS_THRESH      0x000000ff
-#define AR_BT_BCN_MISS_THRESH_S    0
-#define AR_BT_BCN_MISS_CNT         0x0000ff00
-#define AR_BT_BCN_MISS_CNT_S       8
-#define AR_BT_HOLD_RX_CLEAR        0x00010000
-#define AR_BT_HOLD_RX_CLEAR_S      16
-#define AR_BT_DISABLE_BT_ANT       0x00100000
-#define AR_BT_DISABLE_BT_ANT_S     20
+#define AR_BT_COEX_MODE2               0x817c
+#define AR_BT_BCN_MISS_THRESH          0x000000ff
+#define AR_BT_BCN_MISS_THRESH_S        0
+#define AR_BT_BCN_MISS_CNT             0x0000ff00
+#define AR_BT_BCN_MISS_CNT_S           8
+#define AR_BT_HOLD_RX_CLEAR            0x00010000
+#define AR_BT_HOLD_RX_CLEAR_S          16
+#define AR_BT_PROTECT_BT_AFTER_WAKEUP  0x00080000
+#define AR_BT_PROTECT_BT_AFTER_WAKEUP_S 19
+#define AR_BT_DISABLE_BT_ANT           0x00100000
+#define AR_BT_DISABLE_BT_ANT_S         20
+#define AR_BT_QUIET_2_WIRE             0x00200000
+#define AR_BT_QUIET_2_WIRE_S           21
+#define AR_BT_WL_ACTIVE_MODE           0x00c00000
+#define AR_BT_WL_ACTIVE_MODE_S         22
+#define AR_BT_WL_TXRX_SEPARATE         0x01000000
+#define AR_BT_WL_TXRX_SEPARATE_S       24
+#define AR_BT_RS_DISCARD_EXTEND        0x02000000
+#define AR_BT_RS_DISCARD_EXTEND_S      25
+#define AR_BT_TSF_BT_ACTIVE_CTRL       0x0c000000
+#define AR_BT_TSF_BT_ACTIVE_CTRL_S     26
+#define AR_BT_TSF_BT_PRIORITY_CTRL     0x30000000
+#define AR_BT_TSF_BT_PRIORITY_CTRL_S   28
+#define AR_BT_INTERRUPT_ENABLE         0x40000000
+#define AR_BT_INTERRUPT_ENABLE_S       30
+#define AR_BT_PHY_ERR_BT_COLL_ENABLE   0x80000000
+#define AR_BT_PHY_ERR_BT_COLL_ENABLE_S 31
 
 #define AR_TXSIFS              0x81d0
 #define AR_TXSIFS_TIME         0x000000FF
@@ -1875,6 +1927,16 @@ enum {
 #define AR_TXSIFS_ACK_SHIFT    0x00007000
 #define AR_TXSIFS_ACK_SHIFT_S  12
 
+#define AR_BT_COEX_MODE3                       0x81d4
+#define AR_BT_WL_ACTIVE_TIME                   0x000000ff
+#define AR_BT_WL_ACTIVE_TIME_S                 0
+#define AR_BT_WL_QC_TIME                       0x0000ff00
+#define AR_BT_WL_QC_TIME_S                     8
+#define AR_BT_ALLOW_CONCURRENT_ACCESS          0x000f0000
+#define AR_BT_ALLOW_CONCURRENT_ACCESS_S        16
+#define AR_BT_AGC_SATURATION_CNT_ENABLE        0x00100000
+#define AR_BT_AGC_SATURATION_CNT_ENABLE_S      20
+
 #define AR_TXOP_X          0x81ec
 #define AR_TXOP_X_VAL      0x000000FF
 
index c9cb2aad7b6f7ea95a87bc385b9e49c0eb9615f1..d38e50f96db77b52eeab1238093bffea45ec6b5e 100644 (file)
@@ -55,11 +55,26 @@ static int ath9k_rng_data_read(struct ath_softc *sc, u32 *buf, u32 buf_size)
        return j << 2;
 }
 
+static u32 ath9k_rng_delay_get(u32 fail_stats)
+{
+       u32 delay;
+
+       if (fail_stats < 100)
+               delay = 10;
+       else if (fail_stats < 105)
+               delay = 1000;
+       else
+               delay = 10000;
+
+       return delay;
+}
+
 static int ath9k_rng_kthread(void *data)
 {
        int bytes_read;
        struct ath_softc *sc = data;
        u32 *rng_buf;
+       u32 delay, fail_stats = 0;
 
        rng_buf = kmalloc_array(ATH9K_RNG_BUF_SIZE, sizeof(u32), GFP_KERNEL);
        if (!rng_buf)
@@ -69,10 +84,13 @@ static int ath9k_rng_kthread(void *data)
                bytes_read = ath9k_rng_data_read(sc, rng_buf,
                                                 ATH9K_RNG_BUF_SIZE);
                if (unlikely(!bytes_read)) {
-                       msleep_interruptible(10);
+                       delay = ath9k_rng_delay_get(++fail_stats);
+                       msleep_interruptible(delay);
                        continue;
                }
 
+               fail_stats = 0;
+
                /* sleep until entropy bits under write_wakeup_threshold */
                add_hwgenerator_randomness((void *)rng_buf, bytes_read,
                                           ATH9K_RNG_ENTROPY(bytes_read));
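
The net effect of ath9k_rng_delay_get() is a stepped backoff: empty reads
1..99 retry after 10 ms, reads 100..104 after 1 s, and anything beyond
after 10 s, while a single successful read resets fail_stats and returns to
fast polling; a dead RNG therefore costs one wakeup every 10 s instead of a
hundred per second.
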
index fe795fc5288cba84d5cfdd262d4a04377a5e04e4..8ddd604bd00c94aad8611c85776e46a7ef4568cb 100644 (file)
@@ -1112,7 +1112,7 @@ static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf,
                                bool is_2ghz;
                                struct modal_eep_header *pmodal;
 
-                               is_2ghz = info->band == IEEE80211_BAND_2GHZ;
+                               is_2ghz = info->band == NL80211_BAND_2GHZ;
                                pmodal = &eep->modalHeader[is_2ghz];
                                power_ht40delta = pmodal->ht40PowerIncForPdadc;
                        } else {
@@ -1236,7 +1236,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
 
                /* legacy rates */
                rate = &common->sbands[tx_info->band].bitrates[rates[i].idx];
-               if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
+               if ((tx_info->band == NL80211_BAND_2GHZ) &&
                    !(rate->flags & IEEE80211_RATE_ERP_G))
                        phy = WLAN_RC_PHY_CCK;
                else
index a2f005703c04c38638e0052713e5202ac0335465..7d4a72dc98dba2de94f22b3f805fffda7045966c 100644 (file)
@@ -48,7 +48,7 @@ int carl9170_set_dyn_sifs_ack(struct ar9170 *ar)
        if (conf_is_ht40(&ar->hw->conf))
                val = 0x010a;
        else {
-               if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
+               if (ar->hw->conf.chandef.chan->band == NL80211_BAND_2GHZ)
                        val = 0x105;
                else
                        val = 0x104;
@@ -66,7 +66,7 @@ int carl9170_set_rts_cts_rate(struct ar9170 *ar)
                rts_rate = 0x1da;
                cts_rate = 0x10a;
        } else {
-               if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) {
+               if (ar->hw->conf.chandef.chan->band == NL80211_BAND_2GHZ) {
                        /* 11 mbit CCK */
                        rts_rate = 033;
                        cts_rate = 003;
@@ -93,7 +93,7 @@ int carl9170_set_slot_time(struct ar9170 *ar)
                return 0;
        }
 
-       if ((ar->hw->conf.chandef.chan->band == IEEE80211_BAND_5GHZ) ||
+       if ((ar->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ) ||
            vif->bss_conf.use_short_slot)
                slottime = 9;
 
@@ -120,7 +120,7 @@ int carl9170_set_mac_rates(struct ar9170 *ar)
        basic |= (vif->bss_conf.basic_rates & 0xff0) << 4;
        rcu_read_unlock();
 
-       if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_5GHZ)
+       if (ar->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ)
                mandatory = 0xff00; /* OFDM 6/9/12/18/24/36/48/54 */
        else
                mandatory = 0xff0f; /* OFDM (6/9../54) + CCK (1/2/5.5/11) */
@@ -512,10 +512,10 @@ int carl9170_set_mac_tpc(struct ar9170 *ar, struct ieee80211_channel *channel)
                chains = AR9170_TX_PHY_TXCHAIN_1;
 
        switch (channel->band) {
-       case IEEE80211_BAND_2GHZ:
+       case NL80211_BAND_2GHZ:
                power = ar->power_2G_ofdm[0] & 0x3f;
                break;
-       case IEEE80211_BAND_5GHZ:
+       case NL80211_BAND_5GHZ:
                power = ar->power_5G_leg[0] & 0x3f;
                break;
        default:
index 4d1527a2e292a2ba2d29907554625cf0fbb980f4..ffb22a04beeb748a8a7339bf93ed9c9a0fdc298e 100644 (file)
@@ -1666,7 +1666,7 @@ static int carl9170_op_get_survey(struct ieee80211_hw *hw, int idx,
                        return err;
        }
 
-       for (b = 0; b < IEEE80211_NUM_BANDS; b++) {
+       for (b = 0; b < NUM_NL80211_BANDS; b++) {
                band = ar->hw->wiphy->bands[b];
 
                if (!band)
@@ -1941,13 +1941,13 @@ static int carl9170_parse_eeprom(struct ar9170 *ar)
        }
 
        if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
-               ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+               ar->hw->wiphy->bands[NL80211_BAND_2GHZ] =
                        &carl9170_band_2GHz;
                chans += carl9170_band_2GHz.n_channels;
                bands++;
        }
        if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
-               ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+               ar->hw->wiphy->bands[NL80211_BAND_5GHZ] =
                        &carl9170_band_5GHz;
                chans += carl9170_band_5GHz.n_channels;
                bands++;
index dca6df13fd5b9be5de049f56b595d94fa94aeda2..34d9fd77046eefc02c7cc7ef69b40fa43ff6fce3 100644 (file)
@@ -540,11 +540,11 @@ static int carl9170_init_phy_from_eeprom(struct ar9170 *ar,
        return carl9170_regwrite_result();
 }
 
-static int carl9170_init_phy(struct ar9170 *ar, enum ieee80211_band band)
+static int carl9170_init_phy(struct ar9170 *ar, enum nl80211_band band)
 {
        int i, err;
        u32 val;
-       bool is_2ghz = band == IEEE80211_BAND_2GHZ;
+       bool is_2ghz = band == NL80211_BAND_2GHZ;
        bool is_40mhz = conf_is_ht40(&ar->hw->conf);
 
        carl9170_regwrite_begin(ar);
@@ -1125,13 +1125,13 @@ static int carl9170_set_freq_cal_data(struct ar9170 *ar,
        u8 f, tmp;
 
        switch (channel->band) {
-       case IEEE80211_BAND_2GHZ:
+       case NL80211_BAND_2GHZ:
                f = channel->center_freq - 2300;
                cal_freq_pier = ar->eeprom.cal_freq_pier_2G;
                i = AR5416_NUM_2G_CAL_PIERS - 1;
                break;
 
-       case IEEE80211_BAND_5GHZ:
+       case NL80211_BAND_5GHZ:
                f = (channel->center_freq - 4800) / 5;
                cal_freq_pier = ar->eeprom.cal_freq_pier_5G;
                i = AR5416_NUM_5G_CAL_PIERS - 1;
@@ -1158,12 +1158,12 @@ static int carl9170_set_freq_cal_data(struct ar9170 *ar,
                        int j;
 
                        switch (channel->band) {
-                       case IEEE80211_BAND_2GHZ:
+                       case NL80211_BAND_2GHZ:
                                cal_pier_data = &ar->eeprom.
                                        cal_pier_data_2G[chain][idx];
                                break;
 
-                       case IEEE80211_BAND_5GHZ:
+                       case NL80211_BAND_5GHZ:
                                cal_pier_data = &ar->eeprom.
                                        cal_pier_data_5G[chain][idx];
                                break;
@@ -1340,7 +1340,7 @@ static void carl9170_calc_ctl(struct ar9170 *ar, u32 freq, enum carl9170_bw bw)
                /* skip CTL and heavy clip for CTL_MKK and CTL_ETSI */
                return;
 
-       if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) {
+       if (ar->hw->conf.chandef.chan->band == NL80211_BAND_2GHZ) {
                modes = mode_list_2ghz;
                nr_modes = ARRAY_SIZE(mode_list_2ghz);
        } else {
@@ -1607,7 +1607,7 @@ int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
                return err;
 
        err = carl9170_init_rf_banks_0_7(ar,
-                                        channel->band == IEEE80211_BAND_5GHZ);
+                                        channel->band == NL80211_BAND_5GHZ);
        if (err)
                return err;
 
@@ -1621,7 +1621,7 @@ int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
                return err;
 
        err = carl9170_init_rf_bank4_pwr(ar,
-                                        channel->band == IEEE80211_BAND_5GHZ,
+                                        channel->band == NL80211_BAND_5GHZ,
                                         channel->center_freq, bw);
        if (err)
                return err;
index d66533cbc38ab7a56993102d7187765e45dfd907..0c34c8729dc610fbdf7d05a9bc2a50af8c4ae158 100644 (file)
@@ -417,7 +417,7 @@ static int carl9170_rx_mac_status(struct ar9170 *ar,
 
                        return -EINVAL;
                }
-               if (status->band == IEEE80211_BAND_2GHZ)
+               if (status->band == NL80211_BAND_2GHZ)
                        status->rate_idx += 4;
                break;
 
index ae86a600d9207a4a59530710daae08f857155d34..2bf04c9edc9832d797549c96c091ade2cbe3d455 100644 (file)
@@ -720,12 +720,12 @@ static void carl9170_tx_rate_tpc_chains(struct ar9170 *ar,
                        /* +1 dBm for HT40 */
                        *tpc += 2;
 
-                       if (info->band == IEEE80211_BAND_2GHZ)
+                       if (info->band == NL80211_BAND_2GHZ)
                                txpower = ar->power_2G_ht40;
                        else
                                txpower = ar->power_5G_ht40;
                } else {
-                       if (info->band == IEEE80211_BAND_2GHZ)
+                       if (info->band == NL80211_BAND_2GHZ)
                                txpower = ar->power_2G_ht20;
                        else
                                txpower = ar->power_5G_ht20;
@@ -734,7 +734,7 @@ static void carl9170_tx_rate_tpc_chains(struct ar9170 *ar,
                *phyrate = txrate->idx;
                *tpc += txpower[idx & 7];
        } else {
-               if (info->band == IEEE80211_BAND_2GHZ) {
+               if (info->band == NL80211_BAND_2GHZ) {
                        if (idx < 4)
                                txpower = ar->power_2G_cck;
                        else
@@ -797,7 +797,7 @@ static __le32 carl9170_tx_physet(struct ar9170 *ar,
                 * tmp |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD);
                 */
        } else {
-               if (info->band == IEEE80211_BAND_2GHZ) {
+               if (info->band == NL80211_BAND_2GHZ) {
                        if (txrate->idx <= AR9170_TX_PHY_RATE_CCK_11M)
                                tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_CCK);
                        else
index 06ea6cc9e30a5e07c379116a8e1ec0996e3f189e..7e15ed9ed31f6e29aca8e39f88043d96bcefc357 100644 (file)
@@ -336,12 +336,12 @@ ath_reg_apply_beaconing_flags(struct wiphy *wiphy,
                              struct ath_regulatory *reg,
                              enum nl80211_reg_initiator initiator)
 {
-       enum ieee80211_band band;
+       enum nl80211_band band;
        struct ieee80211_supported_band *sband;
        struct ieee80211_channel *ch;
        unsigned int i;
 
-       for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+       for (band = 0; band < NUM_NL80211_BANDS; band++) {
                if (!wiphy->bands[band])
                        continue;
                sband = wiphy->bands[band];
@@ -374,7 +374,7 @@ ath_reg_apply_ir_flags(struct wiphy *wiphy,
 {
        struct ieee80211_supported_band *sband;
 
-       sband = wiphy->bands[IEEE80211_BAND_2GHZ];
+       sband = wiphy->bands[NL80211_BAND_2GHZ];
        if (!sband)
                return;
 
@@ -402,10 +402,10 @@ static void ath_reg_apply_radar_flags(struct wiphy *wiphy)
        struct ieee80211_channel *ch;
        unsigned int i;
 
-       if (!wiphy->bands[IEEE80211_BAND_5GHZ])
+       if (!wiphy->bands[NL80211_BAND_5GHZ])
                return;
 
-       sband = wiphy->bands[IEEE80211_BAND_5GHZ];
+       sband = wiphy->bands[NL80211_BAND_5GHZ];
 
        for (i = 0; i < sband->n_channels; i++) {
                ch = &sband->channels[i];
@@ -772,7 +772,7 @@ ath_regd_init(struct ath_regulatory *reg,
 EXPORT_SYMBOL(ath_regd_init);
 
 u32 ath_regd_get_band_ctl(struct ath_regulatory *reg,
-                         enum ieee80211_band band)
+                         enum nl80211_band band)
 {
        if (!reg->regpair ||
            (reg->country_code == CTRY_DEFAULT &&
@@ -794,9 +794,9 @@ u32 ath_regd_get_band_ctl(struct ath_regulatory *reg,
        }
 
        switch (band) {
-       case IEEE80211_BAND_2GHZ:
+       case NL80211_BAND_2GHZ:
                return reg->regpair->reg_2ghz_ctl;
-       case IEEE80211_BAND_5GHZ:
+       case NL80211_BAND_5GHZ:
                return reg->regpair->reg_5ghz_ctl;
        default:
                return NO_CTL;
index 37f53bd8fcb13ad1b7f850924383a4e9a297f7af..565d3075f06ee5000e3b0205428a83b2419165f9 100644 (file)
@@ -255,7 +255,7 @@ int ath_regd_init(struct ath_regulatory *reg, struct wiphy *wiphy,
                  void (*reg_notifier)(struct wiphy *wiphy,
                                       struct regulatory_request *request));
 u32 ath_regd_get_band_ctl(struct ath_regulatory *reg,
-                         enum ieee80211_band band);
+                         enum nl80211_band band);
 void ath_reg_notifier_apply(struct wiphy *wiphy,
                            struct regulatory_request *request,
                            struct ath_regulatory *reg);
index ef44a2da644d47be1613041703093cf7567f8979..2a6bb62e785c6265c4462f546e393742ce759ab2 100644 (file)
@@ -33,9 +33,7 @@ static ssize_t read_file_bool_bmps(struct file *file, char __user *user_buf,
        char buf[3];
 
        list_for_each_entry(vif_priv, &wcn->vif_list, list) {
-                       vif = container_of((void *)vif_priv,
-                                  struct ieee80211_vif,
-                                  drv_priv);
+                       vif = wcn36xx_priv_to_vif(vif_priv);
                        if (NL80211_IFTYPE_STATION == vif->type) {
                                if (vif_priv->pw_state == WCN36XX_BMPS)
                                        buf[0] = '1';
@@ -70,9 +68,7 @@ static ssize_t write_file_bool_bmps(struct file *file,
        case 'Y':
        case '1':
                list_for_each_entry(vif_priv, &wcn->vif_list, list) {
-                       vif = container_of((void *)vif_priv,
-                                  struct ieee80211_vif,
-                                  drv_priv);
+                       vif = wcn36xx_priv_to_vif(vif_priv);
                        if (NL80211_IFTYPE_STATION == vif->type) {
                                wcn36xx_enable_keep_alive_null_packet(wcn, vif);
                                wcn36xx_pmc_enter_bmps_state(wcn, vif);
@@ -83,9 +79,7 @@ static ssize_t write_file_bool_bmps(struct file *file,
        case 'N':
        case '0':
                list_for_each_entry(vif_priv, &wcn->vif_list, list) {
-                       vif = container_of((void *)vif_priv,
-                                  struct ieee80211_vif,
-                                  drv_priv);
+                       vif = wcn36xx_priv_to_vif(vif_priv);
                        if (NL80211_IFTYPE_STATION == vif->type)
                                wcn36xx_pmc_exit_bmps_state(wcn, vif);
                }
index b947de0fb2e599804e6ea0cc9f0a2218a71611de..658bfb8baabe5248e68cc87e43405123213ee908 100644 (file)
 
 #define WCN36XX_HAL_IPV4_ADDR_LEN       4
 
-#define WALN_HAL_STA_INVALID_IDX 0xFF
+#define WCN36XX_HAL_STA_INVALID_IDX 0xFF
 #define WCN36XX_HAL_BSS_INVALID_IDX 0xFF
 
 /* Default Beacon template size */
 #define BEACON_TEMPLATE_SIZE 0x180
 
+/* Minimum PVM size that the FW expects. See comment in smd.c for details. */
+#define TIM_MIN_PVM_SIZE 6
+
 /* Param Change Bitmap sent to HAL */
 #define PARAM_BCN_INTERVAL_CHANGED                      (1 << 0)
 #define PARAM_SHORT_PREAMBLE_CHANGED                 (1 << 1)
@@ -2884,11 +2887,14 @@ struct update_beacon_rsp_msg {
 struct wcn36xx_hal_send_beacon_req_msg {
        struct wcn36xx_hal_msg_header header;
 
+       /* length of the template + 6. Only qcom knows why */
+       u32 beacon_length6;
+
        /* length of the template. */
        u32 beacon_length;
 
        /* Beacon data. */
-       u8 beacon[BEACON_TEMPLATE_SIZE];
+       u8 beacon[BEACON_TEMPLATE_SIZE - sizeof(u32)];
 
        u8 bssid[ETH_ALEN];
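
The new beacon_length6 field adds one u32 to the request, and beacon[] shrinks by sizeof(u32) to compensate, so the overall message layout the firmware parses keeps its size. A hedged compile-time check of that invariant (not part of the patch):

	/* Sketch: assert the array shrink offsets the added length
	 * field, leaving BEACON_TEMPLATE_SIZE bytes for both combined.
	 */
	BUILD_BUG_ON(sizeof(u32) +
		     sizeof(((struct wcn36xx_hal_send_beacon_req_msg *)NULL)->beacon)
		     != BEACON_TEMPLATE_SIZE);
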
 
@@ -4261,9 +4267,9 @@ struct wcn36xx_hal_rcv_flt_mc_addr_list_type {
        u8 data_offset;
 
        u32 mc_addr_count;
-       u8 mc_addr[ETH_ALEN][WCN36XX_HAL_MAX_NUM_MULTICAST_ADDRESS];
+       u8 mc_addr[WCN36XX_HAL_MAX_NUM_MULTICAST_ADDRESS][ETH_ALEN];
        u8 bss_index;
-};
+} __packed;
 
 struct wcn36xx_hal_set_pkt_filter_rsp_msg {
        struct wcn36xx_hal_msg_header header;
@@ -4317,7 +4323,7 @@ struct wcn36xx_hal_rcv_flt_pkt_clear_rsp_msg {
 struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg {
        struct wcn36xx_hal_msg_header header;
        struct wcn36xx_hal_rcv_flt_mc_addr_list_type mc_addr_list;
-};
+} __packed;
 
 struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_rsp_msg {
        struct wcn36xx_hal_msg_header header;
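
Two fixes meet in these hunks: mc_addr had its dimensions transposed (ETH_ALEN rows of address slots instead of address slots of ETH_ALEN bytes each), and the structures that travel to the firmware gain __packed so compiler padding cannot shift fields the firmware locates by offset. After the swap, each row is one 6-byte MAC, matching the copy pattern wcn36xx_prepare_multicast() uses later in this patch:

	/* With [count][ETH_ALEN], row i is one complete MAC address;
	 * the old [ETH_ALEN][count] layout scattered addresses. */
	memcpy(fp->mc_addr[fp->mc_addr_count], ha->addr, ETH_ALEN);
	fp->mc_addr_count++;
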
@@ -4383,6 +4389,45 @@ enum place_holder_in_cap_bitmap {
        RTT = 20,
        RATECTRL = 21,
        WOW = 22,
+       WLAN_ROAM_SCAN_OFFLOAD = 23,
+       SPECULATIVE_PS_POLL = 24,
+       SCAN_SCH = 25,
+       IBSS_HEARTBEAT_OFFLOAD = 26,
+       WLAN_SCAN_OFFLOAD = 27,
+       WLAN_PERIODIC_TX_PTRN = 28,
+       ADVANCE_TDLS = 29,
+       BATCH_SCAN = 30,
+       FW_IN_TX_PATH = 31,
+       EXTENDED_NSOFFLOAD_SLOT = 32,
+       CH_SWITCH_V1 = 33,
+       HT40_OBSS_SCAN = 34,
+       UPDATE_CHANNEL_LIST = 35,
+       WLAN_MCADDR_FLT = 36,
+       WLAN_CH144 = 37,
+       NAN = 38,
+       TDLS_SCAN_COEXISTENCE = 39,
+       LINK_LAYER_STATS_MEAS = 40,
+       MU_MIMO = 41,
+       EXTENDED_SCAN = 42,
+       DYNAMIC_WMM_PS = 43,
+       MAC_SPOOFED_SCAN = 44,
+       BMU_ERROR_GENERIC_RECOVERY = 45,
+       DISA = 46,
+       FW_STATS = 47,
+       WPS_PRBRSP_TMPL = 48,
+       BCN_IE_FLT_DELTA = 49,
+       TDLS_OFF_CHANNEL = 51,
+       RTT3 = 52,
+       MGMT_FRAME_LOGGING = 53,
+       ENHANCED_TXBD_COMPLETION = 54,
+       LOGGING_ENHANCEMENT = 55,
+       EXT_SCAN_ENHANCED = 56,
+       MEMORY_DUMP_SUPPORTED = 57,
+       PER_PKT_STATS_SUPPORTED = 58,
+       EXT_LL_STAT = 60,
+       WIFI_CONFIG = 61,
+       ANTENNA_DIVERSITY_SELECTION = 62,
+
        MAX_FEATURE_SUPPORTED = 128,
 };
 
index a27279c2c6950913c0b1f14fd4cacdd3bc650428..a920d70201481379b769ef5d046caf8b55dd7ac4 100644 (file)
@@ -26,14 +26,14 @@ module_param_named(debug_mask, wcn36xx_dbg_mask, uint, 0644);
 MODULE_PARM_DESC(debug_mask, "Debugging mask");
 
 #define CHAN2G(_freq, _idx) { \
-       .band = IEEE80211_BAND_2GHZ, \
+       .band = NL80211_BAND_2GHZ, \
        .center_freq = (_freq), \
        .hw_value = (_idx), \
        .max_power = 25, \
 }
 
 #define CHAN5G(_freq, _idx) { \
-       .band = IEEE80211_BAND_5GHZ, \
+       .band = NL80211_BAND_5GHZ, \
        .center_freq = (_freq), \
        .hw_value = (_idx), \
        .max_power = 25, \
@@ -201,7 +201,45 @@ static const char * const wcn36xx_caps_names[] = {
        "BCN_FILTER",                   /* 19 */
        "RTT",                          /* 20 */
        "RATECTRL",                     /* 21 */
-       "WOW"                           /* 22 */
+       "WOW",                          /* 22 */
+       "WLAN_ROAM_SCAN_OFFLOAD",       /* 23 */
+       "SPECULATIVE_PS_POLL",          /* 24 */
+       "SCAN_SCH",                     /* 25 */
+       "IBSS_HEARTBEAT_OFFLOAD",       /* 26 */
+       "WLAN_SCAN_OFFLOAD",            /* 27 */
+       "WLAN_PERIODIC_TX_PTRN",        /* 28 */
+       "ADVANCE_TDLS",                 /* 29 */
+       "BATCH_SCAN",                   /* 30 */
+       "FW_IN_TX_PATH",                /* 31 */
+       "EXTENDED_NSOFFLOAD_SLOT",      /* 32 */
+       "CH_SWITCH_V1",                 /* 33 */
+       "HT40_OBSS_SCAN",               /* 34 */
+       "UPDATE_CHANNEL_LIST",          /* 35 */
+       "WLAN_MCADDR_FLT",              /* 36 */
+       "WLAN_CH144",                   /* 37 */
+       "NAN",                          /* 38 */
+       "TDLS_SCAN_COEXISTENCE",        /* 39 */
+       "LINK_LAYER_STATS_MEAS",        /* 40 */
+       "MU_MIMO",                      /* 41 */
+       "EXTENDED_SCAN",                /* 42 */
+       "DYNAMIC_WMM_PS",               /* 43 */
+       "MAC_SPOOFED_SCAN",             /* 44 */
+       "BMU_ERROR_GENERIC_RECOVERY",   /* 45 */
+       "DISA",                         /* 46 */
+       "FW_STATS",                     /* 47 */
+       "WPS_PRBRSP_TMPL",              /* 48 */
+       "BCN_IE_FLT_DELTA",             /* 49 */
+       "TDLS_OFF_CHANNEL",             /* 51 */
+       "RTT3",                         /* 52 */
+       "MGMT_FRAME_LOGGING",           /* 53 */
+       "ENHANCED_TXBD_COMPLETION",     /* 54 */
+       "LOGGING_ENHANCEMENT",          /* 55 */
+       "EXT_SCAN_ENHANCED",            /* 56 */
+       "MEMORY_DUMP_SUPPORTED",        /* 57 */
+       "PER_PKT_STATS_SUPPORTED",      /* 58 */
+       "EXT_LL_STAT",                  /* 60 */
+       "WIFI_CONFIG",                  /* 61 */
+       "ANTENNA_DIVERSITY_SELECTION",  /* 62 */
 };
 
 static const char *wcn36xx_get_cap_name(enum place_holder_in_cap_bitmap x)
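
The string table grows in step with the enum extension in hal.h; note that both lists skip bits 50 and 59, so past bit 49 a purely positional lookup drifts from the bit numbers the trailing comments name. The context line above shows the lookup helper whose body lies outside this excerpt; a bounds-checked lookup in its spirit (an assumption, not the patch's verbatim code) would be:

	static const char *cap_name_sketch(enum place_holder_in_cap_bitmap x)
	{
		if (x >= ARRAY_SIZE(wcn36xx_caps_names))
			return "UNKNOWN";
		return wcn36xx_caps_names[x];
	}
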
@@ -287,6 +325,7 @@ static int wcn36xx_start(struct ieee80211_hw *hw)
        }
 
        wcn36xx_detect_chip_version(wcn);
+       wcn36xx_smd_update_cfg(wcn, WCN36XX_HAL_CFG_ENABLE_MC_ADDR_LIST, 1);
 
        /* DMA channel initialization */
        ret = wcn36xx_dxe_init(wcn);
@@ -346,9 +385,7 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
                wcn36xx_dbg(WCN36XX_DBG_MAC, "wcn36xx_config channel switch=%d\n",
                            ch);
                list_for_each_entry(tmp, &wcn->vif_list, list) {
-                       vif = container_of((void *)tmp,
-                                          struct ieee80211_vif,
-                                          drv_priv);
+                       vif = wcn36xx_priv_to_vif(tmp);
                        wcn36xx_smd_switch_channel(wcn, vif, ch);
                }
        }
@@ -356,15 +393,57 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
        return 0;
 }
 
-#define WCN36XX_SUPPORTED_FILTERS (0)
-
 static void wcn36xx_configure_filter(struct ieee80211_hw *hw,
                                     unsigned int changed,
                                     unsigned int *total, u64 multicast)
 {
+       struct wcn36xx_hal_rcv_flt_mc_addr_list_type *fp;
+       struct wcn36xx *wcn = hw->priv;
+       struct wcn36xx_vif *tmp;
+       struct ieee80211_vif *vif = NULL;
+
        wcn36xx_dbg(WCN36XX_DBG_MAC, "mac configure filter\n");
 
-       *total &= WCN36XX_SUPPORTED_FILTERS;
+       *total &= FIF_ALLMULTI;
+
+       fp = (void *)(unsigned long)multicast;
+       list_for_each_entry(tmp, &wcn->vif_list, list) {
+               vif = wcn36xx_priv_to_vif(tmp);
+
+               /* FW handles MC filtering only when connected as STA */
+               if (*total & FIF_ALLMULTI)
+                       wcn36xx_smd_set_mc_list(wcn, vif, NULL);
+               else if (NL80211_IFTYPE_STATION == vif->type && tmp->sta_assoc)
+                       wcn36xx_smd_set_mc_list(wcn, vif, fp);
+       }
+       kfree(fp);
+}
+
+static u64 wcn36xx_prepare_multicast(struct ieee80211_hw *hw,
+                                    struct netdev_hw_addr_list *mc_list)
+{
+       struct wcn36xx_hal_rcv_flt_mc_addr_list_type *fp;
+       struct netdev_hw_addr *ha;
+
+       wcn36xx_dbg(WCN36XX_DBG_MAC, "mac prepare multicast list\n");
+       fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
+       if (!fp) {
+               wcn36xx_err("Out of memory setting filters.\n");
+               return 0;
+       }
+
+       fp->mc_addr_count = 0;
+       /* update multicast filtering parameters */
+       if (netdev_hw_addr_list_count(mc_list) <=
+           WCN36XX_HAL_MAX_NUM_MULTICAST_ADDRESS) {
+               netdev_hw_addr_list_for_each(ha, mc_list) {
+                       memcpy(fp->mc_addr[fp->mc_addr_count],
+                                       ha->addr, ETH_ALEN);
+                       fp->mc_addr_count++;
+               }
+       }
+
+       return (u64)(unsigned long)fp;
 }
 
 static void wcn36xx_tx(struct ieee80211_hw *hw,
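
wcn36xx_prepare_multicast() runs in a context where sleeping is restricted, so mac80211 lets it return an opaque u64 cookie that is later handed to configure_filter() as the multicast argument; here the cookie is a kzalloc'd filter list, unpacked and kfree'd above. A minimal userspace illustration of that pointer round-trip (names invented for the example):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		int obj = 42;
		/* prepare_multicast(): pack a pointer into the cookie */
		uint64_t cookie = (uint64_t)(unsigned long)&obj;
		/* configure_filter(): unpack and use it */
		int *back = (int *)(unsigned long)cookie;

		assert(back == &obj && *back == 42);
		return 0;
	}
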
@@ -375,7 +454,7 @@ static void wcn36xx_tx(struct ieee80211_hw *hw,
        struct wcn36xx_sta *sta_priv = NULL;
 
        if (control->sta)
-               sta_priv = (struct wcn36xx_sta *)control->sta->drv_priv;
+               sta_priv = wcn36xx_sta_to_priv(control->sta);
 
        if (wcn36xx_start_tx(wcn, sta_priv, skb))
                ieee80211_free_txskb(wcn->hw, skb);
@@ -387,8 +466,8 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                           struct ieee80211_key_conf *key_conf)
 {
        struct wcn36xx *wcn = hw->priv;
-       struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
-       struct wcn36xx_sta *sta_priv = vif_priv->sta;
+       struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+       struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
        int ret = 0;
        u8 key[WLAN_MAX_KEY_LEN];
 
@@ -473,6 +552,7 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                break;
        case DISABLE_KEY:
                if (!(IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags)) {
+                       vif_priv->encrypt_type = WCN36XX_HAL_ED_NONE;
                        wcn36xx_smd_remove_bsskey(wcn,
                                vif_priv->encrypt_type,
                                key_conf->keyidx);
@@ -516,11 +596,11 @@ static void wcn36xx_sw_scan_complete(struct ieee80211_hw *hw,
 }
 
 static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta,
-                                        enum ieee80211_band band)
+                                        enum nl80211_band band)
 {
        int i, size;
        u16 *rates_table;
-       struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+       struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
        u32 rates = sta->supp_rates[band];
 
        memset(&sta_priv->supported_rates, 0,
@@ -529,7 +609,7 @@ static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta,
 
        size = ARRAY_SIZE(sta_priv->supported_rates.dsss_rates);
        rates_table = sta_priv->supported_rates.dsss_rates;
-       if (band == IEEE80211_BAND_2GHZ) {
+       if (band == NL80211_BAND_2GHZ) {
                for (i = 0; i < size; i++) {
                        if (rates & 0x01) {
                                rates_table[i] = wcn_2ghz_rates[i].hw_value;
@@ -590,7 +670,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
        struct sk_buff *skb = NULL;
        u16 tim_off, tim_len;
        enum wcn36xx_hal_link_state link_state;
-       struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+       struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
 
        wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss info changed vif %p changed 0x%08x\n",
                    vif, changed);
@@ -620,7 +700,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
 
                if (!is_zero_ether_addr(bss_conf->bssid)) {
                        vif_priv->is_joining = true;
-                       vif_priv->bss_index = 0xff;
+                       vif_priv->bss_index = WCN36XX_HAL_BSS_INVALID_IDX;
                        wcn36xx_smd_join(wcn, bss_conf->bssid,
                                         vif->addr, WCN36XX_HW_CHANNEL(wcn));
                        wcn36xx_smd_config_bss(wcn, vif, NULL,
@@ -628,6 +708,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
                } else {
                        vif_priv->is_joining = false;
                        wcn36xx_smd_delete_bss(wcn, vif);
+                       vif_priv->encrypt_type = WCN36XX_HAL_ED_NONE;
                }
        }
 
@@ -655,6 +736,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
                                     vif->addr,
                                     bss_conf->aid);
 
+                       vif_priv->sta_assoc = true;
                        rcu_read_lock();
                        sta = ieee80211_find_sta(vif, bss_conf->bssid);
                        if (!sta) {
@@ -663,7 +745,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
                                rcu_read_unlock();
                                goto out;
                        }
-                       sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+                       sta_priv = wcn36xx_sta_to_priv(sta);
 
                        wcn36xx_update_allowed_rates(sta, WCN36XX_BAND(wcn));
 
@@ -686,6 +768,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
                                    bss_conf->bssid,
                                    vif->addr,
                                    bss_conf->aid);
+                       vif_priv->sta_assoc = false;
                        wcn36xx_smd_set_link_st(wcn,
                                                bss_conf->bssid,
                                                vif->addr,
@@ -713,7 +796,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
 
                if (bss_conf->enable_beacon) {
                        vif_priv->dtim_period = bss_conf->dtim_period;
-                       vif_priv->bss_index = 0xff;
+                       vif_priv->bss_index = WCN36XX_HAL_BSS_INVALID_IDX;
                        wcn36xx_smd_config_bss(wcn, vif, NULL,
                                               vif->addr, false);
                        skb = ieee80211_beacon_get_tim(hw, vif, &tim_off,
@@ -734,9 +817,9 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
                        wcn36xx_smd_set_link_st(wcn, vif->addr, vif->addr,
                                                link_state);
                } else {
+                       wcn36xx_smd_delete_bss(wcn, vif);
                        wcn36xx_smd_set_link_st(wcn, vif->addr, vif->addr,
                                                WCN36XX_HAL_LINK_IDLE_STATE);
-                       wcn36xx_smd_delete_bss(wcn, vif);
                }
        }
 out:
@@ -757,7 +840,7 @@ static void wcn36xx_remove_interface(struct ieee80211_hw *hw,
                                     struct ieee80211_vif *vif)
 {
        struct wcn36xx *wcn = hw->priv;
-       struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+       struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
        wcn36xx_dbg(WCN36XX_DBG_MAC, "mac remove interface vif %p\n", vif);
 
        list_del(&vif_priv->list);
@@ -768,7 +851,7 @@ static int wcn36xx_add_interface(struct ieee80211_hw *hw,
                                 struct ieee80211_vif *vif)
 {
        struct wcn36xx *wcn = hw->priv;
-       struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+       struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
 
        wcn36xx_dbg(WCN36XX_DBG_MAC, "mac add interface vif %p type %d\n",
                    vif, vif->type);
@@ -792,13 +875,12 @@ static int wcn36xx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                           struct ieee80211_sta *sta)
 {
        struct wcn36xx *wcn = hw->priv;
-       struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
-       struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+       struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+       struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
        wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta add vif %p sta %pM\n",
                    vif, sta->addr);
 
        spin_lock_init(&sta_priv->ampdu_lock);
-       vif_priv->sta = sta_priv;
        sta_priv->vif = vif_priv;
        /*
         * For STA mode HW will be configured on BSS_CHANGED_ASSOC because
@@ -817,14 +899,12 @@ static int wcn36xx_sta_remove(struct ieee80211_hw *hw,
                              struct ieee80211_sta *sta)
 {
        struct wcn36xx *wcn = hw->priv;
-       struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
-       struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+       struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
 
        wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta remove vif %p sta %pM index %d\n",
                    vif, sta->addr, sta_priv->sta_index);
 
        wcn36xx_smd_delete_sta(wcn, sta_priv->sta_index);
-       vif_priv->sta = NULL;
        sta_priv->vif = NULL;
        return 0;
 }
@@ -860,7 +940,7 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
                    struct ieee80211_ampdu_params *params)
 {
        struct wcn36xx *wcn = hw->priv;
-       struct wcn36xx_sta *sta_priv = NULL;
+       struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(params->sta);
        struct ieee80211_sta *sta = params->sta;
        enum ieee80211_ampdu_mlme_action action = params->action;
        u16 tid = params->tid;
@@ -869,8 +949,6 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
        wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu action action %d tid %d\n",
                    action, tid);
 
-       sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
-
        switch (action) {
        case IEEE80211_AMPDU_RX_START:
                sta_priv->tid = tid;
@@ -923,6 +1001,7 @@ static const struct ieee80211_ops wcn36xx_ops = {
        .resume                 = wcn36xx_resume,
 #endif
        .config                 = wcn36xx_config,
+       .prepare_multicast      = wcn36xx_prepare_multicast,
        .configure_filter       = wcn36xx_configure_filter,
        .tx                     = wcn36xx_tx,
        .set_key                = wcn36xx_set_key,
@@ -958,8 +1037,8 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
                BIT(NL80211_IFTYPE_ADHOC) |
                BIT(NL80211_IFTYPE_MESH_POINT);
 
-       wcn->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wcn_band_2ghz;
-       wcn->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wcn_band_5ghz;
+       wcn->hw->wiphy->bands[NL80211_BAND_2GHZ] = &wcn_band_2ghz;
+       wcn->hw->wiphy->bands[NL80211_BAND_5GHZ] = &wcn_band_5ghz;
 
        wcn->hw->wiphy->cipher_suites = cipher_suites;
        wcn->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
index 28b515c81b0e25ee4fcf019961d812ca9b5486d5..589fe5f7097160a4bcee5d1b1be979ce389e297c 100644 (file)
@@ -22,7 +22,7 @@ int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn,
                                 struct ieee80211_vif *vif)
 {
        int ret = 0;
-       struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+       struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
        /* TODO: Make sure the TX chain clean */
        ret = wcn36xx_smd_enter_bmps(wcn, vif);
        if (!ret) {
@@ -42,7 +42,7 @@ int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn,
 int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,
                                struct ieee80211_vif *vif)
 {
-       struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+       struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
 
        if (WCN36XX_BMPS != vif_priv->pw_state) {
                wcn36xx_err("Not in BMPS mode, no need to exit from BMPS mode!\n");
index 74f56a81ad9ad7c628e72fd268d9372d4e561bc9..e8b630c4f11ef0cd2536eabf95b8e2994b7e890a 100644 (file)
@@ -104,11 +104,11 @@ static void wcn36xx_smd_set_bss_nw_type(struct wcn36xx *wcn,
                struct ieee80211_sta *sta,
                struct wcn36xx_hal_config_bss_params *bss_params)
 {
-       if (IEEE80211_BAND_5GHZ == WCN36XX_BAND(wcn))
+       if (NL80211_BAND_5GHZ == WCN36XX_BAND(wcn))
                bss_params->nw_type = WCN36XX_HAL_11A_NW_TYPE;
        else if (sta && sta->ht_cap.ht_supported)
                bss_params->nw_type = WCN36XX_HAL_11N_NW_TYPE;
-       else if (sta && (sta->supp_rates[IEEE80211_BAND_2GHZ] & 0x7f))
+       else if (sta && (sta->supp_rates[NL80211_BAND_2GHZ] & 0x7f))
                bss_params->nw_type = WCN36XX_HAL_11G_NW_TYPE;
        else
                bss_params->nw_type = WCN36XX_HAL_11B_NW_TYPE;
@@ -191,16 +191,16 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
                struct ieee80211_sta *sta,
                struct wcn36xx_hal_config_sta_params *sta_params)
 {
-       struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
-       struct wcn36xx_sta *priv_sta = NULL;
+       struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+       struct wcn36xx_sta *sta_priv = NULL;
        if (vif->type == NL80211_IFTYPE_ADHOC ||
            vif->type == NL80211_IFTYPE_AP ||
            vif->type == NL80211_IFTYPE_MESH_POINT) {
                sta_params->type = 1;
-               sta_params->sta_index = 0xFF;
+               sta_params->sta_index = WCN36XX_HAL_STA_INVALID_IDX;
        } else {
                sta_params->type = 0;
-               sta_params->sta_index = 1;
+               sta_params->sta_index = vif_priv->self_sta_index;
        }
 
        sta_params->listen_interval = WCN36XX_LISTEN_INTERVAL(wcn);
@@ -215,7 +215,7 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
        else
                memcpy(&sta_params->bssid, vif->addr, ETH_ALEN);
 
-       sta_params->encrypt_type = priv_vif->encrypt_type;
+       sta_params->encrypt_type = vif_priv->encrypt_type;
        sta_params->short_preamble_supported = true;
 
        sta_params->rifs_mode = 0;
@@ -224,21 +224,21 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
        sta_params->uapsd = 0;
        sta_params->mimo_ps = WCN36XX_HAL_HT_MIMO_PS_STATIC;
        sta_params->max_ampdu_duration = 0;
-       sta_params->bssid_index = priv_vif->bss_index;
+       sta_params->bssid_index = vif_priv->bss_index;
        sta_params->p2p = 0;
 
        if (sta) {
-               priv_sta = (struct wcn36xx_sta *)sta->drv_priv;
+               sta_priv = wcn36xx_sta_to_priv(sta);
                if (NL80211_IFTYPE_STATION == vif->type)
                        memcpy(&sta_params->bssid, sta->addr, ETH_ALEN);
                else
                        memcpy(&sta_params->mac, sta->addr, ETH_ALEN);
                sta_params->wmm_enabled = sta->wme;
                sta_params->max_sp_len = sta->max_sp;
-               sta_params->aid = priv_sta->aid;
+               sta_params->aid = sta_priv->aid;
                wcn36xx_smd_set_sta_ht_params(sta, sta_params);
-               memcpy(&sta_params->supported_rates, &priv_sta->supported_rates,
-                       sizeof(priv_sta->supported_rates));
+               memcpy(&sta_params->supported_rates, &sta_priv->supported_rates,
+                       sizeof(sta_priv->supported_rates));
        } else {
                wcn36xx_set_default_rates(&sta_params->supported_rates);
                wcn36xx_smd_set_sta_default_ht_params(sta_params);
@@ -271,6 +271,16 @@ out:
        return ret;
 }
 
+static void init_hal_msg(struct wcn36xx_hal_msg_header *hdr,
+                        enum wcn36xx_hal_host_msg_type msg_type,
+                        size_t msg_size)
+{
+       memset(hdr, 0, msg_size + sizeof(*hdr));
+       hdr->msg_type = msg_type;
+       hdr->msg_version = WCN36XX_HAL_MSG_VERSION0;
+       hdr->len = msg_size + sizeof(*hdr);
+}
+
 #define INIT_HAL_MSG(msg_body, type) \
        do {                                                            \
                memset(&msg_body, 0, sizeof(msg_body));                 \
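
init_hal_msg() does for a caller-supplied buffer what the INIT_HAL_MSG() macro above does for a stack-allocated message: zero it, stamp the type and version, and derive header.len from the payload size. wcn36xx_smd_set_mc_list(), added later in this patch, needs it because its request is built directly in wcn->hal_buf (the multicast list is presumably too large to sit comfortably on the stack). The call pattern, condensed into a sketch:

	/* Sketch mirroring wcn36xx_smd_set_mc_list() below. */
	static int set_mc_list_sketch(struct wcn36xx *wcn)
	{
		struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg *msg =
			(void *)wcn->hal_buf;

		init_hal_msg(&msg->header, WCN36XX_HAL_8023_MULTICAST_LIST_REQ,
			     sizeof(msg->mc_addr_list));
		/* header.len now covers header + payload */
		return wcn36xx_smd_send_and_wait(wcn, msg->header.len);
	}
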
@@ -302,22 +312,6 @@ static int wcn36xx_smd_rsp_status_check(void *buf, size_t len)
        return 0;
 }
 
-static int wcn36xx_smd_rsp_status_check_v2(struct wcn36xx *wcn, void *buf,
-                                            size_t len)
-{
-       struct wcn36xx_fw_msg_status_rsp_v2 *rsp;
-
-       if (len < sizeof(struct wcn36xx_hal_msg_header) + sizeof(*rsp))
-               return wcn36xx_smd_rsp_status_check(buf, len);
-
-       rsp = buf + sizeof(struct wcn36xx_hal_msg_header);
-
-       if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->status)
-               return rsp->status;
-
-       return 0;
-}
-
 int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
 {
        struct nv_data *nv_d;
@@ -726,7 +720,7 @@ static int wcn36xx_smd_add_sta_self_rsp(struct wcn36xx *wcn,
                                        size_t len)
 {
        struct wcn36xx_hal_add_sta_self_rsp_msg *rsp;
-       struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+       struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
 
        if (len < sizeof(*rsp))
                return -EINVAL;
@@ -743,8 +737,8 @@ static int wcn36xx_smd_add_sta_self_rsp(struct wcn36xx *wcn,
                    "hal add sta self status %d self_sta_index %d dpu_index %d\n",
                    rsp->status, rsp->self_sta_index, rsp->dpu_index);
 
-       priv_vif->self_sta_index = rsp->self_sta_index;
-       priv_vif->self_dpu_desc_index = rsp->dpu_index;
+       vif_priv->self_sta_index = rsp->self_sta_index;
+       vif_priv->self_dpu_desc_index = rsp->dpu_index;
 
        return 0;
 }
@@ -949,17 +943,32 @@ static void wcn36xx_smd_convert_sta_to_v1(struct wcn36xx *wcn,
        memcpy(&v1->mac, orig->mac, ETH_ALEN);
        v1->aid = orig->aid;
        v1->type = orig->type;
+       v1->short_preamble_supported = orig->short_preamble_supported;
        v1->listen_interval = orig->listen_interval;
+       v1->wmm_enabled = orig->wmm_enabled;
        v1->ht_capable = orig->ht_capable;
-
+       v1->tx_channel_width_set = orig->tx_channel_width_set;
+       v1->rifs_mode = orig->rifs_mode;
+       v1->lsig_txop_protection = orig->lsig_txop_protection;
        v1->max_ampdu_size = orig->max_ampdu_size;
        v1->max_ampdu_density = orig->max_ampdu_density;
        v1->sgi_40mhz = orig->sgi_40mhz;
        v1->sgi_20Mhz = orig->sgi_20Mhz;
-
+       v1->rmf = orig->rmf;
+       v1->encrypt_type = orig->encrypt_type;
+       v1->action = orig->action;
+       v1->uapsd = orig->uapsd;
+       v1->max_sp_len = orig->max_sp_len;
+       v1->green_field_capable = orig->green_field_capable;
+       v1->mimo_ps = orig->mimo_ps;
+       v1->delayed_ba_support = orig->delayed_ba_support;
+       v1->max_ampdu_duration = orig->max_ampdu_duration;
+       v1->dsss_cck_mode_40mhz = orig->dsss_cck_mode_40mhz;
        memcpy(&v1->supported_rates, &orig->supported_rates,
               sizeof(orig->supported_rates));
        v1->sta_index = orig->sta_index;
+       v1->bssid_index = orig->bssid_index;
+       v1->p2p = orig->p2p;
 }
 
 static int wcn36xx_smd_config_sta_rsp(struct wcn36xx *wcn,
@@ -969,7 +978,7 @@ static int wcn36xx_smd_config_sta_rsp(struct wcn36xx *wcn,
 {
        struct wcn36xx_hal_config_sta_rsp_msg *rsp;
        struct config_sta_rsp_params *params;
-       struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+       struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
 
        if (len < sizeof(*rsp))
                return -EINVAL;
@@ -1170,12 +1179,13 @@ static int wcn36xx_smd_config_bss_v1(struct wcn36xx *wcn,
 
 static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn,
                                      struct ieee80211_vif *vif,
+                                     struct ieee80211_sta *sta,
                                      void *buf,
                                      size_t len)
 {
        struct wcn36xx_hal_config_bss_rsp_msg *rsp;
        struct wcn36xx_hal_config_bss_rsp_params *params;
-       struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+       struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
 
        if (len < sizeof(*rsp))
                return -EINVAL;
@@ -1198,14 +1208,15 @@ static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn,
                    params->bss_bcast_sta_idx, params->mac,
                    params->tx_mgmt_power, params->ucast_dpu_signature);
 
-       priv_vif->bss_index = params->bss_index;
+       vif_priv->bss_index = params->bss_index;
 
-       if (priv_vif->sta) {
-               priv_vif->sta->bss_sta_index =  params->bss_sta_index;
-               priv_vif->sta->bss_dpu_desc_index = params->dpu_desc_index;
+       if (sta) {
+               struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
+               sta_priv->bss_sta_index = params->bss_sta_index;
+               sta_priv->bss_dpu_desc_index = params->dpu_desc_index;
        }
 
-       priv_vif->self_ucast_dpu_sign = params->ucast_dpu_signature;
+       vif_priv->self_ucast_dpu_sign = params->ucast_dpu_signature;
 
        return 0;
 }
@@ -1217,7 +1228,7 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
        struct wcn36xx_hal_config_bss_req_msg msg;
        struct wcn36xx_hal_config_bss_params *bss;
        struct wcn36xx_hal_config_sta_params *sta_params;
-       struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+       struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
        int ret = 0;
 
        mutex_lock(&wcn->hal_mutex);
@@ -1329,6 +1340,7 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
        }
        ret = wcn36xx_smd_config_bss_rsp(wcn,
                                         vif,
+                                        sta,
                                         wcn->hal_buf,
                                         wcn->hal_rsp_len);
        if (ret) {
@@ -1343,13 +1355,13 @@ out:
 int wcn36xx_smd_delete_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif)
 {
        struct wcn36xx_hal_delete_bss_req_msg msg_body;
-       struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+       struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
        int ret = 0;
 
        mutex_lock(&wcn->hal_mutex);
        INIT_HAL_MSG(msg_body, WCN36XX_HAL_DELETE_BSS_REQ);
 
-       msg_body.bss_index = priv_vif->bss_index;
+       msg_body.bss_index = vif_priv->bss_index;
 
        PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
 
@@ -1375,26 +1387,47 @@ int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif,
                            u16 p2p_off)
 {
        struct wcn36xx_hal_send_beacon_req_msg msg_body;
-       int ret = 0;
+       int ret = 0, pad, pvm_len;
 
        mutex_lock(&wcn->hal_mutex);
        INIT_HAL_MSG(msg_body, WCN36XX_HAL_SEND_BEACON_REQ);
 
-       /* TODO need to find out why this is needed? */
-       msg_body.beacon_length = skb_beacon->len + 6;
+       pvm_len = skb_beacon->data[tim_off + 1] - 3;
+       pad = TIM_MIN_PVM_SIZE - pvm_len;
 
-       if (BEACON_TEMPLATE_SIZE > msg_body.beacon_length) {
-               memcpy(&msg_body.beacon, &skb_beacon->len, sizeof(u32));
-               memcpy(&(msg_body.beacon[4]), skb_beacon->data,
-                      skb_beacon->len);
-       } else {
+       /* Padding is irrelevant to mesh mode since tim_off is always 0. */
+       if (vif->type == NL80211_IFTYPE_MESH_POINT)
+               pad = 0;
+
+       msg_body.beacon_length = skb_beacon->len + pad;
+       /* TODO need to find out why + 6 is needed */
+       msg_body.beacon_length6 = msg_body.beacon_length + 6;
+
+       if (msg_body.beacon_length > BEACON_TEMPLATE_SIZE) {
                wcn36xx_err("Beacon is too big: beacon size=%d\n",
                              msg_body.beacon_length);
                ret = -ENOMEM;
                goto out;
        }
+       memcpy(msg_body.beacon, skb_beacon->data, skb_beacon->len);
        memcpy(msg_body.bssid, vif->addr, ETH_ALEN);
 
+       if (pad > 0) {
+               /*
+                * The wcn36xx FW has a fixed size for the PVM in the TIM. If
+                * the beacon template from mac80211 carries a PVM shorter than
+                * the FW expects, the FW will overwrite the data after the
+                * TIM.
+                */
+               wcn36xx_dbg(WCN36XX_DBG_HAL, "Pad TIM PVM. %d bytes at %d\n",
+                           pad, pvm_len);
+               memmove(&msg_body.beacon[tim_off + 5 + pvm_len + pad],
+                       &msg_body.beacon[tim_off + 5 + pvm_len],
+                       skb_beacon->len - (tim_off + 5 + pvm_len));
+               memset(&msg_body.beacon[tim_off + 5 + pvm_len], 0, pad);
+               msg_body.beacon[tim_off + 1] += pad;
+       }
+
        /* TODO need to find out why this is needed? */
        if (vif->type == NL80211_IFTYPE_MESH_POINT)
                /* mesh beacon don't need this, so push further down */
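
The padding math above is easiest to see with the layout the code assumes: the TIM element length byte sits at tim_off + 1 and counts DTIM count, DTIM period and bitmap control (3 bytes) plus the partial virtual bitmap, so pvm_len = length - 3 and the PVM itself begins at tim_off + 5. A mac80211-built beacon for an idle BSS typically carries a 1-byte PVM, giving pad = 6 - 1 = 5. Isolated into a userspace sketch:

	#include <stddef.h>
	#include <string.h>

	#define TIM_MIN_PVM_SIZE 6

	/* Grow a short TIM partial virtual bitmap in place. Assumes
	 * the buffer has at least `pad` spare bytes past `len`, as
	 * BEACON_TEMPLATE_SIZE guarantees in the driver.
	 */
	static void pad_tim_pvm(unsigned char *beacon, size_t len,
				size_t tim_off)
	{
		int pvm_len = beacon[tim_off + 1] - 3;
		int pad = TIM_MIN_PVM_SIZE - pvm_len;

		if (pad <= 0)
			return;

		memmove(&beacon[tim_off + 5 + pvm_len + pad],
			&beacon[tim_off + 5 + pvm_len],
			len - (tim_off + 5 + pvm_len));
		memset(&beacon[tim_off + 5 + pvm_len], 0, pad);
		beacon[tim_off + 1] += pad;	/* element length grows */
	}
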
@@ -1598,8 +1631,7 @@ int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn,
                wcn36xx_err("Sending hal_remove_bsskey failed\n");
                goto out;
        }
-       ret = wcn36xx_smd_rsp_status_check_v2(wcn, wcn->hal_buf,
-                                             wcn->hal_rsp_len);
+       ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
        if (ret) {
                wcn36xx_err("hal_remove_bsskey response failed err=%d\n", ret);
                goto out;
@@ -1612,7 +1644,7 @@ out:
 int wcn36xx_smd_enter_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
 {
        struct wcn36xx_hal_enter_bmps_req_msg msg_body;
-       struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+       struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
        int ret = 0;
 
        mutex_lock(&wcn->hal_mutex);
@@ -1641,8 +1673,8 @@ out:
 
 int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
 {
-       struct wcn36xx_hal_enter_bmps_req_msg msg_body;
-       struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+       struct wcn36xx_hal_exit_bmps_req_msg msg_body;
+       struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
        int ret = 0;
 
        mutex_lock(&wcn->hal_mutex);
@@ -1703,7 +1735,7 @@ int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
                               int packet_type)
 {
        struct wcn36xx_hal_keep_alive_req_msg msg_body;
-       struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+       struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
        int ret = 0;
 
        mutex_lock(&wcn->hal_mutex);
@@ -1944,6 +1976,17 @@ out:
        return ret;
 }
 
+static int wcn36xx_smd_trigger_ba_rsp(void *buf, int len)
+{
+       struct wcn36xx_hal_trigger_ba_rsp_msg *rsp;
+
+       if (len < sizeof(*rsp))
+               return -EINVAL;
+
+       rsp = (struct wcn36xx_hal_trigger_ba_rsp_msg *) buf;
+       return rsp->status;
+}
+
 int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index)
 {
        struct wcn36xx_hal_trigger_ba_req_msg msg_body;
@@ -1968,8 +2011,7 @@ int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index)
                wcn36xx_err("Sending hal_trigger_ba failed\n");
                goto out;
        }
-       ret = wcn36xx_smd_rsp_status_check_v2(wcn, wcn->hal_buf,
-                                               wcn->hal_rsp_len);
+       ret = wcn36xx_smd_trigger_ba_rsp(wcn->hal_buf, wcn->hal_rsp_len);
        if (ret) {
                wcn36xx_err("hal_trigger_ba response failed err=%d\n", ret);
                goto out;
@@ -2006,9 +2048,7 @@ static int wcn36xx_smd_missed_beacon_ind(struct wcn36xx *wcn,
                list_for_each_entry(tmp, &wcn->vif_list, list) {
                        wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n",
                                    tmp->bss_index);
-                       vif = container_of((void *)tmp,
-                                                struct ieee80211_vif,
-                                                drv_priv);
+                       vif = wcn36xx_priv_to_vif(tmp);
                        ieee80211_connection_loss(vif);
                }
                return 0;
@@ -2023,9 +2063,7 @@ static int wcn36xx_smd_missed_beacon_ind(struct wcn36xx *wcn,
                if (tmp->bss_index == rsp->bss_index) {
                        wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n",
                                    rsp->bss_index);
-                       vif = container_of((void *)tmp,
-                                                struct ieee80211_vif,
-                                                drv_priv);
+                       vif = wcn36xx_priv_to_vif(tmp);
                        ieee80211_connection_loss(vif);
                        return 0;
                }
@@ -2041,25 +2079,24 @@ static int wcn36xx_smd_delete_sta_context_ind(struct wcn36xx *wcn,
 {
        struct wcn36xx_hal_delete_sta_context_ind_msg *rsp = buf;
        struct wcn36xx_vif *tmp;
-       struct ieee80211_sta *sta = NULL;
+       struct ieee80211_sta *sta;
 
        if (len != sizeof(*rsp)) {
                wcn36xx_warn("Corrupted delete sta indication\n");
                return -EIO;
        }
 
+       wcn36xx_dbg(WCN36XX_DBG_HAL, "delete station indication %pM index %d\n",
+                   rsp->addr2, rsp->sta_id);
+
        list_for_each_entry(tmp, &wcn->vif_list, list) {
-               if (sta && (tmp->sta->sta_index == rsp->sta_id)) {
-                       sta = container_of((void *)tmp->sta,
-                                                struct ieee80211_sta,
-                                                drv_priv);
-                       wcn36xx_dbg(WCN36XX_DBG_HAL,
-                                   "delete station indication %pM index %d\n",
-                                   rsp->addr2,
-                                   rsp->sta_id);
+               rcu_read_lock();
+               sta = ieee80211_find_sta(wcn36xx_priv_to_vif(tmp), rsp->addr2);
+               if (sta)
                        ieee80211_report_low_ack(sta, 0);
+               rcu_read_unlock();
+               if (sta)
                        return 0;
-               }
        }
 
        wcn36xx_warn("STA with addr %pM and index %d not found\n",
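
The rework also changes how the station is resolved: instead of chasing the now-removed vif_priv->sta pointer, the handler looks the station up by MAC address under RCU, honouring mac80211's lifetime rule that a station returned by ieee80211_find_sta() is only valid inside the read-side critical section. The pattern in isolation:

	/* Valid only between rcu_read_lock()/unlock(); do not cache
	 * the sta pointer past the critical section.
	 */
	rcu_read_lock();
	sta = ieee80211_find_sta(vif, addr);
	if (sta)
		ieee80211_report_low_ack(sta, 0);
	rcu_read_unlock();
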
@@ -2100,6 +2137,46 @@ out:
        mutex_unlock(&wcn->hal_mutex);
        return ret;
 }
+
+int wcn36xx_smd_set_mc_list(struct wcn36xx *wcn,
+                           struct ieee80211_vif *vif,
+                           struct wcn36xx_hal_rcv_flt_mc_addr_list_type *fp)
+{
+       struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+       struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg *msg_body = NULL;
+       int ret = 0;
+
+       mutex_lock(&wcn->hal_mutex);
+
+       msg_body = (struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg *)
+                  wcn->hal_buf;
+       init_hal_msg(&msg_body->header, WCN36XX_HAL_8023_MULTICAST_LIST_REQ,
+                    sizeof(msg_body->mc_addr_list));
+
+       /* An empty list means all mc traffic will be received */
+       if (fp)
+               memcpy(&msg_body->mc_addr_list, fp,
+                      sizeof(msg_body->mc_addr_list));
+       else
+               msg_body->mc_addr_list.mc_addr_count = 0;
+
+       msg_body->mc_addr_list.bss_index = vif_priv->bss_index;
+
+       ret = wcn36xx_smd_send_and_wait(wcn, msg_body->header.len);
+       if (ret) {
+               wcn36xx_err("Sending HAL_8023_MULTICAST_LIST failed\n");
+               goto out;
+       }
+       ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+       if (ret) {
+               wcn36xx_err("HAL_8023_MULTICAST_LIST rsp failed err=%d\n", ret);
+               goto out;
+       }
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+
 static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
 {
        struct wcn36xx_hal_msg_header *msg_header = buf;
@@ -2141,6 +2218,7 @@ static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
        case WCN36XX_HAL_UPDATE_SCAN_PARAM_RSP:
        case WCN36XX_HAL_CH_SWITCH_RSP:
        case WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_RSP:
+       case WCN36XX_HAL_8023_MULTICAST_LIST_RSP:
                memcpy(wcn->hal_buf, buf, len);
                wcn->hal_rsp_len = len;
                complete(&wcn->hal_rsp_compl);
index 8361f9e3995bfef0ec7034796a5a95841d043ca3..d74d781f4c8dcecb46475c31fac64935684bd37e 100644 (file)
@@ -44,15 +44,6 @@ struct wcn36xx_fw_msg_status_rsp {
        u32     status;
 } __packed;
 
-/* wcn3620 returns this for tigger_ba */
-
-struct wcn36xx_fw_msg_status_rsp_v2 {
-       u8      bss_id[6];
-       u32     status __packed;
-       u16     count_following_candidates __packed;
-       /* candidate list follows */
-};
-
 struct wcn36xx_hal_ind_msg {
        struct list_head list;
        u8 *msg;
@@ -136,4 +127,7 @@ int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index);
 int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index);
 
 int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value);
+int wcn36xx_smd_set_mc_list(struct wcn36xx *wcn,
+                           struct ieee80211_vif *vif,
+                           struct wcn36xx_hal_rcv_flt_mc_addr_list_type *fp);
 #endif /* _SMD_H_ */
index 99c21aac68bdb7a88a69c2f827a1a0a73b989d47..1f34c2e912d7d9d7e7e39d4684972d4173a405b0 100644 (file)
@@ -102,9 +102,7 @@ static inline struct wcn36xx_vif *get_vif_by_addr(struct wcn36xx *wcn,
        struct wcn36xx_vif *vif_priv = NULL;
        struct ieee80211_vif *vif = NULL;
        list_for_each_entry(vif_priv, &wcn->vif_list, list) {
-                       vif = container_of((void *)vif_priv,
-                                  struct ieee80211_vif,
-                                  drv_priv);
+                       vif = wcn36xx_priv_to_vif(vif_priv);
                        if (memcmp(vif->addr, addr, ETH_ALEN) == 0)
                                return vif_priv;
        }
@@ -167,9 +165,7 @@ static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
         */
        if (sta_priv) {
                __vif_priv = sta_priv->vif;
-               vif = container_of((void *)__vif_priv,
-                                  struct ieee80211_vif,
-                                  drv_priv);
+               vif = wcn36xx_priv_to_vif(__vif_priv);
 
                bd->dpu_sign = sta_priv->ucast_dpu_sign;
                if (vif->type == NL80211_IFTYPE_STATION) {
@@ -225,7 +221,7 @@ static void wcn36xx_set_tx_mgmt(struct wcn36xx_tx_bd *bd,
 
        /* default rate for unicast */
        if (ieee80211_is_mgmt(hdr->frame_control))
-               bd->bd_rate = (WCN36XX_BAND(wcn) == IEEE80211_BAND_5GHZ) ?
+               bd->bd_rate = (WCN36XX_BAND(wcn) == NL80211_BAND_5GHZ) ?
                        WCN36XX_BD_RATE_CTRL :
                        WCN36XX_BD_RATE_MGMT;
        else if (ieee80211_is_ctl(hdr->frame_control))
index 7b41e833e18c3e1edaf2be1f11c636fea50212d7..7433d67a5929d5578fab68361134cb02b0ffb999 100644 (file)
@@ -125,10 +125,10 @@ struct wcn36xx_platform_ctrl_ops {
  */
 struct wcn36xx_vif {
        struct list_head list;
-       struct wcn36xx_sta *sta;
        u8 dtim_period;
        enum ani_ed_type encrypt_type;
        bool is_joining;
+       bool sta_assoc;
        struct wcn36xx_hal_mac_ssid ssid;
 
        /* Power management */
@@ -263,4 +263,22 @@ struct ieee80211_sta *wcn36xx_priv_to_sta(struct wcn36xx_sta *sta_priv)
        return container_of((void *)sta_priv, struct ieee80211_sta, drv_priv);
 }
 
+static inline
+struct wcn36xx_vif *wcn36xx_vif_to_priv(struct ieee80211_vif *vif)
+{
+       return (struct wcn36xx_vif *) vif->drv_priv;
+}
+
+static inline
+struct ieee80211_vif *wcn36xx_priv_to_vif(struct wcn36xx_vif *vif_priv)
+{
+       return container_of((void *) vif_priv, struct ieee80211_vif, drv_priv);
+}
+
+static inline
+struct wcn36xx_sta *wcn36xx_sta_to_priv(struct ieee80211_sta *sta)
+{
+       return (struct wcn36xx_sta *)sta->drv_priv;
+}
+
 #endif /* _WCN36XX_H_ */
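
These helpers centralize the drv_priv casts deleted throughout this patch. Because mac80211 embeds the driver's private area inside struct ieee80211_vif (and likewise struct ieee80211_sta), priv-to-public is a container_of() and the two directions invert each other exactly:

	/* Round-trip sanity sketch for the new helpers. */
	struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);

	WARN_ON(wcn36xx_priv_to_vif(vif_priv) != vif);
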
index fdf63d5fe82bfcd12592b8b716bf7716459cf92b..11b544b26c748a71262f162713dc888a89547318 100644 (file)
@@ -18,6 +18,7 @@ wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
 wil6210-y += wil_platform.o
 wil6210-y += ethtool.o
 wil6210-y += wil_crash_dump.o
+wil6210-y += p2p.o
 
 # for tracing framework to find trace.h
 CFLAGS_trace.o := -I$(src)
index 11f1bb8dfebebf95a089b015702bb9bcf72072c1..0fb3a7941d84ef26ba48fc5c6043695f11895fa7 100644 (file)
 #include "wil6210.h"
 #include "wmi.h"
 
+#define WIL_MAX_ROC_DURATION_MS 5000
+
 #define CHAN60G(_channel, _flags) {                            \
-       .band                   = IEEE80211_BAND_60GHZ,         \
+       .band                   = NL80211_BAND_60GHZ,           \
        .center_freq            = 56160 + (2160 * (_channel)),  \
        .hw_value               = (_channel),                   \
        .flags                  = (_flags),                     \
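The CHAN60G() initializer encodes the 60 GHz (DMG) channelization: the center frequency in MHz is 56160 + 2160 * channel. A quick, self-contained check of the arithmetic (illustrative user-space code):

#include <stdio.h>

int main(void)
{
	/* DMG channels 1..3 come out at 58320, 60480 and 62640 MHz; the
	 * P2P social channel used elsewhere in this patch set (channel 2)
	 * is therefore 60480 MHz
	 */
	for (int ch = 1; ch <= 3; ch++)
		printf("channel %d -> %d MHz\n", ch, 56160 + 2160 * ch);
	return 0;
}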
@@ -76,12 +78,24 @@ wil_mgmt_stypes[NUM_NL80211_IFTYPES] = {
                .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
                BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
        },
+       [NL80211_IFTYPE_P2P_DEVICE] = {
+               .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+               BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+               .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+               BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+       },
 };
 
 static const u32 wil_cipher_suites[] = {
        WLAN_CIPHER_SUITE_GCMP,
 };
 
+static const char * const key_usage_str[] = {
+       [WMI_KEY_USE_PAIRWISE]  = "PTK",
+       [WMI_KEY_USE_RX_GROUP]  = "RX_GTK",
+       [WMI_KEY_USE_TX_GROUP]  = "TX_GTK",
+};
+
 int wil_iftype_nl2wmi(enum nl80211_iftype type)
 {
        static const struct {
@@ -113,7 +127,7 @@ int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid,
                .interval_usec = 0,
        };
        struct {
-               struct wil6210_mbox_hdr_wmi wmi;
+               struct wmi_cmd_hdr wmi;
                struct wmi_notify_req_done_event evt;
        } __packed reply;
        struct wil_net_stats *stats = &wil->sta[cid].stats;
@@ -226,13 +240,82 @@ static int wil_cfg80211_dump_station(struct wiphy *wiphy,
        return rc;
 }
 
+static struct wireless_dev *
+wil_cfg80211_add_iface(struct wiphy *wiphy, const char *name,
+                      unsigned char name_assign_type,
+                      enum nl80211_iftype type,
+                      u32 *flags, struct vif_params *params)
+{
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+       struct net_device *ndev = wil_to_ndev(wil);
+       struct wireless_dev *p2p_wdev;
+
+       wil_dbg_misc(wil, "%s()\n", __func__);
+
+       if (type != NL80211_IFTYPE_P2P_DEVICE) {
+               wil_err(wil, "%s: unsupported iftype %d\n", __func__, type);
+               return ERR_PTR(-EINVAL);
+       }
+
+       if (wil->p2p_wdev) {
+               wil_err(wil, "%s: P2P_DEVICE interface already created\n",
+                       __func__);
+               return ERR_PTR(-EINVAL);
+       }
+
+       p2p_wdev = kzalloc(sizeof(*p2p_wdev), GFP_KERNEL);
+       if (!p2p_wdev)
+               return ERR_PTR(-ENOMEM);
+
+       p2p_wdev->iftype = type;
+       p2p_wdev->wiphy = wiphy;
+       /* use our primary ethernet address */
+       ether_addr_copy(p2p_wdev->address, ndev->perm_addr);
+
+       wil->p2p_wdev = p2p_wdev;
+
+       return p2p_wdev;
+}
+
+static int wil_cfg80211_del_iface(struct wiphy *wiphy,
+                                 struct wireless_dev *wdev)
+{
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+       wil_dbg_misc(wil, "%s()\n", __func__);
+
+       if (wdev != wil->p2p_wdev) {
+               wil_err(wil, "%s: delete of incorrect interface 0x%p\n",
+                       __func__, wdev);
+               return -EINVAL;
+       }
+
+       wil_p2p_wdev_free(wil);
+
+       return 0;
+}
+
 static int wil_cfg80211_change_iface(struct wiphy *wiphy,
                                     struct net_device *ndev,
                                     enum nl80211_iftype type, u32 *flags,
                                     struct vif_params *params)
 {
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
-       struct wireless_dev *wdev = wil->wdev;
+       struct wireless_dev *wdev = wil_to_wdev(wil);
+       int rc;
+
+       wil_dbg_misc(wil, "%s() type=%d\n", __func__, type);
+
+       if (netif_running(wil_to_ndev(wil)) && !wil_is_recovery_blocked(wil)) {
+               wil_dbg_misc(wil, "interface is up. resetting...\n");
+               mutex_lock(&wil->mutex);
+               __wil_down(wil);
+               rc = __wil_up(wil);
+               mutex_unlock(&wil->mutex);
+
+               if (rc)
+                       return rc;
+       }
 
        switch (type) {
        case NL80211_IFTYPE_STATION:
@@ -260,7 +343,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
                             struct cfg80211_scan_request *request)
 {
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
-       struct wireless_dev *wdev = wil->wdev;
+       struct wireless_dev *wdev = request->wdev;
        struct {
                struct wmi_start_scan_cmd cmd;
                u16 chnl[4];
@@ -268,6 +351,9 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
        uint i, n;
        int rc;
 
+       wil_dbg_misc(wil, "%s(), wdev=0x%p iftype=%d\n",
+                    __func__, wdev, wdev->iftype);
+
        if (wil->scan_request) {
                wil_err(wil, "Already scanning\n");
                return -EAGAIN;
@@ -277,6 +363,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
        switch (wdev->iftype) {
        case NL80211_IFTYPE_STATION:
        case NL80211_IFTYPE_P2P_CLIENT:
+       case NL80211_IFTYPE_P2P_DEVICE:
                break;
        default:
                return -EOPNOTSUPP;
@@ -288,6 +375,20 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
                return -EBUSY;
        }
 
+       /* scan on P2P_DEVICE is handled as p2p search */
+       if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) {
+               wil->scan_request = request;
+               wil->radio_wdev = wdev;
+               rc = wil_p2p_search(wil, request);
+               if (rc) {
+                       wil->radio_wdev = wil_to_wdev(wil);
+                       wil->scan_request = NULL;
+               }
+               return rc;
+       }
+
+       (void)wil_p2p_stop_discovery(wil);
+
        wil_dbg_misc(wil, "Start scan_request 0x%p\n", request);
        wil_dbg_misc(wil, "SSID count: %d", request->n_ssids);
 
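The hunk above publishes the scan state (scan_request, radio_wdev) before dispatching a P2P_DEVICE scan to wil_p2p_search(), and rolls both back if the search fails so later scans are not blocked. A small user-space model of that publish-then-rollback idiom (all names are stand-ins):

#include <stdio.h>

static const char *radio_wdev = "primary";
static const char *scan_request;

static int p2p_search(void) { return -1; /* pretend the search failed */ }

static int start_p2p_scan(const char *wdev, const char *request)
{
	scan_request = request;
	radio_wdev = wdev;	/* publish: the radio is now owned by P2P */
	int rc = p2p_search();
	if (rc) {		/* rollback on failure */
		radio_wdev = "primary";
		scan_request = NULL;
	}
	return rc;
}

int main(void)
{
	int rc = start_p2p_scan("p2p-dev", "req0");

	printf("rc=%d radio_wdev=%s scan_request=%s\n", rc, radio_wdev,
	       scan_request ? scan_request : "(null)");
	return 0;
}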
@@ -313,6 +414,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
        mod_timer(&wil->scan_timer, jiffies + WIL6210_SCAN_TO);
 
        memset(&cmd, 0, sizeof(cmd));
+       cmd.cmd.scan_type = WMI_ACTIVE_SCAN;
        cmd.cmd.num_channels = 0;
        n = min(request->n_channels, 4U);
        for (i = 0; i < n; i++) {
@@ -340,12 +442,19 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
        if (rc)
                goto out;
 
+       if (wil->discovery_mode && cmd.cmd.scan_type == WMI_ACTIVE_SCAN) {
+               cmd.cmd.discovery_mode = 1;
+               wil_dbg_misc(wil, "active scan with discovery_mode=1\n");
+       }
+
+       wil->radio_wdev = wdev;
        rc = wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) +
                        cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0]));
 
 out:
        if (rc) {
                del_timer_sync(&wil->scan_timer);
+               wil->radio_wdev = wil_to_wdev(wil);
                wil->scan_request = NULL;
        }
 
@@ -390,6 +499,7 @@ static void wil_print_connect_params(struct wil6210_priv *wil,
                print_hex_dump(KERN_INFO, "  SSID: ", DUMP_PREFIX_OFFSET,
                               16, 1, sme->ssid, sme->ssid_len, true);
        wil_info(wil, "  Privacy: %s\n", sme->privacy ? "secure" : "open");
+       wil_info(wil, "  PBSS: %d\n", sme->pbss);
        wil_print_crypto(wil, &sme->crypto);
 }
 
@@ -404,7 +514,9 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
        const u8 *rsn_eid;
        int ch;
        int rc = 0;
+       enum ieee80211_bss_type bss_type = IEEE80211_BSS_TYPE_ESS;
 
+       wil_dbg_misc(wil, "%s()\n", __func__);
        wil_print_connect_params(wil, sme);
 
        if (test_bit(wil_status_fwconnecting, wil->status) ||
@@ -422,14 +534,12 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
        if (sme->privacy && !rsn_eid)
                wil_info(wil, "WSC connection\n");
 
-       if (sme->pbss) {
-               wil_err(wil, "connect - PBSS not yet supported\n");
-               return -EOPNOTSUPP;
-       }
+       if (sme->pbss)
+               bss_type = IEEE80211_BSS_TYPE_PBSS;
 
        bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
                               sme->ssid, sme->ssid_len,
-                              IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY);
+                              bss_type, IEEE80211_PRIVACY_ANY);
        if (!bss) {
                wil_err(wil, "Unable to find BSS\n");
                return -ENOENT;
@@ -568,10 +678,20 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
        struct ieee80211_mgmt *mgmt_frame = (void *)buf;
        struct wmi_sw_tx_req_cmd *cmd;
        struct {
-               struct wil6210_mbox_hdr_wmi wmi;
+               struct wmi_cmd_hdr wmi;
                struct wmi_sw_tx_complete_event evt;
        } __packed evt;
 
+       /* Note: currently we do not support the "wait" parameter; user-space
+        * must call remain_on_channel before mgmt_tx, or listen on a channel
+        * some other way (as AP/PCP or as a connected station).
+        * In addition, we need to check whether the specified "chan" argument
+        * differs from the currently "listened" channel and fail if it does.
+        */
+
+       wil_dbg_misc(wil, "%s()\n", __func__);
+       print_hex_dump_bytes("mgmt tx frame ", DUMP_PREFIX_OFFSET, buf, len);
+
        cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL);
        if (!cmd) {
                rc = -ENOMEM;
@@ -598,7 +718,7 @@ static int wil_cfg80211_set_channel(struct wiphy *wiphy,
                                    struct cfg80211_chan_def *chandef)
 {
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
-       struct wireless_dev *wdev = wil->wdev;
+       struct wireless_dev *wdev = wil_to_wdev(wil);
 
        wdev->preset_chandef = *chandef;
 
@@ -608,22 +728,19 @@ static int wil_cfg80211_set_channel(struct wiphy *wiphy,
 static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil,
                                               bool pairwise)
 {
-       struct wireless_dev *wdev = wil->wdev;
+       struct wireless_dev *wdev = wil_to_wdev(wil);
        enum wmi_key_usage rc;
-       static const char * const key_usage_str[] = {
-               [WMI_KEY_USE_PAIRWISE]  = "WMI_KEY_USE_PAIRWISE",
-               [WMI_KEY_USE_RX_GROUP]  = "WMI_KEY_USE_RX_GROUP",
-               [WMI_KEY_USE_TX_GROUP]  = "WMI_KEY_USE_TX_GROUP",
-       };
 
        if (pairwise) {
                rc = WMI_KEY_USE_PAIRWISE;
        } else {
                switch (wdev->iftype) {
                case NL80211_IFTYPE_STATION:
+               case NL80211_IFTYPE_P2P_CLIENT:
                        rc = WMI_KEY_USE_RX_GROUP;
                        break;
                case NL80211_IFTYPE_AP:
+               case NL80211_IFTYPE_P2P_GO:
                        rc = WMI_KEY_USE_TX_GROUP;
                        break;
                default:
@@ -638,20 +755,86 @@ static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil,
        return rc;
 }
 
+static struct wil_tid_crypto_rx_single *
+wil_find_crypto_ctx(struct wil6210_priv *wil, u8 key_index,
+                   enum wmi_key_usage key_usage, const u8 *mac_addr)
+{
+       int cid = -EINVAL;
+       int tid = 0;
+       struct wil_sta_info *s;
+       struct wil_tid_crypto_rx *c;
+
+       if (key_usage == WMI_KEY_USE_TX_GROUP)
+               return NULL; /* not needed */
+
+       /* supplicant provides Rx group key in STA mode with NULL MAC address */
+       if (mac_addr)
+               cid = wil_find_cid(wil, mac_addr);
+       else if (key_usage == WMI_KEY_USE_RX_GROUP)
+               cid = wil_find_cid_by_idx(wil, 0);
+       if (cid < 0) {
+               wil_err(wil, "No CID for %pM %s[%d]\n", mac_addr,
+                       key_usage_str[key_usage], key_index);
+               return ERR_PTR(cid);
+       }
+
+       s = &wil->sta[cid];
+       if (key_usage == WMI_KEY_USE_PAIRWISE)
+               c = &s->tid_crypto_rx[tid];
+       else
+               c = &s->group_crypto_rx;
+
+       return &c->key_id[key_index];
+}
+
 static int wil_cfg80211_add_key(struct wiphy *wiphy,
                                struct net_device *ndev,
                                u8 key_index, bool pairwise,
                                const u8 *mac_addr,
                                struct key_params *params)
 {
+       int rc;
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
        enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
+       struct wil_tid_crypto_rx_single *cc = wil_find_crypto_ctx(wil,
+                                                                 key_index,
+                                                                 key_usage,
+                                                                 mac_addr);
+
+       wil_dbg_misc(wil, "%s(%pM %s[%d] PN %*phN)\n", __func__,
+                    mac_addr, key_usage_str[key_usage], key_index,
+                    params->seq_len, params->seq);
+
+       if (IS_ERR(cc)) {
+               wil_err(wil, "Not connected, %s(%pM %s[%d] PN %*phN)\n",
+                       __func__, mac_addr, key_usage_str[key_usage], key_index,
+                       params->seq_len, params->seq);
+               return -EINVAL;
+       }
 
-       wil_dbg_misc(wil, "%s(%pM[%d] %s)\n", __func__, mac_addr, key_index,
-                    pairwise ? "PTK" : "GTK");
+       if (cc)
+               cc->key_set = false;
+
+       if (params->seq && params->seq_len != IEEE80211_GCMP_PN_LEN) {
+               wil_err(wil,
+                       "Wrong PN len %d, %s(%pM %s[%d] PN %*phN)\n",
+                       params->seq_len, __func__, mac_addr,
+                       key_usage_str[key_usage], key_index,
+                       params->seq_len, params->seq);
+               return -EINVAL;
+       }
+
+       rc = wmi_add_cipher_key(wil, key_index, mac_addr, params->key_len,
+                               params->key, key_usage);
+       if ((rc == 0) && cc) {
+               if (params->seq)
+                       memcpy(cc->pn, params->seq, IEEE80211_GCMP_PN_LEN);
+               else
+                       memset(cc->pn, 0, IEEE80211_GCMP_PN_LEN);
+               cc->key_set = true;
+       }
 
-       return wmi_add_cipher_key(wil, key_index, mac_addr, params->key_len,
-                                 params->key, key_usage);
+       return rc;
 }
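wil_find_crypto_ctx() follows the kernel's three-valued pointer convention: a valid pointer, NULL (no per-key context is needed, as for Tx group keys), or an errno encoded with ERR_PTR() when no CID matches. add_key() above rejects errors with IS_ERR(), while del_key() below tolerates both cases via IS_ERR_OR_NULL(). A self-contained user-space rendering of that convention (simplified from include/linux/err.h):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}
static inline int IS_ERR_OR_NULL(const void *ptr)
{
	return !ptr || IS_ERR(ptr);
}

static int ctx;	/* stands in for a wil_tid_crypto_rx_single */

static void *find_ctx(int mode)	/* mimics the three possible outcomes */
{
	switch (mode) {
	case 0: return NULL;			/* Tx group key: not needed */
	case 1: return ERR_PTR(-EINVAL);	/* no CID: not connected */
	default: return &ctx;			/* valid per-key context */
	}
}

int main(void)
{
	for (int mode = 0; mode < 3; mode++) {
		void *cc = find_ctx(mode);

		if (IS_ERR(cc))
			printf("mode %d: error %ld\n", mode, PTR_ERR(cc));
		else
			printf("mode %d: %s\n", mode,
			       IS_ERR_OR_NULL(cc) ? "no ctx" : "ctx ok");
	}
	return 0;
}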
 
 static int wil_cfg80211_del_key(struct wiphy *wiphy,
@@ -661,9 +844,20 @@ static int wil_cfg80211_del_key(struct wiphy *wiphy,
 {
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
        enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
+       struct wil_tid_crypto_rx_single *cc = wil_find_crypto_ctx(wil,
+                                                                 key_index,
+                                                                 key_usage,
+                                                                 mac_addr);
+
+       wil_dbg_misc(wil, "%s(%pM %s[%d])\n", __func__, mac_addr,
+                    key_usage_str[key_usage], key_index);
+
+       if (IS_ERR(cc))
+               wil_info(wil, "Not connected, %s(%pM %s[%d])\n", __func__,
+                        mac_addr, key_usage_str[key_usage], key_index);
 
-       wil_dbg_misc(wil, "%s(%pM[%d] %s)\n", __func__, mac_addr, key_index,
-                    pairwise ? "PTK" : "GTK");
+       if (!IS_ERR_OR_NULL(cc))
+               cc->key_set = false;
 
        return wmi_del_cipher_key(wil, key_index, mac_addr, key_usage);
 }
@@ -674,6 +868,9 @@ static int wil_cfg80211_set_default_key(struct wiphy *wiphy,
                                        u8 key_index, bool unicast,
                                        bool multicast)
 {
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+       wil_dbg_misc(wil, "%s: entered\n", __func__);
        return 0;
 }
 
@@ -686,16 +883,19 @@ static int wil_remain_on_channel(struct wiphy *wiphy,
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
        int rc;
 
-       /* TODO: handle duration */
-       wil_info(wil, "%s(%d, %d ms)\n", __func__, chan->center_freq, duration);
+       wil_dbg_misc(wil, "%s() center_freq=%d, duration=%d iftype=%d\n",
+                    __func__, chan->center_freq, duration, wdev->iftype);
 
-       rc = wmi_set_channel(wil, chan->hw_value);
+       rc = wil_p2p_listen(wil, duration, chan, cookie);
        if (rc)
                return rc;
 
-       rc = wmi_rxon(wil, true);
+       wil->radio_wdev = wdev;
 
-       return rc;
+       cfg80211_ready_on_channel(wdev, *cookie, chan, duration,
+                                 GFP_KERNEL);
+
+       return 0;
 }
 
 static int wil_cancel_remain_on_channel(struct wiphy *wiphy,
@@ -703,13 +903,10 @@ static int wil_cancel_remain_on_channel(struct wiphy *wiphy,
                                        u64 cookie)
 {
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
-       int rc;
-
-       wil_info(wil, "%s()\n", __func__);
 
-       rc = wmi_rxon(wil, false);
+       wil_dbg_misc(wil, "%s()\n", __func__);
 
-       return rc;
+       return wil_p2p_cancel_listen(wil, cookie);
 }
 
 /**
@@ -852,12 +1049,22 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
                                  const u8 *ssid, size_t ssid_len, u32 privacy,
                                  int bi, u8 chan,
                                  struct cfg80211_beacon_data *bcon,
-                                 u8 hidden_ssid)
+                                 u8 hidden_ssid, u32 pbss)
 {
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
        int rc;
        struct wireless_dev *wdev = ndev->ieee80211_ptr;
        u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
+       u8 is_go = (wdev->iftype == NL80211_IFTYPE_P2P_GO);
+
+       if (pbss)
+               wmi_nettype = WMI_NETTYPE_P2P;
+
+       wil_dbg_misc(wil, "%s: is_go=%d\n", __func__, is_go);
+       if (is_go && !pbss) {
+               wil_err(wil, "%s: P2P GO must be in PBSS\n", __func__);
+               return -ENOTSUPP;
+       }
 
        wil_set_recovery_state(wil, fw_recovery_idle);
 
@@ -879,10 +1086,11 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
        wil->privacy = privacy;
        wil->channel = chan;
        wil->hidden_ssid = hidden_ssid;
+       wil->pbss = pbss;
 
        netif_carrier_on(ndev);
 
-       rc = wmi_pcp_start(wil, bi, wmi_nettype, chan, hidden_ssid);
+       rc = wmi_pcp_start(wil, bi, wmi_nettype, chan, hidden_ssid, is_go);
        if (rc)
                goto err_pcp_start;
 
@@ -928,7 +1136,8 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
                                            wdev->ssid_len, privacy,
                                            wdev->beacon_interval,
                                            wil->channel, bcon,
-                                           wil->hidden_ssid);
+                                           wil->hidden_ssid,
+                                           wil->pbss);
        } else {
                rc = _wil_cfg80211_set_ies(wiphy, bcon);
        }
@@ -954,11 +1163,6 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
                return -EINVAL;
        }
 
-       if (info->pbss) {
-               wil_err(wil, "AP: PBSS not yet supported\n");
-               return -EOPNOTSUPP;
-       }
-
        switch (info->hidden_ssid) {
        case NL80211_HIDDEN_SSID_NOT_IN_USE:
                hidden_ssid = WMI_HIDDEN_SSID_DISABLED;
@@ -984,6 +1188,7 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
                     info->hidden_ssid);
        wil_dbg_misc(wil, "BI %d DTIM %d\n", info->beacon_interval,
                     info->dtim_period);
+       wil_dbg_misc(wil, "PBSS %d\n", info->pbss);
        print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET,
                             info->ssid, info->ssid_len);
        wil_print_bcon_data(bcon);
@@ -992,7 +1197,7 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
        rc = _wil_cfg80211_start_ap(wiphy, ndev,
                                    info->ssid, info->ssid_len, info->privacy,
                                    info->beacon_interval, channel->hw_value,
-                                   bcon, hidden_ssid);
+                                   bcon, hidden_ssid, info->pbss);
 
        return rc;
 }
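The start-AP path now derives the WMI network type from the PBSS flag and insists that a P2P GO runs inside a PBSS (the driver returns -ENOTSUPP for that combination). The check, condensed into a runnable stand-alone form (the enum values are stand-ins for the WMI types, and user-space errno lacks ENOTSUPP, so EOPNOTSUPP is used here):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum nettype { NETTYPE_AP, NETTYPE_P2P };

static int pick_nettype(bool is_go, bool pbss, enum nettype *out)
{
	if (is_go && !pbss)
		return -EOPNOTSUPP;	/* P2P GO must be in PBSS */
	*out = pbss ? NETTYPE_P2P : NETTYPE_AP;
	return 0;
}

int main(void)
{
	enum nettype nt = NETTYPE_AP;

	printf("GO without PBSS -> %d\n", pick_nettype(true, false, &nt));
	printf("GO with PBSS    -> %d (nettype %d)\n",
	       pick_nettype(true, true, &nt), nt);
	return 0;
}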
@@ -1139,7 +1344,26 @@ static int wil_cfg80211_change_bss(struct wiphy *wiphy,
        return 0;
 }
 
+static int wil_cfg80211_start_p2p_device(struct wiphy *wiphy,
+                                        struct wireless_dev *wdev)
+{
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+       wil_dbg_misc(wil, "%s: entered\n", __func__);
+       return 0;
+}
+
+static void wil_cfg80211_stop_p2p_device(struct wiphy *wiphy,
+                                        struct wireless_dev *wdev)
+{
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+       wil_dbg_misc(wil, "%s: entered\n", __func__);
+}
+
 static struct cfg80211_ops wil_cfg80211_ops = {
+       .add_virtual_intf = wil_cfg80211_add_iface,
+       .del_virtual_intf = wil_cfg80211_del_iface,
        .scan = wil_cfg80211_scan,
        .connect = wil_cfg80211_connect,
        .disconnect = wil_cfg80211_disconnect,
@@ -1160,20 +1384,25 @@ static struct cfg80211_ops wil_cfg80211_ops = {
        .del_station = wil_cfg80211_del_station,
        .probe_client = wil_cfg80211_probe_client,
        .change_bss = wil_cfg80211_change_bss,
+       /* P2P device */
+       .start_p2p_device = wil_cfg80211_start_p2p_device,
+       .stop_p2p_device = wil_cfg80211_stop_p2p_device,
 };
 
 static void wil_wiphy_init(struct wiphy *wiphy)
 {
        wiphy->max_scan_ssids = 1;
        wiphy->max_scan_ie_len = WMI_MAX_IE_LEN;
+       wiphy->max_remain_on_channel_duration = WIL_MAX_ROC_DURATION_MS;
        wiphy->max_num_pmkids = 0 /* TODO: */;
        wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
                                 BIT(NL80211_IFTYPE_AP) |
+                                BIT(NL80211_IFTYPE_P2P_CLIENT) |
+                                BIT(NL80211_IFTYPE_P2P_GO) |
+                                BIT(NL80211_IFTYPE_P2P_DEVICE) |
                                 BIT(NL80211_IFTYPE_MONITOR);
-       /* TODO: enable P2P when integrated with supplicant:
-        * BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO)
-        */
        wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
+                       WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
                        WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
        dev_dbg(wiphy_dev(wiphy), "%s : flags = 0x%08x\n",
                __func__, wiphy->flags);
@@ -1182,7 +1411,7 @@ static void wil_wiphy_init(struct wiphy *wiphy)
                NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
                NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
 
-       wiphy->bands[IEEE80211_BAND_60GHZ] = &wil_band_60ghz;
+       wiphy->bands[NL80211_BAND_60GHZ] = &wil_band_60ghz;
 
        /* TODO: figure this out */
        wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
@@ -1241,3 +1470,18 @@ void wil_wdev_free(struct wil6210_priv *wil)
        wiphy_free(wdev->wiphy);
        kfree(wdev);
 }
+
+void wil_p2p_wdev_free(struct wil6210_priv *wil)
+{
+       struct wireless_dev *p2p_wdev;
+
+       mutex_lock(&wil->p2p_wdev_mutex);
+       p2p_wdev = wil->p2p_wdev;
+       if (p2p_wdev) {
+               wil->p2p_wdev = NULL;
+               wil->radio_wdev = wil_to_wdev(wil);
+               cfg80211_unregister_wdev(p2p_wdev);
+               kfree(p2p_wdev);
+       }
+       mutex_unlock(&wil->p2p_wdev_mutex);
+}
index 3bbe73b6d05a9caebe7d867d792501c70336cbbd..b338a09175ad5788aeae709e4223c4d57a4256fb 100644 (file)
@@ -37,6 +37,7 @@ enum dbg_off_type {
        doff_x32 = 1,
        doff_ulong = 2,
        doff_io32 = 3,
+       doff_u8 = 4
 };
 
 /* offset to "wil" */
@@ -346,6 +347,10 @@ static void wil6210_debugfs_init_offset(struct wil6210_priv *wil,
                                                         tbl[i].mode, dbg,
                                                         base + tbl[i].off);
                        break;
+               case doff_u8:
+                       f = debugfs_create_u8(tbl[i].name, tbl[i].mode, dbg,
+                                             base + tbl[i].off);
+                       break;
                default:
                        f = ERR_PTR(-EINVAL);
                }
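With the doff_u8 case in place, a byte-sized field can be exported through a single table entry; the discovery_mode entry added later in this diff is the first user. A hedged sketch of the wiring in kernel context (not buildable outside a kernel tree):

/* Exposing a u8 via the wil6210 debugfs offset table; WIL_FIELD() expands
 * to a name/mode/offset/type tuple, and wil6210_debugfs_init_offset()
 * dispatches on the type to debugfs_create_u8(name, mode, dentry, addr).
 */
static const struct dbg_off example_off[] = {
	WIL_FIELD(discovery_mode, S_IRUGO | S_IWUSR, doff_u8),
	{},
};
/* after registration, `cat discovery_mode` reads the byte, and writing to
 * the file toggles the flag consumed by the scan path above
 */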
@@ -821,13 +826,13 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf,
                                  size_t len, loff_t *ppos)
 {
        struct wil6210_priv *wil = file->private_data;
-       struct wil6210_mbox_hdr_wmi *wmi;
+       struct wmi_cmd_hdr *wmi;
        void *cmd;
-       int cmdlen = len - sizeof(struct wil6210_mbox_hdr_wmi);
+       int cmdlen = len - sizeof(struct wmi_cmd_hdr);
        u16 cmdid;
        int rc, rc1;
 
-       if (cmdlen <= 0)
+       if (cmdlen < 0)
                return -EINVAL;
 
        wmi = kmalloc(len, GFP_KERNEL);
@@ -840,8 +845,8 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf,
                return rc;
        }
 
-       cmd = &wmi[1];
-       cmdid = le16_to_cpu(wmi->id);
+       cmd = (cmdlen > 0) ? &wmi[1] : NULL;
+       cmdid = le16_to_cpu(wmi->command_id);
 
        rc1 = wmi_send(wil, cmdid, cmd, cmdlen);
        kfree(wmi);
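Switching the sanity check from `cmdlen <= 0` to `cmdlen < 0` (and passing a NULL payload pointer) legalizes header-only WMI commands, i.e. a write of exactly sizeof(struct wmi_cmd_hdr) bytes. The boundary case, spelled out (the 8-byte header size here is a stand-in):

#include <stdio.h>

#define HDR_SIZE 8	/* stand-in for sizeof(struct wmi_cmd_hdr) */

static const char *verdict(long len, int strict)
{
	long cmdlen = len - HDR_SIZE;

	return (strict ? cmdlen <= 0 : cmdlen < 0) ? "rejected" : "accepted";
}

int main(void)
{
	printf("header-only, old check: %s\n", verdict(HDR_SIZE, 1));
	printf("header-only, new check: %s\n", verdict(HDR_SIZE, 0));
	printf("truncated header, new check: %s\n", verdict(HDR_SIZE - 1, 0));
	return 0;
}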
@@ -985,7 +990,7 @@ static int wil_bf_debugfs_show(struct seq_file *s, void *data)
                .interval_usec = 0,
        };
        struct {
-               struct wil6210_mbox_hdr_wmi wmi;
+               struct wmi_cmd_hdr wmi;
                struct wmi_notify_req_done_event evt;
        } __packed reply;
 
@@ -1333,6 +1338,34 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r)
                   r->ssn_last_drop);
 }
 
+static void wil_print_rxtid_crypto(struct seq_file *s, int tid,
+                                  struct wil_tid_crypto_rx *c)
+{
+       int i;
+
+       for (i = 0; i < 4; i++) {
+               struct wil_tid_crypto_rx_single *cc = &c->key_id[i];
+
+               if (cc->key_set)
+                       goto has_keys;
+       }
+       return;
+
+has_keys:
+       if (tid < WIL_STA_TID_NUM)
+               seq_printf(s, "  [%2d] PN", tid);
+       else
+               seq_puts(s, "  [GR] PN");
+
+       for (i = 0; i < 4; i++) {
+               struct wil_tid_crypto_rx_single *cc = &c->key_id[i];
+
+               seq_printf(s, " [%i%s]%6phN", i, cc->key_set ? "+" : "-",
+                          cc->pn);
+       }
+       seq_puts(s, "\n");
+}
+
 static int wil_sta_debugfs_show(struct seq_file *s, void *data)
 __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
 {
@@ -1360,18 +1393,25 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
                        spin_lock_bh(&p->tid_rx_lock);
                        for (tid = 0; tid < WIL_STA_TID_NUM; tid++) {
                                struct wil_tid_ampdu_rx *r = p->tid_rx[tid];
+                               struct wil_tid_crypto_rx *c =
+                                               &p->tid_crypto_rx[tid];
 
                                if (r) {
-                                       seq_printf(s, "[%2d] ", tid);
+                                       seq_printf(s, "  [%2d] ", tid);
                                        wil_print_rxtid(s, r);
                                }
+
+                               wil_print_rxtid_crypto(s, tid, c);
                        }
+                       wil_print_rxtid_crypto(s, WIL_STA_TID_NUM,
+                                              &p->group_crypto_rx);
                        spin_unlock_bh(&p->tid_rx_lock);
                        seq_printf(s,
-                                  "Rx invalid frame: non-data %lu, short %lu, large %lu\n",
+                                  "Rx invalid frame: non-data %lu, short %lu, large %lu, replay %lu\n",
                                   p->stats.rx_non_data_frame,
                                   p->stats.rx_short_frame,
-                                  p->stats.rx_large_frame);
+                                  p->stats.rx_large_frame,
+                                  p->stats.rx_replay);
 
                        seq_puts(s, "Rx/MCS:");
                        for (mcs = 0; mcs < ARRAY_SIZE(p->stats.rx_per_mcs);
@@ -1487,6 +1527,7 @@ static const struct dbg_off dbg_wil_off[] = {
        WIL_FIELD(hw_version,   S_IRUGO,                doff_x32),
        WIL_FIELD(recovery_count, S_IRUGO,              doff_u32),
        WIL_FIELD(ap_isolate,   S_IRUGO,                doff_u32),
+       WIL_FIELD(discovery_mode, S_IRUGO | S_IWUSR,    doff_u8),
        {},
 };
 
index 4f2ffa5c6e1735c108e6ab4cdc05e59211dc9b9e..fe66b2b646f0c10aa2dc07c84da31c30d9e02cc3 100644 (file)
@@ -391,12 +391,14 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
        wil_dbg_irq(wil, "Thread ISR MISC 0x%08x\n", isr);
 
        if (isr & ISR_MISC_FW_ERROR) {
+               wil->recovery_state = fw_recovery_pending;
                wil_fw_core_dump(wil);
                wil_notify_fw_error(wil);
                isr &= ~ISR_MISC_FW_ERROR;
-               if (wil->platform_ops.notify_crash) {
+               if (wil->platform_ops.notify) {
                        wil_err(wil, "notify platform driver about FW crash");
-                       wil->platform_ops.notify_crash(wil->platform_handle);
+                       wil->platform_ops.notify(wil->platform_handle,
+                                                WIL_PLATFORM_EVT_FW_CRASH);
                } else {
                        wil_fw_error_recovery(wil);
                }
index f7f9486219516f3d8d713d0377fa574bb5eb1ec9..630380078236db718590da145ea1f56a4188d151 100644 (file)
@@ -161,13 +161,20 @@ out_free:
 
 int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd)
 {
+       int ret;
+
        switch (cmd) {
        case WIL_IOCTL_MEMIO:
-               return wil_ioc_memio_dword(wil, data);
+               ret = wil_ioc_memio_dword(wil, data);
+               break;
        case WIL_IOCTL_MEMIO_BLOCK:
-               return wil_ioc_memio_block(wil, data);
+               ret = wil_ioc_memio_block(wil, data);
+               break;
        default:
                wil_dbg_ioctl(wil, "Unsupported IOCTL 0x%04x\n", cmd);
                return -ENOIOCTLCMD;
        }
+
+       wil_dbg_ioctl(wil, "ioctl(0x%04x) -> %d\n", cmd, ret);
+       return ret;
 }
index 78ba6e04c9445f35a3700d9bcd4a21ce1b42b557..8d4e8843004e279792b9823fc5599a48519f8975 100644 (file)
@@ -27,6 +27,11 @@ bool debug_fw; /* = false; */
 module_param(debug_fw, bool, S_IRUGO);
 MODULE_PARM_DESC(debug_fw, " do not perform card reset. For FW debug");
 
+static bool oob_mode;
+module_param(oob_mode, bool, S_IRUGO);
+MODULE_PARM_DESC(oob_mode,
+                " enable out of the box (OOB) mode in FW, for diagnostics and certification");
+
 bool no_fw_recovery;
 module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(no_fw_recovery, " disable automatic FW error recovery");
@@ -149,7 +154,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
        might_sleep();
        wil_dbg_misc(wil, "%s(CID %d, status %d)\n", __func__, cid,
                     sta->status);
-
+       /* inform upper/lower layers */
        if (sta->status != wil_sta_unused) {
                if (!from_event)
                        wmi_disconnect_sta(wil, sta->addr, reason_code, true);
@@ -165,7 +170,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
                }
                sta->status = wil_sta_unused;
        }
-
+       /* reorder buffers */
        for (i = 0; i < WIL_STA_TID_NUM; i++) {
                struct wil_tid_ampdu_rx *r;
 
@@ -177,10 +182,15 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
 
                spin_unlock_bh(&sta->tid_rx_lock);
        }
+       /* crypto context */
+       memset(sta->tid_crypto_rx, 0, sizeof(sta->tid_crypto_rx));
+       memset(&sta->group_crypto_rx, 0, sizeof(sta->group_crypto_rx));
+       /* release vrings */
        for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
                if (wil->vring2cid_tid[i][0] == cid)
                        wil_vring_fini_tx(wil, i);
        }
+       /* statistics */
        memset(&sta->stats, 0, sizeof(sta->stats));
 }
 
@@ -300,6 +310,11 @@ void wil_set_recovery_state(struct wil6210_priv *wil, int state)
        wake_up_interruptible(&wil->wq);
 }
 
+bool wil_is_recovery_blocked(struct wil6210_priv *wil)
+{
+       return no_fw_recovery && (wil->recovery_state == fw_recovery_pending);
+}
+
 static void wil_fw_error_worker(struct work_struct *work)
 {
        struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
@@ -440,9 +455,8 @@ int wil_priv_init(struct wil6210_priv *wil)
 
        mutex_init(&wil->mutex);
        mutex_init(&wil->wmi_mutex);
-       mutex_init(&wil->back_rx_mutex);
-       mutex_init(&wil->back_tx_mutex);
        mutex_init(&wil->probe_client_mutex);
+       mutex_init(&wil->p2p_wdev_mutex);
 
        init_completion(&wil->wmi_ready);
        init_completion(&wil->wmi_call);
@@ -450,17 +464,15 @@ int wil_priv_init(struct wil6210_priv *wil)
        wil->bcast_vring = -1;
        setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil);
        setup_timer(&wil->scan_timer, wil_scan_timer_fn, (ulong)wil);
+       setup_timer(&wil->p2p.discovery_timer, wil_p2p_discovery_timer_fn,
+                   (ulong)wil);
 
        INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker);
        INIT_WORK(&wil->wmi_event_worker, wmi_event_worker);
        INIT_WORK(&wil->fw_error_worker, wil_fw_error_worker);
-       INIT_WORK(&wil->back_rx_worker, wil_back_rx_worker);
-       INIT_WORK(&wil->back_tx_worker, wil_back_tx_worker);
        INIT_WORK(&wil->probe_client_worker, wil_probe_client_worker);
 
        INIT_LIST_HEAD(&wil->pending_wmi_ev);
-       INIT_LIST_HEAD(&wil->back_rx_pending);
-       INIT_LIST_HEAD(&wil->back_tx_pending);
        INIT_LIST_HEAD(&wil->probe_client_pending);
        spin_lock_init(&wil->wmi_ev_lock);
        init_waitqueue_head(&wil->wq);
@@ -514,16 +526,14 @@ void wil_priv_deinit(struct wil6210_priv *wil)
 
        wil_set_recovery_state(wil, fw_recovery_idle);
        del_timer_sync(&wil->scan_timer);
+       del_timer_sync(&wil->p2p.discovery_timer);
        cancel_work_sync(&wil->disconnect_worker);
        cancel_work_sync(&wil->fw_error_worker);
+       cancel_work_sync(&wil->p2p.discovery_expired_work);
        mutex_lock(&wil->mutex);
        wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
        mutex_unlock(&wil->mutex);
        wmi_event_flush(wil);
-       wil_back_rx_flush(wil);
-       cancel_work_sync(&wil->back_rx_worker);
-       wil_back_tx_flush(wil);
-       cancel_work_sync(&wil->back_tx_worker);
        wil_probe_client_flush(wil);
        cancel_work_sync(&wil->probe_client_worker);
        destroy_workqueue(wil->wq_service);
@@ -542,6 +552,16 @@ static inline void wil_release_cpu(struct wil6210_priv *wil)
        wil_w(wil, RGF_USER_USER_CPU_0, 1);
 }
 
+static void wil_set_oob_mode(struct wil6210_priv *wil, bool enable)
+{
+       wil_info(wil, "%s: enable=%d\n", __func__, enable);
+       if (enable) {
+               wil_s(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE);
+       } else {
+               wil_c(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE);
+       }
+}
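wil_s() and wil_c() are the driver's read-modify-write helpers that OR a mask into, or clear it from, a device register; wil_set_oob_mode() applies them to BIT_USER_OOB_MODE in RGF_USER_USAGE_6. The same pattern on a plain word (user-space model; the bit position is illustrative, not the hardware value):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)		(1u << (n))
#define BIT_OOB_MODE	BIT(31)		/* illustrative position */

static uint32_t reg_usage_6;

static void reg_s(uint32_t *r, uint32_t mask) { *r |= mask; }	/* wil_s */
static void reg_c(uint32_t *r, uint32_t mask) { *r &= ~mask; }	/* wil_c */

static void set_oob_mode(bool enable)
{
	if (enable)
		reg_s(&reg_usage_6, BIT_OOB_MODE);
	else
		reg_c(&reg_usage_6, BIT_OOB_MODE);
}

int main(void)
{
	set_oob_mode(true);
	printf("enabled:  0x%08x\n", reg_usage_6);
	set_oob_mode(false);
	printf("disabled: 0x%08x\n", reg_usage_6);
	return 0;
}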
+
 static int wil_target_reset(struct wil6210_priv *wil)
 {
        int delay = 0;
@@ -637,6 +657,7 @@ void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
 static int wil_get_bl_info(struct wil6210_priv *wil)
 {
        struct net_device *ndev = wil_to_ndev(wil);
+       struct wiphy *wiphy = wil_to_wiphy(wil);
        union {
                struct bl_dedicated_registers_v0 bl0;
                struct bl_dedicated_registers_v1 bl1;
@@ -681,6 +702,7 @@ static int wil_get_bl_info(struct wil6210_priv *wil)
        }
 
        ether_addr_copy(ndev->perm_addr, mac);
+       ether_addr_copy(wiphy->perm_addr, mac);
        if (!is_valid_ether_addr(ndev->dev_addr))
                ether_addr_copy(ndev->dev_addr, mac);
 
@@ -767,6 +789,15 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
        if (wil->hw_version == HW_VER_UNKNOWN)
                return -ENODEV;
 
+       if (wil->platform_ops.notify) {
+               rc = wil->platform_ops.notify(wil->platform_handle,
+                                             WIL_PLATFORM_EVT_PRE_RESET);
+               if (rc)
+                       wil_err(wil,
+                               "%s: PRE_RESET platform notify failed, rc %d\n",
+                               __func__, rc);
+       }
+
        set_bit(wil_status_resetting, wil->status);
 
        cancel_work_sync(&wil->disconnect_worker);
@@ -807,6 +838,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
        if (rc)
                return rc;
 
+       wil_set_oob_mode(wil, oob_mode);
        if (load_fw) {
                wil_info(wil, "Use firmware <%s> + board <%s>\n", WIL_FW_NAME,
                         WIL_FW2_NAME);
@@ -846,8 +878,27 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 
                /* we just started MAC, wait for FW ready */
                rc = wil_wait_for_fw_ready(wil);
-               if (rc == 0) /* check FW is responsive */
-                       rc = wmi_echo(wil);
+               if (rc)
+                       return rc;
+
+               /* check FW is responsive */
+               rc = wmi_echo(wil);
+               if (rc) {
+                       wil_err(wil, "%s: wmi_echo failed, rc %d\n",
+                               __func__, rc);
+                       return rc;
+               }
+
+               if (wil->platform_ops.notify) {
+                       rc = wil->platform_ops.notify(wil->platform_handle,
+                                                     WIL_PLATFORM_EVT_FW_RDY);
+                       if (rc) {
+                               wil_err(wil,
+                                       "%s: FW_RDY notify failed, rc %d\n",
+                                       __func__, rc);
+                               rc = 0;
+                       }
+               }
        }
 
        return rc;
@@ -954,6 +1005,8 @@ int __wil_down(struct wil6210_priv *wil)
        }
        wil_enable_irq(wil);
 
+       (void)wil_p2p_stop_discovery(wil);
+
        if (wil->scan_request) {
                wil_dbg_misc(wil, "Abort scan_request 0x%p\n",
                             wil->scan_request);
index ecc3c1bdae4b535ed1018676eb820fd372f85082..098409753d5b2a10b3e1d6eedb6498ec3c495199 100644 (file)
@@ -60,11 +60,7 @@ static int wil_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
 {
        struct wil6210_priv *wil = ndev_to_wil(ndev);
 
-       int ret = wil_ioctl(wil, ifr->ifr_data, cmd);
-
-       wil_dbg_misc(wil, "ioctl(0x%04x) -> %d\n", cmd, ret);
-
-       return ret;
+       return wil_ioctl(wil, ifr->ifr_data, cmd);
 }
 
 static const struct net_device_ops wil_netdev_ops = {
@@ -149,6 +145,7 @@ void *wil_if_alloc(struct device *dev)
 
        wil = wdev_to_wil(wdev);
        wil->wdev = wdev;
+       wil->radio_wdev = wdev;
 
        wil_dbg_misc(wil, "%s()\n", __func__);
 
@@ -160,7 +157,7 @@ void *wil_if_alloc(struct device *dev)
 
        wdev->iftype = NL80211_IFTYPE_STATION; /* TODO */
        /* default monitor channel */
-       ch = wdev->wiphy->bands[IEEE80211_BAND_60GHZ]->channels;
+       ch = wdev->wiphy->bands[NL80211_BAND_60GHZ]->channels;
        cfg80211_chandef_create(&wdev->preset_chandef, ch, NL80211_CHAN_NO_HT);
 
        ndev = alloc_netdev(0, "wlan%d", NET_NAME_UNKNOWN, wil_dev_setup);
diff --git a/drivers/net/wireless/ath/wil6210/p2p.c b/drivers/net/wireless/ath/wil6210/p2p.c
new file mode 100644 (file)
index 0000000..2c1b895
--- /dev/null
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "wil6210.h"
+#include "wmi.h"
+
+#define P2P_WILDCARD_SSID "DIRECT-"
+#define P2P_DMG_SOCIAL_CHANNEL 2
+#define P2P_SEARCH_DURATION_MS 500
+#define P2P_DEFAULT_BI 100
+
+void wil_p2p_discovery_timer_fn(ulong x)
+{
+       struct wil6210_priv *wil = (void *)x;
+
+       wil_dbg_misc(wil, "%s\n", __func__);
+
+       schedule_work(&wil->p2p.discovery_expired_work);
+}
+
+int wil_p2p_search(struct wil6210_priv *wil,
+                  struct cfg80211_scan_request *request)
+{
+       int rc;
+       struct wil_p2p_info *p2p = &wil->p2p;
+
+       wil_dbg_misc(wil, "%s: channel %d\n",
+                    __func__, P2P_DMG_SOCIAL_CHANNEL);
+
+       mutex_lock(&wil->mutex);
+
+       if (p2p->discovery_started) {
+               wil_err(wil, "%s: search failed. discovery already ongoing\n",
+                       __func__);
+               rc = -EBUSY;
+               goto out;
+       }
+
+       rc = wmi_p2p_cfg(wil, P2P_DMG_SOCIAL_CHANNEL, P2P_DEFAULT_BI);
+       if (rc) {
+               wil_err(wil, "%s: wmi_p2p_cfg failed\n", __func__);
+               goto out;
+       }
+
+       rc = wmi_set_ssid(wil, strlen(P2P_WILDCARD_SSID), P2P_WILDCARD_SSID);
+       if (rc) {
+               wil_err(wil, "%s: wmi_set_ssid failed\n", __func__);
+               goto out_stop;
+       }
+
+       /* Set application IE to probe request and probe response */
+       rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ,
+                       request->ie_len, request->ie);
+       if (rc) {
+               wil_err(wil, "%s: wmi_set_ie(WMI_FRAME_PROBE_REQ) failed\n",
+                       __func__);
+               goto out_stop;
+       }
+
+       /* supplicant doesn't provide Probe Response IEs. As a workaround -
+        * re-use Probe Request IEs
+        */
+       rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP,
+                       request->ie_len, request->ie);
+       if (rc) {
+               wil_err(wil, "%s: wmi_set_ie(WMI_FRAME_PROBE_RESP) failed\n",
+                       __func__);
+               goto out_stop;
+       }
+
+       rc = wmi_start_search(wil);
+       if (rc) {
+               wil_err(wil, "%s: wmi_start_search failed\n", __func__);
+               goto out_stop;
+       }
+
+       p2p->discovery_started = 1;
+       INIT_WORK(&p2p->discovery_expired_work, wil_p2p_search_expired);
+       mod_timer(&p2p->discovery_timer,
+                 jiffies + msecs_to_jiffies(P2P_SEARCH_DURATION_MS));
+
+out_stop:
+       if (rc)
+               wmi_stop_discovery(wil);
+
+out:
+       mutex_unlock(&wil->mutex);
+       return rc;
+}
+
+int wil_p2p_listen(struct wil6210_priv *wil, unsigned int duration,
+                  struct ieee80211_channel *chan, u64 *cookie)
+{
+       struct wil_p2p_info *p2p = &wil->p2p;
+       u8 channel = P2P_DMG_SOCIAL_CHANNEL;
+       int rc;
+
+       if (chan)
+               channel = chan->hw_value;
+
+       wil_dbg_misc(wil, "%s: duration %d\n", __func__, duration);
+
+       mutex_lock(&wil->mutex);
+
+       if (p2p->discovery_started) {
+               wil_err(wil, "%s: discovery already ongoing\n", __func__);
+               rc = -EBUSY;
+               goto out;
+       }
+
+       rc = wmi_p2p_cfg(wil, channel, P2P_DEFAULT_BI);
+       if (rc) {
+               wil_err(wil, "%s: wmi_p2p_cfg failed\n", __func__);
+               goto out;
+       }
+
+       rc = wmi_set_ssid(wil, strlen(P2P_WILDCARD_SSID), P2P_WILDCARD_SSID);
+       if (rc) {
+               wil_err(wil, "%s: wmi_set_ssid failed\n", __func__);
+               goto out_stop;
+       }
+
+       rc = wmi_start_listen(wil);
+       if (rc) {
+               wil_err(wil, "%s: wmi_start_listen failed\n", __func__);
+               goto out_stop;
+       }
+
+       memcpy(&p2p->listen_chan, chan, sizeof(*chan));
+       *cookie = ++p2p->cookie;
+
+       p2p->discovery_started = 1;
+       INIT_WORK(&p2p->discovery_expired_work, wil_p2p_listen_expired);
+       mod_timer(&p2p->discovery_timer,
+                 jiffies + msecs_to_jiffies(duration));
+
+out_stop:
+       if (rc)
+               wmi_stop_discovery(wil);
+
+out:
+       mutex_unlock(&wil->mutex);
+       return rc;
+}
+
+u8 wil_p2p_stop_discovery(struct wil6210_priv *wil)
+{
+       struct wil_p2p_info *p2p = &wil->p2p;
+       u8 started = p2p->discovery_started;
+
+       if (p2p->discovery_started) {
+               del_timer_sync(&p2p->discovery_timer);
+               p2p->discovery_started = 0;
+               wmi_stop_discovery(wil);
+       }
+
+       return started;
+}
+
+int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie)
+{
+       struct wil_p2p_info *p2p = &wil->p2p;
+       u8 started;
+
+       mutex_lock(&wil->mutex);
+
+       if (cookie != p2p->cookie) {
+               wil_info(wil, "%s: Cookie mismatch: 0x%016llx vs. 0x%016llx\n",
+                        __func__, p2p->cookie, cookie);
+               mutex_unlock(&wil->mutex);
+               return -ENOENT;
+       }
+
+       started = wil_p2p_stop_discovery(wil);
+
+       mutex_unlock(&wil->mutex);
+
+       if (!started) {
+               wil_err(wil, "%s: listen not started\n", __func__);
+               return -ENOENT;
+       }
+
+       mutex_lock(&wil->p2p_wdev_mutex);
+       cfg80211_remain_on_channel_expired(wil->radio_wdev,
+                                          p2p->cookie,
+                                          &p2p->listen_chan,
+                                          GFP_KERNEL);
+       wil->radio_wdev = wil->wdev;
+       mutex_unlock(&wil->p2p_wdev_mutex);
+       return 0;
+}
+
+void wil_p2p_listen_expired(struct work_struct *work)
+{
+       struct wil_p2p_info *p2p = container_of(work,
+                       struct wil_p2p_info, discovery_expired_work);
+       struct wil6210_priv *wil = container_of(p2p,
+                       struct wil6210_priv, p2p);
+       u8 started;
+
+       wil_dbg_misc(wil, "%s()\n", __func__);
+
+       mutex_lock(&wil->mutex);
+       started = wil_p2p_stop_discovery(wil);
+       mutex_unlock(&wil->mutex);
+
+       if (started) {
+               mutex_lock(&wil->p2p_wdev_mutex);
+               cfg80211_remain_on_channel_expired(wil->radio_wdev,
+                                                  p2p->cookie,
+                                                  &p2p->listen_chan,
+                                                  GFP_KERNEL);
+               wil->radio_wdev = wil->wdev;
+               mutex_unlock(&wil->p2p_wdev_mutex);
+       }
+
+}
+
+void wil_p2p_search_expired(struct work_struct *work)
+{
+       struct wil_p2p_info *p2p = container_of(work,
+                       struct wil_p2p_info, discovery_expired_work);
+       struct wil6210_priv *wil = container_of(p2p,
+                       struct wil6210_priv, p2p);
+       u8 started;
+
+       wil_dbg_misc(wil, "%s()\n", __func__);
+
+       mutex_lock(&wil->mutex);
+       started = wil_p2p_stop_discovery(wil);
+       mutex_unlock(&wil->mutex);
+
+       if (started) {
+               mutex_lock(&wil->p2p_wdev_mutex);
+               cfg80211_scan_done(wil->scan_request, 0);
+               wil->scan_request = NULL;
+               wil->radio_wdev = wil->wdev;
+               mutex_unlock(&wil->p2p_wdev_mutex);
+       }
+}
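wil_p2p_listen() hands user-space an opaque cookie taken from a monotonically increasing counter, and wil_p2p_cancel_listen() only honours the most recent one, failing with -ENOENT on a stale cookie or when no listen is active. A user-space model of that contract (names and types simplified):

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t current_cookie;
static bool discovery_started;

static uint64_t start_listen(void)
{
	discovery_started = true;
	return ++current_cookie;	/* mirrors *cookie = ++p2p->cookie */
}

static int cancel_listen(uint64_t cookie)
{
	if (cookie != current_cookie)
		return -ENOENT;		/* cookie mismatch */
	if (!discovery_started)
		return -ENOENT;		/* listen not started (or expired) */
	discovery_started = false;
	return 0;
}

int main(void)
{
	uint64_t cookie = start_listen();

	printf("cancel(current): %d\n", cancel_listen(cookie));	/* 0 */
	printf("cancel(again):   %d\n", cancel_listen(cookie));	/* -ENOENT */
	printf("cancel(stale):   %d\n", cancel_listen(cookie - 1));
	return 0;
}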
index e36f2a0c8cb67547a27774a7736e70e841fd4171..aeb72c438e4465927fce36fa7e09a3d665574b97 100644 (file)
@@ -275,6 +275,7 @@ static void wil_pcie_remove(struct pci_dev *pdev)
        pci_disable_device(pdev);
        if (wil->platform_ops.uninit)
                wil->platform_ops.uninit(wil->platform_handle);
+       wil_p2p_wdev_free(wil);
        wil_if_free(wil);
 }
 
index 32031e7a11d58a96b1b83671ab45c519e3282f99..19ed127d4d055703d14e018a1f278a5b8d9dbd89 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -291,35 +291,15 @@ static u16 wil_agg_size(struct wil6210_priv *wil, u16 req_agg_wsize)
        return min(max_agg_size, req_agg_wsize);
 }
 
-/* Block Ack - Rx side (recipient */
+/* Block Ack - Rx side (recipient) */
 int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid,
                         u8 dialog_token, __le16 ba_param_set,
                         __le16 ba_timeout, __le16 ba_seq_ctrl)
-{
-       struct wil_back_rx *req = kzalloc(sizeof(*req), GFP_KERNEL);
-
-       if (!req)
-               return -ENOMEM;
-
-       req->cidxtid = cidxtid;
-       req->dialog_token = dialog_token;
-       req->ba_param_set = le16_to_cpu(ba_param_set);
-       req->ba_timeout = le16_to_cpu(ba_timeout);
-       req->ba_seq_ctrl = le16_to_cpu(ba_seq_ctrl);
-
-       mutex_lock(&wil->back_rx_mutex);
-       list_add_tail(&req->list, &wil->back_rx_pending);
-       mutex_unlock(&wil->back_rx_mutex);
-
-       queue_work(wil->wq_service, &wil->back_rx_worker);
-
-       return 0;
-}
-
-static void wil_back_rx_handle(struct wil6210_priv *wil,
-                              struct wil_back_rx *req)
 __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
 {
+       u16 param_set = le16_to_cpu(ba_param_set);
+       u16 agg_timeout = le16_to_cpu(ba_timeout);
+       u16 seq_ctrl = le16_to_cpu(ba_seq_ctrl);
        struct wil_sta_info *sta;
        u8 cid, tid;
        u16 agg_wsize = 0;
@@ -328,34 +308,35 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
         * bits 2..5: TID
         * bits 6..15: buffer size
         */
-       u16 req_agg_wsize = WIL_GET_BITS(req->ba_param_set, 6, 15);
-       bool agg_amsdu = !!(req->ba_param_set & BIT(0));
-       int ba_policy = req->ba_param_set & BIT(1);
-       u16 agg_timeout = req->ba_timeout;
+       u16 req_agg_wsize = WIL_GET_BITS(param_set, 6, 15);
+       bool agg_amsdu = !!(param_set & BIT(0));
+       int ba_policy = param_set & BIT(1);
        u16 status = WLAN_STATUS_SUCCESS;
-       u16 ssn = req->ba_seq_ctrl >> 4;
+       u16 ssn = seq_ctrl >> 4;
        struct wil_tid_ampdu_rx *r;
-       int rc;
+       int rc = 0;
 
        might_sleep();
-       parse_cidxtid(req->cidxtid, &cid, &tid);
+       parse_cidxtid(cidxtid, &cid, &tid);
 
        /* sanity checks */
        if (cid >= WIL6210_MAX_CID) {
                wil_err(wil, "BACK: invalid CID %d\n", cid);
-               return;
+               rc = -EINVAL;
+               goto out;
        }
 
        sta = &wil->sta[cid];
        if (sta->status != wil_sta_connected) {
                wil_err(wil, "BACK: CID %d not connected\n", cid);
-               return;
+               rc = -EINVAL;
+               goto out;
        }
 
        wil_dbg_wmi(wil,
                    "ADDBA request for CID %d %pM TID %d size %d timeout %d AMSDU%s policy %d token %d SSN 0x%03x\n",
-                   cid, sta->addr, tid, req_agg_wsize, req->ba_timeout,
-                   agg_amsdu ? "+" : "-", !!ba_policy, req->dialog_token, ssn);
+                   cid, sta->addr, tid, req_agg_wsize, agg_timeout,
+                   agg_amsdu ? "+" : "-", !!ba_policy, dialog_token, ssn);
 
        /* apply policies */
        if (ba_policy) {
@@ -365,10 +346,13 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
        if (status == WLAN_STATUS_SUCCESS)
                agg_wsize = wil_agg_size(wil, req_agg_wsize);
 
-       rc = wmi_addba_rx_resp(wil, cid, tid, req->dialog_token, status,
+       rc = wmi_addba_rx_resp(wil, cid, tid, dialog_token, status,
                               agg_amsdu, agg_wsize, agg_timeout);
-       if (rc || (status != WLAN_STATUS_SUCCESS))
-               return;
+       if (rc || (status != WLAN_STATUS_SUCCESS)) {
+               wil_err(wil, "%s: do not apply ba, rc(%d), status(%d)\n",
+                       __func__, rc, status);
+               goto out;
+       }
 
        /* apply */
        r = wil_tid_ampdu_rx_alloc(wil, agg_wsize, ssn);
@@ -376,143 +360,37 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
        wil_tid_ampdu_rx_free(wil, sta->tid_rx[tid]);
        sta->tid_rx[tid] = r;
        spin_unlock_bh(&sta->tid_rx_lock);
-}
-
-void wil_back_rx_flush(struct wil6210_priv *wil)
-{
-       struct wil_back_rx *evt, *t;
 
-       wil_dbg_misc(wil, "%s()\n", __func__);
-
-       mutex_lock(&wil->back_rx_mutex);
-
-       list_for_each_entry_safe(evt, t, &wil->back_rx_pending, list) {
-               list_del(&evt->list);
-               kfree(evt);
-       }
-
-       mutex_unlock(&wil->back_rx_mutex);
-}
-
-/* Retrieve next ADDBA request from the pending list */
-static struct list_head *next_back_rx(struct wil6210_priv *wil)
-{
-       struct list_head *ret = NULL;
-
-       mutex_lock(&wil->back_rx_mutex);
-
-       if (!list_empty(&wil->back_rx_pending)) {
-               ret = wil->back_rx_pending.next;
-               list_del(ret);
-       }
-
-       mutex_unlock(&wil->back_rx_mutex);
-
-       return ret;
-}
-
-void wil_back_rx_worker(struct work_struct *work)
-{
-       struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
-                                               back_rx_worker);
-       struct wil_back_rx *evt;
-       struct list_head *lh;
-
-       while ((lh = next_back_rx(wil)) != NULL) {
-               evt = list_entry(lh, struct wil_back_rx, list);
-
-               wil_back_rx_handle(wil, evt);
-               kfree(evt);
-       }
+out:
+       return rc;
 }
 
-/* BACK - Tx (originator) side */
-static void wil_back_tx_handle(struct wil6210_priv *wil,
-                              struct wil_back_tx *req)
+/* BACK - Tx side (originator) */
+int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize)
 {
-       struct vring_tx_data *txdata = &wil->vring_tx_data[req->ringid];
-       int rc;
+       u8 agg_wsize = wil_agg_size(wil, wsize);
+       u16 agg_timeout = 0;
+       struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
+       int rc = 0;
 
        if (txdata->addba_in_progress) {
                wil_dbg_misc(wil, "ADDBA for vring[%d] already in progress\n",
-                            req->ringid);
-               return;
+                            ringid);
+               goto out;
        }
        if (txdata->agg_wsize) {
                wil_dbg_misc(wil,
-                            "ADDBA for vring[%d] already established wsize %d\n",
-                            req->ringid, txdata->agg_wsize);
-               return;
+                            "ADDBA for vring[%d] already done for wsize %d\n",
+                            ringid, txdata->agg_wsize);
+               goto out;
        }
        txdata->addba_in_progress = true;
-       rc = wmi_addba(wil, req->ringid, req->agg_wsize, req->agg_timeout);
-       if (rc)
+       rc = wmi_addba(wil, ringid, agg_wsize, agg_timeout);
+       if (rc) {
+               wil_err(wil, "%s: wmi_addba failed, rc (%d)", __func__, rc);
                txdata->addba_in_progress = false;
-}
-
-static struct list_head *next_back_tx(struct wil6210_priv *wil)
-{
-       struct list_head *ret = NULL;
-
-       mutex_lock(&wil->back_tx_mutex);
-
-       if (!list_empty(&wil->back_tx_pending)) {
-               ret = wil->back_tx_pending.next;
-               list_del(ret);
-       }
-
-       mutex_unlock(&wil->back_tx_mutex);
-
-       return ret;
-}
-
-void wil_back_tx_worker(struct work_struct *work)
-{
-       struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
-                                                back_tx_worker);
-       struct wil_back_tx *evt;
-       struct list_head *lh;
-
-       while ((lh = next_back_tx(wil)) != NULL) {
-               evt = list_entry(lh, struct wil_back_tx, list);
-
-               wil_back_tx_handle(wil, evt);
-               kfree(evt);
        }
-}
-
-void wil_back_tx_flush(struct wil6210_priv *wil)
-{
-       struct wil_back_tx *evt, *t;
-
-       wil_dbg_misc(wil, "%s()\n", __func__);
-
-       mutex_lock(&wil->back_tx_mutex);
-
-       list_for_each_entry_safe(evt, t, &wil->back_tx_pending, list) {
-               list_del(&evt->list);
-               kfree(evt);
-       }
-
-       mutex_unlock(&wil->back_tx_mutex);
-}
-
-int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize)
-{
-       struct wil_back_tx *req = kzalloc(sizeof(*req), GFP_KERNEL);
-
-       if (!req)
-               return -ENOMEM;
 
-       req->ringid = ringid;
-       req->agg_wsize = wil_agg_size(wil, wsize);
-       req->agg_timeout = 0;
-
-       mutex_lock(&wil->back_tx_mutex);
-       list_add_tail(&req->list, &wil->back_tx_pending);
-       mutex_unlock(&wil->back_tx_mutex);
-
-       queue_work(wil->wq_service, &wil->back_tx_worker);
-
-       return 0;
+out:
+       return rc;
 }
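With the pending-list and worker machinery removed, the originator path is now a plain synchronous call; a minimal caller sketch (ring id and window size are hypothetical values):

	int rc = wil_addba_tx_request(wil, 2, 16);	/* ring 2, 16-frame window */
	if (rc)
		wil_err(wil, "BACK setup failed for ring %d (%d)\n", 2, rc);
	/* rc == 0 also covers the already-in-progress / already-established cases */
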
index e59239d22b9493cee2cb1bdc2f2d6066a86e94e2..c4db2a9d9f7f6c8338e5f35d331069e93bccf89d 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2013-2016 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -37,39 +37,40 @@ static inline void trace_ ## name(proto) {}
 #endif /* !CONFIG_WIL6210_TRACING || defined(__CHECKER__) */
 
 DECLARE_EVENT_CLASS(wil6210_wmi,
-       TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len),
+       TP_PROTO(struct wmi_cmd_hdr *wmi, void *buf, u16 buf_len),
 
        TP_ARGS(wmi, buf, buf_len),
 
        TP_STRUCT__entry(
                __field(u8, mid)
-               __field(u16, id)
-               __field(u32, timestamp)
+               __field(u16, command_id)
+               __field(u32, fw_timestamp)
                __field(u16, buf_len)
                __dynamic_array(u8, buf, buf_len)
        ),
 
        TP_fast_assign(
                __entry->mid = wmi->mid;
-               __entry->id = le16_to_cpu(wmi->id);
-               __entry->timestamp = le32_to_cpu(wmi->timestamp);
+               __entry->command_id = le16_to_cpu(wmi->command_id);
+               __entry->fw_timestamp = le32_to_cpu(wmi->fw_timestamp);
                __entry->buf_len = buf_len;
                memcpy(__get_dynamic_array(buf), buf, buf_len);
        ),
 
        TP_printk(
                "MID %d id 0x%04x len %d timestamp %d",
-               __entry->mid, __entry->id, __entry->buf_len, __entry->timestamp
+               __entry->mid, __entry->command_id, __entry->buf_len,
+               __entry->fw_timestamp
        )
 );
 
 DEFINE_EVENT(wil6210_wmi, wil6210_wmi_cmd,
-       TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len),
+       TP_PROTO(struct wmi_cmd_hdr *wmi, void *buf, u16 buf_len),
        TP_ARGS(wmi, buf, buf_len)
 );
 
 DEFINE_EVENT(wil6210_wmi, wil6210_wmi_event,
-       TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len),
+       TP_PROTO(struct wmi_cmd_hdr *wmi, void *buf, u16 buf_len),
        TP_ARGS(wmi, buf, buf_len)
 );
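Both trace events now take the on-wire wmi_cmd_hdr directly. A sketch of emitting the command trace from a send path, assuming the header is built the way __wmi_send() does later in this patch:

	struct wmi_cmd_hdr hdr = {
		.mid = 0,
		.command_id = cpu_to_le16(cmdid),	/* cmdid, buf, len: caller-supplied */
	};

	trace_wil6210_wmi_cmd(&hdr, buf, len);
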
 
index 6af20903cf89fac32bd3f96d14d8b4ed5923d60a..f260b232fd57114907e5aa5664db28094ba15bc4 100644
@@ -549,6 +549,60 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
        return rc;
 }
 
+/**
+ * reverse_memcmp - Compare two areas of memory, in reverse order
+ * @cs: One area of memory
+ * @ct: Another area of memory
+ * @count: The size of the area.
+ *
+ * Cut'n'paste from original memcmp (see lib/string.c)
+ * with minimal modifications
+ */
+static int reverse_memcmp(const void *cs, const void *ct, size_t count)
+{
+       const unsigned char *su1, *su2;
+       int res = 0;
+
+       for (su1 = cs + count - 1, su2 = ct + count - 1; count > 0;
+            --su1, --su2, count--) {
+               res = *su1 - *su2;
+               if (res)
+                       break;
+       }
+       return res;
+}
+
+static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb)
+{
+       struct vring_rx_desc *d = wil_skb_rxdesc(skb);
+       int cid = wil_rxdesc_cid(d);
+       int tid = wil_rxdesc_tid(d);
+       int key_id = wil_rxdesc_key_id(d);
+       int mc = wil_rxdesc_mcast(d);
+       struct wil_sta_info *s = &wil->sta[cid];
+       struct wil_tid_crypto_rx *c = mc ? &s->group_crypto_rx :
+                                     &s->tid_crypto_rx[tid];
+       struct wil_tid_crypto_rx_single *cc = &c->key_id[key_id];
+       const u8 *pn = (u8 *)&d->mac.pn_15_0;
+
+       if (!cc->key_set) {
+               wil_err_ratelimited(wil,
+                                   "Key missing. CID %d TID %d MCast %d KEY_ID %d\n",
+                                   cid, tid, mc, key_id);
+               return -EINVAL;
+       }
+
+       if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) {
+               wil_err_ratelimited(wil,
+                                   "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n",
+                                   cid, tid, mc, key_id, pn, cc->pn);
+               return -EINVAL;
+       }
+       memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN);
+
+       return 0;
+}
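The GCMP PN must be strictly increasing per {CID, TID, key index}; an equal-or-lower PN is treated as a replay. The 6 PN bytes start at pn_15_0, i.e. least-significant byte first, which is why the comparison runs in reverse. A worked sketch with assumed values:

	/* last accepted PN = 5, stored LSB first */
	u8 last[IEEE80211_GCMP_PN_LEN]  = { 0x05, 0, 0, 0, 0, 0 };
	u8 same[IEEE80211_GCMP_PN_LEN]  = { 0x05, 0, 0, 0, 0, 0 };
	u8 fresh[IEEE80211_GCMP_PN_LEN] = { 0x06, 0, 0, 0, 0, 0 };

	reverse_memcmp(same, last, IEEE80211_GCMP_PN_LEN);  /* == 0 -> drop   */
	reverse_memcmp(fresh, last, IEEE80211_GCMP_PN_LEN); /* >  0 -> accept */
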
+
 /*
  * Pass Rx packet to the netif. Update statistics.
  * Called in softirq context (NAPI poll).
@@ -561,6 +615,7 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
        unsigned int len = skb->len;
        struct vring_rx_desc *d = wil_skb_rxdesc(skb);
        int cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */
+       int security = wil_rxdesc_security(d);
        struct ethhdr *eth = (void *)skb->data;
        /* here looking for DA, not A1, thus Rxdesc's 'mcast' indication
         * is not suitable, need to look at data
@@ -586,6 +641,13 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
 
        skb_orphan(skb);
 
+       if (security && (wil_rx_crypto_check(wil, skb) != 0)) {
+               rc = GRO_DROP;
+               dev_kfree_skb(skb);
+               stats->rx_replay++;
+               goto stats;
+       }
+
        if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) {
                if (mcast) {
                        /* send multicast frames both to higher layers in
@@ -627,6 +689,7 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
                wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
                             len, gro_res_str[rc]);
        }
+stats:
        /* statistics. rc set to GRO_NORMAL for AP bridging */
        if (unlikely(rc == GRO_DROP)) {
                ndev->stats.rx_dropped++;
@@ -757,7 +820,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
                },
        };
        struct {
-               struct wil6210_mbox_hdr_wmi wmi;
+               struct wmi_cmd_hdr wmi;
                struct wmi_vring_cfg_done_event cmd;
        } __packed reply;
        struct vring *vring = &wil->vring_tx[id];
@@ -834,7 +897,7 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
                },
        };
        struct {
-               struct wil6210_mbox_hdr_wmi wmi;
+               struct wmi_cmd_hdr wmi;
                struct wmi_vring_cfg_done_event cmd;
        } __packed reply;
        struct vring *vring = &wil->vring_tx[id];
index ee7c7b4b9a178845e94530e0b63b3a62c4594456..fcdffaa8251bd812f7819581d06d8acf0a1ca353 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -480,6 +480,16 @@ static inline int wil_rxdesc_ext_subtype(struct vring_rx_desc *d)
        return WIL_GET_BITS(d->mac.d0, 28, 31);
 }
 
+static inline int wil_rxdesc_key_id(struct vring_rx_desc *d)
+{
+       return WIL_GET_BITS(d->mac.d1, 4, 5);
+}
+
+static inline int wil_rxdesc_security(struct vring_rx_desc *d)
+{
+       return WIL_GET_BITS(d->mac.d1, 7, 7);
+}
+
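Both helpers rely on the driver's WIL_GET_BITS(x, b0, b1) accessor; assuming the usual shift-and-mask definition (extracting the inclusive bit range b0..b1), key_id occupies d1 bits 5:4 and the security flag bit 7. An equivalent open-coded sketch:

	u32 d1 = 0xA0;			/* hypothetical descriptor word */
	int key_id = (d1 >> 4) & 0x3;	/* bits 5:4 -> 2 */
	int security = (d1 >> 7) & 0x1;	/* bit 7    -> 1 */
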
 static inline int wil_rxdesc_ds_bits(struct vring_rx_desc *d)
 {
        return WIL_GET_BITS(d->mac.d1, 8, 9);
index 8427d68b6fa8e875a2ae13cbdcaf5b3b187b19e9..4d699ea463732cae27ab30dfa06beb33e38893f3 100644
@@ -22,6 +22,7 @@
 #include <net/cfg80211.h>
 #include <linux/timex.h>
 #include <linux/types.h>
+#include "wmi.h"
 #include "wil_platform.h"
 
 extern bool no_fw_recovery;
@@ -131,6 +132,7 @@ struct RGF_ICR {
 /* registers - FW addresses */
 #define RGF_USER_USAGE_1               (0x880004)
 #define RGF_USER_USAGE_6               (0x880018)
+       #define BIT_USER_OOB_MODE               BIT(31)
 #define RGF_USER_HW_MACHINE_STATE      (0x8801dc)
        #define HW_MACHINE_BOOT_DONE    (0x3fffffd)
 #define RGF_USER_USER_CPU_0            (0x8801e0)
@@ -334,29 +336,11 @@ struct wil6210_mbox_hdr {
 /* max. value for wil6210_mbox_hdr.len */
 #define MAX_MBOXITEM_SIZE   (240)
 
-/**
- * struct wil6210_mbox_hdr_wmi - WMI header
- *
- * @mid: MAC ID
- *     00 - default, created by FW
- *     01..0f - WiFi ports, driver to create
- *     10..fe - debug
- *     ff - broadcast
- * @id: command/event ID
- * @timestamp: FW fills for events, free-running msec timer
- */
-struct wil6210_mbox_hdr_wmi {
-       u8 mid;
-       u8 reserved;
-       __le16 id;
-       __le32 timestamp;
-} __packed;
-
 struct pending_wmi_event {
        struct list_head list;
        struct {
                struct wil6210_mbox_hdr hdr;
-               struct wil6210_mbox_hdr_wmi wmi;
+               struct wmi_cmd_hdr wmi;
                u8 data[0];
        } __packed event;
 };
@@ -455,6 +439,29 @@ struct wil_tid_ampdu_rx {
        bool first_time; /* is it 1-st time this buffer used? */
 };
 
+/**
+ * struct wil_tid_crypto_rx_single - TID crypto information (Rx).
+ *
+ * @pn: GCMP PN for the session
+ * @key_set: valid key present
+ */
+struct wil_tid_crypto_rx_single {
+       u8 pn[IEEE80211_GCMP_PN_LEN];
+       bool key_set;
+};
+
+struct wil_tid_crypto_rx {
+       struct wil_tid_crypto_rx_single key_id[4];
+};
+
+struct wil_p2p_info {
+       struct ieee80211_channel listen_chan;
+       u8 discovery_started;
+       u64 cookie;
+       struct timer_list discovery_timer; /* listen/search duration */
+       struct work_struct discovery_expired_work; /* listen/search expire */
+};
+
 enum wil_sta_status {
        wil_sta_unused = 0,
        wil_sta_conn_pending = 1,
@@ -474,6 +481,7 @@ struct wil_net_stats {
        unsigned long   rx_non_data_frame;
        unsigned long   rx_short_frame;
        unsigned long   rx_large_frame;
+       unsigned long   rx_replay;
        u16 last_mcs_rx;
        u64 rx_per_mcs[WIL_MCS_MAX + 1];
 };
@@ -495,6 +503,8 @@ struct wil_sta_info {
        spinlock_t tid_rx_lock; /* guarding tid_rx array */
        unsigned long tid_rx_timer_expired[BITS_TO_LONGS(WIL_STA_TID_NUM)];
        unsigned long tid_rx_stop_requested[BITS_TO_LONGS(WIL_STA_TID_NUM)];
+       struct wil_tid_crypto_rx tid_crypto_rx[WIL_STA_TID_NUM];
+       struct wil_tid_crypto_rx group_crypto_rx;
 };
 
 enum {
@@ -507,24 +517,6 @@ enum {
        hw_capability_last
 };
 
-struct wil_back_rx {
-       struct list_head list;
-       /* request params, converted to CPU byte order - what we asked for */
-       u8 cidxtid;
-       u8 dialog_token;
-       u16 ba_param_set;
-       u16 ba_timeout;
-       u16 ba_seq_ctrl;
-};
-
-struct wil_back_tx {
-       struct list_head list;
-       /* request params, converted to CPU byte order - what we asked for */
-       u8 ringid;
-       u8 agg_wsize;
-       u16 agg_timeout;
-};
-
 struct wil_probe_client_req {
        struct list_head list;
        u64 cookie;
@@ -595,13 +587,6 @@ struct wil6210_priv {
        spinlock_t wmi_ev_lock;
        struct napi_struct napi_rx;
        struct napi_struct napi_tx;
-       /* BACK */
-       struct list_head back_rx_pending;
-       struct mutex back_rx_mutex; /* protect @back_rx_pending */
-       struct work_struct back_rx_worker;
-       struct list_head back_tx_pending;
-       struct mutex back_tx_mutex; /* protect @back_tx_pending */
-       struct work_struct back_tx_worker;
        /* keep alive */
        struct list_head probe_client_pending;
        struct mutex probe_client_mutex; /* protect @probe_client_pending */
@@ -622,11 +607,21 @@ struct wil6210_priv {
        /* debugfs */
        struct dentry *debug;
        struct debugfs_blob_wrapper blobs[ARRAY_SIZE(fw_mapping)];
+       u8 discovery_mode;
 
        void *platform_handle;
        struct wil_platform_ops platform_ops;
 
        struct pmc_ctx pmc;
+
+       bool pbss;
+
+       struct wil_p2p_info p2p;
+
+       /* P2P_DEVICE vif */
+       struct wireless_dev *p2p_wdev;
+       struct mutex p2p_wdev_mutex; /* protect @p2p_wdev */
+       struct wireless_dev *radio_wdev;
 };
 
 #define wil_to_wiphy(i) (i->wdev->wiphy)
@@ -722,6 +717,7 @@ void wil_priv_deinit(struct wil6210_priv *wil);
 int wil_reset(struct wil6210_priv *wil, bool no_fw);
 void wil_fw_error_recovery(struct wil6210_priv *wil);
 void wil_set_recovery_state(struct wil6210_priv *wil, int state);
+bool wil_is_recovery_blocked(struct wil6210_priv *wil);
 int wil_up(struct wil6210_priv *wil);
 int __wil_up(struct wil6210_priv *wil);
 int wil_down(struct wil6210_priv *wil);
@@ -752,7 +748,6 @@ int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
 int wmi_echo(struct wil6210_priv *wil);
 int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie);
 int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring);
-int wmi_p2p_cfg(struct wil6210_priv *wil, int channel);
 int wmi_rxon(struct wil6210_priv *wil, bool on);
 int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r);
 int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason,
@@ -765,11 +760,7 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 cid, u8 tid, u8 token,
 int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid,
                         u8 dialog_token, __le16 ba_param_set,
                         __le16 ba_timeout, __le16 ba_seq_ctrl);
-void wil_back_rx_worker(struct work_struct *work);
-void wil_back_rx_flush(struct wil6210_priv *wil);
 int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize);
-void wil_back_tx_worker(struct work_struct *work);
-void wil_back_tx_flush(struct wil6210_priv *wil);
 
 void wil6210_clear_irq(struct wil6210_priv *wil);
 int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi);
@@ -779,6 +770,24 @@ void wil_unmask_irq(struct wil6210_priv *wil);
 void wil_configure_interrupt_moderation(struct wil6210_priv *wil);
 void wil_disable_irq(struct wil6210_priv *wil);
 void wil_enable_irq(struct wil6210_priv *wil);
+
+/* P2P */
+void wil_p2p_discovery_timer_fn(ulong x);
+int wil_p2p_search(struct wil6210_priv *wil,
+                  struct cfg80211_scan_request *request);
+int wil_p2p_listen(struct wil6210_priv *wil, unsigned int duration,
+                  struct ieee80211_channel *chan, u64 *cookie);
+u8 wil_p2p_stop_discovery(struct wil6210_priv *wil);
+int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie);
+void wil_p2p_listen_expired(struct work_struct *work);
+void wil_p2p_search_expired(struct work_struct *work);
+
+/* WMI for P2P */
+int wmi_p2p_cfg(struct wil6210_priv *wil, int channel, int bi);
+int wmi_start_listen(struct wil6210_priv *wil);
+int wmi_start_search(struct wil6210_priv *wil);
+int wmi_stop_discovery(struct wil6210_priv *wil);
+
 int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
                         struct cfg80211_mgmt_tx_params *params,
                         u64 *cookie);
@@ -790,10 +799,11 @@ int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid,
 
 struct wireless_dev *wil_cfg80211_init(struct device *dev);
 void wil_wdev_free(struct wil6210_priv *wil);
+void wil_p2p_wdev_free(struct wil6210_priv *wil);
 
 int wmi_set_mac_address(struct wil6210_priv *wil, void *addr);
 int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype,
-                 u8 chan, u8 hidden_ssid);
+                 u8 chan, u8 hidden_ssid, u8 is_go);
 int wmi_pcp_stop(struct wil6210_priv *wil);
 void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
                        u16 reason_code, bool from_event);
index 9a949d9103438f459c8e715bf83e1d16c56853a0..33d4a34b3b1c005371733517678a8f8cfb1eed7c 100644
 
 struct device;
 
+enum wil_platform_event {
+       WIL_PLATFORM_EVT_FW_CRASH = 0,
+       WIL_PLATFORM_EVT_PRE_RESET = 1,
+       WIL_PLATFORM_EVT_FW_RDY = 2,
+};
+
 /**
  * struct wil_platform_ops - wil platform module calls from this
  * driver to platform driver
@@ -28,7 +34,7 @@ struct wil_platform_ops {
        int (*suspend)(void *handle);
        int (*resume)(void *handle);
        void (*uninit)(void *handle);
-       int (*notify_crash)(void *handle);
+       int (*notify)(void *handle, enum wil_platform_event evt);
 };
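The event enum folds the old notify_crash() into a generic notify(); a hedged sketch of raising an event from the driver side (the NULL check matters because the platform module and its ops are optional):

	if (wil->platform_ops.notify)
		wil->platform_ops.notify(wil->platform_handle,
					 WIL_PLATFORM_EVT_FW_CRASH);
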
 
 /**
index 493e721c4fa715ba23ac86e153fe4626d389e9fb..6ca28c3eff0a223ab9f0ef538ab9c7ada81af22b 100644
@@ -176,7 +176,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
 {
        struct {
                struct wil6210_mbox_hdr hdr;
-               struct wil6210_mbox_hdr_wmi wmi;
+               struct wmi_cmd_hdr wmi;
        } __packed cmd = {
                .hdr = {
                        .type = WIL_MBOX_HDR_TYPE_WMI,
@@ -185,7 +185,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
                },
                .wmi = {
                        .mid = 0,
-                       .id = cpu_to_le16(cmdid),
+                       .command_id = cpu_to_le16(cmdid),
                },
        };
        struct wil6210_mbox_ring *r = &wil->mbox_ctl.tx;
@@ -333,7 +333,7 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
        }
 
        ch_no = data->info.channel + 1;
-       freq = ieee80211_channel_to_frequency(ch_no, IEEE80211_BAND_60GHZ);
+       freq = ieee80211_channel_to_frequency(ch_no, NL80211_BAND_60GHZ);
        channel = ieee80211_get_channel(wiphy, freq);
        signal = data->info.sqi;
        d_status = le16_to_cpu(data->info.status);
@@ -368,6 +368,8 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
                wil_hex_dump_wmi("IE ", DUMP_PREFIX_OFFSET, 16, 1, ie_buf,
                                 ie_len, true);
 
+               wil_dbg_wmi(wil, "Capability info : 0x%04x\n", cap);
+
                bss = cfg80211_inform_bss_frame(wiphy, channel, rx_mgmt_frame,
                                                d_len, signal, GFP_KERNEL);
                if (bss) {
@@ -378,8 +380,10 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
                        wil_err(wil, "cfg80211_inform_bss_frame() failed\n");
                }
        } else {
-               cfg80211_rx_mgmt(wil->wdev, freq, signal,
+               mutex_lock(&wil->p2p_wdev_mutex);
+               cfg80211_rx_mgmt(wil->radio_wdev, freq, signal,
                                 (void *)rx_mgmt_frame, d_len, 0);
+               mutex_unlock(&wil->p2p_wdev_mutex);
        }
 }
 
@@ -406,7 +410,10 @@ static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
                             wil->scan_request, aborted);
 
                del_timer_sync(&wil->scan_timer);
+               mutex_lock(&wil->p2p_wdev_mutex);
                cfg80211_scan_done(wil->scan_request, aborted);
+               wil->radio_wdev = wil->wdev;
+               mutex_unlock(&wil->p2p_wdev_mutex);
                wil->scan_request = NULL;
        } else {
                wil_err(wil, "SCAN_COMPLETE while not scanning\n");
@@ -487,6 +494,14 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
                        return;
                }
                del_timer_sync(&wil->connect_timer);
+       } else if ((wdev->iftype == NL80211_IFTYPE_AP) ||
+                  (wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
+               if (wil->sta[evt->cid].status != wil_sta_unused) {
+                       wil_err(wil, "%s: AP: Invalid status %d for CID %d\n",
+                               __func__, wil->sta[evt->cid].status, evt->cid);
+                       mutex_unlock(&wil->mutex);
+                       return;
+               }
        }
 
        /* FIXME FW can transmit only ucast frames to peer */
@@ -648,7 +663,7 @@ static void wmi_evt_vring_en(struct wil6210_priv *wil, int id, void *d, int len)
 static void wmi_evt_ba_status(struct wil6210_priv *wil, int id, void *d,
                              int len)
 {
-       struct wmi_vring_ba_status_event *evt = d;
+       struct wmi_ba_status_event *evt = d;
        struct vring_tx_data *txdata;
 
        wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d AMSDU%s\n",
@@ -834,10 +849,10 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
                      offsetof(struct wil6210_mbox_ring_desc, sync), 0);
                /* indicate */
                if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
-                   (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
-                       struct wil6210_mbox_hdr_wmi *wmi = &evt->event.wmi;
-                       u16 id = le16_to_cpu(wmi->id);
-                       u32 tstamp = le32_to_cpu(wmi->timestamp);
+                   (len >= sizeof(struct wmi_cmd_hdr))) {
+                       struct wmi_cmd_hdr *wmi = &evt->event.wmi;
+                       u16 id = le16_to_cpu(wmi->command_id);
+                       u32 tstamp = le32_to_cpu(wmi->fw_timestamp);
                        spin_lock_irqsave(&wil->wmi_ev_lock, flags);
                        if (wil->reply_id && wil->reply_id == id) {
                                if (wil->reply_buf) {
@@ -947,7 +962,7 @@ int wmi_set_mac_address(struct wil6210_priv *wil, void *addr)
 }
 
 int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype,
-                 u8 chan, u8 hidden_ssid)
+                 u8 chan, u8 hidden_ssid, u8 is_go)
 {
        int rc;
 
@@ -958,9 +973,10 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype,
                .channel = chan - 1,
                .pcp_max_assoc_sta = max_assoc_sta,
                .hidden_ssid = hidden_ssid,
+               .is_go = is_go,
        };
        struct {
-               struct wil6210_mbox_hdr_wmi wmi;
+               struct wmi_cmd_hdr wmi;
                struct wmi_pcp_started_event evt;
        } __packed reply;
 
@@ -1014,7 +1030,7 @@ int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid)
 {
        int rc;
        struct {
-               struct wil6210_mbox_hdr_wmi wmi;
+               struct wmi_cmd_hdr wmi;
                struct wmi_set_ssid_cmd cmd;
        } __packed reply;
        int len; /* reply.cmd.ssid_len in CPU order */
@@ -1047,7 +1063,7 @@ int wmi_get_channel(struct wil6210_priv *wil, int *channel)
 {
        int rc;
        struct {
-               struct wil6210_mbox_hdr_wmi wmi;
+               struct wmi_cmd_hdr wmi;
                struct wmi_set_pcp_channel_cmd cmd;
        } __packed reply;
 
@@ -1064,14 +1080,86 @@ int wmi_get_channel(struct wil6210_priv *wil, int *channel)
        return 0;
 }
 
-int wmi_p2p_cfg(struct wil6210_priv *wil, int channel)
+int wmi_p2p_cfg(struct wil6210_priv *wil, int channel, int bi)
 {
+       int rc;
        struct wmi_p2p_cfg_cmd cmd = {
-               .discovery_mode = WMI_DISCOVERY_MODE_NON_OFFLOAD,
+               .discovery_mode = WMI_DISCOVERY_MODE_PEER2PEER,
+               .bcon_interval = cpu_to_le16(bi),
                .channel = channel - 1,
        };
+       struct {
+               struct wmi_cmd_hdr wmi;
+               struct wmi_p2p_cfg_done_event evt;
+       } __packed reply;
+
+       wil_dbg_wmi(wil, "sending WMI_P2P_CFG_CMDID\n");
+
+       rc = wmi_call(wil, WMI_P2P_CFG_CMDID, &cmd, sizeof(cmd),
+                     WMI_P2P_CFG_DONE_EVENTID, &reply, sizeof(reply), 300);
+       if (!rc && reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+               wil_err(wil, "P2P_CFG failed. status %d\n", reply.evt.status);
+               rc = -EINVAL;
+       }
 
-       return wmi_send(wil, WMI_P2P_CFG_CMDID, &cmd, sizeof(cmd));
+       return rc;
+}
+
+int wmi_start_listen(struct wil6210_priv *wil)
+{
+       int rc;
+       struct {
+               struct wmi_cmd_hdr wmi;
+               struct wmi_listen_started_event evt;
+       } __packed reply;
+
+       wil_dbg_wmi(wil, "sending WMI_START_LISTEN_CMDID\n");
+
+       rc = wmi_call(wil, WMI_START_LISTEN_CMDID, NULL, 0,
+                     WMI_LISTEN_STARTED_EVENTID, &reply, sizeof(reply), 300);
+       if (!rc && reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+               wil_err(wil, "device failed to start listen. status %d\n",
+                       reply.evt.status);
+               rc = -EINVAL;
+       }
+
+       return rc;
+}
+
+int wmi_start_search(struct wil6210_priv *wil)
+{
+       int rc;
+       struct {
+               struct wmi_cmd_hdr wmi;
+               struct wmi_search_started_event evt;
+       } __packed reply;
+
+       wil_dbg_wmi(wil, "sending WMI_START_SEARCH_CMDID\n");
+
+       rc = wmi_call(wil, WMI_START_SEARCH_CMDID, NULL, 0,
+                     WMI_SEARCH_STARTED_EVENTID, &reply, sizeof(reply), 300);
+       if (!rc && reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+               wil_err(wil, "device failed to start search. status %d\n",
+                       reply.evt.status);
+               rc = -EINVAL;
+       }
+
+       return rc;
+}
+
+int wmi_stop_discovery(struct wil6210_priv *wil)
+{
+       int rc;
+
+       wil_dbg_wmi(wil, "sending WMI_DISCOVERY_STOP_CMDID\n");
+
+       rc = wmi_call(wil, WMI_DISCOVERY_STOP_CMDID, NULL, 0,
+                     WMI_DISCOVERY_STOPPED_EVENTID, NULL, 0, 100);
+
+       if (rc)
+               wil_err(wil, "Failed to stop discovery\n");
+
+       return rc;
 }
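One plausible ordering of the new helpers for a listen-style discovery, sketched under the assumption that the P2P code drives them in this sequence (error unwinding abbreviated):

	rc = wmi_p2p_cfg(wil, 2, 100);	/* channel 2, BI 100: hypothetical */
	if (rc)
		return rc;

	rc = wmi_start_listen(wil);	/* or wmi_start_search(wil) */
	if (rc)
		return rc;

	/* later, on listen expiry or cancel: */
	return wmi_stop_discovery(wil);
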
 
 int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
@@ -1155,7 +1243,7 @@ int wmi_rxon(struct wil6210_priv *wil, bool on)
 {
        int rc;
        struct {
-               struct wil6210_mbox_hdr_wmi wmi;
+               struct wmi_cmd_hdr wmi;
                struct wmi_listen_started_event evt;
        } __packed reply;
 
@@ -1192,7 +1280,7 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
                .host_thrsh = cpu_to_le16(rx_ring_overflow_thrsh),
        };
        struct {
-               struct wil6210_mbox_hdr_wmi wmi;
+               struct wmi_cmd_hdr wmi;
                struct wmi_cfg_rx_chain_done_event evt;
        } __packed evt;
        int rc;
@@ -1246,7 +1334,7 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf)
                .measure_mode = cpu_to_le32(TEMPERATURE_MEASURE_NOW),
        };
        struct {
-               struct wil6210_mbox_hdr_wmi wmi;
+               struct wmi_cmd_hdr wmi;
                struct wmi_temp_sense_done_event evt;
        } __packed reply;
 
@@ -1272,7 +1360,7 @@ int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason,
                .disconnect_reason = cpu_to_le16(reason),
        };
        struct {
-               struct wil6210_mbox_hdr_wmi wmi;
+               struct wmi_cmd_hdr wmi;
                struct wmi_disconnect_event evt;
        } __packed reply;
 
@@ -1364,7 +1452,7 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 cid, u8 tid, u8 token,
                .ba_timeout = cpu_to_le16(timeout),
        };
        struct {
-               struct wil6210_mbox_hdr_wmi wmi;
+               struct wmi_cmd_hdr wmi;
                struct wmi_rcp_addba_resp_sent_event evt;
        } __packed reply;
 
@@ -1420,10 +1508,10 @@ static void wmi_event_handle(struct wil6210_priv *wil,
        u16 len = le16_to_cpu(hdr->len);
 
        if ((hdr->type == WIL_MBOX_HDR_TYPE_WMI) &&
-           (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
-               struct wil6210_mbox_hdr_wmi *wmi = (void *)(&hdr[1]);
+           (len >= sizeof(struct wmi_cmd_hdr))) {
+               struct wmi_cmd_hdr *wmi = (void *)(&hdr[1]);
                void *evt_data = (void *)(&wmi[1]);
-               u16 id = le16_to_cpu(wmi->id);
+               u16 id = le16_to_cpu(wmi->command_id);
 
                wil_dbg_wmi(wil, "Handle WMI 0x%04x (reply_id 0x%04x)\n",
                            id, wil->reply_id);
index 6e90e78f1554c84c3f0ab820ede5ed4c5437ce3d..29865e0b5203786de386cf6519caa1008f17b502 100644
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
- * Copyright (c) 2006-2012 Wilocity .
+ * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2006-2012 Wilocity
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
 
 /*
  * This file contains the definitions of the WMI protocol specified in the
- * Wireless Module Interface (WMI) for the Wilocity
- * MARLON 60 Gigabit wireless solution.
+ * Wireless Module Interface (WMI) for the Qualcomm
+ * 60 GHz wireless solution.
  * It includes definitions of all the commands and events.
  * Commands are messages from the host to the WM.
  * Events are messages from the WM to the host.
+ *
+ * This is an automatically generated file.
  */
 
 #ifndef __WILOCITY_WMI_H__
 #define __WILOCITY_WMI_H__
 
 /* General */
-#define WILOCITY_MAX_ASSOC_STA (8)
-#define WILOCITY_DEFAULT_ASSOC_STA (1)
-#define WMI_MAC_LEN            (6)
-#define WMI_PROX_RANGE_NUM     (3)
-#define WMI_MAX_LOSS_DMG_BEACONS       (32)
+#define WMI_MAX_ASSOC_STA              (8)
+#define WMI_DEFAULT_ASSOC_STA          (1)
+#define WMI_MAC_LEN                    (6)
+#define WMI_PROX_RANGE_NUM             (3)
+#define WMI_MAX_LOSS_DMG_BEACONS       (20)
+
+/* Mailbox interface
+ * used for commands and events
+ */
+enum wmi_mid {
+       MID_DEFAULT             = 0x00,
+       FIRST_DBG_MID_ID        = 0x10,
+       LAST_DBG_MID_ID         = 0xFE,
+       MID_BROADCAST           = 0xFF,
+};
+
+/* WMI_CMD_HDR */
+struct wmi_cmd_hdr {
+       u8 mid;
+       u8 reserved;
+       __le16 command_id;
+       __le32 fw_timestamp;
+} __packed;
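All multi-byte fields are little-endian on the wire, hence the __le16/__le32 types; a sketch of decoding a received header, mirroring what wmi_recv_cmd() does above:

	static u16 wmi_hdr_cmd_id(const struct wmi_cmd_hdr *wmi)
	{
		/* fw_timestamp is decoded the same way, via le32_to_cpu() */
		return le16_to_cpu(wmi->command_id);
	}
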
 
 /* List of Commands */
 enum wmi_command_id {
-       WMI_CONNECT_CMDID               = 0x0001,
-       WMI_DISCONNECT_CMDID            = 0x0003,
-       WMI_DISCONNECT_STA_CMDID        = 0x0004,
-       WMI_START_SCAN_CMDID            = 0x0007,
-       WMI_SET_BSS_FILTER_CMDID        = 0x0009,
-       WMI_SET_PROBED_SSID_CMDID       = 0x000a,
-       WMI_SET_LISTEN_INT_CMDID        = 0x000b,
-       WMI_BCON_CTRL_CMDID             = 0x000f,
-       WMI_ADD_CIPHER_KEY_CMDID        = 0x0016,
-       WMI_DELETE_CIPHER_KEY_CMDID     = 0x0017,
-       WMI_SET_APPIE_CMDID             = 0x003f,
-       WMI_SET_WSC_STATUS_CMDID        = 0x0041,
-       WMI_PXMT_RANGE_CFG_CMDID        = 0x0042,
-       WMI_PXMT_SNR2_RANGE_CFG_CMDID   = 0x0043,
-/*     WMI_FAST_MEM_ACC_MODE_CMDID     = 0x0300, */
-       WMI_MEM_READ_CMDID              = 0x0800,
-       WMI_MEM_WR_CMDID                = 0x0801,
-       WMI_ECHO_CMDID                  = 0x0803,
-       WMI_DEEP_ECHO_CMDID             = 0x0804,
-       WMI_CONFIG_MAC_CMDID            = 0x0805,
-       WMI_CONFIG_PHY_DEBUG_CMDID      = 0x0806,
-       WMI_ADD_DEBUG_TX_PCKT_CMDID     = 0x0808,
-       WMI_PHY_GET_STATISTICS_CMDID    = 0x0809,
-       WMI_FS_TUNE_CMDID               = 0x080a,
-       WMI_CORR_MEASURE_CMDID          = 0x080b,
-       WMI_READ_RSSI_CMDID             = 0x080c,
-       WMI_TEMP_SENSE_CMDID            = 0x080e,
-       WMI_DC_CALIB_CMDID              = 0x080f,
-       WMI_SEND_TONE_CMDID             = 0x0810,
-       WMI_IQ_TX_CALIB_CMDID           = 0x0811,
-       WMI_IQ_RX_CALIB_CMDID           = 0x0812,
-       WMI_SET_UCODE_IDLE_CMDID        = 0x0813,
-       WMI_SET_WORK_MODE_CMDID         = 0x0815,
-       WMI_LO_LEAKAGE_CALIB_CMDID      = 0x0816,
-       WMI_MARLON_R_READ_CMDID         = 0x0818,
-       WMI_MARLON_R_WRITE_CMDID        = 0x0819,
-       WMI_MARLON_R_TXRX_SEL_CMDID     = 0x081a,
-       MAC_IO_STATIC_PARAMS_CMDID      = 0x081b,
-       MAC_IO_DYNAMIC_PARAMS_CMDID     = 0x081c,
-       WMI_SILENT_RSSI_CALIB_CMDID     = 0x081d,
-       WMI_RF_RX_TEST_CMDID            = 0x081e,
-       WMI_CFG_RX_CHAIN_CMDID          = 0x0820,
-       WMI_VRING_CFG_CMDID             = 0x0821,
-       WMI_BCAST_VRING_CFG_CMDID       = 0x0822,
-       WMI_VRING_BA_EN_CMDID           = 0x0823,
-       WMI_VRING_BA_DIS_CMDID          = 0x0824,
-       WMI_RCP_ADDBA_RESP_CMDID        = 0x0825,
-       WMI_RCP_DELBA_CMDID             = 0x0826,
-       WMI_SET_SSID_CMDID              = 0x0827,
-       WMI_GET_SSID_CMDID              = 0x0828,
-       WMI_SET_PCP_CHANNEL_CMDID       = 0x0829,
-       WMI_GET_PCP_CHANNEL_CMDID       = 0x082a,
-       WMI_SW_TX_REQ_CMDID             = 0x082b,
-       WMI_READ_MAC_RXQ_CMDID          = 0x0830,
-       WMI_READ_MAC_TXQ_CMDID          = 0x0831,
-       WMI_WRITE_MAC_RXQ_CMDID         = 0x0832,
-       WMI_WRITE_MAC_TXQ_CMDID         = 0x0833,
-       WMI_WRITE_MAC_XQ_FIELD_CMDID    = 0x0834,
-       WMI_MLME_PUSH_CMDID             = 0x0835,
-       WMI_BEAMFORMING_MGMT_CMDID      = 0x0836,
-       WMI_BF_TXSS_MGMT_CMDID          = 0x0837,
-       WMI_BF_SM_MGMT_CMDID            = 0x0838,
-       WMI_BF_RXSS_MGMT_CMDID          = 0x0839,
-       WMI_BF_TRIG_CMDID               = 0x083A,
-       WMI_SET_SECTORS_CMDID           = 0x0849,
-       WMI_MAINTAIN_PAUSE_CMDID        = 0x0850,
-       WMI_MAINTAIN_RESUME_CMDID       = 0x0851,
-       WMI_RS_MGMT_CMDID               = 0x0852,
-       WMI_RF_MGMT_CMDID               = 0x0853,
-       WMI_THERMAL_THROTTLING_CTRL_CMDID       = 0x0854,
-       WMI_THERMAL_THROTTLING_GET_STATUS_CMDID = 0x0855,
+       WMI_CONNECT_CMDID                       = 0x01,
+       WMI_DISCONNECT_CMDID                    = 0x03,
+       WMI_DISCONNECT_STA_CMDID                = 0x04,
+       WMI_START_SCAN_CMDID                    = 0x07,
+       WMI_SET_BSS_FILTER_CMDID                = 0x09,
+       WMI_SET_PROBED_SSID_CMDID               = 0x0A,
+       WMI_SET_LISTEN_INT_CMDID                = 0x0B,
+       WMI_BCON_CTRL_CMDID                     = 0x0F,
+       WMI_ADD_CIPHER_KEY_CMDID                = 0x16,
+       WMI_DELETE_CIPHER_KEY_CMDID             = 0x17,
+       WMI_PCP_CONF_CMDID                      = 0x18,
+       WMI_SET_APPIE_CMDID                     = 0x3F,
+       WMI_SET_WSC_STATUS_CMDID                = 0x41,
+       WMI_PXMT_RANGE_CFG_CMDID                = 0x42,
+       WMI_PXMT_SNR2_RANGE_CFG_CMDID           = 0x43,
+       WMI_MEM_READ_CMDID                      = 0x800,
+       WMI_MEM_WR_CMDID                        = 0x801,
+       WMI_ECHO_CMDID                          = 0x803,
+       WMI_DEEP_ECHO_CMDID                     = 0x804,
+       WMI_CONFIG_MAC_CMDID                    = 0x805,
+       WMI_CONFIG_PHY_DEBUG_CMDID              = 0x806,
+       WMI_ADD_DEBUG_TX_PCKT_CMDID             = 0x808,
+       WMI_PHY_GET_STATISTICS_CMDID            = 0x809,
+       WMI_FS_TUNE_CMDID                       = 0x80A,
+       WMI_CORR_MEASURE_CMDID                  = 0x80B,
+       WMI_READ_RSSI_CMDID                     = 0x80C,
+       WMI_TEMP_SENSE_CMDID                    = 0x80E,
+       WMI_DC_CALIB_CMDID                      = 0x80F,
+       WMI_SEND_TONE_CMDID                     = 0x810,
+       WMI_IQ_TX_CALIB_CMDID                   = 0x811,
+       WMI_IQ_RX_CALIB_CMDID                   = 0x812,
+       WMI_SET_UCODE_IDLE_CMDID                = 0x813,
+       WMI_SET_WORK_MODE_CMDID                 = 0x815,
+       WMI_LO_LEAKAGE_CALIB_CMDID              = 0x816,
+       WMI_MARLON_R_READ_CMDID                 = 0x818,
+       WMI_MARLON_R_WRITE_CMDID                = 0x819,
+       WMI_MARLON_R_TXRX_SEL_CMDID             = 0x81A,
+       MAC_IO_STATIC_PARAMS_CMDID              = 0x81B,
+       MAC_IO_DYNAMIC_PARAMS_CMDID             = 0x81C,
+       WMI_SILENT_RSSI_CALIB_CMDID             = 0x81D,
+       WMI_RF_RX_TEST_CMDID                    = 0x81E,
+       WMI_CFG_RX_CHAIN_CMDID                  = 0x820,
+       WMI_VRING_CFG_CMDID                     = 0x821,
+       WMI_BCAST_VRING_CFG_CMDID               = 0x822,
+       WMI_VRING_BA_EN_CMDID                   = 0x823,
+       WMI_VRING_BA_DIS_CMDID                  = 0x824,
+       WMI_RCP_ADDBA_RESP_CMDID                = 0x825,
+       WMI_RCP_DELBA_CMDID                     = 0x826,
+       WMI_SET_SSID_CMDID                      = 0x827,
+       WMI_GET_SSID_CMDID                      = 0x828,
+       WMI_SET_PCP_CHANNEL_CMDID               = 0x829,
+       WMI_GET_PCP_CHANNEL_CMDID               = 0x82A,
+       WMI_SW_TX_REQ_CMDID                     = 0x82B,
+       WMI_READ_MAC_RXQ_CMDID                  = 0x830,
+       WMI_READ_MAC_TXQ_CMDID                  = 0x831,
+       WMI_WRITE_MAC_RXQ_CMDID                 = 0x832,
+       WMI_WRITE_MAC_TXQ_CMDID                 = 0x833,
+       WMI_WRITE_MAC_XQ_FIELD_CMDID            = 0x834,
+       WMI_MLME_PUSH_CMDID                     = 0x835,
+       WMI_BEAMFORMING_MGMT_CMDID              = 0x836,
+       WMI_BF_TXSS_MGMT_CMDID                  = 0x837,
+       WMI_BF_SM_MGMT_CMDID                    = 0x838,
+       WMI_BF_RXSS_MGMT_CMDID                  = 0x839,
+       WMI_BF_TRIG_CMDID                       = 0x83A,
+       WMI_SET_SECTORS_CMDID                   = 0x849,
+       WMI_MAINTAIN_PAUSE_CMDID                = 0x850,
+       WMI_MAINTAIN_RESUME_CMDID               = 0x851,
+       WMI_RS_MGMT_CMDID                       = 0x852,
+       WMI_RF_MGMT_CMDID                       = 0x853,
+       WMI_THERMAL_THROTTLING_CTRL_CMDID       = 0x854,
+       WMI_THERMAL_THROTTLING_GET_STATUS_CMDID = 0x855,
+       WMI_OTP_READ_CMDID                      = 0x856,
+       WMI_OTP_WRITE_CMDID                     = 0x857,
        /* Performance monitoring commands */
-       WMI_BF_CTRL_CMDID               = 0x0862,
-       WMI_NOTIFY_REQ_CMDID            = 0x0863,
-       WMI_GET_STATUS_CMDID            = 0x0864,
-       WMI_UNIT_TEST_CMDID             = 0x0900,
-       WMI_HICCUP_CMDID                = 0x0901,
-       WMI_FLASH_READ_CMDID            = 0x0902,
-       WMI_FLASH_WRITE_CMDID           = 0x0903,
-       WMI_SECURITY_UNIT_TEST_CMDID    = 0x0904,
-       /*P2P*/
-       WMI_P2P_CFG_CMDID               = 0x0910,
-       WMI_PORT_ALLOCATE_CMDID         = 0x0911,
-       WMI_PORT_DELETE_CMDID           = 0x0912,
-       WMI_POWER_MGMT_CFG_CMDID        = 0x0913,
-       WMI_START_LISTEN_CMDID          = 0x0914,
-       WMI_START_SEARCH_CMDID          = 0x0915,
-       WMI_DISCOVERY_START_CMDID       = 0x0916,
-       WMI_DISCOVERY_STOP_CMDID        = 0x0917,
-       WMI_PCP_START_CMDID             = 0x0918,
-       WMI_PCP_STOP_CMDID              = 0x0919,
-       WMI_GET_PCP_FACTOR_CMDID        = 0x091b,
-
-       WMI_SET_MAC_ADDRESS_CMDID       = 0xf003,
-       WMI_ABORT_SCAN_CMDID            = 0xf007,
-       WMI_SET_PMK_CMDID               = 0xf028,
-
-       WMI_SET_PROMISCUOUS_MODE_CMDID  = 0xf041,
-       WMI_GET_PMK_CMDID               = 0xf048,
-       WMI_SET_PASSPHRASE_CMDID        = 0xf049,
-       WMI_SEND_ASSOC_RES_CMDID        = 0xf04a,
-       WMI_SET_ASSOC_REQ_RELAY_CMDID   = 0xf04b,
-       WMI_EAPOL_TX_CMDID              = 0xf04c,
-       WMI_MAC_ADDR_REQ_CMDID          = 0xf04d,
-       WMI_FW_VER_CMDID                = 0xf04e,
-       WMI_PMC_CMDID                   = 0xf04f,
+       WMI_BF_CTRL_CMDID                       = 0x862,
+       WMI_NOTIFY_REQ_CMDID                    = 0x863,
+       WMI_GET_STATUS_CMDID                    = 0x864,
+       WMI_UNIT_TEST_CMDID                     = 0x900,
+       WMI_HICCUP_CMDID                        = 0x901,
+       WMI_FLASH_READ_CMDID                    = 0x902,
+       WMI_FLASH_WRITE_CMDID                   = 0x903,
+       /* P2P */
+       WMI_P2P_CFG_CMDID                       = 0x910,
+       WMI_PORT_ALLOCATE_CMDID                 = 0x911,
+       WMI_PORT_DELETE_CMDID                   = 0x912,
+       WMI_POWER_MGMT_CFG_CMDID                = 0x913,
+       WMI_START_LISTEN_CMDID                  = 0x914,
+       WMI_START_SEARCH_CMDID                  = 0x915,
+       WMI_DISCOVERY_START_CMDID               = 0x916,
+       WMI_DISCOVERY_STOP_CMDID                = 0x917,
+       WMI_PCP_START_CMDID                     = 0x918,
+       WMI_PCP_STOP_CMDID                      = 0x919,
+       WMI_GET_PCP_FACTOR_CMDID                = 0x91B,
+       WMI_SET_MAC_ADDRESS_CMDID               = 0xF003,
+       WMI_ABORT_SCAN_CMDID                    = 0xF007,
+       WMI_SET_PROMISCUOUS_MODE_CMDID          = 0xF041,
+       WMI_GET_PMK_CMDID                       = 0xF048,
+       WMI_SET_PASSPHRASE_CMDID                = 0xF049,
+       WMI_SEND_ASSOC_RES_CMDID                = 0xF04A,
+       WMI_SET_ASSOC_REQ_RELAY_CMDID           = 0xF04B,
+       WMI_MAC_ADDR_REQ_CMDID                  = 0xF04D,
+       WMI_FW_VER_CMDID                        = 0xF04E,
+       WMI_PMC_CMDID                           = 0xF04F,
 };
 
-/*
- * Commands data structures
- */
-
-/*
- * WMI_CONNECT_CMDID
- */
+/* WMI_CONNECT_CMDID */
 enum wmi_network_type {
        WMI_NETTYPE_INFRA               = 0x01,
        WMI_NETTYPE_ADHOC               = 0x02,
        WMI_NETTYPE_ADHOC_CREATOR       = 0x04,
        WMI_NETTYPE_AP                  = 0x10,
        WMI_NETTYPE_P2P                 = 0x20,
-       WMI_NETTYPE_WBE                 = 0x40, /* PCIE over 60g */
+       /* PCIE over 60g */
+       WMI_NETTYPE_WBE                 = 0x40,
 };
 
 enum wmi_dot11_auth_mode {
-       WMI_AUTH11_OPEN                 = 0x01,
-       WMI_AUTH11_SHARED               = 0x02,
-       WMI_AUTH11_LEAP                 = 0x04,
-       WMI_AUTH11_WSC                  = 0x08,
+       WMI_AUTH11_OPEN         = 0x01,
+       WMI_AUTH11_SHARED       = 0x02,
+       WMI_AUTH11_LEAP         = 0x04,
+       WMI_AUTH11_WSC          = 0x08,
 };
 
 enum wmi_auth_mode {
-       WMI_AUTH_NONE                   = 0x01,
-       WMI_AUTH_WPA                    = 0x02,
-       WMI_AUTH_WPA2                   = 0x04,
-       WMI_AUTH_WPA_PSK                = 0x08,
-       WMI_AUTH_WPA2_PSK               = 0x10,
-       WMI_AUTH_WPA_CCKM               = 0x20,
-       WMI_AUTH_WPA2_CCKM              = 0x40,
+       WMI_AUTH_NONE           = 0x01,
+       WMI_AUTH_WPA            = 0x02,
+       WMI_AUTH_WPA2           = 0x04,
+       WMI_AUTH_WPA_PSK        = 0x08,
+       WMI_AUTH_WPA2_PSK       = 0x10,
+       WMI_AUTH_WPA_CCKM       = 0x20,
+       WMI_AUTH_WPA2_CCKM      = 0x40,
 };
 
 enum wmi_crypto_type {
-       WMI_CRYPT_NONE                  = 0x01,
-       WMI_CRYPT_WEP                   = 0x02,
-       WMI_CRYPT_TKIP                  = 0x04,
-       WMI_CRYPT_AES                   = 0x08,
-       WMI_CRYPT_AES_GCMP              = 0x20,
+       WMI_CRYPT_NONE          = 0x01,
+       WMI_CRYPT_AES_GCMP      = 0x20,
 };
 
 enum wmi_connect_ctrl_flag_bits {
-       WMI_CONNECT_ASSOC_POLICY_USER           = 0x0001,
-       WMI_CONNECT_SEND_REASSOC                = 0x0002,
-       WMI_CONNECT_IGNORE_WPA_GROUP_CIPHER     = 0x0004,
-       WMI_CONNECT_PROFILE_MATCH_DONE          = 0x0008,
-       WMI_CONNECT_IGNORE_AAC_BEACON           = 0x0010,
-       WMI_CONNECT_CSA_FOLLOW_BSS              = 0x0020,
-       WMI_CONNECT_DO_WPA_OFFLOAD              = 0x0040,
-       WMI_CONNECT_DO_NOT_DEAUTH               = 0x0080,
+       WMI_CONNECT_ASSOC_POLICY_USER           = 0x01,
+       WMI_CONNECT_SEND_REASSOC                = 0x02,
+       WMI_CONNECT_IGNORE_WPA_GROUP_CIPHER     = 0x04,
+       WMI_CONNECT_PROFILE_MATCH_DONE          = 0x08,
+       WMI_CONNECT_IGNORE_AAC_BEACON           = 0x10,
+       WMI_CONNECT_CSA_FOLLOW_BSS              = 0x20,
+       WMI_CONNECT_DO_WPA_OFFLOAD              = 0x40,
+       WMI_CONNECT_DO_NOT_DEAUTH               = 0x80,
 };
 
-#define WMI_MAX_SSID_LEN    (32)
+#define WMI_MAX_SSID_LEN       (32)
 
+/* WMI_CONNECT_CMDID */
 struct wmi_connect_cmd {
        u8 network_type;
        u8 dot11_auth_mode;
@@ -216,31 +226,17 @@ struct wmi_connect_cmd {
        u8 reserved1[2];
 } __packed;
 
-/*
- * WMI_DISCONNECT_STA_CMDID
- */
+/* WMI_DISCONNECT_STA_CMDID */
 struct wmi_disconnect_sta_cmd {
        u8 dst_mac[WMI_MAC_LEN];
        __le16 disconnect_reason;
 } __packed;
 
-/*
- * WMI_SET_PMK_CMDID
- */
-
-#define WMI_MIN_KEY_INDEX      (0)
 #define WMI_MAX_KEY_INDEX      (3)
 #define WMI_MAX_KEY_LEN                (32)
 #define WMI_PASSPHRASE_LEN     (64)
-#define WMI_PMK_LEN            (32)
-
-struct  wmi_set_pmk_cmd {
-       u8 pmk[WMI_PMK_LEN];
-} __packed;
 
-/*
- * WMI_SET_PASSPHRASE_CMDID
- */
+/* WMI_SET_PASSPHRASE_CMDID */
 struct wmi_set_passphrase_cmd {
        u8 ssid[WMI_MAX_SSID_LEN];
        u8 passphrase[WMI_PASSPHRASE_LEN];
@@ -248,36 +244,34 @@ struct wmi_set_passphrase_cmd {
        u8 passphrase_len;
 } __packed;
 
-/*
- * WMI_ADD_CIPHER_KEY_CMDID
- */
+/* WMI_ADD_CIPHER_KEY_CMDID */
 enum wmi_key_usage {
-       WMI_KEY_USE_PAIRWISE    = 0,
-       WMI_KEY_USE_RX_GROUP    = 1,
-       WMI_KEY_USE_TX_GROUP    = 2,
+       WMI_KEY_USE_PAIRWISE    = 0x00,
+       WMI_KEY_USE_RX_GROUP    = 0x01,
+       WMI_KEY_USE_TX_GROUP    = 0x02,
 };
 
 struct wmi_add_cipher_key_cmd {
        u8 key_index;
        u8 key_type;
-       u8 key_usage;           /* enum wmi_key_usage */
+       /* enum wmi_key_usage */
+       u8 key_usage;
        u8 key_len;
-       u8 key_rsc[8];          /* key replay sequence counter */
+       /* key replay sequence counter */
+       u8 key_rsc[8];
        u8 key[WMI_MAX_KEY_LEN];
-       u8 key_op_ctrl;         /* Additional Key Control information */
+       /* Additional Key Control information */
+       u8 key_op_ctrl;
        u8 mac[WMI_MAC_LEN];
 } __packed;
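A sketch of populating the command for a pairwise GCMP key (key material, length and peer MAC are placeholders; the driver's wmi_add_cipher_key() is the real producer of this structure):

	struct wmi_add_cipher_key_cmd cmd = {
		.key_index = 0,
		.key_usage = WMI_KEY_USE_PAIRWISE,
		.key_len = 16,			/* hypothetical key length */
	};

	memcpy(cmd.key, key, 16);		/* key, mac: caller-supplied */
	memcpy(cmd.mac, mac, WMI_MAC_LEN);
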
 
-/*
- * WMI_DELETE_CIPHER_KEY_CMDID
- */
+/* WMI_DELETE_CIPHER_KEY_CMDID */
 struct wmi_delete_cipher_key_cmd {
        u8 key_index;
        u8 mac[WMI_MAC_LEN];
 } __packed;
 
-/*
- * WMI_START_SCAN_CMDID
+/* WMI_START_SCAN_CMDID
  *
  * Start L1 scan operation
  *
@@ -286,146 +280,142 @@ struct wmi_delete_cipher_key_cmd {
  * - WMI_SCAN_COMPLETE_EVENTID
  */
 enum wmi_scan_type {
-       WMI_LONG_SCAN           = 0,
-       WMI_SHORT_SCAN          = 1,
-       WMI_PBC_SCAN            = 2,
-       WMI_DIRECT_SCAN         = 3,
-       WMI_ACTIVE_SCAN         = 4,
+       WMI_ACTIVE_SCAN         = 0x00,
+       WMI_SHORT_SCAN          = 0x01,
+       WMI_PASSIVE_SCAN        = 0x02,
+       WMI_DIRECT_SCAN         = 0x03,
+       WMI_LONG_SCAN           = 0x04,
 };
 
+/* WMI_START_SCAN_CMDID */
 struct wmi_start_scan_cmd {
-       u8 direct_scan_mac_addr[6];
-       u8 reserved[2];
-       __le32 home_dwell_time; /* Max duration in the home channel(ms) */
-       __le32 force_scan_interval;     /* Time interval between scans (ms)*/
-       u8 scan_type;           /* wmi_scan_type */
-       u8 num_channels;                /* how many channels follow */
+       u8 direct_scan_mac_addr[WMI_MAC_LEN];
+       /* DMG Beacon frame is transmitted during active scanning */
+       u8 discovery_mode;
+       /* reserved */
+       u8 reserved;
+       /* Max duration in the home channel(ms) */
+       __le32 dwell_time;
+       /* Time interval between scans (ms) */
+       __le32 force_scan_interval;
+       /* enum wmi_scan_type */
+       u8 scan_type;
+       /* how many channels follow */
+       u8 num_channels;
+       /* channels ID's:
+        * 0 - 58320 MHz
+        * 1 - 60480 MHz
+        * 2 - 62640 MHz
+        */
        struct {
                u8 channel;
                u8 reserved;
-       } channel_list[0];      /* channels ID's */
-                               /* 0 - 58320 MHz */
-                               /* 1 - 60480 MHz */
-                               /* 2 - 62640 MHz */
+       } channel_list[0];
 } __packed;
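channel_list[] is a flexible array, so the command is sized at send time. A sketch building a single-channel scan (field values hypothetical):

	struct wmi_start_scan_cmd *cmd;
	size_t len = sizeof(*cmd) + 1 * sizeof(cmd->channel_list[0]);

	cmd = kzalloc(len, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	cmd->scan_type = WMI_ACTIVE_SCAN;
	cmd->num_channels = 1;
	cmd->channel_list[0].channel = 0;	/* 0 -> 58320 MHz */

	rc = wmi_send(wil, WMI_START_SCAN_CMDID, cmd, len);
	kfree(cmd);
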
 
-/*
- * WMI_SET_PROBED_SSID_CMDID
- */
+/* WMI_SET_PROBED_SSID_CMDID */
 #define MAX_PROBED_SSID_INDEX  (3)
 
 enum wmi_ssid_flag {
-       WMI_SSID_FLAG_DISABLE   = 0,    /* disables entry */
-       WMI_SSID_FLAG_SPECIFIC  = 1,    /* probes specified ssid */
-       WMI_SSID_FLAG_ANY       = 2,    /* probes for any ssid */
+       /* disables entry */
+       WMI_SSID_FLAG_DISABLE   = 0x00,
+       /* probes specified ssid */
+       WMI_SSID_FLAG_SPECIFIC  = 0x01,
+       /* probes for any ssid */
+       WMI_SSID_FLAG_ANY       = 0x02,
 };
 
 struct wmi_probed_ssid_cmd {
-       u8 entry_index;                 /* 0 to MAX_PROBED_SSID_INDEX */
-       u8 flag;                        /* enum wmi_ssid_flag */
+       /* 0 to MAX_PROBED_SSID_INDEX */
+       u8 entry_index;
+       /* enum wmi_ssid_flag */
+       u8 flag;
        u8 ssid_len;
        u8 ssid[WMI_MAX_SSID_LEN];
 } __packed;
 
-/*
- * WMI_SET_APPIE_CMDID
+/* WMI_SET_APPIE_CMDID
  * Add Application specified IE to a management frame
  */
-#define WMI_MAX_IE_LEN         (1024)
+#define WMI_MAX_IE_LEN (1024)
 
-/*
- * Frame Types
- */
+/* Frame Types */
 enum wmi_mgmt_frame_type {
-       WMI_FRAME_BEACON        = 0,
-       WMI_FRAME_PROBE_REQ     = 1,
-       WMI_FRAME_PROBE_RESP    = 2,
-       WMI_FRAME_ASSOC_REQ     = 3,
-       WMI_FRAME_ASSOC_RESP    = 4,
-       WMI_NUM_MGMT_FRAME,
+       WMI_FRAME_BEACON        = 0x00,
+       WMI_FRAME_PROBE_REQ     = 0x01,
+       WMI_FRAME_PROBE_RESP    = 0x02,
+       WMI_FRAME_ASSOC_REQ     = 0x03,
+       WMI_FRAME_ASSOC_RESP    = 0x04,
+       WMI_NUM_MGMT_FRAME      = 0x05,
 };
 
 struct wmi_set_appie_cmd {
-       u8 mgmt_frm_type;       /* enum wmi_mgmt_frame_type */
+       /* enum wmi_mgmt_frame_type */
+       u8 mgmt_frm_type;
        u8 reserved;
-       __le16 ie_len;  /* Length of the IE to be added to MGMT frame */
+       /* Length of the IE to be added to MGMT frame */
+       __le16 ie_len;
        u8 ie_info[0];
 } __packed;
 
-/*
- * WMI_PXMT_RANGE_CFG_CMDID
- */
+/* WMI_PXMT_RANGE_CFG_CMDID */
 struct wmi_pxmt_range_cfg_cmd {
        u8 dst_mac[WMI_MAC_LEN];
        __le16 range;
 } __packed;
 
-/*
- * WMI_PXMT_SNR2_RANGE_CFG_CMDID
- */
+/* WMI_PXMT_SNR2_RANGE_CFG_CMDID */
 struct wmi_pxmt_snr2_range_cfg_cmd {
-       s8 snr2range_arr[WMI_PROX_RANGE_NUM-1];
+       s8 snr2range_arr[2];
 } __packed;
 
-/*
- * WMI_RF_MGMT_CMDID
- */
+/* WMI_RF_MGMT_CMDID */
 enum wmi_rf_mgmt_type {
-       WMI_RF_MGMT_W_DISABLE   = 0,
-       WMI_RF_MGMT_W_ENABLE    = 1,
-       WMI_RF_MGMT_GET_STATUS  = 2,
+       WMI_RF_MGMT_W_DISABLE   = 0x00,
+       WMI_RF_MGMT_W_ENABLE    = 0x01,
+       WMI_RF_MGMT_GET_STATUS  = 0x02,
 };
 
+/* WMI_RF_MGMT_CMDID */
 struct wmi_rf_mgmt_cmd {
        __le32 rf_mgmt_type;
 } __packed;
 
-/*
- * WMI_THERMAL_THROTTLING_CTRL_CMDID
- */
+/* WMI_THERMAL_THROTTLING_CTRL_CMDID */
 #define THERMAL_THROTTLING_USE_DEFAULT_MAX_TXOP_LENGTH (0xFFFFFFFF)
 
+/* WMI_THERMAL_THROTTLING_CTRL_CMDID */
 struct wmi_thermal_throttling_ctrl_cmd {
        __le32 time_on_usec;
        __le32 time_off_usec;
        __le32 max_txop_length_usec;
 } __packed;
 
-/*
- * WMI_RF_RX_TEST_CMDID
- */
+/* WMI_RF_RX_TEST_CMDID */
 struct wmi_rf_rx_test_cmd {
        __le32 sector;
 } __packed;
 
-/*
- * WMI_CORR_MEASURE_CMDID
- */
+/* WMI_CORR_MEASURE_CMDID */
 struct wmi_corr_measure_cmd {
-       s32 freq_mhz;
+       __le32 freq_mhz;
        __le32 length_samples;
        __le32 iterations;
 } __packed;
 
-/*
- * WMI_SET_SSID_CMDID
- */
+/* WMI_SET_SSID_CMDID */
 struct wmi_set_ssid_cmd {
        __le32 ssid_len;
        u8 ssid[WMI_MAX_SSID_LEN];
 } __packed;
 
-/*
- * WMI_SET_PCP_CHANNEL_CMDID
- */
+/* WMI_SET_PCP_CHANNEL_CMDID */
 struct wmi_set_pcp_channel_cmd {
        u8 channel;
        u8 reserved[3];
 } __packed;
 
-/*
- * WMI_BCON_CTRL_CMDID
- */
+/* WMI_BCON_CTRL_CMDID */
 struct wmi_bcon_ctrl_cmd {
        __le16 bcon_interval;
        __le16 frag_num;
@@ -434,214 +424,192 @@ struct wmi_bcon_ctrl_cmd {
        u8 pcp_max_assoc_sta;
        u8 disable_sec_offload;
        u8 disable_sec;
+       u8 hidden_ssid;
+       u8 is_go;
+       u8 reserved[2];
 } __packed;
 
-/******* P2P ***********/
-
-/*
- * WMI_PORT_ALLOCATE_CMDID
- */
+/* WMI_PORT_ALLOCATE_CMDID */
 enum wmi_port_role {
-       WMI_PORT_STA            = 0,
-       WMI_PORT_PCP            = 1,
-       WMI_PORT_AP             = 2,
-       WMI_PORT_P2P_DEV        = 3,
-       WMI_PORT_P2P_CLIENT     = 4,
-       WMI_PORT_P2P_GO         = 5,
+       WMI_PORT_STA            = 0x00,
+       WMI_PORT_PCP            = 0x01,
+       WMI_PORT_AP             = 0x02,
+       WMI_PORT_P2P_DEV        = 0x03,
+       WMI_PORT_P2P_CLIENT     = 0x04,
+       WMI_PORT_P2P_GO         = 0x05,
 };
 
+/* WMI_PORT_ALLOCATE_CMDID */
 struct wmi_port_allocate_cmd {
        u8 mac[WMI_MAC_LEN];
        u8 port_role;
        u8 mid;
 } __packed;
 
-/*
- * WMI_PORT_DELETE_CMDID
- */
-struct wmi_delete_port_cmd {
+/* WMI_PORT_DELETE_CMDID */
+struct wmi_port_delete_cmd {
        u8 mid;
        u8 reserved[3];
 } __packed;
 
-/*
- * WMI_P2P_CFG_CMDID
- */
+/* WMI_P2P_CFG_CMDID */
 enum wmi_discovery_mode {
-       WMI_DISCOVERY_MODE_NON_OFFLOAD  = 0,
-       WMI_DISCOVERY_MODE_OFFLOAD      = 1,
-       WMI_DISCOVERY_MODE_PEER2PEER    = 2,
+       WMI_DISCOVERY_MODE_NON_OFFLOAD  = 0x00,
+       WMI_DISCOVERY_MODE_OFFLOAD      = 0x01,
+       WMI_DISCOVERY_MODE_PEER2PEER    = 0x02,
 };
 
 struct wmi_p2p_cfg_cmd {
-       u8 discovery_mode;      /* wmi_discovery_mode */
+       /* enum wmi_discovery_mode */
+       u8 discovery_mode;
        u8 channel;
-       __le16 bcon_interval; /* base to listen/search duration calculation */
+       /* base to listen/search duration calculation */
+       __le16 bcon_interval;
 } __packed;
 
-/*
- * WMI_POWER_MGMT_CFG_CMDID
- */
+/* WMI_POWER_MGMT_CFG_CMDID */
 enum wmi_power_source_type {
-       WMI_POWER_SOURCE_BATTERY        = 0,
-       WMI_POWER_SOURCE_OTHER          = 1,
+       WMI_POWER_SOURCE_BATTERY        = 0x00,
+       WMI_POWER_SOURCE_OTHER          = 0x01,
 };
 
 struct wmi_power_mgmt_cfg_cmd {
-       u8 power_source;        /* wmi_power_source_type */
+       /* enum wmi_power_source_type */
+       u8 power_source;
        u8 reserved[3];
 } __packed;
 
-/*
- * WMI_PCP_START_CMDID
- */
-
-enum wmi_hidden_ssid {
-       WMI_HIDDEN_SSID_DISABLED        = 0,
-       WMI_HIDDEN_SSID_SEND_EMPTY      = 1,
-       WMI_HIDDEN_SSID_CLEAR   = 2,
-};
-
+/* WMI_PCP_START_CMDID */
 struct wmi_pcp_start_cmd {
        __le16 bcon_interval;
        u8 pcp_max_assoc_sta;
        u8 hidden_ssid;
-       u8 reserved0[8];
+       u8 is_go;
+       u8 reserved0[7];
        u8 network_type;
        u8 channel;
        u8 disable_sec_offload;
        u8 disable_sec;
 } __packed;
 
-/*
- * WMI_SW_TX_REQ_CMDID
- */
+/* WMI_SW_TX_REQ_CMDID */
 struct wmi_sw_tx_req_cmd {
        u8 dst_mac[WMI_MAC_LEN];
        __le16 len;
        u8 payload[0];
 } __packed;
 
-/*
- * WMI_VRING_CFG_CMDID
- */
-
 struct wmi_sw_ring_cfg {
        __le64 ring_mem_base;
        __le16 ring_size;
        __le16 max_mpdu_size;
 } __packed;
 
+/* wmi_vring_cfg_schd */
 struct wmi_vring_cfg_schd {
        __le16 priority;
        __le16 timeslot_us;
 } __packed;
 
 enum wmi_vring_cfg_encap_trans_type {
-       WMI_VRING_ENC_TYPE_802_3                = 0,
-       WMI_VRING_ENC_TYPE_NATIVE_WIFI          = 1,
+       WMI_VRING_ENC_TYPE_802_3        = 0x00,
+       WMI_VRING_ENC_TYPE_NATIVE_WIFI  = 0x01,
 };
 
 enum wmi_vring_cfg_ds_cfg {
-       WMI_VRING_DS_PBSS                       = 0,
-       WMI_VRING_DS_STATION                    = 1,
-       WMI_VRING_DS_AP                         = 2,
-       WMI_VRING_DS_ADDR4                      = 3,
+       WMI_VRING_DS_PBSS       = 0x00,
+       WMI_VRING_DS_STATION    = 0x01,
+       WMI_VRING_DS_AP         = 0x02,
+       WMI_VRING_DS_ADDR4      = 0x03,
 };
 
 enum wmi_vring_cfg_nwifi_ds_trans_type {
-       WMI_NWIFI_TX_TRANS_MODE_NO              = 0,
-       WMI_NWIFI_TX_TRANS_MODE_AP2PBSS         = 1,
-       WMI_NWIFI_TX_TRANS_MODE_STA2PBSS        = 2,
+       WMI_NWIFI_TX_TRANS_MODE_NO              = 0x00,
+       WMI_NWIFI_TX_TRANS_MODE_AP2PBSS         = 0x01,
+       WMI_NWIFI_TX_TRANS_MODE_STA2PBSS        = 0x02,
 };
 
 enum wmi_vring_cfg_schd_params_priority {
-       WMI_SCH_PRIO_REGULAR                    = 0,
-       WMI_SCH_PRIO_HIGH                       = 1,
+       WMI_SCH_PRIO_REGULAR    = 0x00,
+       WMI_SCH_PRIO_HIGH       = 0x01,
 };
 
-#define CIDXTID_CID_POS (0)
-#define CIDXTID_CID_LEN (4)
-#define CIDXTID_CID_MSK (0xF)
-#define CIDXTID_TID_POS (4)
-#define CIDXTID_TID_LEN (4)
-#define CIDXTID_TID_MSK (0xF0)
+#define CIDXTID_CID_POS                                (0)
+#define CIDXTID_CID_LEN                                (4)
+#define CIDXTID_CID_MSK                                (0xF)
+#define CIDXTID_TID_POS                                (4)
+#define CIDXTID_TID_LEN                                (4)
+#define CIDXTID_TID_MSK                                (0xF0)
+#define VRING_CFG_MAC_CTRL_LIFETIME_EN_POS     (0)
+#define VRING_CFG_MAC_CTRL_LIFETIME_EN_LEN     (1)
+#define VRING_CFG_MAC_CTRL_LIFETIME_EN_MSK     (0x1)
+#define VRING_CFG_MAC_CTRL_AGGR_EN_POS         (1)
+#define VRING_CFG_MAC_CTRL_AGGR_EN_LEN         (1)
+#define VRING_CFG_MAC_CTRL_AGGR_EN_MSK         (0x2)
+#define VRING_CFG_TO_RESOLUTION_VALUE_POS      (0)
+#define VRING_CFG_TO_RESOLUTION_VALUE_LEN      (6)
+#define VRING_CFG_TO_RESOLUTION_VALUE_MSK      (0x3F)
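
The _POS/_LEN/_MSK triplets describe sub-fields packed into a single byte:
cidxtid, for instance, carries a 4-bit connection ID in its low nibble and a
4-bit TID in its high nibble. A hypothetical helper (not part of this header)
showing how the macros combine:

	static inline u8 wmi_mk_cidxtid(u8 cid, u8 tid)
	{
		return ((cid << CIDXTID_CID_POS) & CIDXTID_CID_MSK) |
		       ((tid << CIDXTID_TID_POS) & CIDXTID_TID_MSK);
	}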
 
 struct wmi_vring_cfg {
        struct wmi_sw_ring_cfg tx_sw_ring;
-       u8 ringid;                              /* 0-23 vrings */
-
+       /* 0-23 vrings */
+       u8 ringid;
        u8 cidxtid;
-
        u8 encap_trans_type;
-       u8 ds_cfg;                              /* 802.3 DS cfg */
+       /* 802.3 DS cfg */
+       u8 ds_cfg;
        u8 nwifi_ds_trans_type;
-
-       #define VRING_CFG_MAC_CTRL_LIFETIME_EN_POS (0)
-       #define VRING_CFG_MAC_CTRL_LIFETIME_EN_LEN (1)
-       #define VRING_CFG_MAC_CTRL_LIFETIME_EN_MSK (0x1)
-       #define VRING_CFG_MAC_CTRL_AGGR_EN_POS (1)
-       #define VRING_CFG_MAC_CTRL_AGGR_EN_LEN (1)
-       #define VRING_CFG_MAC_CTRL_AGGR_EN_MSK (0x2)
        u8 mac_ctrl;
-
-       #define VRING_CFG_TO_RESOLUTION_VALUE_POS (0)
-       #define VRING_CFG_TO_RESOLUTION_VALUE_LEN (6)
-       #define VRING_CFG_TO_RESOLUTION_VALUE_MSK (0x3F)
        u8 to_resolution;
        u8 agg_max_wsize;
        struct wmi_vring_cfg_schd schd_params;
 } __packed;
 
 enum wmi_vring_cfg_cmd_action {
-       WMI_VRING_CMD_ADD                       = 0,
-       WMI_VRING_CMD_MODIFY                    = 1,
-       WMI_VRING_CMD_DELETE                    = 2,
+       WMI_VRING_CMD_ADD       = 0x00,
+       WMI_VRING_CMD_MODIFY    = 0x01,
+       WMI_VRING_CMD_DELETE    = 0x02,
 };
 
+/* WMI_VRING_CFG_CMDID */
 struct wmi_vring_cfg_cmd {
        __le32 action;
        struct wmi_vring_cfg vring_cfg;
 } __packed;
 
-/*
- * WMI_BCAST_VRING_CFG_CMDID
- */
 struct wmi_bcast_vring_cfg {
        struct wmi_sw_ring_cfg tx_sw_ring;
-       u8 ringid;                              /* 0-23 vrings */
+       /* 0-23 vrings */
+       u8 ringid;
        u8 encap_trans_type;
-       u8 ds_cfg;                              /* 802.3 DS cfg */
+       /* 802.3 DS cfg */
+       u8 ds_cfg;
        u8 nwifi_ds_trans_type;
 } __packed;
 
+/* WMI_BCAST_VRING_CFG_CMDID */
 struct wmi_bcast_vring_cfg_cmd {
        __le32 action;
        struct wmi_bcast_vring_cfg vring_cfg;
 } __packed;
 
-/*
- * WMI_VRING_BA_EN_CMDID
- */
+/* WMI_VRING_BA_EN_CMDID */
 struct wmi_vring_ba_en_cmd {
        u8 ringid;
        u8 agg_max_wsize;
        __le16 ba_timeout;
        u8 amsdu;
+       u8 reserved[3];
 } __packed;
 
-/*
- * WMI_VRING_BA_DIS_CMDID
- */
+/* WMI_VRING_BA_DIS_CMDID */
 struct wmi_vring_ba_dis_cmd {
        u8 ringid;
        u8 reserved;
        __le16 reason;
 } __packed;
 
-/*
- * WMI_NOTIFY_REQ_CMDID
- */
+/* WMI_NOTIFY_REQ_CMDID */
 struct wmi_notify_req_cmd {
        u8 cid;
        u8 year;
@@ -654,102 +622,100 @@ struct wmi_notify_req_cmd {
        u8 miliseconds;
 } __packed;
 
-/*
- * WMI_CFG_RX_CHAIN_CMDID
- */
+/* WMI_CFG_RX_CHAIN_CMDID */
 enum wmi_sniffer_cfg_mode {
-       WMI_SNIFFER_OFF                         = 0,
-       WMI_SNIFFER_ON                          = 1,
+       WMI_SNIFFER_OFF = 0x00,
+       WMI_SNIFFER_ON  = 0x01,
 };
 
 enum wmi_sniffer_cfg_phy_info_mode {
-       WMI_SNIFFER_PHY_INFO_DISABLED           = 0,
-       WMI_SNIFFER_PHY_INFO_ENABLED            = 1,
+       WMI_SNIFFER_PHY_INFO_DISABLED   = 0x00,
+       WMI_SNIFFER_PHY_INFO_ENABLED    = 0x01,
 };
 
 enum wmi_sniffer_cfg_phy_support {
-       WMI_SNIFFER_CP                          = 0,
-       WMI_SNIFFER_DP                          = 1,
-       WMI_SNIFFER_BOTH_PHYS                   = 2,
+       WMI_SNIFFER_CP          = 0x00,
+       WMI_SNIFFER_DP          = 0x01,
+       WMI_SNIFFER_BOTH_PHYS   = 0x02,
 };
 
+/* wmi_sniffer_cfg */
 struct wmi_sniffer_cfg {
-       __le32 mode;            /* enum wmi_sniffer_cfg_mode */
-       __le32 phy_info_mode;   /* enum wmi_sniffer_cfg_phy_info_mode */
-       __le32 phy_support;     /* enum wmi_sniffer_cfg_phy_support */
+       /* enum wmi_sniffer_cfg_mode */
+       __le32 mode;
+       /* enum wmi_sniffer_cfg_phy_info_mode */
+       __le32 phy_info_mode;
+       /* enum wmi_sniffer_cfg_phy_support */
+       __le32 phy_support;
        u8 channel;
        u8 reserved[3];
 } __packed;
 
 enum wmi_cfg_rx_chain_cmd_action {
-       WMI_RX_CHAIN_ADD                        = 0,
-       WMI_RX_CHAIN_DEL                        = 1,
+       WMI_RX_CHAIN_ADD        = 0x00,
+       WMI_RX_CHAIN_DEL        = 0x01,
 };
 
 enum wmi_cfg_rx_chain_cmd_decap_trans_type {
-       WMI_DECAP_TYPE_802_3                    = 0,
-       WMI_DECAP_TYPE_NATIVE_WIFI              = 1,
-       WMI_DECAP_TYPE_NONE                     = 2,
+       WMI_DECAP_TYPE_802_3            = 0x00,
+       WMI_DECAP_TYPE_NATIVE_WIFI      = 0x01,
+       WMI_DECAP_TYPE_NONE             = 0x02,
 };
 
 enum wmi_cfg_rx_chain_cmd_nwifi_ds_trans_type {
-       WMI_NWIFI_RX_TRANS_MODE_NO              = 0,
-       WMI_NWIFI_RX_TRANS_MODE_PBSS2AP         = 1,
-       WMI_NWIFI_RX_TRANS_MODE_PBSS2STA        = 2,
+       WMI_NWIFI_RX_TRANS_MODE_NO              = 0x00,
+       WMI_NWIFI_RX_TRANS_MODE_PBSS2AP         = 0x01,
+       WMI_NWIFI_RX_TRANS_MODE_PBSS2STA        = 0x02,
 };
 
 enum wmi_cfg_rx_chain_cmd_reorder_type {
-       WMI_RX_HW_REORDER = 0,
-       WMI_RX_SW_REORDER = 1,
+       WMI_RX_HW_REORDER       = 0x00,
+       WMI_RX_SW_REORDER       = 0x01,
 };
 
+#define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_POS   (0)
+#define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_LEN   (1)
+#define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_MSK   (0x1)
+#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_POS            (1)
+#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_LEN            (1)
+#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK            (0x2)
+#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_POS           (0)
+#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_LEN           (1)
+#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_MSK           (0x1)
+#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_POS            (1)
+#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_LEN            (1)
+#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_MSK            (0x2)
+#define L3_L4_CTRL_IPV4_CHECKSUM_EN_POS                        (0)
+#define L3_L4_CTRL_IPV4_CHECKSUM_EN_LEN                        (1)
+#define L3_L4_CTRL_IPV4_CHECKSUM_EN_MSK                        (0x1)
+#define L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS               (1)
+#define L3_L4_CTRL_TCPIP_CHECKSUM_EN_LEN               (1)
+#define L3_L4_CTRL_TCPIP_CHECKSUM_EN_MSK               (0x2)
+#define RING_CTRL_OVERRIDE_PREFETCH_THRSH_POS          (0)
+#define RING_CTRL_OVERRIDE_PREFETCH_THRSH_LEN          (1)
+#define RING_CTRL_OVERRIDE_PREFETCH_THRSH_MSK          (0x1)
+#define RING_CTRL_OVERRIDE_WB_THRSH_POS                        (1)
+#define RING_CTRL_OVERRIDE_WB_THRSH_LEN                        (1)
+#define RING_CTRL_OVERRIDE_WB_THRSH_MSK                        (0x2)
+#define RING_CTRL_OVERRIDE_ITR_THRSH_POS               (2)
+#define RING_CTRL_OVERRIDE_ITR_THRSH_LEN               (1)
+#define RING_CTRL_OVERRIDE_ITR_THRSH_MSK               (0x4)
+#define RING_CTRL_OVERRIDE_HOST_THRSH_POS              (3)
+#define RING_CTRL_OVERRIDE_HOST_THRSH_LEN              (1)
+#define RING_CTRL_OVERRIDE_HOST_THRSH_MSK              (0x8)
+
+/* WMI_CFG_RX_CHAIN_CMDID */
 struct wmi_cfg_rx_chain_cmd {
        __le32 action;
        struct wmi_sw_ring_cfg rx_sw_ring;
        u8 mid;
        u8 decap_trans_type;
-
-       #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_POS (0)
-       #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_LEN (1)
-       #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_MSK (0x1)
-       #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_POS (1)
-       #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_LEN (1)
-       #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK (0x2)
        u8 l2_802_3_offload_ctrl;
-
-       #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_POS (0)
-       #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_LEN (1)
-       #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_MSK (0x1)
-       #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_POS (1)
-       #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_LEN (1)
-       #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_MSK (0x2)
        u8 l2_nwifi_offload_ctrl;
-
        u8 vlan_id;
        u8 nwifi_ds_trans_type;
-
-       #define L3_L4_CTRL_IPV4_CHECKSUM_EN_POS (0)
-       #define L3_L4_CTRL_IPV4_CHECKSUM_EN_LEN (1)
-       #define L3_L4_CTRL_IPV4_CHECKSUM_EN_MSK (0x1)
-       #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS (1)
-       #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_LEN (1)
-       #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_MSK (0x2)
        u8 l3_l4_ctrl;
-
-       #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_POS (0)
-       #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_LEN (1)
-       #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_MSK (0x1)
-       #define RING_CTRL_OVERRIDE_WB_THRSH_POS (1)
-       #define RING_CTRL_OVERRIDE_WB_THRSH_LEN (1)
-       #define RING_CTRL_OVERRIDE_WB_THRSH_MSK (0x2)
-       #define RING_CTRL_OVERRIDE_ITR_THRSH_POS (2)
-       #define RING_CTRL_OVERRIDE_ITR_THRSH_LEN (1)
-       #define RING_CTRL_OVERRIDE_ITR_THRSH_MSK (0x4)
-       #define RING_CTRL_OVERRIDE_HOST_THRSH_POS (3)
-       #define RING_CTRL_OVERRIDE_HOST_THRSH_LEN (1)
-       #define RING_CTRL_OVERRIDE_HOST_THRSH_MSK (0x8)
        u8 ring_ctrl;
-
        __le16 prefetch_thrsh;
        __le16 wb_thrsh;
        __le32 itr_value;
@@ -757,31 +723,27 @@ struct wmi_cfg_rx_chain_cmd {
        u8 reorder_type;
        u8 reserved;
        struct wmi_sniffer_cfg sniffer_cfg;
+       __le16 max_rx_pl_per_desc;
 } __packed;
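
With the bit definitions hoisted out of the struct body, a caller enables RX
checksum offload by setting the corresponding mask bits in l3_l4_ctrl. An
illustrative fragment (the chosen action and decap type are assumptions, not
driver defaults):

	struct wmi_cfg_rx_chain_cmd cmd = {
		.action = cpu_to_le32(WMI_RX_CHAIN_ADD),
		.decap_trans_type = WMI_DECAP_TYPE_802_3,
		.l3_l4_ctrl = L3_L4_CTRL_IPV4_CHECKSUM_EN_MSK |
			      L3_L4_CTRL_TCPIP_CHECKSUM_EN_MSK,
	};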
 
-/*
- * WMI_RCP_ADDBA_RESP_CMDID
- */
+/* WMI_RCP_ADDBA_RESP_CMDID */
 struct wmi_rcp_addba_resp_cmd {
        u8 cidxtid;
        u8 dialog_token;
        __le16 status_code;
-       __le16 ba_param_set;    /* ieee80211_ba_parameterset field to send */
+       /* ieee80211_ba_parameterset field to send */
+       __le16 ba_param_set;
        __le16 ba_timeout;
 } __packed;
 
-/*
- * WMI_RCP_DELBA_CMDID
- */
+/* WMI_RCP_DELBA_CMDID */
 struct wmi_rcp_delba_cmd {
        u8 cidxtid;
        u8 reserved;
        __le16 reason;
 } __packed;
 
-/*
- * WMI_RCP_ADDBA_REQ_CMDID
- */
+/* WMI_RCP_ADDBA_REQ_CMDID */
 struct wmi_rcp_addba_req_cmd {
        u8 cidxtid;
        u8 dialog_token;
@@ -792,32 +754,16 @@ struct wmi_rcp_addba_req_cmd {
        __le16 ba_seq_ctrl;
 } __packed;
 
-/*
- * WMI_SET_MAC_ADDRESS_CMDID
- */
+/* WMI_SET_MAC_ADDRESS_CMDID */
 struct wmi_set_mac_address_cmd {
        u8 mac[WMI_MAC_LEN];
        u8 reserved[2];
 } __packed;
 
-/*
-* WMI_EAPOL_TX_CMDID
-*/
-struct wmi_eapol_tx_cmd {
-       u8 dst_mac[WMI_MAC_LEN];
-       __le16 eapol_len;
-       u8 eapol[0];
-} __packed;
-
-/*
- * WMI_ECHO_CMDID
- *
+/* WMI_ECHO_CMDID
  * Check FW is alive
- *
  * WMI_DEEP_ECHO_CMDID
- *
  * Check FW and ucode are alive
- *
  * Returned event: WMI_ECHO_RSP_EVENTID
  * same event for both commands
  */
@@ -825,70 +771,79 @@ struct wmi_echo_cmd {
        __le32 value;
 } __packed;
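
WMI_ECHO is the basic liveness probe: the host sends an arbitrary value and
the firmware returns it in WMI_ECHO_RSP_EVENTID. A sketch of issuing it,
assuming a synchronous wmi_call()-style helper and a 50 ms timeout (both
assumptions, not taken from this patch):

	static int wmi_echo_check(struct wil6210_priv *wil)
	{
		struct wmi_echo_cmd cmd = {
			.value = cpu_to_le32(0x12345678), /* arbitrary pattern */
		};

		/* send and wait for WMI_ECHO_RSP_EVENTID */
		return wmi_call(wil, WMI_ECHO_CMDID, &cmd, sizeof(cmd),
				WMI_ECHO_RSP_EVENTID, NULL, 0, 50);
	}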
 
-/*
- * WMI_TEMP_SENSE_CMDID
+/* WMI_OTP_READ_CMDID */
+struct wmi_otp_read_cmd {
+       __le32 addr;
+       __le32 size;
+       __le32 values;
+} __packed;
+
+/* WMI_OTP_WRITE_CMDID */
+struct wmi_otp_write_cmd {
+       __le32 addr;
+       __le32 size;
+       __le32 values;
+} __packed;
+
+/* WMI_TEMP_SENSE_CMDID
  *
  * Measure MAC and radio temperatures
+ *
+ * Possible modes for temperature measurement
  */
-
-/* Possible modes for temperature measurement */
 enum wmi_temperature_measure_mode {
-       TEMPERATURE_USE_OLD_VALUE       = 0x1,
-       TEMPERATURE_MEASURE_NOW         = 0x2,
+       TEMPERATURE_USE_OLD_VALUE       = 0x01,
+       TEMPERATURE_MEASURE_NOW         = 0x02,
 };
 
+/* WMI_TEMP_SENSE_CMDID */
 struct wmi_temp_sense_cmd {
        __le32 measure_baseband_en;
        __le32 measure_rf_en;
        __le32 measure_mode;
 } __packed;
 
-/*
- * WMI_PMC_CMDID
- */
-enum wmi_pmc_op_e {
-       WMI_PMC_ALLOCATE = 0,
-       WMI_PMC_RELEASE = 1,
+enum wmi_pmc_op {
+       WMI_PMC_ALLOCATE        = 0x00,
+       WMI_PMC_RELEASE         = 0x01,
 };
 
+/* WMI_PMC_CMDID */
 struct wmi_pmc_cmd {
-       u8 op;          /* enum wmi_pmc_cmd_op_type */
+       /* enum wmi_pmc_cmd_op_type */
+       u8 op;
        u8 reserved;
        __le16 ring_size;
        __le64 mem_base;
 } __packed;
 
-/*
- * WMI Events
- */
-
-/*
+/* WMI Events
  * List of Events (target to host)
  */
 enum wmi_event_id {
        WMI_READY_EVENTID                       = 0x1001,
        WMI_CONNECT_EVENTID                     = 0x1002,
        WMI_DISCONNECT_EVENTID                  = 0x1003,
-       WMI_SCAN_COMPLETE_EVENTID               = 0x100a,
-       WMI_REPORT_STATISTICS_EVENTID           = 0x100b,
+       WMI_SCAN_COMPLETE_EVENTID               = 0x100A,
+       WMI_REPORT_STATISTICS_EVENTID           = 0x100B,
        WMI_RD_MEM_RSP_EVENTID                  = 0x1800,
        WMI_FW_READY_EVENTID                    = 0x1801,
-       WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID      = 0x0200,
+       WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID      = 0x200,
        WMI_ECHO_RSP_EVENTID                    = 0x1803,
-       WMI_FS_TUNE_DONE_EVENTID                = 0x180a,
-       WMI_CORR_MEASURE_EVENTID                = 0x180b,
-       WMI_READ_RSSI_EVENTID                   = 0x180c,
-       WMI_TEMP_SENSE_DONE_EVENTID             = 0x180e,
-       WMI_DC_CALIB_DONE_EVENTID               = 0x180f,
+       WMI_FS_TUNE_DONE_EVENTID                = 0x180A,
+       WMI_CORR_MEASURE_EVENTID                = 0x180B,
+       WMI_READ_RSSI_EVENTID                   = 0x180C,
+       WMI_TEMP_SENSE_DONE_EVENTID             = 0x180E,
+       WMI_DC_CALIB_DONE_EVENTID               = 0x180F,
        WMI_IQ_TX_CALIB_DONE_EVENTID            = 0x1811,
        WMI_IQ_RX_CALIB_DONE_EVENTID            = 0x1812,
        WMI_SET_WORK_MODE_DONE_EVENTID          = 0x1815,
        WMI_LO_LEAKAGE_CALIB_DONE_EVENTID       = 0x1816,
        WMI_MARLON_R_READ_DONE_EVENTID          = 0x1818,
        WMI_MARLON_R_WRITE_DONE_EVENTID         = 0x1819,
-       WMI_MARLON_R_TXRX_SEL_DONE_EVENTID      = 0x181a,
-       WMI_SILENT_RSSI_CALIB_DONE_EVENTID      = 0x181d,
-       WMI_RF_RX_TEST_DONE_EVENTID             = 0x181e,
+       WMI_MARLON_R_TXRX_SEL_DONE_EVENTID      = 0x181A,
+       WMI_SILENT_RSSI_CALIB_DONE_EVENTID      = 0x181D,
+       WMI_RF_RX_TEST_DONE_EVENTID             = 0x181E,
        WMI_CFG_RX_CHAIN_DONE_EVENTID           = 0x1820,
        WMI_VRING_CFG_DONE_EVENTID              = 0x1821,
        WMI_BA_STATUS_EVENTID                   = 0x1823,
@@ -896,15 +851,13 @@ enum wmi_event_id {
        WMI_RCP_ADDBA_RESP_SENT_EVENTID         = 0x1825,
        WMI_DELBA_EVENTID                       = 0x1826,
        WMI_GET_SSID_EVENTID                    = 0x1828,
-       WMI_GET_PCP_CHANNEL_EVENTID             = 0x182a,
-       WMI_SW_TX_COMPLETE_EVENTID              = 0x182b,
-
+       WMI_GET_PCP_CHANNEL_EVENTID             = 0x182A,
+       WMI_SW_TX_COMPLETE_EVENTID              = 0x182B,
        WMI_READ_MAC_RXQ_EVENTID                = 0x1830,
        WMI_READ_MAC_TXQ_EVENTID                = 0x1831,
        WMI_WRITE_MAC_RXQ_EVENTID               = 0x1832,
        WMI_WRITE_MAC_TXQ_EVENTID               = 0x1833,
        WMI_WRITE_MAC_XQ_FIELD_EVENTID          = 0x1834,
-
        WMI_BEAMFORMING_MGMT_DONE_EVENTID       = 0x1836,
        WMI_BF_TXSS_MGMT_DONE_EVENTID           = 0x1837,
        WMI_BF_RXSS_MGMT_DONE_EVENTID           = 0x1839,
@@ -914,20 +867,18 @@ enum wmi_event_id {
        WMI_BF_SM_MGMT_DONE_EVENTID             = 0x1838,
        WMI_RX_MGMT_PACKET_EVENTID              = 0x1840,
        WMI_TX_MGMT_PACKET_EVENTID              = 0x1841,
-
+       WMI_OTP_READ_RESULT_EVENTID             = 0x1856,
        /* Performance monitoring events */
        WMI_DATA_PORT_OPEN_EVENTID              = 0x1860,
        WMI_WBE_LINK_DOWN_EVENTID               = 0x1861,
-
        WMI_BF_CTRL_DONE_EVENTID                = 0x1862,
        WMI_NOTIFY_REQ_DONE_EVENTID             = 0x1863,
        WMI_GET_STATUS_DONE_EVENTID             = 0x1864,
        WMI_VRING_EN_EVENTID                    = 0x1865,
-
        WMI_UNIT_TEST_EVENTID                   = 0x1900,
        WMI_FLASH_READ_DONE_EVENTID             = 0x1902,
        WMI_FLASH_WRITE_DONE_EVENTID            = 0x1903,
-       /*P2P*/
+       /* P2P */
        WMI_P2P_CFG_DONE_EVENTID                = 0x1910,
        WMI_PORT_ALLOCATED_EVENTID              = 0x1911,
        WMI_PORT_DELETED_EVENTID                = 0x1912,
@@ -937,49 +888,42 @@ enum wmi_event_id {
        WMI_DISCOVERY_STOPPED_EVENTID           = 0x1917,
        WMI_PCP_STARTED_EVENTID                 = 0x1918,
        WMI_PCP_STOPPED_EVENTID                 = 0x1919,
-       WMI_PCP_FACTOR_EVENTID                  = 0x191a,
+       WMI_PCP_FACTOR_EVENTID                  = 0x191A,
        WMI_SET_CHANNEL_EVENTID                 = 0x9000,
        WMI_ASSOC_REQ_EVENTID                   = 0x9001,
        WMI_EAPOL_RX_EVENTID                    = 0x9002,
        WMI_MAC_ADDR_RESP_EVENTID               = 0x9003,
        WMI_FW_VER_EVENTID                      = 0x9004,
+       WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENTID   = 0x9005,
 };
 
-/*
- * Events data structures
- */
-
+/* Events data structures */
 enum wmi_fw_status {
-       WMI_FW_STATUS_SUCCESS,
-       WMI_FW_STATUS_FAILURE,
+       WMI_FW_STATUS_SUCCESS   = 0x00,
+       WMI_FW_STATUS_FAILURE   = 0x01,
 };
 
-/*
- * WMI_RF_MGMT_STATUS_EVENTID
- */
+/* WMI_RF_MGMT_STATUS_EVENTID */
 enum wmi_rf_status {
-       WMI_RF_ENABLED                  = 0,
-       WMI_RF_DISABLED_HW              = 1,
-       WMI_RF_DISABLED_SW              = 2,
-       WMI_RF_DISABLED_HW_SW           = 3,
+       WMI_RF_ENABLED          = 0x00,
+       WMI_RF_DISABLED_HW      = 0x01,
+       WMI_RF_DISABLED_SW      = 0x02,
+       WMI_RF_DISABLED_HW_SW   = 0x03,
 };
 
+/* WMI_RF_MGMT_STATUS_EVENTID */
 struct wmi_rf_mgmt_status_event {
        __le32 rf_status;
 } __packed;
 
-/*
- * WMI_THERMAL_THROTTLING_STATUS_EVENTID
- */
+/* WMI_THERMAL_THROTTLING_STATUS_EVENTID */
 struct wmi_thermal_throttling_status_event {
        __le32 time_on_usec;
        __le32 time_off_usec;
        __le32 max_txop_length_usec;
 } __packed;
 
-/*
- * WMI_GET_STATUS_DONE_EVENTID
- */
+/* WMI_GET_STATUS_DONE_EVENTID */
 struct wmi_get_status_done_event {
        __le32 is_associated;
        u8 cid;
@@ -995,9 +939,7 @@ struct wmi_get_status_done_event {
        __le32 is_secured;
 } __packed;
 
-/*
- * WMI_FW_VER_EVENTID
- */
+/* WMI_FW_VER_EVENTID */
 struct wmi_fw_ver_event {
        u8 major;
        u8 minor;
@@ -1005,9 +947,7 @@ struct wmi_fw_ver_event {
        __le16 build;
 } __packed;
 
-/*
-* WMI_MAC_ADDR_RESP_EVENTID
-*/
+/* WMI_MAC_ADDR_RESP_EVENTID */
 struct wmi_mac_addr_resp_event {
        u8 mac[WMI_MAC_LEN];
        u8 auth_mode;
@@ -1015,42 +955,38 @@ struct wmi_mac_addr_resp_event {
        __le32 offload_mode;
 } __packed;
 
-/*
-* WMI_EAPOL_RX_EVENTID
-*/
+/* WMI_EAPOL_RX_EVENTID */
 struct wmi_eapol_rx_event {
        u8 src_mac[WMI_MAC_LEN];
        __le16 eapol_len;
        u8 eapol[0];
 } __packed;
 
-/*
-* WMI_READY_EVENTID
-*/
+/* WMI_READY_EVENTID */
 enum wmi_phy_capability {
-       WMI_11A_CAPABILITY              = 1,
-       WMI_11G_CAPABILITY              = 2,
-       WMI_11AG_CAPABILITY             = 3,
-       WMI_11NA_CAPABILITY             = 4,
-       WMI_11NG_CAPABILITY             = 5,
-       WMI_11NAG_CAPABILITY            = 6,
-       WMI_11AD_CAPABILITY             = 7,
-       WMI_11N_CAPABILITY_OFFSET = WMI_11NA_CAPABILITY - WMI_11A_CAPABILITY,
+       WMI_11A_CAPABILITY              = 0x01,
+       WMI_11G_CAPABILITY              = 0x02,
+       WMI_11AG_CAPABILITY             = 0x03,
+       WMI_11NA_CAPABILITY             = 0x04,
+       WMI_11NG_CAPABILITY             = 0x05,
+       WMI_11NAG_CAPABILITY            = 0x06,
+       WMI_11AD_CAPABILITY             = 0x07,
+       WMI_11N_CAPABILITY_OFFSET       = 0x03,
 };
 
 struct wmi_ready_event {
        __le32 sw_version;
        __le32 abi_version;
        u8 mac[WMI_MAC_LEN];
-       u8 phy_capability;              /* enum wmi_phy_capability */
+       /* enum wmi_phy_capability */
+       u8 phy_capability;
        u8 numof_additional_mids;
 } __packed;
 
-/*
- * WMI_NOTIFY_REQ_DONE_EVENTID
- */
+/* WMI_NOTIFY_REQ_DONE_EVENTID */
 struct wmi_notify_req_done_event {
-       __le32 status; /* beamforming status, 0: fail; 1: OK; 2: retrying */
+       /* beamforming status, 0: fail; 1: OK; 2: retrying */
+       __le32 status;
        __le64 tsf;
        __le32 snr_val;
        __le32 tx_tpt;
@@ -1066,9 +1002,7 @@ struct wmi_notify_req_done_event {
        u8 reserved[3];
 } __packed;
 
-/*
- * WMI_CONNECT_EVENTID
- */
+/* WMI_CONNECT_EVENTID */
 struct wmi_connect_event {
        u8 channel;
        u8 reserved0;
@@ -1082,68 +1016,103 @@ struct wmi_connect_event {
        u8 assoc_resp_len;
        u8 cid;
        u8 reserved2[3];
+       /* not in use */
        u8 assoc_info[0];
 } __packed;
 
-/*
- * WMI_DISCONNECT_EVENTID
- */
+/* WMI_DISCONNECT_EVENTID */
 enum wmi_disconnect_reason {
-       WMI_DIS_REASON_NO_NETWORK_AVAIL         = 1,
-       WMI_DIS_REASON_LOST_LINK                = 2, /* bmiss */
-       WMI_DIS_REASON_DISCONNECT_CMD           = 3,
-       WMI_DIS_REASON_BSS_DISCONNECTED         = 4,
-       WMI_DIS_REASON_AUTH_FAILED              = 5,
-       WMI_DIS_REASON_ASSOC_FAILED             = 6,
-       WMI_DIS_REASON_NO_RESOURCES_AVAIL       = 7,
-       WMI_DIS_REASON_CSERV_DISCONNECT         = 8,
-       WMI_DIS_REASON_INVALID_PROFILE          = 10,
-       WMI_DIS_REASON_DOT11H_CHANNEL_SWITCH    = 11,
-       WMI_DIS_REASON_PROFILE_MISMATCH         = 12,
-       WMI_DIS_REASON_CONNECTION_EVICTED       = 13,
-       WMI_DIS_REASON_IBSS_MERGE               = 14,
+       WMI_DIS_REASON_NO_NETWORK_AVAIL         = 0x01,
+       /* bmiss */
+       WMI_DIS_REASON_LOST_LINK                = 0x02,
+       WMI_DIS_REASON_DISCONNECT_CMD           = 0x03,
+       WMI_DIS_REASON_BSS_DISCONNECTED         = 0x04,
+       WMI_DIS_REASON_AUTH_FAILED              = 0x05,
+       WMI_DIS_REASON_ASSOC_FAILED             = 0x06,
+       WMI_DIS_REASON_NO_RESOURCES_AVAIL       = 0x07,
+       WMI_DIS_REASON_CSERV_DISCONNECT         = 0x08,
+       WMI_DIS_REASON_INVALID_PROFILE          = 0x0A,
+       WMI_DIS_REASON_DOT11H_CHANNEL_SWITCH    = 0x0B,
+       WMI_DIS_REASON_PROFILE_MISMATCH         = 0x0C,
+       WMI_DIS_REASON_CONNECTION_EVICTED       = 0x0D,
+       WMI_DIS_REASON_IBSS_MERGE               = 0x0E,
 };
 
 struct wmi_disconnect_event {
-       __le16 protocol_reason_status;  /* reason code, see 802.11 spec. */
-       u8 bssid[WMI_MAC_LEN];          /* set if known */
-       u8 disconnect_reason;           /* see wmi_disconnect_reason */
-       u8 assoc_resp_len;      /* not used */
-       u8 assoc_info[0];       /* not used */
+       /* reason code, see 802.11 spec. */
+       __le16 protocol_reason_status;
+       /* set if known */
+       u8 bssid[WMI_MAC_LEN];
+       /* see enum wmi_disconnect_reason */
+       u8 disconnect_reason;
+       /* last assoc req may be passed to host - not in use */
+       u8 assoc_resp_len;
+       /* last assoc req may be passed to host - not in use */
+       u8 assoc_info[0];
 } __packed;
 
-/*
- * WMI_SCAN_COMPLETE_EVENTID
- */
+/* WMI_SCAN_COMPLETE_EVENTID */
 enum scan_status {
-       WMI_SCAN_SUCCESS        = 0,
-       WMI_SCAN_FAILED         = 1,
-       WMI_SCAN_ABORTED        = 2,
-       WMI_SCAN_REJECTED       = 3,
+       WMI_SCAN_SUCCESS        = 0x00,
+       WMI_SCAN_FAILED         = 0x01,
+       WMI_SCAN_ABORTED        = 0x02,
+       WMI_SCAN_REJECTED       = 0x03,
+       WMI_SCAN_ABORT_REJECTED = 0x04,
 };
 
 struct wmi_scan_complete_event {
-       __le32 status;  /* scan_status */
+       /* enum scan_status */
+       __le32 status;
 } __packed;
 
-/*
- * WMI_BA_STATUS_EVENTID
- */
+/* WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENT */
+enum wmi_acs_info_bitmask {
+       WMI_ACS_INFO_BITMASK_BEACON_FOUND       = 0x01,
+       WMI_ACS_INFO_BITMASK_BUSY_TIME          = 0x02,
+       WMI_ACS_INFO_BITMASK_TX_TIME            = 0x04,
+       WMI_ACS_INFO_BITMASK_RX_TIME            = 0x08,
+       WMI_ACS_INFO_BITMASK_NOISE              = 0x10,
+};
+
+struct scan_acs_info {
+       u8 channel;
+       u8 beacon_found;
+       /* msec */
+       __le16 busy_time;
+       __le16 tx_time;
+       __le16 rx_time;
+       u8 noise;
+       u8 reserved[3];
+} __packed;
+
+struct wmi_acs_passive_scan_complete_event {
+       __le32 dwell_time;
+       /* valid fields within channel info according to
+        * their appearance in struct order
+        */
+       __le16 filled;
+       u8 num_scanned_channels;
+       u8 reserved;
+       struct scan_acs_info scan_info_list[0];
+} __packed;
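
The event is variable-length: num_scanned_channels gives the number of
scan_acs_info entries and the filled bitmask says which of their fields the
firmware populated. A hedged sketch of walking the list (the function and its
use of pr_debug() are illustrative only):

	static void acs_dump(struct wmi_acs_passive_scan_complete_event *evt)
	{
		u16 filled = le16_to_cpu(evt->filled);
		int i;

		for (i = 0; i < evt->num_scanned_channels; i++) {
			struct scan_acs_info *ch = &evt->scan_info_list[i];

			if (filled & WMI_ACS_INFO_BITMASK_BUSY_TIME)
				pr_debug("ch %d busy %u ms\n", ch->channel,
					 le16_to_cpu(ch->busy_time));
		}
	}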
+
+/* WMI_BA_STATUS_EVENTID */
 enum wmi_vring_ba_status {
-       WMI_BA_AGREED                   = 0,
-       WMI_BA_NON_AGREED               = 1,
+       WMI_BA_AGREED                   = 0x00,
+       WMI_BA_NON_AGREED               = 0x01,
        /* BA_EN in middle of teardown flow */
-       WMI_BA_TD_WIP                   = 2,
+       WMI_BA_TD_WIP                   = 0x02,
        /* BA_DIS or BA_EN in middle of BA SETUP flow */
-       WMI_BA_SETUP_WIP                = 3,
+       WMI_BA_SETUP_WIP                = 0x03,
        /* BA_EN when the BA session is already active */
-       WMI_BA_SESSION_ACTIVE           = 4,
+       WMI_BA_SESSION_ACTIVE           = 0x04,
        /* BA_DIS when the BA session is not active */
-       WMI_BA_SESSION_NOT_ACTIVE       = 5,
+       WMI_BA_SESSION_NOT_ACTIVE       = 0x05,
 };
 
-struct wmi_vring_ba_status_event {
-       __le16 status; /* enum wmi_vring_ba_status */
+struct wmi_ba_status_event {
+       /* enum wmi_vring_ba_status */
+       __le16 status;
        u8 reserved[2];
        u8 ringid;
        u8 agg_wsize;
@@ -1151,18 +1120,14 @@ struct wmi_vring_ba_status_event {
        u8 amsdu;
 } __packed;
 
-/*
- * WMI_DELBA_EVENTID
- */
+/* WMI_DELBA_EVENTID */
 struct wmi_delba_event {
        u8 cidxtid;
        u8 from_initiator;
        __le16 reason;
 } __packed;
 
-/*
- * WMI_VRING_CFG_DONE_EVENTID
- */
+/* WMI_VRING_CFG_DONE_EVENTID */
 struct wmi_vring_cfg_done_event {
        u8 ringid;
        u8 status;
@@ -1170,174 +1135,151 @@ struct wmi_vring_cfg_done_event {
        __le32 tx_vring_tail_ptr;
 } __packed;
 
-/*
- * WMI_RCP_ADDBA_RESP_SENT_EVENTID
- */
+/* WMI_RCP_ADDBA_RESP_SENT_EVENTID */
 struct wmi_rcp_addba_resp_sent_event {
        u8 cidxtid;
        u8 reserved;
        __le16 status;
 } __packed;
 
-/*
- * WMI_RCP_ADDBA_REQ_EVENTID
- */
+/* WMI_RCP_ADDBA_REQ_EVENTID */
 struct wmi_rcp_addba_req_event {
        u8 cidxtid;
        u8 dialog_token;
-       __le16 ba_param_set;    /* ieee80211_ba_parameterset as it received */
+       /* ieee80211_ba_parameterset as it received */
+       __le16 ba_param_set;
        __le16 ba_timeout;
-       __le16 ba_seq_ctrl;     /* ieee80211_ba_seqstrl field as it received */
+       /* ieee80211_ba_seqstrl field as it received */
+       __le16 ba_seq_ctrl;
 } __packed;
 
-/*
- * WMI_CFG_RX_CHAIN_DONE_EVENTID
- */
+/* WMI_CFG_RX_CHAIN_DONE_EVENTID */
 enum wmi_cfg_rx_chain_done_event_status {
-       WMI_CFG_RX_CHAIN_SUCCESS        = 1,
+       WMI_CFG_RX_CHAIN_SUCCESS        = 0x01,
 };
 
 struct wmi_cfg_rx_chain_done_event {
-       __le32 rx_ring_tail_ptr;        /* Rx V-Ring Tail pointer */
+       /* V-Ring Tail pointer */
+       __le32 rx_ring_tail_ptr;
        __le32 status;
 } __packed;
 
-/*
- * WMI_WBE_LINK_DOWN_EVENTID
- */
+/* WMI_WBE_LINK_DOWN_EVENTID */
 enum wmi_wbe_link_down_event_reason {
-       WMI_WBE_REASON_USER_REQUEST     = 0,
-       WMI_WBE_REASON_RX_DISASSOC      = 1,
-       WMI_WBE_REASON_BAD_PHY_LINK     = 2,
+       WMI_WBE_REASON_USER_REQUEST     = 0x00,
+       WMI_WBE_REASON_RX_DISASSOC      = 0x01,
+       WMI_WBE_REASON_BAD_PHY_LINK     = 0x02,
 };
 
+/* WMI_WBE_LINK_DOWN_EVENTID */
 struct wmi_wbe_link_down_event {
        u8 cid;
        u8 reserved[3];
        __le32 reason;
 } __packed;
 
-/*
- * WMI_DATA_PORT_OPEN_EVENTID
- */
+/* WMI_DATA_PORT_OPEN_EVENTID */
 struct wmi_data_port_open_event {
        u8 cid;
        u8 reserved[3];
 } __packed;
 
-/*
- * WMI_VRING_EN_EVENTID
- */
+/* WMI_VRING_EN_EVENTID */
 struct wmi_vring_en_event {
        u8 vring_index;
        u8 reserved[3];
 } __packed;
 
-/*
- * WMI_GET_PCP_CHANNEL_EVENTID
- */
+/* WMI_GET_PCP_CHANNEL_EVENTID */
 struct wmi_get_pcp_channel_event {
        u8 channel;
        u8 reserved[3];
 } __packed;
 
-/*
- * WMI_P2P_CFG_DONE_EVENTID
- */
+/* WMI_P2P_CFG_DONE_EVENTID */
 struct wmi_p2p_cfg_done_event {
-       u8 status;      /* wmi_fw_status */
+       /* wmi_fw_status */
+       u8 status;
        u8 reserved[3];
 } __packed;
 
-/*
-* WMI_PORT_ALLOCATED_EVENTID
-*/
+/* WMI_PORT_ALLOCATED_EVENTID */
 struct wmi_port_allocated_event {
-       u8 status;      /* wmi_fw_status */
+       /* wmi_fw_status */
+       u8 status;
        u8 reserved[3];
 } __packed;
 
-/*
-* WMI_PORT_DELETED_EVENTID
-*/
+/* WMI_PORT_DELETED_EVENTID */
 struct wmi_port_deleted_event {
-       u8 status;      /* wmi_fw_status */
+       /* wmi_fw_status */
+       u8 status;
        u8 reserved[3];
 } __packed;
 
-/*
- * WMI_LISTEN_STARTED_EVENTID
- */
+/* WMI_LISTEN_STARTED_EVENTID */
 struct wmi_listen_started_event {
-       u8 status;      /* wmi_fw_status */
+       /* wmi_fw_status */
+       u8 status;
        u8 reserved[3];
 } __packed;
 
-/*
- * WMI_SEARCH_STARTED_EVENTID
- */
+/* WMI_SEARCH_STARTED_EVENTID */
 struct wmi_search_started_event {
-       u8 status;      /* wmi_fw_status */
+       /* wmi_fw_status */
+       u8 status;
        u8 reserved[3];
 } __packed;
 
-/*
- * WMI_PCP_STARTED_EVENTID
- */
+/* WMI_PCP_STARTED_EVENTID */
 struct wmi_pcp_started_event {
-       u8 status;      /* wmi_fw_status */
+       /* wmi_fw_status */
+       u8 status;
        u8 reserved[3];
 } __packed;
 
-/*
- * WMI_PCP_FACTOR_EVENTID
- */
+/* WMI_PCP_FACTOR_EVENTID */
 struct wmi_pcp_factor_event {
        __le32 pcp_factor;
 } __packed;
 
-/*
- * WMI_SW_TX_COMPLETE_EVENTID
- */
 enum wmi_sw_tx_status {
-       WMI_TX_SW_STATUS_SUCCESS                = 0,
-       WMI_TX_SW_STATUS_FAILED_NO_RESOURCES    = 1,
-       WMI_TX_SW_STATUS_FAILED_TX              = 2,
+       WMI_TX_SW_STATUS_SUCCESS                = 0x00,
+       WMI_TX_SW_STATUS_FAILED_NO_RESOURCES    = 0x01,
+       WMI_TX_SW_STATUS_FAILED_TX              = 0x02,
 };
 
+/* WMI_SW_TX_COMPLETE_EVENTID */
 struct wmi_sw_tx_complete_event {
-       u8 status;      /* enum wmi_sw_tx_status */
+       /* enum wmi_sw_tx_status */
+       u8 status;
        u8 reserved[3];
 } __packed;
 
-/*
- * WMI_CORR_MEASURE_EVENTID
- */
+/* WMI_CORR_MEASURE_EVENTID */
 struct wmi_corr_measure_event {
-       s32 i;
-       s32 q;
-       s32 image_i;
-       s32 image_q;
+       /* signed */
+       __le32 i;
+       /* signed */
+       __le32 q;
+       /* signed */
+       __le32 image_i;
+       /* signed */
+       __le32 image_q;
 } __packed;
 
-/*
- * WMI_READ_RSSI_EVENTID
- */
+/* WMI_READ_RSSI_EVENTID */
 struct wmi_read_rssi_event {
        __le32 ina_rssi_adc_dbm;
 } __packed;
 
-/*
- * WMI_GET_SSID_EVENTID
- */
+/* WMI_GET_SSID_EVENTID */
 struct wmi_get_ssid_event {
        __le32 ssid_len;
        u8 ssid[WMI_MAX_SSID_LEN];
 } __packed;
 
-/*
- * WMI_RX_MGMT_PACKET_EVENTID
- */
+/* wmi_rx_mgmt_info */
 struct wmi_rx_mgmt_info {
        u8 mcs;
        s8 snr;
@@ -1346,39 +1288,65 @@ struct wmi_rx_mgmt_info {
        __le16 stype;
        __le16 status;
        __le32 len;
+       /* Not resolved when == 0xFFFFFFFF  ==> Broadcast to all MIDS */
        u8 qid;
+       /* Not resolved when == 0xFFFFFFFF  ==> Broadcast to all MIDS */
        u8 mid;
        u8 cid;
-       u8 channel;     /* From Radio MNGR */
+       /* From Radio MNGR */
+       u8 channel;
 } __packed;
 
-/*
- * WMI_TX_MGMT_PACKET_EVENTID
- */
+/* wmi_otp_read_write_cmd */
+struct wmi_otp_read_write_cmd {
+       __le32 addr;
+       __le32 size;
+       u8 values[0];
+} __packed;
+
+/* WMI_OTP_READ_RESULT_EVENTID */
+struct wmi_otp_read_result_event {
+       u8 payload[0];
+} __packed;
+
+/* WMI_TX_MGMT_PACKET_EVENTID */
 struct wmi_tx_mgmt_packet_event {
        u8 payload[0];
 } __packed;
 
+/* WMI_RX_MGMT_PACKET_EVENTID */
 struct wmi_rx_mgmt_packet_event {
        struct wmi_rx_mgmt_info info;
        u8 payload[0];
 } __packed;
 
-/*
- * WMI_ECHO_RSP_EVENTID
- */
-struct wmi_echo_event {
+/* WMI_ECHO_RSP_EVENTID */
+struct wmi_echo_rsp_event {
        __le32 echoed_value;
 } __packed;
 
-/*
- * WMI_TEMP_SENSE_DONE_EVENTID
+/* WMI_TEMP_SENSE_DONE_EVENTID
  *
  * Measure MAC and radio temperatures
  */
 struct wmi_temp_sense_done_event {
+       /* Temperature times 1000 (the actual temperature is obtained by
+        * dividing the value by 1000)
+        */
+       __le32 baseband_t1000;
+       /* Temperature times 1000 (the actual temperature is obtained by
+        * dividing the value by 1000)
+        */
        __le32 rf_t1000;
 } __packed;
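
A hypothetical conversion helper (not part of this header) makes the scaling
explicit:

	static inline s32 wmi_t1000_to_deg(__le32 t1000)
	{
		/* firmware reports degrees multiplied by 1000 */
		return (s32)le32_to_cpu(t1000) / 1000;
	}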
 
+#define WMI_SCAN_DWELL_TIME_MS (100)
+#define WMI_SURVEY_TIMEOUT_MS  (10000)
+
+enum wmi_hidden_ssid {
+       WMI_HIDDEN_SSID_DISABLED        = 0x00,
+       WMI_HIDDEN_SSID_SEND_EMPTY      = 0x10,
+       WMI_HIDDEN_SSID_CLEAR           = 0xFE,
+};
+
 #endif /* __WILOCITY_WMI_H__ */
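
The hunks below are a mechanical rename: the mac80211-private enum
ieee80211_band is replaced by the cfg80211-level enum nl80211_band throughout
the at76c50x, atmel and b43 drivers. The two enums list the bands in the same
order, so the constants stay value-compatible; a hypothetical compile-time
check would make that assumption explicit:

	static inline void band_rename_check(void)
	{
		/* assumed invariant: renamed constants keep their old values */
		BUILD_BUG_ON(NL80211_BAND_2GHZ != 0);
		BUILD_BUG_ON(NL80211_BAND_5GHZ != 1);
	}
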
index 1efb1d66e0b7471499a4ff4386c8fa37f1df8bc0..7c108047fb461ffb15840b7f9d2428a889c50319 100644 (file)
@@ -1547,7 +1547,7 @@ static inline int at76_guess_freq(struct at76_priv *priv)
                channel = el[2];
 
 exit:
-       return ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
+       return ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ);
 }
 
 static void at76_rx_tasklet(unsigned long param)
@@ -1590,7 +1590,7 @@ static void at76_rx_tasklet(unsigned long param)
        rx_status.signal = buf->rssi;
        rx_status.flag |= RX_FLAG_DECRYPTED;
        rx_status.flag |= RX_FLAG_IV_STRIPPED;
-       rx_status.band = IEEE80211_BAND_2GHZ;
+       rx_status.band = NL80211_BAND_2GHZ;
        rx_status.freq = at76_guess_freq(priv);
 
        at76_dbg(DBG_MAC80211, "calling ieee80211_rx_irqsafe(): %d/%d",
@@ -2359,7 +2359,7 @@ static int at76_init_new_device(struct at76_priv *priv,
        priv->hw->wiphy->max_scan_ssids = 1;
        priv->hw->wiphy->max_scan_ie_len = 0;
        priv->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
-       priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &at76_supported_band;
+       priv->hw->wiphy->bands[NL80211_BAND_2GHZ] = &at76_supported_band;
        ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS);
        ieee80211_hw_set(priv->hw, SIGNAL_UNSPEC);
        priv->hw->max_signal = 100;
index 6a1f03c271c1c04074ec76fcad031f5bce954382..8f8f37f3a00c3314936edfe0295346f5c695f642 100644 (file)
@@ -2434,7 +2434,7 @@ static int atmel_get_range(struct net_device *dev,
 
                        /* Values in MHz -> * 10^5 * 10 */
                        range->freq[k].m = 100000 *
-                        ieee80211_channel_to_frequency(i, IEEE80211_BAND_2GHZ);
+                        ieee80211_channel_to_frequency(i, NL80211_BAND_2GHZ);
                        range->freq[k++].e = 1;
                }
                range->num_frequency = k;
index 0365524398168562df0e9cd7368ce95f107f1786..d7d42f0b80c3d2f3d73ab6affd2a4ebebb9f1f1f 100644 (file)
@@ -992,9 +992,9 @@ static inline int b43_is_mode(struct b43_wl *wl, int type)
 
 /**
  * b43_current_band - Returns the currently used band.
- * Returns one of IEEE80211_BAND_2GHZ and IEEE80211_BAND_5GHZ.
+ * Returns one of NL80211_BAND_2GHZ and NL80211_BAND_5GHZ.
  */
-static inline enum ieee80211_band b43_current_band(struct b43_wl *wl)
+static inline enum nl80211_band b43_current_band(struct b43_wl *wl)
 {
        return wl->hw->conf.chandef.chan->band;
 }
index 72380af9dc523e56055eecccd5402de62972412e..4ee5c5853f9f41e6746e4596211dda60be362f59 100644 (file)
@@ -187,7 +187,7 @@ static struct ieee80211_rate __b43_ratetable[] = {
 #define b43_g_ratetable_size   12
 
 #define CHAN2G(_channel, _freq, _flags) {                      \
-       .band                   = IEEE80211_BAND_2GHZ,          \
+       .band                   = NL80211_BAND_2GHZ,            \
        .center_freq            = (_freq),                      \
        .hw_value               = (_channel),                   \
        .flags                  = (_flags),                     \
@@ -216,7 +216,7 @@ static struct ieee80211_channel b43_2ghz_chantable[] = {
 #undef CHAN2G
 
 #define CHAN4G(_channel, _flags) {                             \
-       .band                   = IEEE80211_BAND_5GHZ,          \
+       .band                   = NL80211_BAND_5GHZ,            \
        .center_freq            = 4000 + (5 * (_channel)),      \
        .hw_value               = (_channel),                   \
        .flags                  = (_flags),                     \
@@ -224,7 +224,7 @@ static struct ieee80211_channel b43_2ghz_chantable[] = {
        .max_power              = 30,                           \
 }
 #define CHAN5G(_channel, _flags) {                             \
-       .band                   = IEEE80211_BAND_5GHZ,          \
+       .band                   = NL80211_BAND_5GHZ,            \
        .center_freq            = 5000 + (5 * (_channel)),      \
        .hw_value               = (_channel),                   \
        .flags                  = (_flags),                     \
@@ -323,7 +323,7 @@ static struct ieee80211_channel b43_5ghz_aphy_chantable[] = {
 #undef CHAN5G
 
 static struct ieee80211_supported_band b43_band_5GHz_nphy = {
-       .band           = IEEE80211_BAND_5GHZ,
+       .band           = NL80211_BAND_5GHZ,
        .channels       = b43_5ghz_nphy_chantable,
        .n_channels     = ARRAY_SIZE(b43_5ghz_nphy_chantable),
        .bitrates       = b43_a_ratetable,
@@ -331,7 +331,7 @@ static struct ieee80211_supported_band b43_band_5GHz_nphy = {
 };
 
 static struct ieee80211_supported_band b43_band_5GHz_nphy_limited = {
-       .band           = IEEE80211_BAND_5GHZ,
+       .band           = NL80211_BAND_5GHZ,
        .channels       = b43_5ghz_nphy_chantable_limited,
        .n_channels     = ARRAY_SIZE(b43_5ghz_nphy_chantable_limited),
        .bitrates       = b43_a_ratetable,
@@ -339,7 +339,7 @@ static struct ieee80211_supported_band b43_band_5GHz_nphy_limited = {
 };
 
 static struct ieee80211_supported_band b43_band_5GHz_aphy = {
-       .band           = IEEE80211_BAND_5GHZ,
+       .band           = NL80211_BAND_5GHZ,
        .channels       = b43_5ghz_aphy_chantable,
        .n_channels     = ARRAY_SIZE(b43_5ghz_aphy_chantable),
        .bitrates       = b43_a_ratetable,
@@ -347,7 +347,7 @@ static struct ieee80211_supported_band b43_band_5GHz_aphy = {
 };
 
 static struct ieee80211_supported_band b43_band_2GHz = {
-       .band           = IEEE80211_BAND_2GHZ,
+       .band           = NL80211_BAND_2GHZ,
        .channels       = b43_2ghz_chantable,
        .n_channels     = ARRAY_SIZE(b43_2ghz_chantable),
        .bitrates       = b43_g_ratetable,
@@ -355,7 +355,7 @@ static struct ieee80211_supported_band b43_band_2GHz = {
 };
 
 static struct ieee80211_supported_band b43_band_2ghz_limited = {
-       .band           = IEEE80211_BAND_2GHZ,
+       .band           = NL80211_BAND_2GHZ,
        .channels       = b43_2ghz_chantable,
        .n_channels     = b43_2ghz_chantable_limited_size,
        .bitrates       = b43_g_ratetable,
@@ -717,7 +717,7 @@ static void b43_set_slot_time(struct b43_wldev *dev, u16 slot_time)
 {
        /* slot_time is in usec. */
        /* This test used to exit for all but a G PHY. */
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+       if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
                return;
        b43_write16(dev, B43_MMIO_IFSSLOT, 510 + slot_time);
        /* Shared memory location 0x0010 is the slot time and should be
@@ -3880,12 +3880,12 @@ static void b43_op_set_tsf(struct ieee80211_hw *hw,
        mutex_unlock(&wl->mutex);
 }
 
-static const char *band_to_string(enum ieee80211_band band)
+static const char *band_to_string(enum nl80211_band band)
 {
        switch (band) {
-       case IEEE80211_BAND_5GHZ:
+       case NL80211_BAND_5GHZ:
                return "5";
-       case IEEE80211_BAND_2GHZ:
+       case NL80211_BAND_2GHZ:
                return "2.4";
        default:
                break;
@@ -3903,10 +3903,10 @@ static int b43_switch_band(struct b43_wldev *dev,
        u32 tmp;
 
        switch (chan->band) {
-       case IEEE80211_BAND_5GHZ:
+       case NL80211_BAND_5GHZ:
                gmode = false;
                break;
-       case IEEE80211_BAND_2GHZ:
+       case NL80211_BAND_2GHZ:
                gmode = true;
                break;
        default:
@@ -5294,16 +5294,16 @@ static int b43_setup_bands(struct b43_wldev *dev,
                     phy->radio_rev == 9;
 
        if (have_2ghz_phy)
-               hw->wiphy->bands[IEEE80211_BAND_2GHZ] = limited_2g ?
+               hw->wiphy->bands[NL80211_BAND_2GHZ] = limited_2g ?
                        &b43_band_2ghz_limited : &b43_band_2GHz;
        if (dev->phy.type == B43_PHYTYPE_N) {
                if (have_5ghz_phy)
-                       hw->wiphy->bands[IEEE80211_BAND_5GHZ] = limited_5g ?
+                       hw->wiphy->bands[NL80211_BAND_5GHZ] = limited_5g ?
                                &b43_band_5GHz_nphy_limited :
                                &b43_band_5GHz_nphy;
        } else {
                if (have_5ghz_phy)
-                       hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &b43_band_5GHz_aphy;
+                       hw->wiphy->bands[NL80211_BAND_5GHZ] = &b43_band_5GHz_aphy;
        }
 
        dev->phy.supports_2ghz = have_2ghz_phy;
@@ -5680,11 +5680,12 @@ static int b43_bcma_probe(struct bcma_device *core)
        INIT_WORK(&wl->firmware_load, b43_request_firmware);
        schedule_work(&wl->firmware_load);
 
-bcma_out:
        return err;
 
 bcma_err_wireless_exit:
        ieee80211_free_hw(wl->hw);
+bcma_out:
+       kfree(dev);
        return err;
 }
 
@@ -5712,8 +5713,8 @@ static void b43_bcma_remove(struct bcma_device *core)
        b43_rng_exit(wl);
 
        b43_leds_unregister(wl);
-
        ieee80211_free_hw(wl->hw);
+       kfree(wldev->dev);
 }
 
 static struct bcma_driver b43_bcma_driver = {
@@ -5796,6 +5797,7 @@ static void b43_ssb_remove(struct ssb_device *sdev)
 
        b43_leds_unregister(wl);
        b43_wireless_exit(dev, wl);
+       kfree(dev);
 }
 
 static struct ssb_driver b43_ssb_driver = {
index e75633d67938672585ca92924c82372eaeb4fcaa..52f8abad8831d2dfcbf903d1f7b1bd87da40f5b7 100644 (file)
@@ -61,7 +61,7 @@ static void b43_phy_ac_op_radio_write(struct b43_wldev *dev, u16 reg,
 
 static unsigned int b43_phy_ac_op_get_default_chan(struct b43_wldev *dev)
 {
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
                return 11;
        return 36;
 }
index ec2b9c577b909b73aa1c086d68666b3ddb4b65f0..85f2ca98956567bd28599fbe4d1499e549a1f425 100644 (file)
@@ -436,7 +436,7 @@ int b43_switch_channel(struct b43_wldev *dev, unsigned int new_channel)
         * firmware from sending ghost packets.
         */
        channelcookie = new_channel;
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+       if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
                channelcookie |= B43_SHM_SH_CHAN_5GHZ;
        /* FIXME: set 40Mhz flag if required */
        if (0)
index bd68945965d6967ba75f2a4a25d5328d3d649d12..718c90e81696eaf655817bfba05da4735f2bed93 100644 (file)
@@ -568,7 +568,7 @@ static void b43_phy_ht_tx_power_ctl(struct b43_wldev *dev, bool enable)
        } else {
                b43_phy_set(dev, B43_PHY_HT_TXPCTL_CMD_C1, en_bits);
 
-               if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+               if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
                        for (i = 0; i < 3; i++)
                                b43_phy_write(dev, cmd_regs[i], 0x32);
                }
@@ -643,7 +643,7 @@ static void b43_phy_ht_tx_power_ctl_setup(struct b43_wldev *dev)
        u16 freq = dev->phy.chandef->chan->center_freq;
        int i, c;
 
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                for (c = 0; c < 3; c++) {
                        target[c] = sprom->core_pwr_info[c].maxpwr_2g;
                        a1[c] = sprom->core_pwr_info[c].pa_2g[0];
@@ -777,7 +777,7 @@ static void b43_phy_ht_channel_setup(struct b43_wldev *dev,
                                const struct b43_phy_ht_channeltab_e_phy *e,
                                struct ieee80211_channel *new_channel)
 {
-       if (new_channel->band == IEEE80211_BAND_5GHZ) {
+       if (new_channel->band == NL80211_BAND_5GHZ) {
                /* Switch to 2 GHz for a moment to access B-PHY regs */
                b43_phy_mask(dev, B43_PHY_HT_BANDCTL, ~B43_PHY_HT_BANDCTL_5GHZ);
 
@@ -805,7 +805,7 @@ static void b43_phy_ht_channel_setup(struct b43_wldev *dev,
        } else {
                b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_OFDM_EN,
                                      B43_PHY_HT_CLASS_CTL_OFDM_EN);
-               if (new_channel->band == IEEE80211_BAND_2GHZ)
+               if (new_channel->band == NL80211_BAND_2GHZ)
                        b43_phy_mask(dev, B43_PHY_HT_TEST, ~0x840);
        }
 
@@ -916,7 +916,7 @@ static int b43_phy_ht_op_init(struct b43_wldev *dev)
        if (0) /* TODO: condition */
                ; /* TODO: PHY op on reg 0x217 */
 
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+       if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
                b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_CCK_EN, 0);
        else
                b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_CCK_EN,
@@ -1005,7 +1005,7 @@ static int b43_phy_ht_op_init(struct b43_wldev *dev)
        b43_phy_ht_classifier(dev, 0, 0);
        b43_phy_ht_read_clip_detection(dev, clip_state);
 
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
                b43_phy_ht_bphy_init(dev);
 
        b43_httab_write_bulk(dev, B43_HTTAB32(0x1a, 0xc0),
@@ -1077,7 +1077,7 @@ static int b43_phy_ht_op_switch_channel(struct b43_wldev *dev,
        enum nl80211_channel_type channel_type =
                cfg80211_get_chandef_type(&dev->wl->hw->conf.chandef);
 
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                if ((new_channel < 1) || (new_channel > 14))
                        return -EINVAL;
        } else {
@@ -1089,7 +1089,7 @@ static int b43_phy_ht_op_switch_channel(struct b43_wldev *dev,
 
 static unsigned int b43_phy_ht_op_get_default_chan(struct b43_wldev *dev)
 {
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
                return 11;
        return 36;
 }
index 97461ccf3e1e58ba4033b97756c0dd50491b13bf..63bd29f070f70d91609f02dbc71ca0e83623b7f1 100644 (file)
@@ -108,7 +108,7 @@ static void b43_radio_2064_channel_setup(struct b43_wldev *dev)
 /* wlc_radio_2064_init */
 static void b43_radio_2064_init(struct b43_wldev *dev)
 {
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                b43_radio_write(dev, 0x09c, 0x0020);
                b43_radio_write(dev, 0x105, 0x0008);
        } else {
@@ -535,7 +535,7 @@ static void b43_phy_lcn_tx_pwr_ctl_init(struct b43_wldev *dev)
        b43_mac_suspend(dev);
 
        if (!dev->phy.lcn->hw_pwr_ctl_capable) {
-               if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+               if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                        tx_gains.gm_gain = 4;
                        tx_gains.pga_gain = 12;
                        tx_gains.pad_gain = 12;
@@ -720,7 +720,7 @@ static int b43_phy_lcn_op_init(struct b43_wldev *dev)
        else
                B43_WARN_ON(1);
 
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
                b43_phy_lcn_tx_pwr_ctl_init(dev);
 
        b43_switch_channel(dev, dev->phy.channel);
@@ -779,7 +779,7 @@ static int b43_phy_lcn_op_switch_channel(struct b43_wldev *dev,
        enum nl80211_channel_type channel_type =
                cfg80211_get_chandef_type(&dev->wl->hw->conf.chandef);
 
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                if ((new_channel < 1) || (new_channel > 14))
                        return -EINVAL;
        } else {
@@ -791,7 +791,7 @@ static int b43_phy_lcn_op_switch_channel(struct b43_wldev *dev,
 
 static unsigned int b43_phy_lcn_op_get_default_chan(struct b43_wldev *dev)
 {
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
                return 1;
        return 36;
 }
index 058a9f2320503ef623748ed595ccbfdb8dbd9414..6922cbb99a044e253c3d474efde06e07fc94e956 100644 (file)
@@ -46,7 +46,7 @@ static inline u16 channel2freq_lp(u8 channel)
 
 static unsigned int b43_lpphy_op_get_default_chan(struct b43_wldev *dev)
 {
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
                return 1;
        return 36;
 }
@@ -91,7 +91,7 @@ static void lpphy_read_band_sprom(struct b43_wldev *dev)
        u32 ofdmpo;
        int i;
 
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                lpphy->tx_isolation_med_band = sprom->tri2g;
                lpphy->bx_arch = sprom->bxa2g;
                lpphy->rx_pwr_offset = sprom->rxpo2g;
@@ -174,7 +174,7 @@ static void lpphy_adjust_gain_table(struct b43_wldev *dev, u32 freq)
 
        B43_WARN_ON(dev->phy.rev >= 2);
 
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
                isolation = lpphy->tx_isolation_med_band;
        else if (freq <= 5320)
                isolation = lpphy->tx_isolation_low_band;
@@ -238,7 +238,7 @@ static void lpphy_baseband_rev0_1_init(struct b43_wldev *dev)
        b43_phy_maskset(dev, B43_LPPHY_INPUT_PWRDB,
                        0xFF00, lpphy->rx_pwr_offset);
        if ((sprom->boardflags_lo & B43_BFL_FEM) &&
-          ((b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ||
+          ((b43_current_band(dev->wl) == NL80211_BAND_5GHZ) ||
           (sprom->boardflags_hi & B43_BFH_PAREF))) {
                ssb_pmu_set_ldo_voltage(&bus->chipco, LDO_PAREF, 0x28);
                ssb_pmu_set_ldo_paref(&bus->chipco, true);
@@ -280,7 +280,7 @@ static void lpphy_baseband_rev0_1_init(struct b43_wldev *dev)
                b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_7, 0xC0FF, 0x0900);
                b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_8, 0xFFC0, 0x000A);
                b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_8, 0xC0FF, 0x0B00);
-       } else if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ ||
+       } else if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ ||
                   (dev->dev->board_type == SSB_BOARD_BU4312) ||
                   (dev->phy.rev == 0 && (sprom->boardflags_lo & B43_BFL_FEM))) {
                b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xFFC0, 0x0001);
@@ -326,7 +326,7 @@ static void lpphy_baseband_rev0_1_init(struct b43_wldev *dev)
                //FIXME the Broadcom driver caches & delays this HF write!
                b43_hf_write(dev, b43_hf_read(dev) | B43_HF_PR45960W);
        }
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                b43_phy_set(dev, B43_LPPHY_LP_PHY_CTL, 0x8000);
                b43_phy_set(dev, B43_LPPHY_CRSGAIN_CTL, 0x0040);
                b43_phy_maskset(dev, B43_LPPHY_MINPWR_LEVEL, 0x00FF, 0xA400);
@@ -466,7 +466,7 @@ static void lpphy_baseband_rev2plus_init(struct b43_wldev *dev)
                b43_lptab_write(dev, B43_LPTAB16(0x08, 0x12), 0x40);
        }
 
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                b43_phy_set(dev, B43_LPPHY_CRSGAIN_CTL, 0x40);
                b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xF0FF, 0xB00);
                b43_phy_maskset(dev, B43_LPPHY_SYNCPEAKCNT, 0xFFF8, 0x6);
@@ -547,7 +547,7 @@ static void lpphy_2062_init(struct b43_wldev *dev)
                b43_radio_write(dev, B2062_S_BG_CTL1,
                        (b43_radio_read(dev, B2062_N_COMM2) >> 1) | 0x80);
        }
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
                b43_radio_set(dev, B2062_N_TSSI_CTL0, 0x1);
        else
                b43_radio_mask(dev, B2062_N_TSSI_CTL0, ~0x1);
@@ -746,7 +746,7 @@ static void lpphy_clear_deaf(struct b43_wldev *dev, bool user)
                lpphy->crs_sys_disable = false;
 
        if (!lpphy->crs_usr_disable && !lpphy->crs_sys_disable) {
-               if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+               if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
                        b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL,
                                        0xFF1F, 0x60);
                else
@@ -807,7 +807,7 @@ static void lpphy_disable_rx_gain_override(struct b43_wldev *dev)
        b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFBF);
        if (dev->phy.rev >= 2) {
                b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFEFF);
-               if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+               if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                        b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFBFF);
                        b43_phy_mask(dev, B43_PHY_OFDM(0xE5), 0xFFF7);
                }
@@ -823,7 +823,7 @@ static void lpphy_enable_rx_gain_override(struct b43_wldev *dev)
        b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x40);
        if (dev->phy.rev >= 2) {
                b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x100);
-               if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+               if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                        b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x400);
                        b43_phy_set(dev, B43_PHY_OFDM(0xE5), 0x8);
                }
@@ -951,7 +951,7 @@ static void lpphy_rev2plus_set_rx_gain(struct b43_wldev *dev, u32 gain)
                        0xFBFF, ext_lna << 10);
        b43_phy_write(dev, B43_LPPHY_RX_GAIN_CTL_OVERRIDE_VAL, low_gain);
        b43_phy_maskset(dev, B43_LPPHY_AFE_DDFS, 0xFFF0, high_gain);
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                tmp = (gain >> 2) & 0x3;
                b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2_VAL,
                                0xE7FF, tmp<<11);
@@ -1344,7 +1344,7 @@ static void lpphy_calibrate_rc(struct b43_wldev *dev)
        if (dev->phy.rev >= 2) {
                lpphy_rev2plus_rc_calib(dev);
        } else if (!lpphy->rc_cap) {
-               if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+               if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
                        lpphy_rev0_1_rc_calib(dev);
        } else {
                lpphy_set_rc_cap(dev);
@@ -1548,7 +1548,7 @@ static void lpphy_tx_pctl_init_sw(struct b43_wldev *dev)
 {
        struct lpphy_tx_gains gains;
 
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                gains.gm = 4;
                gains.pad = 12;
                gains.pga = 12;
@@ -1902,7 +1902,7 @@ static int lpphy_rx_iq_cal(struct b43_wldev *dev, bool noise, bool tx,
 
        lpphy_set_trsw_over(dev, tx, rx);
 
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x8);
                b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0,
                                0xFFF7, pa << 3);
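
Everything above (and most of the b43 hunks that follow) belongs to the tree-wide removal of mac80211's wrapper band enum in favour of nl80211's. The swap is purely mechanical because the old enumerators were defined as aliases of the new ones, so every comparison, switch case and table index keeps its value. A standalone sketch of that equivalence (the enumerator list mirrors include/uapi/linux/nl80211.h of this era; the main() is illustrative only):

#include <assert.h>
#include <stdio.h>

/* The nl80211 enum the drivers now use directly. */
enum nl80211_band {
	NL80211_BAND_2GHZ,
	NL80211_BAND_5GHZ,
	NL80211_BAND_60GHZ,

	NUM_NL80211_BANDS,
};

/* The old mac80211 wrapper, reproduced here only to show why the
 * rename is value-preserving: each enumerator aliased its nl80211
 * counterpart. */
enum ieee80211_band {
	IEEE80211_BAND_2GHZ = NL80211_BAND_2GHZ,
	IEEE80211_BAND_5GHZ = NL80211_BAND_5GHZ,
	IEEE80211_BAND_60GHZ = NL80211_BAND_60GHZ,
};

int main(void)
{
	/* Every "== IEEE80211_BAND_xGHZ" test in the hunks compiles
	 * to the same comparison after the rename. */
	assert(IEEE80211_BAND_2GHZ == NL80211_BAND_2GHZ);
	assert(IEEE80211_BAND_5GHZ == NL80211_BAND_5GHZ);
	printf("wrapper and nl80211 band values are identical\n");
	return 0;
}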
index 9f0bcf3b8414c7cb11713158f4f0fc485df07618..a5557d70689f417144ecc46cfeb17ee5df926f63 100644 (file)
@@ -105,9 +105,9 @@ enum n_rail_type {
 
 static inline bool b43_nphy_ipa(struct b43_wldev *dev)
 {
-       enum ieee80211_band band = b43_current_band(dev->wl);
-       return ((dev->phy.n->ipa2g_on && band == IEEE80211_BAND_2GHZ) ||
-               (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ));
+       enum nl80211_band band = b43_current_band(dev->wl);
+       return ((dev->phy.n->ipa2g_on && band == NL80211_BAND_2GHZ) ||
+               (dev->phy.n->ipa5g_on && band == NL80211_BAND_5GHZ));
 }
 
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCoreGetState */
@@ -357,7 +357,7 @@ static void b43_nphy_rf_ctl_intc_override_rev7(struct b43_wldev *dev,
                        break;
                case N_INTC_OVERRIDE_PA:
                        tmp = 0x0030;
-                       if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+                       if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
                                val = value << 5;
                        else
                                val = value << 4;
@@ -365,7 +365,7 @@ static void b43_nphy_rf_ctl_intc_override_rev7(struct b43_wldev *dev,
                        b43_phy_set(dev, reg, 0x1000);
                        break;
                case N_INTC_OVERRIDE_EXT_LNA_PU:
-                       if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+                       if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
                                tmp = 0x0001;
                                tmp2 = 0x0004;
                                val = value;
@@ -378,7 +378,7 @@ static void b43_nphy_rf_ctl_intc_override_rev7(struct b43_wldev *dev,
                        b43_phy_mask(dev, reg, ~tmp2);
                        break;
                case N_INTC_OVERRIDE_EXT_LNA_GAIN:
-                       if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+                       if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
                                tmp = 0x0002;
                                tmp2 = 0x0008;
                                val = value << 1;
@@ -465,7 +465,7 @@ static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev,
                        }
                        break;
                case N_INTC_OVERRIDE_PA:
-                       if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+                       if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
                                tmp = 0x0020;
                                val = value << 5;
                        } else {
@@ -475,7 +475,7 @@ static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev,
                        b43_phy_maskset(dev, reg, ~tmp, val);
                        break;
                case N_INTC_OVERRIDE_EXT_LNA_PU:
-                       if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+                       if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
                                tmp = 0x0001;
                                val = value;
                        } else {
@@ -485,7 +485,7 @@ static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev,
                        b43_phy_maskset(dev, reg, ~tmp, val);
                        break;
                case N_INTC_OVERRIDE_EXT_LNA_GAIN:
-                       if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+                       if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
                                tmp = 0x0002;
                                val = value << 1;
                        } else {
@@ -600,7 +600,7 @@ static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
                b43_nphy_stay_in_carrier_search(dev, 1);
 
        if (nphy->gain_boost) {
-               if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+               if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                        gain[0] = 6;
                        gain[1] = 6;
                } else {
@@ -736,7 +736,7 @@ static void b43_radio_2057_setup(struct b43_wldev *dev,
        switch (phy->radio_rev) {
        case 0 ... 4:
        case 6:
-               if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+               if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                        b43_radio_write(dev, R2057_RFPLL_LOOPFILTER_R1, 0x3f);
                        b43_radio_write(dev, R2057_CP_KPD_IDAC, 0x3f);
                        b43_radio_write(dev, R2057_RFPLL_LOOPFILTER_C1, 0x8);
@@ -751,7 +751,7 @@ static void b43_radio_2057_setup(struct b43_wldev *dev,
        case 9: /* e.g. PHY rev 16 */
                b43_radio_write(dev, R2057_LOGEN_PTAT_RESETS, 0x20);
                b43_radio_write(dev, R2057_VCOBUF_IDACS, 0x18);
-               if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+               if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
                        b43_radio_write(dev, R2057_LOGEN_PTAT_RESETS, 0x38);
                        b43_radio_write(dev, R2057_VCOBUF_IDACS, 0x0f);
 
@@ -775,7 +775,7 @@ static void b43_radio_2057_setup(struct b43_wldev *dev,
                break;
        }
 
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                u16 txmix2g_tune_boost_pu = 0;
                u16 pad2g_tune_pus = 0;
 
@@ -1135,7 +1135,7 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
 {
        struct b43_phy *phy = &dev->phy;
        struct ssb_sprom *sprom = dev->dev->bus_sprom;
-       enum ieee80211_band band = b43_current_band(dev->wl);
+       enum nl80211_band band = b43_current_band(dev->wl);
        u16 offset;
        u8 i;
        u16 bias, cbias;
@@ -1152,10 +1152,10 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
                 dev->dev->chip_pkg == BCMA_PKG_ID_BCM43224_FAB_SMIC);
 
        b43_chantab_radio_2056_upload(dev, e);
-       b2056_upload_syn_pll_cp2(dev, band == IEEE80211_BAND_5GHZ);
+       b2056_upload_syn_pll_cp2(dev, band == NL80211_BAND_5GHZ);
 
        if (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR &&
-           b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+           b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
                b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
                if (dev->dev->chip_id == BCMA_CHIP_ID_BCM4716 ||
@@ -1168,21 +1168,21 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
                }
        }
        if (sprom->boardflags2_hi & B43_BFH2_GPLL_WAR2 &&
-           b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+           b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1f);
                b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1f);
                b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x0b);
                b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x20);
        }
        if (sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
-           b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+           b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
                b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
                b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
                b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x05);
                b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x0C);
        }
 
-       if (dev->phy.n->ipa2g_on && band == IEEE80211_BAND_2GHZ) {
+       if (dev->phy.n->ipa2g_on && band == NL80211_BAND_2GHZ) {
                for (i = 0; i < 2; i++) {
                        offset = i ? B2056_TX1 : B2056_TX0;
                        if (dev->phy.rev >= 5) {
@@ -1244,7 +1244,7 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
                        }
                        b43_radio_write(dev, offset | B2056_TX_PA_SPARE1, 0xee);
                }
-       } else if (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ) {
+       } else if (dev->phy.n->ipa5g_on && band == NL80211_BAND_5GHZ) {
                u16 freq = phy->chandef->chan->center_freq;
                if (freq < 5100) {
                        paa_boost = 0xA;
@@ -1501,7 +1501,7 @@ static void b43_radio_init2055(struct b43_wldev *dev)
                /* Follow wl, not specs. Do not force uploading all regs */
                b2055_upload_inittab(dev, 0, 0);
        } else {
-               bool ghz5 = b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ;
+               bool ghz5 = b43_current_band(dev->wl) == NL80211_BAND_5GHZ;
                b2055_upload_inittab(dev, ghz5, 0);
        }
        b43_radio_init2055_post(dev);
@@ -1785,7 +1785,7 @@ static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code,
                                b43_phy_maskset(dev, reg, 0xFFC3, 0);
 
                                if (rssi_type == N_RSSI_W1)
-                                       val = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ? 4 : 8;
+                                       val = (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) ? 4 : 8;
                                else if (rssi_type == N_RSSI_W2)
                                        val = 16;
                                else
@@ -1813,12 +1813,12 @@ static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code,
 
                                if (rssi_type != N_RSSI_IQ &&
                                    rssi_type != N_RSSI_TBD) {
-                                       enum ieee80211_band band =
+                                       enum nl80211_band band =
                                                b43_current_band(dev->wl);
 
                                        if (dev->phy.rev < 7) {
                                                if (b43_nphy_ipa(dev))
-                                                       val = (band == IEEE80211_BAND_5GHZ) ? 0xC : 0xE;
+                                                       val = (band == NL80211_BAND_5GHZ) ? 0xC : 0xE;
                                                else
                                                        val = 0x11;
                                                reg = (i == 0) ? B2056_TX0 : B2056_TX1;
@@ -2120,7 +2120,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
                                                     1, 0, false);
                b43_nphy_rf_ctl_override_rev7(dev, 0x80, 1, 0, false, 0);
                b43_nphy_rf_ctl_override_rev7(dev, 0x40, 1, 0, false, 0);
-               if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+               if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
                        b43_nphy_rf_ctl_override_rev7(dev, 0x20, 0, 0, false,
                                                      0);
                        b43_nphy_rf_ctl_override_rev7(dev, 0x10, 1, 0, false,
@@ -2136,7 +2136,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
                b43_nphy_rf_ctl_override(dev, 0x2, 1, 0, false);
                b43_nphy_rf_ctl_override(dev, 0x80, 1, 0, false);
                b43_nphy_rf_ctl_override(dev, 0x40, 1, 0, false);
-               if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+               if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
                        b43_nphy_rf_ctl_override(dev, 0x20, 0, 0, false);
                        b43_nphy_rf_ctl_override(dev, 0x10, 1, 0, false);
                } else {
@@ -2257,7 +2257,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
                b43_phy_write(dev, regs_to_store[i], saved_regs_phy[i]);
 
        /* Store for future configuration */
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G;
                rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_2G;
        } else {
@@ -2289,7 +2289,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
        rssical_phy_regs[11] = b43_phy_read(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y);
 
        /* Remember for which channel we store configuration */
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
                nphy->rssical_chanspec_2G.center_freq = phy->chandef->chan->center_freq;
        else
                nphy->rssical_chanspec_5G.center_freq = phy->chandef->chan->center_freq;
@@ -2336,7 +2336,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, enum n_rssi_type type)
        b43_nphy_read_clip_detection(dev, clip_state);
        b43_nphy_write_clip_detection(dev, clip_off);
 
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+       if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
                override = 0x140;
        else
                override = 0x110;
@@ -2629,7 +2629,7 @@ static void b43_nphy_gain_ctl_workarounds_rev1_2(struct b43_wldev *dev)
        b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
 
        if (nphy->gain_boost) {
-               if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ &&
+               if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ &&
                    b43_is_40mhz(dev))
                        code = 4;
                else
@@ -2688,7 +2688,7 @@ static void b43_nphy_gain_ctl_workarounds_rev1_2(struct b43_wldev *dev)
                ~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF,
                0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT);
 
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
                b43_phy_maskset(dev, B43_PHY_N(0xC5D), 0xFF80, 4);
 }
 
@@ -2803,7 +2803,7 @@ static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev)
        scap_val = b43_radio_read(dev, R2057_RCCAL_SCAP_VAL);
 
        if (b43_nphy_ipa(dev)) {
-               bool ghz2 = b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ;
+               bool ghz2 = b43_current_band(dev->wl) == NL80211_BAND_2GHZ;
 
                switch (phy->radio_rev) {
                case 5:
@@ -2831,7 +2831,7 @@ static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev)
                                bcap_val_11b[core] = bcap_val;
                                lpf_ofdm_20mhz[core] = 4;
                                lpf_11b[core] = 1;
-                               if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+                               if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                                        scap_val_11n_20[core] = 0xc;
                                        bcap_val_11n_20[core] = 0xc;
                                        scap_val_11n_40[core] = 0xa;
@@ -2982,7 +2982,7 @@ static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev)
                        conv = 0x7f;
                        filt = 0xee;
                }
-               if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+               if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                        for (core = 0; core < 2; core++) {
                                if (core == 0) {
                                        b43_radio_write(dev, 0x5F, bias);
@@ -2998,7 +2998,7 @@ static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev)
        }
 
        if (b43_nphy_ipa(dev)) {
-               if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+               if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                        if (phy->radio_rev == 3 || phy->radio_rev == 4 ||
                            phy->radio_rev == 6) {
                                for (core = 0; core < 2; core++) {
@@ -3221,7 +3221,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
                                         ARRAY_SIZE(rx2tx_events));
        }
 
-       tmp16 = (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ?
+       tmp16 = (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) ?
                0x2 : 0x9C40;
        b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, tmp16);
 
@@ -3240,7 +3240,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
        b43_ntab_write(dev, B43_NTAB16(8, 0), 2);
        b43_ntab_write(dev, B43_NTAB16(8, 16), 2);
 
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
                pdet_range = sprom->fem.ghz2.pdet_range;
        else
                pdet_range = sprom->fem.ghz5.pdet_range;
@@ -3249,7 +3249,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
        switch (pdet_range) {
        case 3:
                if (!(dev->phy.rev >= 4 &&
-                     b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ))
+                     b43_current_band(dev->wl) == NL80211_BAND_2GHZ))
                        break;
                /* FALL THROUGH */
        case 0:
@@ -3261,7 +3261,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
                break;
        case 2:
                if (dev->phy.rev >= 6) {
-                       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+                       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
                                vmid[3] = 0x94;
                        else
                                vmid[3] = 0x8e;
@@ -3277,7 +3277,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
                break;
        case 4:
        case 5:
-               if (b43_current_band(dev->wl) != IEEE80211_BAND_2GHZ) {
+               if (b43_current_band(dev->wl) != NL80211_BAND_2GHZ) {
                        if (pdet_range == 4) {
                                vmid[3] = 0x8e;
                                tmp16 = 0x96;
@@ -3322,9 +3322,9 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
        /* N PHY WAR TX Chain Update with hw_phytxchain as argument */
 
        if ((sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
-            b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ||
+            b43_current_band(dev->wl) == NL80211_BAND_5GHZ) ||
            (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR &&
-            b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ))
+            b43_current_band(dev->wl) == NL80211_BAND_2GHZ))
                tmp32 = 0x00088888;
        else
                tmp32 = 0x88888888;
@@ -3333,7 +3333,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
        b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32);
 
        if (dev->phy.rev == 4 &&
-           b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+           b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
                b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC,
                                0x70);
                b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC,
@@ -3376,7 +3376,7 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
                delays1[5] = 0x14;
        }
 
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ &&
+       if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ &&
            nphy->band5g_pwrgain) {
                b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8);
                b43_radio_mask(dev, B2055_C2_TX_RF_SPARE, ~0x8);
@@ -3451,7 +3451,7 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
        struct b43_phy *phy = &dev->phy;
        struct b43_phy_n *nphy = phy->n;
 
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+       if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
                b43_nphy_classifier(dev, 1, 0);
        else
                b43_nphy_classifier(dev, 1, 1);
@@ -3586,7 +3586,7 @@ static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core,
                gain = (target.pad[core]) | (target.pga[core] << 4) |
                        (target.txgm[core] << 8);
 
-               indx = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ?
+               indx = (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) ?
                        1 : 0;
                for (i = 0; i < 9; i++)
                        if (tbl_iqcal_gainparams[indx][i][0] == gain)
@@ -3614,7 +3614,7 @@ static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable)
        struct b43_phy_n *nphy = dev->phy.n;
        u8 i;
        u16 bmask, val, tmp;
-       enum ieee80211_band band = b43_current_band(dev->wl);
+       enum nl80211_band band = b43_current_band(dev->wl);
 
        if (nphy->hang_avoid)
                b43_nphy_stay_in_carrier_search(dev, 1);
@@ -3679,7 +3679,7 @@ static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable)
                }
                b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD, ~(bmask), val);
 
-               if (band == IEEE80211_BAND_5GHZ) {
+               if (band == NL80211_BAND_5GHZ) {
                        if (phy->rev >= 19) {
                                /* TODO */
                        } else if (phy->rev >= 7) {
@@ -3770,7 +3770,7 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
                txpi[0] = 72;
                txpi[1] = 72;
        } else {
-               if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+               if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                        txpi[0] = sprom->txpid2g[0];
                        txpi[1] = sprom->txpid2g[1];
                } else if (freq >= 4900 && freq < 5100) {
@@ -3868,7 +3868,7 @@ static void b43_nphy_ipa_internal_tssi_setup(struct b43_wldev *dev)
        } else if (phy->rev >= 7) {
                for (core = 0; core < 2; core++) {
                        r = core ? 0x190 : 0x170;
-                       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+                       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                                b43_radio_write(dev, r + 0x5, 0x5);
                                b43_radio_write(dev, r + 0x9, 0xE);
                                if (phy->rev != 5)
@@ -3892,7 +3892,7 @@ static void b43_nphy_ipa_internal_tssi_setup(struct b43_wldev *dev)
                        b43_radio_write(dev, r + 0xC, 0);
                }
        } else {
-               if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+               if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
                        b43_radio_write(dev, B2056_SYN_RESERVED_ADDR31, 0x128);
                else
                        b43_radio_write(dev, B2056_SYN_RESERVED_ADDR31, 0x80);
@@ -3909,7 +3909,7 @@ static void b43_nphy_ipa_internal_tssi_setup(struct b43_wldev *dev)
                        b43_radio_write(dev, r | B2056_TX_TSSI_MISC1, 8);
                        b43_radio_write(dev, r | B2056_TX_TSSI_MISC2, 0);
                        b43_radio_write(dev, r | B2056_TX_TSSI_MISC3, 0);
-                       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+                       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                                b43_radio_write(dev, r | B2056_TX_TX_SSI_MASTER,
                                                0x5);
                                if (phy->rev != 5)
@@ -4098,7 +4098,7 @@ static void b43_nphy_tx_power_ctl_setup(struct b43_wldev *dev)
                b0[0] = b0[1] = 5612;
                b1[0] = b1[1] = -1393;
        } else {
-               if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+               if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                        for (c = 0; c < 2; c++) {
                                idle[c] = nphy->pwr_ctl_info[c].idle_tssi_2g;
                                target[c] = sprom->core_pwr_info[c].maxpwr_2g;
@@ -4153,11 +4153,11 @@ static void b43_nphy_tx_power_ctl_setup(struct b43_wldev *dev)
                        for (c = 0; c < 2; c++) {
                                r = c ? 0x190 : 0x170;
                                if (b43_nphy_ipa(dev))
-                                       b43_radio_write(dev, r + 0x9, (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ? 0xE : 0xC);
+                                       b43_radio_write(dev, r + 0x9, (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) ? 0xE : 0xC);
                        }
                } else {
                        if (b43_nphy_ipa(dev)) {
-                               tmp = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ? 0xC : 0xE;
+                               tmp = (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) ? 0xC : 0xE;
                                b43_radio_write(dev,
                                        B2056_TX0 | B2056_TX_TX_SSI_MUX, tmp);
                                b43_radio_write(dev,
@@ -4267,13 +4267,13 @@ static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev)
                } else if (phy->rev >= 7) {
                        pga_gain = (table[i] >> 24) & 0xf;
                        pad_gain = (table[i] >> 19) & 0x1f;
-                       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+                       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
                                rfpwr_offset = rf_pwr_offset_table[pad_gain];
                        else
                                rfpwr_offset = rf_pwr_offset_table[pga_gain];
                } else {
                        pga_gain = (table[i] >> 24) & 0xF;
-                       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+                       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
                                rfpwr_offset = b43_ntab_papd_pga_gain_delta_ipa_2g[pga_gain];
                        else
                                rfpwr_offset = 0; /* FIXME */
@@ -4288,7 +4288,7 @@ static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev)
 static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable)
 {
        struct b43_phy_n *nphy = dev->phy.n;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        u16 tmp;
 
        if (!enable) {
@@ -4300,12 +4300,12 @@ static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable)
                if (dev->phy.rev >= 7) {
                        tmp = 0x1480;
                } else if (dev->phy.rev >= 3) {
-                       if (band == IEEE80211_BAND_5GHZ)
+                       if (band == NL80211_BAND_5GHZ)
                                tmp = 0x600;
                        else
                                tmp = 0x480;
                } else {
-                       if (band == IEEE80211_BAND_5GHZ)
+                       if (band == NL80211_BAND_5GHZ)
                                tmp = 0x180;
                        else
                                tmp = 0x120;
@@ -4734,7 +4734,7 @@ static void b43_nphy_restore_rssi_cal(struct b43_wldev *dev)
        u16 *rssical_radio_regs = NULL;
        u16 *rssical_phy_regs = NULL;
 
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                if (!nphy->rssical_chanspec_2G.center_freq)
                        return;
                rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G;
@@ -4804,7 +4804,7 @@ static void b43_nphy_tx_cal_radio_setup_rev7(struct b43_wldev *dev)
                save[off + 7] = b43_radio_read(dev, r + R2057_TX0_TSSIG);
                save[off + 8] = b43_radio_read(dev, r + R2057_TX0_TSSI_MISC1);
 
-               if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+               if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
                        b43_radio_write(dev, r + R2057_TX0_TX_SSI_MASTER, 0xA);
                        b43_radio_write(dev, r + R2057_TX0_IQCAL_VCM_HG, 0x43);
                        b43_radio_write(dev, r + R2057_TX0_IQCAL_IDAC, 0x55);
@@ -4864,7 +4864,7 @@ static void b43_nphy_tx_cal_radio_setup(struct b43_wldev *dev)
                save[offset + 9] = b43_radio_read(dev, B2055_XOMISC);
                save[offset + 10] = b43_radio_read(dev, B2055_PLL_LFC1);
 
-               if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+               if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
                        b43_radio_write(dev, tmp | B2055_CAL_RVARCTL, 0x0A);
                        b43_radio_write(dev, tmp | B2055_CAL_LPOCTL, 0x40);
                        b43_radio_write(dev, tmp | B2055_CAL_TS, 0x55);
@@ -5005,7 +5005,7 @@ static void b43_nphy_int_pa_set_tx_dig_filters(struct b43_wldev *dev)
                b43_nphy_pa_set_tx_dig_filter(dev, 0x186,
                                              tbl_tx_filter_coef_rev4[3]);
        } else {
-               if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+               if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
                        b43_nphy_pa_set_tx_dig_filter(dev, 0x186,
                                                      tbl_tx_filter_coef_rev4[5]);
                if (dev->phy.channel == 14)
@@ -5185,7 +5185,7 @@ static void b43_nphy_tx_cal_phy_setup(struct b43_wldev *dev)
                                                              false, 0);
                        } else if (phy->rev == 7) {
                                b43_radio_maskset(dev, R2057_OVR_REG0, 1 << 4, 1 << 4);
-                               if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+                               if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                                        b43_radio_maskset(dev, R2057_PAD2G_TUNE_PUS_CORE0, ~1, 0);
                                        b43_radio_maskset(dev, R2057_PAD2G_TUNE_PUS_CORE1, ~1, 0);
                                } else {
@@ -5210,7 +5210,7 @@ static void b43_nphy_tx_cal_phy_setup(struct b43_wldev *dev)
                b43_ntab_write(dev, B43_NTAB16(8, 18), tmp);
                regs[5] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
                regs[6] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
-               if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+               if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
                        tmp = 0x0180;
                else
                        tmp = 0x0120;
@@ -5233,7 +5233,7 @@ static void b43_nphy_save_cal(struct b43_wldev *dev)
        if (nphy->hang_avoid)
                b43_nphy_stay_in_carrier_search(dev, 1);
 
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_2G;
                txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_2G;
                iqcal_chanspec = &nphy->iqcal_chanspec_2G;
@@ -5304,7 +5304,7 @@ static void b43_nphy_restore_cal(struct b43_wldev *dev)
        u16 *txcal_radio_regs = NULL;
        struct b43_phy_n_iq_comp *rxcal_coeffs = NULL;
 
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                if (!nphy->iqcal_chanspec_2G.center_freq)
                        return;
                table = nphy->cal_cache.txcal_coeffs_2G;
@@ -5332,7 +5332,7 @@ static void b43_nphy_restore_cal(struct b43_wldev *dev)
        if (dev->phy.rev < 2)
                b43_nphy_tx_iq_workaround(dev);
 
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_2G;
                rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_2G;
        } else {
@@ -5422,7 +5422,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
 
        phy6or5x = dev->phy.rev >= 6 ||
                (dev->phy.rev == 5 && nphy->ipa2g_on &&
-               b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ);
+               b43_current_band(dev->wl) == NL80211_BAND_2GHZ);
        if (phy6or5x) {
                if (b43_is_40mhz(dev)) {
                        b43_ntab_write_bulk(dev, B43_NTAB16(15, 0), 18,
@@ -5657,7 +5657,7 @@ static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev,
        u16 tmp[6];
        u16 uninitialized_var(cur_hpf1), uninitialized_var(cur_hpf2), cur_lna;
        u32 real, imag;
-       enum ieee80211_band band;
+       enum nl80211_band band;
 
        u8 use;
        u16 cur_hpf;
@@ -5712,18 +5712,18 @@ static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev,
                band = b43_current_band(dev->wl);
 
                if (nphy->rxcalparams & 0xFF000000) {
-                       if (band == IEEE80211_BAND_5GHZ)
+                       if (band == NL80211_BAND_5GHZ)
                                b43_phy_write(dev, rfctl[0], 0x140);
                        else
                                b43_phy_write(dev, rfctl[0], 0x110);
                } else {
-                       if (band == IEEE80211_BAND_5GHZ)
+                       if (band == NL80211_BAND_5GHZ)
                                b43_phy_write(dev, rfctl[0], 0x180);
                        else
                                b43_phy_write(dev, rfctl[0], 0x120);
                }
 
-               if (band == IEEE80211_BAND_5GHZ)
+               if (band == NL80211_BAND_5GHZ)
                        b43_phy_write(dev, rfctl[1], 0x148);
                else
                        b43_phy_write(dev, rfctl[1], 0x114);
@@ -5919,7 +5919,7 @@ static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev,
 #if 0
        /* Some extra gains */
        hw_gain = 6; /* N-PHY specific */
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
                hw_gain += sprom->antenna_gain.a0;
        else
                hw_gain += sprom->antenna_gain.a1;
@@ -6043,7 +6043,7 @@ static int b43_phy_initn(struct b43_wldev *dev)
        u8 tx_pwr_state;
        struct nphy_txgains target;
        u16 tmp;
-       enum ieee80211_band tmp2;
+       enum nl80211_band tmp2;
        bool do_rssi_cal;
 
        u16 clip[2];
@@ -6051,7 +6051,7 @@ static int b43_phy_initn(struct b43_wldev *dev)
 
        if ((dev->phy.rev >= 3) &&
           (sprom->boardflags_lo & B43_BFL_EXTLNA) &&
-          (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)) {
+          (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)) {
                switch (dev->dev->bus_type) {
 #ifdef CONFIG_B43_BCMA
                case B43_BUS_BCMA:
@@ -6170,7 +6170,7 @@ static int b43_phy_initn(struct b43_wldev *dev)
 
        b43_nphy_classifier(dev, 0, 0);
        b43_nphy_read_clip_detection(dev, clip);
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
                b43_nphy_bphy_init(dev);
 
        tx_pwr_state = nphy->txpwrctrl;
@@ -6187,7 +6187,7 @@ static int b43_phy_initn(struct b43_wldev *dev)
 
        do_rssi_cal = false;
        if (phy->rev >= 3) {
-               if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+               if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
                        do_rssi_cal = !nphy->rssical_chanspec_2G.center_freq;
                else
                        do_rssi_cal = !nphy->rssical_chanspec_5G.center_freq;
@@ -6201,7 +6201,7 @@ static int b43_phy_initn(struct b43_wldev *dev)
        }
 
        if (!((nphy->measure_hold & 0x6) != 0)) {
-               if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+               if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
                        do_cal = !nphy->iqcal_chanspec_2G.center_freq;
                else
                        do_cal = !nphy->iqcal_chanspec_5G.center_freq;
@@ -6291,7 +6291,7 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
        int ch = new_channel->hw_value;
        u16 tmp16;
 
-       if (new_channel->band == IEEE80211_BAND_5GHZ) {
+       if (new_channel->band == NL80211_BAND_5GHZ) {
                /* Switch to 2 GHz for a moment to access B43_PHY_B_BBCFG */
                b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ);
 
@@ -6302,7 +6302,7 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
                            B43_PHY_B_BBCFG_RSTCCA | B43_PHY_B_BBCFG_RSTRX);
                b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16);
                b43_phy_set(dev, B43_NPHY_BANDCTL, B43_NPHY_BANDCTL_5GHZ);
-       } else if (new_channel->band == IEEE80211_BAND_2GHZ) {
+       } else if (new_channel->band == NL80211_BAND_2GHZ) {
                b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ);
                tmp16 = b43_read16(dev, B43_MMIO_PSM_PHY_HDR);
                b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16 | 4);
@@ -6319,7 +6319,7 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
                b43_phy_set(dev, B43_PHY_B_TEST, 0x0800);
        } else {
                b43_nphy_classifier(dev, 2, 2);
-               if (new_channel->band == IEEE80211_BAND_2GHZ)
+               if (new_channel->band == NL80211_BAND_2GHZ)
                        b43_phy_mask(dev, B43_PHY_B_TEST, ~0x840);
        }
 
@@ -6449,7 +6449,7 @@ static int b43_nphy_set_channel(struct b43_wldev *dev,
                        &(tabent_r7->phy_regs) : &(tabent_r7_2g->phy_regs);
 
                if (phy->radio_rev <= 4 || phy->radio_rev == 6) {
-                       tmp = (channel->band == IEEE80211_BAND_5GHZ) ? 2 : 0;
+                       tmp = (channel->band == NL80211_BAND_5GHZ) ? 2 : 0;
                        b43_radio_maskset(dev, R2057_TIA_CONFIG_CORE0, ~2, tmp);
                        b43_radio_maskset(dev, R2057_TIA_CONFIG_CORE1, ~2, tmp);
                }
@@ -6457,12 +6457,12 @@ static int b43_nphy_set_channel(struct b43_wldev *dev,
                b43_radio_2057_setup(dev, tabent_r7, tabent_r7_2g);
                b43_nphy_channel_setup(dev, phy_regs, channel);
        } else if (phy->rev >= 3) {
-               tmp = (channel->band == IEEE80211_BAND_5GHZ) ? 4 : 0;
+               tmp = (channel->band == NL80211_BAND_5GHZ) ? 4 : 0;
                b43_radio_maskset(dev, 0x08, 0xFFFB, tmp);
                b43_radio_2056_setup(dev, tabent_r3);
                b43_nphy_channel_setup(dev, &(tabent_r3->phy_regs), channel);
        } else {
-               tmp = (channel->band == IEEE80211_BAND_5GHZ) ? 0x0020 : 0x0050;
+               tmp = (channel->band == NL80211_BAND_5GHZ) ? 0x0020 : 0x0050;
                b43_radio_maskset(dev, B2055_MASTER1, 0xFF8F, tmp);
                b43_radio_2055_setup(dev, tabent_r2);
                b43_nphy_channel_setup(dev, &(tabent_r2->phy_regs), channel);
@@ -6692,7 +6692,7 @@ static int b43_nphy_op_switch_channel(struct b43_wldev *dev,
        enum nl80211_channel_type channel_type =
                cfg80211_get_chandef_type(&dev->wl->hw->conf.chandef);
 
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                if ((new_channel < 1) || (new_channel > 14))
                        return -EINVAL;
        } else {
@@ -6705,7 +6705,7 @@ static int b43_nphy_op_switch_channel(struct b43_wldev *dev,
 
 static unsigned int b43_nphy_op_get_default_chan(struct b43_wldev *dev)
 {
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
                return 1;
        return 36;
 }
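
The N-PHY hunks above follow one pattern: read the current band once, then branch per band and per PHY revision; b43_nphy_set_channel() in particular picks one of three radio programming paths (R2057 for rev >= 7, B2056 for rev >= 3, B2055 otherwise). A compact stand-in for that dispatch, with invented helper names that only echo the structure of the driver code:

#include <stdio.h>

enum nl80211_band { NL80211_BAND_2GHZ, NL80211_BAND_5GHZ };

/* Hypothetical stand-ins for the per-generation radio setup paths. */
static void setup_radio_2057(void) { puts("R2057 path (rev >= 7)"); }
static void setup_radio_2056(void) { puts("B2056 path (rev >= 3)"); }
static void setup_radio_2055(void) { puts("B2055 path (rev < 3)"); }

static void set_channel(int phy_rev, enum nl80211_band band)
{
	/* Band first decides a mode value, revision then decides which
	 * radio generation's programming sequence runs. */
	int mode_bits = (band == NL80211_BAND_5GHZ) ? 0x02 : 0x00;

	printf("mode bits 0x%02x\n", mode_bits);
	if (phy_rev >= 7)
		setup_radio_2057();
	else if (phy_rev >= 3)
		setup_radio_2056();
	else
		setup_radio_2055();
}

int main(void)
{
	set_channel(8, NL80211_BAND_5GHZ);
	set_channel(2, NL80211_BAND_2GHZ);
	return 0;
}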
index cff187c5616d23922bfd7e8d633c3a6f47942f01..ce01e1645df763952d4c8a8bc0498999eb9457ae 100644 (file)
@@ -560,7 +560,7 @@ void b2062_upload_init_table(struct b43_wldev *dev)
 
        for (i = 0; i < ARRAY_SIZE(b2062_init_tab); i++) {
                e = &b2062_init_tab[i];
-               if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+               if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                        if (!(e->flags & B206X_FLAG_G))
                                continue;
                        b43_radio_write(dev, e->offset, e->value_g);
@@ -579,7 +579,7 @@ void b2063_upload_init_table(struct b43_wldev *dev)
 
        for (i = 0; i < ARRAY_SIZE(b2063_init_tab); i++) {
                e = &b2063_init_tab[i];
-               if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+               if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                        if (!(e->flags & B206X_FLAG_G))
                                continue;
                        b43_radio_write(dev, e->offset, e->value_g);
@@ -2379,12 +2379,12 @@ static void lpphy_rev2plus_write_gain_table(struct b43_wldev *dev, int offset,
        tmp |= data.pga << 8;
        tmp |= data.gm;
        if (dev->phy.rev >= 3) {
-               if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+               if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
                        tmp |= 0x10 << 24;
                else
                        tmp |= 0x70 << 24;
        } else {
-               if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+               if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
                        tmp |= 0x14 << 24;
                else
                        tmp |= 0x7F << 24;
@@ -2423,7 +2423,7 @@ void lpphy_init_tx_gain_table(struct b43_wldev *dev)
                    (sprom->boardflags_lo & B43_BFL_HGPA))
                        lpphy_write_gain_table_bulk(dev, 0, 128,
                                        lpphy_rev0_nopa_tx_gain_table);
-               else if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+               else if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
                        lpphy_write_gain_table_bulk(dev, 0, 128,
                                        lpphy_rev0_2ghz_tx_gain_table);
                else
@@ -2435,7 +2435,7 @@ void lpphy_init_tx_gain_table(struct b43_wldev *dev)
                    (sprom->boardflags_lo & B43_BFL_HGPA))
                        lpphy_write_gain_table_bulk(dev, 0, 128,
                                        lpphy_rev1_nopa_tx_gain_table);
-               else if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+               else if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
                        lpphy_write_gain_table_bulk(dev, 0, 128,
                                        lpphy_rev1_2ghz_tx_gain_table);
                else
@@ -2446,7 +2446,7 @@ void lpphy_init_tx_gain_table(struct b43_wldev *dev)
                if (sprom->boardflags_hi & B43_BFH_NOPA)
                        lpphy_write_gain_table_bulk(dev, 0, 128,
                                        lpphy_rev2_nopa_tx_gain_table);
-               else if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+               else if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
                        lpphy_write_gain_table_bulk(dev, 0, 128,
                                        lpphy_rev2_2ghz_tx_gain_table);
                else
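
The b2062/b2063 upload loops above walk a shared init table and skip rows whose flags do not cover the current band, writing value_g or value_a accordingly. A self-contained sketch of that table shape (field names and rows are illustrative, not the driver's actual tables):

#include <stdio.h>

enum nl80211_band { NL80211_BAND_2GHZ, NL80211_BAND_5GHZ };

#define FLAG_G 0x01	/* row applies on 2.4 GHz */
#define FLAG_A 0x02	/* row applies on 5 GHz */

/* Shape of one init-table row: register offset, separate values per
 * band, applicability flags. */
struct init_entry {
	unsigned short offset;
	unsigned short value_g, value_a;
	unsigned char flags;
};

static const struct init_entry tab[] = {
	{ 0x10, 0x3f, 0x1f, FLAG_G | FLAG_A },
	{ 0x11, 0x80, 0x00, FLAG_G },
	{ 0x12, 0x00, 0x55, FLAG_A },
};

static void upload(enum nl80211_band band)
{
	for (size_t i = 0; i < sizeof(tab) / sizeof(tab[0]); i++) {
		const struct init_entry *e = &tab[i];

		if (band == NL80211_BAND_2GHZ) {
			if (!(e->flags & FLAG_G))
				continue;	/* skip 5 GHz-only rows */
			printf("reg 0x%02x <- 0x%02x\n", e->offset, e->value_g);
		} else {
			if (!(e->flags & FLAG_A))
				continue;	/* skip 2.4 GHz-only rows */
			printf("reg 0x%02x <- 0x%02x\n", e->offset, e->value_a);
		}
	}
}

int main(void)
{
	upload(NL80211_BAND_2GHZ);
	return 0;
}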
index b2f0d245bcf3a0e71fb96797c47f71ad0ca736db..44e0957a70ccc33ba38fda878b9f9b6778956f7f 100644 (file)
@@ -3502,7 +3502,7 @@ static void b43_nphy_tables_init_rev7_volatile(struct b43_wldev *dev)
                { 0x2, 0x18, 0x2 }, /* Core 1 */
        };
 
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+       if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
                antswlut = sprom->fem.ghz5.antswlut;
        else
                antswlut = sprom->fem.ghz2.antswlut;
@@ -3566,7 +3566,7 @@ static void b43_nphy_tables_init_rev3(struct b43_wldev *dev)
        struct ssb_sprom *sprom = dev->dev->bus_sprom;
        u8 antswlut;
 
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+       if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
                antswlut = sprom->fem.ghz5.antswlut;
        else
                antswlut = sprom->fem.ghz2.antswlut;
@@ -3651,7 +3651,7 @@ static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev)
 {
        struct b43_phy *phy = &dev->phy;
 
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                switch (phy->rev) {
                case 17:
                        if (phy->radio_rev == 14)
@@ -3698,17 +3698,17 @@ static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev)
 const u32 *b43_nphy_get_tx_gain_table(struct b43_wldev *dev)
 {
        struct b43_phy *phy = &dev->phy;
-       enum ieee80211_band band = b43_current_band(dev->wl);
+       enum nl80211_band band = b43_current_band(dev->wl);
        struct ssb_sprom *sprom = dev->dev->bus_sprom;
 
        if (dev->phy.rev < 3)
                return b43_ntab_tx_gain_rev0_1_2;
 
        /* rev 3+ */
-       if ((dev->phy.n->ipa2g_on && band == IEEE80211_BAND_2GHZ) ||
-           (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ)) {
+       if ((dev->phy.n->ipa2g_on && band == NL80211_BAND_2GHZ) ||
+           (dev->phy.n->ipa5g_on && band == NL80211_BAND_5GHZ)) {
                return b43_nphy_get_ipa_gain_table(dev);
-       } else if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+       } else if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
                switch (phy->rev) {
                case 6:
                case 5:
@@ -3746,7 +3746,7 @@ const s16 *b43_ntab_get_rf_pwr_offset_table(struct b43_wldev *dev)
 {
        struct b43_phy *phy = &dev->phy;
 
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                switch (phy->rev) {
                case 17:
                        if (phy->radio_rev == 14)
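
b43_nphy_get_tx_gain_table() above reduces to a three-way pick: a fixed table before PHY rev 3, the internal-PA tables when the matching ipa*_on flag is set for the current band, and per-revision external-PA tables otherwise. The same decision as a runnable stub, with placeholder table names standing in for the real arrays:

#include <stdbool.h>
#include <stdio.h>

enum nl80211_band { NL80211_BAND_2GHZ, NL80211_BAND_5GHZ };

static const char *pick_tx_gain_table(int phy_rev, enum nl80211_band band,
				      bool ipa2g_on, bool ipa5g_on)
{
	if (phy_rev < 3)
		return "rev0_1_2 table";
	if ((ipa2g_on && band == NL80211_BAND_2GHZ) ||
	    (ipa5g_on && band == NL80211_BAND_5GHZ))
		return "internal-PA table for this band";
	return band == NL80211_BAND_5GHZ ? "5 GHz external-PA table"
					 : "2.4 GHz external-PA table";
}

int main(void)
{
	puts(pick_tx_gain_table(5, NL80211_BAND_2GHZ, true, false));
	return 0;
}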
index e347b8d80ea49fca5f257a686f4af6206ff4bb3c..704ef1bcb5b17f0a0ff9a8efcd6e7de65f6fe4bc 100644 (file)
@@ -701,7 +701,7 @@ void b43_phy_lcn_tables_init(struct b43_wldev *dev)
 
        b43_phy_lcn_upload_static_tables(dev);
 
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+       if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
                if (sprom->boardflags_lo & B43_BFL_FEM)
                        b43_phy_lcn_load_tx_gain_tab(dev,
                                b43_lcntab_tx_gain_tbl_2ghz_ext_pa_rev0);
index 426dc13c44cd6dff0504151e52b59bcdb87f06a0..f6201264de491b9c03dd71117a733121abb211a1 100644 (file)
@@ -803,7 +803,7 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
        chanid = (chanstat & B43_RX_CHAN_ID) >> B43_RX_CHAN_ID_SHIFT;
        switch (chanstat & B43_RX_CHAN_PHYTYPE) {
        case B43_PHYTYPE_A:
-               status.band = IEEE80211_BAND_5GHZ;
+               status.band = NL80211_BAND_5GHZ;
                B43_WARN_ON(1);
                /* FIXME: We don't really know which value the "chanid" contains.
                 *        So the following assignment might be wrong. */
@@ -811,7 +811,7 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
                        ieee80211_channel_to_frequency(chanid, status.band);
                break;
        case B43_PHYTYPE_G:
-               status.band = IEEE80211_BAND_2GHZ;
+               status.band = NL80211_BAND_2GHZ;
                /* Somewhere between 478.104 and 508.1084 firmware for G-PHY
                 * has been modified to be compatible with N-PHY and others.
                 */
@@ -826,9 +826,9 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
                /* chanid is the SHM channel cookie. Which is the plain
                 * channel number in b43. */
                if (chanstat & B43_RX_CHAN_5GHZ)
-                       status.band = IEEE80211_BAND_5GHZ;
+                       status.band = NL80211_BAND_5GHZ;
                else
-                       status.band = IEEE80211_BAND_2GHZ;
+                       status.band = NL80211_BAND_2GHZ;
                status.freq =
                        ieee80211_channel_to_frequency(chanid, status.band);
                break;
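
The RX path above only recovers a band and a channel cookie from the PHY status word and leaves the MHz arithmetic to ieee80211_channel_to_frequency(). That arithmetic is the standard IEEE 802.11 channel numbering; a simplified stand-in (the kernel helper additionally covers 4.9 GHz and 60 GHz corner cases):

#include <stdio.h>

enum nl80211_band { NL80211_BAND_2GHZ, NL80211_BAND_5GHZ };

/* Returns the centre frequency in MHz, 0 on nonsense input. */
static int channel_to_freq(int chan, enum nl80211_band band)
{
	switch (band) {
	case NL80211_BAND_2GHZ:
		if (chan == 14)
			return 2484;	/* Japan-only DSSS channel */
		if (chan >= 1 && chan <= 13)
			return 2407 + chan * 5;
		return 0;
	case NL80211_BAND_5GHZ:
		if (chan >= 1 && chan <= 196)
			return 5000 + chan * 5;
		return 0;
	}
	return 0;
}

int main(void)
{
	printf("ch 6 / 2.4 GHz -> %d MHz\n", channel_to_freq(6, NL80211_BAND_2GHZ));
	printf("ch 36 / 5 GHz  -> %d MHz\n", channel_to_freq(36, NL80211_BAND_5GHZ));
	return 0;
}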
index afc1fb3e38dfebc9b3d1e8bc07bb6902fb21e3d8..83770d2ea057875ddd75ffb312e9b2f3d28e5e9f 100644 (file)
@@ -1056,7 +1056,7 @@ static void b43legacy_write_probe_resp_plcp(struct b43legacy_wldev *dev,
        b43legacy_generate_plcp_hdr(&plcp, size + FCS_LEN, rate->hw_value);
        dur = ieee80211_generic_frame_duration(dev->wl->hw,
                                               dev->wl->vif,
-                                              IEEE80211_BAND_2GHZ,
+                                              NL80211_BAND_2GHZ,
                                               size,
                                               rate);
        /* Write PLCP in two parts and timing for packet transfer */
@@ -1122,7 +1122,7 @@ static const u8 *b43legacy_generate_probe_resp(struct b43legacy_wldev *dev,
                                         IEEE80211_STYPE_PROBE_RESP);
        dur = ieee80211_generic_frame_duration(dev->wl->hw,
                                               dev->wl->vif,
-                                              IEEE80211_BAND_2GHZ,
+                                              NL80211_BAND_2GHZ,
                                               *dest_size,
                                               rate);
        hdr->duration_id = dur;
@@ -2719,7 +2719,7 @@ static int b43legacy_op_dev_config(struct ieee80211_hw *hw,
 
        /* Switch the PHY mode (if necessary). */
        switch (conf->chandef.chan->band) {
-       case IEEE80211_BAND_2GHZ:
+       case NL80211_BAND_2GHZ:
                if (phy->type == B43legacy_PHYTYPE_B)
                        new_phymode = B43legacy_PHYMODE_B;
                else
@@ -2792,7 +2792,7 @@ out_unlock_mutex:
 static void b43legacy_update_basic_rates(struct b43legacy_wldev *dev, u32 brates)
 {
        struct ieee80211_supported_band *sband =
-               dev->wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+               dev->wl->hw->wiphy->bands[NL80211_BAND_2GHZ];
        struct ieee80211_rate *rate;
        int i;
        u16 basic, direct, offset, basic_offset, rateptr;
@@ -3630,13 +3630,13 @@ static int b43legacy_setup_modes(struct b43legacy_wldev *dev,
 
        phy->possible_phymodes = 0;
        if (have_bphy) {
-               hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+               hw->wiphy->bands[NL80211_BAND_2GHZ] =
                        &b43legacy_band_2GHz_BPHY;
                phy->possible_phymodes |= B43legacy_PHYMODE_B;
        }
 
        if (have_gphy) {
-               hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+               hw->wiphy->bands[NL80211_BAND_2GHZ] =
                        &b43legacy_band_2GHz_GPHY;
                phy->possible_phymodes |= B43legacy_PHYMODE_G;
        }
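
Both call sites above ask mac80211 for a generic frame duration on the 2.4 GHz band, since b43legacy hardware is B/G-only. For orientation, the textbook DSSS airtime arithmetic behind such a number, as a sketch rather than the mac80211 implementation (which also handles ERP rates and short preamble):

#include <stdio.h>

/* Long-preamble 802.11b estimate: 144 us preamble + 48 us PLCP
 * header, then payload bits at the data rate, rounded up. Rate is
 * in units of 100 kbit/s, as mac80211 bitrates are. */
static unsigned int dsss_tx_time_us(unsigned int len_bytes,
				    unsigned int rate_100kbps)
{
	const unsigned int plcp_us = 144 + 48;
	unsigned int bits = len_bytes * 8;

	/* bits / (rate_100kbps / 10) microseconds, rounded up */
	return plcp_us + (bits * 10 + rate_100kbps - 1) / rate_100kbps;
}

int main(void)
{
	/* 100-byte frame at 11 Mbit/s */
	printf("%u us\n", dsss_tx_time_us(100, 110));
	return 0;
}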
index 34bf3f0b729f79c901ba5351bd47043591118563..35ccf400b02c2f51004176bc8647153890605fd9 100644 (file)
@@ -565,7 +565,7 @@ void b43legacy_rx(struct b43legacy_wldev *dev,
        switch (chanstat & B43legacy_RX_CHAN_PHYTYPE) {
        case B43legacy_PHYTYPE_B:
        case B43legacy_PHYTYPE_G:
-               status.band = IEEE80211_BAND_2GHZ;
+               status.band = NL80211_BAND_2GHZ;
                status.freq = chanid + 2400;
                break;
        default:
index 6af658e443e415f97bf7c96aa79eaded674f7f52..d1bc51f92686530ee020563f0f6dc604d2afd7cf 100644 (file)
@@ -321,7 +321,8 @@ brcmf_proto_bcdc_hdrpull(struct brcmf_pub *drvr, bool do_fws,
        if (pktbuf->len == 0)
                return -ENODATA;
 
-       *ifp = tmp_if;
+       if (ifp != NULL)
+               *ifp = tmp_if;
        return 0;
 }
 
@@ -351,6 +352,12 @@ brcmf_proto_bcdc_add_tdls_peer(struct brcmf_pub *drvr, int ifidx,
 {
 }
 
+static void brcmf_proto_bcdc_rxreorder(struct brcmf_if *ifp,
+                                      struct sk_buff *skb)
+{
+       brcmf_fws_rxreorder(ifp, skb);
+}
+
 int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr)
 {
        struct brcmf_bcdc *bcdc;
@@ -372,6 +379,7 @@ int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr)
        drvr->proto->configure_addr_mode = brcmf_proto_bcdc_configure_addr_mode;
        drvr->proto->delete_peer = brcmf_proto_bcdc_delete_peer;
        drvr->proto->add_tdls_peer = brcmf_proto_bcdc_add_tdls_peer;
+       drvr->proto->rxreorder = brcmf_proto_bcdc_rxreorder;
        drvr->proto->pd = bcdc;
 
        drvr->hdrlen += BCDC_HEADER_LEN + BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES;
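
The bcdc hunk above wires a new rxreorder hook into the protocol ops table, so common code can trigger receive reordering without knowing whether BCDC or msgbuf sits underneath; the BCDC implementation simply forwards to the firmware-signalling code. A minimal echo of that indirection (struct and function names here are illustrative, not brcmfmac's API):

#include <stdio.h>

struct sk_buff;	/* opaque in this sketch */

struct proto_ops {
	void (*rxreorder)(struct sk_buff *skb);
};

static void fws_rxreorder(struct sk_buff *skb)
{
	(void)skb;
	puts("reorder via firmware-signalling path");
}

/* Thin wrapper, like brcmf_proto_bcdc_rxreorder(): the ops table
 * wants a function with the protocol's signature, the actual work
 * lives elsewhere. */
static void bcdc_rxreorder(struct sk_buff *skb)
{
	fws_rxreorder(skb);
}

static struct proto_ops bcdc_ops;

int main(void)
{
	bcdc_ops.rxreorder = bcdc_rxreorder;	/* as done in _attach() */
	bcdc_ops.rxreorder(NULL);		/* core calls through the hook */
	return 0;
}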
index da0cdd3138802d6bf1925c5249cbb8e08e083cba..2fc0597f2cd07b378fb3ea44b1ea24250b0d28fa 100644 (file)
@@ -250,7 +250,7 @@ static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn,
                                    u32 addr, u8 regsz, void *data, bool write)
 {
        struct sdio_func *func;
-       int ret;
+       int ret = -EINVAL;
 
        brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
                  write, fn, addr, regsz);
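
Pre-seeding ret with -EINVAL above is the usual guard for a dispatch-on-size function: if a regsz value reaches a case nobody handles, the caller sees an error instead of whatever happened to be on the stack. A tiny demonstration of the pattern (the real function dispatches to SDIO byte/word accessors):

#include <stdio.h>

#define EINVAL 22	/* matches the kernel's errno value */

static int request_data(unsigned int regsz)
{
	int ret = -EINVAL;

	switch (regsz) {
	case 1:
		ret = 0;	/* pretend the 1-byte access worked */
		break;
	case 4:
		ret = 0;	/* pretend the 4-byte access worked */
		break;
	default:
		break;		/* ret stays -EINVAL */
	}
	return ret;
}

int main(void)
{
	printf("regsz=4 -> %d, regsz=3 -> %d\n",
	       request_data(4), request_data(3));
	return 0;
}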
index 8e02a478e889602a61fa4f07ad260d20bea9182b..2b246545647ac35810e6ade0ac81adab4c0424d8 100644 (file)
@@ -216,7 +216,9 @@ bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, struct sk_buff *pkt,
                      int prec);
 
 /* Receive frame for delivery to OS.  Callee disposes of rxp. */
-void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp);
+void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp, bool handle_event);
+/* Receive async event packet from firmware. Callee disposes of rxp. */
+void brcmf_rx_event(struct device *dev, struct sk_buff *rxp);
 
 /* Indication from bus module regarding presence/insertion of dongle. */
 int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings);
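
The header change above splits packet delivery into a data entry point, which gains a handle_event flag, and a dedicated brcmf_rx_event() for firmware event packets coming straight from the bus. A sketch of such a split around one common worker; the bodies below are invented for illustration, only the shape matches the declarations:

#include <stdbool.h>
#include <stdio.h>

struct sk_buff;	/* opaque in this sketch */

static void deliver(struct sk_buff *rxp, bool event_only, bool handle_event)
{
	(void)rxp;
	if (event_only || handle_event)
		puts("scan packet for a firmware event");
	if (!event_only)
		puts("hand packet to the network stack");
}

void rx_frame(struct sk_buff *rxp, bool handle_event)
{
	deliver(rxp, false, handle_event);
}

void rx_event(struct sk_buff *rxp)
{
	deliver(rxp, true, false);
}

int main(void)
{
	rx_frame(NULL, true);	/* data path, events possible */
	rx_event(NULL);		/* pure event packet from the bus */
	return 0;
}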
index d5c2a27573b45cdfcbb72bf32dedc1dcd0af82da..d0631b6cfd5302ee7d73c25937f29f8184e0be83 100644 (file)
@@ -144,7 +144,7 @@ static struct ieee80211_rate __wl_rates[] = {
 #define wl_a_rates_size                (wl_g_rates_size - 4)
 
 #define CHAN2G(_channel, _freq) {                              \
-       .band                   = IEEE80211_BAND_2GHZ,          \
+       .band                   = NL80211_BAND_2GHZ,            \
        .center_freq            = (_freq),                      \
        .hw_value               = (_channel),                   \
        .flags                  = IEEE80211_CHAN_DISABLED,      \
@@ -153,7 +153,7 @@ static struct ieee80211_rate __wl_rates[] = {
 }
 
 #define CHAN5G(_channel) {                                     \
-       .band                   = IEEE80211_BAND_5GHZ,          \
+       .band                   = NL80211_BAND_5GHZ,            \
        .center_freq            = 5000 + (5 * (_channel)),      \
        .hw_value               = (_channel),                   \
        .flags                  = IEEE80211_CHAN_DISABLED,      \
@@ -181,13 +181,13 @@ static struct ieee80211_channel __wl_5ghz_channels[] = {
  * above is added to the band during setup.
  */
 static const struct ieee80211_supported_band __wl_band_2ghz = {
-       .band = IEEE80211_BAND_2GHZ,
+       .band = NL80211_BAND_2GHZ,
        .bitrates = wl_g_rates,
        .n_bitrates = wl_g_rates_size,
 };
 
 static const struct ieee80211_supported_band __wl_band_5ghz = {
-       .band = IEEE80211_BAND_5GHZ,
+       .band = NL80211_BAND_5GHZ,
        .bitrates = wl_a_rates,
        .n_bitrates = wl_a_rates_size,
 };
@@ -250,6 +250,20 @@ struct parsed_vndr_ies {
        struct parsed_vndr_ie_info ie_info[VNDR_IE_PARSE_LIMIT];
 };
 
+static u8 nl80211_band_to_fwil(enum nl80211_band band)
+{
+       switch (band) {
+       case NL80211_BAND_2GHZ:
+               return WLC_BAND_2G;
+       case NL80211_BAND_5GHZ:
+               return WLC_BAND_5G;
+       default:
+               WARN_ON(1);
+               break;
+       }
+       return 0;
+}
+
 static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
                               struct cfg80211_chan_def *ch)
 {
@@ -292,13 +306,13 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
                WARN_ON_ONCE(1);
        }
        switch (ch->chan->band) {
-       case IEEE80211_BAND_2GHZ:
+       case NL80211_BAND_2GHZ:
                ch_inf.band = BRCMU_CHAN_BAND_2G;
                break;
-       case IEEE80211_BAND_5GHZ:
+       case NL80211_BAND_5GHZ:
                ch_inf.band = BRCMU_CHAN_BAND_5G;
                break;
-       case IEEE80211_BAND_60GHZ:
+       case NL80211_BAND_60GHZ:
        default:
                WARN_ON_ONCE(1);
        }
@@ -1796,6 +1810,50 @@ enum nl80211_auth_type brcmf_war_auth_type(struct brcmf_if *ifp,
        return type;
 }
 
+static void brcmf_set_join_pref(struct brcmf_if *ifp,
+                               struct cfg80211_bss_selection *bss_select)
+{
+       struct brcmf_join_pref_params join_pref_params[2];
+       enum nl80211_band band;
+       int err, i = 0;
+
+       join_pref_params[i].len = 2;
+       join_pref_params[i].rssi_gain = 0;
+
+       if (bss_select->behaviour != NL80211_BSS_SELECT_ATTR_BAND_PREF)
+               brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_ASSOC_PREFER, WLC_BAND_AUTO);
+
+       switch (bss_select->behaviour) {
+       case __NL80211_BSS_SELECT_ATTR_INVALID:
+               brcmf_c_set_joinpref_default(ifp);
+               return;
+       case NL80211_BSS_SELECT_ATTR_BAND_PREF:
+               join_pref_params[i].type = BRCMF_JOIN_PREF_BAND;
+               band = bss_select->param.band_pref;
+               join_pref_params[i].band = nl80211_band_to_fwil(band);
+               i++;
+               break;
+       case NL80211_BSS_SELECT_ATTR_RSSI_ADJUST:
+               join_pref_params[i].type = BRCMF_JOIN_PREF_RSSI_DELTA;
+               band = bss_select->param.adjust.band;
+               join_pref_params[i].band = nl80211_band_to_fwil(band);
+               join_pref_params[i].rssi_gain = bss_select->param.adjust.delta;
+               i++;
+               break;
+       case NL80211_BSS_SELECT_ATTR_RSSI:
+       default:
+               break;
+       }
+       join_pref_params[i].type = BRCMF_JOIN_PREF_RSSI;
+       join_pref_params[i].len = 2;
+       join_pref_params[i].rssi_gain = 0;
+       join_pref_params[i].band = 0;
+       err = brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref_params,
+                                      sizeof(join_pref_params));
+       if (err)
+               brcmf_err("Set join_pref error (%d)\n", err);
+}
+
 static s32
 brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
                       struct cfg80211_connect_params *sme)
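
For orientation, the shape of the cfg80211 input consumed by
brcmf_set_join_pref(), reconstructed from the field accesses above (the
layout is an assumption of this sketch, not quoted from cfg80211.h):

    struct cfg80211_bss_select_adjust {
            enum nl80211_band band;
            s8 delta;                       /* RSSI adjustment, in dB */
    };

    struct cfg80211_bss_selection {
            enum nl80211_bss_select_attr behaviour;
            union {
                    enum nl80211_band band_pref;
                    struct cfg80211_bss_select_adjust adjust;
            } param;
    };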
@@ -1952,6 +2010,8 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
                ext_join_params->scan_le.nprobes = cpu_to_le32(-1);
        }
 
+       brcmf_set_join_pref(ifp, &sme->bss_select);
+
        err  = brcmf_fil_bsscfg_data_set(ifp, "join", ext_join_params,
                                         join_params_size);
        kfree(ext_join_params);
@@ -2679,9 +2739,9 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
        channel = bi->ctl_ch;
 
        if (channel <= CH_MAX_2G_CHANNEL)
-               band = wiphy->bands[IEEE80211_BAND_2GHZ];
+               band = wiphy->bands[NL80211_BAND_2GHZ];
        else
-               band = wiphy->bands[IEEE80211_BAND_5GHZ];
+               band = wiphy->bands[NL80211_BAND_5GHZ];
 
        freq = ieee80211_channel_to_frequency(channel, band->band);
        notify_channel = ieee80211_get_channel(wiphy, freq);
@@ -2788,9 +2848,9 @@ static s32 brcmf_inform_ibss(struct brcmf_cfg80211_info *cfg,
        cfg->d11inf.decchspec(&ch);
 
        if (ch.band == BRCMU_CHAN_BAND_2G)
-               band = wiphy->bands[IEEE80211_BAND_2GHZ];
+               band = wiphy->bands[NL80211_BAND_2GHZ];
        else
-               band = wiphy->bands[IEEE80211_BAND_5GHZ];
+               band = wiphy->bands[NL80211_BAND_5GHZ];
 
        freq = ieee80211_channel_to_frequency(ch.chnum, band->band);
        cfg->channel = freq;
@@ -3608,7 +3668,8 @@ static void brcmf_configure_wowl(struct brcmf_cfg80211_info *cfg,
        if (!test_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state))
                wowl_config |= BRCMF_WOWL_UNASSOC;
 
-       brcmf_fil_iovar_data_set(ifp, "wowl_wakeind", "clear", strlen("clear"));
+       brcmf_fil_iovar_data_set(ifp, "wowl_wakeind", "clear",
+                                sizeof(struct brcmf_wowl_wakeind_le));
        brcmf_fil_iovar_int_set(ifp, "wowl", wowl_config);
        brcmf_fil_iovar_int_set(ifp, "wowl_activate", 1);
        brcmf_bus_wowl_config(cfg->pub->bus_if, true);
@@ -5215,9 +5276,9 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
        cfg->d11inf.decchspec(&ch);
 
        if (ch.band == BRCMU_CHAN_BAND_2G)
-               band = wiphy->bands[IEEE80211_BAND_2GHZ];
+               band = wiphy->bands[NL80211_BAND_2GHZ];
        else
-               band = wiphy->bands[IEEE80211_BAND_5GHZ];
+               band = wiphy->bands[NL80211_BAND_5GHZ];
 
        freq = ieee80211_channel_to_frequency(ch.chnum, band->band);
        notify_channel = ieee80211_get_channel(wiphy, freq);
@@ -5707,11 +5768,11 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
        }
 
        wiphy = cfg_to_wiphy(cfg);
-       band = wiphy->bands[IEEE80211_BAND_2GHZ];
+       band = wiphy->bands[NL80211_BAND_2GHZ];
        if (band)
                for (i = 0; i < band->n_channels; i++)
                        band->channels[i].flags = IEEE80211_CHAN_DISABLED;
-       band = wiphy->bands[IEEE80211_BAND_5GHZ];
+       band = wiphy->bands[NL80211_BAND_5GHZ];
        if (band)
                for (i = 0; i < band->n_channels; i++)
                        band->channels[i].flags = IEEE80211_CHAN_DISABLED;
@@ -5722,9 +5783,9 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
                cfg->d11inf.decchspec(&ch);
 
                if (ch.band == BRCMU_CHAN_BAND_2G) {
-                       band = wiphy->bands[IEEE80211_BAND_2GHZ];
+                       band = wiphy->bands[NL80211_BAND_2GHZ];
                } else if (ch.band == BRCMU_CHAN_BAND_5G) {
-                       band = wiphy->bands[IEEE80211_BAND_5GHZ];
+                       band = wiphy->bands[NL80211_BAND_5GHZ];
                } else {
                        brcmf_err("Invalid channel Spec. 0x%x.\n", ch.chspec);
                        continue;
@@ -5839,7 +5900,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg)
                        return err;
                }
 
-               band = cfg_to_wiphy(cfg)->bands[IEEE80211_BAND_2GHZ];
+               band = cfg_to_wiphy(cfg)->bands[NL80211_BAND_2GHZ];
                list = (struct brcmf_chanspec_list *)pbuf;
                num_chan = le32_to_cpu(list->count);
                for (i = 0; i < num_chan; i++) {
@@ -5871,11 +5932,11 @@ static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[])
        band = WLC_BAND_2G;
        err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band);
        if (!err) {
-               bw_cap[IEEE80211_BAND_2GHZ] = band;
+               bw_cap[NL80211_BAND_2GHZ] = band;
                band = WLC_BAND_5G;
                err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band);
                if (!err) {
-                       bw_cap[IEEE80211_BAND_5GHZ] = band;
+                       bw_cap[NL80211_BAND_5GHZ] = band;
                        return;
                }
                WARN_ON(1);
@@ -5890,14 +5951,14 @@ static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[])
 
        switch (mimo_bwcap) {
        case WLC_N_BW_40ALL:
-               bw_cap[IEEE80211_BAND_2GHZ] |= WLC_BW_40MHZ_BIT;
+               bw_cap[NL80211_BAND_2GHZ] |= WLC_BW_40MHZ_BIT;
                /* fall-thru */
        case WLC_N_BW_20IN2G_40IN5G:
-               bw_cap[IEEE80211_BAND_5GHZ] |= WLC_BW_40MHZ_BIT;
+               bw_cap[NL80211_BAND_5GHZ] |= WLC_BW_40MHZ_BIT;
                /* fall-thru */
        case WLC_N_BW_20ALL:
-               bw_cap[IEEE80211_BAND_2GHZ] |= WLC_BW_20MHZ_BIT;
-               bw_cap[IEEE80211_BAND_5GHZ] |= WLC_BW_20MHZ_BIT;
+               bw_cap[NL80211_BAND_2GHZ] |= WLC_BW_20MHZ_BIT;
+               bw_cap[NL80211_BAND_5GHZ] |= WLC_BW_20MHZ_BIT;
                break;
        default:
                brcmf_err("invalid mimo_bw_cap value\n");
@@ -5938,7 +5999,7 @@ static void brcmf_update_vht_cap(struct ieee80211_supported_band *band,
        __le16 mcs_map;
 
        /* not allowed in 2.4G band */
-       if (band->band == IEEE80211_BAND_2GHZ)
+       if (band->band == NL80211_BAND_2GHZ)
                return;
 
        band->vht_cap.vht_supported = true;
@@ -5997,8 +6058,8 @@ static int brcmf_setup_wiphybands(struct wiphy *wiphy)
                brcmf_get_bwcap(ifp, bw_cap);
        }
        brcmf_dbg(INFO, "nmode=%d, vhtmode=%d, bw_cap=(%d, %d)\n",
-                 nmode, vhtmode, bw_cap[IEEE80211_BAND_2GHZ],
-                 bw_cap[IEEE80211_BAND_5GHZ]);
+                 nmode, vhtmode, bw_cap[NL80211_BAND_2GHZ],
+                 bw_cap[NL80211_BAND_5GHZ]);
 
        err = brcmf_fil_iovar_int_get(ifp, "rxchain", &rxchain);
        if (err) {
@@ -6279,6 +6340,10 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
        wiphy->n_cipher_suites = ARRAY_SIZE(brcmf_cipher_suites);
        if (!brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MFP))
                wiphy->n_cipher_suites--;
+       wiphy->bss_select_support = BIT(NL80211_BSS_SELECT_ATTR_RSSI) |
+                                   BIT(NL80211_BSS_SELECT_ATTR_BAND_PREF) |
+                                   BIT(NL80211_BSS_SELECT_ATTR_RSSI_ADJUST);
+
        wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT |
                        WIPHY_FLAG_OFFCHAN_TX |
                        WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
@@ -6321,7 +6386,7 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
                        }
 
                        band->n_channels = ARRAY_SIZE(__wl_2ghz_channels);
-                       wiphy->bands[IEEE80211_BAND_2GHZ] = band;
+                       wiphy->bands[NL80211_BAND_2GHZ] = band;
                }
                if (bandlist[i] == cpu_to_le32(WLC_BAND_5G)) {
                        band = kmemdup(&__wl_band_5ghz, sizeof(__wl_band_5ghz),
@@ -6338,7 +6403,7 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
                        }
 
                        band->n_channels = ARRAY_SIZE(__wl_5ghz_channels);
-                       wiphy->bands[IEEE80211_BAND_5GHZ] = band;
+                       wiphy->bands[NL80211_BAND_5GHZ] = band;
                }
        }
        err = brcmf_setup_wiphybands(wiphy);
@@ -6604,13 +6669,13 @@ static void brcmf_free_wiphy(struct wiphy *wiphy)
                        kfree(wiphy->iface_combinations[i].limits);
        }
        kfree(wiphy->iface_combinations);
-       if (wiphy->bands[IEEE80211_BAND_2GHZ]) {
-               kfree(wiphy->bands[IEEE80211_BAND_2GHZ]->channels);
-               kfree(wiphy->bands[IEEE80211_BAND_2GHZ]);
+       if (wiphy->bands[NL80211_BAND_2GHZ]) {
+               kfree(wiphy->bands[NL80211_BAND_2GHZ]->channels);
+               kfree(wiphy->bands[NL80211_BAND_2GHZ]);
        }
-       if (wiphy->bands[IEEE80211_BAND_5GHZ]) {
-               kfree(wiphy->bands[IEEE80211_BAND_5GHZ]->channels);
-               kfree(wiphy->bands[IEEE80211_BAND_5GHZ]);
+       if (wiphy->bands[NL80211_BAND_5GHZ]) {
+               kfree(wiphy->bands[NL80211_BAND_5GHZ]->channels);
+               kfree(wiphy->bands[NL80211_BAND_5GHZ]);
        }
        wiphy_free(wiphy);
 }
@@ -6698,8 +6763,8 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
         * cfg80211 here that we do and have it decide we can enable
         * it. But first check if device does support 2G operation.
         */
-       if (wiphy->bands[IEEE80211_BAND_2GHZ]) {
-               cap = &wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap.cap;
+       if (wiphy->bands[NL80211_BAND_2GHZ]) {
+               cap = &wiphy->bands[NL80211_BAND_2GHZ]->ht_cap.cap;
                *cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
        }
        err = wiphy_register(wiphy);
index 9e909e3c2f0c0fdae44398ce967c1de015b5bfe6..3e15d64c64813513bc22202dd9e468588699abb0 100644 (file)
@@ -38,7 +38,7 @@ const u8 ALLFFMAC[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
 #define BRCMF_DEFAULT_SCAN_CHANNEL_TIME        40
 #define BRCMF_DEFAULT_SCAN_UNASSOC_TIME        40
 
-/* boost value for RSSI_DELTA in preferred join selection */
+/* default boost value for RSSI_DELTA in preferred join selection */
 #define BRCMF_JOIN_PREF_RSSI_BOOST     8
 
 #define BRCMF_DEFAULT_TXGLOM_SIZE      32  /* max tx frames in glom chain */
@@ -83,11 +83,31 @@ MODULE_PARM_DESC(ignore_probe_fail, "always succeed probe for debugging");
 static struct brcmfmac_platform_data *brcmfmac_pdata;
 struct brcmf_mp_global_t brcmf_mp_global;
 
+void brcmf_c_set_joinpref_default(struct brcmf_if *ifp)
+{
+       struct brcmf_join_pref_params join_pref_params[2];
+       int err;
+
+       /* Setup join_pref to select target by RSSI (boost on 5GHz) */
+       join_pref_params[0].type = BRCMF_JOIN_PREF_RSSI_DELTA;
+       join_pref_params[0].len = 2;
+       join_pref_params[0].rssi_gain = BRCMF_JOIN_PREF_RSSI_BOOST;
+       join_pref_params[0].band = WLC_BAND_5G;
+
+       join_pref_params[1].type = BRCMF_JOIN_PREF_RSSI;
+       join_pref_params[1].len = 2;
+       join_pref_params[1].rssi_gain = 0;
+       join_pref_params[1].band = 0;
+       err = brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref_params,
+                                      sizeof(join_pref_params));
+       if (err)
+               brcmf_err("Set join_pref error (%d)\n", err);
+}
+
 int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
 {
        s8 eventmask[BRCMF_EVENTING_MASK_LEN];
        u8 buf[BRCMF_DCMD_SMLEN];
-       struct brcmf_join_pref_params join_pref_params[2];
        struct brcmf_rev_info_le revinfo;
        struct brcmf_rev_info *ri;
        char *ptr;
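
The default keeps dual-band association biased toward 5GHz: with
BRCMF_JOIN_PREF_RSSI_BOOST set to 8, a 5GHz candidate is ranked as if its
RSSI were 8 dB stronger, so a 5GHz BSS at -70 dBm beats a 2.4GHz BSS at
-65 dBm. A minimal host-side sketch of the comparison the firmware is asked
to apply (illustrative only, not driver code):

    /* rank = RSSI plus the 5GHz boost carried in join_pref_params[0] */
    static int bss_rank(int rssi_dbm, bool is_5g)
    {
            return rssi_dbm + (is_5g ? 8 : 0);
    }
    /* bss_rank(-70, true) == -62 beats bss_rank(-65, false) == -65 */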
@@ -154,19 +174,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
                goto done;
        }
 
-       /* Setup join_pref to select target by RSSI(with boost on 5GHz) */
-       join_pref_params[0].type = BRCMF_JOIN_PREF_RSSI_DELTA;
-       join_pref_params[0].len = 2;
-       join_pref_params[0].rssi_gain = BRCMF_JOIN_PREF_RSSI_BOOST;
-       join_pref_params[0].band = WLC_BAND_5G;
-       join_pref_params[1].type = BRCMF_JOIN_PREF_RSSI;
-       join_pref_params[1].len = 2;
-       join_pref_params[1].rssi_gain = 0;
-       join_pref_params[1].band = 0;
-       err = brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref_params,
-                                      sizeof(join_pref_params));
-       if (err)
-               brcmf_err("Set join_pref error (%d)\n", err);
+       brcmf_c_set_joinpref_default(ifp);
 
        /* Setup event_msgs, enable E_IF */
        err = brcmf_fil_iovar_data_get(ifp, "event_msgs", eventmask,
index ff825cd7739e29e946cb1120aeaf06b1ecfbbcdb..b590499f6883294525b094cb66c4449069eb2353 100644 (file)
 
 #define MAX_WAIT_FOR_8021X_TX                  msecs_to_jiffies(950)
 
-/* AMPDU rx reordering definitions */
-#define BRCMF_RXREORDER_FLOWID_OFFSET          0
-#define BRCMF_RXREORDER_MAXIDX_OFFSET          2
-#define BRCMF_RXREORDER_FLAGS_OFFSET           4
-#define BRCMF_RXREORDER_CURIDX_OFFSET          6
-#define BRCMF_RXREORDER_EXPIDX_OFFSET          8
-
-#define BRCMF_RXREORDER_DEL_FLOW               0x01
-#define BRCMF_RXREORDER_FLUSH_ALL              0x02
-#define BRCMF_RXREORDER_CURIDX_VALID           0x04
-#define BRCMF_RXREORDER_EXPIDX_VALID           0x08
-#define BRCMF_RXREORDER_NEW_HOLE               0x10
-
 #define BRCMF_BSSIDX_INVALID                   -1
 
 char *brcmf_ifname(struct brcmf_if *ifp)
@@ -313,15 +300,9 @@ void brcmf_txflowblock(struct device *dev, bool state)
 
 void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
 {
-       skb->dev = ifp->ndev;
-       skb->protocol = eth_type_trans(skb, skb->dev);
-
        if (skb->pkt_type == PACKET_MULTICAST)
                ifp->stats.multicast++;
 
-       /* Process special event packets */
-       brcmf_fweh_process_skb(ifp->drvr, skb);
-
        if (!(ifp->ndev->flags & IFF_UP)) {
                brcmu_pkt_buf_free_skb(skb);
                return;
@@ -341,226 +322,60 @@ void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
                netif_rx_ni(skb);
 }
 
-static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
-                                        u8 start, u8 end,
-                                        struct sk_buff_head *skb_list)
+static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb,
+                           struct brcmf_if **ifp)
 {
-       /* initialize return list */
-       __skb_queue_head_init(skb_list);
+       int ret;
 
-       if (rfi->pend_pkts == 0) {
-               brcmf_dbg(INFO, "no packets in reorder queue\n");
-               return;
+       /* process and remove protocol-specific header */
+       ret = brcmf_proto_hdrpull(drvr, true, skb, ifp);
+
+       if (ret || !(*ifp) || !(*ifp)->ndev) {
+               if (ret != -ENODATA && *ifp)
+                       (*ifp)->stats.rx_errors++;
+               brcmu_pkt_buf_free_skb(skb);
+               return -ENODATA;
        }
 
-       do {
-               if (rfi->pktslots[start]) {
-                       __skb_queue_tail(skb_list, rfi->pktslots[start]);
-                       rfi->pktslots[start] = NULL;
-               }
-               start++;
-               if (start > rfi->max_idx)
-                       start = 0;
-       } while (start != end);
-       rfi->pend_pkts -= skb_queue_len(skb_list);
+       skb->protocol = eth_type_trans(skb, (*ifp)->ndev);
+       return 0;
 }
 
-static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data,
-                                        struct sk_buff *pkt)
+void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event)
 {
-       u8 flow_id, max_idx, cur_idx, exp_idx, end_idx;
-       struct brcmf_ampdu_rx_reorder *rfi;
-       struct sk_buff_head reorder_list;
-       struct sk_buff *pnext;
-       u8 flags;
-       u32 buf_size;
-
-       flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET];
-       flags = reorder_data[BRCMF_RXREORDER_FLAGS_OFFSET];
-
-       /* validate flags and flow id */
-       if (flags == 0xFF) {
-               brcmf_err("invalid flags...so ignore this packet\n");
-               brcmf_netif_rx(ifp, pkt);
-               return;
-       }
-
-       rfi = ifp->drvr->reorder_flows[flow_id];
-       if (flags & BRCMF_RXREORDER_DEL_FLOW) {
-               brcmf_dbg(INFO, "flow-%d: delete\n",
-                         flow_id);
+       struct brcmf_if *ifp;
+       struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+       struct brcmf_pub *drvr = bus_if->drvr;
 
-               if (rfi == NULL) {
-                       brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
-                                 flow_id);
-                       brcmf_netif_rx(ifp, pkt);
-                       return;
-               }
+       brcmf_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb);
 
-               brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx,
-                                            &reorder_list);
-               /* add the last packet */
-               __skb_queue_tail(&reorder_list, pkt);
-               kfree(rfi);
-               ifp->drvr->reorder_flows[flow_id] = NULL;
-               goto netif_rx;
-       }
-       /* from here on we need a flow reorder instance */
-       if (rfi == NULL) {
-               buf_size = sizeof(*rfi);
-               max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
-
-               buf_size += (max_idx + 1) * sizeof(pkt);
-
-               /* allocate space for flow reorder info */
-               brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n",
-                         flow_id, max_idx);
-               rfi = kzalloc(buf_size, GFP_ATOMIC);
-               if (rfi == NULL) {
-                       brcmf_err("failed to alloc buffer\n");
-                       brcmf_netif_rx(ifp, pkt);
-                       return;
-               }
+       if (brcmf_rx_hdrpull(drvr, skb, &ifp))
+               return;
 
-               ifp->drvr->reorder_flows[flow_id] = rfi;
-               rfi->pktslots = (struct sk_buff **)(rfi+1);
-               rfi->max_idx = max_idx;
-       }
-       if (flags & BRCMF_RXREORDER_NEW_HOLE)  {
-               if (rfi->pend_pkts) {
-                       brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx,
-                                                    rfi->exp_idx,
-                                                    &reorder_list);
-                       WARN_ON(rfi->pend_pkts);
-               } else {
-                       __skb_queue_head_init(&reorder_list);
-               }
-               rfi->cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
-               rfi->exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
-               rfi->max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
-               rfi->pktslots[rfi->cur_idx] = pkt;
-               rfi->pend_pkts++;
-               brcmf_dbg(DATA, "flow-%d: new hole %d (%d), pending %d\n",
-                         flow_id, rfi->cur_idx, rfi->exp_idx, rfi->pend_pkts);
-       } else if (flags & BRCMF_RXREORDER_CURIDX_VALID) {
-               cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
-               exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
-
-               if ((exp_idx == rfi->exp_idx) && (cur_idx != rfi->exp_idx)) {
-                       /* still in the current hole */
-                       /* enqueue the current on the buffer chain */
-                       if (rfi->pktslots[cur_idx] != NULL) {
-                               brcmf_dbg(INFO, "HOLE: ERROR buffer pending..free it\n");
-                               brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
-                               rfi->pktslots[cur_idx] = NULL;
-                       }
-                       rfi->pktslots[cur_idx] = pkt;
-                       rfi->pend_pkts++;
-                       rfi->cur_idx = cur_idx;
-                       brcmf_dbg(DATA, "flow-%d: store pkt %d (%d), pending %d\n",
-                                 flow_id, cur_idx, exp_idx, rfi->pend_pkts);
-
-                       /* can return now as there is no reorder
-                        * list to process.
-                        */
-                       return;
-               }
-               if (rfi->exp_idx == cur_idx) {
-                       if (rfi->pktslots[cur_idx] != NULL) {
-                               brcmf_dbg(INFO, "error buffer pending..free it\n");
-                               brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
-                               rfi->pktslots[cur_idx] = NULL;
-                       }
-                       rfi->pktslots[cur_idx] = pkt;
-                       rfi->pend_pkts++;
-
-                       /* got the expected one. flush from current to expected
-                        * and update expected
-                        */
-                       brcmf_dbg(DATA, "flow-%d: expected %d (%d), pending %d\n",
-                                 flow_id, cur_idx, exp_idx, rfi->pend_pkts);
-
-                       rfi->cur_idx = cur_idx;
-                       rfi->exp_idx = exp_idx;
-
-                       brcmf_rxreorder_get_skb_list(rfi, cur_idx, exp_idx,
-                                                    &reorder_list);
-                       brcmf_dbg(DATA, "flow-%d: freeing buffers %d, pending %d\n",
-                                 flow_id, skb_queue_len(&reorder_list),
-                                 rfi->pend_pkts);
-               } else {
-                       u8 end_idx;
-
-                       brcmf_dbg(DATA, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n",
-                                 flow_id, flags, rfi->cur_idx, rfi->exp_idx,
-                                 cur_idx, exp_idx);
-                       if (flags & BRCMF_RXREORDER_FLUSH_ALL)
-                               end_idx = rfi->exp_idx;
-                       else
-                               end_idx = exp_idx;
-
-                       /* flush pkts first */
-                       brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
-                                                    &reorder_list);
-
-                       if (exp_idx == ((cur_idx + 1) % (rfi->max_idx + 1))) {
-                               __skb_queue_tail(&reorder_list, pkt);
-                       } else {
-                               rfi->pktslots[cur_idx] = pkt;
-                               rfi->pend_pkts++;
-                       }
-                       rfi->exp_idx = exp_idx;
-                       rfi->cur_idx = cur_idx;
-               }
+       if (brcmf_proto_is_reorder_skb(skb)) {
+               brcmf_proto_rxreorder(ifp, skb);
        } else {
-               /* explicity window move updating the expected index */
-               exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
-
-               brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n",
-                         flow_id, flags, rfi->exp_idx, exp_idx);
-               if (flags & BRCMF_RXREORDER_FLUSH_ALL)
-                       end_idx =  rfi->exp_idx;
-               else
-                       end_idx =  exp_idx;
+               /* Process special event packets */
+               if (handle_event)
+                       brcmf_fweh_process_skb(ifp->drvr, skb);
 
-               brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
-                                            &reorder_list);
-               __skb_queue_tail(&reorder_list, pkt);
-               /* set the new expected idx */
-               rfi->exp_idx = exp_idx;
-       }
-netif_rx:
-       skb_queue_walk_safe(&reorder_list, pkt, pnext) {
-               __skb_unlink(pkt, &reorder_list);
-               brcmf_netif_rx(ifp, pkt);
+               brcmf_netif_rx(ifp, skb);
        }
 }
 
-void brcmf_rx_frame(struct device *dev, struct sk_buff *skb)
+void brcmf_rx_event(struct device *dev, struct sk_buff *skb)
 {
        struct brcmf_if *ifp;
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_pub *drvr = bus_if->drvr;
-       struct brcmf_skb_reorder_data *rd;
-       int ret;
 
-       brcmf_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb);
-
-       /* process and remove protocol-specific header */
-       ret = brcmf_proto_hdrpull(drvr, true, skb, &ifp);
+       brcmf_dbg(EVENT, "Enter: %s: rxp=%p\n", dev_name(dev), skb);
 
-       if (ret || !ifp || !ifp->ndev) {
-               if (ret != -ENODATA && ifp)
-                       ifp->stats.rx_errors++;
-               brcmu_pkt_buf_free_skb(skb);
+       if (brcmf_rx_hdrpull(drvr, skb, &ifp))
                return;
-       }
 
-       rd = (struct brcmf_skb_reorder_data *)skb->cb;
-       if (rd->reorder)
-               brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
-       else
-               brcmf_netif_rx(ifp, skb);
+       brcmf_fweh_process_skb(ifp->drvr, skb);
+       brcmu_pkt_buf_free_skb(skb);
 }
 
 void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success)
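
The net effect on bus drivers: packets known to carry firmware events go
through brcmf_rx_event(), which processes and then frees the skb, while data
frames go through brcmf_rx_frame(); the new handle_event flag is set only by
buses that cannot classify packets up front (USB, later in this diff). A
minimal call-site sketch, with is_event() standing in for a bus-specific
test (hypothetical helper):

    if (is_event(pkt))
            brcmf_rx_event(dev, pkt);        /* consumed: parsed and freed */
    else
            brcmf_rx_frame(dev, pkt, false); /* handed to the net stack */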
index 7bdb6fef99c3fba9fb3b2970683fd80df0482678..647d3cc2a4dca297881e33ae8561641dbeb51705 100644 (file)
@@ -208,10 +208,6 @@ struct brcmf_if {
        u8 ipv6addr_idx;
 };
 
-struct brcmf_skb_reorder_data {
-       u8 *reorder;
-};
-
 int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp);
 
 /* Return pointer to interface name */
@@ -227,6 +223,7 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp,
 void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);
 void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb);
 void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on);
+void brcmf_c_set_joinpref_default(struct brcmf_if *ifp);
 int __init brcmf_core_init(void);
 void __exit brcmf_core_exit(void);
 
index 7269056d0044d82727f57cf2b05d60f5f93118a7..c7c1e9906500fd5be3d386ea4c43848caaff6ac1 100644 (file)
@@ -29,6 +29,7 @@
 #define BRCMF_FW_MAX_NVRAM_SIZE                        64000
 #define BRCMF_FW_NVRAM_DEVPATH_LEN             19      /* devpath0=pcie/1/4/ */
 #define BRCMF_FW_NVRAM_PCIEDEV_LEN             10      /* pcie/1/4/ + \0 */
+#define BRCMF_FW_DEFAULT_BOARDREV              "boardrev=0xff"
 
 enum nvram_parser_state {
        IDLE,
@@ -51,6 +52,7 @@ enum nvram_parser_state {
  * @entry: start position of key,value entry.
  * @multi_dev_v1: detect pcie multi device v1 (compressed).
  * @multi_dev_v2: detect pcie multi device v2.
+ * @boardrev_found: nvram contains boardrev information.
  */
 struct nvram_parser {
        enum nvram_parser_state state;
@@ -63,6 +65,7 @@ struct nvram_parser {
        u32 entry;
        bool multi_dev_v1;
        bool multi_dev_v2;
+       bool boardrev_found;
 };
 
 /**
@@ -125,6 +128,8 @@ static enum nvram_parser_state brcmf_nvram_handle_key(struct nvram_parser *nvp)
                        nvp->multi_dev_v1 = true;
                if (strncmp(&nvp->data[nvp->entry], "pcie/", 5) == 0)
                        nvp->multi_dev_v2 = true;
+               if (strncmp(&nvp->data[nvp->entry], "boardrev", 8) == 0)
+                       nvp->boardrev_found = true;
        } else if (!is_nvram_char(c) || c == ' ') {
                brcmf_dbg(INFO, "warning: ln=%d:col=%d: '=' expected, skip invalid key entry\n",
                          nvp->line, nvp->column);
@@ -284,6 +289,8 @@ static void brcmf_fw_strip_multi_v1(struct nvram_parser *nvp, u16 domain_nr,
        while (i < nvp->nvram_len) {
                if ((nvp->nvram[i] - '0' == id) && (nvp->nvram[i + 1] == ':')) {
                        i += 2;
+                       if (strncmp(&nvp->nvram[i], "boardrev", 8) == 0)
+                               nvp->boardrev_found = true;
                        while (nvp->nvram[i] != 0) {
                                nvram[j] = nvp->nvram[i];
                                i++;
@@ -335,6 +342,8 @@ static void brcmf_fw_strip_multi_v2(struct nvram_parser *nvp, u16 domain_nr,
        while (i < nvp->nvram_len - len) {
                if (strncmp(&nvp->nvram[i], prefix, len) == 0) {
                        i += len;
+                       if (strncmp(&nvp->nvram[i], "boardrev", 8) == 0)
+                               nvp->boardrev_found = true;
                        while (nvp->nvram[i] != 0) {
                                nvram[j] = nvp->nvram[i];
                                i++;
@@ -356,6 +365,18 @@ fail:
        nvp->nvram_len = 0;
 }
 
+static void brcmf_fw_add_defaults(struct nvram_parser *nvp)
+{
+       if (nvp->boardrev_found)
+               return;
+
+       memcpy(&nvp->nvram[nvp->nvram_len], &BRCMF_FW_DEFAULT_BOARDREV,
+              strlen(BRCMF_FW_DEFAULT_BOARDREV));
+       nvp->nvram_len += strlen(BRCMF_FW_DEFAULT_BOARDREV);
+       nvp->nvram[nvp->nvram_len] = '\0';
+       nvp->nvram_len++;
+}
+
/* brcmf_nvram_strip: Takes a buffer of "<var>=<value>\n" lines read from a file
  * and ending in a NUL. Removes carriage returns, empty lines, comment lines,
  * and converts newlines to NULs. Shortens buffer as needed and pads with NULs.
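
brcmf_fw_add_defaults() appends the literal key at the current end of the
parsed buffer; note that boardrev_found is re-armed before the multi-device
stripping passes (see the hunk that follows), since only the surviving
per-device section counts. Illustrative buffer tail when no boardrev key was
found (the ccode entry is hypothetical, the appended default is from the
patch):

    /* before: ... "ccode=X2\0"
     * after : ... "ccode=X2\0" "boardrev=0xff\0"
     */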
@@ -377,16 +398,21 @@ static void *brcmf_fw_nvram_strip(const u8 *data, size_t data_len,
                if (nvp.state == END)
                        break;
        }
-       if (nvp.multi_dev_v1)
+       if (nvp.multi_dev_v1) {
+               nvp.boardrev_found = false;
                brcmf_fw_strip_multi_v1(&nvp, domain_nr, bus_nr);
-       else if (nvp.multi_dev_v2)
+       } else if (nvp.multi_dev_v2) {
+               nvp.boardrev_found = false;
                brcmf_fw_strip_multi_v2(&nvp, domain_nr, bus_nr);
+       }
 
        if (nvp.nvram_len == 0) {
                kfree(nvp.nvram);
                return NULL;
        }
 
+       brcmf_fw_add_defaults(&nvp);
+
        pad = nvp.nvram_len;
        *new_length = roundup(nvp.nvram_len + 1, 4);
        while (pad != *new_length) {
index d414fbbcc81400630a881c8205ea3e410c627469..b390561255b3b7ae125a87ea5a5c00b60c442386 100644 (file)
@@ -371,6 +371,7 @@ int brcmf_fweh_activate_events(struct brcmf_if *ifp)
        int i, err;
        s8 eventmask[BRCMF_EVENTING_MASK_LEN];
 
+       memset(eventmask, 0, sizeof(eventmask));
        for (i = 0; i < BRCMF_E_LAST; i++) {
                if (ifp->drvr->fweh.evt_handler[i]) {
                        brcmf_dbg(EVENT, "enable event %s\n",
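
The added memset is a real fix: eventmask lives on the stack and the loop
below only sets bits for events that have a registered handler, so without
zero-initialisation the unused bits carried stack garbage into the firmware
event mask. Sketch of the failure mode (assuming the setbit helper from
brcmu_utils.h):

    s8 eventmask[BRCMF_EVENTING_MASK_LEN];   /* starts as stack garbage */

    memset(eventmask, 0, sizeof(eventmask)); /* the fix */
    setbit(eventmask, BRCMF_E_IF);           /* now only wanted bits set */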
index 6b72df17744e5d73b820c78b431cc0bbc5063737..3a9a76dd922265425802b1e7a49c5d33878c496c 100644 (file)
@@ -78,6 +78,7 @@
 #define BRCMF_C_SET_SCAN_CHANNEL_TIME          185
 #define BRCMF_C_SET_SCAN_UNASSOC_TIME          187
 #define BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON  201
+#define BRCMF_C_SET_ASSOC_PREFER               205
 #define BRCMF_C_GET_VALID_CHANNELS             217
 #define BRCMF_C_GET_KEY_PRIMARY                        235
 #define BRCMF_C_SET_KEY_PRIMARY                        236
index f82c9ab5480b33b39622ae81cf642c2f839c2ab5..5b30922b67ec27cbe698eb3325ee4c3373420546 100644 (file)
@@ -92,6 +92,19 @@ enum brcmf_fws_tlv_len {
 };
 #undef BRCMF_FWS_TLV_DEF
 
+/* AMPDU rx reordering definitions */
+#define BRCMF_RXREORDER_FLOWID_OFFSET          0
+#define BRCMF_RXREORDER_MAXIDX_OFFSET          2
+#define BRCMF_RXREORDER_FLAGS_OFFSET           4
+#define BRCMF_RXREORDER_CURIDX_OFFSET          6
+#define BRCMF_RXREORDER_EXPIDX_OFFSET          8
+
+#define BRCMF_RXREORDER_DEL_FLOW               0x01
+#define BRCMF_RXREORDER_FLUSH_ALL              0x02
+#define BRCMF_RXREORDER_CURIDX_VALID           0x04
+#define BRCMF_RXREORDER_EXPIDX_VALID           0x08
+#define BRCMF_RXREORDER_NEW_HOLE               0x10
+
 #ifdef DEBUG
 /*
  * brcmf_fws_tlv_names - array of tlv names.
@@ -1614,6 +1627,202 @@ static int brcmf_fws_notify_bcmc_credit_support(struct brcmf_if *ifp,
        return 0;
 }
 
+static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
+                                        u8 start, u8 end,
+                                        struct sk_buff_head *skb_list)
+{
+       /* initialize return list */
+       __skb_queue_head_init(skb_list);
+
+       if (rfi->pend_pkts == 0) {
+               brcmf_dbg(INFO, "no packets in reorder queue\n");
+               return;
+       }
+
+       do {
+               if (rfi->pktslots[start]) {
+                       __skb_queue_tail(skb_list, rfi->pktslots[start]);
+                       rfi->pktslots[start] = NULL;
+               }
+               start++;
+               if (start > rfi->max_idx)
+                       start = 0;
+       } while (start != end);
+       rfi->pend_pkts -= skb_queue_len(skb_list);
+}
+
+void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
+{
+       u8 *reorder_data;
+       u8 flow_id, max_idx, cur_idx, exp_idx, end_idx;
+       struct brcmf_ampdu_rx_reorder *rfi;
+       struct sk_buff_head reorder_list;
+       struct sk_buff *pnext;
+       u8 flags;
+       u32 buf_size;
+
+       reorder_data = ((struct brcmf_skb_reorder_data *)pkt->cb)->reorder;
+       flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET];
+       flags = reorder_data[BRCMF_RXREORDER_FLAGS_OFFSET];
+
+       /* validate flags and flow id */
+       if (flags == 0xFF) {
+               brcmf_err("invalid flags...so ignore this packet\n");
+               brcmf_netif_rx(ifp, pkt);
+               return;
+       }
+
+       rfi = ifp->drvr->reorder_flows[flow_id];
+       if (flags & BRCMF_RXREORDER_DEL_FLOW) {
+               brcmf_dbg(INFO, "flow-%d: delete\n", flow_id);
+
+               if (rfi == NULL) {
+                       brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
+                                 flow_id);
+                       brcmf_netif_rx(ifp, pkt);
+                       return;
+               }
+
+               brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx,
+                                            &reorder_list);
+               /* add the last packet */
+               __skb_queue_tail(&reorder_list, pkt);
+               kfree(rfi);
+               ifp->drvr->reorder_flows[flow_id] = NULL;
+               goto netif_rx;
+       }
+       /* from here on we need a flow reorder instance */
+       if (rfi == NULL) {
+               buf_size = sizeof(*rfi);
+               max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
+
+               buf_size += (max_idx + 1) * sizeof(pkt);
+
+               /* allocate space for flow reorder info */
+               brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n",
+                         flow_id, max_idx);
+               rfi = kzalloc(buf_size, GFP_ATOMIC);
+               if (rfi == NULL) {
+                       brcmf_err("failed to alloc buffer\n");
+                       brcmf_netif_rx(ifp, pkt);
+                       return;
+               }
+
+               ifp->drvr->reorder_flows[flow_id] = rfi;
+               rfi->pktslots = (struct sk_buff **)(rfi + 1);
+               rfi->max_idx = max_idx;
+       }
+       if (flags & BRCMF_RXREORDER_NEW_HOLE)  {
+               if (rfi->pend_pkts) {
+                       brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx,
+                                                    rfi->exp_idx,
+                                                    &reorder_list);
+                       WARN_ON(rfi->pend_pkts);
+               } else {
+                       __skb_queue_head_init(&reorder_list);
+               }
+               rfi->cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
+               rfi->exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
+               rfi->max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
+               rfi->pktslots[rfi->cur_idx] = pkt;
+               rfi->pend_pkts++;
+               brcmf_dbg(DATA, "flow-%d: new hole %d (%d), pending %d\n",
+                         flow_id, rfi->cur_idx, rfi->exp_idx, rfi->pend_pkts);
+       } else if (flags & BRCMF_RXREORDER_CURIDX_VALID) {
+               cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
+               exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
+
+               if ((exp_idx == rfi->exp_idx) && (cur_idx != rfi->exp_idx)) {
+                       /* still in the current hole */
+                       /* enqueue the current on the buffer chain */
+                       if (rfi->pktslots[cur_idx] != NULL) {
+                               brcmf_dbg(INFO, "HOLE: ERROR buffer pending..free it\n");
+                               brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
+                               rfi->pktslots[cur_idx] = NULL;
+                       }
+                       rfi->pktslots[cur_idx] = pkt;
+                       rfi->pend_pkts++;
+                       rfi->cur_idx = cur_idx;
+                       brcmf_dbg(DATA, "flow-%d: store pkt %d (%d), pending %d\n",
+                                 flow_id, cur_idx, exp_idx, rfi->pend_pkts);
+
+                       /* can return now as there is no reorder
+                        * list to process.
+                        */
+                       return;
+               }
+               if (rfi->exp_idx == cur_idx) {
+                       if (rfi->pktslots[cur_idx] != NULL) {
+                               brcmf_dbg(INFO, "error buffer pending..free it\n");
+                               brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
+                               rfi->pktslots[cur_idx] = NULL;
+                       }
+                       rfi->pktslots[cur_idx] = pkt;
+                       rfi->pend_pkts++;
+
+                       /* got the expected one. flush from current to expected
+                        * and update expected
+                        */
+                       brcmf_dbg(DATA, "flow-%d: expected %d (%d), pending %d\n",
+                                 flow_id, cur_idx, exp_idx, rfi->pend_pkts);
+
+                       rfi->cur_idx = cur_idx;
+                       rfi->exp_idx = exp_idx;
+
+                       brcmf_rxreorder_get_skb_list(rfi, cur_idx, exp_idx,
+                                                    &reorder_list);
+                       brcmf_dbg(DATA, "flow-%d: freeing buffers %d, pending %d\n",
+                                 flow_id, skb_queue_len(&reorder_list),
+                                 rfi->pend_pkts);
+               } else {
+                       u8 end_idx;
+
+                       brcmf_dbg(DATA, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n",
+                                 flow_id, flags, rfi->cur_idx, rfi->exp_idx,
+                                 cur_idx, exp_idx);
+                       if (flags & BRCMF_RXREORDER_FLUSH_ALL)
+                               end_idx = rfi->exp_idx;
+                       else
+                               end_idx = exp_idx;
+
+                       /* flush pkts first */
+                       brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
+                                                    &reorder_list);
+
+                       if (exp_idx == ((cur_idx + 1) % (rfi->max_idx + 1))) {
+                               __skb_queue_tail(&reorder_list, pkt);
+                       } else {
+                               rfi->pktslots[cur_idx] = pkt;
+                               rfi->pend_pkts++;
+                       }
+                       rfi->exp_idx = exp_idx;
+                       rfi->cur_idx = cur_idx;
+               }
+       } else {
+               /* explicit window move, updating the expected index */
+               exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
+
+               brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n",
+                         flow_id, flags, rfi->exp_idx, exp_idx);
+               if (flags & BRCMF_RXREORDER_FLUSH_ALL)
+                       end_idx =  rfi->exp_idx;
+               else
+                       end_idx =  exp_idx;
+
+               brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
+                                            &reorder_list);
+               __skb_queue_tail(&reorder_list, pkt);
+               /* set the new expected idx */
+               rfi->exp_idx = exp_idx;
+       }
+netif_rx:
+       skb_queue_walk_safe(&reorder_list, pkt, pnext) {
+               __skb_unlink(pkt, &reorder_list);
+               brcmf_netif_rx(ifp, pkt);
+       }
+}
+
 void brcmf_fws_hdrpull(struct brcmf_if *ifp, s16 siglen, struct sk_buff *skb)
 {
        struct brcmf_skb_reorder_data *rd;
index a36bac17eafdee9210b7a422740ac90d76298ef8..ef0ad8597c8a08796092a3567b9c61d6cb7d3f05 100644 (file)
@@ -29,5 +29,6 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp);
 void brcmf_fws_del_interface(struct brcmf_if *ifp);
 void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb);
 void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked);
+void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb);
 
 #endif /* FWSIGNAL_H_ */
index 922966734a7f3e8f48e4576b0e6066bc5b3c6fea..68f1ce02f4bf83d8d787e256cb5d74628cc4040e 100644 (file)
@@ -20,6 +20,7 @@
 
 #include <linux/types.h>
 #include <linux/netdevice.h>
+#include <linux/etherdevice.h>
 
 #include <brcmu_utils.h>
 #include <brcmu_wifi.h>
@@ -526,6 +527,9 @@ static int brcmf_msgbuf_hdrpull(struct brcmf_pub *drvr, bool do_fws,
        return -ENODEV;
 }
 
+static void brcmf_msgbuf_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
+{
+}
 
 static void
 brcmf_msgbuf_remove_flowring(struct brcmf_msgbuf *msgbuf, u16 flowid)
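
An empty implementation rather than a NULL pointer: brcmf_proto_rxreorder(),
added to proto.h later in this series, invokes the op unconditionally, so
every protocol must supply one; presumably the PCIe/msgbuf firmware already
delivers frames in order. A NULL-checked dispatch would also have worked, at
the cost of a branch per frame (hypothetical alternative, not what the patch
does):

    static inline void
    brcmf_proto_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
    {
            if (ifp->drvr->proto->rxreorder)
                    ifp->drvr->proto->rxreorder(ifp, skb);
    }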
@@ -1075,28 +1079,13 @@ static void brcmf_msgbuf_rxbuf_event_post(struct brcmf_msgbuf *msgbuf)
 }
 
 
-static void
-brcmf_msgbuf_rx_skb(struct brcmf_msgbuf *msgbuf, struct sk_buff *skb,
-                   u8 ifidx)
-{
-       struct brcmf_if *ifp;
-
-       ifp = brcmf_get_ifp(msgbuf->drvr, ifidx);
-       if (!ifp || !ifp->ndev) {
-               brcmf_err("Received pkt for invalid ifidx %d\n", ifidx);
-               brcmu_pkt_buf_free_skb(skb);
-               return;
-       }
-       brcmf_netif_rx(ifp, skb);
-}
-
-
 static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf)
 {
        struct msgbuf_rx_event *event;
        u32 idx;
        u16 buflen;
        struct sk_buff *skb;
+       struct brcmf_if *ifp;
 
        event = (struct msgbuf_rx_event *)buf;
        idx = le32_to_cpu(event->msg.request_id);
@@ -1116,7 +1105,19 @@ static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf)
 
        skb_trim(skb, buflen);
 
-       brcmf_msgbuf_rx_skb(msgbuf, skb, event->msg.ifidx);
+       ifp = brcmf_get_ifp(msgbuf->drvr, event->msg.ifidx);
+       if (!ifp || !ifp->ndev) {
+               brcmf_err("Received pkt for invalid ifidx %d\n",
+                         event->msg.ifidx);
+               goto exit;
+       }
+
+       skb->protocol = eth_type_trans(skb, ifp->ndev);
+
+       brcmf_fweh_process_skb(ifp->drvr, skb);
+
+exit:
+       brcmu_pkt_buf_free_skb(skb);
 }
 
 
@@ -1128,6 +1129,7 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
        u16 data_offset;
        u16 buflen;
        u32 idx;
+       struct brcmf_if *ifp;
 
        brcmf_msgbuf_update_rxbufpost_count(msgbuf, 1);
 
@@ -1148,7 +1150,14 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
 
        skb_trim(skb, buflen);
 
-       brcmf_msgbuf_rx_skb(msgbuf, skb, rx_complete->msg.ifidx);
+       ifp = brcmf_get_ifp(msgbuf->drvr, rx_complete->msg.ifidx);
+       if (!ifp || !ifp->ndev) {
+               brcmf_err("Received pkt for invalid ifidx %d\n",
+                         rx_complete->msg.ifidx);
+               brcmu_pkt_buf_free_skb(skb);
+               return;
+       }
+       brcmf_netif_rx(ifp, skb);
 }
 
 
@@ -1460,6 +1469,7 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
        drvr->proto->configure_addr_mode = brcmf_msgbuf_configure_addr_mode;
        drvr->proto->delete_peer = brcmf_msgbuf_delete_peer;
        drvr->proto->add_tdls_peer = brcmf_msgbuf_add_tdls_peer;
+       drvr->proto->rxreorder = brcmf_msgbuf_rxreorder;
        drvr->proto->pd = msgbuf;
 
        init_waitqueue_head(&msgbuf->ioctl_resp_wait);
index b5a49e564f25567fcc218a114302bd5c7ad5318b..a70cda6c05920811a84605985a2024aa0b10b792 100644 (file)
@@ -1266,7 +1266,7 @@ static void
 brcmf_p2p_stop_wait_next_action_frame(struct brcmf_cfg80211_info *cfg)
 {
        struct brcmf_p2p_info *p2p = &cfg->p2p;
-       struct brcmf_if *ifp = cfg->escan_info.ifp;
+       struct brcmf_if *ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
 
        if (test_bit(BRCMF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status) &&
            (test_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status) ||
@@ -1430,8 +1430,8 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
 
        freq = ieee80211_channel_to_frequency(ch.chnum,
                                              ch.band == BRCMU_CHAN_BAND_2G ?
-                                             IEEE80211_BAND_2GHZ :
-                                             IEEE80211_BAND_5GHZ);
+                                             NL80211_BAND_2GHZ :
+                                             NL80211_BAND_5GHZ);
 
        wdev = &ifp->vif->wdev;
        cfg80211_rx_mgmt(wdev, freq, 0, (u8 *)mgmt_frame, mgmt_frame_len, 0);
@@ -1900,8 +1900,8 @@ s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp,
        mgmt_frame_len = e->datalen - sizeof(*rxframe);
        freq = ieee80211_channel_to_frequency(ch.chnum,
                                              ch.band == BRCMU_CHAN_BAND_2G ?
-                                             IEEE80211_BAND_2GHZ :
-                                             IEEE80211_BAND_5GHZ);
+                                             NL80211_BAND_2GHZ :
+                                             NL80211_BAND_5GHZ);
 
        cfg80211_rx_mgmt(&vif->wdev, freq, 0, mgmt_frame, mgmt_frame_len, 0);
 
index d55119d3675516ec79dbcc66af54f202d9693bf8..57531f42190ef21715f980abf862b084fbb2ee22 100644 (file)
@@ -22,6 +22,9 @@ enum proto_addr_mode {
        ADDR_DIRECT
 };
 
+struct brcmf_skb_reorder_data {
+       u8 *reorder;
+};
 
 struct brcmf_proto {
        int (*hdrpull)(struct brcmf_pub *drvr, bool do_fws,
@@ -38,6 +41,7 @@ struct brcmf_proto {
                            u8 peer[ETH_ALEN]);
        void (*add_tdls_peer)(struct brcmf_pub *drvr, int ifidx,
                              u8 peer[ETH_ALEN]);
+       void (*rxreorder)(struct brcmf_if *ifp, struct sk_buff *skb);
        void *pd;
 };
 
@@ -91,6 +95,18 @@ brcmf_proto_add_tdls_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
 {
        drvr->proto->add_tdls_peer(drvr, ifidx, peer);
 }
+static inline bool brcmf_proto_is_reorder_skb(struct sk_buff *skb)
+{
+       struct brcmf_skb_reorder_data *rd;
+
+       rd = (struct brcmf_skb_reorder_data *)skb->cb;
+       return !!rd->reorder;
+}
 
+static inline void
+brcmf_proto_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
+{
+       ifp->drvr->proto->rxreorder(ifp, skb);
+}
 
 #endif /* BRCMFMAC_PROTO_H */
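
This op is what lets the core receive path stay protocol-agnostic: the BCDC
side (SDIO/USB) wires rxreorder to the fwsignal implementation moved earlier
in this diff, while msgbuf installs its empty stub. The wiring, sketched
(the BCDC line is an assumption based on the msgbuf one shown above):

    /* bcdc attach (assumed): */
    drvr->proto->rxreorder = brcmf_fws_rxreorder;
    /* msgbuf attach (from this diff): */
    drvr->proto->rxreorder = brcmf_msgbuf_rxreorder;   /* no-op */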
index 43fd3f402ebad89f89f7d5f8afdce9613d87b5de..4252fa82b89c64ec8758fde328fbda1754d09aca 100644 (file)
@@ -535,9 +535,6 @@ static int qcount[NUMPRIO];
 
 #define RETRYCHAN(chan) ((chan) == SDPCM_EVENT_CHANNEL)
 
-/* Retry count for register access failures */
-static const uint retry_limit = 2;
-
 /* Limit on rounding up frames */
 static const uint max_roundup = 512;
 
@@ -1297,6 +1294,17 @@ static inline u8 brcmf_sdio_getdatoffset(u8 *swheader)
        return (u8)((hdrvalue & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT);
 }
 
+static inline bool brcmf_sdio_fromevntchan(u8 *swheader)
+{
+       u32 hdrvalue;
+       u8 ret;
+
+       hdrvalue = *(u32 *)swheader;
+       ret = (u8)((hdrvalue & SDPCM_CHANNEL_MASK) >> SDPCM_CHANNEL_SHIFT);
+
+       return (ret == SDPCM_EVENT_CHANNEL);
+}
+
 static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
                              struct brcmf_sdio_hdrinfo *rd,
                              enum brcmf_sdio_frmtype type)
@@ -1644,7 +1652,11 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
                                           pfirst->len, pfirst->next,
                                           pfirst->prev);
                        skb_unlink(pfirst, &bus->glom);
-                       brcmf_rx_frame(bus->sdiodev->dev, pfirst);
+                       if (brcmf_sdio_fromevntchan(pfirst->data))
+                               brcmf_rx_event(bus->sdiodev->dev, pfirst);
+                       else
+                               brcmf_rx_frame(bus->sdiodev->dev, pfirst,
+                                              false);
                        bus->sdcnt.rxglompkts++;
                }
 
@@ -1970,18 +1982,19 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
                __skb_trim(pkt, rd->len);
                skb_pull(pkt, rd->dat_offset);
 
+               if (pkt->len == 0)
+                       brcmu_pkt_buf_free_skb(pkt);
+               else if (rd->channel == SDPCM_EVENT_CHANNEL)
+                       brcmf_rx_event(bus->sdiodev->dev, pkt);
+               else
+                       brcmf_rx_frame(bus->sdiodev->dev, pkt,
+                                      false);
+
                /* prepare the descriptor for the next read */
                rd->len = rd->len_nxtfrm << 4;
                rd->len_nxtfrm = 0;
                /* treat all packet as event if we don't know */
                rd->channel = SDPCM_EVENT_CHANNEL;
-
-               if (pkt->len == 0) {
-                       brcmu_pkt_buf_free_skb(pkt);
-                       continue;
-               }
-
-               brcmf_rx_frame(bus->sdiodev->dev, pkt);
        }
 
        rxcount = maxframes - rxleft;
@@ -3261,7 +3274,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
                                        const struct firmware *fw,
                                        void *nvram, u32 nvlen)
 {
-       int bcmerror = -EFAULT;
+       int bcmerror;
        u32 rstvec;
 
        sdio_claim_host(bus->sdiodev->func[1]);
index 869eb82db8b1a69c80c55a1fc4c7c702e043866e..98b15a9a2779f450b9bfbf760c734c870c7857fe 100644 (file)
@@ -514,7 +514,7 @@ static void brcmf_usb_rx_complete(struct urb *urb)
 
        if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) {
                skb_put(skb, urb->actual_length);
-               brcmf_rx_frame(devinfo->dev, skb);
+               brcmf_rx_frame(devinfo->dev, skb, true);
                brcmf_usb_rx_refill(devinfo, req);
        } else {
                brcmu_pkt_buf_free_skb(skb);
@@ -1368,7 +1368,9 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
 
        devinfo->ifnum = desc->bInterfaceNumber;
 
-       if (usb->speed == USB_SPEED_SUPER)
+       if (usb->speed == USB_SPEED_SUPER_PLUS)
+               brcmf_dbg(USB, "Broadcom super speed plus USB WLAN interface detected\n");
+       else if (usb->speed == USB_SPEED_SUPER)
                brcmf_dbg(USB, "Broadcom super speed USB WLAN interface detected\n");
        else if (usb->speed == USB_SPEED_HIGH)
                brcmf_dbg(USB, "Broadcom high speed USB WLAN interface detected\n");
index 38bd5890bd53e8f4cb3b1f335004197423583f9a..3a03287fa9122860db4ba371324699bfea344326 100644 (file)
@@ -636,7 +636,7 @@ static void brcms_reg_apply_radar_flags(struct wiphy *wiphy)
        struct ieee80211_channel *ch;
        int i;
 
-       sband = wiphy->bands[IEEE80211_BAND_5GHZ];
+       sband = wiphy->bands[NL80211_BAND_5GHZ];
        if (!sband)
                return;
 
@@ -666,7 +666,7 @@ brcms_reg_apply_beaconing_flags(struct wiphy *wiphy,
        const struct ieee80211_reg_rule *rule;
        int band, i;
 
-       for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+       for (band = 0; band < NUM_NL80211_BANDS; band++) {
                sband = wiphy->bands[band];
                if (!sband)
                        continue;
@@ -710,7 +710,7 @@ static void brcms_reg_notifier(struct wiphy *wiphy,
                brcms_reg_apply_beaconing_flags(wiphy, request->initiator);
 
        /* Disable radio if all channels disallowed by regulatory */
-       for (band = 0; !ch_found && band < IEEE80211_NUM_BANDS; band++) {
+       for (band = 0; !ch_found && band < NUM_NL80211_BANDS; band++) {
                sband = wiphy->bands[band];
                if (!sband)
                        continue;
@@ -755,9 +755,9 @@ void brcms_c_regd_init(struct brcms_c_info *wlc)
                                              &sup_chan);
 
                if (band_idx == BAND_2G_INDEX)
-                       sband = wiphy->bands[IEEE80211_BAND_2GHZ];
+                       sband = wiphy->bands[NL80211_BAND_2GHZ];
                else
-                       sband = wiphy->bands[IEEE80211_BAND_5GHZ];
+                       sband = wiphy->bands[NL80211_BAND_5GHZ];
 
                for (i = 0; i < sband->n_channels; i++) {
                        ch = &sband->channels[i];
index 61ae2768132a0b16f0f98a57b3319c82adfda95b..7c2a9a9bc372c19502d3bf1d7091ccbd4654b061 100644 (file)
@@ -49,7 +49,7 @@
        FIF_PSPOLL)
 
#define CHAN2GHZ(channel, frequency, chflags)  { \
-       .band = IEEE80211_BAND_2GHZ, \
+       .band = NL80211_BAND_2GHZ, \
        .center_freq = (frequency), \
        .hw_value = (channel), \
        .flags = chflags, \
@@ -58,7 +58,7 @@
 }
 
 #define CHAN5GHZ(channel, chflags)  { \
-       .band = IEEE80211_BAND_5GHZ, \
+       .band = NL80211_BAND_5GHZ, \
        .center_freq = 5000 + 5*(channel), \
        .hw_value = (channel), \
        .flags = chflags, \
@@ -217,7 +217,7 @@ static struct ieee80211_rate legacy_ratetable[] = {
 };
 
 static const struct ieee80211_supported_band brcms_band_2GHz_nphy_template = {
-       .band = IEEE80211_BAND_2GHZ,
+       .band = NL80211_BAND_2GHZ,
        .channels = brcms_2ghz_chantable,
        .n_channels = ARRAY_SIZE(brcms_2ghz_chantable),
        .bitrates = legacy_ratetable,
@@ -238,7 +238,7 @@ static const struct ieee80211_supported_band brcms_band_2GHz_nphy_template = {
 };
 
 static const struct ieee80211_supported_band brcms_band_5GHz_nphy_template = {
-       .band = IEEE80211_BAND_5GHZ,
+       .band = NL80211_BAND_5GHZ,
        .channels = brcms_5ghz_nphy_chantable,
        .n_channels = ARRAY_SIZE(brcms_5ghz_nphy_chantable),
        .bitrates = legacy_ratetable + BRCMS_LEGACY_5G_RATE_OFFSET,
@@ -1026,8 +1026,8 @@ static int ieee_hw_rate_init(struct ieee80211_hw *hw)
        int has_5g = 0;
        u16 phy_type;
 
-       hw->wiphy->bands[IEEE80211_BAND_2GHZ] = NULL;
-       hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
+       hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
+       hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL;
 
        phy_type = brcms_c_get_phy_type(wl->wlc, 0);
        if (phy_type == PHY_TYPE_N || phy_type == PHY_TYPE_LCN) {
@@ -1038,7 +1038,7 @@ static int ieee_hw_rate_init(struct ieee80211_hw *hw)
                        band->ht_cap.mcs.rx_mask[1] = 0;
                        band->ht_cap.mcs.rx_highest = cpu_to_le16(72);
                }
-               hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band;
+               hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
        } else {
                return -EPERM;
        }
@@ -1049,7 +1049,7 @@ static int ieee_hw_rate_init(struct ieee80211_hw *hw)
                if (phy_type == PHY_TYPE_N || phy_type == PHY_TYPE_LCN) {
                        band = &wlc->bandstate[BAND_5G_INDEX]->band;
                        *band = brcms_band_5GHz_nphy_template;
-                       hw->wiphy->bands[IEEE80211_BAND_5GHZ] = band;
+                       hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
                } else {
                        return -EPERM;
                }
index 218cbc8bf3a7c329dda5022de8553e243707ce98..e16ee60639f5673a5494c0f361a6538414403735 100644 (file)
@@ -7076,7 +7076,7 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
        channel = BRCMS_CHAN_CHANNEL(rxh->RxChan);
 
        rx_status->band =
-               channel > 14 ? IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
+               channel > 14 ? NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
        rx_status->freq =
                ieee80211_channel_to_frequency(channel, rx_status->band);
 
@@ -7143,7 +7143,7 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
                 * a subset of the 2.4G rates. See bitrates field
                 * of brcms_band_5GHz_nphy (in mac80211_if.c).
                 */
-               if (rx_status->band == IEEE80211_BAND_5GHZ)
+               if (rx_status->band == NL80211_BAND_5GHZ)
                        rx_status->rate_idx -= BRCMS_LEGACY_5G_RATE_OFFSET;
 
                /* Determine short preamble and rate_idx */
index d2353f6e52140bf5b9a3de656859afc34221f7c6..55456f7502293252aa107706f56562dc0b5ce59b 100644 (file)
@@ -2026,7 +2026,7 @@ static int mpi_send_packet (struct net_device *dev)
        } else {
                *payloadLen = cpu_to_le16(len - sizeof(etherHead));
 
-               dev->trans_start = jiffies;
+               netif_trans_update(dev);
 
                /* copy data into airo dma buffer */
                memcpy(sendbuf, buffer, len);
@@ -2107,7 +2107,7 @@ static void airo_end_xmit(struct net_device *dev) {
 
        i = 0;
        if ( status == SUCCESS ) {
-               dev->trans_start = jiffies;
+               netif_trans_update(dev);
                for (; i < MAX_FIDS / 2 && (priv->fids[i] & 0xffff0000); i++);
        } else {
                priv->fids[fid] &= 0xffff;
@@ -2174,7 +2174,7 @@ static void airo_end_xmit11(struct net_device *dev) {
 
        i = MAX_FIDS / 2;
        if ( status == SUCCESS ) {
-               dev->trans_start = jiffies;
+               netif_trans_update(dev);
                for (; i < MAX_FIDS && (priv->fids[i] & 0xffff0000); i++);
        } else {
                priv->fids[fid] &= 0xffff;
@@ -5836,7 +5836,7 @@ static int airo_get_freq(struct net_device *dev,
        ch = le16_to_cpu(status_rid.channel);
        if((ch > 0) && (ch < 15)) {
                fwrq->m = 100000 *
-                       ieee80211_channel_to_frequency(ch, IEEE80211_BAND_2GHZ);
+                       ieee80211_channel_to_frequency(ch, NL80211_BAND_2GHZ);
                fwrq->e = 1;
        } else {
                fwrq->m = ch;
@@ -6894,7 +6894,7 @@ static int airo_get_range(struct net_device *dev,
        for(i = 0; i < 14; i++) {
                range->freq[k].i = i + 1; /* List index */
                range->freq[k].m = 100000 *
-                    ieee80211_channel_to_frequency(i + 1, IEEE80211_BAND_2GHZ);
+                    ieee80211_channel_to_frequency(i + 1, NL80211_BAND_2GHZ);
                range->freq[k++].e = 1; /* Values in MHz -> * 10^5 * 10 */
        }
        range->num_frequency = k;
@@ -7302,7 +7302,7 @@ static inline char *airo_translate_scan(struct net_device *dev,
        iwe.cmd = SIOCGIWFREQ;
        iwe.u.freq.m = le16_to_cpu(bss->dsChannel);
        iwe.u.freq.m = 100000 *
-             ieee80211_channel_to_frequency(iwe.u.freq.m, IEEE80211_BAND_2GHZ);
+             ieee80211_channel_to_frequency(iwe.u.freq.m, NL80211_BAND_2GHZ);
        iwe.u.freq.e = 1;
        current_ev = iwe_stream_add_event(info, current_ev, end_buf,
                                          &iwe, IW_EV_FREQ_LEN);
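
The airo.c hunks also convert the open-coded watchdog refresh, dev->trans_start = jiffies, to the netif_trans_update() helper. At this point in the tree the helper timestamps TX queue 0 and skips the store when the value is unchanged, avoiding needless dirtying of a shared cache line; roughly (a sketch of the netdevice.h helper, not quoted verbatim):

    static inline void netif_trans_update(struct net_device *dev)
    {
            struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

            if (txq->trans_start != jiffies)
                    txq->trans_start = jiffies;
    }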
index f93a7f71c0472b9af0362454e4b88f55c95657ec..bfa542c8d6f1a2b4f98ac2c419326ceb3ac7a17d 100644 (file)
@@ -1913,7 +1913,7 @@ static int ipw2100_wdev_init(struct net_device *dev)
        if (geo->bg_channels) {
                struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
 
-               bg_band->band = IEEE80211_BAND_2GHZ;
+               bg_band->band = NL80211_BAND_2GHZ;
                bg_band->n_channels = geo->bg_channels;
                bg_band->channels = kcalloc(geo->bg_channels,
                                            sizeof(struct ieee80211_channel),
@@ -1924,7 +1924,7 @@ static int ipw2100_wdev_init(struct net_device *dev)
                }
                /* translate geo->bg to bg_band.channels */
                for (i = 0; i < geo->bg_channels; i++) {
-                       bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
+                       bg_band->channels[i].band = NL80211_BAND_2GHZ;
                        bg_band->channels[i].center_freq = geo->bg[i].freq;
                        bg_band->channels[i].hw_value = geo->bg[i].channel;
                        bg_band->channels[i].max_power = geo->bg[i].max_power;
@@ -1945,7 +1945,7 @@ static int ipw2100_wdev_init(struct net_device *dev)
                bg_band->bitrates = ipw2100_bg_rates;
                bg_band->n_bitrates = RATE_COUNT;
 
-               wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
+               wdev->wiphy->bands[NL80211_BAND_2GHZ] = bg_band;
        }
 
        wdev->wiphy->cipher_suites = ipw_cipher_suites;
@@ -2954,7 +2954,7 @@ static int __ipw2100_tx_process(struct ipw2100_priv *priv)
 
                /* A packet was processed by the hardware, so update the
                 * watchdog */
-               priv->net_dev->trans_start = jiffies;
+               netif_trans_update(priv->net_dev);
 
                break;
 
@@ -3521,7 +3521,7 @@ static void ipw2100_msg_free(struct ipw2100_priv *priv)
 static ssize_t show_pci(struct device *d, struct device_attribute *attr,
                        char *buf)
 {
-       struct pci_dev *pci_dev = container_of(d, struct pci_dev, dev);
+       struct pci_dev *pci_dev = to_pci_dev(d);
        char *out = buf;
        int i, j;
        u32 val;
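
The container_of() conversion above is equally mechanical: to_pci_dev() is the canonical accessor from <linux/pci.h> and expands to the same expression:

    #define to_pci_dev(n) container_of(n, struct pci_dev, dev)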
index ed0adaf1eec445defe056f8509adc6b54b708518..5adb7cefb2feb3804cd1065f0ebd30daf7699954 100644 (file)
@@ -7707,7 +7707,7 @@ static void ipw_handle_data_packet(struct ipw_priv *priv,
        struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
 
        /* We received data from the HW, so stop the watchdog */
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
 
        /* We only process data packets if the
         * interface is open */
@@ -7770,7 +7770,7 @@ static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
        unsigned short len = le16_to_cpu(pkt->u.frame.length);
 
        /* We received data from the HW, so stop the watchdog */
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
 
        /* We only process data packets if the
         * interface is open */
@@ -7952,7 +7952,7 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
                return;
 
        /* We received data from the HW, so stop the watchdog */
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
 
        if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
                dev->stats.rx_errors++;
@@ -11359,7 +11359,7 @@ static int ipw_wdev_init(struct net_device *dev)
        if (geo->bg_channels) {
                struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
 
-               bg_band->band = IEEE80211_BAND_2GHZ;
+               bg_band->band = NL80211_BAND_2GHZ;
                bg_band->n_channels = geo->bg_channels;
                bg_band->channels = kcalloc(geo->bg_channels,
                                            sizeof(struct ieee80211_channel),
@@ -11370,7 +11370,7 @@ static int ipw_wdev_init(struct net_device *dev)
                }
                /* translate geo->bg to bg_band.channels */
                for (i = 0; i < geo->bg_channels; i++) {
-                       bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
+                       bg_band->channels[i].band = NL80211_BAND_2GHZ;
                        bg_band->channels[i].center_freq = geo->bg[i].freq;
                        bg_band->channels[i].hw_value = geo->bg[i].channel;
                        bg_band->channels[i].max_power = geo->bg[i].max_power;
@@ -11391,14 +11391,14 @@ static int ipw_wdev_init(struct net_device *dev)
                bg_band->bitrates = ipw2200_bg_rates;
                bg_band->n_bitrates = ipw2200_num_bg_rates;
 
-               wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
+               wdev->wiphy->bands[NL80211_BAND_2GHZ] = bg_band;
        }
 
        /* fill-out priv->ieee->a_band */
        if (geo->a_channels) {
                struct ieee80211_supported_band *a_band = &priv->ieee->a_band;
 
-               a_band->band = IEEE80211_BAND_5GHZ;
+               a_band->band = NL80211_BAND_5GHZ;
                a_band->n_channels = geo->a_channels;
                a_band->channels = kcalloc(geo->a_channels,
                                           sizeof(struct ieee80211_channel),
@@ -11409,7 +11409,7 @@ static int ipw_wdev_init(struct net_device *dev)
                }
                /* translate geo->a to a_band.channels */
                for (i = 0; i < geo->a_channels; i++) {
-                       a_band->channels[i].band = IEEE80211_BAND_5GHZ;
+                       a_band->channels[i].band = NL80211_BAND_5GHZ;
                        a_band->channels[i].center_freq = geo->a[i].freq;
                        a_band->channels[i].hw_value = geo->a[i].channel;
                        a_band->channels[i].max_power = geo->a[i].max_power;
@@ -11430,7 +11430,7 @@ static int ipw_wdev_init(struct net_device *dev)
                a_band->bitrates = ipw2200_a_rates;
                a_band->n_bitrates = ipw2200_num_a_rates;
 
-               wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band;
+               wdev->wiphy->bands[NL80211_BAND_5GHZ] = a_band;
        }
 
        wdev->wiphy->cipher_suites = ipw_cipher_suites;
index af1b3e6839fa6db3e69e90bee7a50923f1d0230e..466912eb2d874a667fff2394022a748f943d3712 100644 (file)
@@ -1547,7 +1547,7 @@ il3945_irq_tasklet(struct il_priv *il)
 }
 
 static int
-il3945_get_channels_for_scan(struct il_priv *il, enum ieee80211_band band,
+il3945_get_channels_for_scan(struct il_priv *il, enum nl80211_band band,
                             u8 is_active, u8 n_probes,
                             struct il3945_scan_channel *scan_ch,
                             struct ieee80211_vif *vif)
@@ -1618,7 +1618,7 @@ il3945_get_channels_for_scan(struct il_priv *il, enum ieee80211_band band,
                /* scan_pwr_info->tpc.dsp_atten; */
 
                /*scan_pwr_info->tpc.tx_gain; */
-               if (band == IEEE80211_BAND_5GHZ)
+               if (band == NL80211_BAND_5GHZ)
                        scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
                else {
                        scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
@@ -2534,7 +2534,7 @@ il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
        };
        struct il3945_scan_cmd *scan;
        u8 n_probes = 0;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        bool is_active = false;
        int ret;
        u16 len;
@@ -2615,14 +2615,14 @@ il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
        /* flags + rate selection */
 
        switch (il->scan_band) {
-       case IEEE80211_BAND_2GHZ:
+       case NL80211_BAND_2GHZ:
                scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
                scan->tx_cmd.rate = RATE_1M_PLCP;
-               band = IEEE80211_BAND_2GHZ;
+               band = NL80211_BAND_2GHZ;
                break;
-       case IEEE80211_BAND_5GHZ:
+       case NL80211_BAND_5GHZ:
                scan->tx_cmd.rate = RATE_6M_PLCP;
-               band = IEEE80211_BAND_5GHZ;
+               band = NL80211_BAND_5GHZ;
                break;
        default:
                IL_WARN("Invalid scan band\n");
@@ -3507,7 +3507,7 @@ il3945_init_drv(struct il_priv *il)
 
        il->ieee_channels = NULL;
        il->ieee_rates = NULL;
-       il->band = IEEE80211_BAND_2GHZ;
+       il->band = NL80211_BAND_2GHZ;
 
        il->iw_mode = NL80211_IFTYPE_STATION;
        il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
@@ -3582,13 +3582,13 @@ il3945_setup_mac(struct il_priv *il)
        /* Default value; 4 EDCA QOS priorities */
        hw->queues = 4;
 
-       if (il->bands[IEEE80211_BAND_2GHZ].n_channels)
-               il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
-                   &il->bands[IEEE80211_BAND_2GHZ];
+       if (il->bands[NL80211_BAND_2GHZ].n_channels)
+               il->hw->wiphy->bands[NL80211_BAND_2GHZ] =
+                   &il->bands[NL80211_BAND_2GHZ];
 
-       if (il->bands[IEEE80211_BAND_5GHZ].n_channels)
-               il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
-                   &il->bands[IEEE80211_BAND_5GHZ];
+       if (il->bands[NL80211_BAND_5GHZ].n_channels)
+               il->hw->wiphy->bands[NL80211_BAND_5GHZ] =
+                   &il->bands[NL80211_BAND_5GHZ];
 
        il_leds_init(il);
 
@@ -3761,7 +3761,7 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto out_release_irq;
        }
 
-       il_set_rxon_channel(il, &il->bands[IEEE80211_BAND_2GHZ].channels[5]);
+       il_set_rxon_channel(il, &il->bands[NL80211_BAND_2GHZ].channels[5]);
        il3945_setup_deferred_work(il);
        il3945_setup_handlers(il);
        il_power_initialize(il);
index 76b0729ade17ee2d18cc42df934f286762c2c155..03ad9b8b55f4e2e6052a1ae8b9b99bb29d2662f4 100644 (file)
@@ -97,7 +97,7 @@ static struct il3945_tpt_entry il3945_tpt_table_g[] = {
 #define RATE_RETRY_TH          15
 
 static u8
-il3945_get_rate_idx_by_rssi(s32 rssi, enum ieee80211_band band)
+il3945_get_rate_idx_by_rssi(s32 rssi, enum nl80211_band band)
 {
        u32 idx = 0;
        u32 table_size = 0;
@@ -107,11 +107,11 @@ il3945_get_rate_idx_by_rssi(s32 rssi, enum ieee80211_band band)
                rssi = IL_MIN_RSSI_VAL;
 
        switch (band) {
-       case IEEE80211_BAND_2GHZ:
+       case NL80211_BAND_2GHZ:
                tpt_table = il3945_tpt_table_g;
                table_size = ARRAY_SIZE(il3945_tpt_table_g);
                break;
-       case IEEE80211_BAND_5GHZ:
+       case NL80211_BAND_5GHZ:
                tpt_table = il3945_tpt_table_a;
                table_size = ARRAY_SIZE(il3945_tpt_table_a);
                break;
@@ -380,7 +380,7 @@ il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
 
        il->_3945.sta_supp_rates = sta->supp_rates[sband->band];
        /* For 5 GHz band it starts at IL_FIRST_OFDM_RATE */
-       if (sband->band == IEEE80211_BAND_5GHZ) {
+       if (sband->band == NL80211_BAND_5GHZ) {
                rs_sta->last_txrate_idx += IL_FIRST_OFDM_RATE;
                il->_3945.sta_supp_rates <<= IL_FIRST_OFDM_RATE;
        }
@@ -541,7 +541,7 @@ il3945_rs_tx_status(void *il_rate, struct ieee80211_supported_band *sband,
 
 static u16
 il3945_get_adjacent_rate(struct il3945_rs_sta *rs_sta, u8 idx, u16 rate_mask,
-                        enum ieee80211_band band)
+                        enum nl80211_band band)
 {
        u8 high = RATE_INVALID;
        u8 low = RATE_INVALID;
@@ -549,7 +549,7 @@ il3945_get_adjacent_rate(struct il3945_rs_sta *rs_sta, u8 idx, u16 rate_mask,
 
        /* 802.11A walks to the next literal adjacent rate in
         * the rate table */
-       if (unlikely(band == IEEE80211_BAND_5GHZ)) {
+       if (unlikely(band == NL80211_BAND_5GHZ)) {
                int i;
                u32 mask;
 
@@ -657,14 +657,14 @@ il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
 
        /* get user max rate if set */
        max_rate_idx = txrc->max_rate_idx;
-       if (sband->band == IEEE80211_BAND_5GHZ && max_rate_idx != -1)
+       if (sband->band == NL80211_BAND_5GHZ && max_rate_idx != -1)
                max_rate_idx += IL_FIRST_OFDM_RATE;
        if (max_rate_idx < 0 || max_rate_idx >= RATE_COUNT)
                max_rate_idx = -1;
 
        idx = min(rs_sta->last_txrate_idx & 0xffff, RATE_COUNT_3945 - 1);
 
-       if (sband->band == IEEE80211_BAND_5GHZ)
+       if (sband->band == NL80211_BAND_5GHZ)
                rate_mask = rate_mask << IL_FIRST_OFDM_RATE;
 
        spin_lock_irqsave(&rs_sta->lock, flags);
@@ -806,7 +806,7 @@ il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
 
 out:
 
-       if (sband->band == IEEE80211_BAND_5GHZ) {
+       if (sband->band == NL80211_BAND_5GHZ) {
                if (WARN_ON_ONCE(idx < IL_FIRST_OFDM_RATE))
                        idx = IL_FIRST_OFDM_RATE;
                rs_sta->last_txrate_idx = idx;
@@ -935,7 +935,7 @@ il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
 
        rs_sta->tgg = 0;
        switch (il->band) {
-       case IEEE80211_BAND_2GHZ:
+       case NL80211_BAND_2GHZ:
                /* TODO: this always does G, not a regression */
                if (il->active.flags & RXON_FLG_TGG_PROTECT_MSK) {
                        rs_sta->tgg = 1;
@@ -943,7 +943,7 @@ il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
                } else
                        rs_sta->expected_tpt = il3945_expected_tpt_g;
                break;
-       case IEEE80211_BAND_5GHZ:
+       case NL80211_BAND_5GHZ:
                rs_sta->expected_tpt = il3945_expected_tpt_a;
                break;
        default:
index 93bdf684babe36392daa84eca81e7d52a529d8a5..7bcedbb53d9482ecbe6462aefcf47608df946ec8 100644 (file)
@@ -255,13 +255,13 @@ il3945_rs_next_rate(struct il_priv *il, int rate)
        int next_rate = il3945_get_prev_ieee_rate(rate);
 
        switch (il->band) {
-       case IEEE80211_BAND_5GHZ:
+       case NL80211_BAND_5GHZ:
                if (rate == RATE_12M_IDX)
                        next_rate = RATE_9M_IDX;
                else if (rate == RATE_6M_IDX)
                        next_rate = RATE_6M_IDX;
                break;
-       case IEEE80211_BAND_2GHZ:
+       case NL80211_BAND_2GHZ:
                if (!(il->_3945.sta_supp_rates & IL_OFDM_RATES_MASK) &&
                    il_is_associated(il)) {
                        if (rate == RATE_11M_IDX)
@@ -349,7 +349,7 @@ il3945_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
 
        /* Fill the MRR chain with some info about on-chip retransmissions */
        rate_idx = il3945_hwrate_to_plcp_idx(tx_resp->rate);
-       if (info->band == IEEE80211_BAND_5GHZ)
+       if (info->band == NL80211_BAND_5GHZ)
                rate_idx -= IL_FIRST_OFDM_RATE;
 
        fail = tx_resp->failure_frame;
@@ -554,14 +554,14 @@ il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
        rx_status.mactime = le64_to_cpu(rx_end->timestamp);
        rx_status.band =
            (rx_hdr->
-            phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ :
-           IEEE80211_BAND_5GHZ;
+            phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? NL80211_BAND_2GHZ :
+           NL80211_BAND_5GHZ;
        rx_status.freq =
            ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
                                           rx_status.band);
 
        rx_status.rate_idx = il3945_hwrate_to_plcp_idx(rx_hdr->rate);
-       if (rx_status.band == IEEE80211_BAND_5GHZ)
+       if (rx_status.band == NL80211_BAND_5GHZ)
                rx_status.rate_idx -= IL_FIRST_OFDM_RATE;
 
        rx_status.antenna =
@@ -1409,7 +1409,7 @@ il3945_send_tx_power(struct il_priv *il)
 
        chan = le16_to_cpu(il->active.channel);
 
-       txpower.band = (il->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
+       txpower.band = (il->band == NL80211_BAND_5GHZ) ? 0 : 1;
        ch_info = il_get_channel_info(il, il->band, chan);
        if (!ch_info) {
                IL_ERR("Failed to get channel info for channel %d [%d]\n", chan,
@@ -2310,7 +2310,7 @@ il3945_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
 
                il3945_sync_sta(il, vif_priv->ibss_bssid_sta_id,
                                (il->band ==
-                                IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP :
+                                NL80211_BAND_5GHZ) ? RATE_6M_PLCP :
                                RATE_1M_PLCP);
                il3945_rate_scale_init(il->hw, vif_priv->ibss_bssid_sta_id);
 
@@ -2343,7 +2343,7 @@ il3945_init_hw_rate_table(struct il_priv *il)
        }
 
        switch (il->band) {
-       case IEEE80211_BAND_5GHZ:
+       case NL80211_BAND_5GHZ:
                D_RATE("Select A mode rate scale\n");
                /* If one of the following CCK rates is used,
                 * have it fall back to the 6M OFDM rate */
@@ -2359,7 +2359,7 @@ il3945_init_hw_rate_table(struct il_priv *il)
                    il3945_rates[IL_FIRST_OFDM_RATE].table_rs_idx;
                break;
 
-       case IEEE80211_BAND_2GHZ:
+       case NL80211_BAND_2GHZ:
                D_RATE("Select B/G mode rate scale\n");
                /* If an OFDM rate is used, have it fall back to the
                 * 1M CCK rates */
index b75f4ef3cdc7b278ec837f83fd576444f5ce0008..a91d170a614b63181d6ca2c45a7038da2cb4fb4c 100644 (file)
@@ -457,7 +457,7 @@ il4965_rxq_stop(struct il_priv *il)
 }
 
 int
-il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
+il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band)
 {
        int idx = 0;
        int band_offset = 0;
@@ -468,7 +468,7 @@ il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
                return idx;
                /* Legacy rate format, search for match in table */
        } else {
-               if (band == IEEE80211_BAND_5GHZ)
+               if (band == NL80211_BAND_5GHZ)
                        band_offset = IL_FIRST_OFDM_RATE;
                for (idx = band_offset; idx < RATE_COUNT_LEGACY; idx++)
                        if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
@@ -688,8 +688,8 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
        rx_status.mactime = le64_to_cpu(phy_res->timestamp);
        rx_status.band =
            (phy_res->
-            phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ :
-           IEEE80211_BAND_5GHZ;
+            phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? NL80211_BAND_2GHZ :
+           NL80211_BAND_5GHZ;
        rx_status.freq =
            ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
                                           rx_status.band);
@@ -766,7 +766,7 @@ il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb)
 
 static int
 il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif,
-                            enum ieee80211_band band, u8 is_active,
+                            enum nl80211_band band, u8 is_active,
                             u8 n_probes, struct il_scan_channel *scan_ch)
 {
        struct ieee80211_channel *chan;
@@ -822,7 +822,7 @@ il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif,
                 * power level:
                 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
                 */
-               if (band == IEEE80211_BAND_5GHZ)
+               if (band == NL80211_BAND_5GHZ)
                        scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
                else
                        scan_ch->tx_gain = ((1 << 5) | (5 << 3));
@@ -870,7 +870,7 @@ il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
        u32 rate_flags = 0;
        u16 cmd_len;
        u16 rx_chain = 0;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        u8 n_probes = 0;
        u8 rx_ant = il->hw_params.valid_rx_ant;
        u8 rate;
@@ -944,7 +944,7 @@ il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
        scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
 
        switch (il->scan_band) {
-       case IEEE80211_BAND_2GHZ:
+       case NL80211_BAND_2GHZ:
                scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
                chan_mod =
                    le32_to_cpu(il->active.flags & RXON_FLG_CHANNEL_MODE_MSK) >>
@@ -956,7 +956,7 @@ il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
                        rate_flags = RATE_MCS_CCK_MSK;
                }
                break;
-       case IEEE80211_BAND_5GHZ:
+       case NL80211_BAND_5GHZ:
                rate = RATE_6M_PLCP;
                break;
        default:
@@ -1590,7 +1590,7 @@ il4965_tx_cmd_build_rate(struct il_priv *il,
            || rate_idx > RATE_COUNT_LEGACY)
                rate_idx = rate_lowest_index(&il->bands[info->band], sta);
        /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
-       if (info->band == IEEE80211_BAND_5GHZ)
+       if (info->band == NL80211_BAND_5GHZ)
                rate_idx += IL_FIRST_OFDM_RATE;
        /* Get PLCP rate for tx_cmd->rate_n_flags */
        rate_plcp = il_rates[rate_idx].plcp;
@@ -3051,7 +3051,7 @@ il4965_sta_alloc_lq(struct il_priv *il, u8 sta_id)
        }
        /* Set up the rate scaling to start at selected rate, fall back
         * all the way down to 1M in IEEE order, and then spin on 1M */
-       if (il->band == IEEE80211_BAND_5GHZ)
+       if (il->band == NL80211_BAND_5GHZ)
                r = RATE_6M_IDX;
        else
                r = RATE_1M_IDX;
@@ -5553,6 +5553,7 @@ __il4965_up(struct il_priv *il)
 
        il4965_prepare_card_hw(il);
        if (!il->hw_ready) {
+               il_dealloc_bcast_stations(il);
                IL_ERR("HW not ready\n");
                return -EIO;
        }
@@ -5564,6 +5565,7 @@ __il4965_up(struct il_priv *il)
                set_bit(S_RFKILL, &il->status);
                wiphy_rfkill_set_hw_state(il->hw->wiphy, true);
 
+               il_dealloc_bcast_stations(il);
                il_enable_rfkill_int(il);
                IL_WARN("Radio disabled by HW RF Kill switch\n");
                return 0;
@@ -5577,6 +5579,7 @@ __il4965_up(struct il_priv *il)
        ret = il4965_hw_nic_init(il);
        if (ret) {
                IL_ERR("Unable to init nic\n");
+               il_dealloc_bcast_stations(il);
                return ret;
        }
 
@@ -5787,12 +5790,12 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
 
        hw->max_listen_interval = IL_CONN_MAX_LISTEN_INTERVAL;
 
-       if (il->bands[IEEE80211_BAND_2GHZ].n_channels)
-               il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
-                   &il->bands[IEEE80211_BAND_2GHZ];
-       if (il->bands[IEEE80211_BAND_5GHZ].n_channels)
-               il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
-                   &il->bands[IEEE80211_BAND_5GHZ];
+       if (il->bands[NL80211_BAND_2GHZ].n_channels)
+               il->hw->wiphy->bands[NL80211_BAND_2GHZ] =
+                   &il->bands[NL80211_BAND_2GHZ];
+       if (il->bands[NL80211_BAND_5GHZ].n_channels)
+               il->hw->wiphy->bands[NL80211_BAND_5GHZ] =
+                   &il->bands[NL80211_BAND_5GHZ];
 
        il_leds_init(il);
 
@@ -6365,7 +6368,7 @@ il4965_init_drv(struct il_priv *il)
 
        il->ieee_channels = NULL;
        il->ieee_rates = NULL;
-       il->band = IEEE80211_BAND_2GHZ;
+       il->band = NL80211_BAND_2GHZ;
 
        il->iw_mode = NL80211_IFTYPE_STATION;
        il->current_ht_config.smps = IEEE80211_SMPS_STATIC;
@@ -6477,7 +6480,7 @@ il4965_set_hw_params(struct il_priv *il)
        il->hw_params.max_data_size = IL49_RTC_DATA_SIZE;
        il->hw_params.max_inst_size = IL49_RTC_INST_SIZE;
        il->hw_params.max_bsm_size = BSM_SRAM_SIZE;
-       il->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);
+       il->hw_params.ht40_channel = BIT(NL80211_BAND_5GHZ);
 
        il->hw_params.rx_wrt_ptr_reg = FH49_RSCSR_CHNL0_WPTR;
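
Two things are worth pulling out of the 4965 hunks above. First, a functional fix rides along with the rename: __il4965_up() now calls il_dealloc_bcast_stations() on each early-exit path, so a bring-up that fails after the broadcast station was allocated no longer leaks that station table entry. Second, a pattern that recurs throughout the iwlegacy conversions: the driver's legacy rate table places the four CCK rates ahead of the OFDM rates, and 5 GHz carries no CCK, so zero-based mac80211 A-band rate indices are shifted by IL_FIRST_OFDM_RATE at the driver boundary. A hedged sketch of that remapping (the helper name is illustrative, not the driver's):

    /* CCK 1/2/5.5/11M occupy driver slots 0..3; OFDM starts at 4 */
    static int il_mac_to_drv_rate_idx(int mac_idx, enum nl80211_band band)
    {
            /* in 5 GHz, mac80211 index 0 is 6M OFDM, i.e. driver slot 4 */
            if (band == NL80211_BAND_5GHZ)
                    return mac_idx + IL_FIRST_OFDM_RATE;
            return mac_idx;
    }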
 
index bac60b2bc3f014a53668841e934f5062207f4080..a867ae7f4095b80c93ca644ebf5e130a4250c143 100644 (file)
@@ -549,7 +549,7 @@ il4965_rate_n_flags_from_tbl(struct il_priv *il, struct il_scale_tbl_info *tbl,
  */
 static int
 il4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
-                               enum ieee80211_band band,
+                               enum nl80211_band band,
                                struct il_scale_tbl_info *tbl, int *rate_idx)
 {
        u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
@@ -574,7 +574,7 @@ il4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
        /* legacy rate format */
        if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
                if (il4965_num_of_ant == 1) {
-                       if (band == IEEE80211_BAND_5GHZ)
+                       if (band == NL80211_BAND_5GHZ)
                                tbl->lq_type = LQ_A;
                        else
                                tbl->lq_type = LQ_G;
@@ -743,7 +743,7 @@ il4965_rs_get_lower_rate(struct il_lq_sta *lq_sta,
        if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_idx)) {
                switch_to_legacy = 1;
                scale_idx = rs_ht_to_legacy[scale_idx];
-               if (lq_sta->band == IEEE80211_BAND_5GHZ)
+               if (lq_sta->band == NL80211_BAND_5GHZ)
                        tbl->lq_type = LQ_A;
                else
                        tbl->lq_type = LQ_G;
@@ -762,7 +762,7 @@ il4965_rs_get_lower_rate(struct il_lq_sta *lq_sta,
        /* Mask with station rate restriction */
        if (is_legacy(tbl->lq_type)) {
                /* supp_rates has no CCK bits in A mode */
-               if (lq_sta->band == IEEE80211_BAND_5GHZ)
+               if (lq_sta->band == NL80211_BAND_5GHZ)
                        rate_mask =
                            (u16) (rate_mask &
                                   (lq_sta->supp_rates << IL_FIRST_OFDM_RATE));
@@ -851,7 +851,7 @@ il4965_rs_tx_status(void *il_r, struct ieee80211_supported_band *sband,
        table = &lq_sta->lq;
        tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
        il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band, &tbl_type, &rs_idx);
-       if (il->band == IEEE80211_BAND_5GHZ)
+       if (il->band == NL80211_BAND_5GHZ)
                rs_idx -= IL_FIRST_OFDM_RATE;
        mac_flags = info->status.rates[0].flags;
        mac_idx = info->status.rates[0].idx;
@@ -864,7 +864,7 @@ il4965_rs_tx_status(void *il_r, struct ieee80211_supported_band *sband,
                 * mac80211 HT idx is always zero-idxed; we need to move
                 * HT OFDM rates after CCK rates in 2.4 GHz band
                 */
-               if (il->band == IEEE80211_BAND_2GHZ)
+               if (il->band == NL80211_BAND_2GHZ)
                        mac_idx += IL_FIRST_OFDM_RATE;
        }
        /* Here we actually compare this rate to the latest LQ command */
@@ -1816,7 +1816,7 @@ il4965_rs_rate_scale_perform(struct il_priv *il, struct sk_buff *skb,
 
        /* mask with station rate restriction */
        if (is_legacy(tbl->lq_type)) {
-               if (lq_sta->band == IEEE80211_BAND_5GHZ)
+               if (lq_sta->band == NL80211_BAND_5GHZ)
                        /* supp_rates has no CCK bits in A mode */
                        rate_scale_idx_msk =
                            (u16) (rate_mask &
@@ -2212,7 +2212,7 @@ il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
        /* Get max rate if user set max rate */
        if (lq_sta) {
                lq_sta->max_rate_idx = txrc->max_rate_idx;
-               if (sband->band == IEEE80211_BAND_5GHZ &&
+               if (sband->band == NL80211_BAND_5GHZ &&
                    lq_sta->max_rate_idx != -1)
                        lq_sta->max_rate_idx += IL_FIRST_OFDM_RATE;
                if (lq_sta->max_rate_idx < 0 ||
@@ -2258,11 +2258,11 @@ il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
        } else {
                /* Check for invalid rates */
                if (rate_idx < 0 || rate_idx >= RATE_COUNT_LEGACY ||
-                   (sband->band == IEEE80211_BAND_5GHZ &&
+                   (sband->band == NL80211_BAND_5GHZ &&
                     rate_idx < IL_FIRST_OFDM_RATE))
                        rate_idx = rate_lowest_index(sband, sta);
                /* On valid 5 GHz rate, adjust idx */
-               else if (sband->band == IEEE80211_BAND_5GHZ)
+               else if (sband->band == NL80211_BAND_5GHZ)
                        rate_idx -= IL_FIRST_OFDM_RATE;
                info->control.rates[0].flags = 0;
        }
@@ -2362,7 +2362,7 @@ il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
 
        /* Set last_txrate_idx to lowest rate */
        lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
-       if (sband->band == IEEE80211_BAND_5GHZ)
+       if (sband->band == NL80211_BAND_5GHZ)
                lq_sta->last_txrate_idx += IL_FIRST_OFDM_RATE;
        lq_sta->is_agg = 0;
 
index fe47db9c20cd2428549b6c520920abed84140487..c3c638ed0ed72e8c2f6c95929afc445f5c361d84 100644 (file)
@@ -1267,7 +1267,7 @@ il4965_send_tx_power(struct il_priv *il)
             "TX Power requested while scanning!\n"))
                return -EAGAIN;
 
-       band = il->band == IEEE80211_BAND_2GHZ;
+       band = il->band == NL80211_BAND_2GHZ;
 
        is_ht40 = iw4965_is_ht40_channel(il->active.flags);
 
@@ -1480,7 +1480,7 @@ il4965_hw_channel_switch(struct il_priv *il,
        u8 switch_count;
        u16 beacon_interval = le16_to_cpu(il->timing.beacon_interval);
        struct ieee80211_vif *vif = il->vif;
-       band = (il->band == IEEE80211_BAND_2GHZ);
+       band = (il->band == NL80211_BAND_2GHZ);
 
        if (WARN_ON_ONCE(vif == NULL))
                return -EIO;
@@ -1918,7 +1918,7 @@ struct il_cfg il4965_cfg = {
         * Force use of chains B and C for scan RX on 5 GHz band
         * because the device has off-channel reception on chain A.
         */
-       .scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
+       .scan_rx_antennas[NL80211_BAND_5GHZ] = ANT_BC,
 
        .eeprom_size = IL4965_EEPROM_IMG_SIZE,
        .num_of_queues = IL49_NUM_QUEUES,
index e432715e02d89dedcce89615d855233662c26d82..527e8b531aed2b2691b1a0bba03421f8f255b060 100644 (file)
@@ -68,7 +68,7 @@ void il4965_rx_replenish(struct il_priv *il);
 void il4965_rx_replenish_now(struct il_priv *il);
 void il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq);
 int il4965_rxq_stop(struct il_priv *il);
-int il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
+int il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band);
 void il4965_rx_handle(struct il_priv *il);
 
 /* tx */
index eb5cb603bc52990f48cbf1f55c8485f1c641b6b4..eb24b9241bb259ce2acb6632c05c2ddccc656662 100644 (file)
@@ -723,10 +723,9 @@ il_eeprom_init(struct il_priv *il)
        sz = il->cfg->eeprom_size;
        D_EEPROM("NVM size = %d\n", sz);
        il->eeprom = kzalloc(sz, GFP_KERNEL);
-       if (!il->eeprom) {
-               ret = -ENOMEM;
-               goto alloc_err;
-       }
+       if (!il->eeprom)
+               return -ENOMEM;
+
        e = (__le16 *) il->eeprom;
 
        il->ops->apm_init(il);
@@ -778,7 +777,6 @@ err:
                il_eeprom_free(il);
        /* Reset chip to save power until we load uCode during "up". */
        il_apm_stop(il);
-alloc_err:
        return ret;
 }
 EXPORT_SYMBOL(il_eeprom_init);
@@ -862,7 +860,7 @@ il_init_band_reference(const struct il_priv *il, int eep_band,
  * Does not set up a command, or touch hardware.
  */
 static int
-il_mod_ht40_chan_info(struct il_priv *il, enum ieee80211_band band, u16 channel,
+il_mod_ht40_chan_info(struct il_priv *il, enum nl80211_band band, u16 channel,
                      const struct il_eeprom_channel *eeprom_ch,
                      u8 clear_ht40_extension_channel)
 {
@@ -947,7 +945,7 @@ il_init_channel_map(struct il_priv *il)
                        ch_info->channel = eeprom_ch_idx[ch];
                        ch_info->band =
                            (band ==
-                            1) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+                            1) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
 
                        /* permanently store EEPROM's channel regulatory flags
                         *   and max power in channel info database. */
@@ -1005,14 +1003,14 @@ il_init_channel_map(struct il_priv *il)
 
        /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
        for (band = 6; band <= 7; band++) {
-               enum ieee80211_band ieeeband;
+               enum nl80211_band ieeeband;
 
                il_init_band_reference(il, band, &eeprom_ch_count,
                                       &eeprom_ch_info, &eeprom_ch_idx);
 
                /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
                ieeeband =
-                   (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+                   (band == 6) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
 
                /* Loop through each band adding each of the channels */
                for (ch = 0; ch < eeprom_ch_count; ch++) {
@@ -1050,19 +1048,19 @@ EXPORT_SYMBOL(il_free_channel_map);
  * Based on band and channel number.
  */
 const struct il_channel_info *
-il_get_channel_info(const struct il_priv *il, enum ieee80211_band band,
+il_get_channel_info(const struct il_priv *il, enum nl80211_band band,
                    u16 channel)
 {
        int i;
 
        switch (band) {
-       case IEEE80211_BAND_5GHZ:
+       case NL80211_BAND_5GHZ:
                for (i = 14; i < il->channel_count; i++) {
                        if (il->channel_info[i].channel == channel)
                                return &il->channel_info[i];
                }
                break;
-       case IEEE80211_BAND_2GHZ:
+       case NL80211_BAND_2GHZ:
                if (channel >= 1 && channel <= 14)
                        return &il->channel_info[channel - 1];
                break;
@@ -1459,7 +1457,7 @@ il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb)
        clear_bit(S_SCAN_HW, &il->status);
 
        D_SCAN("Scan on %sGHz took %dms\n",
-              (il->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
+              (il->scan_band == NL80211_BAND_2GHZ) ? "2.4" : "5.2",
               jiffies_to_msecs(jiffies - il->scan_start));
 
        queue_work(il->workqueue, &il->scan_completed);
@@ -1477,10 +1475,10 @@ il_setup_rx_scan_handlers(struct il_priv *il)
 EXPORT_SYMBOL(il_setup_rx_scan_handlers);
 
 u16
-il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
+il_get_active_dwell_time(struct il_priv *il, enum nl80211_band band,
                         u8 n_probes)
 {
-       if (band == IEEE80211_BAND_5GHZ)
+       if (band == NL80211_BAND_5GHZ)
                return IL_ACTIVE_DWELL_TIME_52 +
                    IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
        else
@@ -1490,14 +1488,14 @@ il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
 EXPORT_SYMBOL(il_get_active_dwell_time);
 
 u16
-il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
+il_get_passive_dwell_time(struct il_priv *il, enum nl80211_band band,
                          struct ieee80211_vif *vif)
 {
        u16 value;
 
        u16 passive =
            (band ==
-            IEEE80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE +
+            NL80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE +
            IL_PASSIVE_DWELL_TIME_24 : IL_PASSIVE_DWELL_BASE +
            IL_PASSIVE_DWELL_TIME_52;
 
@@ -1522,10 +1520,10 @@ void
 il_init_scan_params(struct il_priv *il)
 {
        u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1;
-       if (!il->scan_tx_ant[IEEE80211_BAND_5GHZ])
-               il->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
-       if (!il->scan_tx_ant[IEEE80211_BAND_2GHZ])
-               il->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
+       if (!il->scan_tx_ant[NL80211_BAND_5GHZ])
+               il->scan_tx_ant[NL80211_BAND_5GHZ] = ant_idx;
+       if (!il->scan_tx_ant[NL80211_BAND_2GHZ])
+               il->scan_tx_ant[NL80211_BAND_2GHZ] = ant_idx;
 }
 EXPORT_SYMBOL(il_init_scan_params);
 
@@ -2005,7 +2003,7 @@ il_prep_station(struct il_priv *il, const u8 *addr, bool is_ap,
        il_set_ht_add_station(il, sta_id, sta);
 
        /* 3945 only */
-       rate = (il->band == IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP;
+       rate = (il->band == NL80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP;
        /* Turn on both antennas for the station... */
        station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
 
@@ -2794,8 +2792,10 @@ il_tx_queue_free(struct il_priv *il, int txq_id)
        il_tx_queue_unmap(il, txq_id);
 
        /* De-alloc array of command/tx buffers */
-       for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
-               kfree(txq->cmd[i]);
+       if (txq->cmd) {
+               for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
+                       kfree(txq->cmd[i]);
+       }
 
        /* De-alloc circular buffer of TFDs */
        if (txq->q.n_bd)
@@ -2873,8 +2873,10 @@ il_cmd_queue_free(struct il_priv *il)
        il_cmd_queue_unmap(il);
 
        /* De-alloc array of command/tx buffers */
-       for (i = 0; i <= TFD_CMD_SLOTS; i++)
-               kfree(txq->cmd[i]);
+       if (txq->cmd) {
+               for (i = 0; i <= TFD_CMD_SLOTS; i++)
+                       kfree(txq->cmd[i]);
+       }
 
        /* De-alloc circular buffer of TFDs */
        if (txq->q.n_bd)
@@ -3080,7 +3082,9 @@ err:
                kfree(txq->cmd[i]);
 out_free_arrays:
        kfree(txq->meta);
+       txq->meta = NULL;
        kfree(txq->cmd);
+       txq->cmd = NULL;
 
        return -ENOMEM;
 }
@@ -3378,7 +3382,7 @@ EXPORT_SYMBOL(il_bcast_addr);
 static void
 il_init_ht_hw_capab(const struct il_priv *il,
                    struct ieee80211_sta_ht_cap *ht_info,
-                   enum ieee80211_band band)
+                   enum nl80211_band band)
 {
        u16 max_bit_rate = 0;
        u8 rx_chains_num = il->hw_params.rx_chains_num;
@@ -3439,8 +3443,8 @@ il_init_geos(struct il_priv *il)
        int i = 0;
        s8 max_tx_power = 0;
 
-       if (il->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
-           il->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
+       if (il->bands[NL80211_BAND_2GHZ].n_bitrates ||
+           il->bands[NL80211_BAND_5GHZ].n_bitrates) {
                D_INFO("Geography modes already initialized.\n");
                set_bit(S_GEO_CONFIGURED, &il->status);
                return 0;
@@ -3461,23 +3465,23 @@ il_init_geos(struct il_priv *il)
        }
 
        /* 5.2GHz channels start after the 2.4GHz channels */
-       sband = &il->bands[IEEE80211_BAND_5GHZ];
+       sband = &il->bands[NL80211_BAND_5GHZ];
        sband->channels = &channels[ARRAY_SIZE(il_eeprom_band_1)];
        /* just OFDM */
        sband->bitrates = &rates[IL_FIRST_OFDM_RATE];
        sband->n_bitrates = RATE_COUNT_LEGACY - IL_FIRST_OFDM_RATE;
 
        if (il->cfg->sku & IL_SKU_N)
-               il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_5GHZ);
+               il_init_ht_hw_capab(il, &sband->ht_cap, NL80211_BAND_5GHZ);
 
-       sband = &il->bands[IEEE80211_BAND_2GHZ];
+       sband = &il->bands[NL80211_BAND_2GHZ];
        sband->channels = channels;
        /* OFDM & CCK */
        sband->bitrates = rates;
        sband->n_bitrates = RATE_COUNT_LEGACY;
 
        if (il->cfg->sku & IL_SKU_N)
-               il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_2GHZ);
+               il_init_ht_hw_capab(il, &sband->ht_cap, NL80211_BAND_2GHZ);
 
        il->ieee_channels = channels;
        il->ieee_rates = rates;
@@ -3528,7 +3532,7 @@ il_init_geos(struct il_priv *il)
        il->tx_power_user_lmt = max_tx_power;
        il->tx_power_next = max_tx_power;
 
-       if (il->bands[IEEE80211_BAND_5GHZ].n_channels == 0 &&
+       if (il->bands[NL80211_BAND_5GHZ].n_channels == 0 &&
            (il->cfg->sku & IL_SKU_A)) {
                IL_INFO("Incorrectly detected BG card as ABG. "
                        "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
@@ -3537,8 +3541,8 @@ il_init_geos(struct il_priv *il)
        }
 
        IL_INFO("Tunable channels: %d 802.11bg, %d 802.11a channels\n",
-               il->bands[IEEE80211_BAND_2GHZ].n_channels,
-               il->bands[IEEE80211_BAND_5GHZ].n_channels);
+               il->bands[NL80211_BAND_2GHZ].n_channels,
+               il->bands[NL80211_BAND_5GHZ].n_channels);
 
        set_bit(S_GEO_CONFIGURED, &il->status);
 
@@ -3559,7 +3563,7 @@ il_free_geos(struct il_priv *il)
 EXPORT_SYMBOL(il_free_geos);
 
 static bool
-il_is_channel_extension(struct il_priv *il, enum ieee80211_band band,
+il_is_channel_extension(struct il_priv *il, enum nl80211_band band,
                        u16 channel, u8 extension_chan_offset)
 {
        const struct il_channel_info *ch_info;
@@ -3922,14 +3926,14 @@ EXPORT_SYMBOL(il_set_rxon_ht);
 
 /* Return valid, unused, channel for a passive scan to reset the RF */
 u8
-il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band)
+il_get_single_channel_number(struct il_priv *il, enum nl80211_band band)
 {
        const struct il_channel_info *ch_info;
        int i;
        u8 channel = 0;
        u8 min, max;
 
-       if (band == IEEE80211_BAND_5GHZ) {
+       if (band == NL80211_BAND_5GHZ) {
                min = 14;
                max = il->channel_count;
        } else {
@@ -3961,14 +3965,14 @@ EXPORT_SYMBOL(il_get_single_channel_number);
 int
 il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch)
 {
-       enum ieee80211_band band = ch->band;
+       enum nl80211_band band = ch->band;
        u16 channel = ch->hw_value;
 
        if (le16_to_cpu(il->staging.channel) == channel && il->band == band)
                return 0;
 
        il->staging.channel = cpu_to_le16(channel);
-       if (band == IEEE80211_BAND_5GHZ)
+       if (band == NL80211_BAND_5GHZ)
                il->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
        else
                il->staging.flags |= RXON_FLG_BAND_24G_MSK;
@@ -3982,10 +3986,10 @@ il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch)
 EXPORT_SYMBOL(il_set_rxon_channel);
 
 void
-il_set_flags_for_band(struct il_priv *il, enum ieee80211_band band,
+il_set_flags_for_band(struct il_priv *il, enum nl80211_band band,
                      struct ieee80211_vif *vif)
 {
-       if (band == IEEE80211_BAND_5GHZ) {
+       if (band == NL80211_BAND_5GHZ) {
                il->staging.flags &=
                    ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
                      RXON_FLG_CCK_MSK);
@@ -5411,7 +5415,7 @@ il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 
        if (changes & BSS_CHANGED_ERP_CTS_PROT) {
                D_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
-               if (bss_conf->use_cts_prot && il->band != IEEE80211_BAND_5GHZ)
+               if (bss_conf->use_cts_prot && il->band != NL80211_BAND_5GHZ)
                        il->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
                else
                        il->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
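
Beyond the band rename, the common.c hunks harden the allocation and teardown paths: il_eeprom_init() returns -ENOMEM directly instead of jumping to a label, il_tx_queue_free() and il_cmd_queue_free() now tolerate a NULL txq->cmd (possible when il_tx_queue_init() fails partway), and the init error path NULLs txq->meta and txq->cmd after freeing them so a later teardown cannot double-free. The idiom in miniature (a sketch, not the driver's exact code):

    kfree(txq->cmd);
    txq->cmd = NULL;   /* later free paths treat NULL as nothing-to-do */
    kfree(txq->meta);
    txq->meta = NULL;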
index ce52cf114fdebff1e2640e90cecd878ecf24b653..726ede391cb90e788496d348d08cb8a2fe936be3 100644 (file)
@@ -432,7 +432,7 @@ u16 il_eeprom_query16(const struct il_priv *il, size_t offset);
 int il_init_channel_map(struct il_priv *il);
 void il_free_channel_map(struct il_priv *il);
 const struct il_channel_info *il_get_channel_info(const struct il_priv *il,
-                                                 enum ieee80211_band band,
+                                                 enum nl80211_band band,
                                                  u16 channel);
 
 #define IL_NUM_SCAN_RATES         (2)
@@ -497,7 +497,7 @@ struct il_channel_info {
 
        u8 group_idx;           /* 0-4, maps channel to group1/2/3/4/5 */
        u8 band_idx;            /* 0-4, maps channel to band1/2/3/4/5 */
-       enum ieee80211_band band;
+       enum nl80211_band band;
 
        /* HT40 channel info */
        s8 ht40_max_power_avg;  /* (dBm) regul. eeprom, normal Tx, any rate */
@@ -811,7 +811,7 @@ struct il_sensitivity_ranges {
  * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
  * @max_stations:
  * @ht40_channel: is 40MHz width possible in band 2.4
- * BIT(IEEE80211_BAND_5GHZ) BIT(IEEE80211_BAND_5GHZ)
+ * BIT(NL80211_BAND_5GHZ) BIT(NL80211_BAND_5GHZ)
  * @sw_crypto: 0 for hw, 1 for sw
  * @max_xxx_size: for ucode uses
  * @ct_kill_threshold: temperature threshold
@@ -1141,13 +1141,13 @@ struct il_priv {
        struct list_head free_frames;
        int frames_count;
 
-       enum ieee80211_band band;
+       enum nl80211_band band;
        int alloc_rxb_page;
 
        void (*handlers[IL_CN_MAX]) (struct il_priv *il,
                                     struct il_rx_buf *rxb);
 
-       struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+       struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
 
        /* spectrum measurement report caching */
        struct il_spectrum_notification measure_report;
@@ -1176,10 +1176,10 @@ struct il_priv {
        unsigned long scan_start;
        unsigned long scan_start_tsf;
        void *scan_cmd;
-       enum ieee80211_band scan_band;
+       enum nl80211_band scan_band;
        struct cfg80211_scan_request *scan_request;
        struct ieee80211_vif *scan_vif;
-       u8 scan_tx_ant[IEEE80211_NUM_BANDS];
+       u8 scan_tx_ant[NUM_NL80211_BANDS];
        u8 mgmt_tx_ant;
 
        /* spinlock */
@@ -1479,7 +1479,7 @@ il_is_channel_radar(const struct il_channel_info *ch_info)
 static inline u8
 il_is_channel_a_band(const struct il_channel_info *ch_info)
 {
-       return ch_info->band == IEEE80211_BAND_5GHZ;
+       return ch_info->band == NL80211_BAND_5GHZ;
 }
 
 static inline int
@@ -1673,7 +1673,7 @@ struct il_cfg {
        /* params not likely to change within a device family */
        struct il_base_params *base_params;
        /* params likely to change within a device family */
-       u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
+       u8 scan_rx_antennas[NUM_NL80211_BANDS];
        enum il_led_mode led_mode;
 
        int eeprom_size;
@@ -1707,9 +1707,9 @@ void il_set_rxon_hwcrypto(struct il_priv *il, int hw_decrypt);
 int il_check_rxon_cmd(struct il_priv *il);
 int il_full_rxon_required(struct il_priv *il);
 int il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch);
-void il_set_flags_for_band(struct il_priv *il, enum ieee80211_band band,
+void il_set_flags_for_band(struct il_priv *il, enum nl80211_band band,
                           struct ieee80211_vif *vif);
-u8 il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band);
+u8 il_get_single_channel_number(struct il_priv *il, enum nl80211_band band);
 void il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf);
 bool il_is_ht40_tx_allowed(struct il_priv *il,
                           struct ieee80211_sta_ht_cap *ht_cap);
@@ -1793,9 +1793,9 @@ int il_force_reset(struct il_priv *il, bool external);
 u16 il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
                      const u8 *ta, const u8 *ie, int ie_len, int left);
 void il_setup_rx_scan_handlers(struct il_priv *il);
-u16 il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
+u16 il_get_active_dwell_time(struct il_priv *il, enum nl80211_band band,
                             u8 n_probes);
-u16 il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
+u16 il_get_passive_dwell_time(struct il_priv *il, enum nl80211_band band,
                              struct ieee80211_vif *vif);
 void il_setup_scan_deferred_work(struct il_priv *il);
 void il_cancel_scan_deferred_work(struct il_priv *il);
@@ -1955,7 +1955,7 @@ il_commit_rxon(struct il_priv *il)
 }
 
 static inline const struct ieee80211_supported_band *
-il_get_hw_mode(struct il_priv *il, enum ieee80211_band band)
+il_get_hw_mode(struct il_priv *il, enum nl80211_band band)
 {
        return il->hw->wiphy->bands[band];
 }
@@ -2813,7 +2813,7 @@ struct il_lq_sta {
        u8 action_counter;      /* # mode-switch actions tried */
        u8 is_green;
        u8 is_dup;
-       enum ieee80211_band band;
+       enum nl80211_band band;
 
        /* The following are bitmaps of rates; RATE_6M_MASK, etc. */
        u32 supp_rates;
index 908b9f4fef6f3b4e3522b8f2c5a03166ca0cf2b8..6fc6b7ff9849b9650b3ca2f42ef8e845e8406eec 100644 (file)
@@ -544,7 +544,7 @@ il_dbgfs_channels_read(struct file *file, char __user *user_buf, size_t count,
                return -ENOMEM;
        }
 
-       supp_band = il_get_hw_mode(il, IEEE80211_BAND_2GHZ);
+       supp_band = il_get_hw_mode(il, NL80211_BAND_2GHZ);
        if (supp_band) {
                channels = supp_band->channels;
 
@@ -571,7 +571,7 @@ il_dbgfs_channels_read(struct file *file, char __user *user_buf, size_t count,
                                      flags & IEEE80211_CHAN_NO_IR ?
                                      "passive only" : "active/passive");
        }
-       supp_band = il_get_hw_mode(il, IEEE80211_BAND_5GHZ);
+       supp_band = il_get_hw_mode(il, NL80211_BAND_5GHZ);
        if (supp_band) {
                channels = supp_band->channels;
 
index 16c4f383488fcc5160d22dca5980d72bb1a78edd..492035f406e9fa633a9371f697fd6281d6122c7f 100644 (file)
@@ -88,16 +88,6 @@ config IWLWIFI_BCAST_FILTERING
          If unsure, don't enable this option, as some programs might
          expect incoming broadcasts for their normal operations.
 
-config IWLWIFI_UAPSD
-       bool "enable U-APSD by default"
-       depends on IWLMVM
-       help
-         Say Y here to enable U-APSD by default. This may cause
-         interoperability problems with some APs, manifesting in lower than
-         expected throughput due to those APs not enabling aggregation
-
-         If unsure, say N.
-
 config IWLWIFI_PCIE_RTPM
        bool "Enable runtime power management mode for PCIe devices"
        depends on IWLMVM && PM
index 9de277c6c420aecbbe49452160a0156ecd1e4117..b79e38734f2f64b241c7c7d42598b49cbbaaf760 100644 (file)
@@ -158,7 +158,7 @@ void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
                         struct iwl_rxon_context *ctx);
 void iwl_set_flags_for_band(struct iwl_priv *priv,
                            struct iwl_rxon_context *ctx,
-                           enum ieee80211_band band,
+                           enum nl80211_band band,
                            struct ieee80211_vif *vif);
 
 /* uCode */
@@ -186,7 +186,7 @@ int iwl_send_statistics_request(struct iwl_priv *priv,
                                u8 flags, bool clear);
 
 static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
-                       struct iwl_priv *priv, enum ieee80211_band band)
+                       struct iwl_priv *priv, enum nl80211_band band)
 {
        return priv->hw->wiphy->bands[band];
 }
@@ -198,7 +198,7 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan);
 #endif
 
 /* rx */
-int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
+int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band);
 void iwl_setup_rx_handlers(struct iwl_priv *priv);
 void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
 
@@ -258,7 +258,7 @@ void iwl_cancel_scan_deferred_work(struct iwl_priv *priv);
 int __must_check iwl_scan_initiate(struct iwl_priv *priv,
                                   struct ieee80211_vif *vif,
                                   enum iwl_scan_type scan_type,
-                                  enum ieee80211_band band);
+                                  enum nl80211_band band);
 
 /* For faster active scanning, scan will move to the next channel if fewer than
  * PLCP_QUIET_THRESH packets are heard on this channel within
index 74c51615244e1f8d25dedce4413dd88d7356b0bc..f6591c83d636bcab4b49f415e0ddd1a9052326c9 100644 (file)
@@ -335,7 +335,7 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
        if (!buf)
                return -ENOMEM;
 
-       supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ);
+       supp_band = iwl_get_hw_mode(priv, NL80211_BAND_2GHZ);
        if (supp_band) {
                channels = supp_band->channels;
 
@@ -358,7 +358,7 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
                                        IEEE80211_CHAN_NO_IR ?
                                        "passive only" : "active/passive");
        }
-       supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ);
+       supp_band = iwl_get_hw_mode(priv, NL80211_BAND_5GHZ);
        if (supp_band) {
                channels = supp_band->channels;
 
index 1a7ead753eee9a463b778fdce09bba02f7128ee6..8148df61a91679eb3e585ee4e8cfe0ae9fc217bb 100644 (file)
@@ -677,7 +677,7 @@ struct iwl_priv {
 
        struct iwl_hw_params hw_params;
 
-       enum ieee80211_band band;
+       enum nl80211_band band;
        u8 valid_contexts;
 
        void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
@@ -722,11 +722,11 @@ struct iwl_priv {
        unsigned long scan_start;
        unsigned long scan_start_tsf;
        void *scan_cmd;
-       enum ieee80211_band scan_band;
+       enum nl80211_band scan_band;
        struct cfg80211_scan_request *scan_request;
        struct ieee80211_vif *scan_vif;
        enum iwl_scan_type scan_type;
-       u8 scan_tx_ant[IEEE80211_NUM_BANDS];
+       u8 scan_tx_ant[NUM_NL80211_BANDS];
        u8 mgmt_tx_ant;
 
        /* max number of station keys */
index cc13c04063a5a70401771eea3fe9551552d05f55..f21732ec3b2529017b151b374f2627e5b257e956 100644 (file)
@@ -420,7 +420,7 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
                .data = { &cmd, },
        };
 
-       cmd.band = priv->band == IEEE80211_BAND_2GHZ;
+       cmd.band = priv->band == NL80211_BAND_2GHZ;
        ch = ch_switch->chandef.chan->hw_value;
        IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
                      ctx->active.channel, ch);
@@ -588,7 +588,7 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
 
        hcmd.data[0] = cmd;
 
-       cmd->band = priv->band == IEEE80211_BAND_2GHZ;
+       cmd->band = priv->band == NL80211_BAND_2GHZ;
        ch = ch_switch->chandef.chan->hw_value;
        IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
                      ctx->active.channel, ch);
index 1799469268ea8e8a24d8d3fd33b0221b92ccfcd3..8dda52ae3bb5015cd5c1f2409482b3bbb607d777 100644 (file)
@@ -94,7 +94,7 @@ void iwlagn_temperature(struct iwl_priv *priv)
        iwl_tt_handler(priv);
 }
 
-int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
+int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band)
 {
        int idx = 0;
        int band_offset = 0;
@@ -105,7 +105,7 @@ int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
                return idx;
        /* Legacy rate format, search for match in table */
        } else {
-               if (band == IEEE80211_BAND_5GHZ)
+               if (band == NL80211_BAND_5GHZ)
                        band_offset = IWL_FIRST_OFDM_RATE;
                for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
                        if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
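
The repeated NL80211_BAND_5GHZ offsets above follow from the layout of
the driver's legacy rate table: CCK entries first, then OFDM. 5 GHz
carries no CCK rates and mac80211 numbers its 5 GHz rates from zero, so
indices shift by IWL_FIRST_OFDM_RATE at the boundary. A minimal sketch
(the helper name is illustrative; the constants are the driver's):

static int example_to_driver_rate_idx(int mac_idx, enum nl80211_band band)
{
        /* mac80211's 5 GHz rate 0 is the driver's first OFDM entry */
        if (band == NL80211_BAND_5GHZ)
                return mac_idx + IWL_FIRST_OFDM_RATE;
        return mac_idx; /* 2.4 GHz tables line up as-is */
}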
@@ -878,7 +878,7 @@ u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
        int i;
        u8 ind = ant;
 
-       if (priv->band == IEEE80211_BAND_2GHZ &&
+       if (priv->band == NL80211_BAND_2GHZ &&
            priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
                return 0;
 
index c63ea79571ff5f0d6b0bbfd07b4a6faba42f1593..8c0719468d00514572a0cc6e76c4f084f850a335 100644 (file)
@@ -202,12 +202,12 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
 
        hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
 
-       if (priv->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
-               priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
-                       &priv->nvm_data->bands[IEEE80211_BAND_2GHZ];
-       if (priv->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels)
-               priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
-                       &priv->nvm_data->bands[IEEE80211_BAND_5GHZ];
+       if (priv->nvm_data->bands[NL80211_BAND_2GHZ].n_channels)
+               priv->hw->wiphy->bands[NL80211_BAND_2GHZ] =
+                       &priv->nvm_data->bands[NL80211_BAND_2GHZ];
+       if (priv->nvm_data->bands[NL80211_BAND_5GHZ].n_channels)
+               priv->hw->wiphy->bands[NL80211_BAND_5GHZ] =
+                       &priv->nvm_data->bands[NL80211_BAND_5GHZ];
 
        hw->wiphy->hw_version = priv->trans->hw_id;
 
index 85628127947fb251712710b1c81868975c4d0404..37b32a6f60fdd889478fea076ec76d5e546ecf8b 100644 (file)
@@ -262,7 +262,7 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
        rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
 
        /* In mac80211, rates for 5 GHz start at 0 */
-       if (info->band == IEEE80211_BAND_5GHZ)
+       if (info->band == NL80211_BAND_5GHZ)
                rate += IWL_FIRST_OFDM_RATE;
        else if (rate >= IWL_FIRST_CCK_RATE && rate <= IWL_LAST_CCK_RATE)
                rate_flags |= RATE_MCS_CCK_MSK;
@@ -1071,7 +1071,7 @@ static void iwl_bg_restart(struct work_struct *data)
 
 static void iwl_setup_deferred_work(struct iwl_priv *priv)
 {
-       priv->workqueue = create_singlethread_workqueue(DRV_NAME);
+       priv->workqueue = alloc_ordered_workqueue(DRV_NAME, 0);
 
        INIT_WORK(&priv->restart, iwl_bg_restart);
        INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
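
One non-rename change hides in this hunk: create_singlethread_workqueue()
becomes alloc_ordered_workqueue(name, 0). Both yield a workqueue that
executes at most one item at a time, in queueing order, so the driver's
serialization assumptions hold; a usage sketch (the names are
illustrative):

static struct workqueue_struct *example_wq;

static int example_setup(struct work_struct *work)
{
        example_wq = alloc_ordered_workqueue("example_drv", 0); /* 0: no extra WQ_* flags */
        if (!example_wq)
                return -ENOMEM;
        queue_work(example_wq, work);   /* items still run strictly in order */
        return 0;
}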
@@ -1117,7 +1117,7 @@ static int iwl_init_drv(struct iwl_priv *priv)
 
        INIT_LIST_HEAD(&priv->calib_results);
 
-       priv->band = IEEE80211_BAND_2GHZ;
+       priv->band = NL80211_BAND_2GHZ;
 
        priv->plcp_delta_threshold = priv->lib->plcp_delta_threshold;
 
index ee7505537c963a08e1a282500ba0708cc920995a..b95c2d76db33c52cf6c5c9e1957ead160917f772 100644 (file)
@@ -599,7 +599,7 @@ static u32 rate_n_flags_from_tbl(struct iwl_priv *priv,
  * fill "search" or "active" tx mode table.
  */
 static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
-                                   enum ieee80211_band band,
+                                   enum nl80211_band band,
                                    struct iwl_scale_tbl_info *tbl,
                                    int *rate_idx)
 {
@@ -624,7 +624,7 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
        /* legacy rate format */
        if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
                if (num_of_ant == 1) {
-                       if (band == IEEE80211_BAND_5GHZ)
+                       if (band == NL80211_BAND_5GHZ)
                                tbl->lq_type = LQ_A;
                        else
                                tbl->lq_type = LQ_G;
@@ -802,7 +802,7 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
        if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
                switch_to_legacy = 1;
                scale_index = rs_ht_to_legacy[scale_index];
-               if (lq_sta->band == IEEE80211_BAND_5GHZ)
+               if (lq_sta->band == NL80211_BAND_5GHZ)
                        tbl->lq_type = LQ_A;
                else
                        tbl->lq_type = LQ_G;
@@ -821,7 +821,7 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
        /* Mask with station rate restriction */
        if (is_legacy(tbl->lq_type)) {
                /* supp_rates has no CCK bits in A mode */
-               if (lq_sta->band == IEEE80211_BAND_5GHZ)
+               if (lq_sta->band == NL80211_BAND_5GHZ)
                        rate_mask  = (u16)(rate_mask &
                           (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
                else
@@ -939,7 +939,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
        table = &lq_sta->lq;
        tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
        rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, &rs_index);
-       if (priv->band == IEEE80211_BAND_5GHZ)
+       if (priv->band == NL80211_BAND_5GHZ)
                rs_index -= IWL_FIRST_OFDM_RATE;
        mac_flags = info->status.rates[0].flags;
        mac_index = info->status.rates[0].idx;
@@ -952,7 +952,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
                 * mac80211 HT index is always zero-indexed; we need to move
                 * HT OFDM rates after CCK rates in 2.4 GHz band
                 */
-               if (priv->band == IEEE80211_BAND_2GHZ)
+               if (priv->band == NL80211_BAND_2GHZ)
                        mac_index += IWL_FIRST_OFDM_RATE;
        }
        /* Here we actually compare this rate to the latest LQ command */
@@ -2284,7 +2284,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
 
        /* mask with station rate restriction */
        if (is_legacy(tbl->lq_type)) {
-               if (lq_sta->band == IEEE80211_BAND_5GHZ)
+               if (lq_sta->band == NL80211_BAND_5GHZ)
                        /* supp_rates has no CCK bits in A mode */
                        rate_scale_index_msk = (u16) (rate_mask &
                                (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
@@ -2721,7 +2721,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
        /* Get max rate if user set max rate */
        if (lq_sta) {
                lq_sta->max_rate_idx = txrc->max_rate_idx;
-               if ((sband->band == IEEE80211_BAND_5GHZ) &&
+               if ((sband->band == NL80211_BAND_5GHZ) &&
                    (lq_sta->max_rate_idx != -1))
                        lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
                if ((lq_sta->max_rate_idx < 0) ||
@@ -2763,11 +2763,11 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
        } else {
                /* Check for invalid rates */
                if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
-                               ((sband->band == IEEE80211_BAND_5GHZ) &&
+                               ((sband->band == NL80211_BAND_5GHZ) &&
                                 (rate_idx < IWL_FIRST_OFDM_RATE)))
                        rate_idx = rate_lowest_index(sband, sta);
                /* On valid 5 GHz rate, adjust index */
-               else if (sband->band == IEEE80211_BAND_5GHZ)
+               else if (sband->band == NL80211_BAND_5GHZ)
                        rate_idx -= IWL_FIRST_OFDM_RATE;
                info->control.rates[0].flags = 0;
        }
@@ -2880,7 +2880,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
 
        /* Set last_txrate_idx to lowest rate */
        lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
-       if (sband->band == IEEE80211_BAND_5GHZ)
+       if (sband->band == NL80211_BAND_5GHZ)
                lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
        lq_sta->is_agg = 0;
 #ifdef CONFIG_MAC80211_DEBUGFS
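
The band checks in the rate-scaling hunks above all serve one mask
alignment, called out by the in-tree comment "supp_rates has no CCK bits
in A mode": a 5 GHz station bitmap starts at the first OFDM rate and
must be shifted before it can mask the driver's full CCK+OFDM table.
Restated as a sketch (the helper name is illustrative):

static u16 example_rate_scale_mask(const struct iwl_lq_sta *lq_sta,
                                   u16 rate_mask)
{
        if (lq_sta->band == NL80211_BAND_5GHZ)
                return (u16)(rate_mask &
                             (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
        return (u16)(rate_mask & lq_sta->supp_rates);
}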
index c5fe4458461372ea2cdfe30b289e1715f370a445..50c1e951dd2de1b2088077fb77cb54c65c44a8bf 100644 (file)
@@ -355,7 +355,7 @@ struct iwl_lq_sta {
        u8 action_counter;      /* # mode-switch actions tried */
        u8 is_green;
        u8 is_dup;
-       enum ieee80211_band band;
+       enum nl80211_band band;
 
        /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
        u32 supp_rates;
index 27ea61e3a390bf1d85b4e5137d5c0ffc412aaf29..dfa2041cfdac06a175dfa319d368e916e602ca7a 100644 (file)
@@ -834,7 +834,7 @@ static void iwlagn_rx_reply_rx(struct iwl_priv *priv,
        /* rx_status carries information about the packet to mac80211 */
        rx_status.mactime = le64_to_cpu(phy_res->timestamp);
        rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
-                               IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+                               NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
        rx_status.freq =
                ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
                                               rx_status.band);
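
The RX path derives the band from a PHY flag first and only then
converts the channel number, since the channel-to-frequency mapping is
band-dependent. A sketch around mac80211's real helper (the wrapper is
illustrative):

static void example_fill_band_and_freq(struct ieee80211_rx_status *rx_status,
                                       u16 phy_flags, u16 channel)
{
        rx_status->band = (phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
                                NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
        /* band-dependent mapping: e.g. channel 36 on 5 GHz -> 5180 MHz */
        rx_status->freq = ieee80211_channel_to_frequency(channel,
                                                         rx_status->band);
}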
index 2d47cb24c48b1142d2ee4822daee22afa4920811..b228552184b5189c8b23e8cb3ca89c86c0a63e5e 100644 (file)
@@ -719,7 +719,7 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
 void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
                         struct iwl_rxon_context *ctx)
 {
-       enum ieee80211_band band = ch->band;
+       enum nl80211_band band = ch->band;
        u16 channel = ch->hw_value;
 
        if ((le16_to_cpu(ctx->staging.channel) == channel) &&
@@ -727,7 +727,7 @@ void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
                return;
 
        ctx->staging.channel = cpu_to_le16(channel);
-       if (band == IEEE80211_BAND_5GHZ)
+       if (band == NL80211_BAND_5GHZ)
                ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
        else
                ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
@@ -740,10 +740,10 @@ void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
 
 void iwl_set_flags_for_band(struct iwl_priv *priv,
                            struct iwl_rxon_context *ctx,
-                           enum ieee80211_band band,
+                           enum nl80211_band band,
                            struct ieee80211_vif *vif)
 {
-       if (band == IEEE80211_BAND_5GHZ) {
+       if (band == NL80211_BAND_5GHZ) {
                ctx->staging.flags &=
                    ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
                      | RXON_FLG_CCK_MSK);
@@ -1476,7 +1476,7 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
 
        iwlagn_set_rxon_chain(priv, ctx);
 
-       if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
+       if (bss_conf->use_cts_prot && (priv->band != NL80211_BAND_5GHZ))
                ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
        else
                ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
index 81a2ddbe9569a920cbc02cf0231c73293cf48912..d01766f16175eac5c3c6b8e66e95a75e89dafc50 100644 (file)
@@ -312,7 +312,7 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
                       scan_notif->tsf_high, scan_notif->status);
 
        IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
-                      (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
+                      (priv->scan_band == NL80211_BAND_2GHZ) ? "2.4" : "5.2",
                       jiffies_to_msecs(jiffies - priv->scan_start));
 
        /*
@@ -362,9 +362,9 @@ void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
 }
 
 static u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
-                                    enum ieee80211_band band, u8 n_probes)
+                                    enum nl80211_band band, u8 n_probes)
 {
-       if (band == IEEE80211_BAND_5GHZ)
+       if (band == NL80211_BAND_5GHZ)
                return IWL_ACTIVE_DWELL_TIME_52 +
                        IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
        else
@@ -431,9 +431,9 @@ static u16 iwl_limit_dwell(struct iwl_priv *priv, u16 dwell_time)
 }
 
 static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
-                                     enum ieee80211_band band)
+                                     enum nl80211_band band)
 {
-       u16 passive = (band == IEEE80211_BAND_2GHZ) ?
+       u16 passive = (band == NL80211_BAND_2GHZ) ?
            IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
            IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
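
The dwell-time helpers above split scan timing by band: active dwell
grows with the number of probe requests, passive dwell is a fixed base
plus a per-band term. Condensed into one sketch; the 2.4 GHz constants
are assumed to be the driver's existing IWL_ACTIVE_DWELL_TIME_24 and
IWL_ACTIVE_DWELL_FACTOR_24GHZ, which this diff does not touch:

static u16 example_active_dwell(enum nl80211_band band, u8 n_probes)
{
        if (band == NL80211_BAND_5GHZ)
                return IWL_ACTIVE_DWELL_TIME_52 +
                       IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
        return IWL_ACTIVE_DWELL_TIME_24 +
               IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
}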
 
@@ -442,7 +442,7 @@ static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
 
 /* Return valid, unused, channel for a passive scan to reset the RF */
 static u8 iwl_get_single_channel_number(struct iwl_priv *priv,
-                                       enum ieee80211_band band)
+                                       enum nl80211_band band)
 {
        struct ieee80211_supported_band *sband = priv->hw->wiphy->bands[band];
        struct iwl_rxon_context *ctx;
@@ -470,7 +470,7 @@ static u8 iwl_get_single_channel_number(struct iwl_priv *priv,
 
 static int iwl_get_channel_for_reset_scan(struct iwl_priv *priv,
                                          struct ieee80211_vif *vif,
-                                         enum ieee80211_band band,
+                                         enum nl80211_band band,
                                          struct iwl_scan_channel *scan_ch)
 {
        const struct ieee80211_supported_band *sband;
@@ -492,7 +492,7 @@ static int iwl_get_channel_for_reset_scan(struct iwl_priv *priv,
                        cpu_to_le16(IWL_RADIO_RESET_DWELL_TIME);
                /* Set txpower levels to defaults */
                scan_ch->dsp_atten = 110;
-               if (band == IEEE80211_BAND_5GHZ)
+               if (band == NL80211_BAND_5GHZ)
                        scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
                else
                        scan_ch->tx_gain = ((1 << 5) | (5 << 3));
@@ -505,7 +505,7 @@ static int iwl_get_channel_for_reset_scan(struct iwl_priv *priv,
 
 static int iwl_get_channels_for_scan(struct iwl_priv *priv,
                                     struct ieee80211_vif *vif,
-                                    enum ieee80211_band band,
+                                    enum nl80211_band band,
                                     u8 is_active, u8 n_probes,
                                     struct iwl_scan_channel *scan_ch)
 {
@@ -553,7 +553,7 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
                 * power level:
                 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
                 */
-               if (band == IEEE80211_BAND_5GHZ)
+               if (band == NL80211_BAND_5GHZ)
                        scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
                else
                        scan_ch->tx_gain = ((1 << 5) | (5 << 3));
@@ -636,7 +636,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
        u32 rate_flags = 0;
        u16 cmd_len = 0;
        u16 rx_chain = 0;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        u8 n_probes = 0;
        u8 rx_ant = priv->nvm_data->valid_rx_ant;
        u8 rate;
@@ -750,7 +750,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
        scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
 
        switch (priv->scan_band) {
-       case IEEE80211_BAND_2GHZ:
+       case NL80211_BAND_2GHZ:
                scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
                chan_mod = le32_to_cpu(
                        priv->contexts[IWL_RXON_CTX_BSS].active.flags &
@@ -771,7 +771,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
                    priv->lib->bt_params->advanced_bt_coexist)
                        scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT;
                break;
-       case IEEE80211_BAND_5GHZ:
+       case NL80211_BAND_5GHZ:
                rate = IWL_RATE_6M_PLCP;
                break;
        default:
@@ -809,7 +809,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 
        band = priv->scan_band;
 
-       if (band == IEEE80211_BAND_2GHZ &&
+       if (band == NL80211_BAND_2GHZ &&
            priv->lib->bt_params &&
            priv->lib->bt_params->advanced_bt_coexist) {
                /* transmit 2.4 GHz probes only on first antenna */
@@ -925,16 +925,16 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 void iwl_init_scan_params(struct iwl_priv *priv)
 {
        u8 ant_idx = fls(priv->nvm_data->valid_tx_ant) - 1;
-       if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
-               priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
-       if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
-               priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
+       if (!priv->scan_tx_ant[NL80211_BAND_5GHZ])
+               priv->scan_tx_ant[NL80211_BAND_5GHZ] = ant_idx;
+       if (!priv->scan_tx_ant[NL80211_BAND_2GHZ])
+               priv->scan_tx_ant[NL80211_BAND_2GHZ] = ant_idx;
 }
 
 int __must_check iwl_scan_initiate(struct iwl_priv *priv,
                                   struct ieee80211_vif *vif,
                                   enum iwl_scan_type scan_type,
-                                  enum ieee80211_band band)
+                                  enum nl80211_band band)
 {
        int ret;
 
index 8e9768a553e4ad0a38cabdfd2cd9cec2cb1ebb6c..de6ec9b7ace4587f778c2658dc5f7696debd8a4d 100644 (file)
@@ -579,7 +579,7 @@ static void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
 
        /* Set up the rate scaling to start at selected rate, fall back
         * all the way down to 1M in IEEE order, and then spin on 1M */
-       if (priv->band == IEEE80211_BAND_5GHZ)
+       if (priv->band == NL80211_BAND_5GHZ)
                r = IWL_RATE_6M_INDEX;
        else if (ctx && ctx->vif && ctx->vif->p2p)
                r = IWL_RATE_6M_INDEX;
index 59e2001c39f8d3e26ba9fcbfa33b4450add6f7fa..4b97371c3b4291a8abf8ea5c9793f52351486d48 100644 (file)
@@ -81,7 +81,7 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
                tx_flags |= TX_CMD_FLG_TSF_MSK;
        else if (ieee80211_is_back_req(fc))
                tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
-       else if (info->band == IEEE80211_BAND_2GHZ &&
+       else if (info->band == NL80211_BAND_2GHZ &&
                 priv->lib->bt_params &&
                 priv->lib->bt_params->advanced_bt_coexist &&
                 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
@@ -177,7 +177,7 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
                rate_idx = rate_lowest_index(
                                &priv->nvm_data->bands[info->band], sta);
        /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
-       if (info->band == IEEE80211_BAND_5GHZ)
+       if (info->band == NL80211_BAND_5GHZ)
                rate_idx += IWL_FIRST_OFDM_RATE;
        /* Get PLCP rate for tx_cmd->rate_n_flags */
        rate_plcp = iwl_rates[rate_idx].plcp;
index a90dbab6bbbe3d60ac035b4883c5d1a0c5e21e24..5c2aae64d59fccdfd30cb34613f966591e145721 100644 (file)
 #define IWL1000_UCODE_API_MAX 5
 #define IWL100_UCODE_API_MAX 5
 
-/* Oldest version we won't warn about */
-#define IWL1000_UCODE_API_OK 5
-#define IWL100_UCODE_API_OK 5
-
 /* Lowest firmware API version supported */
 #define IWL1000_UCODE_API_MIN 1
 #define IWL100_UCODE_API_MIN 5
@@ -68,7 +64,7 @@ static const struct iwl_base_params iwl1000_base_params = {
 static const struct iwl_ht_params iwl1000_ht_params = {
        .ht_greenfield_support = true,
        .use_rts_for_aggregation = true, /* use rts/cts protection */
-       .ht40_bands = BIT(IEEE80211_BAND_2GHZ),
+       .ht40_bands = BIT(NL80211_BAND_2GHZ),
 };
 
 static const struct iwl_eeprom_params iwl1000_eeprom_params = {
@@ -86,7 +82,6 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = {
 #define IWL_DEVICE_1000                                                \
        .fw_name_pre = IWL1000_FW_PRE,                          \
        .ucode_api_max = IWL1000_UCODE_API_MAX,                 \
-       .ucode_api_ok = IWL1000_UCODE_API_OK,                   \
        .ucode_api_min = IWL1000_UCODE_API_MIN,                 \
        .device_family = IWL_DEVICE_FAMILY_1000,                \
        .max_inst_size = IWLAGN_RTC_INST_SIZE,                  \
@@ -112,7 +107,6 @@ const struct iwl_cfg iwl1000_bg_cfg = {
 #define IWL_DEVICE_100                                         \
        .fw_name_pre = IWL100_FW_PRE,                           \
        .ucode_api_max = IWL100_UCODE_API_MAX,                  \
-       .ucode_api_ok = IWL100_UCODE_API_OK,                    \
        .ucode_api_min = IWL100_UCODE_API_MIN,                  \
        .device_family = IWL_DEVICE_FAMILY_100,                 \
        .max_inst_size = IWLAGN_RTC_INST_SIZE,                  \
@@ -136,5 +130,5 @@ const struct iwl_cfg iwl100_bg_cfg = {
        IWL_DEVICE_100,
 };
 
-MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_OK));
-MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_OK));
+MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_MAX));
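
From this file through iwl-9000.c, every *_UCODE_API_OK tier is dropped
and MODULE_FIRMWARE() advertises *_UCODE_API_MAX instead: the driver no
longer distinguishes "old but acceptable" firmware, only the supported
min..max range. For reference, the filename these macros construct,
sketched with illustrative EX_ names:

#include <linux/stringify.h>

#define EX_FW_PRE               "iwlwifi-1000-"
#define EX_MODULE_FIRMWARE(api) EX_FW_PRE __stringify(api) ".ucode"

MODULE_FIRMWARE(EX_MODULE_FIRMWARE(5)); /* -> "iwlwifi-1000-5.ucode" */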
index a6da9594c4a5e0701d0209ac86f13c6c19d9e044..2e823bdc4757b15ab16c743ec8f7617d54ff91b2 100644 (file)
 #define IWL105_UCODE_API_MAX 6
 #define IWL135_UCODE_API_MAX 6
 
-/* Oldest version we won't warn about */
-#define IWL2030_UCODE_API_OK 6
-#define IWL2000_UCODE_API_OK 6
-#define IWL105_UCODE_API_OK 6
-#define IWL135_UCODE_API_OK 6
-
 /* Lowest firmware API version supported */
 #define IWL2030_UCODE_API_MIN 5
 #define IWL2000_UCODE_API_MIN 5
@@ -95,7 +89,7 @@ static const struct iwl_base_params iwl2030_base_params = {
 static const struct iwl_ht_params iwl2000_ht_params = {
        .ht_greenfield_support = true,
        .use_rts_for_aggregation = true, /* use rts/cts protection */
-       .ht40_bands = BIT(IEEE80211_BAND_2GHZ),
+       .ht40_bands = BIT(NL80211_BAND_2GHZ),
 };
 
 static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
@@ -114,7 +108,6 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
 #define IWL_DEVICE_2000                                                \
        .fw_name_pre = IWL2000_FW_PRE,                          \
        .ucode_api_max = IWL2000_UCODE_API_MAX,                 \
-       .ucode_api_ok = IWL2000_UCODE_API_OK,                   \
        .ucode_api_min = IWL2000_UCODE_API_MIN,                 \
        .device_family = IWL_DEVICE_FAMILY_2000,                \
        .max_inst_size = IWL60_RTC_INST_SIZE,                   \
@@ -142,7 +135,6 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = {
 #define IWL_DEVICE_2030                                                \
        .fw_name_pre = IWL2030_FW_PRE,                          \
        .ucode_api_max = IWL2030_UCODE_API_MAX,                 \
-       .ucode_api_ok = IWL2030_UCODE_API_OK,                   \
        .ucode_api_min = IWL2030_UCODE_API_MIN,                 \
        .device_family = IWL_DEVICE_FAMILY_2030,                \
        .max_inst_size = IWL60_RTC_INST_SIZE,                   \
@@ -163,7 +155,6 @@ const struct iwl_cfg iwl2030_2bgn_cfg = {
 #define IWL_DEVICE_105                                         \
        .fw_name_pre = IWL105_FW_PRE,                           \
        .ucode_api_max = IWL105_UCODE_API_MAX,                  \
-       .ucode_api_ok = IWL105_UCODE_API_OK,                    \
        .ucode_api_min = IWL105_UCODE_API_MIN,                  \
        .device_family = IWL_DEVICE_FAMILY_105,                 \
        .max_inst_size = IWL60_RTC_INST_SIZE,                   \
@@ -191,7 +182,6 @@ const struct iwl_cfg iwl105_bgn_d_cfg = {
 #define IWL_DEVICE_135                                         \
        .fw_name_pre = IWL135_FW_PRE,                           \
        .ucode_api_max = IWL135_UCODE_API_MAX,                  \
-       .ucode_api_ok = IWL135_UCODE_API_OK,                    \
        .ucode_api_min = IWL135_UCODE_API_MIN,                  \
        .device_family = IWL_DEVICE_FAMILY_135,                 \
        .max_inst_size = IWL60_RTC_INST_SIZE,                   \
@@ -210,7 +200,7 @@ const struct iwl_cfg iwl135_bgn_cfg = {
        .ht_params = &iwl2000_ht_params,
 };
 
-MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_OK));
-MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_OK));
-MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_OK));
-MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_OK));
+MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_MAX));
index 8b5afdef2d8396fcf7aad68ea088419a44ca8b32..4c3e3cf4c799d221d669d89b59a3bb0915944917 100644 (file)
 #define IWL5000_UCODE_API_MAX 5
 #define IWL5150_UCODE_API_MAX 2
 
-/* Oldest version we won't warn about */
-#define IWL5000_UCODE_API_OK 5
-#define IWL5150_UCODE_API_OK 2
-
 /* Lowest firmware API version supported */
 #define IWL5000_UCODE_API_MIN 1
 #define IWL5150_UCODE_API_MIN 1
@@ -66,7 +62,7 @@ static const struct iwl_base_params iwl5000_base_params = {
 
 static const struct iwl_ht_params iwl5000_ht_params = {
        .ht_greenfield_support = true,
-       .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+       .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
 };
 
 static const struct iwl_eeprom_params iwl5000_eeprom_params = {
@@ -84,7 +80,6 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = {
 #define IWL_DEVICE_5000                                                \
        .fw_name_pre = IWL5000_FW_PRE,                          \
        .ucode_api_max = IWL5000_UCODE_API_MAX,                 \
-       .ucode_api_ok = IWL5000_UCODE_API_OK,                   \
        .ucode_api_min = IWL5000_UCODE_API_MIN,                 \
        .device_family = IWL_DEVICE_FAMILY_5000,                \
        .max_inst_size = IWLAGN_RTC_INST_SIZE,                  \
@@ -132,7 +127,6 @@ const struct iwl_cfg iwl5350_agn_cfg = {
        .name = "Intel(R) WiMAX/WiFi Link 5350 AGN",
        .fw_name_pre = IWL5000_FW_PRE,
        .ucode_api_max = IWL5000_UCODE_API_MAX,
-       .ucode_api_ok = IWL5000_UCODE_API_OK,
        .ucode_api_min = IWL5000_UCODE_API_MIN,
        .device_family = IWL_DEVICE_FAMILY_5000,
        .max_inst_size = IWLAGN_RTC_INST_SIZE,
@@ -149,7 +143,6 @@ const struct iwl_cfg iwl5350_agn_cfg = {
 #define IWL_DEVICE_5150                                                \
        .fw_name_pre = IWL5150_FW_PRE,                          \
        .ucode_api_max = IWL5150_UCODE_API_MAX,                 \
-       .ucode_api_ok = IWL5150_UCODE_API_OK,                   \
        .ucode_api_min = IWL5150_UCODE_API_MIN,                 \
        .device_family = IWL_DEVICE_FAMILY_5150,                \
        .max_inst_size = IWLAGN_RTC_INST_SIZE,                  \
@@ -174,5 +167,5 @@ const struct iwl_cfg iwl5150_abg_cfg = {
        IWL_DEVICE_5150,
 };
 
-MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_OK));
-MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_OK));
+MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX));
index 0b4ba781b6312f6dc017c938d61f1dffa40216a1..5a7b7e1f0aab5d790774b41f8a34711d26ee2044 100644 (file)
 #define IWL6000G2_UCODE_API_MAX 6
 #define IWL6035_UCODE_API_MAX 6
 
-/* Oldest version we won't warn about */
-#define IWL6000_UCODE_API_OK 4
-#define IWL6000G2_UCODE_API_OK 5
-#define IWL6050_UCODE_API_OK 5
-#define IWL6000G2B_UCODE_API_OK 6
-#define IWL6035_UCODE_API_OK 6
-
 /* Lowest firmware API version supported */
 #define IWL6000_UCODE_API_MIN 4
 #define IWL6050_UCODE_API_MIN 4
@@ -117,7 +110,7 @@ static const struct iwl_base_params iwl6000_g2_base_params = {
 static const struct iwl_ht_params iwl6000_ht_params = {
        .ht_greenfield_support = true,
        .use_rts_for_aggregation = true, /* use rts/cts protection */
-       .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+       .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
 };
 
 static const struct iwl_eeprom_params iwl6000_eeprom_params = {
@@ -136,7 +129,6 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = {
 #define IWL_DEVICE_6005                                                \
        .fw_name_pre = IWL6005_FW_PRE,                          \
        .ucode_api_max = IWL6000G2_UCODE_API_MAX,               \
-       .ucode_api_ok = IWL6000G2_UCODE_API_OK,                 \
        .ucode_api_min = IWL6000G2_UCODE_API_MIN,               \
        .device_family = IWL_DEVICE_FAMILY_6005,                \
        .max_inst_size = IWL60_RTC_INST_SIZE,                   \
@@ -191,7 +183,6 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
 #define IWL_DEVICE_6030                                                \
        .fw_name_pre = IWL6030_FW_PRE,                          \
        .ucode_api_max = IWL6000G2_UCODE_API_MAX,               \
-       .ucode_api_ok = IWL6000G2B_UCODE_API_OK,                \
        .ucode_api_min = IWL6000G2_UCODE_API_MIN,               \
        .device_family = IWL_DEVICE_FAMILY_6030,                \
        .max_inst_size = IWL60_RTC_INST_SIZE,                   \
@@ -228,7 +219,6 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
 #define IWL_DEVICE_6035                                                \
        .fw_name_pre = IWL6030_FW_PRE,                          \
        .ucode_api_max = IWL6035_UCODE_API_MAX,                 \
-       .ucode_api_ok = IWL6035_UCODE_API_OK,                   \
        .ucode_api_min = IWL6035_UCODE_API_MIN,                 \
        .device_family = IWL_DEVICE_FAMILY_6030,                \
        .max_inst_size = IWL60_RTC_INST_SIZE,                   \
@@ -282,7 +272,6 @@ const struct iwl_cfg iwl130_bg_cfg = {
 #define IWL_DEVICE_6000i                                       \
        .fw_name_pre = IWL6000_FW_PRE,                          \
        .ucode_api_max = IWL6000_UCODE_API_MAX,                 \
-       .ucode_api_ok = IWL6000_UCODE_API_OK,                   \
        .ucode_api_min = IWL6000_UCODE_API_MIN,                 \
        .device_family = IWL_DEVICE_FAMILY_6000i,               \
        .max_inst_size = IWL60_RTC_INST_SIZE,                   \
@@ -370,7 +359,6 @@ const struct iwl_cfg iwl6000_3agn_cfg = {
        .name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
        .fw_name_pre = IWL6000_FW_PRE,
        .ucode_api_max = IWL6000_UCODE_API_MAX,
-       .ucode_api_ok = IWL6000_UCODE_API_OK,
        .ucode_api_min = IWL6000_UCODE_API_MIN,
        .device_family = IWL_DEVICE_FAMILY_6000,
        .max_inst_size = IWL60_RTC_INST_SIZE,
@@ -383,7 +371,7 @@ const struct iwl_cfg iwl6000_3agn_cfg = {
        .led_mode = IWL_LED_BLINK,
 };
 
-MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_OK));
-MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_OK));
-MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_OK));
-MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_OK));
+MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_MAX));
index fc475ce59b47efe0c7a19ccddacfd3b580615915..abd2904ecc4816c0ec8e81194e09dc04240aabdf 100644 (file)
 #define IWL7265D_UCODE_API_MAX 21
 #define IWL3168_UCODE_API_MAX  21
 
-/* Oldest version we won't warn about */
-#define IWL7260_UCODE_API_OK   13
-#define IWL7265_UCODE_API_OK   13
-#define IWL7265D_UCODE_API_OK  13
-#define IWL3168_UCODE_API_OK   20
-
 /* Lowest firmware API version supported */
-#define IWL7260_UCODE_API_MIN  13
-#define IWL7265_UCODE_API_MIN  13
-#define IWL7265D_UCODE_API_MIN 13
+#define IWL7260_UCODE_API_MIN  16
+#define IWL7265_UCODE_API_MIN  16
+#define IWL7265D_UCODE_API_MIN 16
 #define IWL3168_UCODE_API_MIN  20
 
 /* NVM versions */
@@ -162,7 +156,7 @@ static const struct iwl_tt_params iwl7000_high_temp_tt_params = {
 
 static const struct iwl_ht_params iwl7000_ht_params = {
        .stbc = true,
-       .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+       .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
 };
 
 #define IWL_DEVICE_7000_COMMON                                 \
@@ -179,25 +173,21 @@ static const struct iwl_ht_params iwl7000_ht_params = {
 #define IWL_DEVICE_7000                                                \
        IWL_DEVICE_7000_COMMON,                                 \
        .ucode_api_max = IWL7260_UCODE_API_MAX,                 \
-       .ucode_api_ok = IWL7260_UCODE_API_OK,                   \
        .ucode_api_min = IWL7260_UCODE_API_MIN
 
 #define IWL_DEVICE_7005                                                \
        IWL_DEVICE_7000_COMMON,                                 \
        .ucode_api_max = IWL7265_UCODE_API_MAX,                 \
-       .ucode_api_ok = IWL7265_UCODE_API_OK,                   \
        .ucode_api_min = IWL7265_UCODE_API_MIN
 
 #define IWL_DEVICE_3008                                                \
        IWL_DEVICE_7000_COMMON,                                 \
        .ucode_api_max = IWL3168_UCODE_API_MAX,                 \
-       .ucode_api_ok = IWL3168_UCODE_API_OK,                   \
        .ucode_api_min = IWL3168_UCODE_API_MIN
 
 #define IWL_DEVICE_7005D                                       \
        IWL_DEVICE_7000_COMMON,                                 \
        .ucode_api_max = IWL7265D_UCODE_API_MAX,                \
-       .ucode_api_ok = IWL7265D_UCODE_API_OK,                  \
        .ucode_api_min = IWL7265D_UCODE_API_MIN
 
 const struct iwl_cfg iwl7260_2ac_cfg = {
@@ -297,7 +287,7 @@ static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = {
 static const struct iwl_ht_params iwl7265_ht_params = {
        .stbc = true,
        .ldpc = true,
-       .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+       .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
 };
 
 const struct iwl_cfg iwl3165_2ac_cfg = {
@@ -388,8 +378,8 @@ const struct iwl_cfg iwl7265d_n_cfg = {
        .dccm_len = IWL7265_DCCM_LEN,
 };
 
-MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
-MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
-MODULE_FIRMWARE(IWL3168_MODULE_FIRMWARE(IWL3168_UCODE_API_OK));
-MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7265_UCODE_API_OK));
-MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7265D_UCODE_API_OK));
+MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL3168_MODULE_FIRMWARE(IWL3168_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7265_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7265D_UCODE_API_MAX));
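
Raising the 7000-family *_UCODE_API_MIN values from 13 to 16 simply
refuses pre-16 images. At probe time the driver requests candidate files
from the highest API downward until one loads; a simplified sketch of
that fallback with the stock firmware loader (the device pointer and
name prefix are illustrative, and the real driver uses the asynchronous
request variant):

static int example_request_best_fw(struct device *dev,
                                   const struct firmware **fw,
                                   int api_max, int api_min)
{
        int api;

        for (api = api_max; api >= api_min; api--) {
                char name[48];

                snprintf(name, sizeof(name), "iwlwifi-7260-%d.ucode", api);
                if (request_firmware(fw, name, dev) == 0)
                        return 0;       /* loadable image found */
        }
        return -ENOENT; /* nothing at or above the API minimum */
}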
index 97be104d12030f194a07ab67b5f0e0b8ee15f3e6..2d20556ce22d7b96028bb6537d1ef9cabf6728c8 100644 (file)
 #define IWL8000_UCODE_API_MAX  21
 #define IWL8265_UCODE_API_MAX  21
 
-/* Oldest version we won't warn about */
-#define IWL8000_UCODE_API_OK   13
-#define IWL8265_UCODE_API_OK   20
-
 /* Lowest firmware API version supported */
-#define IWL8000_UCODE_API_MIN  13
+#define IWL8000_UCODE_API_MIN  16
 #define IWL8265_UCODE_API_MIN  20
 
 /* NVM versions */
@@ -93,7 +89,7 @@
 #define IWL8260_SMEM_OFFSET            0x400000
 #define IWL8260_SMEM_LEN               0x68000
 
-#define IWL8000_FW_PRE "iwlwifi-8000"
+#define IWL8000_FW_PRE "iwlwifi-8000C-"
 #define IWL8000_MODULE_FIRMWARE(api) \
        IWL8000_FW_PRE "-" __stringify(api) ".ucode"
 
@@ -128,7 +124,7 @@ static const struct iwl_base_params iwl8000_base_params = {
 static const struct iwl_ht_params iwl8000_ht_params = {
        .stbc = true,
        .ldpc = true,
-       .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+       .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
 };
 
 static const struct iwl_tt_params iwl8000_tt_params = {
@@ -175,19 +171,16 @@ static const struct iwl_tt_params iwl8000_tt_params = {
 #define IWL_DEVICE_8000                                                        \
        IWL_DEVICE_8000_COMMON,                                         \
        .ucode_api_max = IWL8000_UCODE_API_MAX,                         \
-       .ucode_api_ok = IWL8000_UCODE_API_OK,                           \
        .ucode_api_min = IWL8000_UCODE_API_MIN                          \
 
 #define IWL_DEVICE_8260                                                        \
        IWL_DEVICE_8000_COMMON,                                         \
        .ucode_api_max = IWL8000_UCODE_API_MAX,                         \
-       .ucode_api_ok = IWL8000_UCODE_API_OK,                           \
        .ucode_api_min = IWL8000_UCODE_API_MIN                          \
 
 #define IWL_DEVICE_8265                                                        \
        IWL_DEVICE_8000_COMMON,                                         \
        .ucode_api_max = IWL8265_UCODE_API_MAX,                         \
-       .ucode_api_ok = IWL8265_UCODE_API_OK,                           \
        .ucode_api_min = IWL8265_UCODE_API_MIN                          \
 
 const struct iwl_cfg iwl8260_2n_cfg = {
@@ -259,5 +252,5 @@ const struct iwl_cfg iwl4165_2ac_sdio_cfg = {
        .max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
 };
 
-MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK));
-MODULE_FIRMWARE(IWL8265_MODULE_FIRMWARE(IWL8265_UCODE_API_OK));
+MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL8265_MODULE_FIRMWARE(IWL8265_UCODE_API_MAX));
index 318b1dc171f23510878a0d5df326020980ca3ff3..b9aca3795f06f1131e0ebee47965735bfcc0e18b 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015-2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -18,7 +18,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015-2016 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 /* Highest firmware API version supported */
 #define IWL9000_UCODE_API_MAX  21
 
-/* Oldest version we won't warn about */
-#define IWL9000_UCODE_API_OK   13
-
 /* Lowest firmware API version supported */
-#define IWL9000_UCODE_API_MIN  13
+#define IWL9000_UCODE_API_MIN  16
 
 /* NVM versions */
 #define IWL9000_NVM_VERSION            0x0a1d
@@ -96,7 +93,7 @@ static const struct iwl_base_params iwl9000_base_params = {
 static const struct iwl_ht_params iwl9000_ht_params = {
        .stbc = true,
        .ldpc = true,
-       .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+       .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
 };
 
 static const struct iwl_tt_params iwl9000_tt_params = {
@@ -122,7 +119,6 @@ static const struct iwl_tt_params iwl9000_tt_params = {
 
 #define IWL_DEVICE_9000                                                        \
        .ucode_api_max = IWL9000_UCODE_API_MAX,                         \
-       .ucode_api_ok = IWL9000_UCODE_API_OK,                           \
        .ucode_api_min = IWL9000_UCODE_API_MIN,                         \
        .device_family = IWL_DEVICE_FAMILY_8000,                        \
        .max_inst_size = IWL60_RTC_INST_SIZE,                           \
@@ -137,14 +133,15 @@ static const struct iwl_tt_params iwl9000_tt_params = {
        .dccm2_len = IWL9000_DCCM2_LEN,                                 \
        .smem_offset = IWL9000_SMEM_OFFSET,                             \
        .smem_len = IWL9000_SMEM_LEN,                                   \
+       .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,           \
        .thermal_params = &iwl9000_tt_params,                           \
        .apmg_not_supported = true,                                     \
        .mq_rx_supported = true,                                        \
        .vht_mu_mimo_supported = true,                                  \
        .mac_addr_from_csr = true
 
-const struct iwl_cfg iwl9260_2ac_cfg = {
-               .name = "Intel(R) Dual Band Wireless AC 9260",
+const struct iwl_cfg iwl9560_2ac_cfg = {
+               .name = "Intel(R) Dual Band Wireless AC 9560",
                .fw_name_pre = IWL9000_FW_PRE,
                IWL_DEVICE_9000,
                .ht_params = &iwl9000_ht_params,
@@ -163,4 +160,4 @@ const struct iwl_cfg iwl5165_2ac_cfg = {
                .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
 };
 
-MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_OK));
+MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
index 3e4d346be3502ab1b84cb396ae9516dc2804c5f0..720679889ab31e8c37985059cbf92caf1dd43804 100644 (file)
@@ -131,6 +131,8 @@ enum iwl_led_mode {
 #define IWL_MAX_WD_TIMEOUT     120000
 
 #define IWL_DEFAULT_MAX_TX_POWER 22
+#define IWL_TX_CSUM_NETIF_FLAGS (NETIF_F_IPV6_CSUM | NETIF_F_IP_CSUM |\
+                                NETIF_F_TSO | NETIF_F_TSO6)
 
 /* Antenna presence definitions */
 #define        ANT_NONE        0x0
@@ -183,7 +185,7 @@ struct iwl_base_params {
  * @stbc: support Tx STBC and 1*SS Rx STBC
  * @ldpc: support Tx/Rx with LDPC
  * @use_rts_for_aggregation: use rts/cts protection for HT traffic
- * @ht40_bands: bitmap of bands (using %IEEE80211_BAND_*) that support HT40
+ * @ht40_bands: bitmap of bands (using %NL80211_BAND_*) that support HT40
  */
 struct iwl_ht_params {
        enum ieee80211_smps_mode smps_mode;
@@ -277,8 +279,6 @@ struct iwl_pwr_tx_backoff {
  *     (.ucode) will be added to filename before loading from disk. The
  *     filename is constructed as fw_name_pre<api>.ucode.
  * @ucode_api_max: Highest version of uCode API supported by driver.
- * @ucode_api_ok: oldest version of the uCode API that is OK to load
- *     without a warning, for use in transitions
  * @ucode_api_min: Lowest version of uCode API supported by driver.
  * @max_inst_size: The maximal length of the fw inst section
  * @max_data_size: The maximal length of the fw data section
@@ -324,7 +324,6 @@ struct iwl_cfg {
        const char *name;
        const char *fw_name_pre;
        const unsigned int ucode_api_max;
-       const unsigned int ucode_api_ok;
        const unsigned int ucode_api_min;
        const enum iwl_device_family device_family;
        const u32 max_data_size;
@@ -439,7 +438,7 @@ extern const struct iwl_cfg iwl8265_2ac_cfg;
 extern const struct iwl_cfg iwl4165_2ac_cfg;
 extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
 extern const struct iwl_cfg iwl4165_2ac_sdio_cfg;
-extern const struct iwl_cfg iwl9260_2ac_cfg;
+extern const struct iwl_cfg iwl9560_2ac_cfg;
 extern const struct iwl_cfg iwl5165_2ac_cfg;
 #endif /* CONFIG_IWLMVM */
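
IWL_TX_CSUM_NETIF_FLAGS, combined with NETIF_F_RXCSUM in the 9000-series
.features entry above, is a grouping of standard netdev offload bits;
spelled out as a sketch:

#include <linux/netdev_features.h>

/* TX checksum offload for IPv4 and IPv6, TCP segmentation offload for
 * both, plus RX checksum validation. */
netdev_features_t features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                             NETIF_F_TSO | NETIF_F_TSO6 |
                             NETIF_F_RXCSUM;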
 
index f899666acb41f44f56a326f5b0bfcff6dce88bde..ff18b06586777f9dacff02cf8f0446d16e7f2744 100644 (file)
@@ -179,6 +179,8 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
                kfree(drv->fw.dbg_conf_tlv[i]);
        for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++)
                kfree(drv->fw.dbg_trigger_tlv[i]);
+       for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_mem_tlv); i++)
+               kfree(drv->fw.dbg_mem_tlv[i]);
 
        for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
                iwl_free_fw_img(drv, drv->fw.img + i);
@@ -238,19 +240,6 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
        snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
                 name_pre, tag);
 
-       /*
-        * Starting 8000B - FW name format has changed. This overwrites the
-        * previous name and uses the new format.
-        */
-       if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
-               char rev_step = 'A' + CSR_HW_REV_STEP(drv->trans->hw_rev);
-
-               if (rev_step != 'A')
-                       snprintf(drv->firmware_name,
-                                sizeof(drv->firmware_name), "%s%c-%s.ucode",
-                                name_pre, rev_step, tag);
-       }
-
        IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n",
                       (drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
                                ? "EXPERIMENTAL " : "",
@@ -297,6 +286,7 @@ struct iwl_firmware_pieces {
        size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
        struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
        size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
+       struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv[FW_DBG_MEM_MAX];
 };
 
 /*
@@ -1041,6 +1031,37 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                        iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len);
                        gscan_capa = true;
                        break;
+               case IWL_UCODE_TLV_FW_MEM_SEG: {
+                       struct iwl_fw_dbg_mem_seg_tlv *dbg_mem =
+                               (void *)tlv_data;
+                       u32 type;
+
+                       if (tlv_len != (sizeof(*dbg_mem)))
+                               goto invalid_tlv_len;
+
+                       type = le32_to_cpu(dbg_mem->data_type);
+                       drv->fw.dbg_dynamic_mem = true;
+
+                       if (type >= ARRAY_SIZE(drv->fw.dbg_mem_tlv)) {
+                               IWL_ERR(drv,
+                                       "Skip unknown dbg mem segment: %u\n",
+                                       dbg_mem->data_type);
+                               break;
+                       }
+
+                       if (pieces->dbg_mem_tlv[type]) {
+                               IWL_ERR(drv,
+                                       "Ignore duplicate mem segment: %u\n",
+                                       dbg_mem->data_type);
+                               break;
+                       }
+
+                       IWL_DEBUG_INFO(drv, "Found debug memory segment: %u\n",
+                                      dbg_mem->data_type);
+
+                       pieces->dbg_mem_tlv[type] = dbg_mem;
+                       break;
+                       }
                default:
                        IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
                        break;
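
The new IWL_UCODE_TLV_FW_MEM_SEG case follows the parser's usual shape:
validate the TLV length, bound the segment type, drop duplicates, and
keep only a pointer into the raw image; the kmemdup() loop added further
down copies each segment once loading can no longer fail. Condensed (the
function wrapper is illustrative):

static int example_store_mem_seg(struct iwl_firmware_pieces *pieces,
                                 struct iwl_fw_dbg_mem_seg_tlv *dbg_mem,
                                 u32 tlv_len)
{
        u32 type;

        if (tlv_len != sizeof(*dbg_mem))
                return -EINVAL;         /* malformed TLV */
        type = le32_to_cpu(dbg_mem->data_type);
        if (type >= ARRAY_SIZE(pieces->dbg_mem_tlv) ||
            pieces->dbg_mem_tlv[type])
                return 0;       /* unknown type or duplicate: skip */
        pieces->dbg_mem_tlv[type] = dbg_mem;    /* borrowed; kmemdup() later */
        return 0;
}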
@@ -1060,11 +1081,18 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                return -EINVAL;
        }
 
-       if (WARN(fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
-                !gscan_capa,
-                "GSCAN is supported but capabilities TLV is unavailable\n"))
+       /*
+        * If ucode advertises that it supports GSCAN but GSCAN
+        * capabilities TLV is not present, or if it has an old format,
+        * warn and continue without GSCAN.
+        */
+       if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
+           !gscan_capa) {
+               IWL_DEBUG_INFO(drv,
+                              "GSCAN is supported but capabilities TLV is unavailable\n");
                __clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT,
                            capa->_capa);
+       }
 
        return 0;
 
@@ -1199,7 +1227,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
        int err;
        struct iwl_firmware_pieces *pieces;
        const unsigned int api_max = drv->cfg->ucode_api_max;
-       unsigned int api_ok = drv->cfg->ucode_api_ok;
        const unsigned int api_min = drv->cfg->ucode_api_min;
        size_t trigger_tlv_sz[FW_DBG_TRIGGER_MAX];
        u32 api_ver;
@@ -1212,20 +1239,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
                        IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
        fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS;
 
-       if (!api_ok)
-               api_ok = api_max;
-
        pieces = kzalloc(sizeof(*pieces), GFP_KERNEL);
        if (!pieces)
                return;
 
-       if (!ucode_raw) {
-               if (drv->fw_index <= api_ok)
-                       IWL_ERR(drv,
-                               "request for firmware file '%s' failed.\n",
-                               drv->firmware_name);
+       if (!ucode_raw)
                goto try_again;
-       }
 
        IWL_DEBUG_INFO(drv, "Loaded firmware file '%s' (%zd bytes).\n",
                       drv->firmware_name, ucode_raw->size);
@@ -1267,19 +1286,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
                                api_max, api_ver);
                        goto try_again;
                }
-
-               if (api_ver < api_ok) {
-                       if (api_ok != api_max)
-                               IWL_ERR(drv, "Firmware has old API version, "
-                                       "expected v%u through v%u, got v%u.\n",
-                                       api_ok, api_max, api_ver);
-                       else
-                               IWL_ERR(drv, "Firmware has old API version, "
-                                       "expected v%u, got v%u.\n",
-                                       api_max, api_ver);
-                       IWL_ERR(drv, "New firmware can be obtained from "
-                                     "http://www.intellinuxwireless.org/.\n");
-               }
        }
 
        /*
@@ -1368,6 +1374,17 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
                }
        }
 
+       for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_mem_tlv); i++) {
+               if (pieces->dbg_mem_tlv[i]) {
+                       drv->fw.dbg_mem_tlv[i] =
+                               kmemdup(pieces->dbg_mem_tlv[i],
+                                       sizeof(*drv->fw.dbg_mem_tlv[i]),
+                                       GFP_KERNEL);
+                       if (!drv->fw.dbg_mem_tlv[i])
+                               goto out_free_fw;
+               }
+       }
+
        /* Now that we can no longer fail, copy information */
 
        /*
@@ -1560,9 +1577,7 @@ struct iwl_mod_params iwlwifi_mod_params = {
        .power_level = IWL_POWER_INDEX_1,
        .d0i3_disable = true,
        .d0i3_entry_delay = 1000,
-#ifndef CONFIG_IWLWIFI_UAPSD
-       .uapsd_disable = true,
-#endif /* CONFIG_IWLWIFI_UAPSD */
+       .uapsd_disable = IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT,
        /* the rest are 0 by default */
 };
 IWL_EXPORT_SYMBOL(iwlwifi_mod_params);
@@ -1681,12 +1696,9 @@ module_param_named(lar_disable, iwlwifi_mod_params.lar_disable,
 MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)");
 
 module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable,
-                  bool, S_IRUGO | S_IWUSR);
-#ifdef CONFIG_IWLWIFI_UAPSD
-MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: N)");
-#else
-MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: Y)");
-#endif
+                  uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(uapsd_disable,
+                "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)");
 
 /*
  * set bt_coex_active to true, uCode will do kill/defer
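
uapsd_disable graduates from a compile-time bool into a runtime bitmap,
which is why the CONFIG_IWLWIFI_UAPSD Kconfig option disappeared earlier
in this diff. A sketch of the flag values, an assumption consistent with
the parameter description "1: BSS 2: P2P Client (default: 3)":

enum example_uapsd_disable {
        IWL_DISABLE_UAPSD_BSS           = BIT(0),
        IWL_DISABLE_UAPSD_P2P_CLIENT    = BIT(1),
};

/* modprobe iwlwifi uapsd_disable=0 re-enables U-APSD everywhere;
 * uapsd_disable=3 (the default) disables it for BSS and P2P client. */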
index c15f5be85197f683b6d687478b4d0c9d717cd3b4..bf1b69aec81342778049d9d46f9bf33a5ac32692 100644 (file)
@@ -390,10 +390,10 @@ iwl_eeprom_enh_txp_read_element(struct iwl_nvm_data *data,
                                int n_channels, s8 max_txpower_avg)
 {
        int ch_idx;
-       enum ieee80211_band band;
+       enum nl80211_band band;
 
        band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ?
-               IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
+               NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
 
        for (ch_idx = 0; ch_idx < n_channels; ch_idx++) {
                struct ieee80211_channel *chan = &data->channels[ch_idx];
@@ -526,7 +526,7 @@ static void iwl_init_band_reference(const struct iwl_cfg *cfg,
 
 static void iwl_mod_ht40_chan_info(struct device *dev,
                                   struct iwl_nvm_data *data, int n_channels,
-                                  enum ieee80211_band band, u16 channel,
+                                  enum nl80211_band band, u16 channel,
                                   const struct iwl_eeprom_channel *eeprom_ch,
                                   u8 clear_ht40_extension_channel)
 {
@@ -548,7 +548,7 @@ static void iwl_mod_ht40_chan_info(struct device *dev,
        IWL_DEBUG_EEPROM(dev,
                         "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
                         channel,
-                        band == IEEE80211_BAND_5GHZ ? "5.2" : "2.4",
+                        band == NL80211_BAND_5GHZ ? "5.2" : "2.4",
                         CHECK_AND_PRINT(IBSS),
                         CHECK_AND_PRINT(ACTIVE),
                         CHECK_AND_PRINT(RADAR),
@@ -606,8 +606,8 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
                        n_channels++;
 
                        channel->hw_value = eeprom_ch_array[ch_idx];
-                       channel->band = (band == 1) ? IEEE80211_BAND_2GHZ
-                                                   : IEEE80211_BAND_5GHZ;
+                       channel->band = (band == 1) ? NL80211_BAND_2GHZ
+                                                   : NL80211_BAND_5GHZ;
                        channel->center_freq =
                                ieee80211_channel_to_frequency(
                                        channel->hw_value, channel->band);
@@ -677,15 +677,15 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
 
        /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
        for (band = 6; band <= 7; band++) {
-               enum ieee80211_band ieeeband;
+               enum nl80211_band ieeeband;
 
                iwl_init_band_reference(cfg, eeprom, eeprom_size, band,
                                        &eeprom_ch_count, &eeprom_ch_info,
                                        &eeprom_ch_array);
 
                /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
-               ieeeband = (band == 6) ? IEEE80211_BAND_2GHZ
-                                      : IEEE80211_BAND_5GHZ;
+               ieeeband = (band == 6) ? NL80211_BAND_2GHZ
+                                      : NL80211_BAND_5GHZ;
 
                /* Loop through each band adding each of the channels */
                for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) {
@@ -708,7 +708,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
 
 int iwl_init_sband_channels(struct iwl_nvm_data *data,
                            struct ieee80211_supported_band *sband,
-                           int n_channels, enum ieee80211_band band)
+                           int n_channels, enum nl80211_band band)
 {
        struct ieee80211_channel *chan = &data->channels[0];
        int n = 0, idx = 0;
@@ -734,7 +734,7 @@ int iwl_init_sband_channels(struct iwl_nvm_data *data,
 void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
                          struct iwl_nvm_data *data,
                          struct ieee80211_sta_ht_cap *ht_info,
-                         enum ieee80211_band band,
+                         enum nl80211_band band,
                          u8 tx_chains, u8 rx_chains)
 {
        int max_bit_rate = 0;
@@ -813,22 +813,22 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
        int n_used = 0;
        struct ieee80211_supported_band *sband;
 
-       sband = &data->bands[IEEE80211_BAND_2GHZ];
-       sband->band = IEEE80211_BAND_2GHZ;
+       sband = &data->bands[NL80211_BAND_2GHZ];
+       sband->band = NL80211_BAND_2GHZ;
        sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
        sband->n_bitrates = N_RATES_24;
        n_used += iwl_init_sband_channels(data, sband, n_channels,
-                                         IEEE80211_BAND_2GHZ);
-       iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ,
+                                         NL80211_BAND_2GHZ);
+       iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ,
                             data->valid_tx_ant, data->valid_rx_ant);
 
-       sband = &data->bands[IEEE80211_BAND_5GHZ];
-       sband->band = IEEE80211_BAND_5GHZ;
+       sband = &data->bands[NL80211_BAND_5GHZ];
+       sband->band = NL80211_BAND_5GHZ;
        sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
        sband->n_bitrates = N_RATES_52;
        n_used += iwl_init_sband_channels(data, sband, n_channels,
-                                         IEEE80211_BAND_5GHZ);
-       iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ,
+                                         NL80211_BAND_5GHZ);
+       iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_5GHZ,
                             data->valid_tx_ant, data->valid_rx_ant);
 
        if (n_channels != n_used)
index ad2b834668ff7a0d02ac0212d48762588aaaabe4..53f39a34eca2cc97d1cadcaca0e0dbd3c30d90ce 100644 (file)
@@ -98,7 +98,7 @@ struct iwl_nvm_data {
        s8 max_tx_pwr_half_dbm;
 
        bool lar_enabled;
-       struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+       struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
        struct ieee80211_channel channels[];
 };
 
@@ -133,12 +133,12 @@ int iwl_nvm_check_version(struct iwl_nvm_data *data,
 
 int iwl_init_sband_channels(struct iwl_nvm_data *data,
                            struct ieee80211_supported_band *sband,
-                           int n_channels, enum ieee80211_band band);
+                           int n_channels, enum nl80211_band band);
 
 void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
                          struct iwl_nvm_data *data,
                          struct ieee80211_sta_ht_cap *ht_info,
-                         enum ieee80211_band band,
+                         enum nl80211_band band,
                          u8 tx_chains, u8 rx_chains);
 
 #endif /* __iwl_eeprom_parse_h__ */
index 8425e1a587d97a5ea00d914ce8f78538df1eb910..09b7ea28f4a0c256f6e78d68ab4b75daf9ca0164 100644 (file)
@@ -105,6 +105,7 @@ enum iwl_fw_error_dump_type {
        IWL_FW_ERROR_DUMP_RB = 11,
        IWL_FW_ERROR_DUMP_PAGING = 12,
        IWL_FW_ERROR_DUMP_RADIO_REG = 13,
+       IWL_FW_ERROR_DUMP_INTERNAL_TXF = 14,
 
        IWL_FW_ERROR_DUMP_MAX,
 };
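IWL_FW_ERROR_DUMP_INTERNAL_TXF slots in ahead of IWL_FW_ERROR_DUMP_MAX so the section-type space stays dense. A hedged sketch of how a dump writer tags such a section, assuming the usual type/len header of struct iwl_fw_error_dump_data:

        dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_INTERNAL_TXF);
        dump_data->len = cpu_to_le32(fifo_len);
        /* the drained FIFO words follow in dump_data->data[] */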
index 15ec4e2907d8e124007e5ce3fe4f227b28718394..37dc09e8b6a7e24a0acd406ff9582fa2b1e68e42 100644 (file)
@@ -142,6 +142,7 @@ enum iwl_ucode_tlv_type {
        IWL_UCODE_TLV_FW_DBG_CONF       = 39,
        IWL_UCODE_TLV_FW_DBG_TRIGGER    = 40,
        IWL_UCODE_TLV_FW_GSCAN_CAPA     = 50,
+       IWL_UCODE_TLV_FW_MEM_SEG        = 51,
 };
 
 struct iwl_ucode_tlv {
@@ -245,7 +246,6 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
 
 /**
  * enum iwl_ucode_tlv_api - ucode api
- * @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex
  * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
  *     longer than the passive one, which is essential for fragmented scan.
  * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
@@ -260,12 +260,11 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
  * @NUM_IWL_UCODE_TLV_API: number of bits used
  */
 enum iwl_ucode_tlv_api {
-       IWL_UCODE_TLV_API_BT_COEX_SPLIT         = (__force iwl_ucode_tlv_api_t)3,
        IWL_UCODE_TLV_API_FRAGMENTED_SCAN       = (__force iwl_ucode_tlv_api_t)8,
        IWL_UCODE_TLV_API_WIFI_MCC_UPDATE       = (__force iwl_ucode_tlv_api_t)9,
        IWL_UCODE_TLV_API_WIDE_CMD_HDR          = (__force iwl_ucode_tlv_api_t)14,
        IWL_UCODE_TLV_API_LQ_SS_PARAMS          = (__force iwl_ucode_tlv_api_t)18,
-       IWL_UCODE_TLV_API_NEW_VERSION           = (__force iwl_ucode_tlv_api_t)20,
+       IWL_UCODE_TLV_API_NEW_VERSION           = (__force iwl_ucode_tlv_api_t)20,
        IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY     = (__force iwl_ucode_tlv_api_t)24,
        IWL_UCODE_TLV_API_TX_POWER_CHAIN        = (__force iwl_ucode_tlv_api_t)27,
 
@@ -324,6 +323,9 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
  * @IWL_UCODE_TLV_CAPA_CTDP_SUPPORT: supports cTDP command
  * @IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED: supports usniffer enabled in
  *     regular image.
+ * @IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG: supports getting additional
+ *     shared memory addresses from the firmware.
+ * @IWL_UCODE_TLV_CAPA_LQM_SUPPORT: supports Link Quality Measurement
  *
  * @NUM_IWL_UCODE_TLV_CAPA: number of bits used
  */
@@ -361,6 +363,8 @@ enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT      = (__force iwl_ucode_tlv_capa_t)75,
        IWL_UCODE_TLV_CAPA_CTDP_SUPPORT                 = (__force iwl_ucode_tlv_capa_t)76,
        IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED             = (__force iwl_ucode_tlv_capa_t)77,
+       IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG        = (__force iwl_ucode_tlv_capa_t)80,
+       IWL_UCODE_TLV_CAPA_LQM_SUPPORT                  = (__force iwl_ucode_tlv_capa_t)81,
 
        NUM_IWL_UCODE_TLV_CAPA
 #ifdef __CHECKER__
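Both new capability bits are consumed the same way as the existing ones, through a test_bit()-style helper; the __force casts merely strip the __bitwise__ annotation so sparse stays quiet. A sketch, assuming the fw_has_capa() helper that parallels the fw_has_api() calls removed in the coex hunks below:

        if (fw_has_capa(&mvm->fw->ucode_capa,
                        IWL_UCODE_TLV_CAPA_LQM_SUPPORT)) {
                /* firmware can run Link Quality Measurements */
        }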
@@ -490,6 +494,37 @@ enum iwl_fw_dbg_monitor_mode {
        MIPI_MODE = 3,
 };
 
+/**
+ * enum iwl_fw_dbg_mem_seg_type - data types for dumping on error
+ *
+ * @FW_DBG_MEM_DCCM_LMAC: the LMAC DCCM (data closely coupled memory)
+ * @FW_DBG_MEM_DCCM_UMAC: the UMAC DCCM
+ * @FW_DBG_MEM_SMEM: the shared memory (SMEM)
+ */
+enum iwl_fw_dbg_mem_seg_type {
+       FW_DBG_MEM_DCCM_LMAC = 0,
+       FW_DBG_MEM_DCCM_UMAC,
+       FW_DBG_MEM_SMEM,
+
+       /* Must be last */
+       FW_DBG_MEM_MAX,
+};
+
+/**
+ * struct iwl_fw_dbg_mem_seg_tlv - configures the debug data memory segments
+ *
+ * @data_type: &enum iwl_fw_dbg_mem_seg_type
+ * @ofs: the memory segment offset
+ * @len: the memory segment length, in bytes
+ *
+ * This structure is populated from the IWL_UCODE_TLV_FW_MEM_SEG TLV
+ */
+struct iwl_fw_dbg_mem_seg_tlv {
+       __le32 data_type;
+       __le32 ofs;
+       __le32 len;
+} __packed;
+
 /**
  * struct iwl_fw_dbg_dest_tlv - configures the destination of the debug data
  *
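Because each iwl_fw_dbg_mem_seg_tlv carries its own data_type, the TLV parser can file a segment straight into the per-type dbg_mem_tlv[] slot added to struct iwl_fw below. A hypothetical sketch of the corresponding parser case (the actual switch in the firmware parse path is not part of these hunks; tlv_data, tlv_len and pieces are assumed from that context):

        case IWL_UCODE_TLV_FW_MEM_SEG: {
                struct iwl_fw_dbg_mem_seg_tlv *dbg_mem = (void *)tlv_data;
                u32 type;

                if (tlv_len != sizeof(*dbg_mem))
                        goto invalid_tlv_len;
                type = le32_to_cpu(dbg_mem->data_type);
                if (type >= FW_DBG_MEM_MAX)
                        break;
                pieces->dbg_mem_tlv[type] = dbg_mem;
                break;
        }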
index 2942571c613fffe268d71303ea512d0d013ea79c..e461d631893a98b4673083f4fbd009e6a83e7d9e 100644 (file)
@@ -286,6 +286,8 @@ struct iwl_fw {
        struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
        size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
        struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
+       struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv[FW_DBG_MEM_MAX];
+       bool dbg_dynamic_mem;
        size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
        u8 dbg_dest_reg_num;
        struct iwl_gscan_capabilities gscan_capa;
index d1a5dd1602f5894c52a286d18b18ad233a7b0715..6c5c2f9f73a26dde0594304e5c6f261532e5d6aa 100644 (file)
@@ -92,6 +92,11 @@ enum iwl_amsdu_size {
        IWL_AMSDU_12K = 2,
 };
 
+enum iwl_uapsd_disable {
+       IWL_DISABLE_UAPSD_BSS           = BIT(0),
+       IWL_DISABLE_UAPSD_P2P_CLIENT    = BIT(1),
+};
+
 /**
  * struct iwl_mod_params
  *
@@ -109,7 +114,8 @@ enum iwl_amsdu_size {
  * @debug_level: levels are IWL_DL_*
  * @ant_coupling: antenna coupling in dB, default = 0
  * @nvm_file: specifies an external NVM file
- * @uapsd_disable: disable U-APSD, default = 1
+ * @uapsd_disable: disable U-APSD, see &enum iwl_uapsd_disable, default =
+ *     IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT
  * @d0i3_disable: disable d0i3, default = 1,
  * @d0i3_entry_delay: time to wait after no refs are taken before
  *     entering D0i3 (in msecs)
@@ -131,7 +137,7 @@ struct iwl_mod_params {
 #endif
        int ant_coupling;
        char *nvm_file;
-       bool uapsd_disable;
+       u32 uapsd_disable;
        bool d0i3_disable;
        unsigned int d0i3_entry_delay;
        bool lar_disable;
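Every caller that used to test the old bool now has to mask the bitmap for the client type at hand. A minimal sketch, assuming a hypothetical p2p_client flag at the call site (the mvm consumers are not shown in these hunks):

        /* p2p_client is a hypothetical flag for this sketch */
        bool uapsd_off = iwlwifi_mod_params.uapsd_disable &
                         (p2p_client ? IWL_DISABLE_UAPSD_P2P_CLIENT
                                     : IWL_DISABLE_UAPSD_BSS);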
index 93a689583dff1678b5e1c4a56482a09de4ec31d9..14743c37d97685b93b30144999b6160f96fefeee 100644 (file)
@@ -308,7 +308,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
 
                channel->hw_value = nvm_chan[ch_idx];
                channel->band = (ch_idx < num_2ghz_channels) ?
-                               IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+                               NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
                channel->center_freq =
                        ieee80211_channel_to_frequency(
                                channel->hw_value, channel->band);
@@ -320,7 +320,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
                 * is not used in mvm, and is used for backwards compatibility
                 */
                channel->max_power = IWL_DEFAULT_MAX_TX_POWER;
-               is_5ghz = channel->band == IEEE80211_BAND_5GHZ;
+               is_5ghz = channel->band == NL80211_BAND_5GHZ;
 
                /* don't put limitations in case we're using LAR */
                if (!lar_supported)
@@ -439,22 +439,22 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
                                &ch_section[NVM_CHANNELS_FAMILY_8000],
                                lar_supported);
 
-       sband = &data->bands[IEEE80211_BAND_2GHZ];
-       sband->band = IEEE80211_BAND_2GHZ;
+       sband = &data->bands[NL80211_BAND_2GHZ];
+       sband->band = NL80211_BAND_2GHZ;
        sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
        sband->n_bitrates = N_RATES_24;
        n_used += iwl_init_sband_channels(data, sband, n_channels,
-                                         IEEE80211_BAND_2GHZ);
-       iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ,
+                                         NL80211_BAND_2GHZ);
+       iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ,
                             tx_chains, rx_chains);
 
-       sband = &data->bands[IEEE80211_BAND_5GHZ];
-       sband->band = IEEE80211_BAND_5GHZ;
+       sband = &data->bands[NL80211_BAND_5GHZ];
+       sband->band = NL80211_BAND_5GHZ;
        sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
        sband->n_bitrates = N_RATES_52;
        n_used += iwl_init_sband_channels(data, sband, n_channels,
-                                         IEEE80211_BAND_5GHZ);
-       iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ,
+                                         NL80211_BAND_5GHZ);
+       iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_5GHZ,
                             tx_chains, rx_chains);
        if (data->sku_cap_11ac_enable && !iwlwifi_mod_params.disable_11ac)
                iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap,
@@ -781,7 +781,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
        struct ieee80211_regdomain *regd;
        int size_of_regd;
        struct ieee80211_reg_rule *rule;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        int center_freq, prev_center_freq = 0;
        int valid_rules = 0;
        bool new_rule;
@@ -809,7 +809,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
        for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
                ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
                band = (ch_idx < NUM_2GHZ_CHANNELS) ?
-                      IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+                      NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
                center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx],
                                                             band);
                new_rule = false;
@@ -857,7 +857,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
                IWL_DEBUG_DEV(dev, IWL_DL_LAR,
                              "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x): Ad-Hoc %ssupported\n",
                              center_freq,
-                             band == IEEE80211_BAND_5GHZ ? "5.2" : "2.4",
+                             band == NL80211_BAND_5GHZ ? "5.2" : "2.4",
                              CHECK_AND_PRINT_I(VALID),
                              CHECK_AND_PRINT_I(ACTIVE),
                              CHECK_AND_PRINT_I(RADAR),
index c46e596e12b1807ade821c2b4ac07f1b1c265174..6c1d20ded04baf7a34bae96bac8c33f978250d27 100644 (file)
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016        Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016        Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -345,6 +347,16 @@ enum secure_load_status_reg {
 #define TXF_READ_MODIFY_DATA           (0xa00448)
 #define TXF_READ_MODIFY_ADDR           (0xa0044c)
 
+/* UMAC internal Tx FIFO */
+#define TXF_CPU2_FIFO_ITEM_CNT         (0xA00538)
+#define TXF_CPU2_WR_PTR                (0xA00514)
+#define TXF_CPU2_RD_PTR                (0xA00510)
+#define TXF_CPU2_FENCE_PTR             (0xA00518)
+#define TXF_CPU2_LOCK_FENCE            (0xA00524)
+#define TXF_CPU2_NUM                   (0xA0053C)
+#define TXF_CPU2_READ_MODIFY_DATA      (0xA00548)
+#define TXF_CPU2_READ_MODIFY_ADDR      (0xA0054C)
+
 /* Radio registers access */
 #define RSP_RADIO_CMD                  (0xa02804)
 #define RSP_RADIO_RDDAT                        (0xa02814)
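The TXF_CPU2_* block mirrors the LMAC TXF_* window above it and backs the new IWL_FW_ERROR_DUMP_INTERNAL_TXF section: the UMAC's internal Tx FIFO is drained through the same read/modify scheme. A hedged sketch using the generic prph accessors, with trans and dump_data assumed from the surrounding dump code:

        u32 i, items;

        items = iwl_trans_read_prph(trans, TXF_CPU2_FIFO_ITEM_CNT);
        iwl_trans_write_prph(trans, TXF_CPU2_READ_MODIFY_ADDR, 0);
        for (i = 0; i < items; i++)
                *dump_data++ = cpu_to_le32(iwl_trans_read_prph(
                                        trans, TXF_CPU2_READ_MODIFY_DATA));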
index 91d74b3f666b5d5e316ea4d0a66654f5a5533700..fa4ab4b9436fa3b6fb5d49103a06418f5a95c27f 100644 (file)
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016        Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016        Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -519,7 +521,7 @@ struct iwl_trans;
 
 struct iwl_trans_txq_scd_cfg {
        u8 fifo;
-       s8 sta_id;
+       u8 sta_id;
        u8 tid;
        bool aggregate;
        int frame_limit;
index 23e7e29375664fa5a3b6057aa209b83f0d78da0f..2e06dfc1c47766683d44970789bdef3d71192812 100644 (file)
@@ -2,7 +2,7 @@ obj-$(CONFIG_IWLMVM)   += iwlmvm.o
 iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
 iwlmvm-y += utils.o rx.o rxmq.o tx.o binding.o quota.o sta.o sf.o
 iwlmvm-y += scan.o time-event.o rs.o
-iwlmvm-y += power.o coex.o coex_legacy.o
+iwlmvm-y += power.o coex.o
 iwlmvm-y += tt.o offloading.o tdls.o
 iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
 iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
index 2e098f8e0f8324595062ae48a2cf34ac55757487..a63f5bbb1ba7f925b7e5216128ee0e38fb10c781 100644 (file)
@@ -378,7 +378,7 @@ iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
        chanctx_conf = rcu_dereference(vif->chanctx_conf);
 
        if (!chanctx_conf ||
-            chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
+            chanctx_conf->def.chan->band != NL80211_BAND_2GHZ) {
                rcu_read_unlock();
                return BT_COEX_INVALID_LUT;
        }
@@ -411,9 +411,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
        struct iwl_bt_coex_cmd bt_cmd = {};
        u32 mode;
 
-       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
-               return iwl_send_bt_init_conf_old(mvm);
-
        lockdep_assert_held(&mvm->mutex);
 
        if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
@@ -540,7 +537,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
 
        /* If channel context is invalid or not on 2.4GHz .. */
        if ((!chanctx_conf ||
-            chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) {
+            chanctx_conf->def.chan->band != NL80211_BAND_2GHZ)) {
                if (vif->type == NL80211_IFTYPE_STATION) {
                        /* ... relax constraints and disable rssi events */
                        iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
@@ -728,12 +725,6 @@ void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
 
-       if (!fw_has_api(&mvm->fw->ucode_capa,
-                       IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
-               iwl_mvm_rx_bt_coex_notif_old(mvm, rxb);
-               return;
-       }
-
        IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
        IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
        IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
@@ -755,12 +746,6 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        int ret;
 
-       if (!fw_has_api(&mvm->fw->ucode_capa,
-                       IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
-               iwl_mvm_bt_rssi_event_old(mvm, vif, rssi_event);
-               return;
-       }
-
        lockdep_assert_held(&mvm->mutex);
 
        /* Ignore updates if we are in force mode */
@@ -807,9 +792,6 @@ u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
        struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
        enum iwl_bt_coex_lut_type lut_type;
 
-       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
-               return iwl_mvm_coex_agg_time_limit_old(mvm, sta);
-
        if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id))
                return LINK_QUAL_AGG_TIME_LIMIT_DEF;
 
@@ -834,9 +816,6 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
        struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
        enum iwl_bt_coex_lut_type lut_type;
 
-       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
-               return iwl_mvm_bt_coex_is_mimo_allowed_old(mvm, sta);
-
        if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id))
                return true;
 
@@ -864,9 +843,6 @@ bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant)
        if (ant & mvm->cfg->non_shared_ant)
                return true;
 
-       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
-               return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
-
        return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
                BT_HIGH_TRAFFIC;
 }
@@ -877,21 +853,15 @@ bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm)
        if (mvm->cfg->bt_shared_single_ant)
                return true;
 
-       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
-               return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
-
        return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC;
 }
 
 bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
-                                   enum ieee80211_band band)
+                                   enum nl80211_band band)
 {
        u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
 
-       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
-               return iwl_mvm_bt_coex_is_tpc_allowed_old(mvm, band);
-
-       if (band != IEEE80211_BAND_2GHZ)
+       if (band != NL80211_BAND_2GHZ)
                return false;
 
        return bt_activity >= BT_LOW_TRAFFIC;
@@ -903,7 +873,7 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
        __le16 fc = hdr->frame_control;
        bool mplut_enabled = iwl_mvm_is_mplut_supported(mvm);
 
-       if (info->band != IEEE80211_BAND_2GHZ)
+       if (info->band != NL80211_BAND_2GHZ)
                return 0;
 
        if (unlikely(mvm->bt_tx_prio))
@@ -937,12 +907,6 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
 
 void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
 {
-       if (!fw_has_api(&mvm->fw->ucode_capa,
-                       IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
-               iwl_mvm_bt_coex_vif_change_old(mvm);
-               return;
-       }
-
        iwl_mvm_bt_coex_notif_handle(mvm);
 }
 
@@ -955,12 +919,6 @@ void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
        u8 __maybe_unused lower_bound, upper_bound;
        u8 lut;
 
-       if (!fw_has_api(&mvm->fw->ucode_capa,
-                       IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
-               iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb);
-               return;
-       }
-
        if (!iwl_mvm_bt_is_plcr_supported(mvm))
                return;
 
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c
deleted file mode 100644 (file)
index 0150457..0000000
+++ /dev/null
@@ -1,1315 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * Contact Information:
- *  Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
-#include <linux/ieee80211.h>
-#include <linux/etherdevice.h>
-#include <net/mac80211.h>
-
-#include "fw-api-coex.h"
-#include "iwl-modparams.h"
-#include "mvm.h"
-#include "iwl-debug.h"
-
-#define EVENT_PRIO_ANT(_evt, _prio, _shrd_ant)                 \
-       [(_evt)] = (((_prio) << BT_COEX_PRIO_TBL_PRIO_POS) |    \
-                  ((_shrd_ant) << BT_COEX_PRIO_TBL_SHRD_ANT_POS))
-
-static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB1,
-                      BT_COEX_PRIO_TBL_PRIO_BYPASS, 0),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB2,
-                      BT_COEX_PRIO_TBL_PRIO_BYPASS, 1),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1,
-                      BT_COEX_PRIO_TBL_PRIO_LOW, 0),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2,
-                      BT_COEX_PRIO_TBL_PRIO_LOW, 1),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1,
-                      BT_COEX_PRIO_TBL_PRIO_HIGH, 0),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2,
-                      BT_COEX_PRIO_TBL_PRIO_HIGH, 1),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_DTIM,
-                      BT_COEX_PRIO_TBL_DISABLED, 0),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN52,
-                      BT_COEX_PRIO_TBL_PRIO_COEX_OFF, 0),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN24,
-                      BT_COEX_PRIO_TBL_PRIO_COEX_ON, 0),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_IDLE,
-                      BT_COEX_PRIO_TBL_PRIO_COEX_IDLE, 0),
-       0, 0, 0, 0, 0, 0,
-};
-
-#undef EVENT_PRIO_ANT
-
-static int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
-{
-       if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
-               return 0;
-
-       return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, 0,
-                                   sizeof(struct iwl_bt_coex_prio_tbl_cmd),
-                                   &iwl_bt_prio_tbl);
-}
-
-static const __le32 iwl_bt_prio_boost[BT_COEX_BOOST_SIZE] = {
-       cpu_to_le32(0xf0f0f0f0), /* 50% */
-       cpu_to_le32(0xc0c0c0c0), /* 25% */
-       cpu_to_le32(0xfcfcfcfc), /* 75% */
-       cpu_to_le32(0xfefefefe), /* 87.5% */
-};
-
-static const __le32 iwl_single_shared_ant[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
-       {
-               cpu_to_le32(0x40000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x44000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x40000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x44000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xf0005000),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xf0005000),
-       },
-       {
-               cpu_to_le32(0x40000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x44000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x40000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x44000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xf0005000),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xf0005000),
-       },
-       {
-               cpu_to_le32(0x40000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x44000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x40000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x44000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xf0005000),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xf0005000),
-       },
-};
-
-static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
-       {
-               /* Tight */
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xaeaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xcc00ff28),
-               cpu_to_le32(0x0000aaaa),
-               cpu_to_le32(0xcc00aaaa),
-               cpu_to_le32(0x0000aaaa),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0x00004000),
-               cpu_to_le32(0xf0005000),
-               cpu_to_le32(0xf0005000),
-       },
-       {
-               /* Loose */
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xcc00ff28),
-               cpu_to_le32(0x0000aaaa),
-               cpu_to_le32(0xcc00aaaa),
-               cpu_to_le32(0x0000aaaa),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0xf0005000),
-               cpu_to_le32(0xf0005000),
-       },
-       {
-               /* Tx Tx disabled */
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xeeaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xcc00ff28),
-               cpu_to_le32(0x0000aaaa),
-               cpu_to_le32(0xcc00aaaa),
-               cpu_to_le32(0x0000aaaa),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xf0005000),
-               cpu_to_le32(0xf0005000),
-       },
-};
-
-/* 20MHz / 40MHz below / 40Mhz above*/
-static const __le64 iwl_ci_mask[][3] = {
-       /* dummy entry for channel 0 */
-       {cpu_to_le64(0), cpu_to_le64(0), cpu_to_le64(0)},
-       {
-               cpu_to_le64(0x0000001FFFULL),
-               cpu_to_le64(0x0ULL),
-               cpu_to_le64(0x00007FFFFFULL),
-       },
-       {
-               cpu_to_le64(0x000000FFFFULL),
-               cpu_to_le64(0x0ULL),
-               cpu_to_le64(0x0003FFFFFFULL),
-       },
-       {
-               cpu_to_le64(0x000003FFFCULL),
-               cpu_to_le64(0x0ULL),
-               cpu_to_le64(0x000FFFFFFCULL),
-       },
-       {
-               cpu_to_le64(0x00001FFFE0ULL),
-               cpu_to_le64(0x0ULL),
-               cpu_to_le64(0x007FFFFFE0ULL),
-       },
-       {
-               cpu_to_le64(0x00007FFF80ULL),
-               cpu_to_le64(0x00007FFFFFULL),
-               cpu_to_le64(0x01FFFFFF80ULL),
-       },
-       {
-               cpu_to_le64(0x0003FFFC00ULL),
-               cpu_to_le64(0x0003FFFFFFULL),
-               cpu_to_le64(0x0FFFFFFC00ULL),
-       },
-       {
-               cpu_to_le64(0x000FFFF000ULL),
-               cpu_to_le64(0x000FFFFFFCULL),
-               cpu_to_le64(0x3FFFFFF000ULL),
-       },
-       {
-               cpu_to_le64(0x007FFF8000ULL),
-               cpu_to_le64(0x007FFFFFE0ULL),
-               cpu_to_le64(0xFFFFFF8000ULL),
-       },
-       {
-               cpu_to_le64(0x01FFFE0000ULL),
-               cpu_to_le64(0x01FFFFFF80ULL),
-               cpu_to_le64(0xFFFFFE0000ULL),
-       },
-       {
-               cpu_to_le64(0x0FFFF00000ULL),
-               cpu_to_le64(0x0FFFFFFC00ULL),
-               cpu_to_le64(0x0ULL),
-       },
-       {
-               cpu_to_le64(0x3FFFC00000ULL),
-               cpu_to_le64(0x3FFFFFF000ULL),
-               cpu_to_le64(0x0)
-       },
-       {
-               cpu_to_le64(0xFFFE000000ULL),
-               cpu_to_le64(0xFFFFFF8000ULL),
-               cpu_to_le64(0x0)
-       },
-       {
-               cpu_to_le64(0xFFF8000000ULL),
-               cpu_to_le64(0xFFFFFE0000ULL),
-               cpu_to_le64(0x0)
-       },
-       {
-               cpu_to_le64(0xFFC0000000ULL),
-               cpu_to_le64(0x0ULL),
-               cpu_to_le64(0x0ULL)
-       },
-};
-
-enum iwl_bt_kill_msk {
-       BT_KILL_MSK_DEFAULT,
-       BT_KILL_MSK_NEVER,
-       BT_KILL_MSK_ALWAYS,
-       BT_KILL_MSK_MAX,
-};
-
-static const u32 iwl_bt_ctl_kill_msk[BT_KILL_MSK_MAX] = {
-       [BT_KILL_MSK_DEFAULT] = 0xfffffc00,
-       [BT_KILL_MSK_NEVER] = 0xffffffff,
-       [BT_KILL_MSK_ALWAYS] = 0,
-};
-
-static const u8 iwl_bt_cts_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
-       {
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_ALWAYS,
-       },
-       {
-               BT_KILL_MSK_NEVER,
-               BT_KILL_MSK_NEVER,
-               BT_KILL_MSK_NEVER,
-       },
-       {
-               BT_KILL_MSK_NEVER,
-               BT_KILL_MSK_NEVER,
-               BT_KILL_MSK_NEVER,
-       },
-       {
-               BT_KILL_MSK_DEFAULT,
-               BT_KILL_MSK_NEVER,
-               BT_KILL_MSK_DEFAULT,
-       },
-};
-
-static const u8 iwl_bt_ack_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
-       {
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_ALWAYS,
-       },
-       {
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_ALWAYS,
-       },
-       {
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_ALWAYS,
-       },
-       {
-               BT_KILL_MSK_DEFAULT,
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_DEFAULT,
-       },
-};
-
-struct corunning_block_luts {
-       u8 range;
-       __le32 lut20[BT_COEX_CORUN_LUT_SIZE];
-};
-
-/*
- * Ranges for the antenna coupling calibration / co-running block LUT:
- *             LUT0: [ 0, 12[
- *             LUT1: [12, 20[
- *             LUT2: [20, 21[
- *             LUT3: [21, 23[
- *             LUT4: [23, 27[
- *             LUT5: [27, 30[
- *             LUT6: [30, 32[
- *             LUT7: [32, 33[
- *             LUT8: [33, - [
- */
-static const struct corunning_block_luts antenna_coupling_ranges[] = {
-       {
-               .range = 0,
-               .lut20 = {
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-               },
-       },
-       {
-               .range = 12,
-               .lut20 = {
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-               },
-       },
-       {
-               .range = 20,
-               .lut20 = {
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-               },
-       },
-       {
-               .range = 21,
-               .lut20 = {
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-               },
-       },
-       {
-               .range = 23,
-               .lut20 = {
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-               },
-       },
-       {
-               .range = 27,
-               .lut20 = {
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-               },
-       },
-       {
-               .range = 30,
-               .lut20 = {
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-               },
-       },
-       {
-               .range = 32,
-               .lut20 = {
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-               },
-       },
-       {
-               .range = 33,
-               .lut20 = {
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-               },
-       },
-};
-
-static enum iwl_bt_coex_lut_type
-iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
-{
-       struct ieee80211_chanctx_conf *chanctx_conf;
-       enum iwl_bt_coex_lut_type ret;
-       u16 phy_ctx_id;
-
-       /*
-        * Checking that we hold mvm->mutex is a good idea, but the rate
-        * control can't acquire the mutex since it runs in Tx path.
-        * So this is racy in that case, but in the worst case, the AMPDU
-        * size limit will be wrong for a short time which is not a big
-        * issue.
-        */
-
-       rcu_read_lock();
-
-       chanctx_conf = rcu_dereference(vif->chanctx_conf);
-
-       if (!chanctx_conf ||
-           chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
-               rcu_read_unlock();
-               return BT_COEX_INVALID_LUT;
-       }
-
-       ret = BT_COEX_TX_DIS_LUT;
-
-       if (mvm->cfg->bt_shared_single_ant) {
-               rcu_read_unlock();
-               return ret;
-       }
-
-       phy_ctx_id = *((u16 *)chanctx_conf->drv_priv);
-
-       if (mvm->last_bt_ci_cmd_old.primary_ch_phy_id == phy_ctx_id)
-               ret = le32_to_cpu(mvm->last_bt_notif_old.primary_ch_lut);
-       else if (mvm->last_bt_ci_cmd_old.secondary_ch_phy_id == phy_ctx_id)
-               ret = le32_to_cpu(mvm->last_bt_notif_old.secondary_ch_lut);
-       /* else - default = TX TX disallowed */
-
-       rcu_read_unlock();
-
-       return ret;
-}
-
-int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm)
-{
-       struct iwl_bt_coex_cmd_old *bt_cmd;
-       struct iwl_host_cmd cmd = {
-               .id = BT_CONFIG,
-               .len = { sizeof(*bt_cmd), },
-               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
-       };
-       int ret;
-       u32 flags;
-
-       ret = iwl_send_bt_prio_tbl(mvm);
-       if (ret)
-               return ret;
-
-       bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
-       if (!bt_cmd)
-               return -ENOMEM;
-       cmd.data[0] = bt_cmd;
-
-       lockdep_assert_held(&mvm->mutex);
-
-       if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
-               switch (mvm->bt_force_ant_mode) {
-               case BT_FORCE_ANT_AUTO:
-                       flags = BT_COEX_AUTO_OLD;
-                       break;
-               case BT_FORCE_ANT_BT:
-                       flags = BT_COEX_BT_OLD;
-                       break;
-               case BT_FORCE_ANT_WIFI:
-                       flags = BT_COEX_WIFI_OLD;
-                       break;
-               default:
-                       WARN_ON(1);
-                       flags = 0;
-               }
-
-               bt_cmd->flags = cpu_to_le32(flags);
-               bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE);
-               goto send_cmd;
-       }
-
-       bt_cmd->max_kill = 5;
-       bt_cmd->bt4_antenna_isolation_thr =
-               IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS;
-       bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling;
-       bt_cmd->bt4_tx_tx_delta_freq_thr = 15;
-       bt_cmd->bt4_tx_rx_max_freq0 = 15;
-       bt_cmd->override_primary_lut = BT_COEX_INVALID_LUT;
-       bt_cmd->override_secondary_lut = BT_COEX_INVALID_LUT;
-
-       flags = iwlwifi_mod_params.bt_coex_active ?
-                       BT_COEX_NW_OLD : BT_COEX_DISABLE_OLD;
-       bt_cmd->flags = cpu_to_le32(flags);
-
-       bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE |
-                                           BT_VALID_BT_PRIO_BOOST |
-                                           BT_VALID_MAX_KILL |
-                                           BT_VALID_3W_TMRS |
-                                           BT_VALID_KILL_ACK |
-                                           BT_VALID_KILL_CTS |
-                                           BT_VALID_REDUCED_TX_POWER |
-                                           BT_VALID_LUT |
-                                           BT_VALID_WIFI_RX_SW_PRIO_BOOST |
-                                           BT_VALID_WIFI_TX_SW_PRIO_BOOST |
-                                           BT_VALID_ANT_ISOLATION |
-                                           BT_VALID_ANT_ISOLATION_THRS |
-                                           BT_VALID_TXTX_DELTA_FREQ_THRS |
-                                           BT_VALID_TXRX_MAX_FREQ_0 |
-                                           BT_VALID_SYNC_TO_SCO |
-                                           BT_VALID_TTC |
-                                           BT_VALID_RRC);
-
-       if (IWL_MVM_BT_COEX_SYNC2SCO)
-               bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
-
-       if (iwl_mvm_bt_is_plcr_supported(mvm)) {
-               bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
-                                                    BT_VALID_CORUN_LUT_40);
-               bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
-       }
-
-       if (IWL_MVM_BT_COEX_MPLUT) {
-               bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
-               bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
-       }
-
-       if (IWL_MVM_BT_COEX_TTC)
-               bt_cmd->flags |= cpu_to_le32(BT_COEX_TTC);
-
-       if (iwl_mvm_bt_is_rrc_supported(mvm))
-               bt_cmd->flags |= cpu_to_le32(BT_COEX_RRC);
-
-       if (mvm->cfg->bt_shared_single_ant)
-               memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
-                      sizeof(iwl_single_shared_ant));
-       else
-               memcpy(&bt_cmd->decision_lut, iwl_combined_lookup,
-                      sizeof(iwl_combined_lookup));
-
-       /* Take first Co-running block LUT to get started */
-       memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[0].lut20,
-              sizeof(bt_cmd->bt4_corun_lut20));
-       memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[0].lut20,
-              sizeof(bt_cmd->bt4_corun_lut40));
-
-       memcpy(&bt_cmd->bt_prio_boost, iwl_bt_prio_boost,
-              sizeof(iwl_bt_prio_boost));
-       bt_cmd->bt4_multiprio_lut[0] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG0);
-       bt_cmd->bt4_multiprio_lut[1] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG1);
-
-send_cmd:
-       memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
-       memset(&mvm->last_bt_ci_cmd_old, 0, sizeof(mvm->last_bt_ci_cmd_old));
-
-       ret = iwl_mvm_send_cmd(mvm, &cmd);
-
-       kfree(bt_cmd);
-       return ret;
-}
-
-static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm)
-{
-       struct iwl_bt_coex_profile_notif_old *notif = &mvm->last_bt_notif_old;
-       u32 primary_lut = le32_to_cpu(notif->primary_ch_lut);
-       u32 ag = le32_to_cpu(notif->bt_activity_grading);
-       struct iwl_bt_coex_cmd_old *bt_cmd;
-       u8 ack_kill_msk, cts_kill_msk;
-       struct iwl_host_cmd cmd = {
-               .id = BT_CONFIG,
-               .data[0] = &bt_cmd,
-               .len = { sizeof(*bt_cmd), },
-               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
-       };
-       int ret = 0;
-
-       lockdep_assert_held(&mvm->mutex);
-
-       ack_kill_msk = iwl_bt_ack_kill_msk[ag][primary_lut];
-       cts_kill_msk = iwl_bt_cts_kill_msk[ag][primary_lut];
-
-       if (mvm->bt_ack_kill_msk[0] == ack_kill_msk &&
-           mvm->bt_cts_kill_msk[0] == cts_kill_msk)
-               return 0;
-
-       mvm->bt_ack_kill_msk[0] = ack_kill_msk;
-       mvm->bt_cts_kill_msk[0] = cts_kill_msk;
-
-       bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
-       if (!bt_cmd)
-               return -ENOMEM;
-       cmd.data[0] = bt_cmd;
-       bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
-
-       bt_cmd->kill_ack_msk = cpu_to_le32(iwl_bt_ctl_kill_msk[ack_kill_msk]);
-       bt_cmd->kill_cts_msk = cpu_to_le32(iwl_bt_ctl_kill_msk[cts_kill_msk]);
-       bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE |
-                                            BT_VALID_KILL_ACK |
-                                            BT_VALID_KILL_CTS);
-
-       ret = iwl_mvm_send_cmd(mvm, &cmd);
-
-       kfree(bt_cmd);
-       return ret;
-}
-
-static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
-                                      bool enable)
-{
-       struct iwl_bt_coex_cmd_old *bt_cmd;
-       /* Send ASYNC since this can be sent from an atomic context */
-       struct iwl_host_cmd cmd = {
-               .id = BT_CONFIG,
-               .len = { sizeof(*bt_cmd), },
-               .dataflags = { IWL_HCMD_DFL_DUP, },
-               .flags = CMD_ASYNC,
-       };
-       struct iwl_mvm_sta *mvmsta;
-       int ret;
-
-       mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
-       if (!mvmsta)
-               return 0;
-
-       /* nothing to do */
-       if (mvmsta->bt_reduced_txpower == enable)
-               return 0;
-
-       bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_ATOMIC);
-       if (!bt_cmd)
-               return -ENOMEM;
-       cmd.data[0] = bt_cmd;
-       bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
-
-       bt_cmd->valid_bit_msk =
-               cpu_to_le32(BT_VALID_ENABLE | BT_VALID_REDUCED_TX_POWER);
-       bt_cmd->bt_reduced_tx_power = sta_id;
-
-       if (enable)
-               bt_cmd->bt_reduced_tx_power |= BT_REDUCED_TX_POWER_BIT;
-
-       IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n",
-                      enable ? "en" : "dis", sta_id);
-
-       mvmsta->bt_reduced_txpower = enable;
-
-       ret = iwl_mvm_send_cmd(mvm, &cmd);
-
-       kfree(bt_cmd);
-       return ret;
-}
-
-struct iwl_bt_iterator_data {
-       struct iwl_bt_coex_profile_notif_old *notif;
-       struct iwl_mvm *mvm;
-       struct ieee80211_chanctx_conf *primary;
-       struct ieee80211_chanctx_conf *secondary;
-       bool primary_ll;
-};
-
-static inline
-void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm,
-                                      struct ieee80211_vif *vif,
-                                      bool enable, int rssi)
-{
-       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
-       mvmvif->bf_data.last_bt_coex_event = rssi;
-       mvmvif->bf_data.bt_coex_max_thold =
-               enable ? -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH : 0;
-       mvmvif->bf_data.bt_coex_min_thold =
-               enable ? -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH : 0;
-}
-
-/* must be called under rcu_read_lock */
-static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
-                                     struct ieee80211_vif *vif)
-{
-       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       struct iwl_bt_iterator_data *data = _data;
-       struct iwl_mvm *mvm = data->mvm;
-       struct ieee80211_chanctx_conf *chanctx_conf;
-       enum ieee80211_smps_mode smps_mode;
-       u32 bt_activity_grading;
-       int ave_rssi;
-
-       lockdep_assert_held(&mvm->mutex);
-
-       switch (vif->type) {
-       case NL80211_IFTYPE_STATION:
-               /* default smps_mode for BSS / P2P client is AUTOMATIC */
-               smps_mode = IEEE80211_SMPS_AUTOMATIC;
-               break;
-       case NL80211_IFTYPE_AP:
-               if (!mvmvif->ap_ibss_active)
-                       return;
-               break;
-       default:
-               return;
-       }
-
-       chanctx_conf = rcu_dereference(vif->chanctx_conf);
-
-       /* If channel context is invalid or not on 2.4GHz ... */
-       if ((!chanctx_conf ||
-            chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) {
-               if (vif->type == NL80211_IFTYPE_STATION) {
-                       /* ... relax constraints and disable rssi events */
-                       iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
-                                           smps_mode);
-                       iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
-                                                   false);
-                       iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
-               }
-               return;
-       }
-
-       bt_activity_grading = le32_to_cpu(data->notif->bt_activity_grading);
-       if (bt_activity_grading >= BT_HIGH_TRAFFIC)
-               smps_mode = IEEE80211_SMPS_STATIC;
-       else if (bt_activity_grading >= BT_LOW_TRAFFIC)
-               smps_mode = vif->type == NL80211_IFTYPE_AP ?
-                               IEEE80211_SMPS_OFF :
-                               IEEE80211_SMPS_DYNAMIC;
-
-       /* relax SMPS constraints for next association */
-       if (!vif->bss_conf.assoc)
-               smps_mode = IEEE80211_SMPS_AUTOMATIC;
-
-       if (mvmvif->phy_ctxt &&
-           data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id))
-               smps_mode = IEEE80211_SMPS_AUTOMATIC;
-
-       IWL_DEBUG_COEX(data->mvm,
-                      "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
-                      mvmvif->id, data->notif->bt_status, bt_activity_grading,
-                      smps_mode);
-
-       if (vif->type == NL80211_IFTYPE_STATION)
-               iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
-                                   smps_mode);
-
-       /* low latency is always primary */
-       if (iwl_mvm_vif_low_latency(mvmvif)) {
-               data->primary_ll = true;
-
-               data->secondary = data->primary;
-               data->primary = chanctx_conf;
-       }
-
-       if (vif->type == NL80211_IFTYPE_AP) {
-               if (!mvmvif->ap_ibss_active)
-                       return;
-
-               if (chanctx_conf == data->primary)
-                       return;
-
-               if (!data->primary_ll) {
-                       /*
-                        * downgrade the current primary no matter what its
-                        * type is.
-                        */
-                       data->secondary = data->primary;
-                       data->primary = chanctx_conf;
-               } else {
-                       /* there is low latency vif - we will be secondary */
-                       data->secondary = chanctx_conf;
-               }
-               return;
-       }
-
-       /*
-        * STA / P2P client: try to be primary if this is the first vif. If we
-        * are in low-latency mode, we are already primary and there is little
-        * left to do.
-        */
-       if (!data->primary || data->primary == chanctx_conf)
-               data->primary = chanctx_conf;
-       else if (!data->secondary)
-               /* if secondary is not NULL, it might be a GO */
-               data->secondary = chanctx_conf;
-
-       /*
-        * don't reduce the Tx power if one of these is true:
-        *  we are in LOOSE
-        *  single shared antenna product
-        *  we are not associated
-        *  BT is not active
-        */
-       if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
-           mvm->cfg->bt_shared_single_ant || !vif->bss_conf.assoc ||
-           !data->notif->bt_status) {
-               iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false);
-               iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
-               return;
-       }
-
-       /* try to get the avg rssi from fw */
-       ave_rssi = mvmvif->bf_data.ave_beacon_signal;
-
-       /* if the RSSI isn't valid, assume it is very low */
-       if (!ave_rssi)
-               ave_rssi = -100;
-       if (ave_rssi > -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH) {
-               if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true))
-                       IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
-       } else if (ave_rssi < -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH) {
-               if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false))
-                       IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
-       }
-
-       /* Begin to monitor the RSSI: it may influence the reduced Tx power */
-       iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi);
-}
-
-static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
-{
-       struct iwl_bt_iterator_data data = {
-               .mvm = mvm,
-               .notif = &mvm->last_bt_notif_old,
-       };
-       struct iwl_bt_coex_ci_cmd_old cmd = {};
-       u8 ci_bw_idx;
-
-       /* Ignore updates if we are in force mode */
-       if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
-               return;
-
-       rcu_read_lock();
-       ieee80211_iterate_active_interfaces_atomic(
-                                       mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-                                       iwl_mvm_bt_notif_iterator, &data);
-
-       if (data.primary) {
-               struct ieee80211_chanctx_conf *chan = data.primary;
-
-               if (WARN_ON(!chan->def.chan)) {
-                       rcu_read_unlock();
-                       return;
-               }
-
-               if (chan->def.width < NL80211_CHAN_WIDTH_40) {
-                       ci_bw_idx = 0;
-                       cmd.co_run_bw_primary = 0;
-               } else {
-                       cmd.co_run_bw_primary = 1;
-                       if (chan->def.center_freq1 >
-                           chan->def.chan->center_freq)
-                               ci_bw_idx = 2;
-                       else
-                               ci_bw_idx = 1;
-               }
-
-               cmd.bt_primary_ci =
-                       iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
-               cmd.primary_ch_phy_id = *((u16 *)data.primary->drv_priv);
-       }
-
-       if (data.secondary) {
-               struct ieee80211_chanctx_conf *chan = data.secondary;
-
-               if (WARN_ON(!data.secondary->def.chan)) {
-                       rcu_read_unlock();
-                       return;
-               }
-
-               if (chan->def.width < NL80211_CHAN_WIDTH_40) {
-                       ci_bw_idx = 0;
-                       cmd.co_run_bw_secondary = 0;
-               } else {
-                       cmd.co_run_bw_secondary = 1;
-                       if (chan->def.center_freq1 >
-                           chan->def.chan->center_freq)
-                               ci_bw_idx = 2;
-                       else
-                               ci_bw_idx = 1;
-               }
-
-               cmd.bt_secondary_ci =
-                       iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
-               cmd.secondary_ch_phy_id = *((u16 *)data.secondary->drv_priv);
-       }
-
-       rcu_read_unlock();
-
-       /* Don't spam the fw with the same command over and over */
-       if (memcmp(&cmd, &mvm->last_bt_ci_cmd_old, sizeof(cmd))) {
-               if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, 0,
-                                        sizeof(cmd), &cmd))
-                       IWL_ERR(mvm, "Failed to send BT_CI cmd\n");
-               memcpy(&mvm->last_bt_ci_cmd_old, &cmd, sizeof(cmd));
-       }
-
-       if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm))
-               IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
-}
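For reference, a minimal sketch (editorial, not part of the patch) of the bandwidth-index mapping that both the primary and secondary branches above implement; the helper name bt_ci_bw_idx is invented here:

static u8 bt_ci_bw_idx(const struct cfg80211_chan_def *def)
{
        /* index 0: sub-40MHz channels */
        if (def->width < NL80211_CHAN_WIDTH_40)
                return 0;
        /* 40MHz: 2 if the secondary channel lies above the control channel, else 1 */
        return def->center_freq1 > def->chan->center_freq ? 2 : 1;
}

The resulting index selects one of the three per-channel entries in iwl_ci_mask[hw_value][].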
-
-void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
-                                 struct iwl_rx_cmd_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_bt_coex_profile_notif_old *notif = (void *)pkt->data;
-
-       IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
-       IWL_DEBUG_COEX(mvm, "\tBT status: %s\n",
-                      notif->bt_status ? "ON" : "OFF");
-       IWL_DEBUG_COEX(mvm, "\tBT open conn %d\n", notif->bt_open_conn);
-       IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
-       IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
-                      le32_to_cpu(notif->primary_ch_lut));
-       IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n",
-                      le32_to_cpu(notif->secondary_ch_lut));
-       IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n",
-                      le32_to_cpu(notif->bt_activity_grading));
-       IWL_DEBUG_COEX(mvm, "\tBT agg traffic load %d\n",
-                      notif->bt_agg_traffic_load);
-
-       /* remember this notification for future use: rssi fluctuations */
-       memcpy(&mvm->last_bt_notif_old, notif, sizeof(mvm->last_bt_notif_old));
-
-       iwl_mvm_bt_coex_notif_handle(mvm);
-}
-
-static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
-                                    struct ieee80211_vif *vif)
-{
-       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       struct iwl_bt_iterator_data *data = _data;
-       struct iwl_mvm *mvm = data->mvm;
-
-       struct ieee80211_sta *sta;
-       struct iwl_mvm_sta *mvmsta;
-
-       struct ieee80211_chanctx_conf *chanctx_conf;
-
-       rcu_read_lock();
-       chanctx_conf = rcu_dereference(vif->chanctx_conf);
-       /* If channel context is invalid or not on 2.4GHz - don't count it */
-       if (!chanctx_conf ||
-           chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
-               rcu_read_unlock();
-               return;
-       }
-       rcu_read_unlock();
-
-       if (vif->type != NL80211_IFTYPE_STATION ||
-           mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
-               return;
-
-       sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
-                                       lockdep_is_held(&mvm->mutex));
-
-       /* This can happen if the station has been removed right now */
-       if (IS_ERR_OR_NULL(sta))
-               return;
-
-       mvmsta = iwl_mvm_sta_from_mac80211(sta);
-}
-
-void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-                              enum ieee80211_rssi_event_data rssi_event)
-{
-       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       struct iwl_bt_iterator_data data = {
-               .mvm = mvm,
-       };
-       int ret;
-
-       lockdep_assert_held(&mvm->mutex);
-
-       /* Ignore updates if we are in force mode */
-       if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
-               return;
-
-       /*
-        * RSSI update while not associated - can happen since the statistics
-        * are handled asynchronously
-        */
-       if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
-               return;
-
-       /* No BT - reports should be disabled */
-       if (!mvm->last_bt_notif_old.bt_status)
-               return;
-
-       IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid,
-                      rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW");
-
-       /*
-        * Check if the RSSI is good enough for reduced Tx power, but not when
-        * in the loose scheme.
-        */
-       if (rssi_event == RSSI_EVENT_LOW || mvm->cfg->bt_shared_single_ant ||
-           iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT)
-               ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
-                                                 false);
-       else
-               ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true);
-
-       if (ret)
-               IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n");
-
-       ieee80211_iterate_active_interfaces_atomic(
-               mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-               iwl_mvm_bt_rssi_iterator, &data);
-
-       if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm))
-               IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
-}
-
-#define LINK_QUAL_AGG_TIME_LIMIT_DEF   (4000)
-#define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT        (1200)
-
-u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm,
-                                   struct ieee80211_sta *sta)
-{
-       struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-       enum iwl_bt_coex_lut_type lut_type;
-
-       if (le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading) <
-           BT_HIGH_TRAFFIC)
-               return LINK_QUAL_AGG_TIME_LIMIT_DEF;
-
-       if (mvm->last_bt_notif_old.ttc_enabled)
-               return LINK_QUAL_AGG_TIME_LIMIT_DEF;
-
-       lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
-
-       if (lut_type == BT_COEX_LOOSE_LUT || lut_type == BT_COEX_INVALID_LUT)
-               return LINK_QUAL_AGG_TIME_LIMIT_DEF;
-
-       /* tight coex, high bt traffic, reduce AGG time limit */
-       return LINK_QUAL_AGG_TIME_LIMIT_BT_ACT;
-}
-
-bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm,
-                                        struct ieee80211_sta *sta)
-{
-       struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-       enum iwl_bt_coex_lut_type lut_type;
-
-       if (mvm->last_bt_notif_old.ttc_enabled)
-               return true;
-
-       if (le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading) <
-           BT_HIGH_TRAFFIC)
-               return true;
-
-       /*
-        * In Tight / TxTxDis, BT can't Rx while we Tx, so use both antennas
-        * since BT is already killed.
-        * In Loose, BT can Rx while we Tx, so forbid MIMO to let BT Rx while
-        * we Tx.
-        * When we are in 5GHz, we'll get BT_COEX_INVALID_LUT allowing MIMO.
-        */
-       lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
-       return lut_type != BT_COEX_LOOSE_LUT;
-}
-
-bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm)
-{
-       u32 ag = le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading);
-       return ag < BT_HIGH_TRAFFIC;
-}
-
-bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm,
-                                       enum ieee80211_band band)
-{
-       u32 bt_activity =
-               le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading);
-
-       if (band != IEEE80211_BAND_2GHZ)
-               return false;
-
-       return bt_activity >= BT_LOW_TRAFFIC;
-}
-
-void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm)
-{
-       iwl_mvm_bt_coex_notif_handle(mvm);
-}
-
-void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
-                                      struct iwl_rx_cmd_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       u32 ant_isolation = le32_to_cpup((void *)pkt->data);
-       u8 __maybe_unused lower_bound, upper_bound;
-       u8 lut;
-
-       struct iwl_bt_coex_cmd_old *bt_cmd;
-       struct iwl_host_cmd cmd = {
-               .id = BT_CONFIG,
-               .len = { sizeof(*bt_cmd), },
-               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
-       };
-
-       if (!iwl_mvm_bt_is_plcr_supported(mvm))
-               return;
-
-       lockdep_assert_held(&mvm->mutex);
-
-       /* Ignore updates if we are in force mode */
-       if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
-               return;
-
-       if (ant_isolation == mvm->last_ant_isol)
-               return;
-
-       for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++)
-               if (ant_isolation < antenna_coupling_ranges[lut + 1].range)
-                       break;
-
-       lower_bound = antenna_coupling_ranges[lut].range;
-
-       if (lut < ARRAY_SIZE(antenna_coupling_ranges) - 1)
-               upper_bound = antenna_coupling_ranges[lut + 1].range;
-       else
-               upper_bound = antenna_coupling_ranges[lut].range;
-
-       IWL_DEBUG_COEX(mvm, "Antenna isolation=%d in range [%d,%d[, lut=%d\n",
-                      ant_isolation, lower_bound, upper_bound, lut);
-
-       mvm->last_ant_isol = ant_isolation;
-
-       if (mvm->last_corun_lut == lut)
-               return;
-
-       mvm->last_corun_lut = lut;
-
-       bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
-       if (!bt_cmd)
-               return;
-       cmd.data[0] = bt_cmd;
-
-       bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
-       bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE |
-                                            BT_VALID_CORUN_LUT_20 |
-                                            BT_VALID_CORUN_LUT_40);
-
-       /* For the moment, use the same LUT for 20MHz and 40MHz */
-       memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[lut].lut20,
-              sizeof(bt_cmd->bt4_corun_lut20));
-
-       memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20,
-              sizeof(bt_cmd->bt4_corun_lut40));
-
-       if (iwl_mvm_send_cmd(mvm, &cmd))
-               IWL_ERR(mvm, "failed to send BT_CONFIG command\n");
-
-       kfree(bt_cmd);
-}
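For reference, the LUT selection above reduces to a simple range search; a minimal sketch (editorial; pick_corun_lut is an invented name), assuming antenna_coupling_ranges[] is sorted by its .range member:

static u8 pick_corun_lut(u32 ant_isolation)
{
        u8 lut;

        /* choose the last entry whose lower bound is <= ant_isolation */
        for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++)
                if (ant_isolation < antenna_coupling_ranges[lut + 1].range)
                        break;
        return lut;
}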
index 4b560e4417ee55fa1da2539dbe7f3c1482fe8790..b96b1c6a97fa2db0a0474046ee23e97cbfec34a4 100644 (file)
@@ -75,7 +75,6 @@
 #define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT      (10 * USEC_PER_MSEC)
 #define IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT       (2 * 1024) /* defined in TU */
 #define IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT       (40 * 1024) /* defined in TU */
-#define IWL_MVM_P2P_UAPSD_STANDALONE           0
 #define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE       0
 #define IWL_MVM_UAPSD_RX_DATA_TIMEOUT          (50 * USEC_PER_MSEC)
 #define IWL_MVM_UAPSD_TX_DATA_TIMEOUT          (50 * USEC_PER_MSEC)
index c1a313149eed876fea73a764d0d519e0244c6e9d..e3561bbc2468c71e6fa4c891bc36de367b419f0a 100644 (file)
@@ -723,7 +723,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                return -EIO;
        }
 
-       ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false);
+       ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false, 0);
        if (ret)
                return ret;
        rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta);
index 14004456bf550db58ff95c511e6dee3203e32fed..fb96bc00f022586f5198e2519fe0e7e16a938ef4 100644 (file)
@@ -724,9 +724,9 @@ static ssize_t iwl_dbgfs_tof_responder_params_write(struct ieee80211_vif *vif,
 
                ret = kstrtou32(data, 10, &value);
                if (ret == 0 && value) {
-                       enum ieee80211_band band = (cmd->channel_num <= 14) ?
-                                                  IEEE80211_BAND_2GHZ :
-                                                  IEEE80211_BAND_5GHZ;
+                       enum nl80211_band band = (cmd->channel_num <= 14) ?
+                                                  NL80211_BAND_2GHZ :
+                                                  NL80211_BAND_5GHZ;
                        struct ieee80211_channel chn = {
                                .band = band,
                                .center_freq = ieee80211_channel_to_frequency(
@@ -1425,6 +1425,89 @@ static ssize_t iwl_dbgfs_quota_min_read(struct file *file,
        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
 
+static const char * const chanwidths[] = {
+       [NL80211_CHAN_WIDTH_20_NOHT] = "noht",
+       [NL80211_CHAN_WIDTH_20] = "ht20",
+       [NL80211_CHAN_WIDTH_40] = "ht40",
+       [NL80211_CHAN_WIDTH_80] = "vht80",
+       [NL80211_CHAN_WIDTH_80P80] = "vht80p80",
+       [NL80211_CHAN_WIDTH_160] = "vht160",
+};
+
+static bool iwl_mvm_lqm_notif_wait(struct iwl_notif_wait_data *notif_wait,
+                                  struct iwl_rx_packet *pkt, void *data)
+{
+       struct ieee80211_vif *vif = data;
+       struct iwl_mvm *mvm =
+               container_of(notif_wait, struct iwl_mvm, notif_wait);
+       struct iwl_link_qual_msrmnt_notif *report = (void *)pkt->data;
+       u32 num_of_stations = le32_to_cpu(report->number_of_stations);
+       int i;
+
+       IWL_INFO(mvm, "LQM report:\n");
+       IWL_INFO(mvm, "\tstatus: %d\n", report->status);
+       IWL_INFO(mvm, "\tmacID: %d\n", le32_to_cpu(report->mac_id));
+       IWL_INFO(mvm, "\ttx_frame_dropped: %d\n",
+                le32_to_cpu(report->tx_frame_dropped));
+       IWL_INFO(mvm, "\ttime_in_measurement_window: %d us\n",
+                le32_to_cpu(report->time_in_measurement_window));
+       IWL_INFO(mvm, "\ttotal_air_time_other_stations: %d\n",
+                le32_to_cpu(report->total_air_time_other_stations));
+       IWL_INFO(mvm, "\tchannel_freq: %d\n",
+                vif->bss_conf.chandef.center_freq1);
+       IWL_INFO(mvm, "\tchannel_width: %s\n",
+                chanwidths[vif->bss_conf.chandef.width]);
+       IWL_INFO(mvm, "\tnumber_of_stations: %d\n", num_of_stations);
+       for (i = 0; i < num_of_stations; i++)
+               IWL_INFO(mvm, "\t\tsta[%d]: %d\n", i,
+                        report->frequent_stations_air_time[i]);
+
+       return true;
+}
+
+static ssize_t iwl_dbgfs_lqm_send_cmd_write(struct ieee80211_vif *vif,
+                                           char *buf, size_t count,
+                                           loff_t *ppos)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       struct iwl_notification_wait wait_lqm_notif;
+       static u16 lqm_notif[] = {
+               WIDE_ID(MAC_CONF_GROUP,
+                       LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF)
+       };
+       int err;
+       u32 duration;
+       u32 timeout;
+
+       if (sscanf(buf, "%d,%d", &duration, &timeout) != 2)
+               return -EINVAL;
+
+       iwl_init_notification_wait(&mvm->notif_wait, &wait_lqm_notif,
+                                  lqm_notif, ARRAY_SIZE(lqm_notif),
+                                  iwl_mvm_lqm_notif_wait, vif);
+       mutex_lock(&mvm->mutex);
+       err = iwl_mvm_send_lqm_cmd(vif, LQM_CMD_OPERATION_START_MEASUREMENT,
+                                  duration, timeout);
+       mutex_unlock(&mvm->mutex);
+
+       if (err) {
+               IWL_ERR(mvm, "Failed to send lqm cmdf(err=%d)\n", err);
+               iwl_remove_notification(&mvm->notif_wait, &wait_lqm_notif);
+               return err;
+       }
+
+       /* wait for 2 * timeout (safety guard), converting usec to jiffies */
+       timeout = msecs_to_jiffies((timeout * 2) / 1000);
+
+       err = iwl_wait_notification(&mvm->notif_wait, &wait_lqm_notif,
+                                   timeout);
+       if (err)
+               IWL_ERR(mvm, "Getting lqm notif timed out\n");
+
+       return count;
+}
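A usage note on the debugfs hook above: it parses its input as "duration,timeout", both in usec (matching the LQM command API added later in this patch), so a measurement could be started with, e.g., echo "1000000,2000000" > lqm_send_cmd from the vif's iwlmvm debugfs directory; the values and exact path here are illustrative, not taken from the patch.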
+
 #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
        _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
 #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@@ -1449,6 +1532,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32);
 MVM_DEBUGFS_READ_FILE_OPS(tof_range_response);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32);
+MVM_DEBUGFS_WRITE_FILE_OPS(lqm_send_cmd, 64);
 
 void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
@@ -1488,6 +1572,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                                 S_IRUSR | S_IWUSR);
        MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir,
                                 S_IRUSR | S_IWUSR);
+       MVM_DEBUGFS_ADD_FILE_VIF(lqm_send_cmd, mvmvif->dbgfs_dir, S_IWUSR);
 
        if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
            mvmvif == mvm->bf_allowed_vif)
index a43b3921c4c15e612aff154b86ece64db5a9117e..362a54601a80505a1d39d8b3521817f7308c8945 100644 (file)
@@ -65,6 +65,7 @@
  *****************************************************************************/
 #include <linux/vmalloc.h>
 #include <linux/ieee80211.h>
+#include <linux/netdevice.h>
 
 #include "mvm.h"
 #include "fw-dbg.h"
@@ -463,69 +464,11 @@ int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf,
        return pos;
 }
 
-static
-int iwl_mvm_coex_dump_mbox_old(struct iwl_bt_coex_profile_notif_old *notif,
-                              char *buf, int pos, int bufsz)
-{
-       pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n");
-
-       BT_MBOX_PRINT(0, LE_SLAVE_LAT, false);
-       BT_MBOX_PRINT(0, LE_PROF1, false);
-       BT_MBOX_PRINT(0, LE_PROF2, false);
-       BT_MBOX_PRINT(0, LE_PROF_OTHER, false);
-       BT_MBOX_PRINT(0, CHL_SEQ_N, false);
-       BT_MBOX_PRINT(0, INBAND_S, false);
-       BT_MBOX_PRINT(0, LE_MIN_RSSI, false);
-       BT_MBOX_PRINT(0, LE_SCAN, false);
-       BT_MBOX_PRINT(0, LE_ADV, false);
-       BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false);
-       BT_MBOX_PRINT(0, OPEN_CON_1, true);
-
-       pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw1:\n");
-
-       BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false);
-       BT_MBOX_PRINT(1, IP_SR, false);
-       BT_MBOX_PRINT(1, LE_MSTR, false);
-       BT_MBOX_PRINT(1, AGGR_TRFC_LD, false);
-       BT_MBOX_PRINT(1, MSG_TYPE, false);
-       BT_MBOX_PRINT(1, SSN, true);
-
-       pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw2:\n");
-
-       BT_MBOX_PRINT(2, SNIFF_ACT, false);
-       BT_MBOX_PRINT(2, PAG, false);
-       BT_MBOX_PRINT(2, INQUIRY, false);
-       BT_MBOX_PRINT(2, CONN, false);
-       BT_MBOX_PRINT(2, SNIFF_INTERVAL, false);
-       BT_MBOX_PRINT(2, DISC, false);
-       BT_MBOX_PRINT(2, SCO_TX_ACT, false);
-       BT_MBOX_PRINT(2, SCO_RX_ACT, false);
-       BT_MBOX_PRINT(2, ESCO_RE_TX, false);
-       BT_MBOX_PRINT(2, SCO_DURATION, true);
-
-       pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw3:\n");
-
-       BT_MBOX_PRINT(3, SCO_STATE, false);
-       BT_MBOX_PRINT(3, SNIFF_STATE, false);
-       BT_MBOX_PRINT(3, A2DP_STATE, false);
-       BT_MBOX_PRINT(3, ACL_STATE, false);
-       BT_MBOX_PRINT(3, MSTR_STATE, false);
-       BT_MBOX_PRINT(3, OBX_STATE, false);
-       BT_MBOX_PRINT(3, OPEN_CON_2, false);
-       BT_MBOX_PRINT(3, TRAFFIC_LOAD, false);
-       BT_MBOX_PRINT(3, CHL_SEQN_LSB, false);
-       BT_MBOX_PRINT(3, INBAND_P, false);
-       BT_MBOX_PRINT(3, MSG_TYPE_2, false);
-       BT_MBOX_PRINT(3, SSN_2, false);
-       BT_MBOX_PRINT(3, UPDATE_REQUEST, true);
-
-       return pos;
-}
-
 static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
                                       size_t count, loff_t *ppos)
 {
        struct iwl_mvm *mvm = file->private_data;
+       struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif;
        char *buf;
        int ret, pos = 0, bufsz = sizeof(char) * 1024;
 
@@ -535,52 +478,24 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
 
        mutex_lock(&mvm->mutex);
 
-       if (!fw_has_api(&mvm->fw->ucode_capa,
-                       IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
-               struct iwl_bt_coex_profile_notif_old *notif =
-                       &mvm->last_bt_notif_old;
-
-               pos += iwl_mvm_coex_dump_mbox_old(notif, buf, pos, bufsz);
-
-               pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n",
-                                notif->bt_ci_compliance);
-               pos += scnprintf(buf+pos, bufsz-pos, "primary_ch_lut = %d\n",
-                                le32_to_cpu(notif->primary_ch_lut));
-               pos += scnprintf(buf+pos, bufsz-pos, "secondary_ch_lut = %d\n",
-                                le32_to_cpu(notif->secondary_ch_lut));
-               pos += scnprintf(buf+pos,
-                                bufsz-pos, "bt_activity_grading = %d\n",
-                                le32_to_cpu(notif->bt_activity_grading));
-               pos += scnprintf(buf+pos, bufsz-pos,
-                                "antenna isolation = %d CORUN LUT index = %d\n",
-                                mvm->last_ant_isol, mvm->last_corun_lut);
-               pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n",
-                                notif->rrc_enabled);
-               pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n",
-                                notif->ttc_enabled);
-       } else {
-               struct iwl_bt_coex_profile_notif *notif =
-                       &mvm->last_bt_notif;
-
-               pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz);
-
-               pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n",
-                                notif->bt_ci_compliance);
-               pos += scnprintf(buf+pos, bufsz-pos, "primary_ch_lut = %d\n",
-                                le32_to_cpu(notif->primary_ch_lut));
-               pos += scnprintf(buf+pos, bufsz-pos, "secondary_ch_lut = %d\n",
-                                le32_to_cpu(notif->secondary_ch_lut));
-               pos += scnprintf(buf+pos,
-                                bufsz-pos, "bt_activity_grading = %d\n",
-                                le32_to_cpu(notif->bt_activity_grading));
-               pos += scnprintf(buf+pos, bufsz-pos,
-                                "antenna isolation = %d CORUN LUT index = %d\n",
-                                mvm->last_ant_isol, mvm->last_corun_lut);
-               pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n",
-                                (notif->ttc_rrc_status >> 4) & 0xF);
-               pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n",
-                                notif->ttc_rrc_status & 0xF);
-       }
+       pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "bt_ci_compliance = %d\n",
+                        notif->bt_ci_compliance);
+       pos += scnprintf(buf + pos, bufsz - pos, "primary_ch_lut = %d\n",
+                        le32_to_cpu(notif->primary_ch_lut));
+       pos += scnprintf(buf + pos, bufsz - pos, "secondary_ch_lut = %d\n",
+                        le32_to_cpu(notif->secondary_ch_lut));
+       pos += scnprintf(buf + pos,
+                        bufsz - pos, "bt_activity_grading = %d\n",
+                        le32_to_cpu(notif->bt_activity_grading));
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "antenna isolation = %d CORUN LUT index = %d\n",
+                        mvm->last_ant_isol, mvm->last_corun_lut);
+       pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n",
+                        (notif->ttc_rrc_status >> 4) & 0xF);
+       pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n",
+                        notif->ttc_rrc_status & 0xF);
 
        pos += scnprintf(buf + pos, bufsz - pos, "sync_sco = %d\n",
                         IWL_MVM_BT_COEX_SYNC2SCO);
@@ -602,44 +517,20 @@ static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
 {
        struct iwl_mvm *mvm = file->private_data;
+       struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
        char buf[256];
        int bufsz = sizeof(buf);
        int pos = 0;
 
        mutex_lock(&mvm->mutex);
 
-       if (!fw_has_api(&mvm->fw->ucode_capa,
-                       IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
-               struct iwl_bt_coex_ci_cmd_old *cmd = &mvm->last_bt_ci_cmd_old;
-
-               pos += scnprintf(buf+pos, bufsz-pos,
-                                "Channel inhibition CMD\n");
-               pos += scnprintf(buf+pos, bufsz-pos,
-                              "\tPrimary Channel Bitmap 0x%016llx\n",
-                              le64_to_cpu(cmd->bt_primary_ci));
-               pos += scnprintf(buf+pos, bufsz-pos,
-                              "\tSecondary Channel Bitmap 0x%016llx\n",
-                              le64_to_cpu(cmd->bt_secondary_ci));
-
-               pos += scnprintf(buf+pos, bufsz-pos,
-                                "BT Configuration CMD - 0=default, 1=never, 2=always\n");
-               pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill msk idx %d\n",
-                                mvm->bt_ack_kill_msk[0]);
-               pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill msk idx %d\n",
-                                mvm->bt_cts_kill_msk[0]);
-
-       } else {
-               struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
-
-               pos += scnprintf(buf+pos, bufsz-pos,
-                                "Channel inhibition CMD\n");
-               pos += scnprintf(buf+pos, bufsz-pos,
-                              "\tPrimary Channel Bitmap 0x%016llx\n",
-                              le64_to_cpu(cmd->bt_primary_ci));
-               pos += scnprintf(buf+pos, bufsz-pos,
-                              "\tSecondary Channel Bitmap 0x%016llx\n",
-                              le64_to_cpu(cmd->bt_secondary_ci));
-       }
+       pos += scnprintf(buf + pos, bufsz - pos, "Channel inhibition CMD\n");
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "\tPrimary Channel Bitmap 0x%016llx\n",
+                        le64_to_cpu(cmd->bt_primary_ci));
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "\tSecondary Channel Bitmap 0x%016llx\n",
+                        le64_to_cpu(cmd->bt_secondary_ci));
 
        mutex_unlock(&mvm->mutex);
 
@@ -990,8 +881,10 @@ static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm,
        struct iwl_rss_config_cmd cmd = {
                .flags = cpu_to_le32(IWL_RSS_ENABLE),
                .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
+                            IWL_RSS_HASH_TYPE_IPV4_UDP |
                             IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
                             IWL_RSS_HASH_TYPE_IPV6_TCP |
+                            IWL_RSS_HASH_TYPE_IPV6_UDP |
                             IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
        };
        int ret, i, num_repeats, nbytes = count / 2;
@@ -1015,7 +908,7 @@ static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm,
        memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table,
               ARRAY_SIZE(cmd.indirection_table) % nbytes);
 
-       memcpy(cmd.secret_key, mvm->secret_key, sizeof(cmd.secret_key));
+       netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
 
        mutex_lock(&mvm->mutex);
        ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
index 7a16e55df01230fea7b4a098a9392a3969baf5ec..4c086d04809765be7fbffe81ad9361ec5a854241 100644 (file)
@@ -268,12 +268,25 @@ enum iwl_rx_mpdu_amsdu_info {
        IWL_RX_MPDU_AMSDU_LAST_SUBFRAME         = 0x80,
 };
 
+enum iwl_rx_l3_proto_values {
+       IWL_RX_L3_TYPE_NONE,
+       IWL_RX_L3_TYPE_IPV4,
+       IWL_RX_L3_TYPE_IPV4_FRAG,
+       IWL_RX_L3_TYPE_IPV6_FRAG,
+       IWL_RX_L3_TYPE_IPV6,
+       IWL_RX_L3_TYPE_IPV6_IN_IPV4,
+       IWL_RX_L3_TYPE_ARP,
+       IWL_RX_L3_TYPE_EAPOL,
+};
+
+#define IWL_RX_L3_PROTO_POS 4
+
 enum iwl_rx_l3l4_flags {
        IWL_RX_L3L4_IP_HDR_CSUM_OK              = BIT(0),
        IWL_RX_L3L4_TCP_UDP_CSUM_OK             = BIT(1),
        IWL_RX_L3L4_TCP_FIN_SYN_RST_PSH         = BIT(2),
        IWL_RX_L3L4_TCP_ACK                     = BIT(3),
-       IWL_RX_L3L4_L3_PROTO_MASK               = 0xf << 4,
+       IWL_RX_L3L4_L3_PROTO_MASK               = 0xf << IWL_RX_L3_PROTO_POS,
        IWL_RX_L3L4_L4_PROTO_MASK               = 0xf << 8,
        IWL_RX_L3L4_RSS_HASH_MASK               = 0xf << 12,
 };
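Since the L3 protocol field now has a named shift, a one-line decoder shows how a consumer would extract it from the RX metadata (editorial sketch; iwl_rx_l3_proto is an invented helper name):

static inline enum iwl_rx_l3_proto_values iwl_rx_l3_proto(u32 l3l4_flags)
{
        /* 4-bit L3 protocol type, see iwl_rx_l3_proto_values */
        return (l3l4_flags & IWL_RX_L3L4_L3_PROTO_MASK) >> IWL_RX_L3_PROTO_POS;
}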
index ba3f0bbddde8874db03a74cd9a0ca01df862bc38..dadcccd88255790c5b9f96c01af609bd382ae8b8 100644 (file)
@@ -6,6 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -193,11 +194,41 @@ enum iwl_tx_pm_timeouts {
 #define IWL_BAR_DFAULT_RETRY_LIMIT             60
 #define IWL_LOW_RETRY_LIMIT                    7
 
+/**
+ * enum iwl_tx_offload_assist_flags_pos -  set %iwl_tx_cmd offload_assist values
+ * @TX_CMD_OFFLD_IP_HDR_OFFSET: offset to start of IP header (in words)
+ *     from mac header end. For normal case it is 4 words for SNAP.
+ *     note: tx_cmd, mac header and pad are not counted in the offset.
+ *     This is used to help the offload in case there is tunneling such as
+ *     IPv6 in IPv4, in such case the ip header offset should point to the
+ *     inner ip header and IPv4 checksum of the external header should be
+ *     calculated by driver.
+ * @TX_CMD_OFFLD_L4_EN: enable TCP/UDP checksum
+ * @TX_CMD_OFFLD_L3_EN: enable IP header checksum
+ * @TX_CMD_OFFLD_MH_SIZE: size of the mac header in words. Includes the IV
+ *     field. Doesn't include the pad.
+ * @TX_CMD_OFFLD_PAD: mark 2-byte pad was inserted after the mac header for
+ *     alignment
+ * @TX_CMD_OFFLD_AMSDU: mark TX command is A-MSDU
+ */
+enum iwl_tx_offload_assist_flags_pos {
+       TX_CMD_OFFLD_IP_HDR =           0,
+       TX_CMD_OFFLD_L4_EN =            6,
+       TX_CMD_OFFLD_L3_EN =            7,
+       TX_CMD_OFFLD_MH_SIZE =          8,
+       TX_CMD_OFFLD_PAD =              13,
+       TX_CMD_OFFLD_AMSDU =            14,
+};
+
+#define IWL_TX_CMD_OFFLD_MH_MASK       0x1f
+#define IWL_TX_CMD_OFFLD_IP_HDR_MASK   0x3f
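To make the bit layout concrete, a minimal sketch (editorial; build_offload_assist is an invented name) of composing an offload_assist word from the positions and masks above:

static __le16 build_offload_assist(u8 ip_hdr_off_words, u8 mh_len_words,
                                   bool l3_csum, bool l4_csum, bool padded)
{
        u16 oa = 0;

        oa |= (ip_hdr_off_words & IWL_TX_CMD_OFFLD_IP_HDR_MASK) <<
                TX_CMD_OFFLD_IP_HDR;
        oa |= (mh_len_words & IWL_TX_CMD_OFFLD_MH_MASK) << TX_CMD_OFFLD_MH_SIZE;
        if (l3_csum)
                oa |= BIT(TX_CMD_OFFLD_L3_EN);
        if (l4_csum)
                oa |= BIT(TX_CMD_OFFLD_L4_EN);
        if (padded)
                oa |= BIT(TX_CMD_OFFLD_PAD);
        return cpu_to_le16(oa);
}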
+
 /* TODO: complete documentation for try_cnt and btkill_cnt */
 /**
  * struct iwl_tx_cmd - TX command struct to FW
  * ( TX_CMD = 0x1c )
  * @len: in bytes of the payload, see below for details
+ * @offload_assist: TX offload configuration
  * @tx_flags: combination of TX_CMD_FLG_*
  * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
  *     cleared. Combination of RATE_MCS_*
@@ -231,7 +262,7 @@ enum iwl_tx_pm_timeouts {
  */
 struct iwl_tx_cmd {
        __le16 len;
-       __le16 next_frame_len;
+       __le16 offload_assist;
        __le32 tx_flags;
        struct {
                u8 try_cnt;
@@ -255,7 +286,7 @@ struct iwl_tx_cmd {
        __le16 reserved4;
        u8 payload[0];
        struct ieee80211_hdr hdr[0];
-} __packed; /* TX_CMD_API_S_VER_3 */
+} __packed; /* TX_CMD_API_S_VER_6 */
 
 /*
  * TX response related data
index 4a0fc47c81f2fad865af95ae579e9dfef50cca34..60eed8485aba8b1ff0a2c297daeaeb47315c2a5c 100644 (file)
 #include "fw-api-stats.h"
 #include "fw-api-tof.h"
 
-/* Tx queue numbers */
+/* Tx queue numbers for non-DQA mode */
 enum {
        IWL_MVM_OFFCHANNEL_QUEUE = 8,
        IWL_MVM_CMD_QUEUE = 9,
 };
 
+/*
+ * DQA queue numbers
+ *
+ * @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW
+ * @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames
+ * @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure
+ *     that we are never left without the ability to connect to an AP.
+ * @IWL_MVM_DQA_MIN_MGMT_QUEUE: first TXQ in pool for MGMT and non-QOS frames.
+ *     Each MGMT queue is mapped to a single STA
+ *     MGMT frames are frames that return true on ieee80211_is_mgmt()
+ * @IWL_MVM_DQA_MAX_MGMT_QUEUE: last TXQ in pool for MGMT frames
+ * @IWL_MVM_DQA_MIN_DATA_QUEUE: first TXQ in pool for DATA frames.
+ *     DATA frames are intended for !ieee80211_is_mgmt() frames, but if
+ *     the MGMT TXQ pool is exhausted, mgmt frames can be sent on DATA queues
+ *     as well
+ * @IWL_MVM_DQA_MAX_DATA_QUEUE: last TXQ in pool for DATA frames
+ */
+enum iwl_mvm_dqa_txq {
+       IWL_MVM_DQA_CMD_QUEUE = 0,
+       IWL_MVM_DQA_GCAST_QUEUE = 3,
+       IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4,
+       IWL_MVM_DQA_MIN_MGMT_QUEUE = 5,
+       IWL_MVM_DQA_MAX_MGMT_QUEUE = 8,
+       IWL_MVM_DQA_MIN_DATA_QUEUE = 10,
+       IWL_MVM_DQA_MAX_DATA_QUEUE = 31,
+};
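A couple of helpers spelling out the pool boundaries defined above (editorial sketch; these names are invented here, not taken from the patch):

static inline bool in_dqa_mgmt_pool(u8 queue)
{
        return queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE &&
               queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE;
}

static inline bool in_dqa_data_pool(u8 queue)
{
        return queue >= IWL_MVM_DQA_MIN_DATA_QUEUE &&
               queue <= IWL_MVM_DQA_MAX_DATA_QUEUE;
}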
+
 enum iwl_mvm_tx_fifo {
        IWL_MVM_TX_FIFO_BK = 0,
        IWL_MVM_TX_FIFO_BE,
@@ -279,6 +306,11 @@ enum {
 /* Please keep this enum *SORTED* by hex value.
  * Needed for binary search, otherwise a warning will be triggered.
  */
+enum iwl_mac_conf_subcmd_ids {
+       LINK_QUALITY_MEASUREMENT_CMD = 0x1,
+       LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF = 0xFE,
+};
+
 enum iwl_phy_ops_subcmd_ids {
        CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0,
        CTDP_CONFIG_CMD = 0x03,
@@ -287,6 +319,10 @@ enum iwl_phy_ops_subcmd_ids {
        DTS_MEASUREMENT_NOTIF_WIDE = 0xFF,
 };
 
+enum iwl_system_subcmd_ids {
+       SHARED_MEM_CFG_CMD = 0x0,
+};
+
 enum iwl_data_path_subcmd_ids {
        UPDATE_MU_GROUPS_CMD = 0x1,
        TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2,
@@ -302,6 +338,8 @@ enum iwl_prot_offload_subcmd_ids {
 enum {
        LEGACY_GROUP = 0x0,
        LONG_GROUP = 0x1,
+       SYSTEM_GROUP = 0x2,
+       MAC_CONF_GROUP = 0x3,
        PHY_OPS_GROUP = 0x4,
        DATA_PATH_GROUP = 0x5,
        PROT_OFFLOAD_GROUP = 0xb,
@@ -1923,6 +1961,7 @@ struct iwl_tdls_config_res {
 
 #define TX_FIFO_MAX_NUM                8
 #define RX_FIFO_MAX_NUM                2
+#define TX_FIFO_INTERNAL_MAX_NUM       6
 
 /**
  * Shared memory configuration information from the FW
@@ -1940,6 +1979,12 @@ struct iwl_tdls_config_res {
  * @page_buff_addr: used by UMAC and performance debug (page miss analysis),
  *     when paging is not supported this should be 0
  * @page_buff_size: size of %page_buff_addr
+ * @rxfifo_addr: Start address of rxFifo
+ * @internal_txfifo_addr: start address of internalFifo
+ * @internal_txfifo_size: internal fifos' size
+ *
+ * NOTE: on firmware that doesn't have IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG
+ *      set, the last three members don't exist.
  */
 struct iwl_shared_mem_cfg {
        __le32 shared_mem_addr;
@@ -1951,7 +1996,10 @@ struct iwl_shared_mem_cfg {
        __le32 rxfifo_size[RX_FIFO_MAX_NUM];
        __le32 page_buff_addr;
        __le32 page_buff_size;
-} __packed; /* SHARED_MEM_ALLOC_API_S_VER_1 */
+       __le32 rxfifo_addr;
+       __le32 internal_txfifo_addr;
+       __le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
+} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */
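Per the NOTE above, the three trailing members are only valid when the firmware advertises the capability; a minimal sketch of the guarded read (editorial; parse_shared_mem_cfg is an invented name, and a host-endian internal_txfifo_size[] mirror in mvm->shared_mem_cfg is assumed, as the fw error dump code elsewhere in this patch implies):

static void parse_shared_mem_cfg(struct iwl_mvm *mvm,
                                 const struct iwl_shared_mem_cfg *cfg)
{
        int i;

        /* V1 firmware stops before rxfifo_addr; don't read past it */
        if (!fw_has_capa(&mvm->fw->ucode_capa,
                         IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
                return;

        for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++)
                mvm->shared_mem_cfg.internal_txfifo_size[i] =
                        le32_to_cpu(cfg->internal_txfifo_size[i]);
}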
 
 /**
  * VHT MU-MIMO group configuration
@@ -2002,4 +2050,60 @@ struct iwl_stored_beacon_notif {
        u8 data[MAX_STORED_BEACON_SIZE];
 } __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_1 */
 
+#define LQM_NUMBER_OF_STATIONS_IN_REPORT 16
+
+enum iwl_lqm_cmd_operatrions {
+       LQM_CMD_OPERATION_START_MEASUREMENT = 0x01,
+       LQM_CMD_OPERATION_STOP_MEASUREMENT = 0x02,
+};
+
+enum iwl_lqm_status {
+       LQM_STATUS_SUCCESS = 0,
+       LQM_STATUS_TIMEOUT = 1,
+       LQM_STATUS_ABORT = 2,
+};
+
+/**
+ * Link Quality Measurement command
+ * @cmd_operation: command operation to be performed (start or stop)
+ *     as defined above.
+ * @mac_id: MAC ID the measurement applies to.
+ * @measurement_time: time of the total measurement to be performed, in uSec.
+ * @timeout: maximum time allowed until a response is sent, in uSec.
+ */
+struct iwl_link_qual_msrmnt_cmd {
+       __le32 cmd_operation;
+       __le32 mac_id;
+       __le32 measurement_time;
+       __le32 timeout;
+} __packed /* LQM_CMD_API_S_VER_1 */;
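For illustration, a sketch of filling the start command (editorial; fill_lqm_start_cmd is an invented name, while iwl_mvm_send_lqm_cmd() used by the debugfs hook above presumably does something along these lines):

static void fill_lqm_start_cmd(struct iwl_link_qual_msrmnt_cmd *cmd,
                               u32 mac_id, u32 duration_usec, u32 timeout_usec)
{
        cmd->cmd_operation = cpu_to_le32(LQM_CMD_OPERATION_START_MEASUREMENT);
        cmd->mac_id = cpu_to_le32(mac_id);
        cmd->measurement_time = cpu_to_le32(duration_usec);
        cmd->timeout = cpu_to_le32(timeout_usec);
}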
+
+/**
+ * Link Quality Measurement notification
+ *
+ * @frequent_stations_air_time: an array containing the total air time
+ *     (in uSec) used by the most frequently transmitting stations.
+ * @number_of_stations: the number of unique stations included in the array
+ *     (a number between 0 and 16)
+ * @total_air_time_other_stations: the total air time (uSec) used by all the
+ *     stations which are not included in the above report.
+ * @time_in_measurement_window: the total time in uSec in which a measurement
+ *     took place.
+ * @tx_frame_dropped: the number of TX frames dropped due to retry limit during
+ *     measurement
+ * @mac_id: MAC ID the measurement applies to.
+ * @status: return status. May be one of the LQM_STATUS_* values defined above.
+ * @reserved: reserved.
+ */
+struct iwl_link_qual_msrmnt_notif {
+       __le32 frequent_stations_air_time[LQM_NUMBER_OF_STATIONS_IN_REPORT];
+       __le32 number_of_stations;
+       __le32 total_air_time_other_stations;
+       __le32 time_in_measurement_window;
+       __le32 tx_frame_dropped;
+       __le32 mac_id;
+       __le32 status;
+       __le32 reserved[3];
+} __packed; /* LQM_MEASUREMENT_COMPLETE_NTF_API_S_VER1 */
+
 #endif /* __fw_api_h__ */
index 4856eac120f60d5eff2e9707e828e541761a76ae..e25171f9b407545d0d052ed3978f4574034fd768 100644 (file)
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015        Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015        Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -265,6 +265,65 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
                *dump_data = iwl_fw_error_next_data(*dump_data);
        }
 
+       if (fw_has_capa(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
+               /* Pull UMAC internal TXF data from all TXFs */
+               for (i = 0;
+                    i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
+                    i++) {
+                       /* Mark the number of TXF we're pulling now */
+                       iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i);
+
+                       fifo_hdr = (void *)(*dump_data)->data;
+                       fifo_data = (void *)fifo_hdr->data;
+                       fifo_len = mvm->shared_mem_cfg.internal_txfifo_size[i];
+
+                       /* No need to try to read the data if the length is 0 */
+                       if (fifo_len == 0)
+                               continue;
+
+                       /* Add a TLV for the internal FIFOs */
+                       (*dump_data)->type =
+                               cpu_to_le32(IWL_FW_ERROR_DUMP_INTERNAL_TXF);
+                       (*dump_data)->len =
+                               cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
+
+                       fifo_hdr->fifo_num = cpu_to_le32(i);
+                       fifo_hdr->available_bytes =
+                               cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+                                                               TXF_CPU2_FIFO_ITEM_CNT));
+                       fifo_hdr->wr_ptr =
+                               cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+                                                               TXF_CPU2_WR_PTR));
+                       fifo_hdr->rd_ptr =
+                               cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+                                                               TXF_CPU2_RD_PTR));
+                       fifo_hdr->fence_ptr =
+                               cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+                                                               TXF_CPU2_FENCE_PTR));
+                       fifo_hdr->fence_mode =
+                               cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+                                                               TXF_CPU2_LOCK_FENCE));
+
+                       /* Set TXF_CPU2_READ_MODIFY_ADDR to TXF_CPU2_WR_PTR */
+                       iwl_trans_write_prph(mvm->trans,
+                                            TXF_CPU2_READ_MODIFY_ADDR,
+                                            TXF_CPU2_WR_PTR);
+
+                       /* Dummy-read to advance the read pointer to head */
+                       iwl_trans_read_prph(mvm->trans,
+                                           TXF_CPU2_READ_MODIFY_DATA);
+
+                       /* Read FIFO */
+                       fifo_len /= sizeof(u32); /* Size in DWORDS */
+                       for (j = 0; j < fifo_len; j++)
+                               fifo_data[j] =
+                                       iwl_trans_read_prph(mvm->trans,
+                                                           TXF_CPU2_READ_MODIFY_DATA);
+                       *dump_data = iwl_fw_error_next_data(*dump_data);
+               }
+       }
+
        iwl_trans_release_nic_access(mvm->trans, &flags);
 }
 
@@ -429,9 +488,11 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
        struct iwl_fw_error_dump_trigger_desc *dump_trig;
        struct iwl_mvm_dump_ptrs *fw_error_dump;
        u32 sram_len, sram_ofs;
+       struct iwl_fw_dbg_mem_seg_tlv * const *fw_dbg_mem =
+               mvm->fw->dbg_mem_tlv;
        u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0;
-       u32 smem_len = mvm->cfg->smem_len;
-       u32 sram2_len = mvm->cfg->dccm2_len;
+       u32 smem_len = mvm->fw->dbg_dynamic_mem ? 0 : mvm->cfg->smem_len;
+       u32 sram2_len = mvm->fw->dbg_dynamic_mem ? 0 : mvm->cfg->dccm2_len;
        bool monitor_dump_only = false;
        int i;
 
@@ -494,6 +555,22 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                                         sizeof(struct iwl_fw_error_dump_fifo);
                }
 
+               if (fw_has_capa(&mvm->fw->ucode_capa,
+                               IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
+                       for (i = 0;
+                            i < ARRAY_SIZE(mem_cfg->internal_txfifo_size);
+                            i++) {
+                               if (!mem_cfg->internal_txfifo_size[i])
+                                       continue;
+
+                               /* Add header info */
+                               fifo_data_len +=
+                                       mem_cfg->internal_txfifo_size[i] +
+                                       sizeof(*dump_data) +
+                                       sizeof(struct iwl_fw_error_dump_fifo);
+                       }
+               }
+
                /* Make room for PRPH registers */
                for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
                        /* The range includes both boundaries */
@@ -511,7 +588,6 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 
        file_len = sizeof(*dump_file) +
                   sizeof(*dump_data) * 2 +
-                  sram_len + sizeof(*dump_mem) +
                   fifo_data_len +
                   prph_len +
                   radio_len +
@@ -525,8 +601,16 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
        if (sram2_len)
                file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
 
+       /* Make room for MEM segments */
+       for (i = 0; i < ARRAY_SIZE(mvm->fw->dbg_mem_tlv); i++) {
+               if (fw_dbg_mem[i])
+                       file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
+                               le32_to_cpu(fw_dbg_mem[i]->len);
+       }
+
        /* Make room for fw's virtual image pages, if it exists */
-       if (mvm->fw->img[mvm->cur_ucode].paging_mem_size)
+       if (mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
+           mvm->fw_paging_db[0].fw_paging_block)
                file_len += mvm->num_of_paging_blk *
                        (sizeof(*dump_data) +
                         sizeof(struct iwl_fw_error_dump_paging) +
@@ -550,6 +634,9 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
                            mvm->fw_dump_desc->len;
 
+       if (!mvm->fw->dbg_dynamic_mem)
+               file_len += sram_len + sizeof(*dump_mem);
+
        dump_file = vzalloc(file_len);
        if (!dump_file) {
                kfree(fw_error_dump);
@@ -599,16 +686,36 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
        if (monitor_dump_only)
                goto dump_trans_data;
 
-       dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
-       dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
-       dump_mem = (void *)dump_data->data;
-       dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
-       dump_mem->offset = cpu_to_le32(sram_ofs);
-       iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data,
-                                sram_len);
+       if (!mvm->fw->dbg_dynamic_mem) {
+               dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
+               dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
+               dump_mem = (void *)dump_data->data;
+               dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
+               dump_mem->offset = cpu_to_le32(sram_ofs);
+               iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data,
+                                        sram_len);
+               dump_data = iwl_fw_error_next_data(dump_data);
+       }
+
+       for (i = 0; i < ARRAY_SIZE(mvm->fw->dbg_mem_tlv); i++) {
+               if (fw_dbg_mem[i]) {
+                       u32 len = le32_to_cpu(fw_dbg_mem[i]->len);
+                       u32 ofs = le32_to_cpu(fw_dbg_mem[i]->ofs);
+
+                       dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
+                       dump_data->len = cpu_to_le32(len +
+                                       sizeof(*dump_mem));
+                       dump_mem = (void *)dump_data->data;
+                       dump_mem->type = fw_dbg_mem[i]->data_type;
+                       dump_mem->offset = cpu_to_le32(ofs);
+                       iwl_trans_read_mem_bytes(mvm->trans, ofs,
+                                                dump_mem->data,
+                                                len);
+                       dump_data = iwl_fw_error_next_data(dump_data);
+               }
+       }
 
        if (smem_len) {
-               dump_data = iwl_fw_error_next_data(dump_data);
                dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
                dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
                dump_mem = (void *)dump_data->data;
@@ -616,10 +723,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                dump_mem->offset = cpu_to_le32(mvm->cfg->smem_offset);
                iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->smem_offset,
                                         dump_mem->data, smem_len);
+               dump_data = iwl_fw_error_next_data(dump_data);
        }
 
        if (sram2_len) {
-               dump_data = iwl_fw_error_next_data(dump_data);
                dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
                dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
                dump_mem = (void *)dump_data->data;
@@ -627,11 +734,11 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                dump_mem->offset = cpu_to_le32(mvm->cfg->dccm2_offset);
                iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->dccm2_offset,
                                         dump_mem->data, sram2_len);
+               dump_data = iwl_fw_error_next_data(dump_data);
        }
 
        if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
            CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP) {
-               dump_data = iwl_fw_error_next_data(dump_data);
                dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
                dump_data->len = cpu_to_le32(IWL8260_ICCM_LEN +
                                             sizeof(*dump_mem));
@@ -640,16 +747,17 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                dump_mem->offset = cpu_to_le32(IWL8260_ICCM_OFFSET);
                iwl_trans_read_mem_bytes(mvm->trans, IWL8260_ICCM_OFFSET,
                                         dump_mem->data, IWL8260_ICCM_LEN);
+               dump_data = iwl_fw_error_next_data(dump_data);
        }
 
        /* Dump fw's virtual image */
-       if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) {
+       if (mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
+           mvm->fw_paging_db[0].fw_paging_block) {
                for (i = 1; i < mvm->num_of_paging_blk + 1; i++) {
                        struct iwl_fw_error_dump_paging *paging;
                        struct page *pages =
                                mvm->fw_paging_db[i].fw_paging_block;
 
-                       dump_data = iwl_fw_error_next_data(dump_data);
                        dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
                        dump_data->len = cpu_to_le32(sizeof(*paging) +
                                                     PAGING_BLOCK_SIZE);
@@ -657,10 +765,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                        paging->index = cpu_to_le32(i);
                        memcpy(paging->data, page_address(pages),
                               PAGING_BLOCK_SIZE);
+                       dump_data = iwl_fw_error_next_data(dump_data);
                }
        }
 
-       dump_data = iwl_fw_error_next_data(dump_data);
        if (prph_len)
                iwl_dump_prph(mvm->trans, &dump_data);
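
The hunks above rework the error-dump writer into one consistent two-pass pattern: every optional segment that actually exists is first counted into file_len, a single vzalloc() of that size follows, and each block now writes its header and payload before advancing with iwl_fw_error_next_data(), so a skipped segment (for instance SRAM when dbg_dynamic_mem is set) no longer leaves the cursor past an unwritten header. A minimal userspace sketch of the same pattern; the tlv/next_tlv names are hypothetical stand-ins for the driver's dump structures:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical TLV header mirroring the dump_data layout above. */
struct tlv {
	uint32_t type;
	uint32_t len;		/* payload length in bytes */
	uint8_t data[];
};

/* Advance to the next TLV slot, like iwl_fw_error_next_data(). */
static struct tlv *next_tlv(struct tlv *t)
{
	return (struct tlv *)(t->data + t->len);
}

int main(void)
{
	const uint32_t seg_len[] = { 16, 0, 32 };	/* 0 = segment absent */
	size_t file_len = 0, i;

	/* Pass 1: make room only for the segments that exist. */
	for (i = 0; i < 3; i++)
		if (seg_len[i])
			file_len += sizeof(struct tlv) + seg_len[i];

	uint8_t *buf = calloc(1, file_len);
	if (!buf)
		return 1;

	/* Pass 2: fill a segment, then advance; never advance first. */
	struct tlv *t = (struct tlv *)buf;
	for (i = 0; i < 3; i++) {
		if (!seg_len[i])
			continue;
		t->type = (uint32_t)i;
		t->len = seg_len[i];
		memset(t->data, 0xab, t->len);
		t = next_tlv(t);
	}

	printf("wrote %zu bytes of TLV data\n", file_len);
	free(buf);
	return 0;
}
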
 
index 594cd0dc7df937d8b495f9c2864de13ed51f795d..b70f4530f9602ed50869fef38f381ded6e59f5bb 100644 (file)
@@ -64,6 +64,7 @@
  *
  *****************************************************************************/
 #include <net/mac80211.h>
+#include <linux/netdevice.h>
 
 #include "iwl-trans.h"
 #include "iwl-op-mode.h"
@@ -114,14 +115,18 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
        struct iwl_rss_config_cmd cmd = {
                .flags = cpu_to_le32(IWL_RSS_ENABLE),
                .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
+                            IWL_RSS_HASH_TYPE_IPV4_UDP |
                             IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
                             IWL_RSS_HASH_TYPE_IPV6_TCP |
+                            IWL_RSS_HASH_TYPE_IPV6_UDP |
                             IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
        };
 
+       /* Do not direct RSS traffic to Q 0, which is our fallback queue */
        for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
-               cmd.indirection_table[i] = i % mvm->trans->num_rx_queues;
-       memcpy(cmd.secret_key, mvm->secret_key, sizeof(cmd.secret_key));
+               cmd.indirection_table[i] =
+                       1 + (i % (mvm->trans->num_rx_queues - 1));
+       netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
 
        return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
 }
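
The RSS hunk does two things: it reserves RX queue 0 as the fallback queue by spreading the indirection table only across queues 1..num_rx_queues-1, and it drops the driver-private random secret_key in favor of the kernel-wide netdev_rss_key_fill(). A small sketch of the indirection arithmetic, with an assumed queue count:

#include <stdio.h>

int main(void)
{
	const unsigned int num_rx_queues = 4;	/* assumed example value */
	unsigned int table[16];
	unsigned int i;

	/* Queue 0 stays free as the fallback queue, so RSS only
	 * spreads flows across queues 1..num_rx_queues-1. */
	for (i = 0; i < 16; i++)
		table[i] = 1 + (i % (num_rx_queues - 1));

	for (i = 0; i < 16; i++)
		printf("%u ", table[i]);	/* 1 2 3 1 2 3 ... never 0 */
	printf("\n");
	return 0;
}
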
@@ -144,9 +149,11 @@ void iwl_free_fw_paging(struct iwl_mvm *mvm)
 
                __free_pages(mvm->fw_paging_db[i].fw_paging_block,
                             get_order(mvm->fw_paging_db[i].fw_paging_size));
+               mvm->fw_paging_db[i].fw_paging_block = NULL;
        }
        kfree(mvm->trans->paging_download_buf);
        mvm->trans->paging_download_buf = NULL;
+       mvm->trans->paging_db = NULL;
 
        memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
 }
@@ -174,8 +181,12 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
                }
        }
 
-       if (sec_idx >= IWL_UCODE_SECTION_MAX) {
-               IWL_ERR(mvm, "driver didn't find paging image\n");
+       /*
+        * If paging is enabled, there should be at least 2 more sections left
+        * (one for CSS and one for Paging data)
+        */
+       if (sec_idx >= ARRAY_SIZE(image->sec) - 1) {
+               IWL_ERR(mvm, "Paging: Missing CSS and/or paging sections\n");
                iwl_free_fw_paging(mvm);
                return -EINVAL;
        }
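
The replaced check encodes the rule the new comment spells out: once the separator is found at sec_idx, both the CSS section (sec_idx) and the paging-data section (sec_idx + 1) must still fit inside the image's section array, so failing when sec_idx >= ARRAY_SIZE(image->sec) - 1 is exactly the "two more sections" requirement. A tiny sketch of that bounds check, with an assumed array size:

#include <stdio.h>

#define N_SECTIONS 16	/* assumed stand-in for ARRAY_SIZE(image->sec) */

/* After the separator at sec_idx we still need to read two entries:
 * sec_idx (CSS) and sec_idx + 1 (paging data). */
static int paging_sections_present(int sec_idx)
{
	return sec_idx < N_SECTIONS - 1;
}

int main(void)
{
	printf("%d %d\n",
	       paging_sections_present(3),			/* 1 */
	       paging_sections_present(N_SECTIONS - 1));	/* 0 */
	return 0;
}
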
@@ -410,7 +421,9 @@ static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
                goto exit;
        }
 
-       mvm->trans->paging_download_buf = kzalloc(MAX_PAGING_IMAGE_SIZE,
+       /* Add an extra page for headers */
+       mvm->trans->paging_download_buf = kzalloc(PAGING_BLOCK_SIZE +
+                                                 FW_PAGING_SIZE,
                                                  GFP_KERNEL);
        if (!mvm->trans->paging_download_buf) {
                ret = -ENOMEM;
@@ -641,7 +654,10 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
         */
 
        memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
-       mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;
+       if (iwl_mvm_is_dqa_supported(mvm))
+               mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
+       else
+               mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;
 
        for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
                atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
@@ -788,17 +804,22 @@ out:
 static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
 {
        struct iwl_host_cmd cmd = {
-               .id = SHARED_MEM_CFG,
                .flags = CMD_WANT_SKB,
                .data = { NULL, },
                .len = { 0, },
        };
-       struct iwl_rx_packet *pkt;
        struct iwl_shared_mem_cfg *mem_cfg;
+       struct iwl_rx_packet *pkt;
        u32 i;
 
        lockdep_assert_held(&mvm->mutex);
 
+       if (fw_has_capa(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
+               cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
+       else
+               cmd.id = SHARED_MEM_CFG;
+
        if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
                return;
 
@@ -824,6 +845,25 @@ static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
                le32_to_cpu(mem_cfg->page_buff_addr);
        mvm->shared_mem_cfg.page_buff_size =
                le32_to_cpu(mem_cfg->page_buff_size);
+
+       /* new API has more data */
+       if (fw_has_capa(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
+               mvm->shared_mem_cfg.rxfifo_addr =
+                       le32_to_cpu(mem_cfg->rxfifo_addr);
+               mvm->shared_mem_cfg.internal_txfifo_addr =
+                       le32_to_cpu(mem_cfg->internal_txfifo_addr);
+
+               BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
+                            sizeof(mem_cfg->internal_txfifo_size));
+
+               for (i = 0;
+                    i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
+                    i++)
+                       mvm->shared_mem_cfg.internal_txfifo_size[i] =
+                               le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
+       }
+
        IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");
 
        iwl_free_resp(&cmd);
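
This hunk selects the wide SHARED_MEM_CFG_CMD in SYSTEM_GROUP only when the firmware advertises IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG, and only then parses the extra little-endian fields out of the response. A userspace sketch of that capability-gated parsing; the struct names are hypothetical, and le32toh()/htole32() stand in for le32_to_cpu()/cpu_to_le32():

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define N_FIFOS 6	/* assumed stand-in for TX_FIFO_INTERNAL_MAX_NUM */

/* Wire format (little-endian), loosely mirroring the fw response. */
struct wire_cfg {
	uint32_t page_buff_size;
	uint32_t internal_txfifo_size[N_FIFOS];	/* new-API fields */
};

struct host_cfg {
	uint32_t page_buff_size;
	uint32_t internal_txfifo_size[N_FIFOS];
};

static void parse_cfg(const struct wire_cfg *w, struct host_cfg *h,
		      int has_extended_capa)
{
	int i;

	h->page_buff_size = le32toh(w->page_buff_size);

	/* The trailing fields only exist when the firmware advertised
	 * the extended shared-mem capability. */
	if (!has_extended_capa)
		return;
	for (i = 0; i < N_FIFOS; i++)
		h->internal_txfifo_size[i] =
			le32toh(w->internal_txfifo_size[i]);
}

int main(void)
{
	struct wire_cfg w = { .page_buff_size = htole32(4096) };
	struct host_cfg h;

	memset(&h, 0, sizeof(h));
	w.internal_txfifo_size[0] = htole32(512);
	parse_cfg(&w, &h, 1);
	printf("page buf %u, fifo0 %u\n",
	       h.page_buff_size, h.internal_txfifo_size[0]);
	return 0;
}
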
@@ -942,7 +982,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
                goto error;
 
        /* Add all the PHY contexts */
-       chan = &mvm->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels[0];
+       chan = &mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[0];
        cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
        for (i = 0; i < NUM_PHY_CTX; i++) {
                /*
index fba1cd2ce1ec3df402649d38c128c83bc741ce28..456067b2f48d27eace13e417b1600a2c9f3b4f07 100644 (file)
@@ -252,10 +252,14 @@ unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
                .exclude_vif = exclude_vif,
                .used_hw_queues =
                        BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
-                       BIT(mvm->aux_queue) |
-                       BIT(IWL_MVM_CMD_QUEUE),
+                       BIT(mvm->aux_queue),
        };
 
+       if (iwl_mvm_is_dqa_supported(mvm))
+               data.used_hw_queues |= BIT(IWL_MVM_DQA_CMD_QUEUE);
+       else
+               data.used_hw_queues |= BIT(IWL_MVM_CMD_QUEUE);
+
        lockdep_assert_held(&mvm->mutex);
 
        /* mark all VIF used hw queues */
@@ -425,12 +429,17 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
                return 0;
        }
 
-       /* Find available queues, and allocate them to the ACs */
+       /*
+        * Find available queues, and allocate them to the ACs. When in
+        * DQA-mode they aren't really used, and this is done only so the
+        * mac80211 ieee80211_check_queues() function won't fail
+        */
        for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
                u8 queue = find_first_zero_bit(&used_hw_queues,
                                               mvm->first_agg_queue);
 
-               if (queue >= mvm->first_agg_queue) {
+               if (!iwl_mvm_is_dqa_supported(mvm) &&
+                   queue >= mvm->first_agg_queue) {
                        IWL_ERR(mvm, "Failed to allocate queue\n");
                        ret = -EIO;
                        goto exit_fail;
@@ -442,13 +451,19 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
 
        /* Allocate the CAB queue for softAP and GO interfaces */
        if (vif->type == NL80211_IFTYPE_AP) {
-               u8 queue = find_first_zero_bit(&used_hw_queues,
-                                              mvm->first_agg_queue);
+               u8 queue;
 
-               if (queue >= mvm->first_agg_queue) {
-                       IWL_ERR(mvm, "Failed to allocate cab queue\n");
-                       ret = -EIO;
-                       goto exit_fail;
+               if (!iwl_mvm_is_dqa_supported(mvm)) {
+                       queue = find_first_zero_bit(&used_hw_queues,
+                                                   mvm->first_agg_queue);
+
+                       if (queue >= mvm->first_agg_queue) {
+                               IWL_ERR(mvm, "Failed to allocate cab queue\n");
+                               ret = -EIO;
+                               goto exit_fail;
+                       }
+               } else {
+                       queue = IWL_MVM_DQA_GCAST_QUEUE;
                }
 
                vif->cab_queue = queue;
@@ -495,6 +510,10 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                                      IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
                /* fall through */
        default:
+               /* If DQA is supported - queues will be enabled when needed */
+               if (iwl_mvm_is_dqa_supported(mvm))
+                       break;
+
                for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
                        iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac],
                                              vif->hw_queue[ac],
@@ -523,6 +542,14 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                                    IWL_MAX_TID_COUNT, 0);
                /* fall through */
        default:
+               /*
+                * If DQA is supported - queues were already disabled, since in
+                * DQA-mode the queues are a property of the STA and not of the
+                * vif, and at this point the STA was already deleted
+                */
+               if (iwl_mvm_is_dqa_supported(mvm))
+                       break;
+
                for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
                        iwl_mvm_disable_txq(mvm, vif->hw_queue[ac],
                                            vif->hw_queue[ac],
@@ -532,7 +559,7 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
 static void iwl_mvm_ack_rates(struct iwl_mvm *mvm,
                              struct ieee80211_vif *vif,
-                             enum ieee80211_band band,
+                             enum nl80211_band band,
                              u8 *cck_rates, u8 *ofdm_rates)
 {
        struct ieee80211_supported_band *sband;
@@ -703,7 +730,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
        rcu_read_lock();
        chanctx = rcu_dereference(vif->chanctx_conf);
        iwl_mvm_ack_rates(mvm, vif, chanctx ? chanctx->def.chan->band
-                                           : IEEE80211_BAND_2GHZ,
+                                           : NL80211_BAND_2GHZ,
                          &cck_ack_rates, &ofdm_ack_rates);
        rcu_read_unlock();
 
@@ -1038,7 +1065,7 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
                cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) <<
                            RATE_MCS_ANT_POS);
 
-       if (info->band == IEEE80211_BAND_5GHZ || vif->p2p) {
+       if (info->band == NL80211_BAND_5GHZ || vif->p2p) {
                rate = IWL_FIRST_OFDM_RATE;
        } else {
                rate = IWL_FIRST_CCK_RATE;
@@ -1489,7 +1516,7 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
        rx_status.device_timestamp = le32_to_cpu(sb->system_time);
        rx_status.band =
                (sb->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
-                               IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+                               NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
        rx_status.freq =
                ieee80211_channel_to_frequency(le16_to_cpu(sb->channel),
                                               rx_status.band);
index 76e649c680a16bb5930f7c6d137aa429b779360d..5ace468070cbe4721ecb2fd3a3c2a0e9bc48e8c6 100644 (file)
@@ -550,18 +550,18 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        else
                mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;
 
-       if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
-               hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
-                       &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
-       if (mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels) {
-               hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
-                       &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
+       if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels)
+               hw->wiphy->bands[NL80211_BAND_2GHZ] =
+                       &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
+       if (mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels) {
+               hw->wiphy->bands[NL80211_BAND_5GHZ] =
+                       &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
 
                if (fw_has_capa(&mvm->fw->ucode_capa,
                                IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
                    fw_has_api(&mvm->fw->ucode_capa,
                               IWL_UCODE_TLV_API_LQ_SS_PARAMS))
-                       hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |=
+                       hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap |=
                                IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
        }
 
@@ -665,12 +665,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        }
 
        hw->netdev_features |= mvm->cfg->features;
-       if (!iwl_mvm_is_csum_supported(mvm))
-               hw->netdev_features &= ~NETIF_F_RXCSUM;
-
-       if (IWL_MVM_SW_TX_CSUM_OFFLOAD)
-               hw->netdev_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-                       NETIF_F_TSO | NETIF_F_TSO6;
+       if (!iwl_mvm_is_csum_supported(mvm)) {
+               hw->netdev_features &= ~(IWL_TX_CSUM_NETIF_FLAGS |
+                                        NETIF_F_RXCSUM);
+               /* We may support SW TX CSUM */
+               if (IWL_MVM_SW_TX_CSUM_OFFLOAD)
+                       hw->netdev_features |= IWL_TX_CSUM_NETIF_FLAGS;
+       }
 
        ret = ieee80211_register_hw(mvm->hw);
        if (ret)
@@ -992,6 +993,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
        iwl_mvm_reset_phy_ctxts(mvm);
        memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
        memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
+       memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames));
        memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
        memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
        memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
@@ -1147,6 +1149,8 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
        /* the fw is stopped, the aux sta is dead: clean up driver state */
        iwl_mvm_del_aux_sta(mvm);
 
+       iwl_free_fw_paging(mvm);
+
        /*
         * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
         * won't be called in this case).
@@ -1178,6 +1182,7 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
 
        flush_work(&mvm->d0i3_exit_work);
        flush_work(&mvm->async_handlers_wk);
+       flush_work(&mvm->add_stream_wk);
        cancel_delayed_work_sync(&mvm->fw_dump_wk);
        iwl_mvm_free_fw_dump_desc(mvm);
 
@@ -1821,6 +1826,11 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
        if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
                iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
 
+       if (changes & BSS_CHANGED_ASSOC && !bss_conf->assoc &&
+           mvmvif->lqm_active)
+               iwl_mvm_send_lqm_cmd(vif, LQM_CMD_OPERATION_STOP_MEASUREMENT,
+                                    0, 0);
+
        /*
         * If we're not associated yet, take the (new) BSSID before associating
         * so the firmware knows. If we're already associated, then use the old
@@ -2340,7 +2350,8 @@ static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                return;
        }
 
-       if (iwlwifi_mod_params.uapsd_disable) {
+       if (!vif->p2p &&
+           (iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS)) {
                vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
                return;
        }
@@ -2376,6 +2387,22 @@ iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm,
                                    peer_addr, action);
 }
 
+static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
+                                            struct iwl_mvm_sta *mvm_sta)
+{
+       struct iwl_mvm_tid_data *tid_data;
+       struct sk_buff *skb;
+       int i;
+
+       spin_lock_bh(&mvm_sta->lock);
+       for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
+               tid_data = &mvm_sta->tid_data[i];
+               while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames)))
+                       ieee80211_free_txskb(mvm->hw, skb);
+       }
+       spin_unlock_bh(&mvm_sta->lock);
+}
+
 static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
                                 struct ieee80211_vif *vif,
                                 struct ieee80211_sta *sta,
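
iwl_mvm_purge_deferred_tx_frames() drains every TID's deferred_tx_frames list while holding the station spinlock, so nothing can be requeued onto a list that is being torn down. The same drain-under-lock shape in plain C with a pthread mutex; every name here is a hypothetical stand-in:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct frame {
	struct frame *next;
};

#define N_TIDS 9	/* assumed IWL_MAX_TID_COUNT + 1 */

static pthread_mutex_t sta_lock = PTHREAD_MUTEX_INITIALIZER;
static struct frame *deferred[N_TIDS];	/* deferred_tx_frames analog */

static void purge_deferred_frames(void)
{
	int i;

	/* Hold the station lock across the whole drain so no new
	 * frame can be deferred onto a list we are freeing. */
	pthread_mutex_lock(&sta_lock);
	for (i = 0; i < N_TIDS; i++) {
		struct frame *f;

		while ((f = deferred[i])) {
			deferred[i] = f->next;
			free(f);	/* ieee80211_free_txskb() analog */
		}
	}
	pthread_mutex_unlock(&sta_lock);
}

int main(void)
{
	deferred[3] = calloc(1, sizeof(struct frame));
	purge_deferred_frames();
	printf("drained: %s\n", deferred[3] ? "no" : "yes");
	return 0;
}
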
@@ -2396,6 +2423,33 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
        /* if a STA is being removed, reuse its ID */
        flush_work(&mvm->sta_drained_wk);
 
+       /*
+        * If we are in a STA removal flow and in DQA mode:
+        *
+        * This is after the sync_rcu part, so the queues have already been
+        * flushed. No more TXs on their way in mac80211's path, and no more in
+        * the queues.
+        * Also, we won't be getting any new TX frames for this station.
+        * What we might have are deferred TX frames that need to be taken care
+        * of.
+        *
+        * Drop any still-queued deferred frames before removing the STA, and
+        * make sure the worker is no longer handling frames for this STA.
+        */
+       if (old_state == IEEE80211_STA_NONE &&
+           new_state == IEEE80211_STA_NOTEXIST &&
+           iwl_mvm_is_dqa_supported(mvm)) {
+               struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
+               iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta);
+               flush_work(&mvm->add_stream_wk);
+
+               /*
+                * No need to make sure deferred TX indication is off since the
+                * worker will already remove it if it was on
+                */
+       }
+
        mutex_lock(&mvm->mutex);
        if (old_state == IEEE80211_STA_NOTEXIST &&
            new_state == IEEE80211_STA_NONE) {
@@ -2859,7 +2913,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
                        cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)),
                .sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id),
                /* Set the channel info data */
-               .channel_info.band = (channel->band == IEEE80211_BAND_2GHZ) ?
+               .channel_info.band = (channel->band == NL80211_BAND_2GHZ) ?
                        PHY_BAND_24 : PHY_BAND_5,
                .channel_info.channel = channel->hw_value,
                .channel_info.width = PHY_VHT_CHANNEL_MODE20,
@@ -3628,6 +3682,11 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
 
                break;
        case NL80211_IFTYPE_STATION:
+               if (mvmvif->lqm_active)
+                       iwl_mvm_send_lqm_cmd(vif,
+                                            LQM_CMD_OPERATION_STOP_MEASUREMENT,
+                                            0, 0);
+
                /* Schedule the time event to a bit before beacon 1,
                 * to make sure we're in the new channel when the
                 * GO/AP arrives.
@@ -3727,6 +3786,10 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
        if (!vif || vif->type != NL80211_IFTYPE_STATION)
                return;
 
+       /* Make sure we're done with the deferred traffic before flushing */
+       if (iwl_mvm_is_dqa_supported(mvm))
+               flush_work(&mvm->add_stream_wk);
+
        mutex_lock(&mvm->mutex);
        mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
index 9abbc93e3c066627b983e854b4d08abee4866196..85800ba0c667d39969aa7efab620fb13fd67d1db 100644 (file)
@@ -208,7 +208,7 @@ enum iwl_power_scheme {
 };
 
 #define IWL_CONN_MAX_LISTEN_INTERVAL   10
-#define IWL_UAPSD_MAX_SP               IEEE80211_WMM_IE_STA_QOSINFO_SP_2
+#define IWL_UAPSD_MAX_SP               IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 enum iwl_dbgfs_pm_mask {
@@ -453,6 +453,12 @@ struct iwl_mvm_vif {
 
        /* TCP Checksum Offload */
        netdev_features_t features;
+
+       /*
+        * link quality measurement - used to check whether this interface
+        * is in the middle of a link quality measurement
+        */
+       bool lqm_active;
 };
 
 static inline struct iwl_mvm_vif *
@@ -602,6 +608,9 @@ struct iwl_mvm_shared_mem_cfg {
        u32 rxfifo_size[RX_FIFO_MAX_NUM];
        u32 page_buff_addr;
        u32 page_buff_size;
+       u32 rxfifo_addr;
+       u32 internal_txfifo_addr;
+       u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
 };
 
 struct iwl_mvm {
@@ -656,10 +665,17 @@ struct iwl_mvm {
                /* Map to HW queue */
                u32 hw_queue_to_mac80211;
                u8 hw_queue_refcount;
+               u8 ra_sta_id; /* The RA this queue is mapped to, if any */
+               /*
+                * This is to mark that queue is reserved for a STA but not yet
+                * allocated. This is needed to make sure we have at least one
+                * available queue to use when adding a new STA
+                */
                bool setup_reserved;
                u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
        } queue_info[IWL_MAX_HW_QUEUES];
        spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
+       struct work_struct add_stream_wk; /* To add streams to queues */
        atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];
 
        const char *nvm_file_name;
@@ -679,11 +695,11 @@ struct iwl_mvm {
        struct iwl_rx_phy_info last_phy_info;
        struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT];
        struct work_struct sta_drained_wk;
+       unsigned long sta_deferred_frames[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
        unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
        atomic_t pending_frames[IWL_MVM_STATION_COUNT];
        u32 tfd_drained[IWL_MVM_STATION_COUNT];
        u8 rx_ba_sessions;
-       u32 secret_key[IWL_RSS_HASH_KEY_CNT];
 
        /* configured by mac80211 */
        u32 rts_threshold;
@@ -694,6 +710,7 @@ struct iwl_mvm {
        struct iwl_mcast_filter_cmd *mcast_filter_cmd;
        enum iwl_mvm_scan_type scan_type;
        enum iwl_mvm_sched_scan_pass_all_states sched_scan_pass_all;
+       struct timer_list scan_timer;
 
        /* max number of simultaneous scans the FW supports */
        unsigned int max_scans;
@@ -1063,7 +1080,8 @@ bool iwl_mvm_is_p2p_standalone_uapsd_supported(struct iwl_mvm *mvm)
 {
        return fw_has_capa(&mvm->fw->ucode_capa,
                           IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD) &&
-               IWL_MVM_P2P_UAPSD_STANDALONE;
+               !(iwlwifi_mod_params.uapsd_disable &
+                 IWL_DISABLE_UAPSD_P2P_CLIENT);
 }
 
 static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm)
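
Together with the iwl_mvm_check_uapsd() hunk earlier, this change turns the old boolean uapsd_disable module parameter into a bitmask, so U-APSD can be disabled independently for BSS links and for P2P-client links. A sketch of the flag check, with assumed bit values standing in for IWL_DISABLE_UAPSD_BSS and IWL_DISABLE_UAPSD_P2P_CLIENT:

#include <stdio.h>

#define DISABLE_UAPSD_BSS		0x1	/* assumed values */
#define DISABLE_UAPSD_P2P_CLIENT	0x2

/* Module-parameter analog: disable U-APSD on BSS links only. */
static unsigned int uapsd_disable = DISABLE_UAPSD_BSS;

static int uapsd_allowed(int is_p2p)
{
	unsigned int bit = is_p2p ? DISABLE_UAPSD_P2P_CLIENT
				  : DISABLE_UAPSD_BSS;

	return !(uapsd_disable & bit);
}

int main(void)
{
	printf("BSS: %d, P2P client: %d\n",
	       uapsd_allowed(0), uapsd_allowed(1));	/* 0 1 */
	return 0;
}
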
@@ -1115,9 +1133,9 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm);
 
 /* Utils */
 int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
-                                       enum ieee80211_band band);
+                                       enum nl80211_band band);
 void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
-                              enum ieee80211_band band,
+                              enum nl80211_band band,
                               struct ieee80211_tx_rate *r);
 u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
 void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
@@ -1297,6 +1315,7 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm);
 int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify);
 int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
 void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
+void iwl_mvm_scan_timeout(unsigned long data);
 
 /* Scheduled scan */
 void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
@@ -1449,26 +1468,10 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
 bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant);
 bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm);
 bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
-                                   enum ieee80211_band band);
+                                   enum nl80211_band band);
 u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
                           struct ieee80211_tx_info *info, u8 ac);
 
-bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm);
-void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm);
-int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm);
-void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
-                                 struct iwl_rx_cmd_buffer *rxb);
-void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-                              enum ieee80211_rssi_event_data);
-u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm,
-                                   struct ieee80211_sta *sta);
-bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm,
-                                        struct ieee80211_sta *sta);
-bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm,
-                                       enum ieee80211_band band);
-void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
-                                      struct iwl_rx_cmd_buffer *rxb);
-
 /* beacon filtering */
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 void
@@ -1634,4 +1637,10 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
 void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                             const char *errmsg);
 
+/* Link Quality Measurement */
+int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif,
+                        enum iwl_lqm_cmd_operatrions operation,
+                        u32 duration, u32 timeout);
+bool iwl_mvm_lqm_active(struct iwl_mvm *mvm);
+
 #endif /* __IWL_MVM_H__ */
index 5e8ab796d5bc06b86fcbdab0f48edb2698074579..8bfb8e06a90c4dd24765ca56aaf7a16abaa0f25b 100644 (file)
@@ -292,7 +292,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
        RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif,
                   RX_HANDLER_ASYNC_LOCKED),
        RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
-                      iwl_mvm_temp_notif, RX_HANDLER_ASYNC_LOCKED),
+                      iwl_mvm_temp_notif, RX_HANDLER_ASYNC_UNLOCKED),
        RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
                       iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC),
 
@@ -418,6 +418,21 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
        HCMD_NAME(REPLY_DEBUG_CMD),
 };
 
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
+       HCMD_NAME(SHARED_MEM_CFG_CMD),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
+       HCMD_NAME(LINK_QUALITY_MEASUREMENT_CMD),
+       HCMD_NAME(LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF),
+};
+
 /* Please keep this array *SORTED* by hex value.
  * Access is done through binary search
  */
@@ -449,6 +464,8 @@ static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
 static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
        [LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
        [LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
+       [SYSTEM_GROUP] = HCMD_ARR(iwl_mvm_system_names),
+       [MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names),
        [PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
        [DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
        [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
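
The new name tables follow the convention the comments insist on: each array stays sorted by command id so lookups can use binary search. A self-contained sketch of such an id-to-name lookup via bsearch(); the struct and the entries are hypothetical:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct iwl_hcmd_names. */
struct cmd_name {
	uint8_t id;
	const char *name;
};

/* Must stay sorted by id for bsearch() to work. */
static const struct cmd_name names[] = {
	{ 0x01, "ALIVE" },
	{ 0x20, "ADD_STA" },
	{ 0x9c, "SCAN_REQ" },
};

static int cmp_id(const void *key, const void *elem)
{
	uint8_t id = *(const uint8_t *)key;
	const struct cmd_name *c = elem;

	return (int)id - (int)c->id;
}

static const char *cmd_lookup(uint8_t id)
{
	const struct cmd_name *c =
		bsearch(&id, names, sizeof(names) / sizeof(names[0]),
			sizeof(names[0]), cmp_id);

	return c ? c->name : "UNKNOWN";
}

int main(void)
{
	printf("%s %s\n", cmd_lookup(0x20), cmd_lookup(0x42));
	return 0;
}
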
@@ -562,6 +579,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
        INIT_DELAYED_WORK(&mvm->fw_dump_wk, iwl_mvm_fw_error_dump_wk);
        INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
+       INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
 
        spin_lock_init(&mvm->d0i3_tx_lock);
        spin_lock_init(&mvm->refs_lock);
@@ -601,7 +619,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        trans_cfg.command_groups = iwl_mvm_groups;
        trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
 
-       trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE;
+       if (iwl_mvm_is_dqa_supported(mvm))
+               trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;
+       else
+               trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE;
        trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
        trans_cfg.scd_set_active = true;
 
@@ -707,8 +728,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 
        iwl_mvm_tof_init(mvm);
 
-       /* init RSS hash key */
-       get_random_bytes(mvm->secret_key, sizeof(mvm->secret_key));
+       setup_timer(&mvm->scan_timer, iwl_mvm_scan_timeout,
+                   (unsigned long)mvm);
 
        return op_mode;
 
@@ -761,10 +782,13 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
        for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
                kfree(mvm->nvm_sections[i].data);
 
-       iwl_free_fw_paging(mvm);
-
        iwl_mvm_tof_clean(mvm);
 
+       del_timer_sync(&mvm->scan_timer);
+
+       mutex_destroy(&mvm->mutex);
+       mutex_destroy(&mvm->d0i3_suspend_mutex);
+
        ieee80211_free_hw(mvm->hw);
 }
 
index 6e6a56f2153d6e971207ef7ed6a5e61fecc6f5fc..95138830b9f8e4ee8ba83b68278f0478e313f47d 100644 (file)
@@ -147,7 +147,7 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
        u8 active_cnt, idle_cnt;
 
        /* Set the channel info data */
-       cmd->ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ?
+       cmd->ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ?
              PHY_BAND_24 : PHY_BAND_5);
 
        cmd->ci.channel = chandef->chan->hw_value;
index f313910cd0269f9e764b527ae886a49b1d9e4ef7..7b1f6ad6062b89c8b0bb3b08331cc812e7515dcf 100644 (file)
@@ -227,7 +227,7 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
                        cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW);
        }
 
-       cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP;
+       cmd->uapsd_max_sp = mvm->hw->uapsd_max_sp_len;
 
        if (mvm->cur_ucode == IWL_UCODE_WOWLAN || cmd->flags &
            cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
index 61d0a8cd13f918b0a75fbce2840751b252b3836d..81dd2f6a48a577e8b21bc54e655502c811d417f8 100644 (file)
@@ -829,7 +829,7 @@ static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm,
 
 /* Convert a ucode rate into an rs_rate object */
 static int rs_rate_from_ucode_rate(const u32 ucode_rate,
-                                  enum ieee80211_band band,
+                                  enum nl80211_band band,
                                   struct rs_rate *rate)
 {
        u32 ant_msk = ucode_rate & RATE_MCS_ANT_ABC_MSK;
@@ -848,7 +848,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
        if (!(ucode_rate & RATE_MCS_HT_MSK) &&
            !(ucode_rate & RATE_MCS_VHT_MSK)) {
                if (num_of_ant == 1) {
-                       if (band == IEEE80211_BAND_5GHZ)
+                       if (band == NL80211_BAND_5GHZ)
                                rate->type = LQ_LEGACY_A;
                        else
                                rate->type = LQ_LEGACY_G;
@@ -1043,7 +1043,7 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
                return;
        } else if (is_siso(rate)) {
                /* Downgrade to Legacy if we were in SISO */
-               if (lq_sta->band == IEEE80211_BAND_5GHZ)
+               if (lq_sta->band == NL80211_BAND_5GHZ)
                        rate->type = LQ_LEGACY_A;
                else
                        rate->type = LQ_LEGACY_G;
@@ -1850,7 +1850,7 @@ static int rs_switch_to_column(struct iwl_mvm *mvm,
        rate->ant = column->ant;
 
        if (column->mode == RS_LEGACY) {
-               if (lq_sta->band == IEEE80211_BAND_5GHZ)
+               if (lq_sta->band == NL80211_BAND_5GHZ)
                        rate->type = LQ_LEGACY_A;
                else
                        rate->type = LQ_LEGACY_G;
@@ -2020,7 +2020,7 @@ static void rs_get_adjacent_txp(struct iwl_mvm *mvm, int index,
 }
 
 static bool rs_tpc_allowed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-                          struct rs_rate *rate, enum ieee80211_band band)
+                          struct rs_rate *rate, enum nl80211_band band)
 {
        int index = rate->index;
        bool cam = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM);
@@ -2126,7 +2126,7 @@ static bool rs_tpc_perform(struct iwl_mvm *mvm,
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        struct ieee80211_vif *vif = mvm_sta->vif;
        struct ieee80211_chanctx_conf *chanctx_conf;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        struct iwl_rate_scale_data *window;
        struct rs_rate *rate = &tbl->rate;
        enum tpc_action action;
@@ -2148,7 +2148,7 @@ static bool rs_tpc_perform(struct iwl_mvm *mvm,
        rcu_read_lock();
        chanctx_conf = rcu_dereference(vif->chanctx_conf);
        if (WARN_ON(!chanctx_conf))
-               band = IEEE80211_NUM_BANDS;
+               band = NUM_NL80211_BANDS;
        else
                band = chanctx_conf->def.chan->band;
        rcu_read_unlock();
@@ -2606,7 +2606,7 @@ static void rs_init_optimal_rate(struct iwl_mvm *mvm,
                rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
        else if (lq_sta->max_siso_rate_idx != IWL_RATE_INVALID)
                rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
-       else if (lq_sta->band == IEEE80211_BAND_5GHZ)
+       else if (lq_sta->band == NL80211_BAND_5GHZ)
                rate->type = LQ_LEGACY_A;
        else
                rate->type = LQ_LEGACY_G;
@@ -2623,7 +2623,7 @@ static void rs_init_optimal_rate(struct iwl_mvm *mvm,
        } else {
                lq_sta->optimal_rate_mask = lq_sta->active_legacy_rate;
 
-               if (lq_sta->band == IEEE80211_BAND_5GHZ) {
+               if (lq_sta->band == NL80211_BAND_5GHZ) {
                        lq_sta->optimal_rates = rs_optimal_rates_5ghz_legacy;
                        lq_sta->optimal_nentries =
                                ARRAY_SIZE(rs_optimal_rates_5ghz_legacy);
@@ -2679,7 +2679,7 @@ static struct rs_rate *rs_get_optimal_rate(struct iwl_mvm *mvm,
 static void rs_get_initial_rate(struct iwl_mvm *mvm,
                                struct ieee80211_sta *sta,
                                struct iwl_lq_sta *lq_sta,
-                               enum ieee80211_band band,
+                               enum nl80211_band band,
                                struct rs_rate *rate)
 {
        int i, nentries;
@@ -2714,7 +2714,7 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
        rate->index = find_first_bit(&lq_sta->active_legacy_rate,
                                     BITS_PER_LONG);
 
-       if (band == IEEE80211_BAND_5GHZ) {
+       if (band == NL80211_BAND_5GHZ) {
                rate->type = LQ_LEGACY_A;
                initial_rates = rs_optimal_rates_5ghz_legacy;
                nentries = ARRAY_SIZE(rs_optimal_rates_5ghz_legacy);
@@ -2814,7 +2814,7 @@ void rs_update_last_rssi(struct iwl_mvm *mvm,
 static void rs_initialize_lq(struct iwl_mvm *mvm,
                             struct ieee80211_sta *sta,
                             struct iwl_lq_sta *lq_sta,
-                            enum ieee80211_band band,
+                            enum nl80211_band band,
                             bool init)
 {
        struct iwl_scale_tbl_info *tbl;
@@ -3097,7 +3097,7 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
  * Called after adding a new station to initialize rate scaling
  */
 void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-                         enum ieee80211_band band, bool init)
+                         enum nl80211_band band, bool init)
 {
        int i, j;
        struct ieee80211_hw *hw = mvm->hw;
@@ -3203,7 +3203,7 @@ static void rs_rate_update(void *mvm_r,
 #ifdef CONFIG_MAC80211_DEBUGFS
 static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm,
                                            struct iwl_lq_cmd *lq_cmd,
-                                           enum ieee80211_band band,
+                                           enum nl80211_band band,
                                            u32 ucode_rate)
 {
        struct rs_rate rate;
index bdb6f2d8d854f1a16c24c037de44c1c2ea45fa1c..90d046fb24a0a0c5f7bac4451f0fdc61b070e3d3 100644 (file)
@@ -305,7 +305,7 @@ struct iwl_lq_sta {
        bool stbc_capable;      /* Tx STBC is supported by chip and Rx by STA */
        bool bfer_capable;      /* Remote supports beamformee and we BFer */
 
-       enum ieee80211_band band;
+       enum nl80211_band band;
 
        /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
        unsigned long active_legacy_rate;
@@ -358,7 +358,7 @@ struct iwl_lq_sta {
 
 /* Initialize station's rate scaling information after adding station */
 void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-                         enum ieee80211_band band, bool init);
+                         enum nl80211_band band, bool init);
 
 /* Notify RS about Tx status */
 void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
index d8cadf2fe0980cf793c98ab09b63e9be9ed32fa0..263e8a8576b73268e064be161f7784fbd646010d 100644 (file)
@@ -319,7 +319,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
        rx_status->device_timestamp = le32_to_cpu(phy_info->system_timestamp);
        rx_status->band =
                (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
-                               IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+                               NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
        rx_status->freq =
                ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel),
                                               rx_status->band);
index 38e7fa9bd675126a29106bc32db641670b147a12..651604d18a3252af432d396bff7e923bd1439c2c 100644 (file)
@@ -294,10 +294,15 @@ static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
 {
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+       u16 flags = le16_to_cpu(desc->l3l4_flags);
+       u8 l3_prot = (u8)((flags & IWL_RX_L3L4_L3_PROTO_MASK) >>
+                         IWL_RX_L3_PROTO_POS);
 
        if (mvmvif->features & NETIF_F_RXCSUM &&
-           desc->l3l4_flags & cpu_to_le16(IWL_RX_L3L4_IP_HDR_CSUM_OK) &&
-           desc->l3l4_flags & cpu_to_le16(IWL_RX_L3L4_TCP_UDP_CSUM_OK))
+           flags & IWL_RX_L3L4_TCP_UDP_CSUM_OK &&
+           (flags & IWL_RX_L3L4_IP_HDR_CSUM_OK ||
+            l3_prot == IWL_RX_L3_TYPE_IPV6 ||
+            l3_prot == IWL_RX_L3_TYPE_IPV6_FRAG))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
 }
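
The widened condition extracts the L3 protocol field from l3l4_flags and reports CHECKSUM_UNNECESSARY when the TCP/UDP checksum verified and either the IPv4 header checksum also verified or the packet is IPv6, which carries no header checksum to verify in the first place. A sketch with an assumed bit layout (the real IWL_RX_L3L4_* values live in the rx API header):

#include <stdint.h>
#include <stdio.h>

#define L3_PROTO_MASK	0x000f	/* assumed layout */
#define L3_PROTO_POS	0
#define IP_HDR_CSUM_OK	0x0010
#define TCP_UDP_CSUM_OK	0x0020
#define L3_TYPE_IPV6	6	/* assumed codepoint */

static int csum_unnecessary(uint16_t flags)
{
	uint8_t l3 = (flags & L3_PROTO_MASK) >> L3_PROTO_POS;

	/* IPv6 has no header checksum, so only the L4 check applies. */
	return (flags & TCP_UDP_CSUM_OK) &&
	       ((flags & IP_HDR_CSUM_OK) || l3 == L3_TYPE_IPV6);
}

int main(void)
{
	printf("%d %d %d\n",
	       csum_unnecessary(IP_HDR_CSUM_OK | TCP_UDP_CSUM_OK), /* 1 */
	       csum_unnecessary(TCP_UDP_CSUM_OK | L3_TYPE_IPV6),   /* 1 */
	       csum_unnecessary(TCP_UDP_CSUM_OK));                 /* 0 */
	return 0;
}
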
 
@@ -451,8 +456,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 
        rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise);
        rx_status->device_timestamp = le32_to_cpu(desc->gp2_on_air_rise);
-       rx_status->band = desc->channel > 14 ? IEEE80211_BAND_5GHZ :
-                                              IEEE80211_BAND_2GHZ;
+       rx_status->band = desc->channel > 14 ? NL80211_BAND_5GHZ :
+                                              NL80211_BAND_2GHZ;
        rx_status->freq = ieee80211_channel_to_frequency(desc->channel,
                                                         rx_status->band);
        iwl_mvm_get_signal_strength(mvm, desc, rx_status);
index 09eb72c4ae439206aefe2fd7d223bd5d8bea6a5a..6f609dd5c2220dc3b038e4d74b6407cee27a990e 100644 (file)
@@ -70,6 +70,7 @@
 
 #include "mvm.h"
 #include "fw-api-scan.h"
+#include "iwl-io.h"
 
 #define IWL_DENSE_EBS_SCAN_RATIO 5
 #define IWL_SPARSE_EBS_SCAN_RATIO 1
@@ -162,16 +163,16 @@ static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
        return cpu_to_le16(rx_chain);
 }
 
-static __le32 iwl_mvm_scan_rxon_flags(enum ieee80211_band band)
+static __le32 iwl_mvm_scan_rxon_flags(enum nl80211_band band)
 {
-       if (band == IEEE80211_BAND_2GHZ)
+       if (band == NL80211_BAND_2GHZ)
                return cpu_to_le32(PHY_BAND_24);
        else
                return cpu_to_le32(PHY_BAND_5);
 }
 
 static inline __le32
-iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
+iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band,
                          bool no_cck)
 {
        u32 tx_ant;
@@ -181,7 +182,7 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
                                     mvm->scan_last_antenna_idx);
        tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;
 
-       if (band == IEEE80211_BAND_2GHZ && !no_cck)
+       if (band == NL80211_BAND_2GHZ && !no_cck)
                return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK |
                                   tx_ant);
        else
@@ -398,6 +399,10 @@ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
                ieee80211_scan_completed(mvm->hw,
                                scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
                iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+               del_timer(&mvm->scan_timer);
+       } else {
+               IWL_ERR(mvm,
+                       "got scan complete notification but no scan is running\n");
        }
 
        mvm->last_ebs_successful =
@@ -586,14 +591,14 @@ static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm,
        tx_cmd[0].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
                                         TX_CMD_FLG_BT_DIS);
        tx_cmd[0].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
-                                                          IEEE80211_BAND_2GHZ,
+                                                          NL80211_BAND_2GHZ,
                                                           no_cck);
        tx_cmd[0].sta_id = mvm->aux_sta.sta_id;
 
        tx_cmd[1].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
                                         TX_CMD_FLG_BT_DIS);
        tx_cmd[1].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
-                                                          IEEE80211_BAND_5GHZ,
+                                                          NL80211_BAND_5GHZ,
                                                           no_cck);
        tx_cmd[1].sta_id = mvm->aux_sta.sta_id;
 }
@@ -690,19 +695,19 @@ iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
        /* Insert ds parameter set element on 2.4 GHz band */
        newpos = iwl_mvm_copy_and_insert_ds_elem(mvm,
-                                                ies->ies[IEEE80211_BAND_2GHZ],
-                                                ies->len[IEEE80211_BAND_2GHZ],
+                                                ies->ies[NL80211_BAND_2GHZ],
+                                                ies->len[NL80211_BAND_2GHZ],
                                                 pos);
        params->preq.band_data[0].offset = cpu_to_le16(pos - params->preq.buf);
        params->preq.band_data[0].len = cpu_to_le16(newpos - pos);
        pos = newpos;
 
-       memcpy(pos, ies->ies[IEEE80211_BAND_5GHZ],
-              ies->len[IEEE80211_BAND_5GHZ]);
+       memcpy(pos, ies->ies[NL80211_BAND_5GHZ],
+              ies->len[NL80211_BAND_5GHZ]);
        params->preq.band_data[1].offset = cpu_to_le16(pos - params->preq.buf);
        params->preq.band_data[1].len =
-               cpu_to_le16(ies->len[IEEE80211_BAND_5GHZ]);
-       pos += ies->len[IEEE80211_BAND_5GHZ];
+               cpu_to_le16(ies->len[NL80211_BAND_5GHZ]);
+       pos += ies->len[NL80211_BAND_5GHZ];
 
        memcpy(pos, ies->common_ies, ies->common_ie_len);
        params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf);
@@ -916,10 +921,10 @@ static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm)
        unsigned int rates = 0;
        int i;
 
-       band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
+       band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
        for (i = 0; i < band->n_bitrates; i++)
                rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
-       band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
+       band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
        for (i = 0; i < band->n_bitrates; i++)
                rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
 
@@ -934,8 +939,8 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
        struct iwl_scan_config *scan_config;
        struct ieee80211_supported_band *band;
        int num_channels =
-               mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
-               mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
+               mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels +
+               mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels;
        int ret, i, j = 0, cmd_size;
        struct iwl_host_cmd cmd = {
                .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
@@ -961,6 +966,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
                                         SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
                                         SCAN_CONFIG_FLAG_SET_TX_CHAINS |
                                         SCAN_CONFIG_FLAG_SET_RX_CHAINS |
+                                        SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
                                         SCAN_CONFIG_FLAG_SET_ALL_TIMES |
                                         SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
                                         SCAN_CONFIG_FLAG_SET_MAC_ADDR |
@@ -988,10 +994,10 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
                                     IWL_CHANNEL_FLAG_EBS_ADD |
                                     IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
 
-       band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
+       band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
        for (i = 0; i < band->n_channels; i++, j++)
                scan_config->channel_array[j] = band->channels[i].hw_value;
-       band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
+       band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
        for (i = 0; i < band->n_channels; i++, j++)
                scan_config->channel_array[j] = band->channels[i].hw_value;
 
@@ -1216,6 +1222,18 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
        return -EIO;
 }
 
+#define SCAN_TIMEOUT (16 * HZ)
+
+void iwl_mvm_scan_timeout(unsigned long data)
+{
+       struct iwl_mvm *mvm = (struct iwl_mvm *)data;
+
+       IWL_ERR(mvm, "regular scan timed out\n");
+
+       del_timer(&mvm->scan_timer);
+       iwl_force_nmi(mvm->trans);
+}
+
 int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                           struct cfg80211_scan_request *req,
                           struct ieee80211_scan_ies *ies)
@@ -1295,6 +1313,8 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        mvm->scan_status |= IWL_MVM_SCAN_REGULAR;
        iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
 
+       mod_timer(&mvm->scan_timer, jiffies + SCAN_TIMEOUT);
+
        return 0;
 }
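
The scan path now arms a 16-second watchdog (SCAN_TIMEOUT) when a regular scan starts and deletes it on every completion path; if it ever fires, iwl_mvm_scan_timeout() logs an error and forces an NMI so the usual firmware-error recovery takes over. A userspace analog of the arm/disarm lifecycle using POSIX timers, with the NMI replaced by a message:

#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static timer_t scan_timer;

/* Runs only if nobody disarmed the watchdog in time. */
static void scan_timeout(union sigval sv)
{
	(void)sv;
	fprintf(stderr, "regular scan timed out, forcing recovery\n");
}

int main(void)
{
	struct sigevent sev = {
		.sigev_notify = SIGEV_THREAD,
		.sigev_notify_function = scan_timeout,
	};
	struct itimerspec arm = { .it_value = { .tv_sec = 16 } };
	struct itimerspec disarm = { 0 };

	timer_create(CLOCK_MONOTONIC, &sev, &scan_timer);

	/* Scan start: arm the watchdog (mod_timer in the hunk above). */
	timer_settime(scan_timer, 0, &arm, NULL);

	/* ...the scan completes quickly in this toy run... */
	sleep(1);

	/* Completion notification: disarm (del_timer in the hunk). */
	timer_settime(scan_timer, 0, &disarm, NULL);
	timer_delete(scan_timer);
	return 0;
}
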
 
@@ -1412,6 +1432,7 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
        if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
                ieee80211_scan_completed(mvm->hw, aborted);
                iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+               del_timer(&mvm->scan_timer);
        } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
                ieee80211_sched_scan_stopped(mvm->hw);
                mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
@@ -1607,6 +1628,7 @@ out:
                 * to release the scan reference here.
                 */
                iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+               del_timer(&mvm->scan_timer);
                if (notify)
                        ieee80211_scan_completed(mvm->hw, true);
        } else if (notify) {
index c2def1232a8c6e2a930219485205a93b6b1d943d..443a42855c9e188122af3e1782e76ff0f77e02dd 100644 (file)
@@ -193,7 +193,7 @@ static void iwl_mvm_fill_sf_command(struct iwl_mvm *mvm,
                }
        }
 
-       if (sta || IWL_UCODE_API(mvm->fw->ucode_ver) < 13) {
+       if (sta) {
                BUILD_BUG_ON(sizeof(sf_full_timeout) !=
                             sizeof(__le32) * SF_NUM_SCENARIO *
                             SF_NUM_TIMEOUT_TYPES);
@@ -220,9 +220,6 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
        struct ieee80211_sta *sta;
        int ret = 0;
 
-       if (IWL_UCODE_API(mvm->fw->ucode_ver) < 13)
-               sf_cmd.state = cpu_to_le32(new_state);
-
        if (mvm->cfg->disable_dummy_notification)
                sf_cmd.state |= cpu_to_le32(SF_CFG_DUMMY_NOTIF_OFF);
 
@@ -235,8 +232,7 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
 
        switch (new_state) {
        case SF_UNINIT:
-               if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 13)
-                       iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
+               iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
                break;
        case SF_FULL_ON:
                if (sta_id == IWL_MVM_STATION_COUNT) {
index ef99942d71696b48163c83081d8e8c6944fd51c3..12614b7b7fe73eebd8e7c2ef4c6c92cddd8daa91 100644 (file)
@@ -111,7 +111,7 @@ static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
 
 /* send station add/update command to firmware */
 int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-                          bool update)
+                          bool update, unsigned int flags)
 {
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_add_sta_cmd add_sta_cmd = {
@@ -126,9 +126,12 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        u32 status;
        u32 agg_size = 0, mpdu_dens = 0;
 
-       if (!update) {
+       if (!update || (flags & STA_MODIFY_QUEUES)) {
                add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
                memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
+
+               if (flags & STA_MODIFY_QUEUES)
+                       add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
        }
 
        switch (sta->bandwidth) {
@@ -274,6 +277,211 @@ static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
                iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
 }
 
+static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
+                                  struct ieee80211_sta *sta, u8 ac, int tid,
+                                  struct ieee80211_hdr *hdr)
+{
+       struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+       struct iwl_trans_txq_scd_cfg cfg = {
+               .fifo = iwl_mvm_ac_to_tx_fifo[ac],
+               .sta_id = mvmsta->sta_id,
+               .tid = tid,
+               .frame_limit = IWL_FRAME_LIMIT,
+       };
+       unsigned int wdg_timeout =
+               iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+       u8 mac_queue = mvmsta->vif->hw_queue[ac];
+       int queue = -1;
+       int ssn;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       spin_lock_bh(&mvm->queue_info_lock);
+
+       /*
+        * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
+        * exists
+        */
+       if (!ieee80211_is_data_qos(hdr->frame_control) ||
+           ieee80211_is_qos_nullfunc(hdr->frame_control)) {
+               queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_MGMT_QUEUE,
+                                               IWL_MVM_DQA_MAX_MGMT_QUEUE);
+               if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
+                       IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
+                                           queue);
+
+               /* If no such queue is found, we'll use a DATA queue instead */
+       }
+
+       if (queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
+               queue = mvmsta->reserved_queue;
+               IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
+       }
+
+       if (queue < 0)
+               queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
+                                               IWL_MVM_DQA_MAX_DATA_QUEUE);
+       if (queue >= 0)
+               mvm->queue_info[queue].setup_reserved = false;
+
+       spin_unlock_bh(&mvm->queue_info_lock);
+
+       /* TODO: support shared queues for same RA */
+       if (queue < 0)
+               return -ENOSPC;
+
+       /*
+        * Actual en/disablement of aggregations is through the ADD_STA HCMD,
+        * but for configuring the SCD to send A-MPDUs we need to mark the queue
+        * as aggregatable.
+        * Mark all DATA queues as eligible for aggregation at some point
+        */
+       cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
+                        queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
+
+       IWL_DEBUG_TX_QUEUES(mvm, "Allocating queue #%d to sta %d on tid %d\n",
+                           queue, mvmsta->sta_id, tid);
+
+       ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+       iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg,
+                          wdg_timeout);
+
+       spin_lock_bh(&mvmsta->lock);
+       mvmsta->tid_data[tid].txq_id = queue;
+       mvmsta->tfd_queue_msk |= BIT(queue);
+
+       if (mvmsta->reserved_queue == queue)
+               mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
+       spin_unlock_bh(&mvmsta->lock);
+
+       return iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
+}
+
+static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
+{
+       if (tid == IWL_MAX_TID_COUNT)
+               return IEEE80211_AC_VO; /* MGMT */
+
+       return tid_to_mac80211_ac[tid];
+}
+
+static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
+                                      struct ieee80211_sta *sta, int tid)
+{
+       struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+       struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+       struct sk_buff *skb;
+       struct ieee80211_hdr *hdr;
+       struct sk_buff_head deferred_tx;
+       u8 mac_queue;
+       bool no_queue = false; /* Marks if there is a problem with the queue */
+       u8 ac;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       skb = skb_peek(&tid_data->deferred_tx_frames);
+       if (!skb)
+               return;
+       hdr = (void *)skb->data;
+
+       ac = iwl_mvm_tid_to_ac_queue(tid);
+       mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;
+
+       if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE &&
+           iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
+               IWL_ERR(mvm,
+                       "Can't alloc TXQ for sta %d tid %d - dropping frame\n",
+                       mvmsta->sta_id, tid);
+
+               /*
+                * Mark the queue as problematic so the deferred traffic is
+                * freed later, as we can do nothing with it
+                */
+               no_queue = true;
+       }
+
+       __skb_queue_head_init(&deferred_tx);
+
+       /* Disable bottom-halves when entering TX path */
+       local_bh_disable();
+       spin_lock(&mvmsta->lock);
+       skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
+       spin_unlock(&mvmsta->lock);
+
+       while ((skb = __skb_dequeue(&deferred_tx)))
+               if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
+                       ieee80211_free_txskb(mvm->hw, skb);
+       local_bh_enable();
+
+       /* Wake queue */
+       iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
+}
+
+void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
+{
+       struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
+                                          add_stream_wk);
+       struct ieee80211_sta *sta;
+       struct iwl_mvm_sta *mvmsta;
+       unsigned long deferred_tid_traffic;
+       int sta_id, tid;
+
+       mutex_lock(&mvm->mutex);
+
+       /* Go over all stations with deferred traffic */
+       for_each_set_bit(sta_id, mvm->sta_deferred_frames,
+                        IWL_MVM_STATION_COUNT) {
+               clear_bit(sta_id, mvm->sta_deferred_frames);
+               sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+                                               lockdep_is_held(&mvm->mutex));
+               if (IS_ERR_OR_NULL(sta))
+                       continue;
+
+               mvmsta = iwl_mvm_sta_from_mac80211(sta);
+               deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;
+
+               for_each_set_bit(tid, &deferred_tid_traffic,
+                                IWL_MAX_TID_COUNT + 1)
+                       iwl_mvm_tx_deferred_stream(mvm, sta, tid);
+       }
+
+       mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
+                                     struct ieee80211_sta *sta,
+                                     enum nl80211_iftype vif_type)
+{
+       struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+       int queue;
+
+       spin_lock_bh(&mvm->queue_info_lock);
+
+       /* Make sure we have free resources for this STA */
+       if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
+           !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
+           !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].setup_reserved)
+               queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
+       else
+               queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
+                                               IWL_MVM_DQA_MAX_DATA_QUEUE);
+       if (queue < 0) {
+               spin_unlock_bh(&mvm->queue_info_lock);
+               IWL_ERR(mvm, "No available queues for new station\n");
+               return -ENOSPC;
+       }
+       mvm->queue_info[queue].setup_reserved = true;
+
+       spin_unlock_bh(&mvm->queue_info_lock);
+
+       mvmsta->reserved_queue = queue;
+
+       IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
+                           queue, mvmsta->sta_id);
+
+       return 0;
+}
+
 int iwl_mvm_add_sta(struct iwl_mvm *mvm,
                    struct ieee80211_vif *vif,
                    struct ieee80211_sta *sta)
@@ -314,18 +522,29 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
                ret = iwl_mvm_tdls_sta_init(mvm, sta);
                if (ret)
                        return ret;
-       } else {
+       } else if (!iwl_mvm_is_dqa_supported(mvm)) {
                for (i = 0; i < IEEE80211_NUM_ACS; i++)
                        if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
                                mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
        }
 
        /* for HW restart - reset everything but the sequence number */
-       for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+       for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
                u16 seq = mvm_sta->tid_data[i].seq_number;
                memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
                mvm_sta->tid_data[i].seq_number = seq;
+
+               if (!iwl_mvm_is_dqa_supported(mvm))
+                       continue;
+
+               /*
+                * Mark all queues for this STA as unallocated and defer TX
+                * frames until the queue is allocated
+                */
+               mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
+               skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
        }
+       mvm_sta->deferred_traffic_tid_map = 0;
        mvm_sta->agg_tids = 0;
 
        if (iwl_mvm_has_new_rx_api(mvm) &&
@@ -338,7 +557,14 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
                mvm_sta->dup_data = dup_data;
        }
 
-       ret = iwl_mvm_sta_send_to_fw(mvm, sta, false);
+       if (iwl_mvm_is_dqa_supported(mvm)) {
+               ret = iwl_mvm_reserve_sta_stream(mvm, sta,
+                                                ieee80211_vif_type_p2p(vif));
+               if (ret)
+                       goto err;
+       }
+
+       ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
        if (ret)
                goto err;
 
@@ -364,7 +590,7 @@ int iwl_mvm_update_sta(struct iwl_mvm *mvm,
                       struct ieee80211_vif *vif,
                       struct ieee80211_sta *sta)
 {
-       return iwl_mvm_sta_send_to_fw(mvm, sta, true);
+       return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0);
 }
 
 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
@@ -509,6 +735,26 @@ void iwl_mvm_sta_drained_wk(struct work_struct *wk)
        mutex_unlock(&mvm->mutex);
 }
 
+static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
+                                      struct ieee80211_vif *vif,
+                                      struct iwl_mvm_sta *mvm_sta)
+{
+       int ac;
+       int i;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
+               if (mvm_sta->tid_data[i].txq_id == IEEE80211_INVAL_HW_QUEUE)
+                       continue;
+
+               ac = iwl_mvm_tid_to_ac_queue(i);
+               iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
+                                   vif->hw_queue[ac], i, 0);
+               mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
+       }
+}
+
 int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
                   struct ieee80211_vif *vif,
                   struct ieee80211_sta *sta)
@@ -537,6 +783,10 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
                        return ret;
                ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
 
+               /* If DQA is supported - the queues can be disabled now */
+               if (iwl_mvm_is_dqa_supported(mvm))
+                       iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
+
                /* if we are associated - we can't remove the AP STA now */
                if (vif->bss_conf.assoc)
                        return ret;
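
The deferred-TX handling added above (iwl_mvm_tx_deferred_stream() together
with iwl_mvm_add_new_dqa_stream_wk()) follows a common splice-under-lock,
transmit-outside-lock pattern: the per-TID queue is emptied in one step while
the station spinlock is held, and the frames are transmitted only after the
lock is dropped, with bottom halves disabled for the TX path. A minimal sketch
of that pattern; struct example_sta and example_tx() are hypothetical
stand-ins for illustration, not driver API:

#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct example_sta {
	spinlock_t lock;
	struct sk_buff_head deferred;	/* frames waiting for a TX queue */
};

extern int example_tx(struct sk_buff *skb);	/* hypothetical TX hook */

static void example_drain_deferred(struct example_sta *sta)
{
	struct sk_buff_head pending;
	struct sk_buff *skb;

	__skb_queue_head_init(&pending);

	local_bh_disable();		/* the TX path expects BH context */
	spin_lock(&sta->lock);
	/* splice everything out in one step to keep the lock hold short */
	skb_queue_splice_init(&sta->deferred, &pending);
	spin_unlock(&sta->lock);

	while ((skb = __skb_dequeue(&pending)))
		if (example_tx(skb))
			kfree_skb(skb);	/* drop frames that failed to TX */
	local_bh_enable();
}
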
index 1a8f69a41405da4625b83d9d193e0fba4d09dae4..e3efdcd900f0ecd4d6cea823cf327aa044d0f47e 100644 (file)
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015        Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015        Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 struct iwl_mvm;
 struct iwl_mvm_vif;
 
+/**
+ * DOC: DQA - Dynamic Queue Allocation - introduction
+ *
+ * Dynamic Queue Allocation (AKA "DQA") is a feature implemented in the
+ * iwlwifi driver to allow dynamic allocation of queues on demand, rather than
+ * allocating them statically ahead of time. Ideally, we would like one queue
+ * per RA/TID, thus allowing an AP - for example - to send BE traffic to STA2
+ * even if it also needs to send traffic to a sleeping STA1, without being
+ * blocked by the sleeping station.
+ *
+ * Although the queues in DQA mode are dynamically allocated, there are still
+ * some queues that are statically allocated:
+ *     TXQ #0 - command queue
+ *     TXQ #1 - aux frames
+ *     TXQ #2 - P2P device frames
+ *     TXQ #3 - P2P GO/SoftAP GCAST/BCAST frames
+ *     TXQ #4 - BSS DATA frames queue
+ *     TXQ #5-8 - Non-QoS and MGMT frames queue pool
+ *     TXQ #9 - P2P GO/SoftAP probe responses
+ *     TXQ #10-31 - DATA frames queue pool
+ * The queues are dynamically taken from either the MGMT frames queue pool or
+ * the DATA frames one. See %iwl_mvm_dqa_txq for more information on each
+ * queue.
+ *
+ * When a frame for a previously unseen RA/TID comes in, it needs to be
+ * deferred until a queue is allocated for it; only then can it be TXed.
+ * Therefore, it
+ * is placed into %iwl_mvm_tid_data.deferred_tx_frames, and a worker called
+ * %mvm->add_stream_wk later allocates the queues and TXes the deferred frames.
+ *
+ * For convenience, MGMT is treated as if it had TID=8, and goes to the MGMT
+ * queues in the pool. If no free MGMT queue is left to allocate, a queue will
+ * be allocated from the DATA pool instead. Since QoS NDPs can create a problem
+ * for aggregations, they too will use a MGMT queue.
+ *
+ * When adding a STA, a DATA queue is reserved for it so that it can TX from
+ * it. If no free queue is available to reserve, the STA addition will fail.
+ *
+ * If the DATA queue pool gets exhausted, no new STA will be accepted, and if a
+ * new RA/TID comes in for an existing STA, one of the STA's queues will become
+ * shared and will serve more than the single TID (but always for the same RA!).
+ *
+ * When a RA/TID needs to become aggregated, no new queue needs to be
+ * allocated; the existing queue is simply marked as aggregated via the ADD_STA
+ * command. Note, however, that a shared queue cannot be aggregated; only after
+ * the other TIDs become inactive and are removed can the queue be reconfigured
+ * and become aggregated.
+ *
+ * When removing a station, its queues are returned to the pool for reuse. Here
+ * we also need to make sure we are synced with the worker thread that TXes the
+ * deferred frames, so the worker doesn't put deferred frames onto queues that
+ * were just released, or try to allocate new queues for a STA we no longer
+ * need.
+ */
+
 /**
  * DOC: station table - introduction
  *
@@ -253,6 +307,7 @@ enum iwl_mvm_agg_state {
 
 /**
  * struct iwl_mvm_tid_data - holds the states for each RA / TID
+ * @deferred_tx_frames: deferred TX frames for this RA/TID
  * @seq_number: the next WiFi sequence number to use
  * @next_reclaimed: the WiFi sequence number of the next packet to be acked.
  *     This is basically (last acked packet++).
@@ -260,7 +315,7 @@ enum iwl_mvm_agg_state {
  *     Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
  * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed.
  * @state: state of the BA agreement establishment / tear down.
- * @txq_id: Tx queue used by the BA session
+ * @txq_id: Tx queue used by the BA session / DQA
  * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
  *     the first packet to be sent in legacy HW queue in Tx AGG stop flow.
  *     Basically when next_reclaimed reaches ssn, we can tell mac80211 that
@@ -268,6 +323,7 @@ enum iwl_mvm_agg_state {
  * @tx_time: medium time consumed by this A-MPDU
  */
 struct iwl_mvm_tid_data {
+       struct sk_buff_head deferred_tx_frames;
        u16 seq_number;
        u16 next_reclaimed;
        /* The rest is Tx AGG related */
@@ -316,7 +372,10 @@ struct iwl_mvm_rxq_dup_data {
  *     we need to signal the EOSP
  * @lock: lock to protect the whole struct. Since %tid_data is accessed from Tx
  * and from Tx response flow, it needs a spinlock.
- * @tid_data: per tid data. Look at %iwl_mvm_tid_data.
+ * @tid_data: per tid data + mgmt. Look at %iwl_mvm_tid_data.
+ * @reserved_queue: the queue reserved for this STA for DQA purposes
+ *     Every STA is given one reserved queue to allow it to operate. If no
+ *     such queue can be guaranteed, the STA addition will fail.
  * @tx_protection: reference counter for controlling the Tx protection.
  * @tt_tx_protection: is thermal throttling enable Tx protection?
  * @disable_tx: is tx to this STA disabled?
@@ -329,6 +388,7 @@ struct iwl_mvm_rxq_dup_data {
  *     the BA window. To be used for UAPSD only.
  * @ptk_pn: per-queue PTK PN data structures
  * @dup_data: per queue duplicate packet detection data
+ * @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID
  *
  * When mac80211 creates a station it reserves some space (hw->sta_data_size)
  * in the structure for use by driver. This structure is placed in that
@@ -345,12 +405,16 @@ struct iwl_mvm_sta {
        bool bt_reduced_txpower;
        bool next_status_eosp;
        spinlock_t lock;
-       struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT];
+       struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT + 1];
        struct iwl_lq_sta lq_sta;
        struct ieee80211_vif *vif;
        struct iwl_mvm_key_pn __rcu *ptk_pn[4];
        struct iwl_mvm_rxq_dup_data *dup_data;
 
+       u16 deferred_traffic_tid_map;
+
+       u8 reserved_queue;
+
        /* Temporary, until the new TLC will control the Tx protection */
        s8 tx_protection;
        bool tt_tx_protection;
@@ -378,8 +442,18 @@ struct iwl_mvm_int_sta {
        u32 tfd_queue_msk;
 };
 
+/**
+ * iwl_mvm_sta_send_to_fw - send the STA info to the FW
+ *
+ * @mvm: the iwl_mvm* to use
+ * @sta: the STA
+ * @update: this is true if the FW is being updated about a STA it already knows
+ *     about. Otherwise (if this is a new STA), this should be false.
+ * @flags: if update==true, this marks what is being changed via ORs of values
+ *     from enum iwl_sta_modify_flag. Otherwise, this is ignored.
+ */
 int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-                          bool update);
+                          bool update, unsigned int flags);
 int iwl_mvm_add_sta(struct iwl_mvm *mvm,
                    struct ieee80211_vif *vif,
                    struct ieee80211_sta *sta);
@@ -459,5 +533,6 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
                                       struct iwl_mvm_vif *mvmvif,
                                       bool disable);
 void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
 
 #endif /* __sta_h__ */
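
A note on the pool scan implied above: both the DQA DOC block and the sta.c
changes rely on iwl_mvm_find_free_queue() to pick a queue out of the MGMT or
DATA pool, but that helper's body is not part of this diff. What follows is
only a plausible sketch of such a range scan, modeled on the queue_info fields
(hw_queue_refcount, setup_reserved) that this patch does touch; it is not the
driver's actual implementation.

#include <stdbool.h>

struct example_queue_info {
	int hw_queue_refcount;	/* users currently mapped to this TXQ */
	bool setup_reserved;	/* reserved for a STA but not yet enabled */
};

static int example_find_free_queue(struct example_queue_info *qi,
				   int minq, int maxq)
{
	int i;

	/* caller is expected to hold the queue-info lock */
	for (i = minq; i <= maxq; i++)
		if (qi[i].hw_queue_refcount == 0 && !qi[i].setup_reserved)
			return i;	/* first unused, unreserved queue */

	return -1;			/* pool exhausted */
}
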
index 18711c5de35a888a7e2cf96b1ce4b02a834bef7e..9f160fc58cd079395cafeff5995574d22225a161 100644 (file)
@@ -444,7 +444,7 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
        }
 
        if (chandef) {
-               cmd.ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ?
+               cmd.ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ?
                               PHY_BAND_24 : PHY_BAND_5);
                cmd.ci.channel = chandef->chan->hw_value;
                cmd.ci.width = iwl_mvm_get_channel_width(chandef);
index f1f28255a3a637a2653787cfb267a0c0a44cc43c..eb3f460ce1b6b6bdc80a1b9f540c2bdbb7e3bf8f 100644 (file)
@@ -204,20 +204,11 @@ void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
        if (WARN_ON(ths_crossed >= IWL_MAX_DTS_TRIPS))
                return;
 
-       /*
-        * We are now handling a temperature notification from the firmware
-        * in ASYNC and hold the mutex. thermal_notify_framework will call
-        * us back through get_temp() which ought to send a SYNC command to
-        * the firmware and hence to take the mutex.
-        * Avoid the deadlock by unlocking the mutex here.
-        */
        if (mvm->tz_device.tzone) {
                struct iwl_mvm_thermal_device *tz_dev = &mvm->tz_device;
 
-               mutex_unlock(&mvm->mutex);
                thermal_notify_framework(tz_dev->tzone,
                                         tz_dev->fw_trips_index[ths_crossed]);
-               mutex_lock(&mvm->mutex);
        }
 #endif /* CONFIG_THERMAL */
 }
@@ -796,9 +787,6 @@ static int iwl_mvm_tcool_get_cur_state(struct thermal_cooling_device *cdev,
 {
        struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata);
 
-       if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
-               return -EBUSY;
-
        *state = mvm->cooling_dev.cur_state;
 
        return 0;
@@ -813,9 +801,6 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev,
        if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR))
                return -EIO;
 
-       if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
-               return -EBUSY;
-
        mutex_lock(&mvm->mutex);
 
        if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) {
index 75870e68a7c344285ce4652318c721648a756121..bd286fca3776b0f52b95818b848e7d7668988a8e 100644 (file)
@@ -67,6 +67,7 @@
 #include <linux/etherdevice.h>
 #include <linux/tcp.h>
 #include <net/ip.h>
+#include <net/ipv6.h>
 
 #include "iwl-trans.h"
 #include "iwl-eeprom-parse.h"
@@ -98,6 +99,111 @@ iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
                                    addr, tid, ssn);
 }
 
+#define OPT_HDR(type, skb, off) \
+       (type *)(skb_network_header(skb) + (off))
+
+static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
+                           struct ieee80211_hdr *hdr,
+                           struct ieee80211_tx_info *info,
+                           struct iwl_tx_cmd *tx_cmd)
+{
+#if IS_ENABLED(CONFIG_INET)
+       u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
+       u16 offload_assist = le16_to_cpu(tx_cmd->offload_assist);
+       u8 protocol = 0;
+
+       /*
+        * Do not compute checksum if already computed or if transport will
+        * compute it
+        */
+       if (skb->ip_summed != CHECKSUM_PARTIAL || IWL_MVM_SW_TX_CSUM_OFFLOAD)
+               return;
+
+       /* We do not expect to be asked to csum frame types we do not support */
+       if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) ||
+                     (skb->protocol != htons(ETH_P_IP) &&
+                      skb->protocol != htons(ETH_P_IPV6)),
+                     "No support for requested checksum\n")) {
+               skb_checksum_help(skb);
+               return;
+       }
+
+       if (skb->protocol == htons(ETH_P_IP)) {
+               protocol = ip_hdr(skb)->protocol;
+       } else {
+#if IS_ENABLED(CONFIG_IPV6)
+               struct ipv6hdr *ipv6h =
+                       (struct ipv6hdr *)skb_network_header(skb);
+               unsigned int off = sizeof(*ipv6h);
+
+               protocol = ipv6h->nexthdr;
+               while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
+                       /* only supported extension headers */
+                       if (protocol != NEXTHDR_ROUTING &&
+                           protocol != NEXTHDR_HOP &&
+                           protocol != NEXTHDR_DEST &&
+                           protocol != NEXTHDR_FRAGMENT) {
+                               skb_checksum_help(skb);
+                               return;
+                       }
+
+                       if (protocol == NEXTHDR_FRAGMENT) {
+                               struct frag_hdr *hp =
+                                       OPT_HDR(struct frag_hdr, skb, off);
+
+                               protocol = hp->nexthdr;
+                               off += sizeof(struct frag_hdr);
+                       } else {
+                               struct ipv6_opt_hdr *hp =
+                                       OPT_HDR(struct ipv6_opt_hdr, skb, off);
+
+                               protocol = hp->nexthdr;
+                               off += ipv6_optlen(hp);
+                       }
+               }
+               /* if we get here, the protocol should now be TCP or UDP */
+#endif
+       }
+
+       if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
+               WARN_ON_ONCE(1);
+               skb_checksum_help(skb);
+               return;
+       }
+
+       /* enable L4 csum */
+       offload_assist |= BIT(TX_CMD_OFFLD_L4_EN);
+
+       /*
+        * Set offset to IP header (snap).
+        * We don't support tunneling, so no need to handle an inner header.
+        * Size is in words.
+        */
+       offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);
+
+       /* Do IPv4 csum for AMSDU only (no IP csum for IPv6) */
+       if (skb->protocol == htons(ETH_P_IP) &&
+           (offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) {
+               ip_hdr(skb)->check = 0;
+               offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
+       }
+
+       /* reset UDP/TCP header csum */
+       if (protocol == IPPROTO_TCP)
+               tcp_hdr(skb)->check = 0;
+       else
+               udp_hdr(skb)->check = 0;
+
+       /* mac header len should include IV, size is in words */
+       if (info->control.hw_key)
+               mh_len += info->control.hw_key->iv_len;
+       mh_len /= 2;
+       offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;
+
+       tx_cmd->offload_assist = cpu_to_le16(offload_assist);
+#endif
+}
+
 /*
  * Sets most of the Tx cmd's fields
  */
@@ -126,6 +232,9 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
                u8 *qc = ieee80211_get_qos_ctl(hdr);
                tx_cmd->tid_tspec = qc[0] & 0xf;
                tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
+               if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
+                       tx_cmd->offload_assist |=
+                               cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU));
        } else if (ieee80211_is_back_req(fc)) {
                struct ieee80211_bar *bar = (void *)skb->data;
                u16 control = le16_to_cpu(bar->control);
@@ -186,9 +295,15 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
        /* Total # bytes to be transmitted */
        tx_cmd->len = cpu_to_le16((u16)skb->len +
                (uintptr_t)info->driver_data[0]);
-       tx_cmd->next_frame_len = 0;
        tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
        tx_cmd->sta_id = sta_id;
+
+       /* padding is inserted later in transport */
+       if (ieee80211_hdrlen(fc) % 4 &&
+           !(tx_cmd->offload_assist & cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU))))
+               tx_cmd->offload_assist |= cpu_to_le16(BIT(TX_CMD_OFFLD_PAD));
+
+       iwl_mvm_tx_csum(mvm, skb, hdr, info, tx_cmd);
 }
 
 /*
@@ -244,7 +359,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
                                &mvm->nvm_data->bands[info->band], sta);
 
        /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
-       if (info->band == IEEE80211_BAND_5GHZ)
+       if (info->band == NL80211_BAND_5GHZ)
                rate_idx += IWL_FIRST_OFDM_RATE;
 
        /* For 2.4 GHZ band, check that there is no need to remap */
@@ -257,7 +372,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
                iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
                                     mvm->mgmt_last_antenna_idx);
 
-       if (info->band == IEEE80211_BAND_2GHZ &&
+       if (info->band == NL80211_BAND_2GHZ &&
            !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
                rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
        else
@@ -459,6 +574,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
        u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
        u16 amsdu_add, snap_ip_tcp, pad, i = 0;
        unsigned int dbg_max_amsdu_len;
+       netdev_features_t netdev_features = NETIF_F_CSUM_MASK | NETIF_F_SG;
        u8 *qc, tid, txf;
 
        snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
@@ -477,6 +593,19 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
                goto segment;
        }
 
+       /*
+        * Do not build an A-MSDU for IPv6 with extension headers;
+        * ask the stack to segment and checksum the generated MPDUs for us.
+        */
+       if (skb->protocol == htons(ETH_P_IPV6) &&
+           ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
+           IPPROTO_TCP) {
+               num_subframes = 1;
+               pad = 0;
+               netdev_features &= ~NETIF_F_CSUM_MASK;
+               goto segment;
+       }
+
        /*
         * No need to lock amsdu_in_ampdu_allowed since it can't be modified
         * during a BA session.
@@ -570,7 +699,7 @@ segment:
        skb_shinfo(skb)->gso_size = num_subframes * mss;
        memcpy(cb, skb->cb, sizeof(cb));
 
-       next = skb_gso_segment(skb, NETIF_F_CSUM_MASK | NETIF_F_SG);
+       next = skb_gso_segment(skb, netdev_features);
        skb_shinfo(skb)->gso_size = mss;
        if (WARN_ON_ONCE(IS_ERR(next)))
                return -EINVAL;
@@ -632,6 +761,35 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
 }
 #endif
 
+static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
+                                 struct iwl_mvm_sta *mvm_sta, u8 tid,
+                                 struct sk_buff *skb)
+{
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       u8 mac_queue = info->hw_queue;
+       struct sk_buff_head *deferred_tx_frames;
+
+       lockdep_assert_held(&mvm_sta->lock);
+
+       mvm_sta->deferred_traffic_tid_map |= BIT(tid);
+       set_bit(mvm_sta->sta_id, mvm->sta_deferred_frames);
+
+       deferred_tx_frames = &mvm_sta->tid_data[tid].deferred_tx_frames;
+
+       skb_queue_tail(deferred_tx_frames, skb);
+
+       /*
+        * The first deferred frame should've stopped the MAC queues, so we
+        * should never get a second deferred frame for the RA/TID.
+        */
+       if (!WARN(skb_queue_len(deferred_tx_frames) != 1,
+                 "RATID %d/%d has %d deferred frames\n", mvm_sta->sta_id, tid,
+                 skb_queue_len(deferred_tx_frames))) {
+               iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue));
+               schedule_work(&mvm->add_stream_wk);
+       }
+}
+
 /*
  * Sets the fields in the Tx cmd that are crypto related
  */
@@ -647,7 +805,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
        u16 seq_number = 0;
        u8 tid = IWL_MAX_TID_COUNT;
        u8 txq_id = info->hw_queue;
-       bool is_data_qos = false, is_ampdu = false;
+       bool is_ampdu = false;
        int hdrlen;
 
        mvmsta = iwl_mvm_sta_from_mac80211(sta);
@@ -687,8 +845,15 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
                seq_number &= IEEE80211_SCTL_SEQ;
                hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
                hdr->seq_ctrl |= cpu_to_le16(seq_number);
-               is_data_qos = true;
                is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
+       } else if (iwl_mvm_is_dqa_supported(mvm) &&
+                  (ieee80211_is_qos_nullfunc(fc) ||
+                   ieee80211_is_nullfunc(fc))) {
+               /*
+                * nullfunc frames should go to the MGMT queue regardless of QoS
+                */
+               tid = IWL_MAX_TID_COUNT;
+               txq_id = mvmsta->tid_data[tid].txq_id;
        }
 
        /* Copy MAC header from skb into command buffer */
@@ -709,13 +874,30 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
                txq_id = mvmsta->tid_data[tid].txq_id;
        }
 
+       if (iwl_mvm_is_dqa_supported(mvm)) {
+               if (unlikely(mvmsta->tid_data[tid].txq_id ==
+                            IEEE80211_INVAL_HW_QUEUE)) {
+                       iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
+
+                       /*
+                        * The frame is now deferred; the scheduled worker will
+                        * allocate a queue and re-run TX, so the Tx command can
+                        * be freed for now.
+                        */
+                       iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
+                       spin_unlock(&mvmsta->lock);
+                       return 0;
+               }
+
+               txq_id = mvmsta->tid_data[tid].txq_id;
+       }
+
        IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
                     tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));
 
        if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
                goto drop_unlock_sta;
 
-       if (is_data_qos && !ieee80211_has_morefrags(fc))
+       if (tid < IWL_MAX_TID_COUNT && !ieee80211_has_morefrags(fc))
                mvmsta->tid_data[tid].seq_number = seq_number + 0x10;
 
        spin_unlock(&mvmsta->lock);
@@ -870,7 +1052,7 @@ const char *iwl_mvm_get_tx_fail_reason(u32 status)
 #endif /* CONFIG_IWLWIFI_DEBUG */
 
 void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
-                              enum ieee80211_band band,
+                              enum nl80211_band band,
                               struct ieee80211_tx_rate *r)
 {
        if (rate_n_flags & RATE_HT_MCS_GF_MSK)
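
The checksum-offload path added above (iwl_mvm_tx_csum()) has to walk the IPv6
extension-header chain to find the transport protocol before it can zero the
TCP or UDP checksum field. A simplified user-space rendition of that walk,
assuming the generic RFC 2460 extension-header prefix (a next-header byte
followed by a length in 8-octet units) and omitting the fixed-size fragment
header case that the driver handles separately:

#include <stdint.h>
#include <netinet/in.h>		/* IPPROTO_* */

struct ext_hdr_prefix {		/* generic extension-header prefix */
	uint8_t nexthdr;
	uint8_t hdrlen;		/* 8-octet units, excluding the first 8 */
};

static int ipv6_l4_protocol(const uint8_t *pkt, unsigned int len)
{
	unsigned int off = 40;		/* fixed IPv6 header size */
	uint8_t proto;

	if (len < off)
		return -1;		/* too short for an IPv6 header */
	proto = pkt[6];			/* next-header field */

	while (proto == IPPROTO_HOPOPTS || proto == IPPROTO_ROUTING ||
	       proto == IPPROTO_DSTOPTS) {
		const struct ext_hdr_prefix *hp;

		if (off + sizeof(*hp) > len)
			return -1;	/* truncated packet */
		hp = (const struct ext_hdr_prefix *)(pkt + off);
		proto = hp->nexthdr;
		off += (hp->hdrlen + 1) * 8;
	}
	return proto;			/* e.g. IPPROTO_TCP or IPPROTO_UDP */
}
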
index 53cdc5760f6839b68a5d944da2a55a61fa97f378..f0ffd62f02d331b820dd5bb11ec05cceba6f87de 100644 (file)
@@ -217,14 +217,14 @@ static const u8 fw_rate_idx_to_plcp[IWL_RATE_COUNT] = {
 };
 
 int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
-                                       enum ieee80211_band band)
+                                       enum nl80211_band band)
 {
        int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
        int idx;
        int band_offset = 0;
 
        /* Legacy rate format, search for match in table */
-       if (band == IEEE80211_BAND_5GHZ)
+       if (band == NL80211_BAND_5GHZ)
                band_offset = IWL_FIRST_OFDM_RATE;
        for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
                if (fw_rate_idx_to_plcp[idx] == rate)
@@ -491,98 +491,12 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
        IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
 }
 
-static void iwl_mvm_dump_nic_error_log_old(struct iwl_mvm *mvm)
-{
-       struct iwl_trans *trans = mvm->trans;
-       struct iwl_error_event_table_v1 table;
-       u32 base;
-
-       base = mvm->error_event_table;
-       if (mvm->cur_ucode == IWL_UCODE_INIT) {
-               if (!base)
-                       base = mvm->fw->init_errlog_ptr;
-       } else {
-               if (!base)
-                       base = mvm->fw->inst_errlog_ptr;
-       }
-
-       if (base < 0x800000) {
-               IWL_ERR(mvm,
-                       "Not valid error log pointer 0x%08X for %s uCode\n",
-                       base,
-                       (mvm->cur_ucode == IWL_UCODE_INIT)
-                                       ? "Init" : "RT");
-               return;
-       }
-
-       iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
-
-       if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
-               IWL_ERR(trans, "Start IWL Error Log Dump:\n");
-               IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
-                       mvm->status, table.valid);
-       }
-
-       /* Do not change this output - scripts rely on it */
-
-       IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
-
-       trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
-                                     table.data1, table.data2, table.data3,
-                                     table.blink2, table.ilink1, table.ilink2,
-                                     table.bcon_time, table.gp1, table.gp2,
-                                     table.gp3, table.ucode_ver, 0,
-                                     table.hw_ver, table.brd_ver);
-       IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
-               desc_lookup(table.error_id));
-       IWL_ERR(mvm, "0x%08X | uPc\n", table.pc);
-       IWL_ERR(mvm, "0x%08X | branchlink1\n", table.blink1);
-       IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
-       IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
-       IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
-       IWL_ERR(mvm, "0x%08X | data1\n", table.data1);
-       IWL_ERR(mvm, "0x%08X | data2\n", table.data2);
-       IWL_ERR(mvm, "0x%08X | data3\n", table.data3);
-       IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time);
-       IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low);
-       IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
-       IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
-       IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
-       IWL_ERR(mvm, "0x%08X | time gp3\n", table.gp3);
-       IWL_ERR(mvm, "0x%08X | uCode version\n", table.ucode_ver);
-       IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
-       IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver);
-       IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd);
-       IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0);
-       IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1);
-       IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
-       IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
-       IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
-       IWL_ERR(mvm, "0x%08X | isr_pref\n", table.isr_pref);
-       IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
-       IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
-       IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
-       IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
-       IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
-       IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
-       IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
-       IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
-
-       if (mvm->support_umac_log)
-               iwl_mvm_dump_umac_error_log(mvm);
-}
-
 void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
 {
        struct iwl_trans *trans = mvm->trans;
        struct iwl_error_event_table table;
        u32 base;
 
-       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION)) {
-               iwl_mvm_dump_nic_error_log_old(mvm);
-               return;
-       }
-
        base = mvm->error_event_table;
        if (mvm->cur_ucode == IWL_UCODE_INIT) {
                if (!base)
@@ -694,6 +608,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
        mvm->queue_info[queue].hw_queue_refcount++;
        if (mvm->queue_info[queue].hw_queue_refcount > 1)
                enable_queue = false;
+       else
+               mvm->queue_info[queue].ra_sta_id = cfg->sta_id;
        mvm->queue_info[queue].tid_bitmap |= BIT(cfg->tid);
 
        IWL_DEBUG_TX_QUEUES(mvm,
@@ -779,6 +695,8 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
                return;
        }
 
+       cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
+
        /* Make sure queue info is correct even though we overwrite it */
        WARN(mvm->queue_info[queue].hw_queue_refcount ||
             mvm->queue_info[queue].tid_bitmap ||
@@ -1079,3 +997,74 @@ void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 out:
        ieee80211_connection_loss(vif);
 }
+
+int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif,
+                        enum iwl_lqm_cmd_operatrions operation,
+                        u32 duration, u32 timeout)
+{
+       struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_link_qual_msrmnt_cmd cmd = {
+               .cmd_operation = cpu_to_le32(operation),
+               .mac_id = cpu_to_le32(mvm_vif->id),
+               .measurement_time = cpu_to_le32(duration),
+               .timeout = cpu_to_le32(timeout),
+       };
+       u32 cmdid =
+               iwl_cmd_id(LINK_QUALITY_MEASUREMENT_CMD, MAC_CONF_GROUP, 0);
+       int ret;
+
+       if (!fw_has_capa(&mvm_vif->mvm->fw->ucode_capa,
+                        IWL_UCODE_TLV_CAPA_LQM_SUPPORT))
+               return -EOPNOTSUPP;
+
+       if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+               return -EINVAL;
+
+       switch (operation) {
+       case LQM_CMD_OPERATION_START_MEASUREMENT:
+               if (iwl_mvm_lqm_active(mvm_vif->mvm))
+                       return -EBUSY;
+               if (!vif->bss_conf.assoc)
+                       return -EINVAL;
+               mvm_vif->lqm_active = true;
+               break;
+       case LQM_CMD_OPERATION_STOP_MEASUREMENT:
+               if (!iwl_mvm_lqm_active(mvm_vif->mvm))
+                       return -EINVAL;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       ret = iwl_mvm_send_cmd_pdu(mvm_vif->mvm, cmdid, 0, sizeof(cmd),
+                                  &cmd);
+
+       /* command failed - roll back lqm_active state */
+       if (ret) {
+               mvm_vif->lqm_active =
+                       operation == LQM_CMD_OPERATION_STOP_MEASUREMENT;
+       }
+
+       return ret;
+}
+
+static void iwl_mvm_lqm_active_iterator(void *_data, u8 *mac,
+                                       struct ieee80211_vif *vif)
+{
+       struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif);
+       bool *lqm_active = _data;
+
+       *lqm_active = *lqm_active || mvm_vif->lqm_active;
+}
+
+bool iwl_mvm_lqm_active(struct iwl_mvm *mvm)
+{
+       bool ret = false;
+
+       lockdep_assert_held(&mvm->mutex);
+       ieee80211_iterate_active_interfaces_atomic(
+               mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+               iwl_mvm_lqm_active_iterator, &ret);
+
+       return ret;
+}
index 05b968506836af92426ee275d8efb35dc78c64cb..de42066fa49b6d07cf8ce84400317f981e7adc25 100644 (file)
@@ -479,21 +479,33 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x1010, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x0050, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x0150, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x9010, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x8110, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x8050, iwl8265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x9110, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x8130, iwl8265_2ac_cfg)},
 
 /* 9000 Series */
+       {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9560_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9560_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl5165_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x1420, iwl5165_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0010, iwl5165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl5165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl5165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl5165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl5165_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9560_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9560_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9560_2ac_cfg)},
 #endif /* CONFIG_IWLMVM */
 
        {0}
@@ -651,10 +663,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* The PCI device starts with a reference taken and we are
         * supposed to release it here.  But to simplify the
         * interaction with the opmode, we don't do it now, but let
-        * the opmode release it when it's ready.  To account for this
-        * reference, we start with ref_count set to 1.
+        * the opmode release it when it's ready.
         */
-       trans_pcie->ref_count = 1;
 
        return 0;
 
index dadafbdef9d946199b8be927a2478fea079150b1..9ce4ec6cab2f1fcfc994f1d01c9eab859c12a351 100644 (file)
@@ -348,7 +348,7 @@ struct iwl_tso_hdr_page {
 struct iwl_trans_pcie {
        struct iwl_rxq *rxq;
        struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
-       struct iwl_rx_mem_buffer *global_table[MQ_RX_TABLE_SIZE];
+       struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
        struct iwl_rb_allocator rba;
        struct iwl_trans *trans;
        struct iwl_drv *drv;
@@ -403,10 +403,6 @@ struct iwl_trans_pcie {
        bool cmd_hold_nic_awake;
        bool ref_cmd_in_flight;
 
-       /* protect ref counter */
-       spinlock_t ref_lock;
-       u32 ref_count;
-
        dma_addr_t fw_mon_phys;
        struct page *fw_mon_page;
        u32 fw_mon_size;
index 4be3c35afd1928913c31b7b4fccb45f5776b5721..7f8a2322cda2ca4f2e85b6e67bc2b4cb2fab7334 100644 (file)
@@ -210,8 +210,12 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
        if (trans->cfg->mq_rx_supported)
                iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(rxq->id),
                               rxq->write_actual);
-       else
-               iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
+       /*
+        * Write to the FH_RSCSR_CHNL0_WPTR register even in MQ mode, as a
+        * workaround for a hardware shadow-registers bug: writing to
+        * RFH_Q_FRBDCB_WIDX will not wake the NIC.
+        */
+       iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
 }
 
 static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
@@ -908,6 +912,8 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
        allocator_pool_size = trans->num_rx_queues *
                (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
        num_alloc = queue_size + allocator_pool_size;
+       BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
+                    ARRAY_SIZE(trans_pcie->rx_pool));
        for (i = 0; i < num_alloc; i++) {
                struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
 
@@ -1805,7 +1811,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
        struct msix_entry *entry = dev_id;
        struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
        struct iwl_trans *trans = trans_pcie->trans;
-       struct isr_statistics *isr_stats = isr_stats = &trans_pcie->isr_stats;
+       struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
        u32 inta_fh, inta_hw;
 
        lock_map_acquire(&trans->sync_cmd_lockdep_map);
index eb39c7e09781e9dd04a159f04449a86bc93cc1b5..ee081c2225bad238e9b21cc50802440704b7c06a 100644 (file)
@@ -732,8 +732,8 @@ static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
         */
        val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
        if (val & (BIT(1) | BIT(17))) {
-               IWL_INFO(trans,
-                        "can't access the RSA semaphore it is write protected\n");
+               IWL_DEBUG_INFO(trans,
+                              "can't access the RSA semaphore it is write protected\n");
                return 0;
        }
 
@@ -1321,6 +1321,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
         * after this call.
         */
        iwl_pcie_reset_ict(trans);
+       iwl_enable_interrupts(trans);
 
        iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
        iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
@@ -1434,7 +1435,7 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
        int ret, i;
 
        if (trans->cfg->mq_rx_supported) {
-               max_vector = min_t(u32, (num_possible_cpus() + 1),
+               max_vector = min_t(u32, (num_possible_cpus() + 2),
                                   IWL_MAX_RX_HW_QUEUES);
                for (i = 0; i < max_vector; i++)
                        trans_pcie->msix_entries[i].entry = i;
@@ -1465,7 +1466,7 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
 
        ret = pci_enable_msi(pdev);
        if (ret) {
-               dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret);
+               dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
                /* enable rfkill interrupt: hw bug w/a */
                pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
                if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
@@ -1499,8 +1500,8 @@ static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
                        IWL_ERR(trans_pcie->trans,
                                "Error allocating IRQ %d\n", i);
                        for (j = 0; j < i; j++)
-                               free_irq(trans_pcie->msix_entries[i].vector,
-                                        &trans_pcie->msix_entries[i]);
+                               free_irq(trans_pcie->msix_entries[j].vector,
+                                        &trans_pcie->msix_entries[j]);
                        pci_disable_msix(pdev);
                        return ret;
                }
@@ -1694,6 +1695,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
        }
 
        free_percpu(trans_pcie->tso_hdr_page);
+       mutex_destroy(&trans_pcie->mutex);
        iwl_trans_free(trans);
 }
 
@@ -2014,38 +2016,32 @@ static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
 void iwl_trans_pcie_ref(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       unsigned long flags;
 
        if (iwlwifi_mod_params.d0i3_disable)
                return;
 
-       spin_lock_irqsave(&trans_pcie->ref_lock, flags);
-       IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
-       trans_pcie->ref_count++;
        pm_runtime_get(&trans_pcie->pci_dev->dev);
-       spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
+
+#ifdef CONFIG_PM
+       IWL_DEBUG_RPM(trans, "runtime usage count: %d\n",
+                     atomic_read(&trans_pcie->pci_dev->dev.power.usage_count));
+#endif /* CONFIG_PM */
 }
 
 void iwl_trans_pcie_unref(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       unsigned long flags;
 
        if (iwlwifi_mod_params.d0i3_disable)
                return;
 
-       spin_lock_irqsave(&trans_pcie->ref_lock, flags);
-       IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
-       if (WARN_ON_ONCE(trans_pcie->ref_count == 0)) {
-               spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
-               return;
-       }
-       trans_pcie->ref_count--;
-
        pm_runtime_mark_last_busy(&trans_pcie->pci_dev->dev);
        pm_runtime_put_autosuspend(&trans_pcie->pci_dev->dev);
 
-       spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
+#ifdef CONFIG_PM
+       IWL_DEBUG_RPM(trans, "runtime usage count: %d\n",
+                     atomic_read(&trans_pcie->pci_dev->dev.power.usage_count));
+#endif /* CONFIG_PM */
 }
 
 static const char *get_csr_string(int cmd)
@@ -2793,7 +2789,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        trans_pcie->trans = trans;
        spin_lock_init(&trans_pcie->irq_lock);
        spin_lock_init(&trans_pcie->reg_lock);
-       spin_lock_init(&trans_pcie->ref_lock);
        mutex_init(&trans_pcie->mutex);
        init_waitqueue_head(&trans_pcie->ucode_write_waitq);
        trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
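
The ref/unref rework above drops the driver-private ref_count and its spinlock
in favor of the usage count that the runtime-PM core already maintains for the
PCI device. A minimal sketch of the resulting pairing, assuming a device for
which runtime PM has been enabled and an autosuspend delay configured:

#include <linux/pm_runtime.h>

/* Take a runtime-PM reference; the core resumes the device if needed. */
static void example_ref(struct device *dev)
{
	pm_runtime_get(dev);			/* increments usage_count */
}

/* Drop the reference; the device may autosuspend once it goes idle. */
static void example_unref(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);		/* restart autosuspend timer */
	pm_runtime_put_autosuspend(dev);	/* decrements usage_count */
}
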
index 16ad820ca8248717a26a050d165e1011aec301ca..e1f7a3febb50a249d94133fb62c72233044c9886 100644 (file)
@@ -596,6 +596,28 @@ static void iwl_pcie_free_tso_page(struct sk_buff *skb)
        }
 }
 
+static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+       lockdep_assert_held(&trans_pcie->reg_lock);
+
+       if (trans_pcie->ref_cmd_in_flight) {
+               trans_pcie->ref_cmd_in_flight = false;
+               IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n");
+               iwl_trans_pcie_unref(trans);
+       }
+
+       if (!trans->cfg->base_params->apmg_wake_up_wa)
+               return;
+       if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
+               return;
+
+       trans_pcie->cmd_hold_nic_awake = false;
+       __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+                                  CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+}
+
 /*
  * iwl_pcie_txq_unmap -  Unmap any remaining DMA mappings and free skb's
  */
@@ -620,6 +642,20 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
                }
                iwl_pcie_txq_free_tfd(trans, txq);
                q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
+
+               if (q->read_ptr == q->write_ptr) {
+                       unsigned long flags;
+
+                       spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+                       if (txq_id != trans_pcie->cmd_queue) {
+                               IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
+                                             q->id);
+                               iwl_trans_pcie_unref(trans);
+                       } else {
+                               iwl_pcie_clear_cmd_in_flight(trans);
+                       }
+                       spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+               }
        }
        txq->active = false;
 
@@ -1148,29 +1184,6 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
        return 0;
 }
 
-static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
-{
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
-       lockdep_assert_held(&trans_pcie->reg_lock);
-
-       if (trans_pcie->ref_cmd_in_flight) {
-               trans_pcie->ref_cmd_in_flight = false;
-               IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n");
-               iwl_trans_pcie_unref(trans);
-       }
-
-       if (trans->cfg->base_params->apmg_wake_up_wa) {
-               if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
-                       return 0;
-
-               trans_pcie->cmd_hold_nic_awake = false;
-               __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
-                                          CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-       }
-       return 0;
-}
-
 /*
  * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
  *
@@ -2197,6 +2210,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
        __le16 fc;
        u8 hdr_len;
        u16 wifi_seq;
+       bool amsdu;
 
        txq = &trans_pcie->txq[txq_id];
        q = &txq->q;
@@ -2288,11 +2302,18 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
         */
        len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
              hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;
-       tb1_len = ALIGN(len, 4);
-
-       /* Tell NIC about any 2-byte padding after MAC header */
-       if (tb1_len != len)
-               tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+       /* do not align A-MSDU to dword as the subframe header aligns it */
+       amsdu = ieee80211_is_data_qos(fc) &&
+               (*ieee80211_get_qos_ctl(hdr) &
+                IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+       if (trans_pcie->sw_csum_tx || !amsdu) {
+               tb1_len = ALIGN(len, 4);
+               /* Tell NIC about any 2-byte padding after MAC header */
+               if (tb1_len != len)
+                       tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+       } else {
+               tb1_len = len;
+       }
 
        /* The first TB points to the scratchbuf data - min_copy bytes */
        memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
@@ -2310,8 +2331,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                goto out_err;
        iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
 
-       if (ieee80211_is_data_qos(fc) &&
-           (*ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_A_MSDU_PRESENT)) {
+       if (amsdu) {
                if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
                                                     out_meta, dev_cmd,
                                                     tb1_len)))
index 515aa3f993f3dd8d1a65936472bfa6b00f4420de..a8a9bd8e176a35f3e56f50f32f8fe00c685d6440 100644 (file)
@@ -1794,7 +1794,7 @@ static int prism2_transmit(struct net_device *dev, int idx)
                netif_wake_queue(dev);
                return -1;
        }
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
 
        /* Since we did not wait for command completion, the card continues
         * to process on the background and we will finish handling when
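
For context, netif_trans_update() — which the next several hunks substitute for open-coded trans_start stores — is, at this point in the tree, roughly the following inline helper (a sketch of include/linux/netdevice.h, not part of this diff); it routes the watchdog timestamp through TX queue 0 rather than the legacy net_device field:

	static inline void netif_trans_update(struct net_device *dev)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

		if (txq->trans_start != jiffies)
			txq->trans_start = jiffies;
	}
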
index 0f6ea316e38ed419ba0ef5ca88d14284a8f84029..7aa47069af0a771725c8c158ac2c1cae35c484f9 100644 (file)
@@ -60,14 +60,14 @@ int orinoco_wiphy_register(struct wiphy *wiphy)
                if (priv->channel_mask & (1 << i)) {
                        priv->channels[i].center_freq =
                                ieee80211_channel_to_frequency(i + 1,
-                                                          IEEE80211_BAND_2GHZ);
+                                                          NL80211_BAND_2GHZ);
                        channels++;
                }
        }
        priv->band.channels = priv->channels;
        priv->band.n_channels = channels;
 
-       wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+       wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
        wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
 
        i = 0;
@@ -175,7 +175,7 @@ static int orinoco_set_monitor_channel(struct wiphy *wiphy,
        if (cfg80211_get_chandef_type(chandef) != NL80211_CHAN_NO_HT)
                return -EINVAL;
 
-       if (chandef->chan->band != IEEE80211_BAND_2GHZ)
+       if (chandef->chan->band != NL80211_BAND_2GHZ)
                return -EINVAL;
 
        channel = ieee80211_frequency_to_channel(chandef->chan->center_freq);
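
The IEEE80211_BAND_* to NL80211_BAND_* conversion in this and the following hunks is mechanical; the post-rename iteration idiom, used repeatedly below, reduces to this sketch:

	enum nl80211_band band;

	for (band = 0; band < NUM_NL80211_BANDS; band++) {
		struct ieee80211_supported_band *sband = wiphy->bands[band];

		if (!sband)	/* drivers may register only some bands */
			continue;
		/* per-band setup goes here */
	}
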
index e27e32851f1e50f8116c14c55f3bd2f405aee17f..61af5a28f269fd562ffceb1eea1a01122a080b73 100644 (file)
@@ -1193,7 +1193,7 @@ int orinoco_hw_get_freq(struct orinoco_private *priv)
                goto out;
 
        }
-       freq = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
+       freq = ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ);
 
  out:
        orinoco_unlock(priv, &flags);
index 7b5c554323c73ab40caddb47ca965560d787d127..7afe2004e93064f8954a5dcd8940f06b4f86dab5 100644 (file)
@@ -1794,7 +1794,7 @@ void orinoco_reset(struct work_struct *work)
                        printk(KERN_ERR "%s: orinoco_reset: Error %d reenabling card\n",
                               dev->name, err);
                } else
-                       dev->trans_start = jiffies;
+                       netif_trans_update(dev);
        }
 
        orinoco_unlock_irq(priv);
index f2cd513d54b2c21673552470efd6083d3bc5dcbe..56f109bc83945d351e35888dd1091fcd6c42ac28 100644 (file)
@@ -1275,7 +1275,7 @@ static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev)
                goto busy;
        }
 
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
        stats->tx_bytes += skb->len;
        goto ok;
 
index 2c66166add705ca64b0390cfe4602d2d15c70fdd..d0ceb06c72d08a98b172182eb6ce738a949893c8 100644 (file)
@@ -111,7 +111,7 @@ static void orinoco_add_hostscan_result(struct orinoco_private *priv,
        }
 
        freq = ieee80211_channel_to_frequency(
-               le16_to_cpu(bss->a.channel), IEEE80211_BAND_2GHZ);
+               le16_to_cpu(bss->a.channel), NL80211_BAND_2GHZ);
        channel = ieee80211_get_channel(wiphy, freq);
        if (!channel) {
                printk(KERN_DEBUG "Invalid channel designation %04X(%04X)",
@@ -148,7 +148,7 @@ void orinoco_add_extscan_result(struct orinoco_private *priv,
        ie_len = len - sizeof(*bss);
        ie = cfg80211_find_ie(WLAN_EID_DS_PARAMS, bss->data, ie_len);
        chan = ie ? ie[2] : 0;
-       freq = ieee80211_channel_to_frequency(chan, IEEE80211_BAND_2GHZ);
+       freq = ieee80211_channel_to_frequency(chan, NL80211_BAND_2GHZ);
        channel = ieee80211_get_channel(wiphy, freq);
 
        timestamp = le64_to_cpu(bss->timestamp);
index 2fe713eda7adaa0230a05bdce00f10b7e0d5f2b4..d4c73d39336fce1e1d8cc3563fc534bd8e2da140 100644 (file)
@@ -76,14 +76,14 @@ struct p54_channel_entry {
        u16 data;
        int index;
        int max_power;
-       enum ieee80211_band band;
+       enum nl80211_band band;
 };
 
 struct p54_channel_list {
        struct p54_channel_entry *channels;
        size_t entries;
        size_t max_entries;
-       size_t band_channel_num[IEEE80211_NUM_BANDS];
+       size_t band_channel_num[NUM_NL80211_BANDS];
 };
 
 static int p54_get_band_from_freq(u16 freq)
@@ -91,10 +91,10 @@ static int p54_get_band_from_freq(u16 freq)
        /* FIXME: sync these values with the 802.11 spec */
 
        if ((freq >= 2412) && (freq <= 2484))
-               return IEEE80211_BAND_2GHZ;
+               return NL80211_BAND_2GHZ;
 
        if ((freq >= 4920) && (freq <= 5825))
-               return IEEE80211_BAND_5GHZ;
+               return NL80211_BAND_5GHZ;
 
        return -1;
 }
@@ -124,16 +124,16 @@ static int p54_compare_rssichan(const void *_a,
 
 static int p54_fill_band_bitrates(struct ieee80211_hw *dev,
                                  struct ieee80211_supported_band *band_entry,
-                                 enum ieee80211_band band)
+                                 enum nl80211_band band)
 {
        /* TODO: generate rate array dynamically */
 
        switch (band) {
-       case IEEE80211_BAND_2GHZ:
+       case NL80211_BAND_2GHZ:
                band_entry->bitrates = p54_bgrates;
                band_entry->n_bitrates = ARRAY_SIZE(p54_bgrates);
                break;
-       case IEEE80211_BAND_5GHZ:
+       case NL80211_BAND_5GHZ:
                band_entry->bitrates = p54_arates;
                band_entry->n_bitrates = ARRAY_SIZE(p54_arates);
                break;
@@ -147,7 +147,7 @@ static int p54_fill_band_bitrates(struct ieee80211_hw *dev,
 static int p54_generate_band(struct ieee80211_hw *dev,
                             struct p54_channel_list *list,
                             unsigned int *chan_num,
-                            enum ieee80211_band band)
+                            enum nl80211_band band)
 {
        struct p54_common *priv = dev->priv;
        struct ieee80211_supported_band *tmp, *old;
@@ -206,7 +206,7 @@ static int p54_generate_band(struct ieee80211_hw *dev,
 
        if (j == 0) {
                wiphy_err(dev->wiphy, "Disabling totally damaged %d GHz band\n",
-                         (band == IEEE80211_BAND_2GHZ) ? 2 : 5);
+                         (band == NL80211_BAND_2GHZ) ? 2 : 5);
 
                ret = -ENODATA;
                goto err_out;
@@ -396,7 +396,7 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev)
             p54_compare_channels, NULL);
 
        k = 0;
-       for (i = 0, j = 0; i < IEEE80211_NUM_BANDS; i++) {
+       for (i = 0, j = 0; i < NUM_NL80211_BANDS; i++) {
                if (p54_generate_band(dev, list, &k, i) == 0)
                        j++;
        }
@@ -573,10 +573,10 @@ static int p54_parse_rssical(struct ieee80211_hw *dev,
                for (i = 0; i < entries; i++) {
                        u16 freq = 0;
                        switch (i) {
-                       case IEEE80211_BAND_2GHZ:
+                       case NL80211_BAND_2GHZ:
                                freq = 2437;
                                break;
-                       case IEEE80211_BAND_5GHZ:
+                       case NL80211_BAND_5GHZ:
                                freq = 5240;
                                break;
                        }
@@ -902,11 +902,11 @@ good_eeprom:
        if (priv->rxhw == PDR_SYNTH_FRONTEND_XBOW)
                p54_init_xbow_synth(priv);
        if (!(synth & PDR_SYNTH_24_GHZ_DISABLED))
-               dev->wiphy->bands[IEEE80211_BAND_2GHZ] =
-                       priv->band_table[IEEE80211_BAND_2GHZ];
+               dev->wiphy->bands[NL80211_BAND_2GHZ] =
+                       priv->band_table[NL80211_BAND_2GHZ];
        if (!(synth & PDR_SYNTH_5_GHZ_DISABLED))
-               dev->wiphy->bands[IEEE80211_BAND_5GHZ] =
-                       priv->band_table[IEEE80211_BAND_5GHZ];
+               dev->wiphy->bands[NL80211_BAND_5GHZ] =
+                       priv->band_table[NL80211_BAND_5GHZ];
        if ((synth & PDR_SYNTH_RX_DIV_MASK) == PDR_SYNTH_RX_DIV_SUPPORTED)
                priv->rx_diversity_mask = 3;
        if ((synth & PDR_SYNTH_TX_DIV_MASK) == PDR_SYNTH_TX_DIV_SUPPORTED)
index 7805864e76f902a167d2cc647cb39511f691467b..d5a3bf91a03e7b838788342fdc7257d6674dbcd4 100644 (file)
@@ -477,7 +477,7 @@ static void p54_bss_info_changed(struct ieee80211_hw *dev,
                p54_set_edcf(priv);
        }
        if (changed & BSS_CHANGED_BASIC_RATES) {
-               if (dev->conf.chandef.chan->band == IEEE80211_BAND_5GHZ)
+               if (dev->conf.chandef.chan->band == NL80211_BAND_5GHZ)
                        priv->basic_rate_mask = (info->basic_rates << 4);
                else
                        priv->basic_rate_mask = info->basic_rates;
@@ -829,7 +829,7 @@ void p54_free_common(struct ieee80211_hw *dev)
        struct p54_common *priv = dev->priv;
        unsigned int i;
 
-       for (i = 0; i < IEEE80211_NUM_BANDS; i++)
+       for (i = 0; i < NUM_NL80211_BANDS; i++)
                kfree(priv->band_table[i]);
 
        kfree(priv->iq_autocal);
index 40b401ed68458d92f71789d7c22fed20a6104039..529939e611cd579ae174ffafd66895c3cec9fa19 100644 (file)
@@ -223,7 +223,7 @@ struct p54_common {
        struct p54_cal_database *curve_data;
        struct p54_cal_database *output_limit;
        struct p54_cal_database *rssi_db;
-       struct ieee80211_supported_band *band_table[IEEE80211_NUM_BANDS];
+       struct ieee80211_supported_band *band_table[NUM_NL80211_BANDS];
 
        /* BBP/MAC state */
        u8 mac_addr[ETH_ALEN];
index 24e5ff9a92726d066acb64a25a40da59cb735486..1af7da0b386e56623f9e40ab8bfd487c10c9d637 100644 (file)
@@ -353,7 +353,7 @@ static int p54_rx_data(struct p54_common *priv, struct sk_buff *skb)
        rx_status->signal = p54_rssi_to_dbm(priv, hdr->rssi);
        if (hdr->rate & 0x10)
                rx_status->flag |= RX_FLAG_SHORTPRE;
-       if (priv->hw->conf.chandef.chan->band == IEEE80211_BAND_5GHZ)
+       if (priv->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ)
                rx_status->rate_idx = (rate < 4) ? 0 : rate - 4;
        else
                rx_status->rate_idx = rate;
@@ -867,7 +867,7 @@ void p54_tx_80211(struct ieee80211_hw *dev,
        for (i = 0; i < nrates && ridx < 8; i++) {
                /* we register the rates in perfect order */
                rate = info->control.rates[i].idx;
-               if (info->band == IEEE80211_BAND_5GHZ)
+               if (info->band == NL80211_BAND_5GHZ)
                        rate += 4;
 
                /* store the count we actually calculated for TX status */
index 333c1a2f882ed08b1c46087bea9a27009095050f..6700387ef9ab65f5e5f2bf4884ba8369c102cfbd 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/delay.h>
+#include <linux/ktime.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -113,7 +114,7 @@ isl38xx_trigger_device(int asleep, void __iomem *device_base)
 
 #if VERBOSE > SHOW_ERROR_MESSAGES
        u32 counter = 0;
-       struct timeval current_time;
+       struct timespec64 current_ts64;
        DEBUG(SHOW_FUNCTION_CALLS, "isl38xx trigger device\n");
 #endif
 
@@ -121,22 +122,22 @@ isl38xx_trigger_device(int asleep, void __iomem *device_base)
        if (asleep) {
                /* device is in powersave, trigger the device for wakeup */
 #if VERBOSE > SHOW_ERROR_MESSAGES
-               do_gettimeofday(&current_time);
-               DEBUG(SHOW_TRACING, "%08li.%08li Device wakeup triggered\n",
-                     current_time.tv_sec, (long)current_time.tv_usec);
+               ktime_get_real_ts64(&current_ts64);
+               DEBUG(SHOW_TRACING, "%lld.%09ld Device wakeup triggered\n",
+                     (s64)current_ts64.tv_sec, current_ts64.tv_nsec);
 
-               DEBUG(SHOW_TRACING, "%08li.%08li Device register read %08x\n",
-                     current_time.tv_sec, (long)current_time.tv_usec,
+               DEBUG(SHOW_TRACING, "%lld.%09ld Device register read %08x\n",
+                     (s64)current_ts64.tv_sec, current_ts64.tv_nsec,
                      readl(device_base + ISL38XX_CTRL_STAT_REG));
 #endif
 
                reg = readl(device_base + ISL38XX_INT_IDENT_REG);
                if (reg == 0xabadface) {
 #if VERBOSE > SHOW_ERROR_MESSAGES
-                       do_gettimeofday(&current_time);
+                       ktime_get_real_ts64(&current_ts64);
                        DEBUG(SHOW_TRACING,
-                             "%08li.%08li Device register abadface\n",
-                             current_time.tv_sec, (long)current_time.tv_usec);
+                             "%lld.%09ld Device register abadface\n",
+                             (s64)current_ts64.tv_sec, current_ts64.tv_nsec);
 #endif
                        /* read the Device Status Register until Sleepmode bit is set */
                        while (reg = readl(device_base + ISL38XX_CTRL_STAT_REG),
@@ -149,13 +150,13 @@ isl38xx_trigger_device(int asleep, void __iomem *device_base)
 
 #if VERBOSE > SHOW_ERROR_MESSAGES
                        DEBUG(SHOW_TRACING,
-                             "%08li.%08li Device register read %08x\n",
-                             current_time.tv_sec, (long)current_time.tv_usec,
+                             "%lld.%09ld Device register read %08x\n",
+                             (s64)current_ts64.tv_sec, current_ts64.tv_nsec,
                              readl(device_base + ISL38XX_CTRL_STAT_REG));
-                       do_gettimeofday(&current_time);
+                       ktime_get_real_ts64(&current_ts64);
                        DEBUG(SHOW_TRACING,
-                             "%08li.%08li Device asleep counter %i\n",
-                             current_time.tv_sec, (long)current_time.tv_usec,
+                             "%lld.%09ld Device asleep counter %i\n",
+                             (s64)current_ts64.tv_sec, current_ts64.tv_nsec,
                              counter);
 #endif
                }
@@ -168,9 +169,9 @@ isl38xx_trigger_device(int asleep, void __iomem *device_base)
 
                /* perform another read on the Device Status Register */
                reg = readl(device_base + ISL38XX_CTRL_STAT_REG);
-               do_gettimeofday(&current_time);
-               DEBUG(SHOW_TRACING, "%08li.%08li Device register read %08x\n",
-                     current_time.tv_sec, (long)current_time.tv_usec, reg);
+               ktime_get_real_ts64(&current_ts64);
+               DEBUG(SHOW_TRACING, "%lld.%00ld Device register read %08x\n",
+                     (s64)current_ts64.tv_sec, current_ts64.tv_nsec, reg);
 #endif
        } else {
                /* device is (still) awake  */
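
The do_gettimeofday() to ktime_get_real_ts64() conversion above is the standard y2038-safe pattern; condensed:

	struct timespec64 ts;

	ktime_get_real_ts64(&ts);
	/* tv_sec is 64-bit even on 32-bit hosts; tv_nsec is a long */
	printk(KERN_DEBUG "%lld.%09ld event\n", (s64)ts.tv_sec, ts.tv_nsec);
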
index 2b185feb1aa029aab22c98fec9976ce01d1ddf7f..9ed0ed1bf51481e4458dbeae2d105731bbf0f575 100644 (file)
@@ -255,14 +255,14 @@ static struct class *hwsim_class;
 static struct net_device *hwsim_mon; /* global monitor netdev */
 
 #define CHAN2G(_freq)  { \
-       .band = IEEE80211_BAND_2GHZ, \
+       .band = NL80211_BAND_2GHZ, \
        .center_freq = (_freq), \
        .hw_value = (_freq), \
        .max_power = 20, \
 }
 
 #define CHAN5G(_freq) { \
-       .band = IEEE80211_BAND_5GHZ, \
+       .band = NL80211_BAND_5GHZ, \
        .center_freq = (_freq), \
        .hw_value = (_freq), \
        .max_power = 20, \
@@ -479,7 +479,7 @@ struct mac80211_hwsim_data {
        struct list_head list;
        struct ieee80211_hw *hw;
        struct device *dev;
-       struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+       struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
        struct ieee80211_channel channels_2ghz[ARRAY_SIZE(hwsim_channels_2ghz)];
        struct ieee80211_channel channels_5ghz[ARRAY_SIZE(hwsim_channels_5ghz)];
        struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)];
@@ -1030,7 +1030,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
        data->pending_cookie++;
        cookie = data->pending_cookie;
        info->rate_driver_data[0] = (void *)cookie;
-       if (nla_put_u64(skb, HWSIM_ATTR_COOKIE, cookie))
+       if (nla_put_u64_64bit(skb, HWSIM_ATTR_COOKIE, cookie, HWSIM_ATTR_PAD))
                goto nla_put_failure;
 
        genlmsg_end(skb, msg_head);
@@ -2347,7 +2347,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
        u8 addr[ETH_ALEN];
        struct mac80211_hwsim_data *data;
        struct ieee80211_hw *hw;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        const struct ieee80211_ops *ops = &mac80211_hwsim_ops;
        int idx;
 
@@ -2476,16 +2476,16 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
                sizeof(hwsim_channels_5ghz));
        memcpy(data->rates, hwsim_rates, sizeof(hwsim_rates));
 
-       for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
+       for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
                struct ieee80211_supported_band *sband = &data->bands[band];
                switch (band) {
-               case IEEE80211_BAND_2GHZ:
+               case NL80211_BAND_2GHZ:
                        sband->channels = data->channels_2ghz;
                        sband->n_channels = ARRAY_SIZE(hwsim_channels_2ghz);
                        sband->bitrates = data->rates;
                        sband->n_bitrates = ARRAY_SIZE(hwsim_rates);
                        break;
-               case IEEE80211_BAND_5GHZ:
+               case NL80211_BAND_5GHZ:
                        sband->channels = data->channels_5ghz;
                        sband->n_channels = ARRAY_SIZE(hwsim_channels_5ghz);
                        sband->bitrates = data->rates + 4;
index 66e1c73bd50716af22d5f51db4f9fe179f81fadf..39f22467ca2a219809cd35c38a2f5c28d15595bc 100644 (file)
@@ -148,6 +148,7 @@ enum {
        HWSIM_ATTR_RADIO_NAME,
        HWSIM_ATTR_NO_VIF,
        HWSIM_ATTR_FREQ,
+       HWSIM_ATTR_PAD,
        __HWSIM_ATTR_MAX,
 };
 #define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1)
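
HWSIM_ATTR_PAD exists only so nla_put_u64_64bit() has a spare attribute type to pad with: on architectures that need it, a 64-bit attribute payload is kept 8-byte aligned by emitting a zero-payload attribute of the pad type whenever the message offset would misalign it. The call pattern, as in the hunk above:

	if (nla_put_u64_64bit(skb, HWSIM_ATTR_COOKIE, cookie, HWSIM_ATTR_PAD))
		goto nla_put_failure;
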
index 2eea76a340b7b44087d160304faf0a9767882d12..776b44bfd93a4c1a44c1da86f5a7e0cdc4aedb86 100644 (file)
@@ -23,7 +23,7 @@
 
 
 #define CHAN2G(_channel, _freq, _flags) {        \
-       .band             = IEEE80211_BAND_2GHZ, \
+       .band             = NL80211_BAND_2GHZ, \
        .center_freq      = (_freq),             \
        .hw_value         = (_channel),          \
        .flags            = (_flags),            \
@@ -639,7 +639,7 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
                if (chan_no != -1) {
                        struct wiphy *wiphy = priv->wdev->wiphy;
                        int freq = ieee80211_channel_to_frequency(chan_no,
-                                                       IEEE80211_BAND_2GHZ);
+                                                       NL80211_BAND_2GHZ);
                        struct ieee80211_channel *channel =
                                ieee80211_get_channel(wiphy, freq);
 
@@ -1266,7 +1266,7 @@ _new_connect_scan_req(struct wiphy *wiphy, struct cfg80211_connect_params *sme)
 {
        struct cfg80211_scan_request *creq = NULL;
        int i, n_channels = ieee80211_get_num_supported_channels(wiphy);
-       enum ieee80211_band band;
+       enum nl80211_band band;
 
        creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) +
                       n_channels * sizeof(void *),
@@ -1281,7 +1281,7 @@ _new_connect_scan_req(struct wiphy *wiphy, struct cfg80211_connect_params *sme)
 
        /* Scan all available channels */
        i = 0;
-       for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+       for (band = 0; band < NUM_NL80211_BANDS; band++) {
                int j;
 
                if (!wiphy->bands[band])
@@ -2200,7 +2200,7 @@ int lbs_cfg_register(struct lbs_private *priv)
        if (lbs_mesh_activated(priv))
                wdev->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MESH_POINT);
 
-       wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &lbs_band_2ghz;
+       wdev->wiphy->bands[NL80211_BAND_2GHZ] = &lbs_band_2ghz;
 
        /*
         * We could check priv->fwcapinfo && FW_CAPINFO_WPA, but I have
index 4ddd0e5a6b85c68102483b7f42e27cab096a2a9f..301170cccfff7494b95dfa195ac03c3a11fb271e 100644 (file)
@@ -743,7 +743,7 @@ int lbs_set_11d_domain_info(struct lbs_private *priv)
        struct cmd_ds_802_11d_domain_info cmd;
        struct mrvl_ie_domain_param_set *domain = &cmd.domain;
        struct ieee80211_country_ie_triplet *t;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        struct ieee80211_channel *ch;
        u8 num_triplet = 0;
        u8 num_parsed_chan = 0;
@@ -777,7 +777,7 @@ int lbs_set_11d_domain_info(struct lbs_private *priv)
         * etc.
         */
        for (band = 0;
-            (band < IEEE80211_NUM_BANDS) && (num_triplet < MAX_11D_TRIPLETS);
+            (band < NUM_NL80211_BANDS) && (num_triplet < MAX_11D_TRIPLETS);
             band++) {
 
                if (!bands[band])
index a47f0acc099a581447dda6ca16b5eb4c561e25d7..0bf8916a02cfe505ea71f38a364e19eddb3d9542 100644 (file)
@@ -570,7 +570,7 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
        if (!(prxpd->status & cpu_to_le16(MRVDRV_RXPD_STATUS_OK)))
                stats.flag |= RX_FLAG_FAILED_FCS_CRC;
        stats.freq = priv->cur_freq;
-       stats.band = IEEE80211_BAND_2GHZ;
+       stats.band = NL80211_BAND_2GHZ;
        stats.signal = prxpd->snr;
        priv->noise = prxpd->nf;
        /* Marvell rate index has a hole at value 4 */
@@ -642,7 +642,7 @@ struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev)
        priv->band.bitrates = priv->rates;
        priv->band.n_channels = ARRAY_SIZE(lbtf_channels);
        priv->band.channels = priv->channels;
-       hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+       hw->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
        hw->wiphy->interface_modes =
                BIT(NL80211_IFTYPE_STATION) |
                BIT(NL80211_IFTYPE_ADHOC);
index 09578c6cde598da872cdc688afe6d9ae7a19cb1e..a74cc43b19533b6be962c22890670deb00eab924 100644 (file)
@@ -59,7 +59,10 @@ static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv,
                                                                  skb->len);
                        }
 
-                       ret = mwifiex_recv_packet(priv, rx_skb);
+                       if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+                               ret = mwifiex_uap_recv_packet(priv, rx_skb);
+                       else
+                               ret = mwifiex_recv_packet(priv, rx_skb);
                        if (ret == -1)
                                mwifiex_dbg(priv->adapter, ERROR,
                                            "Rx of A-MSDU failed");
index bb7235e1b9d16fd6024a131aefda6f1c81039761..ff948a9222225dee00b4b46ee0dec095e71726c0 100644 (file)
@@ -474,7 +474,7 @@ int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
        u8 no_of_parsed_chan = 0;
        u8 first_chan = 0, next_chan = 0, max_pwr = 0;
        u8 i, flag = 0;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        struct ieee80211_supported_band *sband;
        struct ieee80211_channel *ch;
        struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
@@ -1410,7 +1410,7 @@ mwifiex_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *dev,
 {
        struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
        struct mwifiex_chan_stats *pchan_stats = priv->adapter->chan_stats;
-       enum ieee80211_band band;
+       enum nl80211_band band;
 
        mwifiex_dbg(priv->adapter, DUMP, "dump_survey idx=%d\n", idx);
 
@@ -1586,7 +1586,7 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
 {
        struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
        u16 bitmap_rates[MAX_BITMAP_RATES_SIZE];
-       enum ieee80211_band band;
+       enum nl80211_band band;
        struct mwifiex_adapter *adapter = priv->adapter;
 
        if (!priv->media_connected) {
@@ -1600,11 +1600,11 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
        memset(bitmap_rates, 0, sizeof(bitmap_rates));
 
        /* Fill HR/DSSS rates. */
-       if (band == IEEE80211_BAND_2GHZ)
+       if (band == NL80211_BAND_2GHZ)
                bitmap_rates[0] = mask->control[band].legacy & 0x000f;
 
        /* Fill OFDM rates */
-       if (band == IEEE80211_BAND_2GHZ)
+       if (band == NL80211_BAND_2GHZ)
                bitmap_rates[1] = (mask->control[band].legacy & 0x0ff0) >> 4;
        else
                bitmap_rates[1] = mask->control[band].legacy;
@@ -1771,7 +1771,7 @@ mwifiex_cfg80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant)
        } else {
                struct ieee80211_sta_ht_cap *ht_info;
                int rx_mcs_supp;
-               enum ieee80211_band band;
+               enum nl80211_band band;
 
                if ((tx_ant == 0x1 && rx_ant == 0x1)) {
                        adapter->user_dev_mcs_support = HT_STREAM_1X1;
@@ -1785,7 +1785,7 @@ mwifiex_cfg80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant)
                                                MWIFIEX_11AC_MCS_MAP_2X2;
                }
 
-               for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+               for (band = 0; band < NUM_NL80211_BANDS; band++) {
                        if (!adapter->wiphy->bands[band])
                                continue;
 
@@ -1997,7 +1997,7 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
        struct cfg80211_bss *bss;
        int ie_len;
        u8 ie_buf[IEEE80211_MAX_SSID_LEN + sizeof(struct ieee_types_header)];
-       enum ieee80211_band band;
+       enum nl80211_band band;
 
        if (mwifiex_get_bss_info(priv, &bss_info))
                return -1;
@@ -2271,7 +2271,7 @@ static int mwifiex_set_ibss_params(struct mwifiex_private *priv,
        int index = 0, i;
        u8 config_bands = 0;
 
-       if (params->chandef.chan->band == IEEE80211_BAND_2GHZ) {
+       if (params->chandef.chan->band == NL80211_BAND_2GHZ) {
                if (!params->basic_rates) {
                        config_bands = BAND_B | BAND_G;
                } else {
@@ -2859,18 +2859,18 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
        mwifiex_init_priv_params(priv, dev);
        priv->netdev = dev;
 
-       mwifiex_setup_ht_caps(&wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap, priv);
+       mwifiex_setup_ht_caps(&wiphy->bands[NL80211_BAND_2GHZ]->ht_cap, priv);
        if (adapter->is_hw_11ac_capable)
                mwifiex_setup_vht_caps(
-                       &wiphy->bands[IEEE80211_BAND_2GHZ]->vht_cap, priv);
+                       &wiphy->bands[NL80211_BAND_2GHZ]->vht_cap, priv);
 
        if (adapter->config_bands & BAND_A)
                mwifiex_setup_ht_caps(
-                       &wiphy->bands[IEEE80211_BAND_5GHZ]->ht_cap, priv);
+                       &wiphy->bands[NL80211_BAND_5GHZ]->ht_cap, priv);
 
        if ((adapter->config_bands & BAND_A) && adapter->is_hw_11ac_capable)
                mwifiex_setup_vht_caps(
-                       &wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap, priv);
+                       &wiphy->bands[NL80211_BAND_5GHZ]->vht_cap, priv);
 
        dev_net_set(dev, wiphy_net(wiphy));
        dev->ieee80211_ptr = &priv->wdev;
@@ -3272,8 +3272,11 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
 
        for (i = 0; i < adapter->priv_num; i++) {
                priv = adapter->priv[i];
-               if (priv && priv->netdev)
+               if (priv && priv->netdev) {
                        mwifiex_stop_net_dev_queue(priv->netdev, adapter);
+                       if (netif_carrier_ok(priv->netdev))
+                               netif_carrier_off(priv->netdev);
+               }
        }
 
        for (i = 0; i < retry_num; i++) {
@@ -3341,13 +3344,20 @@ static int mwifiex_cfg80211_resume(struct wiphy *wiphy)
        struct mwifiex_ds_wakeup_reason wakeup_reason;
        struct cfg80211_wowlan_wakeup wakeup_report;
        int i;
+       bool report_wakeup_reason = true;
 
        for (i = 0; i < adapter->priv_num; i++) {
                priv = adapter->priv[i];
-               if (priv && priv->netdev)
+               if (priv && priv->netdev) {
+                       if (!netif_carrier_ok(priv->netdev))
+                               netif_carrier_on(priv->netdev);
                        mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
+               }
        }
 
+       if (!wiphy->wowlan_config)
+               goto done;
+
        priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
        mwifiex_get_wakeup_reason(priv, HostCmd_ACT_GEN_GET, MWIFIEX_SYNC_CMD,
                                  &wakeup_reason);
@@ -3380,19 +3390,20 @@ static int mwifiex_cfg80211_resume(struct wiphy *wiphy)
                if (wiphy->wowlan_config->n_patterns)
                        wakeup_report.pattern_idx = 1;
                break;
-       case CONTROL_FRAME_MATCHED:
-               break;
-       case    MANAGEMENT_FRAME_MATCHED:
+       case GTK_REKEY_FAILURE:
+               if (wiphy->wowlan_config->gtk_rekey_failure)
+                       wakeup_report.gtk_rekey_failure = true;
                break;
        default:
+               report_wakeup_reason = false;
                break;
        }
 
-       if ((wakeup_reason.hs_wakeup_reason > 0) &&
-           (wakeup_reason.hs_wakeup_reason <= 7))
+       if (report_wakeup_reason)
                cfg80211_report_wowlan_wakeup(&priv->wdev, &wakeup_report,
                                              GFP_KERNEL);
 
+done:
        if (adapter->nd_info) {
                for (i = 0 ; i < adapter->nd_info->n_matches ; i++)
                        kfree(adapter->nd_info->matches[i]);
@@ -3410,6 +3421,16 @@ static void mwifiex_cfg80211_set_wakeup(struct wiphy *wiphy,
 
        device_set_wakeup_enable(adapter->dev, enabled);
 }
+
+static int mwifiex_set_rekey_data(struct wiphy *wiphy, struct net_device *dev,
+                                 struct cfg80211_gtk_rekey_data *data)
+{
+       struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+       return mwifiex_send_cmd(priv, HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG,
+                               HostCmd_ACT_GEN_SET, 0, data, true);
+}
+
 #endif
 
 static int mwifiex_get_coalesce_pkt_type(u8 *byte_seq)
@@ -3801,7 +3822,7 @@ static int mwifiex_cfg80211_get_channel(struct wiphy *wiphy,
        struct ieee80211_channel *chan;
        u8 second_chan_offset;
        enum nl80211_channel_type chan_type;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        int freq;
        int ret = -ENODATA;
 
@@ -3932,6 +3953,7 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
        .suspend = mwifiex_cfg80211_suspend,
        .resume = mwifiex_cfg80211_resume,
        .set_wakeup = mwifiex_cfg80211_set_wakeup,
+       .set_rekey_data = mwifiex_set_rekey_data,
 #endif
        .set_coalesce = mwifiex_cfg80211_set_coalesce,
        .tdls_mgmt = mwifiex_cfg80211_tdls_mgmt,
@@ -3948,7 +3970,8 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
 #ifdef CONFIG_PM
 static const struct wiphy_wowlan_support mwifiex_wowlan_support = {
        .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT |
-               WIPHY_WOWLAN_NET_DETECT,
+               WIPHY_WOWLAN_NET_DETECT | WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+               WIPHY_WOWLAN_GTK_REKEY_FAILURE,
        .n_patterns = MWIFIEX_MEF_MAX_FILTERS,
        .pattern_min_len = 1,
        .pattern_max_len = MWIFIEX_MAX_PATTERN_LEN,
@@ -4031,11 +4054,11 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
                                 BIT(NL80211_IFTYPE_P2P_GO) |
                                 BIT(NL80211_IFTYPE_AP);
 
-       wiphy->bands[IEEE80211_BAND_2GHZ] = &mwifiex_band_2ghz;
+       wiphy->bands[NL80211_BAND_2GHZ] = &mwifiex_band_2ghz;
        if (adapter->config_bands & BAND_A)
-               wiphy->bands[IEEE80211_BAND_5GHZ] = &mwifiex_band_5ghz;
+               wiphy->bands[NL80211_BAND_5GHZ] = &mwifiex_band_5ghz;
        else
-               wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
+               wiphy->bands[NL80211_BAND_5GHZ] = NULL;
 
        if (adapter->drcs_enabled && ISSUPP_DRCS_ENABLED(adapter->fw_cap_info))
                wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta_drcs;
@@ -4086,6 +4109,7 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
 
        wiphy->features |= NL80211_FEATURE_HT_IBSS |
                           NL80211_FEATURE_INACTIVITY_TIMER |
+                          NL80211_FEATURE_LOW_PRIORITY_SCAN |
                           NL80211_FEATURE_NEED_OBSS_SCAN;
 
        if (ISSUPP_TDLS_ENABLED(adapter->fw_cap_info))
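
The suspend/resume hunks above deliberately pair queue control with carrier state, in opposite orders on the two paths, so the stack never sees a woken queue behind a dead carrier; side by side:

	/* suspend: quiesce first, then drop the link indication */
	mwifiex_stop_net_dev_queue(priv->netdev, adapter);
	if (netif_carrier_ok(priv->netdev))
		netif_carrier_off(priv->netdev);

	/* resume: restore the link indication, then wake the queues */
	if (!netif_carrier_ok(priv->netdev))
		netif_carrier_on(priv->netdev);
	mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
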
index 09fae27140f70b8056a08542798bd2844873cefa..1ff22055e54f84ae667c100a110c29b39fe87b8b 100644 (file)
@@ -322,9 +322,9 @@ mwifiex_get_cfp(struct mwifiex_private *priv, u8 band, u16 channel, u32 freq)
                return cfp;
 
        if (mwifiex_band_to_radio_type(band) == HostCmd_SCAN_RADIO_TYPE_BG)
-               sband = priv->wdev.wiphy->bands[IEEE80211_BAND_2GHZ];
+               sband = priv->wdev.wiphy->bands[NL80211_BAND_2GHZ];
        else
-               sband = priv->wdev.wiphy->bands[IEEE80211_BAND_5GHZ];
+               sband = priv->wdev.wiphy->bands[NL80211_BAND_5GHZ];
 
        if (!sband) {
                mwifiex_dbg(priv->adapter, ERROR,
@@ -399,15 +399,15 @@ u32 mwifiex_get_rates_from_cfg80211(struct mwifiex_private *priv,
        int i;
 
        if (radio_type) {
-               sband = wiphy->bands[IEEE80211_BAND_5GHZ];
+               sband = wiphy->bands[NL80211_BAND_5GHZ];
                if (WARN_ON_ONCE(!sband))
                        return 0;
-               rate_mask = request->rates[IEEE80211_BAND_5GHZ];
+               rate_mask = request->rates[NL80211_BAND_5GHZ];
        } else {
-               sband = wiphy->bands[IEEE80211_BAND_2GHZ];
+               sband = wiphy->bands[NL80211_BAND_2GHZ];
                if (WARN_ON_ONCE(!sband))
                        return 0;
-               rate_mask = request->rates[IEEE80211_BAND_2GHZ];
+               rate_mask = request->rates[NL80211_BAND_2GHZ];
        }
 
        num_rates = 0;
index a12adee776c6c6ac76c2f24d3bc44b88a1f5a6af..6bc2011d8609532c276b234aff9ba60f6360a665 100644 (file)
@@ -104,6 +104,47 @@ mwifiex_clean_cmd_node(struct mwifiex_adapter *adapter,
        }
 }
 
+/*
+ * This function returns a command to the command free queue.
+ *
+ * The function also calls the completion callback if required, before
+ * cleaning the command node and re-inserting it into the free queue.
+ */
+static void
+mwifiex_insert_cmd_to_free_q(struct mwifiex_adapter *adapter,
+                            struct cmd_ctrl_node *cmd_node)
+{
+       unsigned long flags;
+
+       if (!cmd_node)
+               return;
+
+       if (cmd_node->wait_q_enabled)
+               mwifiex_complete_cmd(adapter, cmd_node);
+       /* Clean the node */
+       mwifiex_clean_cmd_node(adapter, cmd_node);
+
+       /* Insert node into cmd_free_q */
+       spin_lock_irqsave(&adapter->cmd_free_q_lock, flags);
+       list_add_tail(&cmd_node->list, &adapter->cmd_free_q);
+       spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags);
+}
+
+/* This function reuses a command node. */
+void mwifiex_recycle_cmd_node(struct mwifiex_adapter *adapter,
+                             struct cmd_ctrl_node *cmd_node)
+{
+       struct host_cmd_ds_command *host_cmd = (void *)cmd_node->cmd_skb->data;
+
+       mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+
+       atomic_dec(&adapter->cmd_pending);
+       mwifiex_dbg(adapter, CMD,
+                   "cmd: FREE_CMD: cmd=%#x, cmd_pending=%d\n",
+               le16_to_cpu(host_cmd->command),
+               atomic_read(&adapter->cmd_pending));
+}
+
 /*
  * This function sends a host command to the firmware.
  *
@@ -613,47 +654,6 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
        return ret;
 }
 
-/*
- * This function returns a command to the command free queue.
- *
- * The function also calls the completion callback if required, before
- * cleaning the command node and re-inserting it into the free queue.
- */
-void
-mwifiex_insert_cmd_to_free_q(struct mwifiex_adapter *adapter,
-                            struct cmd_ctrl_node *cmd_node)
-{
-       unsigned long flags;
-
-       if (!cmd_node)
-               return;
-
-       if (cmd_node->wait_q_enabled)
-               mwifiex_complete_cmd(adapter, cmd_node);
-       /* Clean the node */
-       mwifiex_clean_cmd_node(adapter, cmd_node);
-
-       /* Insert node into cmd_free_q */
-       spin_lock_irqsave(&adapter->cmd_free_q_lock, flags);
-       list_add_tail(&cmd_node->list, &adapter->cmd_free_q);
-       spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags);
-}
-
-/* This function reuses a command node. */
-void mwifiex_recycle_cmd_node(struct mwifiex_adapter *adapter,
-                             struct cmd_ctrl_node *cmd_node)
-{
-       struct host_cmd_ds_command *host_cmd = (void *)cmd_node->cmd_skb->data;
-
-       mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
-
-       atomic_dec(&adapter->cmd_pending);
-       mwifiex_dbg(adapter, CMD,
-                   "cmd: FREE_CMD: cmd=%#x, cmd_pending=%d\n",
-               le16_to_cpu(host_cmd->command),
-               atomic_read(&adapter->cmd_pending));
-}
-
 /*
  * This function queues a command to the command pending queue.
  *
@@ -991,6 +991,23 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
                adapter->if_ops.card_reset(adapter);
 }
 
+void
+mwifiex_cancel_pending_scan_cmd(struct mwifiex_adapter *adapter)
+{
+       struct cmd_ctrl_node *cmd_node = NULL, *tmp_node;
+       unsigned long flags;
+
+       /* Cancel all pending scan command */
+       spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+       list_for_each_entry_safe(cmd_node, tmp_node,
+                                &adapter->scan_pending_q, list) {
+               list_del(&cmd_node->list);
+               cmd_node->wait_q_enabled = false;
+               mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+       }
+       spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+}
+
 /*
  * This function cancels all the pending commands.
  *
@@ -1009,9 +1026,9 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
        spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
        /* Cancel current cmd */
        if ((adapter->curr_cmd) && (adapter->curr_cmd->wait_q_enabled)) {
-               adapter->curr_cmd->wait_q_enabled = false;
                adapter->cmd_wait_q.status = -1;
                mwifiex_complete_cmd(adapter, adapter->curr_cmd);
+               adapter->curr_cmd->wait_q_enabled = false;
                /* no recycle probably wait for response */
        }
        /* Cancel all pending command */
@@ -1029,16 +1046,7 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
        spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
        spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
 
-       /* Cancel all pending scan command */
-       spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
-       list_for_each_entry_safe(cmd_node, tmp_node,
-                                &adapter->scan_pending_q, list) {
-               list_del(&cmd_node->list);
-
-               cmd_node->wait_q_enabled = false;
-               mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
-       }
-       spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+       mwifiex_cancel_pending_scan_cmd(adapter);
 
        if (adapter->scan_processing) {
                spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
@@ -1070,9 +1078,8 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
 void
 mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
 {
-       struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
+       struct cmd_ctrl_node *cmd_node = NULL;
        unsigned long cmd_flags;
-       unsigned long scan_pending_q_flags;
        struct mwifiex_private *priv;
        int i;
 
@@ -1094,17 +1101,7 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
                mwifiex_recycle_cmd_node(adapter, cmd_node);
        }
 
-       /* Cancel all pending scan command */
-       spin_lock_irqsave(&adapter->scan_pending_q_lock,
-                         scan_pending_q_flags);
-       list_for_each_entry_safe(cmd_node, tmp_node,
-                                &adapter->scan_pending_q, list) {
-               list_del(&cmd_node->list);
-               cmd_node->wait_q_enabled = false;
-               mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
-       }
-       spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
-                              scan_pending_q_flags);
+       mwifiex_cancel_pending_scan_cmd(adapter);
 
        if (adapter->scan_processing) {
                spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
index c134cf8652910b0381421f9e7c6ed746401ac719..8e4145abdbfa563df93935bdd7ee23b681aed1b4 100644 (file)
@@ -372,6 +372,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define HostCmd_CMD_COALESCE_CFG                      0x010a
 #define HostCmd_CMD_MGMT_FRAME_REG                    0x010c
 #define HostCmd_CMD_REMAIN_ON_CHAN                    0x010d
+#define HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG             0x010f
 #define HostCmd_CMD_11AC_CFG                         0x0112
 #define HostCmd_CMD_HS_WAKEUP_REASON                  0x0116
 #define HostCmd_CMD_TDLS_CONFIG                       0x0100
@@ -619,6 +620,7 @@ enum HS_WAKEUP_REASON {
        MAGIC_PATTERN_MATCHED,
        CONTROL_FRAME_MATCHED,
        MANAGEMENT_FRAME_MATCHED,
+       GTK_REKEY_FAILURE,
        RESERVED
 };
 
@@ -2183,6 +2185,14 @@ struct host_cmd_ds_wakeup_reason {
        u16  wakeup_reason;
 } __packed;
 
+struct host_cmd_ds_gtk_rekey_params {
+       __le16 action;
+       u8 kck[NL80211_KCK_LEN];
+       u8 kek[NL80211_KEK_LEN];
+       __le32 replay_ctr_low;
+       __le32 replay_ctr_high;
+} __packed;
+
 struct host_cmd_ds_command {
        __le16 command;
        __le16 size;
@@ -2256,6 +2266,7 @@ struct host_cmd_ds_command {
                struct host_cmd_ds_multi_chan_policy mc_policy;
                struct host_cmd_ds_robust_coex coex;
                struct host_cmd_ds_wakeup_reason hs_wakeup_reason;
+               struct host_cmd_ds_gtk_rekey_params rekey;
        } params;
 } __packed;
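
host_cmd_ds_gtk_rekey_params splits the 8-byte replay counter into two little-endian halves. A hypothetical packing helper — pack_rekey and rc are illustrative names, and the byte-order handling assumes cfg80211's big-endian replay_ctr — might look like:

	static void pack_rekey(struct host_cmd_ds_gtk_rekey_params *rekey,
			       const struct cfg80211_gtk_rekey_data *data)
	{
		u64 rc = be64_to_cpup((const __be64 *)data->replay_ctr);

		rekey->action = cpu_to_le16(HostCmd_ACT_GEN_SET);
		memcpy(rekey->kck, data->kck, NL80211_KCK_LEN);
		memcpy(rekey->kek, data->kek, NL80211_KEK_LEN);
		rekey->replay_ctr_low = cpu_to_le32((u32)rc);
		rekey->replay_ctr_high = cpu_to_le32((u32)(rc >> 32));
	}
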
 
index 517653b3adabd8481edc73cd8eb08046e44f8484..78c532f0d286b249e9c0dd6e269351e83ad41128 100644 (file)
@@ -317,7 +317,7 @@ void mwifiex_set_trans_start(struct net_device *dev)
        for (i = 0; i < dev->num_tx_queues; i++)
                netdev_get_tx_queue(dev, i)->trans_start = jiffies;
 
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
 }
 
 /*
index 3cfa94677a8e2384bcbcc7ed188baf02814b84c9..8b67a552a690ff47f79c041b6c32ed652c91db28 100644 (file)
@@ -702,6 +702,13 @@ mwifiex_close(struct net_device *dev)
                priv->scan_aborting = true;
        }
 
+       if (priv->sched_scanning) {
+               mwifiex_dbg(priv->adapter, INFO,
+                           "aborting bgscan on ndo_stop\n");
+               mwifiex_stop_bg_scan(priv);
+               cfg80211_sched_scan_stopped(priv->wdev.wiphy);
+       }
+
        return 0;
 }
 
@@ -753,13 +760,6 @@ int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb)
 
        mwifiex_queue_main_work(priv->adapter);
 
-       if (priv->sched_scanning) {
-               mwifiex_dbg(priv->adapter, INFO,
-                           "aborting bgscan on ndo_stop\n");
-               mwifiex_stop_bg_scan(priv);
-               cfg80211_sched_scan_stopped(priv->wdev.wiphy);
-       }
-
        return 0;
 }
 
@@ -1074,12 +1074,14 @@ void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter)
                             priv->netdev->name, priv->num_tx_timeout);
        }
 
-       if (adapter->iface_type == MWIFIEX_SDIO) {
-               p += sprintf(p, "\n=== SDIO register dump===\n");
+       if (adapter->iface_type == MWIFIEX_SDIO ||
+           adapter->iface_type == MWIFIEX_PCIE) {
+               p += sprintf(p, "\n=== %s register dump===\n",
+                            adapter->iface_type == MWIFIEX_SDIO ?
+                                                       "SDIO" : "PCIE");
                if (adapter->if_ops.reg_dump)
                        p += adapter->if_ops.reg_dump(adapter, p);
        }
-
        p += sprintf(p, "\n=== more debug information\n");
        debug_info = kzalloc(sizeof(*debug_info), GFP_KERNEL);
        if (debug_info) {
@@ -1432,7 +1434,7 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
        struct mwifiex_private *priv = NULL;
        int i;
 
-       if (down_interruptible(sem))
+       if (down_trylock(sem))
                goto exit_sem_err;
 
        if (!adapter)
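
The down_interruptible() to down_trylock() switch changes the failure semantics of the guard: the remove path no longer sleeps on the semaphore (and can no longer be aborted by a signal); it bails out immediately if the lock is contended:

	if (down_trylock(sem))	/* nonzero means the semaphore was busy */
		goto exit_sem_err;
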
index aafc4ab4e5aed3f65e09f8231e8262ce27520162..0207af00be42d6df18259f23947fbf61b673d0ae 100644 (file)
 #include <linux/idr.h>
 #include <linux/inetdevice.h>
 #include <linux/devcoredump.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/of_irq.h>
 
 #include "decl.h"
 #include "ioctl.h"
@@ -100,8 +111,8 @@ enum {
 #define SCAN_BEACON_ENTRY_PAD                  6
 
 #define MWIFIEX_PASSIVE_SCAN_CHAN_TIME 110
-#define MWIFIEX_ACTIVE_SCAN_CHAN_TIME  30
-#define MWIFIEX_SPECIFIC_SCAN_CHAN_TIME        30
+#define MWIFIEX_ACTIVE_SCAN_CHAN_TIME  40
+#define MWIFIEX_SPECIFIC_SCAN_CHAN_TIME        40
 #define MWIFIEX_DEF_SCAN_CHAN_GAP_TIME  50
 
 #define SCAN_RSSI(RSSI)                                        (0x100 - ((u8)(RSSI)))
@@ -1019,6 +1030,8 @@ int mwifiex_shutdown_fw_complete(struct mwifiex_adapter *adapter);
 int mwifiex_dnld_fw(struct mwifiex_adapter *, struct mwifiex_fw_image *);
 
 int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb);
+int mwifiex_uap_recv_packet(struct mwifiex_private *priv,
+                           struct sk_buff *skb);
 
 int mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
                                struct sk_buff *skb);
@@ -1040,9 +1053,8 @@ int mwifiex_alloc_cmd_buffer(struct mwifiex_adapter *adapter);
 int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter);
 void mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter);
 void mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter);
+void mwifiex_cancel_pending_scan_cmd(struct mwifiex_adapter *adapter);
 
-void mwifiex_insert_cmd_to_free_q(struct mwifiex_adapter *adapter,
-                                 struct cmd_ctrl_node *cmd_node);
 void mwifiex_recycle_cmd_node(struct mwifiex_adapter *adapter,
                              struct cmd_ctrl_node *cmd_node);
 
index de364381fe7be82d76c1bb282f7aa0649e1c586b..0c7937eb6b7701d50b55d80c325fd1e091c7522e 100644 (file)
@@ -190,7 +190,6 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev,
 
        if (ent->driver_data) {
                struct mwifiex_pcie_device *data = (void *)ent->driver_data;
-               card->pcie.firmware = data->firmware;
                card->pcie.reg = data->reg;
                card->pcie.blksz_fw_dl = data->blksz_fw_dl;
                card->pcie.tx_buf_size = data->tx_buf_size;
@@ -269,6 +268,11 @@ static const struct pci_device_id mwifiex_ids[] = {
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
                .driver_data = (unsigned long)&mwifiex_pcie8997,
        },
+       {
+               PCIE_VENDOR_ID_V2_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8997,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               .driver_data = (unsigned long)&mwifiex_pcie8997,
+       },
        {},
 };
 
@@ -2351,6 +2355,47 @@ static int mwifiex_pcie_host_to_card(struct mwifiex_adapter *adapter, u8 type,
        return 0;
 }
 
+/* Function to dump PCIE scratch registers in case of FW crash
+ */
+static int
+mwifiex_pcie_reg_dump(struct mwifiex_adapter *adapter, char *drv_buf)
+{
+       char *p = drv_buf;
+       char buf[256], *ptr;
+       int i;
+       u32 value;
+       struct pcie_service_card *card = adapter->card;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+       int pcie_scratch_reg[] = {PCIE_SCRATCH_12_REG,
+                                 PCIE_SCRATCH_13_REG,
+                                 PCIE_SCRATCH_14_REG};
+
+       if (!p)
+               return 0;
+
+       mwifiex_dbg(adapter, MSG, "PCIE register dump start\n");
+
+       if (mwifiex_read_reg(adapter, reg->fw_status, &value)) {
+               mwifiex_dbg(adapter, ERROR, "failed to read firmware status");
+               return 0;
+       }
+
+       ptr = buf;
+       mwifiex_dbg(adapter, MSG, "pcie scratch register:");
+       for (i = 0; i < ARRAY_SIZE(pcie_scratch_reg); i++) {
+               mwifiex_read_reg(adapter, pcie_scratch_reg[i], &value);
+               ptr += sprintf(ptr, "reg:0x%x, value=0x%x\n",
+                              pcie_scratch_reg[i], value);
+       }
+
+       mwifiex_dbg(adapter, MSG, "%s\n", buf);
+       p += sprintf(p, "%s\n", buf);
+
+       mwifiex_dbg(adapter, MSG, "PCIE register dump end\n");
+
+       return p - drv_buf;
+}
+
 /* This function reads/writes firmware */
 static enum rdwr_status
 mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag)
@@ -2758,6 +2803,68 @@ static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter)
        return 0;
 }
 
+/*
+ * This function gets the firmware name for download based on the revision id.
+ *
+ * It reads the revision id register to determine the chip revision.
+ */
+static void mwifiex_pcie_get_fw_name(struct mwifiex_adapter *adapter)
+{
+       int revision_id = 0;
+       int version;
+       struct pcie_service_card *card = adapter->card;
+
+       switch (card->dev->device) {
+       case PCIE_DEVICE_ID_MARVELL_88W8766P:
+               strcpy(adapter->fw_name, PCIE8766_DEFAULT_FW_NAME);
+               break;
+       case PCIE_DEVICE_ID_MARVELL_88W8897:
+               mwifiex_write_reg(adapter, 0x0c58, 0x80c00000);
+               mwifiex_read_reg(adapter, 0x0c58, &revision_id);
+               revision_id &= 0xff00;
+               switch (revision_id) {
+               case PCIE8897_A0:
+                       strcpy(adapter->fw_name, PCIE8897_A0_FW_NAME);
+                       break;
+               case PCIE8897_B0:
+                       strcpy(adapter->fw_name, PCIE8897_B0_FW_NAME);
+                       break;
+               default:
+                       strcpy(adapter->fw_name, PCIE8897_DEFAULT_FW_NAME);
+
+                       break;
+               }
+               break;
+       case PCIE_DEVICE_ID_MARVELL_88W8997:
+               mwifiex_read_reg(adapter, 0x0c48, &revision_id);
+               mwifiex_read_reg(adapter, 0x0cd0, &version);
+               version &= 0x7;
+               switch (revision_id) {
+               case PCIE8997_V2:
+                       if (version == CHIP_VER_PCIEUSB)
+                               strcpy(adapter->fw_name,
+                                      PCIEUSB8997_FW_NAME_V2);
+                       else
+                               strcpy(adapter->fw_name,
+                                      PCIEUART8997_FW_NAME_V2);
+                       break;
+               case PCIE8997_Z:
+                       if (version == CHIP_VER_PCIEUSB)
+                               strcpy(adapter->fw_name,
+                                      PCIEUSB8997_FW_NAME_Z);
+                       else
+                               strcpy(adapter->fw_name,
+                                      PCIEUART8997_FW_NAME_Z);
+                       break;
+               default:
+                       strcpy(adapter->fw_name, PCIE8997_DEFAULT_FW_NAME);
+                       break;
+               }
+               break;
+       default:
+               break;
+       }
+}
+
 /*
  * This function registers the PCIE device.
  *
@@ -2778,8 +2885,8 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
        adapter->tx_buf_size = card->pcie.tx_buf_size;
        adapter->mem_type_mapping_tbl = card->pcie.mem_type_mapping_tbl;
        adapter->num_mem_types = card->pcie.num_mem_types;
-       strcpy(adapter->fw_name, card->pcie.firmware);
        adapter->ext_scan = card->pcie.can_ext_scan;
+       mwifiex_pcie_get_fw_name(adapter);
 
        return 0;
 }
@@ -2850,6 +2957,7 @@ static struct mwifiex_if_ops pcie_ops = {
        .cleanup_mpa_buf =              NULL,
        .init_fw_port =                 mwifiex_pcie_init_fw_port,
        .clean_pcie_ring =              mwifiex_clean_pcie_ring_buf,
+       .reg_dump =                     mwifiex_pcie_reg_dump,
        .device_dump =                  mwifiex_pcie_device_dump,
 };
 
@@ -2907,6 +3015,3 @@ MODULE_AUTHOR("Marvell International Ltd.");
 MODULE_DESCRIPTION("Marvell WiFi-Ex PCI-Express Driver version " PCIE_VERSION);
 MODULE_VERSION(PCIE_VERSION);
 MODULE_LICENSE("GPL v2");
-MODULE_FIRMWARE(PCIE8766_DEFAULT_FW_NAME);
-MODULE_FIRMWARE(PCIE8897_DEFAULT_FW_NAME);
-MODULE_FIRMWARE(PCIE8997_DEFAULT_FW_NAME);
index 29e58ce877e349649c747080bce913f1063362c8..5770b4396b21f5a83a993dae64e41abc95557f51 100644 (file)
 
 #define PCIE8766_DEFAULT_FW_NAME "mrvl/pcie8766_uapsta.bin"
 #define PCIE8897_DEFAULT_FW_NAME "mrvl/pcie8897_uapsta.bin"
-#define PCIE8997_DEFAULT_FW_NAME "mrvl/pcie8997_uapsta.bin"
+#define PCIE8897_A0_FW_NAME "mrvl/pcie8897_uapsta_a0.bin"
+#define PCIE8897_B0_FW_NAME "mrvl/pcie8897_uapsta.bin"
+#define PCIE8997_DEFAULT_FW_NAME "mrvl/pcieuart8997_combo_v2.bin"
+#define PCIEUART8997_FW_NAME_Z "mrvl/pcieuart8997_combo.bin"
+#define PCIEUART8997_FW_NAME_V2 "mrvl/pcieuart8997_combo_v2.bin"
+#define PCIEUSB8997_FW_NAME_Z "mrvl/pcieusb8997_combo.bin"
+#define PCIEUSB8997_FW_NAME_V2 "mrvl/pcieusb8997_combo_v2.bin"
 
 #define PCIE_VENDOR_ID_MARVELL              (0x11ab)
+#define PCIE_VENDOR_ID_V2_MARVELL           (0x1b4b)
 #define PCIE_DEVICE_ID_MARVELL_88W8766P                (0x2b30)
 #define PCIE_DEVICE_ID_MARVELL_88W8897         (0x2b38)
 #define PCIE_DEVICE_ID_MARVELL_88W8997         (0x2b42)
 
+#define PCIE8897_A0    0x1100
+#define PCIE8897_B0    0x1200
+#define PCIE8997_Z     0x0
+#define PCIE8997_V2    0x471
+#define CHIP_VER_PCIEUSB       0x2
+
 /* Constants for Buffer Descriptor (BD) rings */
 #define MWIFIEX_MAX_TXRX_BD                    0x20
 #define MWIFIEX_TXBD_MASK                      0x3F
@@ -65,6 +78,8 @@
 #define PCIE_SCRATCH_10_REG                            0xCE8
 #define PCIE_SCRATCH_11_REG                            0xCEC
 #define PCIE_SCRATCH_12_REG                            0xCF0
+#define PCIE_SCRATCH_13_REG                            0xCF8
+#define PCIE_SCRATCH_14_REG                            0xCFC
 #define PCIE_RD_DATA_PTR_Q0_Q1                          0xC08C
 #define PCIE_WR_DATA_PTR_Q0_Q1                          0xC05C
 
@@ -263,7 +278,6 @@ static struct memory_type_mapping mem_type_mapping_tbl_w8997[] = {
 };
 
 struct mwifiex_pcie_device {
-       const char *firmware;
        const struct mwifiex_pcie_card_reg *reg;
        u16 blksz_fw_dl;
        u16 tx_buf_size;
@@ -274,7 +288,6 @@ struct mwifiex_pcie_device {
 };
 
 static const struct mwifiex_pcie_device mwifiex_pcie8766 = {
-       .firmware       = PCIE8766_DEFAULT_FW_NAME,
        .reg            = &mwifiex_reg_8766,
        .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
        .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
@@ -283,7 +296,6 @@ static const struct mwifiex_pcie_device mwifiex_pcie8766 = {
 };
 
 static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
-       .firmware       = PCIE8897_DEFAULT_FW_NAME,
        .reg            = &mwifiex_reg_8897,
        .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
        .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
@@ -294,7 +306,6 @@ static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
 };
 
 static const struct mwifiex_pcie_device mwifiex_pcie8997 = {
-       .firmware       = PCIE8997_DEFAULT_FW_NAME,
        .reg            = &mwifiex_reg_8997,
        .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
        .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
index 489f7a911a83f225561a2ddf902c637457b6b960..bc5e52cebce13ef4c8ec2383f6a24f964197ff68 100644 (file)
@@ -76,6 +76,39 @@ static u8 mwifiex_rsn_oui[CIPHER_SUITE_MAX][4] = {
        { 0x00, 0x0f, 0xac, 0x04 },     /* AES  */
 };
 
+static void
+_dbg_security_flags(int log_level, const char *func, const char *desc,
+                   struct mwifiex_private *priv,
+                   struct mwifiex_bssdescriptor *bss_desc)
+{
+       _mwifiex_dbg(priv->adapter, log_level,
+                    "info: %s: %s:\twpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s\tEncMode=%#x privacy=%#x\n",
+                    func, desc,
+                    bss_desc->bcn_wpa_ie ?
+                    bss_desc->bcn_wpa_ie->vend_hdr.element_id : 0,
+                    bss_desc->bcn_rsn_ie ?
+                    bss_desc->bcn_rsn_ie->ieee_hdr.element_id : 0,
+                    priv->sec_info.wep_enabled ? "e" : "d",
+                    priv->sec_info.wpa_enabled ? "e" : "d",
+                    priv->sec_info.wpa2_enabled ? "e" : "d",
+                    priv->sec_info.encryption_mode,
+                    bss_desc->privacy);
+}
+#define dbg_security_flags(mask, desc, priv, bss_desc) \
+       _dbg_security_flags(MWIFIEX_DBG_##mask, __func__, desc, priv, bss_desc)
+
+static bool
+has_ieee_hdr(struct ieee_types_generic *ie, u8 key)
+{
+       return (ie && ie->ieee_hdr.element_id == key);
+}
+
+static bool
+has_vendor_hdr(struct ieee_types_vendor_specific *ie, u8 key)
+{
+       return (ie && ie->vend_hdr.element_id == key);
+}
+
 /*
  * This function parses a given IE for a given OUI.
  *
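The has_ieee_hdr()/has_vendor_hdr() predicates added above collapse the driver's oft-repeated NULL-check-plus-element_id test, and _dbg_security_flags() folds five nearly identical debug printouts into one. A minimal standalone sketch of the predicate idiom, with a struct layout simplified for illustration (it does not match the driver's real ieee_types_generic):

    #include <stdbool.h>

    struct ieee_hdr { unsigned char element_id; };
    struct ie { struct ieee_hdr ieee_hdr; };

    /* Before: if (ie && ie->ieee_hdr.element_id == WLAN_EID_RSN) ...
     * After:  if (has_ieee_hdr(ie, WLAN_EID_RSN)) ...
     */
    static bool has_ieee_hdr(const struct ie *ie, unsigned char key)
    {
        return ie && ie->ieee_hdr.element_id == key;
    }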
@@ -121,8 +154,7 @@ mwifiex_is_rsn_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher)
        struct ie_body *iebody;
        u8 ret = MWIFIEX_OUI_NOT_PRESENT;
 
-       if (((bss_desc->bcn_rsn_ie) && ((*(bss_desc->bcn_rsn_ie)).
-                                       ieee_hdr.element_id == WLAN_EID_RSN))) {
+       if (has_ieee_hdr(bss_desc->bcn_rsn_ie, WLAN_EID_RSN)) {
                iebody = (struct ie_body *)
                         (((u8 *) bss_desc->bcn_rsn_ie->data) +
                          RSN_GTK_OUI_OFFSET);
@@ -148,9 +180,7 @@ mwifiex_is_wpa_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher)
        struct ie_body *iebody;
        u8 ret = MWIFIEX_OUI_NOT_PRESENT;
 
-       if (((bss_desc->bcn_wpa_ie) &&
-            ((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id ==
-             WLAN_EID_VENDOR_SPECIFIC))) {
+       if (has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC)) {
                iebody = (struct ie_body *) bss_desc->bcn_wpa_ie->data;
                oui = &mwifiex_wpa_oui[cipher][0];
                ret = mwifiex_search_oui_in_ie(iebody, oui);
@@ -180,11 +210,8 @@ mwifiex_is_bss_wapi(struct mwifiex_private *priv,
                    struct mwifiex_bssdescriptor *bss_desc)
 {
        if (priv->sec_info.wapi_enabled &&
-           (bss_desc->bcn_wapi_ie &&
-            ((*(bss_desc->bcn_wapi_ie)).ieee_hdr.element_id ==
-                       WLAN_EID_BSS_AC_ACCESS_DELAY))) {
+           has_ieee_hdr(bss_desc->bcn_wapi_ie, WLAN_EID_BSS_AC_ACCESS_DELAY))
                return true;
-       }
        return false;
 }
 
@@ -197,12 +224,9 @@ mwifiex_is_bss_no_sec(struct mwifiex_private *priv,
                      struct mwifiex_bssdescriptor *bss_desc)
 {
        if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled &&
-           !priv->sec_info.wpa2_enabled && ((!bss_desc->bcn_wpa_ie) ||
-               ((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id !=
-                WLAN_EID_VENDOR_SPECIFIC)) &&
-           ((!bss_desc->bcn_rsn_ie) ||
-               ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id !=
-                WLAN_EID_RSN)) &&
+           !priv->sec_info.wpa2_enabled &&
+           !has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC) &&
+           !has_ieee_hdr(bss_desc->bcn_rsn_ie, WLAN_EID_RSN) &&
            !priv->sec_info.encryption_mode && !bss_desc->privacy) {
                return true;
        }
@@ -233,29 +257,14 @@ mwifiex_is_bss_wpa(struct mwifiex_private *priv,
                   struct mwifiex_bssdescriptor *bss_desc)
 {
        if (!priv->sec_info.wep_enabled && priv->sec_info.wpa_enabled &&
-           !priv->sec_info.wpa2_enabled && ((bss_desc->bcn_wpa_ie) &&
-           ((*(bss_desc->bcn_wpa_ie)).
-            vend_hdr.element_id == WLAN_EID_VENDOR_SPECIFIC))
+           !priv->sec_info.wpa2_enabled &&
+           has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC)
           /*
            * Privacy bit may NOT be set in some APs like
            * LinkSys WRT54G && bss_desc->privacy
            */
         ) {
-               mwifiex_dbg(priv->adapter, INFO,
-                           "info: %s: WPA:\t"
-                           "wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s\t"
-                           "EncMode=%#x privacy=%#x\n", __func__,
-                           (bss_desc->bcn_wpa_ie) ?
-                           (*bss_desc->bcn_wpa_ie).
-                           vend_hdr.element_id : 0,
-                           (bss_desc->bcn_rsn_ie) ?
-                           (*bss_desc->bcn_rsn_ie).
-                           ieee_hdr.element_id : 0,
-                           (priv->sec_info.wep_enabled) ? "e" : "d",
-                           (priv->sec_info.wpa_enabled) ? "e" : "d",
-                           (priv->sec_info.wpa2_enabled) ? "e" : "d",
-                           priv->sec_info.encryption_mode,
-                           bss_desc->privacy);
+               dbg_security_flags(INFO, "WPA", priv, bss_desc);
                return true;
        }
        return false;
@@ -269,30 +278,14 @@ static bool
 mwifiex_is_bss_wpa2(struct mwifiex_private *priv,
                    struct mwifiex_bssdescriptor *bss_desc)
 {
-       if (!priv->sec_info.wep_enabled &&
-           !priv->sec_info.wpa_enabled &&
+       if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled &&
            priv->sec_info.wpa2_enabled &&
-           ((bss_desc->bcn_rsn_ie) &&
-            ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id == WLAN_EID_RSN))) {
+           has_ieee_hdr(bss_desc->bcn_rsn_ie, WLAN_EID_RSN)) {
                /*
                 * Privacy bit may NOT be set in some APs like
                 * LinkSys WRT54G && bss_desc->privacy
                 */
-               mwifiex_dbg(priv->adapter, INFO,
-                           "info: %s: WPA2:\t"
-                           "wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s\t"
-                           "EncMode=%#x privacy=%#x\n", __func__,
-                           (bss_desc->bcn_wpa_ie) ?
-                           (*bss_desc->bcn_wpa_ie).
-                           vend_hdr.element_id : 0,
-                           (bss_desc->bcn_rsn_ie) ?
-                           (*bss_desc->bcn_rsn_ie).
-                           ieee_hdr.element_id : 0,
-                           (priv->sec_info.wep_enabled) ? "e" : "d",
-                           (priv->sec_info.wpa_enabled) ? "e" : "d",
-                           (priv->sec_info.wpa2_enabled) ? "e" : "d",
-                           priv->sec_info.encryption_mode,
-                           bss_desc->privacy);
+               dbg_security_flags(INFO, "WAP2", priv, bss_desc);
                return true;
        }
        return false;
@@ -308,11 +301,8 @@ mwifiex_is_bss_adhoc_aes(struct mwifiex_private *priv,
 {
        if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled &&
            !priv->sec_info.wpa2_enabled &&
-           ((!bss_desc->bcn_wpa_ie) ||
-            ((*(bss_desc->bcn_wpa_ie)).
-             vend_hdr.element_id != WLAN_EID_VENDOR_SPECIFIC)) &&
-           ((!bss_desc->bcn_rsn_ie) ||
-            ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id != WLAN_EID_RSN)) &&
+           !has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC) &&
+           !has_ieee_hdr(bss_desc->bcn_rsn_ie, WLAN_EID_RSN) &&
            !priv->sec_info.encryption_mode && bss_desc->privacy) {
                return true;
        }
@@ -329,25 +319,10 @@ mwifiex_is_bss_dynamic_wep(struct mwifiex_private *priv,
 {
        if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled &&
            !priv->sec_info.wpa2_enabled &&
-           ((!bss_desc->bcn_wpa_ie) ||
-            ((*(bss_desc->bcn_wpa_ie)).
-             vend_hdr.element_id != WLAN_EID_VENDOR_SPECIFIC)) &&
-           ((!bss_desc->bcn_rsn_ie) ||
-            ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id != WLAN_EID_RSN)) &&
+           !has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC) &&
+           !has_ieee_hdr(bss_desc->bcn_rsn_ie, WLAN_EID_RSN) &&
            priv->sec_info.encryption_mode && bss_desc->privacy) {
-               mwifiex_dbg(priv->adapter, INFO,
-                           "info: %s: dynamic\t"
-                           "WEP: wpa_ie=%#x wpa2_ie=%#x\t"
-                           "EncMode=%#x privacy=%#x\n",
-                           __func__,
-                           (bss_desc->bcn_wpa_ie) ?
-                           (*bss_desc->bcn_wpa_ie).
-                           vend_hdr.element_id : 0,
-                           (bss_desc->bcn_rsn_ie) ?
-                           (*bss_desc->bcn_rsn_ie).
-                           ieee_hdr.element_id : 0,
-                           priv->sec_info.encryption_mode,
-                           bss_desc->privacy);
+               dbg_security_flags(INFO, "dynamic", priv, bss_desc);
                return true;
        }
        return false;
@@ -460,18 +435,7 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv,
                }
 
                /* Security doesn't match */
-               mwifiex_dbg(adapter, ERROR,
-                           "info: %s: failed: wpa_ie=%#x wpa2_ie=%#x WEP=%s\t"
-                           "WPA=%s WPA2=%s EncMode=%#x privacy=%#x\n",
-                           __func__,
-                           (bss_desc->bcn_wpa_ie) ?
-                           (*bss_desc->bcn_wpa_ie).vend_hdr.element_id : 0,
-                           (bss_desc->bcn_rsn_ie) ?
-                           (*bss_desc->bcn_rsn_ie).ieee_hdr.element_id : 0,
-                           (priv->sec_info.wep_enabled) ? "e" : "d",
-                           (priv->sec_info.wpa_enabled) ? "e" : "d",
-                           (priv->sec_info.wpa2_enabled) ? "e" : "d",
-                           priv->sec_info.encryption_mode, bss_desc->privacy);
+               dbg_security_flags(ERROR, "failed", priv, bss_desc);
                return -1;
        }
 
@@ -494,13 +458,13 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
                                                        *scan_chan_list,
                                 u8 filtered_scan)
 {
-       enum ieee80211_band band;
+       enum nl80211_band band;
        struct ieee80211_supported_band *sband;
        struct ieee80211_channel *ch;
        struct mwifiex_adapter *adapter = priv->adapter;
        int chan_idx = 0, i;
 
-       for (band = 0; (band < IEEE80211_NUM_BANDS) ; band++) {
+       for (band = 0; (band < NUM_NL80211_BANDS) ; band++) {
 
                if (!priv->wdev.wiphy->bands[band])
                        continue;
@@ -534,11 +498,13 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
                                        &= ~MWIFIEX_PASSIVE_SCAN;
                        scan_chan_list[chan_idx].chan_number =
                                                        (u32) ch->hw_value;
+
+                       scan_chan_list[chan_idx].chan_scan_mode_bitmap
+                                       |= MWIFIEX_DISABLE_CHAN_FILT;
+
                        if (filtered_scan) {
                                scan_chan_list[chan_idx].max_scan_time =
                                cpu_to_le16(adapter->specific_scan_time);
-                               scan_chan_list[chan_idx].chan_scan_mode_bitmap
-                                       |= MWIFIEX_DISABLE_CHAN_FILT;
                        }
                        chan_idx++;
                }
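The enum ieee80211_band to enum nl80211_band conversion seen here recurs mechanically through the rest of this merge (mwifiex, mwl8k, mt7601u, rt2800): IEEE80211_BAND_2GHZ becomes NL80211_BAND_2GHZ, IEEE80211_BAND_5GHZ becomes NL80211_BAND_5GHZ, and IEEE80211_NUM_BANDS becomes NUM_NL80211_BANDS. Assuming the cfg80211 enums keep the same values, loops keep the same shape (wiphy below is a stand-in):

    enum nl80211_band band;

    /* one-for-one rename; no behavioral change intended */
    for (band = 0; band < NUM_NL80211_BANDS; band++) {
        if (!wiphy->bands[band])
            continue;
        /* ... per-band work ... */
    }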
@@ -557,13 +523,13 @@ mwifiex_bgscan_create_channel_list(struct mwifiex_private *priv,
                                   struct mwifiex_chan_scan_param_set
                                                *scan_chan_list)
 {
-       enum ieee80211_band band;
+       enum nl80211_band band;
        struct ieee80211_supported_band *sband;
        struct ieee80211_channel *ch;
        struct mwifiex_adapter *adapter = priv->adapter;
        int chan_idx = 0, i;
 
-       for (band = 0; (band < IEEE80211_NUM_BANDS); band++) {
+       for (band = 0; (band < NUM_NL80211_BANDS); band++) {
                if (!priv->wdev.wiphy->bands[band])
                        continue;
 
@@ -655,8 +621,6 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
        int ret = 0;
        struct mwifiex_chan_scan_param_set *tmp_chan_list;
        struct mwifiex_chan_scan_param_set *start_chan;
-       struct cmd_ctrl_node *cmd_node, *tmp_node;
-       unsigned long flags;
        u32 tlv_idx, rates_size, cmd_no;
        u32 total_scan_time;
        u32 done_early;
@@ -813,16 +777,7 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
                            sizeof(struct mwifiex_ie_types_header) + rates_size;
 
                if (ret) {
-                       spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
-                       list_for_each_entry_safe(cmd_node, tmp_node,
-                                                &adapter->scan_pending_q,
-                                                list) {
-                               list_del(&cmd_node->list);
-                               cmd_node->wait_q_enabled = false;
-                               mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
-                       }
-                       spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
-                                              flags);
+                       mwifiex_cancel_pending_scan_cmd(adapter);
                        break;
                }
        }
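The open-coded purge of scan_pending_q removed here, and again in two later hunks, moves into mwifiex_cancel_pending_scan_cmd(). A sketch of what that helper presumably does, reconstructed from the removed lines (its real definition lives elsewhere in the driver and may differ):

    static void mwifiex_cancel_pending_scan_cmd(struct mwifiex_adapter *adapter)
    {
        struct cmd_ctrl_node *cmd_node, *tmp_node;
        unsigned long flags;

        spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
        list_for_each_entry_safe(cmd_node, tmp_node,
                                 &adapter->scan_pending_q, list) {
            list_del(&cmd_node->list);
            cmd_node->wait_q_enabled = false;
            mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
        }
        spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
    }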
@@ -912,14 +867,11 @@ mwifiex_config_scan(struct mwifiex_private *priv,
                /* Set the BSS type scan filter, use Adapter setting if
                   unset */
                scan_cfg_out->bss_mode =
-                       (user_scan_in->bss_mode ? (u8) user_scan_in->
-                        bss_mode : (u8) adapter->scan_mode);
+                       (u8)(user_scan_in->bss_mode ?: adapter->scan_mode);
 
                /* Set the number of probes to send, use Adapter setting
                   if unset */
-               num_probes =
-                       (user_scan_in->num_probes ? user_scan_in->
-                        num_probes : adapter->scan_probes);
+               num_probes = user_scan_in->num_probes ?: adapter->scan_probes;
 
                /*
                 * Set the BSSID filter to the incoming configuration,
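The bare ?: in the two replacements above is the GNU C conditional with omitted middle operand: a ?: b yields a when a is non-zero and b otherwise, evaluating a only once, which is why it can replace the longer ternary without a temporary. A toy illustration with hypothetical names:

    /* mode = in ?: dflt;   is shorthand for   mode = in ? in : dflt; */
    u8 bss_mode = user_bss_mode ?: adapter_scan_mode;
    u16 probes  = user_num_probes ?: adapter_scan_probes;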
@@ -1094,28 +1046,24 @@ mwifiex_config_scan(struct mwifiex_private *priv,
                     chan_idx++) {
 
                        channel = user_scan_in->chan_list[chan_idx].chan_number;
-                       (scan_chan_list + chan_idx)->chan_number = channel;
+                       scan_chan_list[chan_idx].chan_number = channel;
 
                        radio_type =
                                user_scan_in->chan_list[chan_idx].radio_type;
-                       (scan_chan_list + chan_idx)->radio_type = radio_type;
+                       scan_chan_list[chan_idx].radio_type = radio_type;
 
                        scan_type = user_scan_in->chan_list[chan_idx].scan_type;
 
                        if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE)
-                               (scan_chan_list +
-                                chan_idx)->chan_scan_mode_bitmap
+                               scan_chan_list[chan_idx].chan_scan_mode_bitmap
                                        |= (MWIFIEX_PASSIVE_SCAN |
                                            MWIFIEX_HIDDEN_SSID_REPORT);
                        else
-                               (scan_chan_list +
-                                chan_idx)->chan_scan_mode_bitmap
+                               scan_chan_list[chan_idx].chan_scan_mode_bitmap
                                        &= ~MWIFIEX_PASSIVE_SCAN;
 
-                       if (*filtered_scan)
-                               (scan_chan_list +
-                                chan_idx)->chan_scan_mode_bitmap
-                                       |= MWIFIEX_DISABLE_CHAN_FILT;
+                       scan_chan_list[chan_idx].chan_scan_mode_bitmap
+                               |= MWIFIEX_DISABLE_CHAN_FILT;
 
                        if (user_scan_in->chan_list[chan_idx].scan_time) {
                                scan_dur = (u16) user_scan_in->
@@ -1129,9 +1077,9 @@ mwifiex_config_scan(struct mwifiex_private *priv,
                                        scan_dur = adapter->active_scan_time;
                        }
 
-                       (scan_chan_list + chan_idx)->min_scan_time =
+                       scan_chan_list[chan_idx].min_scan_time =
                                cpu_to_le16(scan_dur);
-                       (scan_chan_list + chan_idx)->max_scan_time =
+                       scan_chan_list[chan_idx].max_scan_time =
                                cpu_to_le16(scan_dur);
                }
 
@@ -1991,12 +1939,13 @@ mwifiex_active_scan_req_for_passive_chan(struct mwifiex_private *priv)
 static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
 {
        struct mwifiex_adapter *adapter = priv->adapter;
-       struct cmd_ctrl_node *cmd_node, *tmp_node;
+       struct cmd_ctrl_node *cmd_node;
        unsigned long flags;
 
        spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
        if (list_empty(&adapter->scan_pending_q)) {
                spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+
                spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
                adapter->scan_processing = false;
                spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
@@ -2018,13 +1967,10 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
                }
        } else if ((priv->scan_aborting && !priv->scan_request) ||
                   priv->scan_block) {
-               list_for_each_entry_safe(cmd_node, tmp_node,
-                                        &adapter->scan_pending_q, list) {
-                       list_del(&cmd_node->list);
-                       mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
-               }
                spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
 
+               mwifiex_cancel_pending_scan_cmd(adapter);
+
                spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
                adapter->scan_processing = false;
                spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
index b2c839ae2c3c146e3d32786a2a99360fa254eb89..099722e1f8673f00b05812fada05a684ed4b37ea 100644 (file)
@@ -73,6 +73,66 @@ static struct memory_type_mapping mem_type_mapping_tbl[] = {
        {"EXTLAST", NULL, 0, 0xFE},
 };
 
+static const struct of_device_id mwifiex_sdio_of_match_table[] = {
+       { .compatible = "marvell,sd8897" },
+       { .compatible = "marvell,sd8997" },
+       { }
+};
+
+static irqreturn_t mwifiex_wake_irq_wifi(int irq, void *priv)
+{
+       struct mwifiex_plt_wake_cfg *cfg = priv;
+
+       if (cfg->irq_wifi >= 0) {
+               pr_info("%s: wake by wifi\n", __func__);
+               cfg->wake_by_wifi = true;
+               disable_irq_nosync(irq);
+       }
+
+       return IRQ_HANDLED;
+}
+
+/* This function parses the device tree node using the mmc subnode
+ * devicetree API. The device node is saved in card->plt_of_node.
+ * If the device tree node exists and includes an interrupts attribute,
+ * this function will also request the platform specific wakeup interrupt.
+ */
+static int mwifiex_sdio_probe_of(struct device *dev, struct sdio_mmc_card *card)
+{
+       struct mwifiex_plt_wake_cfg *cfg;
+       int ret;
+
+       if (!dev->of_node ||
+           !of_match_node(mwifiex_sdio_of_match_table, dev->of_node)) {
+               pr_err("sdio platform data not available\n");
+               return -1;
+       }
+
+       card->plt_of_node = dev->of_node;
+       card->plt_wake_cfg = devm_kzalloc(dev, sizeof(*card->plt_wake_cfg),
+                                         GFP_KERNEL);
+       cfg = card->plt_wake_cfg;
+       if (cfg && card->plt_of_node) {
+               cfg->irq_wifi = irq_of_parse_and_map(card->plt_of_node, 0);
+               if (!cfg->irq_wifi) {
+                       dev_err(dev, "fail to parse irq_wifi from device tree");
+               } else {
+                       ret = devm_request_irq(dev, cfg->irq_wifi,
+                                              mwifiex_wake_irq_wifi,
+                                              IRQF_TRIGGER_LOW,
+                                              "wifi_wake", cfg);
+                       if (ret) {
+                               dev_err(dev,
+                                       "Failed to request irq_wifi %d (%d)\n",
+                                       cfg->irq_wifi, ret);
+                       }
+                       disable_irq(cfg->irq_wifi);
+               }
+       }
+
+       return 0;
+}
+
 /*
  * SDIO probe.
  *
@@ -127,6 +187,9 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
                return -EIO;
        }
 
+       /* device tree node parsing and platform specific configuration */
+       mwifiex_sdio_probe_of(&func->dev, card);
+
        if (mwifiex_add_card(card, &add_remove_card_sem, &sdio_ops,
                             MWIFIEX_SDIO)) {
                pr_err("%s: add card failed\n", __func__);
@@ -183,6 +246,13 @@ static int mwifiex_sdio_resume(struct device *dev)
        mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
                          MWIFIEX_SYNC_CMD);
 
+       /* Disable platform specific wakeup interrupt */
+       if (card->plt_wake_cfg && card->plt_wake_cfg->irq_wifi >= 0) {
+               disable_irq_wake(card->plt_wake_cfg->irq_wifi);
+               if (!card->plt_wake_cfg->wake_by_wifi)
+                       disable_irq(card->plt_wake_cfg->irq_wifi);
+       }
+
        return 0;
 }
 
@@ -262,6 +332,13 @@ static int mwifiex_sdio_suspend(struct device *dev)
 
        adapter = card->adapter;
 
+       /* Enable platform specific wakeup interrupt */
+       if (card->plt_wake_cfg && card->plt_wake_cfg->irq_wifi >= 0) {
+               card->plt_wake_cfg->wake_by_wifi = false;
+               enable_irq(card->plt_wake_cfg->irq_wifi);
+               enable_irq_wake(card->plt_wake_cfg->irq_wifi);
+       }
+
        /* Enable the Host Sleep */
        if (!mwifiex_enable_hs(adapter)) {
                mwifiex_dbg(adapter, ERROR,
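Together with the resume hunk earlier in this file, the wake IRQ follows an arm/disarm pattern that keeps the irq enable/disable depth balanced whether or not the interrupt fired during suspend. A sketch assuming only the fields shown above (helper names hypothetical):

    static void wake_irq_arm(struct mwifiex_plt_wake_cfg *cfg)
    {
        cfg->wake_by_wifi = false;
        enable_irq(cfg->irq_wifi);
        enable_irq_wake(cfg->irq_wifi);  /* allow it to wake the system */
    }

    static void wake_irq_disarm(struct mwifiex_plt_wake_cfg *cfg)
    {
        disable_irq_wake(cfg->irq_wifi);
        /* if it fired, the ISR already did disable_irq_nosync() */
        if (!cfg->wake_by_wifi)
            disable_irq(cfg->irq_wifi);
    }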
@@ -1026,13 +1103,12 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
                offset += txlen;
        } while (true);
 
-       sdio_release_host(card->func);
-
        mwifiex_dbg(adapter, MSG,
                    "info: FW download over, size %d bytes\n", offset);
 
        ret = 0;
 done:
+       sdio_release_host(card->func);
        kfree(fwbuf);
        return ret;
 }
@@ -1123,8 +1199,8 @@ static void mwifiex_deaggr_sdio_pkt(struct mwifiex_adapter *adapter,
                                    __func__, pkt_len, blk_size);
                        break;
                }
-               skb_deaggr = mwifiex_alloc_dma_align_buf(pkt_len,
-                                                        GFP_KERNEL | GFP_DMA);
+
+               skb_deaggr = mwifiex_alloc_dma_align_buf(pkt_len, GFP_KERNEL);
                if (!skb_deaggr)
                        break;
                skb_put(skb_deaggr, pkt_len);
@@ -1373,8 +1449,7 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
 
                        /* copy pkt to deaggr buf */
                        skb_deaggr = mwifiex_alloc_dma_align_buf(len_arr[pind],
-                                                                GFP_KERNEL |
-                                                                GFP_DMA);
+                                                                GFP_KERNEL);
                        if (!skb_deaggr) {
                                mwifiex_dbg(adapter, ERROR, "skb allocation failure\t"
                                            "drop pkt len=%d type=%d\n",
index b9fbc5cf6262d8647d6064ddde51211acf749f72..db837f12c547946d1f76310e9b4c4377052eb904 100644 (file)
        a->mpa_rx.start_port = 0;                                       \
 } while (0)
 
+struct mwifiex_plt_wake_cfg {
+       int irq_wifi;
+       bool wake_by_wifi;
+};
+
 /* data structure for SDIO MPA TX */
 struct mwifiex_sdio_mpa_tx {
        /* multiport tx aggregation buffer pointer */
@@ -237,6 +242,8 @@ struct mwifiex_sdio_card_reg {
 struct sdio_mmc_card {
        struct sdio_func *func;
        struct mwifiex_adapter *adapter;
+       struct device_node *plt_of_node;
+       struct mwifiex_plt_wake_cfg *plt_wake_cfg;
 
        const char *firmware;
        const struct mwifiex_sdio_card_reg *reg;
index 30f152601c5774e266219066acee03f2cb3b21a8..e436574b169865344d39600cac714634b029f18d 100644 (file)
@@ -1558,6 +1558,30 @@ static int mwifiex_cmd_robust_coex(struct mwifiex_private *priv,
        return 0;
 }
 
+static int mwifiex_cmd_gtk_rekey_offload(struct mwifiex_private *priv,
+                                        struct host_cmd_ds_command *cmd,
+                                        u16 cmd_action,
+                                        struct cfg80211_gtk_rekey_data *data)
+{
+       struct host_cmd_ds_gtk_rekey_params *rekey = &cmd->params.rekey;
+       u64 rekey_ctr;
+
+       cmd->command = cpu_to_le16(HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG);
+       cmd->size = cpu_to_le16(sizeof(*rekey) + S_DS_GEN);
+
+       rekey->action = cpu_to_le16(cmd_action);
+       if (cmd_action == HostCmd_ACT_GEN_SET) {
+               memcpy(rekey->kek, data->kek, NL80211_KEK_LEN);
+               memcpy(rekey->kck, data->kck, NL80211_KCK_LEN);
+               rekey_ctr = be64_to_cpup((__be64 *)data->replay_ctr);
+               rekey->replay_ctr_low = cpu_to_le32((u32)rekey_ctr);
+               rekey->replay_ctr_high =
+                       cpu_to_le32((u32)((u64)rekey_ctr >> 32));
+       }
+
+       return 0;
+}
+
 static int
 mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
                         struct host_cmd_ds_command *cmd,
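The firmware takes the 64-bit GTK replay counter as two little-endian 32-bit halves, while cfg80211 hands it to the driver as eight big-endian bytes; the be64_to_cpup() plus the two cpu_to_le32() calls above perform that conversion. A freestanding sketch of the same split, with hypothetical names:

    #include <stdint.h>

    /* Convert the 8 big-endian bytes to host order (be64_to_cpup()
     * equivalent), then split into low/high 32-bit halves. */
    static void split_replay_ctr(const uint8_t be[8], uint32_t *lo, uint32_t *hi)
    {
        uint64_t ctr = 0;
        int i;

        for (i = 0; i < 8; i++)
            ctr = (ctr << 8) | be[i];

        *lo = (uint32_t)ctr;
        *hi = (uint32_t)(ctr >> 32);
    }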
@@ -2094,6 +2118,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
                ret = mwifiex_cmd_robust_coex(priv, cmd_ptr, cmd_action,
                                              data_buf);
                break;
+       case HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG:
+               ret = mwifiex_cmd_gtk_rekey_offload(priv, cmd_ptr, cmd_action,
+                                                   data_buf);
+               break;
        default:
                mwifiex_dbg(priv->adapter, ERROR,
                            "PREP_CMD: unknown cmd- %#x\n", cmd_no);
@@ -2134,6 +2162,7 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
        enum state_11d_t state_11d;
        struct mwifiex_ds_11n_tx_cfg tx_cfg;
        u8 sdio_sp_rx_aggr_enable;
+       int data;
 
        if (first_sta) {
                if (priv->adapter->iface_type == MWIFIEX_PCIE) {
@@ -2154,9 +2183,16 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
                 * The cal-data can be read from device tree and/or
                 * a configuration file and downloaded to firmware.
                 */
-               adapter->dt_node =
-                               of_find_node_by_name(NULL, "marvell_cfgdata");
-               if (adapter->dt_node) {
+               if (priv->adapter->iface_type == MWIFIEX_SDIO &&
+                   adapter->dev->of_node) {
+                       adapter->dt_node = adapter->dev->of_node;
+                       if (of_property_read_u32(adapter->dt_node,
+                                                "marvell,wakeup-pin",
+                                                &data) == 0) {
+                               pr_debug("Wakeup pin = 0x%x\n", data);
+                               adapter->hs_cfg.gpio = data;
+                       }
+
                        ret = mwifiex_dnld_dt_cfgdata(priv, adapter->dt_node,
                                                      "marvell,caldata");
                        if (ret)
index d96523e10eb46ef051a1b40a35f408d42dc0404d..d18c7979d723bfa972ed6ebb5ea091e6841b1596 100644 (file)
@@ -44,7 +44,6 @@ static void
 mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
                              struct host_cmd_ds_command *resp)
 {
-       struct cmd_ctrl_node *cmd_node = NULL, *tmp_node;
        struct mwifiex_adapter *adapter = priv->adapter;
        struct host_cmd_ds_802_11_ps_mode_enh *pm;
        unsigned long flags;
@@ -71,17 +70,7 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
                break;
        case HostCmd_CMD_802_11_SCAN:
        case HostCmd_CMD_802_11_SCAN_EXT:
-               /* Cancel all pending scan command */
-               spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
-               list_for_each_entry_safe(cmd_node, tmp_node,
-                                        &adapter->scan_pending_q, list) {
-                       list_del(&cmd_node->list);
-                       spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
-                                              flags);
-                       mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
-                       spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
-               }
-               spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+               mwifiex_cancel_pending_scan_cmd(adapter);
 
                spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
                adapter->scan_processing = false;
@@ -1244,6 +1233,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
        case HostCmd_CMD_ROBUST_COEX:
                ret = mwifiex_ret_robust_coex(priv, resp, data_buf);
                break;
+       case HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG:
+               break;
        default:
                mwifiex_dbg(adapter, ERROR,
                            "CMD_RESP: unknown cmd response %#x\n",
index 070bce401151a522983a3ce20fe6055ae4113612..0104108b4ea2f85360065c28dc2628726cccfd4d 100644 (file)
@@ -147,6 +147,9 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
        mwifiex_stop_net_dev_queue(priv->netdev, adapter);
        if (netif_carrier_ok(priv->netdev))
                netif_carrier_off(priv->netdev);
+
+       mwifiex_send_cmd(priv, HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG,
+                        HostCmd_ACT_GEN_REMOVE, 0, NULL, false);
 }
 
 static int mwifiex_parse_tdls_event(struct mwifiex_private *priv,
index d5c56eb9e985f3be8d8270b5b2b9fbd6f57e6b1a..8e0862657122a412abc76c86ce140f5ec0853b4b 100644 (file)
@@ -146,6 +146,7 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
        size_t beacon_ie_len;
        struct mwifiex_bss_priv *bss_priv = (void *)bss->priv;
        const struct cfg80211_bss_ies *ies;
+       int ret;
 
        rcu_read_lock();
        ies = rcu_dereference(bss->ies);
@@ -189,7 +190,48 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
        if (bss_desc->cap_info_bitmap & WLAN_CAPABILITY_SPECTRUM_MGMT)
                bss_desc->sensed_11h = true;
 
-       return mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc);
+       ret = mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc);
+       if (ret)
+               return ret;
+
+       /* Update HT40 capability based on current channel information */
+       if (bss_desc->bcn_ht_oper && bss_desc->bcn_ht_cap) {
+               u8 ht_param = bss_desc->bcn_ht_oper->ht_param;
+               u8 radio = mwifiex_band_to_radio_type(bss_desc->bss_band);
+               struct ieee80211_supported_band *sband =
+                                               priv->wdev.wiphy->bands[radio];
+               int freq = ieee80211_channel_to_frequency(bss_desc->channel,
+                                                         radio);
+               struct ieee80211_channel *chan =
+                       ieee80211_get_channel(priv->adapter->wiphy, freq);
+
+               switch (ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
+               case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+                       if (chan->flags & IEEE80211_CHAN_NO_HT40PLUS) {
+                               sband->ht_cap.cap &=
+                                       ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+                               sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SGI_40;
+                       } else {
+                               sband->ht_cap.cap |=
+                                       IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+                                       IEEE80211_HT_CAP_SGI_40;
+                       }
+                       break;
+               case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+                       if (chan->flags & IEEE80211_CHAN_NO_HT40MINUS) {
+                               sband->ht_cap.cap &=
+                                       ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+                               sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SGI_40;
+                       } else {
+                               sband->ht_cap.cap |=
+                                       IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+                                       IEEE80211_HT_CAP_SGI_40;
+                       }
+                       break;
+               }
+       }
+
+       return 0;
 }
 
 void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv)
@@ -509,7 +551,8 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
 
        if (priv && priv->sched_scanning) {
 #ifdef CONFIG_PM
-               if (!priv->wdev.wiphy->wowlan_config->nd_config) {
+               if (priv->wdev.wiphy->wowlan_config &&
+                   !priv->wdev.wiphy->wowlan_config->nd_config) {
 #endif
                        mwifiex_dbg(adapter, CMD, "aborting bgscan!\n");
                        mwifiex_stop_bg_scan(priv);
index 150649602e98298bed2975f8444c945e640dfb1a..df9704de07150d82887de1db36c99fd15d3933f7 100644 (file)
@@ -285,7 +285,7 @@ static int mwifiex_tdls_add_vht_oper(struct mwifiex_private *priv,
        else
                usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_bg;
 
-       /* find the minmum bandwith between AP/TDLS peers */
+       /* find the minimum bandwidth between AP/TDLS peers */
        vht_cap = &sta_ptr->tdls_cap.vhtcap;
        supp_chwd_set = GET_VHTCAP_CHWDSET(usr_vht_cap_info);
        peer_supp_chwd_set =
index bf6182b646a5436ad083623fa2c2980ce8019f28..abdd0cf710bf6df2440595dfbe879debf208be9a 100644 (file)
@@ -297,6 +297,13 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
                goto done;
 
        mwifiex_set_trans_start(priv->netdev);
+
+       if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT)
+               atomic_dec_return(&adapter->pending_bridged_pkts);
+
+       if (tx_info->flags & MWIFIEX_BUF_FLAG_AGGR_PKT)
+               goto done;
+
        if (!status) {
                priv->stats.tx_packets++;
                priv->stats.tx_bytes += tx_info->pkt_len;
@@ -306,12 +313,6 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
                priv->stats.tx_errors++;
        }
 
-       if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT)
-               atomic_dec_return(&adapter->pending_bridged_pkts);
-
-       if (tx_info->flags & MWIFIEX_BUF_FLAG_AGGR_PKT)
-               goto done;
-
        if (aggr)
                /* For skb_aggr, do not wake up tx queue */
                goto done;
index 16d95b22fe5c99a2803ceed9527e637c17e75b05..f79d00d1e294425ae4ed43dfa1617aa54fdab895 100644 (file)
@@ -694,7 +694,7 @@ static int mwifiex_uap_custom_ie_prepare(u8 *tlv, void *cmd_buf, u16 *ie_size)
        struct mwifiex_ie_list *ap_ie = cmd_buf;
        struct mwifiex_ie_types_header *tlv_ie = (void *)tlv;
 
-       if (!ap_ie || !ap_ie->len || !ap_ie->ie_list)
+       if (!ap_ie || !ap_ie->len)
                return -1;
 
        *ie_size += le16_to_cpu(ap_ie->len) +
@@ -816,7 +816,7 @@ void mwifiex_uap_set_channel(struct mwifiex_private *priv,
                                                     chandef.chan->center_freq);
 
        /* Set appropriate bands */
-       if (chandef.chan->band == IEEE80211_BAND_2GHZ) {
+       if (chandef.chan->band == NL80211_BAND_2GHZ) {
                bss_cfg->band_cfg = BAND_CONFIG_BG;
                config_bands = BAND_B | BAND_G;
 
index 52f7981a8afcc711fa9cc1eb368175f319d9094b..666e91af59d773dc0b83061d84f4a598cf66497e 100644 (file)
@@ -102,6 +102,7 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
        int hdr_chop;
        struct ethhdr *p_ethhdr;
        struct mwifiex_sta_node *src_node;
+       int index;
 
        uap_rx_pd = (struct uap_rxpd *)(skb->data);
        rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
@@ -208,10 +209,15 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
        }
 
        __net_timestamp(skb);
+
+       index = mwifiex_1d_to_wmm_queue[skb->priority];
+       atomic_inc(&priv->wmm_tx_pending[index]);
        mwifiex_wmm_add_buf_txqueue(priv, skb);
        atomic_inc(&adapter->tx_pending);
        atomic_inc(&adapter->pending_bridged_pkts);
 
+       mwifiex_queue_main_work(priv->adapter);
+
        return;
 }
 
@@ -263,6 +269,96 @@ int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
        return mwifiex_process_rx_packet(priv, skb);
 }
 
+int mwifiex_uap_recv_packet(struct mwifiex_private *priv,
+                           struct sk_buff *skb)
+{
+       struct mwifiex_adapter *adapter = priv->adapter;
+       struct mwifiex_sta_node *src_node;
+       struct ethhdr *p_ethhdr;
+       struct sk_buff *skb_uap;
+       struct mwifiex_txinfo *tx_info;
+
+       if (!skb)
+               return -1;
+
+       p_ethhdr = (void *)skb->data;
+       src_node = mwifiex_get_sta_entry(priv, p_ethhdr->h_source);
+       if (src_node) {
+               src_node->stats.last_rx = jiffies;
+               src_node->stats.rx_bytes += skb->len;
+               src_node->stats.rx_packets++;
+       }
+
+       skb->dev = priv->netdev;
+       skb->protocol = eth_type_trans(skb, priv->netdev);
+       skb->ip_summed = CHECKSUM_NONE;
+
+       /* This is required only in the case of 11n on USB/PCIE, as there
+        * we allocate a 4K buffer only for 11N (to be able to receive 4K
+        * AMSDU packets). For SDIO we allocate buffers based on the size
+        * of the packet, so this is not needed.
+        *
+        * We modify the truesize here because our allocation for each
+        * skb is 4K while we only receive 2K packets; this causes the
+        * kernel to start dropping packets when the application has
+        * allocated a buffer based on the 2K size. E.g. if a 64K packet
+        * is received in IP fragments and the application allocates 64K
+        * for it, the accounted size almost doubles, because we allocate
+        * each 1.5K fragment in a 4K skb and pass it up. As soon as the
+        * 64K limit is hit, the kernel starts to drop the rest of the
+        * fragments. Currently we fail the Filesndl-ht.scr script for
+        * UDP, hence this fix.
+        */
+       if ((adapter->iface_type == MWIFIEX_USB ||
+            adapter->iface_type == MWIFIEX_PCIE) &&
+           (skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE))
+               skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
+
+       if (is_multicast_ether_addr(p_ethhdr->h_dest) ||
+           mwifiex_get_sta_entry(priv, p_ethhdr->h_dest)) {
+               if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN)
+                       skb_uap =
+                       skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
+               else
+                       skb_uap = skb_copy(skb, GFP_ATOMIC);
+
+               if (likely(skb_uap)) {
+                       tx_info = MWIFIEX_SKB_TXCB(skb_uap);
+                       memset(tx_info, 0, sizeof(*tx_info));
+                       tx_info->bss_num = priv->bss_num;
+                       tx_info->bss_type = priv->bss_type;
+                       tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;
+                       __net_timestamp(skb_uap);
+                       mwifiex_wmm_add_buf_txqueue(priv, skb_uap);
+                       atomic_inc(&adapter->tx_pending);
+                       atomic_inc(&adapter->pending_bridged_pkts);
+                       if ((atomic_read(&adapter->pending_bridged_pkts) >=
+                                       MWIFIEX_BRIDGED_PKTS_THR_HIGH)) {
+                               mwifiex_dbg(adapter, ERROR,
+                                           "Tx: Bridge packet limit reached. Drop packet!\n");
+                               mwifiex_uap_cleanup_tx_queues(priv);
+                       }
+
+               } else {
+                       mwifiex_dbg(adapter, ERROR, "failed to allocate skb_uap");
+               }
+
+               mwifiex_queue_main_work(adapter);
+               /* Don't forward Intra-BSS unicast packet to upper layer*/
+               if (mwifiex_get_sta_entry(priv, p_ethhdr->h_dest))
+                       return 0;
+       }
+
+       /* Forward multicast/broadcast packet to upper layer*/
+       if (in_interrupt())
+               netif_rx(skb);
+       else
+               netif_rx_ni(skb);
+
+       return 0;
+}
+
 /*
  * This function processes the packet received on AP interface.
  *
index 05108618430da866e680ac51498f229e1be8f4ea..0857575c5c39fe45494b02e6437c40da84ac99ed 100644 (file)
@@ -995,7 +995,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
 {
        int ret = 0;
        u8 *firmware = fw->fw_buf, *recv_buff;
-       u32 retries = USB8XXX_FW_MAX_RETRY, dlen;
+       u32 retries = USB8XXX_FW_MAX_RETRY + 1;
+       u32 dlen;
        u32 fw_seqnum = 0, tlen = 0, dnld_cmd = 0;
        struct fw_data *fwdata;
        struct fw_sync_header sync_fw;
@@ -1017,8 +1018,10 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
 
        /* Allocate memory for receive */
        recv_buff = kzalloc(FW_DNLD_RX_BUF_SIZE, GFP_KERNEL);
-       if (!recv_buff)
+       if (!recv_buff) {
+               ret = -ENOMEM;
                goto cleanup;
+       }
 
        do {
                /* Send pseudo data to check winner status first */
@@ -1041,7 +1044,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
                }
 
                /* If the send/receive fails or CRC occurs then retry */
-               while (retries--) {
+               while (--retries) {
                        u8 *buf = (u8 *)fwdata;
                        u32 len = FW_DATA_XMIT_SIZE;
 
@@ -1101,7 +1104,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
                                continue;
                        }
 
-                       retries = USB8XXX_FW_MAX_RETRY;
+                       retries = USB8XXX_FW_MAX_RETRY + 1;
                        break;
                }
                fw_seqnum++;
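The retry-counter change in this file fixes an off-by-one: with u32 retries = USB8XXX_FW_MAX_RETRY and while (retries--), the counter wraps to 0xffffffff once all attempts fail, so a later zero-test on retries (as the surrounding, unshown code presumably performs) cannot detect exhaustion. Initializing to MAX + 1 and pre-decrementing still allows MAX attempts but leaves retries at exactly 0 on failure. A toy demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t a = 3;      /* old form */
        uint32_t b = 3 + 1;  /* new form */

        while (a--)  /* body runs 3 times; a then wraps to 0xffffffff */
            ;
        while (--b)  /* body runs 3 times; b ends at exactly 0 */
            ;
        printf("old=%u new=%u\n", a, b);  /* old=4294967295 new=0 */
        return 0;
    }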
index 088429d0a634d8c4372d45e2dc1cd3b7d2fabf1e..b1b400b59d86456ae5b75a66ba2f227b3dcb8b1c 100644 (file)
@@ -346,20 +346,20 @@ struct mwl8k_sta {
 #define MWL8K_STA(_sta) ((struct mwl8k_sta *)&((_sta)->drv_priv))
 
 static const struct ieee80211_channel mwl8k_channels_24[] = {
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2412, .hw_value = 1, },
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2417, .hw_value = 2, },
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2422, .hw_value = 3, },
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2427, .hw_value = 4, },
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2432, .hw_value = 5, },
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2437, .hw_value = 6, },
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2442, .hw_value = 7, },
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2447, .hw_value = 8, },
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2452, .hw_value = 9, },
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2457, .hw_value = 10, },
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2462, .hw_value = 11, },
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2467, .hw_value = 12, },
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2472, .hw_value = 13, },
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2484, .hw_value = 14, },
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2412, .hw_value = 1, },
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2417, .hw_value = 2, },
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2422, .hw_value = 3, },
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2427, .hw_value = 4, },
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2432, .hw_value = 5, },
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2437, .hw_value = 6, },
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2442, .hw_value = 7, },
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2447, .hw_value = 8, },
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2452, .hw_value = 9, },
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2457, .hw_value = 10, },
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2462, .hw_value = 11, },
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2467, .hw_value = 12, },
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2472, .hw_value = 13, },
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2484, .hw_value = 14, },
 };
 
 static const struct ieee80211_rate mwl8k_rates_24[] = {
@@ -379,10 +379,10 @@ static const struct ieee80211_rate mwl8k_rates_24[] = {
 };
 
 static const struct ieee80211_channel mwl8k_channels_50[] = {
-       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5180, .hw_value = 36, },
-       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5200, .hw_value = 40, },
-       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5220, .hw_value = 44, },
-       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5240, .hw_value = 48, },
+       { .band = NL80211_BAND_5GHZ, .center_freq = 5180, .hw_value = 36, },
+       { .band = NL80211_BAND_5GHZ, .center_freq = 5200, .hw_value = 40, },
+       { .band = NL80211_BAND_5GHZ, .center_freq = 5220, .hw_value = 44, },
+       { .band = NL80211_BAND_5GHZ, .center_freq = 5240, .hw_value = 48, },
 };
 
 static const struct ieee80211_rate mwl8k_rates_50[] = {
@@ -1010,11 +1010,11 @@ mwl8k_rxd_ap_process(void *_rxd, struct ieee80211_rx_status *status,
        }
 
        if (rxd->channel > 14) {
-               status->band = IEEE80211_BAND_5GHZ;
+               status->band = NL80211_BAND_5GHZ;
                if (!(status->flag & RX_FLAG_HT))
                        status->rate_idx -= 5;
        } else {
-               status->band = IEEE80211_BAND_2GHZ;
+               status->band = NL80211_BAND_2GHZ;
        }
        status->freq = ieee80211_channel_to_frequency(rxd->channel,
                                                      status->band);
@@ -1118,11 +1118,11 @@ mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status,
                status->flag |= RX_FLAG_HT;
 
        if (rxd->channel > 14) {
-               status->band = IEEE80211_BAND_5GHZ;
+               status->band = NL80211_BAND_5GHZ;
                if (!(status->flag & RX_FLAG_HT))
                        status->rate_idx -= 5;
        } else {
-               status->band = IEEE80211_BAND_2GHZ;
+               status->band = NL80211_BAND_2GHZ;
        }
        status->freq = ieee80211_channel_to_frequency(rxd->channel,
                                                      status->band);
@@ -2300,13 +2300,13 @@ static void mwl8k_setup_2ghz_band(struct ieee80211_hw *hw)
        BUILD_BUG_ON(sizeof(priv->rates_24) != sizeof(mwl8k_rates_24));
        memcpy(priv->rates_24, mwl8k_rates_24, sizeof(mwl8k_rates_24));
 
-       priv->band_24.band = IEEE80211_BAND_2GHZ;
+       priv->band_24.band = NL80211_BAND_2GHZ;
        priv->band_24.channels = priv->channels_24;
        priv->band_24.n_channels = ARRAY_SIZE(mwl8k_channels_24);
        priv->band_24.bitrates = priv->rates_24;
        priv->band_24.n_bitrates = ARRAY_SIZE(mwl8k_rates_24);
 
-       hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band_24;
+       hw->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band_24;
 }
 
 static void mwl8k_setup_5ghz_band(struct ieee80211_hw *hw)
@@ -2319,13 +2319,13 @@ static void mwl8k_setup_5ghz_band(struct ieee80211_hw *hw)
        BUILD_BUG_ON(sizeof(priv->rates_50) != sizeof(mwl8k_rates_50));
        memcpy(priv->rates_50, mwl8k_rates_50, sizeof(mwl8k_rates_50));
 
-       priv->band_50.band = IEEE80211_BAND_5GHZ;
+       priv->band_50.band = NL80211_BAND_5GHZ;
        priv->band_50.channels = priv->channels_50;
        priv->band_50.n_channels = ARRAY_SIZE(mwl8k_channels_50);
        priv->band_50.bitrates = priv->rates_50;
        priv->band_50.n_bitrates = ARRAY_SIZE(mwl8k_rates_50);
 
-       hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &priv->band_50;
+       hw->wiphy->bands[NL80211_BAND_5GHZ] = &priv->band_50;
 }
 
 /*
@@ -2876,9 +2876,9 @@ static int mwl8k_cmd_tx_power(struct ieee80211_hw *hw,
        cmd->header.length = cpu_to_le16(sizeof(*cmd));
        cmd->action = cpu_to_le16(MWL8K_CMD_SET_LIST);
 
-       if (channel->band == IEEE80211_BAND_2GHZ)
+       if (channel->band == NL80211_BAND_2GHZ)
                cmd->band = cpu_to_le16(0x1);
-       else if (channel->band == IEEE80211_BAND_5GHZ)
+       else if (channel->band == NL80211_BAND_5GHZ)
                cmd->band = cpu_to_le16(0x4);
 
        cmd->channel = cpu_to_le16(channel->hw_value);
@@ -3067,7 +3067,7 @@ static int freq_to_idx(struct mwl8k_priv *priv, int freq)
        struct ieee80211_supported_band *sband;
        int band, ch, idx = 0;
 
-       for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
+       for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
                sband = priv->hw->wiphy->bands[band];
                if (!sband)
                        continue;
@@ -3149,9 +3149,9 @@ static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
        cmd->action = cpu_to_le16(MWL8K_CMD_SET);
        cmd->current_channel = channel->hw_value;
 
-       if (channel->band == IEEE80211_BAND_2GHZ)
+       if (channel->band == NL80211_BAND_2GHZ)
                cmd->channel_flags |= cpu_to_le32(0x00000001);
-       else if (channel->band == IEEE80211_BAND_5GHZ)
+       else if (channel->band == NL80211_BAND_5GHZ)
                cmd->channel_flags |= cpu_to_le32(0x00000004);
 
        if (!priv->sw_scan_start) {
@@ -4094,10 +4094,10 @@ static int mwl8k_cmd_set_new_stn_add(struct ieee80211_hw *hw,
        memcpy(cmd->mac_addr, sta->addr, ETH_ALEN);
        cmd->stn_id = cpu_to_le16(sta->aid);
        cmd->action = cpu_to_le16(MWL8K_STA_ACTION_ADD);
-       if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
-               rates = sta->supp_rates[IEEE80211_BAND_2GHZ];
+       if (hw->conf.chandef.chan->band == NL80211_BAND_2GHZ)
+               rates = sta->supp_rates[NL80211_BAND_2GHZ];
        else
-               rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5;
+               rates = sta->supp_rates[NL80211_BAND_5GHZ] << 5;
        cmd->legacy_rates = cpu_to_le32(rates);
        if (sta->ht_cap.ht_supported) {
                cmd->ht_rates[0] = sta->ht_cap.mcs.rx_mask[0];
@@ -4529,10 +4529,10 @@ static int mwl8k_cmd_update_stadb_add(struct ieee80211_hw *hw,
        p->ht_caps = cpu_to_le16(sta->ht_cap.cap);
        p->extended_ht_caps = (sta->ht_cap.ampdu_factor & 3) |
                ((sta->ht_cap.ampdu_density & 7) << 2);
-       if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
-               rates = sta->supp_rates[IEEE80211_BAND_2GHZ];
+       if (hw->conf.chandef.chan->band == NL80211_BAND_2GHZ)
+               rates = sta->supp_rates[NL80211_BAND_2GHZ];
        else
-               rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5;
+               rates = sta->supp_rates[NL80211_BAND_5GHZ] << 5;
        legacy_rate_mask_to_array(p->legacy_rates, rates);
        memcpy(p->ht_rates, sta->ht_cap.mcs.rx_mask, 16);
        p->interop = 1;
@@ -5010,11 +5010,11 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                        goto out;
                }
 
-               if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) {
-                       ap_legacy_rates = ap->supp_rates[IEEE80211_BAND_2GHZ];
+               if (hw->conf.chandef.chan->band == NL80211_BAND_2GHZ) {
+                       ap_legacy_rates = ap->supp_rates[NL80211_BAND_2GHZ];
                } else {
                        ap_legacy_rates =
-                               ap->supp_rates[IEEE80211_BAND_5GHZ] << 5;
+                               ap->supp_rates[NL80211_BAND_5GHZ] << 5;
                }
                memcpy(ap_mcs_rates, ap->ht_cap.mcs.rx_mask, 16);
 
@@ -5042,7 +5042,7 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                                        idx--;
 
                                if (hw->conf.chandef.chan->band ==
-                                   IEEE80211_BAND_2GHZ)
+                                   NL80211_BAND_2GHZ)
                                        rate = mwl8k_rates_24[idx].hw_value;
                                else
                                        rate = mwl8k_rates_50[idx].hw_value;
@@ -5116,7 +5116,7 @@ mwl8k_bss_info_changed_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                if (idx)
                        idx--;
 
-               if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
+               if (hw->conf.chandef.chan->band == NL80211_BAND_2GHZ)
                        rate = mwl8k_rates_24[idx].hw_value;
                else
                        rate = mwl8k_rates_50[idx].hw_value;
@@ -5388,7 +5388,7 @@ static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx,
        struct ieee80211_supported_band *sband;
 
        if (priv->ap_fw) {
-               sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+               sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
 
                if (sband && idx >= sband->n_channels) {
                        idx -= sband->n_channels;
@@ -5396,7 +5396,7 @@ static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx,
                }
 
                if (!sband)
-                       sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];
+                       sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
 
                if (!sband || idx >= sband->n_channels)
                        return -ENOENT;
index 26190fd33407bc5e0e33eff25d223d1956ebb16e..8fa78d7156bebc9efc5c5050cd750991967e83e9 100644 (file)
@@ -469,7 +469,7 @@ struct mt7601u_dev *mt7601u_alloc_device(struct device *pdev)
 }
 
 #define CHAN2G(_idx, _freq) {                  \
-       .band = IEEE80211_BAND_2GHZ,            \
+       .band = NL80211_BAND_2GHZ,              \
        .center_freq = (_freq),                 \
        .hw_value = (_idx),                     \
        .max_power = 30,                        \
@@ -563,7 +563,7 @@ mt76_init_sband_2g(struct mt7601u_dev *dev)
 {
        dev->sband_2g = devm_kzalloc(dev->dev, sizeof(*dev->sband_2g),
                                     GFP_KERNEL);
-       dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = dev->sband_2g;
+       dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = dev->sband_2g;
 
        WARN_ON(dev->ee->reg.start - 1 + dev->ee->reg.num >
                ARRAY_SIZE(mt76_channels_2ghz));
index 7fa0128de7e310be2978b35a97746bc011acb324..bf3f0a39908c816efa508db2458ec276a15679a1 100644 (file)
@@ -777,7 +777,7 @@ static int rt2800_agc_to_rssi(struct rt2x00_dev *rt2x00dev, u32 rxwi_w2)
        u8 offset1;
        u8 offset2;
 
-       if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
+       if (rt2x00dev->curr_band == NL80211_BAND_2GHZ) {
                rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &eeprom);
                offset0 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET0);
                offset1 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET1);
@@ -1174,7 +1174,7 @@ static void rt2800_brightness_set(struct led_classdev *led_cdev,
            container_of(led_cdev, struct rt2x00_led, led_dev);
        unsigned int enabled = brightness != LED_OFF;
        unsigned int bg_mode =
-           (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_2GHZ);
+           (enabled && led->rt2x00dev->curr_band == NL80211_BAND_2GHZ);
        unsigned int polarity =
                rt2x00_get_field16(led->rt2x00dev->led_mcu_reg,
                                   EEPROM_FREQ_LED_POLARITY);
@@ -1741,7 +1741,7 @@ static void rt2800_config_3572bt_ant(struct rt2x00_dev *rt2x00dev)
        u8 led_ctrl, led_g_mode, led_r_mode;
 
        rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
-       if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
+       if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
                rt2x00_set_field32(&reg, GPIO_SWITCH_0, 1);
                rt2x00_set_field32(&reg, GPIO_SWITCH_1, 1);
        } else {
@@ -1844,7 +1844,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
                    rt2x00_has_cap_bt_coexist(rt2x00dev)) {
                        rt2x00_set_field8(&r3, BBP3_RX_ADC, 1);
                        rt2x00_set_field8(&r3, BBP3_RX_ANTENNA,
-                               rt2x00dev->curr_band == IEEE80211_BAND_5GHZ);
+                               rt2x00dev->curr_band == NL80211_BAND_5GHZ);
                        rt2800_set_ant_diversity(rt2x00dev, ANTENNA_B);
                } else {
                        rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 1);
@@ -3451,7 +3451,7 @@ static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev)
         * Matching Delta value   -4   -3   -2   -1    0   +1   +2   +3   +4
         * Example TSSI bounds  0xF0 0xD0 0xB5 0xA0 0x88 0x45 0x25 0x15 0x00
         */
-       if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
+       if (rt2x00dev->curr_band == NL80211_BAND_2GHZ) {
                rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG1, &eeprom);
                tssi_bounds[0] = rt2x00_get_field16(eeprom,
                                        EEPROM_TSSI_BOUND_BG1_MINUS4);
@@ -3546,7 +3546,7 @@ static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev)
 }
 
 static int rt2800_get_txpower_bw_comp(struct rt2x00_dev *rt2x00dev,
-                                     enum ieee80211_band band)
+                                     enum nl80211_band band)
 {
        u16 eeprom;
        u8 comp_en;
@@ -3562,7 +3562,7 @@ static int rt2800_get_txpower_bw_comp(struct rt2x00_dev *rt2x00dev,
            !test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
                return 0;
 
-       if (band == IEEE80211_BAND_2GHZ) {
+       if (band == NL80211_BAND_2GHZ) {
                comp_en = rt2x00_get_field16(eeprom,
                                 EEPROM_TXPOWER_DELTA_ENABLE_2G);
                if (comp_en) {
@@ -3611,7 +3611,7 @@ static int rt2800_get_txpower_reg_delta(struct rt2x00_dev *rt2x00dev,
 }
 
 static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
-                                  enum ieee80211_band band, int power_level,
+                                  enum nl80211_band band, int power_level,
                                   u8 txpower, int delta)
 {
        u16 eeprom;
@@ -3639,7 +3639,7 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
                rt2800_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER,
                                   &eeprom);
 
-               if (band == IEEE80211_BAND_2GHZ)
+               if (band == NL80211_BAND_2GHZ)
                        eirp_txpower_criterion = rt2x00_get_field16(eeprom,
                                                 EEPROM_EIRP_MAX_TX_POWER_2GHZ);
                else
@@ -3686,7 +3686,7 @@ static void rt2800_config_txpower_rt3593(struct rt2x00_dev *rt2x00dev,
        u16 eeprom;
        u32 regs[TX_PWR_CFG_IDX_COUNT];
        unsigned int offset;
-       enum ieee80211_band band = chan->band;
+       enum nl80211_band band = chan->band;
        int delta;
        int i;
 
@@ -3697,7 +3697,7 @@ static void rt2800_config_txpower_rt3593(struct rt2x00_dev *rt2x00dev,
        /* calculate temperature compensation delta */
        delta = rt2800_get_gain_calibration_delta(rt2x00dev);
 
-       if (band == IEEE80211_BAND_5GHZ)
+       if (band == NL80211_BAND_5GHZ)
                offset = 16;
        else
                offset = 0;
@@ -4055,7 +4055,7 @@ static void rt2800_config_txpower_rt3593(struct rt2x00_dev *rt2x00dev,
        for (i = 0; i < TX_PWR_CFG_IDX_COUNT; i++)
                rt2x00_dbg(rt2x00dev,
                           "band:%cGHz, BW:%c0MHz, TX_PWR_CFG_%d%s = %08lx\n",
-                          (band == IEEE80211_BAND_5GHZ) ? '5' : '2',
+                          (band == NL80211_BAND_5GHZ) ? '5' : '2',
                           (test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags)) ?
                                                                '4' : '2',
                           (i > TX_PWR_CFG_9_IDX) ?
@@ -4081,7 +4081,7 @@ static void rt2800_config_txpower_rt28xx(struct rt2x00_dev *rt2x00dev,
        u16 eeprom;
        u32 reg, offset;
        int i, is_rate_b, delta, power_ctrl;
-       enum ieee80211_band band = chan->band;
+       enum nl80211_band band = chan->band;
 
        /*
         * Calculate HT40 compensation. For 40MHz we need to add or subtract
@@ -4436,7 +4436,7 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
 {
        u8 vgc;
 
-       if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
+       if (rt2x00dev->curr_band == NL80211_BAND_2GHZ) {
                if (rt2x00_rt(rt2x00dev, RT3070) ||
                    rt2x00_rt(rt2x00dev, RT3071) ||
                    rt2x00_rt(rt2x00dev, RT3090) ||
@@ -4511,7 +4511,7 @@ void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
        case RT3572:
        case RT3593:
                if (qual->rssi > -65) {
-                       if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ)
+                       if (rt2x00dev->curr_band == NL80211_BAND_2GHZ)
                                vgc += 0x20;
                        else
                                vgc += 0x10;
@@ -7492,6 +7492,10 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
        if (!rt2x00_is_usb(rt2x00dev))
                ieee80211_hw_set(rt2x00dev->hw, HOST_BROADCAST_PS_BUFFERING);
 
+       /* Set MFP if HW crypto is disabled. */
+       if (rt2800_hwcrypt_disabled(rt2x00dev))
+               ieee80211_hw_set(rt2x00dev->hw, MFP_CAPABLE);
+
        SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
        SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
                                rt2800_eeprom_addr(rt2x00dev,
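
The new MFP_CAPABLE hunk advertises management frame protection only when hardware crypto is disabled: with all ciphers handled in software by mac80211, protected management frames need no special hardware support. A minimal sketch, assuming a driver-private nohwcrypt flag:

    #include <net/mac80211.h>

    /* When encryption is left to mac80211 in software, CCMP works on
     * management frames too, so MFP can be advertised. */
    static void example_setup_mfp(struct ieee80211_hw *hw, bool nohwcrypt)
    {
            if (nohwcrypt)
                    ieee80211_hw_set(hw, MFP_CAPABLE);
    }
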
index 6418620f95ff62a7e1a562b7cb29d6e306d2d4ad..f68d492129c6f7189e5116486ffbd2c97c866ec2 100644
@@ -38,6 +38,7 @@
 #include <linux/kfifo.h>
 #include <linux/hrtimer.h>
 #include <linux/average.h>
+#include <linux/usb.h>
 
 #include <net/mac80211.h>
 
@@ -752,8 +753,8 @@ struct rt2x00_dev {
         * IEEE80211 control structure.
         */
        struct ieee80211_hw *hw;
-       struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
-       enum ieee80211_band curr_band;
+       struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
+       enum nl80211_band curr_band;
        int curr_freq;
 
        /*
@@ -1002,6 +1003,8 @@ struct rt2x00_dev {
 
        /* Extra TX headroom required for alignment purposes. */
        unsigned int extra_tx_headroom;
+
+       struct usb_anchor *anchor;
 };
 
 struct rt2x00_bar_list_entry {
index 5639ed816813b21d886fe4e6011af7677e9b1aca..4e0c5653054bbdc9d313872760c2994f681fd191 100644
@@ -911,7 +911,7 @@ static void rt2x00lib_channel(struct ieee80211_channel *entry,
                              const int value)
 {
        /* XXX: this assumption about the band is wrong for 802.11j */
-       entry->band = channel <= 14 ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+       entry->band = channel <= 14 ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
        entry->center_freq = ieee80211_channel_to_frequency(channel,
                                                            entry->band);
        entry->hw_value = value;
@@ -975,13 +975,13 @@ static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev,
         * Channels: 2.4 GHz
         */
        if (spec->supported_bands & SUPPORT_BAND_2GHZ) {
-               rt2x00dev->bands[IEEE80211_BAND_2GHZ].n_channels = 14;
-               rt2x00dev->bands[IEEE80211_BAND_2GHZ].n_bitrates = num_rates;
-               rt2x00dev->bands[IEEE80211_BAND_2GHZ].channels = channels;
-               rt2x00dev->bands[IEEE80211_BAND_2GHZ].bitrates = rates;
-               hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
-                   &rt2x00dev->bands[IEEE80211_BAND_2GHZ];
-               memcpy(&rt2x00dev->bands[IEEE80211_BAND_2GHZ].ht_cap,
+               rt2x00dev->bands[NL80211_BAND_2GHZ].n_channels = 14;
+               rt2x00dev->bands[NL80211_BAND_2GHZ].n_bitrates = num_rates;
+               rt2x00dev->bands[NL80211_BAND_2GHZ].channels = channels;
+               rt2x00dev->bands[NL80211_BAND_2GHZ].bitrates = rates;
+               hw->wiphy->bands[NL80211_BAND_2GHZ] =
+                   &rt2x00dev->bands[NL80211_BAND_2GHZ];
+               memcpy(&rt2x00dev->bands[NL80211_BAND_2GHZ].ht_cap,
                       &spec->ht, sizeof(spec->ht));
        }
 
@@ -991,15 +991,15 @@ static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev,
         * Channels: OFDM, UNII, HiperLAN2.
         */
        if (spec->supported_bands & SUPPORT_BAND_5GHZ) {
-               rt2x00dev->bands[IEEE80211_BAND_5GHZ].n_channels =
+               rt2x00dev->bands[NL80211_BAND_5GHZ].n_channels =
                    spec->num_channels - 14;
-               rt2x00dev->bands[IEEE80211_BAND_5GHZ].n_bitrates =
+               rt2x00dev->bands[NL80211_BAND_5GHZ].n_bitrates =
                    num_rates - 4;
-               rt2x00dev->bands[IEEE80211_BAND_5GHZ].channels = &channels[14];
-               rt2x00dev->bands[IEEE80211_BAND_5GHZ].bitrates = &rates[4];
-               hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
-                   &rt2x00dev->bands[IEEE80211_BAND_5GHZ];
-               memcpy(&rt2x00dev->bands[IEEE80211_BAND_5GHZ].ht_cap,
+               rt2x00dev->bands[NL80211_BAND_5GHZ].channels = &channels[14];
+               rt2x00dev->bands[NL80211_BAND_5GHZ].bitrates = &rates[4];
+               hw->wiphy->bands[NL80211_BAND_5GHZ] =
+                   &rt2x00dev->bands[NL80211_BAND_5GHZ];
+               memcpy(&rt2x00dev->bands[NL80211_BAND_5GHZ].ht_cap,
                       &spec->ht, sizeof(spec->ht));
        }
 
@@ -1016,11 +1016,11 @@ static void rt2x00lib_remove_hw(struct rt2x00_dev *rt2x00dev)
        if (test_bit(DEVICE_STATE_REGISTERED_HW, &rt2x00dev->flags))
                ieee80211_unregister_hw(rt2x00dev->hw);
 
-       if (likely(rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ])) {
-               kfree(rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels);
-               kfree(rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->bitrates);
-               rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = NULL;
-               rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
+       if (likely(rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ])) {
+               kfree(rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels);
+               kfree(rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ]->bitrates);
+               rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
+               rt2x00dev->hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL;
        }
 
        kfree(rt2x00dev->spec.channels_info);
@@ -1422,11 +1422,14 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
        cancel_work_sync(&rt2x00dev->intf_work);
        cancel_delayed_work_sync(&rt2x00dev->autowakeup_work);
        cancel_work_sync(&rt2x00dev->sleep_work);
+#ifdef CONFIG_RT2X00_LIB_USB
        if (rt2x00_is_usb(rt2x00dev)) {
+               usb_kill_anchored_urbs(rt2x00dev->anchor);
                hrtimer_cancel(&rt2x00dev->txstatus_timer);
                cancel_work_sync(&rt2x00dev->rxdone_work);
                cancel_work_sync(&rt2x00dev->txdone_work);
        }
+#endif
        if (rt2x00dev->workqueue)
                destroy_workqueue(rt2x00dev->workqueue);
 
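
The band registration above slices one flat channel/rate array: entries 0-13 of the channel list are the 2.4 GHz channels and the first four rates are the CCK rates, so the 5 GHz band simply starts at channels[14] and bitrates[4]. A condensed sketch of that layout, with parameter names that are ours rather than the driver's:

    #include <net/mac80211.h>

    /* "channels", "rates", "num_channels" and "num_rates" are assumed
     * to have been filled in by the caller, 2.4 GHz entries first. */
    static void register_bands(struct ieee80211_hw *hw,
                               struct ieee80211_supported_band *bands,
                               struct ieee80211_channel *channels, int num_channels,
                               struct ieee80211_rate *rates, int num_rates)
    {
            bands[NL80211_BAND_2GHZ].channels = channels;
            bands[NL80211_BAND_2GHZ].n_channels = 14;
            bands[NL80211_BAND_2GHZ].bitrates = rates;
            bands[NL80211_BAND_2GHZ].n_bitrates = num_rates;
            hw->wiphy->bands[NL80211_BAND_2GHZ] = &bands[NL80211_BAND_2GHZ];

            bands[NL80211_BAND_5GHZ].channels = &channels[14];
            bands[NL80211_BAND_5GHZ].n_channels = num_channels - 14;
            bands[NL80211_BAND_5GHZ].bitrates = &rates[4];  /* skip CCK rates */
            bands[NL80211_BAND_5GHZ].n_bitrates = num_rates - 4;
            hw->wiphy->bands[NL80211_BAND_5GHZ] = &bands[NL80211_BAND_5GHZ];
    }
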
index 7627af6098eb5896420ef99bbfcc5c022f116d39..7cf26c6124d12e8246de910cab351ca902a945ea 100644
@@ -171,8 +171,11 @@ static void rt2x00usb_register_read_async_cb(struct urb *urb)
 {
        struct rt2x00_async_read_data *rd = urb->context;
        if (rd->callback(rd->rt2x00dev, urb->status, le32_to_cpu(rd->reg))) {
-               if (usb_submit_urb(urb, GFP_ATOMIC) < 0)
+               usb_anchor_urb(urb, rd->rt2x00dev->anchor);
+               if (usb_submit_urb(urb, GFP_ATOMIC) < 0) {
+                       usb_unanchor_urb(urb);
                        kfree(rd);
+               }
        } else
                kfree(rd);
 }
@@ -206,8 +209,11 @@ void rt2x00usb_register_read_async(struct rt2x00_dev *rt2x00dev,
        usb_fill_control_urb(urb, usb_dev, usb_rcvctrlpipe(usb_dev, 0),
                             (unsigned char *)(&rd->cr), &rd->reg, sizeof(rd->reg),
                             rt2x00usb_register_read_async_cb, rd);
-       if (usb_submit_urb(urb, GFP_ATOMIC) < 0)
+       usb_anchor_urb(urb, rt2x00dev->anchor);
+       if (usb_submit_urb(urb, GFP_ATOMIC) < 0) {
+               usb_unanchor_urb(urb);
                kfree(rd);
+       }
        usb_free_urb(urb);
 }
 EXPORT_SYMBOL_GPL(rt2x00usb_register_read_async);
@@ -313,8 +319,10 @@ static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data)
                          entry->skb->data, length,
                          rt2x00usb_interrupt_txdone, entry);
 
+       usb_anchor_urb(entry_priv->urb, rt2x00dev->anchor);
        status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
        if (status) {
+               usb_unanchor_urb(entry_priv->urb);
                if (status == -ENODEV)
                        clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
                set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
@@ -402,8 +410,10 @@ static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data)
                          entry->skb->data, entry->skb->len,
                          rt2x00usb_interrupt_rxdone, entry);
 
+       usb_anchor_urb(entry_priv->urb, rt2x00dev->anchor);
        status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
        if (status) {
+               usb_unanchor_urb(entry_priv->urb);
                if (status == -ENODEV)
                        clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
                set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
@@ -818,6 +828,13 @@ int rt2x00usb_probe(struct usb_interface *usb_intf,
        if (retval)
                goto exit_free_reg;
 
+       rt2x00dev->anchor = devm_kmalloc(&usb_dev->dev,
+                                       sizeof(struct usb_anchor),
+                                       GFP_KERNEL);
+       if (!rt2x00dev->anchor)
+               goto exit_free_reg;
+
+       init_usb_anchor(rt2x00dev->anchor);
        return 0;
 
 exit_free_reg:
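
These rt2x00usb hunks all follow the same pattern: anchor the URB before usb_submit_urb(), unanchor it again on a failed submit so the anchor never holds a dead URB, and let rt2x00lib_remove_dev() cancel everything still in flight with a single usb_kill_anchored_urbs() call. A minimal sketch of the pattern — the helper name is ours, not the driver's:

    #include <linux/usb.h>

    static int submit_anchored(struct urb *urb, struct usb_anchor *anchor)
    {
            int ret;

            usb_anchor_urb(urb, anchor);            /* make it cancellable */
            ret = usb_submit_urb(urb, GFP_ATOMIC);
            if (ret)
                    usb_unanchor_urb(urb);          /* failed: drop from anchor */
            return ret;
    }

    /* Teardown then becomes a single call:
     *         usb_kill_anchored_urbs(anchor);
     */
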
index 24a3436ef952870a118e0adddf5f06fa3c5ca6f5..03013eb2f6429d89d96c01013d4ac4a5213b1550 100644
@@ -252,9 +252,9 @@ static void rt61pci_brightness_set(struct led_classdev *led_cdev,
            container_of(led_cdev, struct rt2x00_led, led_dev);
        unsigned int enabled = brightness != LED_OFF;
        unsigned int a_mode =
-           (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_5GHZ);
+           (enabled && led->rt2x00dev->curr_band == NL80211_BAND_5GHZ);
        unsigned int bg_mode =
-           (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_2GHZ);
+           (enabled && led->rt2x00dev->curr_band == NL80211_BAND_2GHZ);
 
        if (led->type == LED_TYPE_RADIO) {
                rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg,
@@ -643,12 +643,12 @@ static void rt61pci_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
        case ANTENNA_HW_DIVERSITY:
                rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2);
                rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END,
-                                 (rt2x00dev->curr_band != IEEE80211_BAND_5GHZ));
+                                 (rt2x00dev->curr_band != NL80211_BAND_5GHZ));
                break;
        case ANTENNA_A:
                rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1);
                rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0);
-               if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ)
+               if (rt2x00dev->curr_band == NL80211_BAND_5GHZ)
                        rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0);
                else
                        rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3);
@@ -657,7 +657,7 @@ static void rt61pci_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
        default:
                rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1);
                rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0);
-               if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ)
+               if (rt2x00dev->curr_band == NL80211_BAND_5GHZ)
                        rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3);
                else
                        rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0);
@@ -808,7 +808,7 @@ static void rt61pci_config_ant(struct rt2x00_dev *rt2x00dev,
        BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY ||
               ant->tx == ANTENNA_SW_DIVERSITY);
 
-       if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
+       if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
                sel = antenna_sel_a;
                lna = rt2x00_has_cap_external_lna_a(rt2x00dev);
        } else {
@@ -822,9 +822,9 @@ static void rt61pci_config_ant(struct rt2x00_dev *rt2x00dev,
        rt2x00mmio_register_read(rt2x00dev, PHY_CSR0, &reg);
 
        rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_BG,
-                          rt2x00dev->curr_band == IEEE80211_BAND_2GHZ);
+                          rt2x00dev->curr_band == NL80211_BAND_2GHZ);
        rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_A,
-                          rt2x00dev->curr_band == IEEE80211_BAND_5GHZ);
+                          rt2x00dev->curr_band == NL80211_BAND_5GHZ);
 
        rt2x00mmio_register_write(rt2x00dev, PHY_CSR0, reg);
 
@@ -846,7 +846,7 @@ static void rt61pci_config_lna_gain(struct rt2x00_dev *rt2x00dev,
        u16 eeprom;
        short lna_gain = 0;
 
-       if (libconf->conf->chandef.chan->band == IEEE80211_BAND_2GHZ) {
+       if (libconf->conf->chandef.chan->band == NL80211_BAND_2GHZ) {
                if (rt2x00_has_cap_external_lna_bg(rt2x00dev))
                        lna_gain += 14;
 
@@ -1048,7 +1048,7 @@ static void rt61pci_link_tuner(struct rt2x00_dev *rt2x00dev,
        /*
         * Determine r17 bounds.
         */
-       if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
+       if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
                low_bound = 0x28;
                up_bound = 0x48;
                if (rt2x00_has_cap_external_lna_a(rt2x00dev)) {
@@ -2077,7 +2077,7 @@ static int rt61pci_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
                return 0;
        }
 
-       if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
+       if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
                if (lna == 3 || lna == 2)
                        offset += 10;
        }
index 7bbc869311681988dbbcd50907fd33d84f4f1a85..c1397a6d3cee06b7df0fb216a5f7047adfd0caa1 100644
@@ -197,9 +197,9 @@ static void rt73usb_brightness_set(struct led_classdev *led_cdev,
           container_of(led_cdev, struct rt2x00_led, led_dev);
        unsigned int enabled = brightness != LED_OFF;
        unsigned int a_mode =
-           (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_5GHZ);
+           (enabled && led->rt2x00dev->curr_band == NL80211_BAND_5GHZ);
        unsigned int bg_mode =
-           (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_2GHZ);
+           (enabled && led->rt2x00dev->curr_band == NL80211_BAND_2GHZ);
 
        if (led->type == LED_TYPE_RADIO) {
                rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg,
@@ -593,13 +593,13 @@ static void rt73usb_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
        case ANTENNA_HW_DIVERSITY:
                rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2);
                temp = !rt2x00_has_cap_frame_type(rt2x00dev) &&
-                      (rt2x00dev->curr_band != IEEE80211_BAND_5GHZ);
+                      (rt2x00dev->curr_band != NL80211_BAND_5GHZ);
                rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, temp);
                break;
        case ANTENNA_A:
                rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1);
                rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0);
-               if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ)
+               if (rt2x00dev->curr_band == NL80211_BAND_5GHZ)
                        rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0);
                else
                        rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3);
@@ -608,7 +608,7 @@ static void rt73usb_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
        default:
                rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1);
                rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0);
-               if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ)
+               if (rt2x00dev->curr_band == NL80211_BAND_5GHZ)
                        rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3);
                else
                        rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0);
@@ -704,7 +704,7 @@ static void rt73usb_config_ant(struct rt2x00_dev *rt2x00dev,
        BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY ||
               ant->tx == ANTENNA_SW_DIVERSITY);
 
-       if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
+       if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
                sel = antenna_sel_a;
                lna = rt2x00_has_cap_external_lna_a(rt2x00dev);
        } else {
@@ -718,9 +718,9 @@ static void rt73usb_config_ant(struct rt2x00_dev *rt2x00dev,
        rt2x00usb_register_read(rt2x00dev, PHY_CSR0, &reg);
 
        rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_BG,
-                          (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ));
+                          (rt2x00dev->curr_band == NL80211_BAND_2GHZ));
        rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_A,
-                          (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ));
+                          (rt2x00dev->curr_band == NL80211_BAND_5GHZ));
 
        rt2x00usb_register_write(rt2x00dev, PHY_CSR0, reg);
 
@@ -736,7 +736,7 @@ static void rt73usb_config_lna_gain(struct rt2x00_dev *rt2x00dev,
        u16 eeprom;
        short lna_gain = 0;
 
-       if (libconf->conf->chandef.chan->band == IEEE80211_BAND_2GHZ) {
+       if (libconf->conf->chandef.chan->band == NL80211_BAND_2GHZ) {
                if (rt2x00_has_cap_external_lna_bg(rt2x00dev))
                        lna_gain += 14;
 
@@ -923,7 +923,7 @@ static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev,
        /*
         * Determine r17 bounds.
         */
-       if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
+       if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
                low_bound = 0x28;
                up_bound = 0x48;
 
@@ -1657,7 +1657,7 @@ static int rt73usb_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
                return 0;
        }
 
-       if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
+       if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
                if (rt2x00_has_cap_external_lna_a(rt2x00dev)) {
                        if (lna == 3 || lna == 2)
                                offset += 10;
index a43a16fde59dc88f175e4ade96b1aab48520d695..e895a84481da0c68adf6642fc9614e3469e5ba90 100644
@@ -526,7 +526,7 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
                 * ieee80211_generic_frame_duration
                 */
                duration = ieee80211_generic_frame_duration(dev, priv->vif,
-                                       IEEE80211_BAND_2GHZ, skb->len,
+                                       NL80211_BAND_2GHZ, skb->len,
                                        ieee80211_get_tx_rate(dev, info));
 
                frame_duration =  priv->ack_time + le16_to_cpu(duration);
@@ -1018,6 +1018,8 @@ static int rtl8180_init_rx_ring(struct ieee80211_hw *dev)
                dma_addr_t *mapping;
                entry = priv->rx_ring + priv->rx_ring_sz*i;
                if (!skb) {
+                       pci_free_consistent(priv->pdev, priv->rx_ring_sz * 32,
+                                       priv->rx_ring, priv->rx_ring_dma);
                        wiphy_err(dev->wiphy, "Cannot allocate RX skb\n");
                        return -ENOMEM;
                }
@@ -1028,6 +1030,8 @@ static int rtl8180_init_rx_ring(struct ieee80211_hw *dev)
 
                if (pci_dma_mapping_error(priv->pdev, *mapping)) {
                        kfree_skb(skb);
+                       pci_free_consistent(priv->pdev, priv->rx_ring_sz * 32,
+                                       priv->rx_ring, priv->rx_ring_dma);
                        wiphy_err(dev->wiphy, "Cannot map DMA for RX skb\n");
                        return -ENOMEM;
                }
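
The two pci_free_consistent() additions close a leak in the RX ring setup: on an skb allocation or DMA mapping failure the function used to return -ENOMEM with the coherent ring buffer still allocated. A sketch of the corrected shape, with fill_entry() as a hypothetical stand-in for the per-descriptor work:

    #include <linux/pci.h>

    static bool fill_entry(void *ring, int i);      /* hypothetical helper */

    static int init_ring(struct pci_dev *pdev, void **ring, dma_addr_t *dma,
                         size_t entry_sz, int n)
    {
            int i;

            *ring = pci_alloc_consistent(pdev, entry_sz * n, dma);
            if (!*ring)
                    return -ENOMEM;

            for (i = 0; i < n; i++) {
                    if (!fill_entry(*ring, i)) {
                            /* free the coherent ring too - this was the leak */
                            pci_free_consistent(pdev, entry_sz * n, *ring, *dma);
                            return -ENOMEM;
                    }
            }
            return 0;
    }
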
@@ -1529,7 +1533,7 @@ static void rtl8180_bss_info_changed(struct ieee80211_hw *dev,
                priv->ack_time =
                        le16_to_cpu(ieee80211_generic_frame_duration(dev,
                                        priv->vif,
-                                       IEEE80211_BAND_2GHZ, 10,
+                                       NL80211_BAND_2GHZ, 10,
                                        &priv->rates[0])) - 10;
 
                rtl8180_conf_erp(dev, info);
@@ -1736,7 +1740,7 @@ static int rtl8180_probe(struct pci_dev *pdev,
        if (err) {
                printk(KERN_ERR "%s (rtl8180): Cannot obtain PCI resources\n",
                       pci_name(pdev));
-               return err;
+               goto err_disable_dev;
        }
 
        io_addr = pci_resource_start(pdev, 0);
@@ -1795,12 +1799,12 @@ static int rtl8180_probe(struct pci_dev *pdev,
        memcpy(priv->channels, rtl818x_channels, sizeof(rtl818x_channels));
        memcpy(priv->rates, rtl818x_rates, sizeof(rtl818x_rates));
 
-       priv->band.band = IEEE80211_BAND_2GHZ;
+       priv->band.band = NL80211_BAND_2GHZ;
        priv->band.channels = priv->channels;
        priv->band.n_channels = ARRAY_SIZE(rtl818x_channels);
        priv->band.bitrates = priv->rates;
        priv->band.n_bitrates = 4;
-       dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+       dev->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
 
        ieee80211_hw_set(dev, HOST_BROADCAST_PS_BUFFERING);
        ieee80211_hw_set(dev, RX_INCLUDES_FCS);
@@ -1938,6 +1942,8 @@ static int rtl8180_probe(struct pci_dev *pdev,
 
  err_free_reg:
        pci_release_regions(pdev);
+
+ err_disable_dev:
        pci_disable_device(pdev);
        return err;
 }
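
The goto err_disable_dev change fixes the one probe failure path that skipped pci_disable_device(): pci_request_regions() can fail after the device has already been enabled. A skeleton of the unwind ordering, releasing in reverse order of acquisition — example_setup() is a hypothetical placeholder for the later probe steps:

    #include <linux/pci.h>

    static int example_setup(struct pci_dev *pdev); /* hypothetical */

    static int example_probe(struct pci_dev *pdev)
    {
            int err;

            err = pci_enable_device(pdev);
            if (err)
                    return err;

            err = pci_request_regions(pdev, "example");
            if (err)
                    goto err_disable_dev;

            err = example_setup(pdev);
            if (err)
                    goto err_free_reg;

            return 0;

     err_free_reg:
            pci_release_regions(pdev);
     err_disable_dev:
            pci_disable_device(pdev);
            return err;
    }
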
index b7f72f9c79884f4b039eada1f4d8a92026de1693..231f84db9ab0691e46eee60c41f2b1ce3ccf9312 100644
@@ -1470,12 +1470,12 @@ static int rtl8187_probe(struct usb_interface *intf,
        memcpy(priv->rates, rtl818x_rates, sizeof(rtl818x_rates));
        priv->map = (struct rtl818x_csr *)0xFF00;
 
-       priv->band.band = IEEE80211_BAND_2GHZ;
+       priv->band.band = NL80211_BAND_2GHZ;
        priv->band.channels = priv->channels;
        priv->band.n_channels = ARRAY_SIZE(rtl818x_channels);
        priv->band.bitrates = priv->rates;
        priv->band.n_bitrates = ARRAY_SIZE(rtl818x_rates);
-       dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+       dev->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
 
 
        ieee80211_hw_set(dev, RX_INCLUDES_FCS);
index abdff458b80f7a8b586423b56cff1d4434d99724..f2ce8c9a31cf53239a534d1c30ab58720929ddf0 100644
@@ -1,7 +1,7 @@
 /*
  * RTL8XXXU mac80211 USB driver
  *
- * Copyright (c) 2014 - 2015 Jes Sorensen <Jes.Sorensen@redhat.com>
+ * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com>
  *
  * Portions, notably calibration code:
  * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
@@ -91,33 +91,33 @@ static struct ieee80211_rate rtl8xxxu_rates[] = {
 };
 
 static struct ieee80211_channel rtl8xxxu_channels_2g[] = {
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2412,
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2412,
          .hw_value = 1, .max_power = 30 },
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2417,
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2417,
          .hw_value = 2, .max_power = 30 },
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2422,
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2422,
          .hw_value = 3, .max_power = 30 },
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2427,
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2427,
          .hw_value = 4, .max_power = 30 },
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2432,
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2432,
          .hw_value = 5, .max_power = 30 },
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2437,
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2437,
          .hw_value = 6, .max_power = 30 },
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2442,
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2442,
          .hw_value = 7, .max_power = 30 },
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2447,
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2447,
          .hw_value = 8, .max_power = 30 },
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2452,
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2452,
          .hw_value = 9, .max_power = 30 },
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2457,
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2457,
          .hw_value = 10, .max_power = 30 },
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2462,
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2462,
          .hw_value = 11, .max_power = 30 },
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2467,
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2467,
          .hw_value = 12, .max_power = 30 },
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2472,
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2472,
          .hw_value = 13, .max_power = 30 },
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2484,
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2484,
          .hw_value = 14, .max_power = 30 }
 };
 
@@ -128,7 +128,7 @@ static struct ieee80211_supported_band rtl8xxxu_supported_band = {
        .n_bitrates = ARRAY_SIZE(rtl8xxxu_rates),
 };
 
-static struct rtl8xxxu_reg8val rtl8723a_mac_init_table[] = {
+static struct rtl8xxxu_reg8val rtl8xxxu_gen1_mac_init_table[] = {
        {0x420, 0x80}, {0x423, 0x00}, {0x430, 0x00}, {0x431, 0x00},
        {0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05},
        {0x436, 0x06}, {0x437, 0x07}, {0x438, 0x00}, {0x439, 0x00},
@@ -184,6 +184,104 @@ static struct rtl8xxxu_reg8val rtl8723b_mac_init_table[] = {
        {0xffff, 0xff},
 };
 
+static struct rtl8xxxu_reg8val rtl8192e_mac_init_table[] = {
+       {0x011, 0xeb}, {0x012, 0x07}, {0x014, 0x75}, {0x303, 0xa7},
+       {0x428, 0x0a}, {0x429, 0x10}, {0x430, 0x00}, {0x431, 0x00},
+       {0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05},
+       {0x436, 0x07}, {0x437, 0x08}, {0x43c, 0x04}, {0x43d, 0x05},
+       {0x43e, 0x07}, {0x43f, 0x08}, {0x440, 0x5d}, {0x441, 0x01},
+       {0x442, 0x00}, {0x444, 0x10}, {0x445, 0x00}, {0x446, 0x00},
+       {0x447, 0x00}, {0x448, 0x00}, {0x449, 0xf0}, {0x44a, 0x0f},
+       {0x44b, 0x3e}, {0x44c, 0x10}, {0x44d, 0x00}, {0x44e, 0x00},
+       {0x44f, 0x00}, {0x450, 0x00}, {0x451, 0xf0}, {0x452, 0x0f},
+       {0x453, 0x00}, {0x456, 0x5e}, {0x460, 0x66}, {0x461, 0x66},
+       {0x4c8, 0xff}, {0x4c9, 0x08}, {0x4cc, 0xff}, {0x4cd, 0xff},
+       {0x4ce, 0x01}, {0x500, 0x26}, {0x501, 0xa2}, {0x502, 0x2f},
+       {0x503, 0x00}, {0x504, 0x28}, {0x505, 0xa3}, {0x506, 0x5e},
+       {0x507, 0x00}, {0x508, 0x2b}, {0x509, 0xa4}, {0x50a, 0x5e},
+       {0x50b, 0x00}, {0x50c, 0x4f}, {0x50d, 0xa4}, {0x50e, 0x00},
+       {0x50f, 0x00}, {0x512, 0x1c}, {0x514, 0x0a}, {0x516, 0x0a},
+       {0x525, 0x4f}, {0x540, 0x12}, {0x541, 0x64}, {0x550, 0x10},
+       {0x551, 0x10}, {0x559, 0x02}, {0x55c, 0x50}, {0x55d, 0xff},
+       {0x605, 0x30}, {0x608, 0x0e}, {0x609, 0x2a}, {0x620, 0xff},
+       {0x621, 0xff}, {0x622, 0xff}, {0x623, 0xff}, {0x624, 0xff},
+       {0x625, 0xff}, {0x626, 0xff}, {0x627, 0xff}, {0x638, 0x50},
+       {0x63c, 0x0a}, {0x63d, 0x0a}, {0x63e, 0x0e}, {0x63f, 0x0e},
+       {0x640, 0x40}, {0x642, 0x40}, {0x643, 0x00}, {0x652, 0xc8},
+       {0x66e, 0x05}, {0x700, 0x21}, {0x701, 0x43}, {0x702, 0x65},
+       {0x703, 0x87}, {0x708, 0x21}, {0x709, 0x43}, {0x70a, 0x65},
+       {0x70b, 0x87},
+       {0xffff, 0xff},
+};
+
+#ifdef CONFIG_RTL8XXXU_UNTESTED
+static struct rtl8xxxu_power_base rtl8188r_power_base = {
+       .reg_0e00 = 0x06080808,
+       .reg_0e04 = 0x00040406,
+       .reg_0e08 = 0x00000000,
+       .reg_086c = 0x00000000,
+
+       .reg_0e10 = 0x04060608,
+       .reg_0e14 = 0x00020204,
+       .reg_0e18 = 0x04060608,
+       .reg_0e1c = 0x00020204,
+
+       .reg_0830 = 0x06080808,
+       .reg_0834 = 0x00040406,
+       .reg_0838 = 0x00000000,
+       .reg_086c_2 = 0x00000000,
+
+       .reg_083c = 0x04060608,
+       .reg_0848 = 0x00020204,
+       .reg_084c = 0x04060608,
+       .reg_0868 = 0x00020204,
+};
+
+static struct rtl8xxxu_power_base rtl8192c_power_base = {
+       .reg_0e00 = 0x07090c0c,
+       .reg_0e04 = 0x01020405,
+       .reg_0e08 = 0x00000000,
+       .reg_086c = 0x00000000,
+
+       .reg_0e10 = 0x0b0c0c0e,
+       .reg_0e14 = 0x01030506,
+       .reg_0e18 = 0x0b0c0d0e,
+       .reg_0e1c = 0x01030509,
+
+       .reg_0830 = 0x07090c0c,
+       .reg_0834 = 0x01020405,
+       .reg_0838 = 0x00000000,
+       .reg_086c_2 = 0x00000000,
+
+       .reg_083c = 0x0b0c0d0e,
+       .reg_0848 = 0x01030509,
+       .reg_084c = 0x0b0c0d0e,
+       .reg_0868 = 0x01030509,
+};
+#endif
+
+static struct rtl8xxxu_power_base rtl8723a_power_base = {
+       .reg_0e00 = 0x0a0c0c0c,
+       .reg_0e04 = 0x02040608,
+       .reg_0e08 = 0x00000000,
+       .reg_086c = 0x00000000,
+
+       .reg_0e10 = 0x0a0c0d0e,
+       .reg_0e14 = 0x02040608,
+       .reg_0e18 = 0x0a0c0d0e,
+       .reg_0e1c = 0x02040608,
+
+       .reg_0830 = 0x0a0c0c0c,
+       .reg_0834 = 0x02040608,
+       .reg_0838 = 0x00000000,
+       .reg_086c_2 = 0x00000000,
+
+       .reg_083c = 0x0a0c0d0e,
+       .reg_0848 = 0x02040608,
+       .reg_084c = 0x0a0c0d0e,
+       .reg_0868 = 0x02040608,
+};
+
 static struct rtl8xxxu_reg32val rtl8723a_phy_1t_init_table[] = {
        {0x800, 0x80040000}, {0x804, 0x00000003},
        {0x808, 0x0000fc00}, {0x80c, 0x0000000a},
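
Like the existing tables in this file, the new rtl8192e tables end in an all-ones sentinel ({0xffff, 0xff} for the 8-bit MAC table, {0xffff, 0xffffffff} for the 32-bit PHY tables) rather than carrying a length. A sketch of the sentinel-terminated walk, assuming this struct layout and that rtl8xxxu_write8() is the driver's 8-bit register accessor (the real loader also handles delays and debug tracing):

    struct rtl8xxxu_reg8val {
            u16 reg;
            u8 val;
    };

    static void write_reg8_table(struct rtl8xxxu_priv *priv,
                                 const struct rtl8xxxu_reg8val *array)
    {
            int i;

            /* Stop at the {0xffff, 0xff} terminator. */
            for (i = 0; array[i].reg != 0xffff; i++)
                    rtl8xxxu_write8(priv, array[i].reg, array[i].val);
    }
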
@@ -580,6 +678,138 @@ static struct rtl8xxxu_reg32val rtl8188ru_phy_1t_highpa_table[] = {
        {0xffff, 0xffffffff},
 };
 
+static struct rtl8xxxu_reg32val rtl8192eu_phy_init_table[] = {
+       {0x800, 0x80040000}, {0x804, 0x00000003},
+       {0x808, 0x0000fc00}, {0x80c, 0x0000000a},
+       {0x810, 0x10001331}, {0x814, 0x020c3d10},
+       {0x818, 0x02220385}, {0x81c, 0x00000000},
+       {0x820, 0x01000100}, {0x824, 0x00390204},
+       {0x828, 0x01000100}, {0x82c, 0x00390204},
+       {0x830, 0x32323232}, {0x834, 0x30303030},
+       {0x838, 0x30303030}, {0x83c, 0x30303030},
+       {0x840, 0x00010000}, {0x844, 0x00010000},
+       {0x848, 0x28282828}, {0x84c, 0x28282828},
+       {0x850, 0x00000000}, {0x854, 0x00000000},
+       {0x858, 0x009a009a}, {0x85c, 0x01000014},
+       {0x860, 0x66f60000}, {0x864, 0x061f0000},
+       {0x868, 0x30303030}, {0x86c, 0x30303030},
+       {0x870, 0x00000000}, {0x874, 0x55004200},
+       {0x878, 0x08080808}, {0x87c, 0x00000000},
+       {0x880, 0xb0000c1c}, {0x884, 0x00000001},
+       {0x888, 0x00000000}, {0x88c, 0xcc0000c0},
+       {0x890, 0x00000800}, {0x894, 0xfffffffe},
+       {0x898, 0x40302010}, {0x900, 0x00000000},
+       {0x904, 0x00000023}, {0x908, 0x00000000},
+       {0x90c, 0x81121313}, {0x910, 0x806c0001},
+       {0x914, 0x00000001}, {0x918, 0x00000000},
+       {0x91c, 0x00010000}, {0x924, 0x00000001},
+       {0x928, 0x00000000}, {0x92c, 0x00000000},
+       {0x930, 0x00000000}, {0x934, 0x00000000},
+       {0x938, 0x00000000}, {0x93c, 0x00000000},
+       {0x940, 0x00000000}, {0x944, 0x00000000},
+       {0x94c, 0x00000008}, {0xa00, 0x00d0c7c8},
+       {0xa04, 0x81ff000c}, {0xa08, 0x8c838300},
+       {0xa0c, 0x2e68120f}, {0xa10, 0x95009b78},
+       {0xa14, 0x1114d028}, {0xa18, 0x00881117},
+       {0xa1c, 0x89140f00}, {0xa20, 0x1a1b0000},
+       {0xa24, 0x090e1317}, {0xa28, 0x00000204},
+       {0xa2c, 0x00d30000}, {0xa70, 0x101fff00},
+       {0xa74, 0x00000007}, {0xa78, 0x00000900},
+       {0xa7c, 0x225b0606}, {0xa80, 0x218075b1},
+       {0xb38, 0x00000000}, {0xc00, 0x48071d40},
+       {0xc04, 0x03a05633}, {0xc08, 0x000000e4},
+       {0xc0c, 0x6c6c6c6c}, {0xc10, 0x08800000},
+       {0xc14, 0x40000100}, {0xc18, 0x08800000},
+       {0xc1c, 0x40000100}, {0xc20, 0x00000000},
+       {0xc24, 0x00000000}, {0xc28, 0x00000000},
+       {0xc2c, 0x00000000}, {0xc30, 0x69e9ac47},
+       {0xc34, 0x469652af}, {0xc38, 0x49795994},
+       {0xc3c, 0x0a97971c}, {0xc40, 0x1f7c403f},
+       {0xc44, 0x000100b7}, {0xc48, 0xec020107},
+       {0xc4c, 0x007f037f},
+#ifdef EXT_PA_8192EU
+       /* External PA or external LNA */
+       {0xc50, 0x00340220},
+#else
+       {0xc50, 0x00340020},
+#endif
+       {0xc54, 0x0080801f},
+#ifdef EXT_PA_8192EU
+       /* External PA or external LNA */
+       {0xc58, 0x00000220},
+#else
+       {0xc58, 0x00000020},
+#endif
+       {0xc5c, 0x00248492}, {0xc60, 0x00000000},
+       {0xc64, 0x7112848b}, {0xc68, 0x47c00bff},
+       {0xc6c, 0x00000036}, {0xc70, 0x00000600},
+       {0xc74, 0x02013169}, {0xc78, 0x0000001f},
+       {0xc7c, 0x00b91612},
+#ifdef EXT_PA_8192EU
+       /* External PA or external LNA */
+       {0xc80, 0x2d4000b5},
+#else
+       {0xc80, 0x40000100},
+#endif
+       {0xc84, 0x21f60000},
+#ifdef EXT_PA_8192EU
+       /* External PA or external LNA */
+       {0xc88, 0x2d4000b5},
+#else
+       {0xc88, 0x40000100},
+#endif
+       {0xc8c, 0xa0e40000}, {0xc90, 0x00121820},
+       {0xc94, 0x00000000}, {0xc98, 0x00121820},
+       {0xc9c, 0x00007f7f}, {0xca0, 0x00000000},
+       {0xca4, 0x000300a0}, {0xca8, 0x00000000},
+       {0xcac, 0x00000000}, {0xcb0, 0x00000000},
+       {0xcb4, 0x00000000}, {0xcb8, 0x00000000},
+       {0xcbc, 0x28000000}, {0xcc0, 0x00000000},
+       {0xcc4, 0x00000000}, {0xcc8, 0x00000000},
+       {0xccc, 0x00000000}, {0xcd0, 0x00000000},
+       {0xcd4, 0x00000000}, {0xcd8, 0x64b22427},
+       {0xcdc, 0x00766932}, {0xce0, 0x00222222},
+       {0xce4, 0x00040000}, {0xce8, 0x77644302},
+       {0xcec, 0x2f97d40c}, {0xd00, 0x00080740},
+       {0xd04, 0x00020403}, {0xd08, 0x0000907f},
+       {0xd0c, 0x20010201}, {0xd10, 0xa0633333},
+       {0xd14, 0x3333bc43}, {0xd18, 0x7a8f5b6b},
+       {0xd1c, 0x0000007f}, {0xd2c, 0xcc979975},
+       {0xd30, 0x00000000}, {0xd34, 0x80608000},
+       {0xd38, 0x00000000}, {0xd3c, 0x00127353},
+       {0xd40, 0x00000000}, {0xd44, 0x00000000},
+       {0xd48, 0x00000000}, {0xd4c, 0x00000000},
+       {0xd50, 0x6437140a}, {0xd54, 0x00000000},
+       {0xd58, 0x00000282}, {0xd5c, 0x30032064},
+       {0xd60, 0x4653de68}, {0xd64, 0x04518a3c},
+       {0xd68, 0x00002101}, {0xd6c, 0x2a201c16},
+       {0xd70, 0x1812362e}, {0xd74, 0x322c2220},
+       {0xd78, 0x000e3c24}, {0xd80, 0x01081008},
+       {0xd84, 0x00000800}, {0xd88, 0xf0b50000},
+       {0xe00, 0x30303030}, {0xe04, 0x30303030},
+       {0xe08, 0x03903030}, {0xe10, 0x30303030},
+       {0xe14, 0x30303030}, {0xe18, 0x30303030},
+       {0xe1c, 0x30303030}, {0xe28, 0x00000000},
+       {0xe30, 0x1000dc1f}, {0xe34, 0x10008c1f},
+       {0xe38, 0x02140102}, {0xe3c, 0x681604c2},
+       {0xe40, 0x01007c00}, {0xe44, 0x01004800},
+       {0xe48, 0xfb000000}, {0xe4c, 0x000028d1},
+       {0xe50, 0x1000dc1f}, {0xe54, 0x10008c1f},
+       {0xe58, 0x02140102}, {0xe5c, 0x28160d05},
+       {0xe60, 0x00000008}, {0xe68, 0x0fc05656},
+       {0xe6c, 0x03c09696}, {0xe70, 0x03c09696},
+       {0xe74, 0x0c005656}, {0xe78, 0x0c005656},
+       {0xe7c, 0x0c005656}, {0xe80, 0x0c005656},
+       {0xe84, 0x03c09696}, {0xe88, 0x0c005656},
+       {0xe8c, 0x03c09696}, {0xed0, 0x03c09696},
+       {0xed4, 0x03c09696}, {0xed8, 0x03c09696},
+       {0xedc, 0x0000d6d6}, {0xee0, 0x0000d6d6},
+       {0xeec, 0x0fc01616}, {0xee4, 0xb0000c1c},
+       {0xee8, 0x00000001}, {0xf14, 0x00000003},
+       {0xf4c, 0x00000000}, {0xf00, 0x00000300},
+       {0xffff, 0xffffffff},
+};
+
 static struct rtl8xxxu_reg32val rtl8xxx_agc_standard_table[] = {
        {0xc78, 0x7b000001}, {0xc78, 0x7b010001},
        {0xc78, 0x7b020001}, {0xc78, 0x7b030001},
@@ -819,6 +1049,144 @@ static struct rtl8xxxu_reg32val rtl8xxx_agc_8723bu_table[] = {
        {0xffff, 0xffffffff}
 };
 
+static struct rtl8xxxu_reg32val rtl8xxx_agc_8192eu_std_table[] = {
+       {0xc78, 0xfb000001}, {0xc78, 0xfb010001},
+       {0xc78, 0xfb020001}, {0xc78, 0xfb030001},
+       {0xc78, 0xfb040001}, {0xc78, 0xfb050001},
+       {0xc78, 0xfa060001}, {0xc78, 0xf9070001},
+       {0xc78, 0xf8080001}, {0xc78, 0xf7090001},
+       {0xc78, 0xf60a0001}, {0xc78, 0xf50b0001},
+       {0xc78, 0xf40c0001}, {0xc78, 0xf30d0001},
+       {0xc78, 0xf20e0001}, {0xc78, 0xf10f0001},
+       {0xc78, 0xf0100001}, {0xc78, 0xef110001},
+       {0xc78, 0xee120001}, {0xc78, 0xed130001},
+       {0xc78, 0xec140001}, {0xc78, 0xeb150001},
+       {0xc78, 0xea160001}, {0xc78, 0xe9170001},
+       {0xc78, 0xe8180001}, {0xc78, 0xe7190001},
+       {0xc78, 0xc81a0001}, {0xc78, 0xc71b0001},
+       {0xc78, 0xc61c0001}, {0xc78, 0x071d0001},
+       {0xc78, 0x061e0001}, {0xc78, 0x051f0001},
+       {0xc78, 0x04200001}, {0xc78, 0x03210001},
+       {0xc78, 0xaa220001}, {0xc78, 0xa9230001},
+       {0xc78, 0xa8240001}, {0xc78, 0xa7250001},
+       {0xc78, 0xa6260001}, {0xc78, 0x85270001},
+       {0xc78, 0x84280001}, {0xc78, 0x83290001},
+       {0xc78, 0x252a0001}, {0xc78, 0x242b0001},
+       {0xc78, 0x232c0001}, {0xc78, 0x222d0001},
+       {0xc78, 0x672e0001}, {0xc78, 0x662f0001},
+       {0xc78, 0x65300001}, {0xc78, 0x64310001},
+       {0xc78, 0x63320001}, {0xc78, 0x62330001},
+       {0xc78, 0x61340001}, {0xc78, 0x45350001},
+       {0xc78, 0x44360001}, {0xc78, 0x43370001},
+       {0xc78, 0x42380001}, {0xc78, 0x41390001},
+       {0xc78, 0x403a0001}, {0xc78, 0x403b0001},
+       {0xc78, 0x403c0001}, {0xc78, 0x403d0001},
+       {0xc78, 0x403e0001}, {0xc78, 0x403f0001},
+       {0xc78, 0xfb400001}, {0xc78, 0xfb410001},
+       {0xc78, 0xfb420001}, {0xc78, 0xfb430001},
+       {0xc78, 0xfb440001}, {0xc78, 0xfb450001},
+       {0xc78, 0xfa460001}, {0xc78, 0xf9470001},
+       {0xc78, 0xf8480001}, {0xc78, 0xf7490001},
+       {0xc78, 0xf64a0001}, {0xc78, 0xf54b0001},
+       {0xc78, 0xf44c0001}, {0xc78, 0xf34d0001},
+       {0xc78, 0xf24e0001}, {0xc78, 0xf14f0001},
+       {0xc78, 0xf0500001}, {0xc78, 0xef510001},
+       {0xc78, 0xee520001}, {0xc78, 0xed530001},
+       {0xc78, 0xec540001}, {0xc78, 0xeb550001},
+       {0xc78, 0xea560001}, {0xc78, 0xe9570001},
+       {0xc78, 0xe8580001}, {0xc78, 0xe7590001},
+       {0xc78, 0xe65a0001}, {0xc78, 0xe55b0001},
+       {0xc78, 0xe45c0001}, {0xc78, 0xe35d0001},
+       {0xc78, 0xe25e0001}, {0xc78, 0xe15f0001},
+       {0xc78, 0x8a600001}, {0xc78, 0x89610001},
+       {0xc78, 0x88620001}, {0xc78, 0x87630001},
+       {0xc78, 0x86640001}, {0xc78, 0x85650001},
+       {0xc78, 0x84660001}, {0xc78, 0x83670001},
+       {0xc78, 0x82680001}, {0xc78, 0x6b690001},
+       {0xc78, 0x6a6a0001}, {0xc78, 0x696b0001},
+       {0xc78, 0x686c0001}, {0xc78, 0x676d0001},
+       {0xc78, 0x666e0001}, {0xc78, 0x656f0001},
+       {0xc78, 0x64700001}, {0xc78, 0x63710001},
+       {0xc78, 0x62720001}, {0xc78, 0x61730001},
+       {0xc78, 0x49740001}, {0xc78, 0x48750001},
+       {0xc78, 0x47760001}, {0xc78, 0x46770001},
+       {0xc78, 0x45780001}, {0xc78, 0x44790001},
+       {0xc78, 0x437a0001}, {0xc78, 0x427b0001},
+       {0xc78, 0x417c0001}, {0xc78, 0x407d0001},
+       {0xc78, 0x407e0001}, {0xc78, 0x407f0001},
+       {0xc50, 0x00040022}, {0xc50, 0x00040020},
+       {0xffff, 0xffffffff}
+};
+
+static struct rtl8xxxu_reg32val rtl8xxx_agc_8192eu_highpa_table[] = {
+       {0xc78, 0xfa000001}, {0xc78, 0xf9010001},
+       {0xc78, 0xf8020001}, {0xc78, 0xf7030001},
+       {0xc78, 0xf6040001}, {0xc78, 0xf5050001},
+       {0xc78, 0xf4060001}, {0xc78, 0xf3070001},
+       {0xc78, 0xf2080001}, {0xc78, 0xf1090001},
+       {0xc78, 0xf00a0001}, {0xc78, 0xef0b0001},
+       {0xc78, 0xee0c0001}, {0xc78, 0xed0d0001},
+       {0xc78, 0xec0e0001}, {0xc78, 0xeb0f0001},
+       {0xc78, 0xea100001}, {0xc78, 0xe9110001},
+       {0xc78, 0xe8120001}, {0xc78, 0xe7130001},
+       {0xc78, 0xe6140001}, {0xc78, 0xe5150001},
+       {0xc78, 0xe4160001}, {0xc78, 0xe3170001},
+       {0xc78, 0xe2180001}, {0xc78, 0xe1190001},
+       {0xc78, 0x8a1a0001}, {0xc78, 0x891b0001},
+       {0xc78, 0x881c0001}, {0xc78, 0x871d0001},
+       {0xc78, 0x861e0001}, {0xc78, 0x851f0001},
+       {0xc78, 0x84200001}, {0xc78, 0x83210001},
+       {0xc78, 0x82220001}, {0xc78, 0x6a230001},
+       {0xc78, 0x69240001}, {0xc78, 0x68250001},
+       {0xc78, 0x67260001}, {0xc78, 0x66270001},
+       {0xc78, 0x65280001}, {0xc78, 0x64290001},
+       {0xc78, 0x632a0001}, {0xc78, 0x622b0001},
+       {0xc78, 0x612c0001}, {0xc78, 0x602d0001},
+       {0xc78, 0x472e0001}, {0xc78, 0x462f0001},
+       {0xc78, 0x45300001}, {0xc78, 0x44310001},
+       {0xc78, 0x43320001}, {0xc78, 0x42330001},
+       {0xc78, 0x41340001}, {0xc78, 0x40350001},
+       {0xc78, 0x40360001}, {0xc78, 0x40370001},
+       {0xc78, 0x40380001}, {0xc78, 0x40390001},
+       {0xc78, 0x403a0001}, {0xc78, 0x403b0001},
+       {0xc78, 0x403c0001}, {0xc78, 0x403d0001},
+       {0xc78, 0x403e0001}, {0xc78, 0x403f0001},
+       {0xc78, 0xfa400001}, {0xc78, 0xf9410001},
+       {0xc78, 0xf8420001}, {0xc78, 0xf7430001},
+       {0xc78, 0xf6440001}, {0xc78, 0xf5450001},
+       {0xc78, 0xf4460001}, {0xc78, 0xf3470001},
+       {0xc78, 0xf2480001}, {0xc78, 0xf1490001},
+       {0xc78, 0xf04a0001}, {0xc78, 0xef4b0001},
+       {0xc78, 0xee4c0001}, {0xc78, 0xed4d0001},
+       {0xc78, 0xec4e0001}, {0xc78, 0xeb4f0001},
+       {0xc78, 0xea500001}, {0xc78, 0xe9510001},
+       {0xc78, 0xe8520001}, {0xc78, 0xe7530001},
+       {0xc78, 0xe6540001}, {0xc78, 0xe5550001},
+       {0xc78, 0xe4560001}, {0xc78, 0xe3570001},
+       {0xc78, 0xe2580001}, {0xc78, 0xe1590001},
+       {0xc78, 0x8a5a0001}, {0xc78, 0x895b0001},
+       {0xc78, 0x885c0001}, {0xc78, 0x875d0001},
+       {0xc78, 0x865e0001}, {0xc78, 0x855f0001},
+       {0xc78, 0x84600001}, {0xc78, 0x83610001},
+       {0xc78, 0x82620001}, {0xc78, 0x6a630001},
+       {0xc78, 0x69640001}, {0xc78, 0x68650001},
+       {0xc78, 0x67660001}, {0xc78, 0x66670001},
+       {0xc78, 0x65680001}, {0xc78, 0x64690001},
+       {0xc78, 0x636a0001}, {0xc78, 0x626b0001},
+       {0xc78, 0x616c0001}, {0xc78, 0x606d0001},
+       {0xc78, 0x476e0001}, {0xc78, 0x466f0001},
+       {0xc78, 0x45700001}, {0xc78, 0x44710001},
+       {0xc78, 0x43720001}, {0xc78, 0x42730001},
+       {0xc78, 0x41740001}, {0xc78, 0x40750001},
+       {0xc78, 0x40760001}, {0xc78, 0x40770001},
+       {0xc78, 0x40780001}, {0xc78, 0x40790001},
+       {0xc78, 0x407a0001}, {0xc78, 0x407b0001},
+       {0xc78, 0x407c0001}, {0xc78, 0x407d0001},
+       {0xc78, 0x407e0001}, {0xc78, 0x407f0001},
+       {0xc50, 0x00040222}, {0xc50, 0x00040220},
+       {0xffff, 0xffffffff}
+};
+
 static struct rtl8xxxu_rfregval rtl8723au_radioa_1t_init_table[] = {
        {0x00, 0x00030159}, {0x01, 0x00031284},
        {0x02, 0x00098000}, {0x03, 0x00039c63},
@@ -963,6 +1331,7 @@ static struct rtl8xxxu_rfregval rtl8723bu_radioa_1t_init_table[] = {
        {0xff, 0xffffffff}
 };
 
+#ifdef CONFIG_RTL8XXXU_UNTESTED
 static struct rtl8xxxu_rfregval rtl8192cu_radioa_2t_init_table[] = {
        {0x00, 0x00030159}, {0x01, 0x00031284},
        {0x02, 0x00098000}, {0x03, 0x00018c63},
@@ -1211,6 +1580,153 @@ static struct rtl8xxxu_rfregval rtl8188ru_radioa_1t_highpa_table[] = {
        {0x00, 0x00030159},
        {0xff, 0xffffffff}
 };
+#endif
+
+static struct rtl8xxxu_rfregval rtl8192eu_radioa_init_table[] = {
+       {0x7f, 0x00000082}, {0x81, 0x0003fc00},
+       {0x00, 0x00030000}, {0x08, 0x00008400},
+       {0x18, 0x00000407}, {0x19, 0x00000012},
+       {0x1b, 0x00000064}, {0x1e, 0x00080009},
+       {0x1f, 0x00000880}, {0x2f, 0x0001a060},
+       {0x3f, 0x00000000}, {0x42, 0x000060c0},
+       {0x57, 0x000d0000}, {0x58, 0x000be180},
+       {0x67, 0x00001552}, {0x83, 0x00000000},
+       {0xb0, 0x000ff9f1}, {0xb1, 0x00055418},
+       {0xb2, 0x0008cc00}, {0xb4, 0x00043083},
+       {0xb5, 0x00008166}, {0xb6, 0x0000803e},
+       {0xb7, 0x0001c69f}, {0xb8, 0x0000407f},
+       {0xb9, 0x00080001}, {0xba, 0x00040001},
+       {0xbb, 0x00000400}, {0xbf, 0x000c0000},
+       {0xc2, 0x00002400}, {0xc3, 0x00000009},
+       {0xc4, 0x00040c91}, {0xc5, 0x00099999},
+       {0xc6, 0x000000a3}, {0xc7, 0x00088820},
+       {0xc8, 0x00076c06}, {0xc9, 0x00000000},
+       {0xca, 0x00080000}, {0xdf, 0x00000180},
+       {0xef, 0x000001a0}, {0x51, 0x00069545},
+       {0x52, 0x0007e45e}, {0x53, 0x00000071},
+       {0x56, 0x00051ff3}, {0x35, 0x000000a8},
+       {0x35, 0x000001e2}, {0x35, 0x000002a8},
+       {0x36, 0x00001c24}, {0x36, 0x00009c24},
+       {0x36, 0x00011c24}, {0x36, 0x00019c24},
+       {0x18, 0x00000c07}, {0x5a, 0x00048000},
+       {0x19, 0x000739d0},
+#ifdef EXT_PA_8192EU
+       /* External PA or external LNA */
+       {0x34, 0x0000a093}, {0x34, 0x0000908f},
+       {0x34, 0x0000808c}, {0x34, 0x0000704d},
+       {0x34, 0x0000604a}, {0x34, 0x00005047},
+       {0x34, 0x0000400a}, {0x34, 0x00003007},
+       {0x34, 0x00002004}, {0x34, 0x00001001},
+       {0x34, 0x00000000},
+#else
+       /* Regular */
+       {0x34, 0x0000add7}, {0x34, 0x00009dd4},
+       {0x34, 0x00008dd1}, {0x34, 0x00007dce},
+       {0x34, 0x00006dcb}, {0x34, 0x00005dc8},
+       {0x34, 0x00004dc5}, {0x34, 0x000034cc},
+       {0x34, 0x0000244f}, {0x34, 0x0000144c},
+       {0x34, 0x00000014},
+#endif
+       {0x00, 0x00030159},
+       {0x84, 0x00068180},
+       {0x86, 0x0000014e},
+       {0x87, 0x00048e00},
+       {0x8e, 0x00065540},
+       {0x8f, 0x00088000},
+       {0xef, 0x000020a0},
+#ifdef EXT_PA_8192EU
+       /* External PA or external LNA */
+       {0x3b, 0x000f07b0},
+#else
+       {0x3b, 0x000f02b0},
+#endif
+       {0x3b, 0x000ef7b0}, {0x3b, 0x000d4fb0},
+       {0x3b, 0x000cf060}, {0x3b, 0x000b0090},
+       {0x3b, 0x000a0080}, {0x3b, 0x00090080},
+       {0x3b, 0x0008f780},
+#ifdef EXT_PA_8192EU
+       /* External PA or external LNA */
+       {0x3b, 0x000787b0},
+#else
+       {0x3b, 0x00078730},
+#endif
+       {0x3b, 0x00060fb0}, {0x3b, 0x0005ffa0},
+       {0x3b, 0x00040620}, {0x3b, 0x00037090},
+       {0x3b, 0x00020080}, {0x3b, 0x0001f060},
+       {0x3b, 0x0000ffb0}, {0xef, 0x000000a0},
+       {0xfe, 0x00000000}, {0x18, 0x0000fc07},
+       {0xfe, 0x00000000}, {0xfe, 0x00000000},
+       {0xfe, 0x00000000}, {0xfe, 0x00000000},
+       {0x1e, 0x00000001}, {0x1f, 0x00080000},
+       {0x00, 0x00033e70},
+       {0xff, 0xffffffff}
+};
+
+static struct rtl8xxxu_rfregval rtl8192eu_radiob_init_table[] = {
+       {0x7f, 0x00000082}, {0x81, 0x0003fc00},
+       {0x00, 0x00030000}, {0x08, 0x00008400},
+       {0x18, 0x00000407}, {0x19, 0x00000012},
+       {0x1b, 0x00000064}, {0x1e, 0x00080009},
+       {0x1f, 0x00000880}, {0x2f, 0x0001a060},
+       {0x3f, 0x00000000}, {0x42, 0x000060c0},
+       {0x57, 0x000d0000}, {0x58, 0x000be180},
+       {0x67, 0x00001552}, {0x7f, 0x00000082},
+       {0x81, 0x0003f000}, {0x83, 0x00000000},
+       {0xdf, 0x00000180}, {0xef, 0x000001a0},
+       {0x51, 0x00069545}, {0x52, 0x0007e42e},
+       {0x53, 0x00000071}, {0x56, 0x00051ff3},
+       {0x35, 0x000000a8}, {0x35, 0x000001e0},
+       {0x35, 0x000002a8}, {0x36, 0x00001ca8},
+       {0x36, 0x00009c24}, {0x36, 0x00011c24},
+       {0x36, 0x00019c24}, {0x18, 0x00000c07},
+       {0x5a, 0x00048000}, {0x19, 0x000739d0},
+#ifdef EXT_PA_8192EU
+       /* External PA or external LNA */
+       {0x34, 0x0000a093}, {0x34, 0x0000908f},
+       {0x34, 0x0000808c}, {0x34, 0x0000704d},
+       {0x34, 0x0000604a}, {0x34, 0x00005047},
+       {0x34, 0x0000400a}, {0x34, 0x00003007},
+       {0x34, 0x00002004}, {0x34, 0x00001001},
+       {0x34, 0x00000000},
+#else
+       {0x34, 0x0000add7}, {0x34, 0x00009dd4},
+       {0x34, 0x00008dd1}, {0x34, 0x00007dce},
+       {0x34, 0x00006dcb}, {0x34, 0x00005dc8},
+       {0x34, 0x00004dc5}, {0x34, 0x000034cc},
+       {0x34, 0x0000244f}, {0x34, 0x0000144c},
+       {0x34, 0x00000014},
+#endif
+       {0x00, 0x00030159}, {0x84, 0x00068180},
+       {0x86, 0x000000ce}, {0x87, 0x00048a00},
+       {0x8e, 0x00065540}, {0x8f, 0x00088000},
+       {0xef, 0x000020a0},
+#ifdef EXT_PA_8192EU
+       /* External PA or external LNA */
+       {0x3b, 0x000f07b0},
+#else
+       {0x3b, 0x000f02b0},
+#endif
+
+       {0x3b, 0x000ef7b0}, {0x3b, 0x000d4fb0},
+       {0x3b, 0x000cf060}, {0x3b, 0x000b0090},
+       {0x3b, 0x000a0080}, {0x3b, 0x00090080},
+       {0x3b, 0x0008f780},
+#ifdef EXT_PA_8192EU
+       /* External PA or external LNA */
+       {0x3b, 0x000787b0},
+#else
+       {0x3b, 0x00078730},
+#endif
+       {0x3b, 0x00060fb0}, {0x3b, 0x0005ffa0},
+       {0x3b, 0x00040620}, {0x3b, 0x00037090},
+       {0x3b, 0x00020080}, {0x3b, 0x0001f060},
+       {0x3b, 0x0000ffb0}, {0xef, 0x000000a0},
+       {0x00, 0x00010159}, {0xfe, 0x00000000},
+       {0xfe, 0x00000000}, {0xfe, 0x00000000},
+       {0xfe, 0x00000000}, {0x1e, 0x00000001},
+       {0x1f, 0x00080000}, {0x00, 0x00033e70},
+       {0xff, 0xffffffff}
+};
 
 static struct rtl8xxxu_rfregs rtl8xxxu_rfregs[] = {
        {       /* RF_A */
@@ -1231,7 +1747,7 @@ static struct rtl8xxxu_rfregs rtl8xxxu_rfregs[] = {
        },
 };
 
-static const u32 rtl8723au_iqk_phy_iq_bb_reg[RTL8XXXU_BB_REGS] = {
+static const u32 rtl8xxxu_iqk_phy_iq_bb_reg[RTL8XXXU_BB_REGS] = {
        REG_OFDM0_XA_RX_IQ_IMBALANCE,
        REG_OFDM0_XB_RX_IQ_IMBALANCE,
        REG_OFDM0_ENERGY_CCA_THRES,
@@ -1450,7 +1966,7 @@ static int rtl8xxxu_write_rfreg(struct rtl8xxxu_priv *priv,
                                enum rtl8xxxu_rfpath path, u8 reg, u32 data)
 {
        int ret, retval;
-       u32 dataaddr;
+       u32 dataaddr, val32;
 
        if (rtl8xxxu_debug & RTL8XXXU_DEBUG_RFREG_WRITE)
                dev_info(&priv->udev->dev, "%s(%02x) = 0x%06x\n",
@@ -1459,6 +1975,12 @@ static int rtl8xxxu_write_rfreg(struct rtl8xxxu_priv *priv,
        data &= FPGA0_LSSI_PARM_DATA_MASK;
        dataaddr = (reg << FPGA0_LSSI_PARM_ADDR_SHIFT) | data;
 
+       if (priv->rtl_chip == RTL8192E) {
+               val32 = rtl8xxxu_read32(priv, REG_FPGA0_POWER_SAVE);
+               val32 &= ~0x20000;
+               rtl8xxxu_write32(priv, REG_FPGA0_POWER_SAVE, val32);
+       }
+
        /* Use XB for path B */
        ret = rtl8xxxu_write32(priv, rtl8xxxu_rfregs[path].lssiparm, dataaddr);
        if (ret != sizeof(dataaddr))
@@ -1468,6 +1990,12 @@ static int rtl8xxxu_write_rfreg(struct rtl8xxxu_priv *priv,
 
        udelay(1);
 
+       if (priv->rtl_chip == RTL8192E) {
+               val32 = rtl8xxxu_read32(priv, REG_FPGA0_POWER_SAVE);
+               val32 |= 0x20000;
+               rtl8xxxu_write32(priv, REG_FPGA0_POWER_SAVE, val32);
+       }
+
        return retval;
 }
 
@@ -1552,7 +2080,7 @@ static void rtl8723bu_write_btreg(struct rtl8xxxu_priv *priv, u8 reg, u8 data)
        rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.bt_mp_oper));
 }
 
-static void rtl8723a_enable_rf(struct rtl8xxxu_priv *priv)
+static void rtl8xxxu_gen1_enable_rf(struct rtl8xxxu_priv *priv)
 {
        u8 val8;
        u32 val32;
@@ -1574,7 +2102,7 @@ static void rtl8723a_enable_rf(struct rtl8xxxu_priv *priv)
        val32 &= ~OFDM_RF_PATH_TX_MASK;
        if (priv->tx_paths == 2)
                val32 |= OFDM_RF_PATH_TX_A | OFDM_RF_PATH_TX_B;
-       else if (priv->rtlchip == 0x8192c || priv->rtlchip == 0x8191c)
+       else if (priv->rtl_chip == RTL8192C || priv->rtl_chip == RTL8191C)
                val32 |= OFDM_RF_PATH_TX_B;
        else
                val32 |= OFDM_RF_PATH_TX_A;
@@ -1596,13 +2124,11 @@ static void rtl8723a_enable_rf(struct rtl8xxxu_priv *priv)
        rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00);
 }
 
-static void rtl8723a_disable_rf(struct rtl8xxxu_priv *priv)
+static void rtl8xxxu_gen1_disable_rf(struct rtl8xxxu_priv *priv)
 {
        u8 sps0;
        u32 val32;
 
-       rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
-
        sps0 = rtl8xxxu_read8(priv, REG_SPS0_CTRL);
 
        /* RF RX code for preamble power saving */
@@ -1676,7 +2202,10 @@ static int rtl8723a_channel_to_group(int channel)
        return group;
 }
 
-static int rtl8723b_channel_to_group(int channel)
+/*
+ * Valid for rtl8723bu and rtl8192eu
+ */
+static int rtl8xxxu_gen2_channel_to_group(int channel)
 {
        int group;
 
@@ -1694,7 +2223,7 @@ static int rtl8723b_channel_to_group(int channel)
        return group;
 }
 
-static void rtl8723au_config_channel(struct ieee80211_hw *hw)
+static void rtl8xxxu_gen1_config_channel(struct ieee80211_hw *hw)
 {
        struct rtl8xxxu_priv *priv = hw->priv;
        u32 val32, rsr;
@@ -1816,7 +2345,7 @@ static void rtl8723au_config_channel(struct ieee80211_hw *hw)
        }
 }
 
-static void rtl8723bu_config_channel(struct ieee80211_hw *hw)
+static void rtl8xxxu_gen2_config_channel(struct ieee80211_hw *hw)
 {
        struct rtl8xxxu_priv *priv = hw->priv;
        u32 val32, rsr;
@@ -1947,8 +2476,9 @@ static void rtl8723bu_config_channel(struct ieee80211_hw *hw)
 }
 
 static void
-rtl8723a_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
+rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
 {
+       struct rtl8xxxu_power_base *power_base = priv->power_base;
        u8 cck[RTL8723A_MAX_RF_PATHS], ofdm[RTL8723A_MAX_RF_PATHS];
        u8 ofdmbase[RTL8723A_MAX_RF_PATHS], mcsbase[RTL8723A_MAX_RF_PATHS];
        u32 val32, ofdm_a, ofdm_b, mcs_a, mcs_b;
@@ -1957,11 +2487,22 @@ rtl8723a_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
 
        group = rtl8723a_channel_to_group(channel);
 
-       cck[0] = priv->cck_tx_power_index_A[group];
-       cck[1] = priv->cck_tx_power_index_B[group];
+       cck[0] = priv->cck_tx_power_index_A[group] - 1;
+       cck[1] = priv->cck_tx_power_index_B[group] - 1;
+
+       if (priv->hi_pa) {
+               if (cck[0] > 0x20)
+                       cck[0] = 0x20;
+               if (cck[1] > 0x20)
+                       cck[1] = 0x20;
+       }
 
        ofdm[0] = priv->ht40_1s_tx_power_index_A[group];
        ofdm[1] = priv->ht40_1s_tx_power_index_B[group];
+       if (ofdm[0])
+               ofdm[0] -= 1;
+       if (ofdm[1])
+               ofdm[1] -= 1;
 
        ofdmbase[0] = ofdm[0] + priv->ofdm_tx_power_index_diff[group].a;
        ofdmbase[1] = ofdm[1] + priv->ofdm_tx_power_index_diff[group].b;
@@ -2017,27 +2558,39 @@ rtl8723a_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
                ofdmbase[0] << 16 | ofdmbase[0] << 24;
        ofdm_b = ofdmbase[1] | ofdmbase[1] << 8 |
                ofdmbase[1] << 16 | ofdmbase[1] << 24;
-       rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE18_06, ofdm_a);
-       rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE18_06, ofdm_b);
 
-       rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE54_24, ofdm_a);
-       rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE54_24, ofdm_b);
+       rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE18_06,
+                        ofdm_a + power_base->reg_0e00);
+       rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE18_06,
+                        ofdm_b + power_base->reg_0830);
+
+       rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE54_24,
+                        ofdm_a + power_base->reg_0e04);
+       rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE54_24,
+                        ofdm_b + power_base->reg_0834);
 
        mcs_a = mcsbase[0] | mcsbase[0] << 8 |
                mcsbase[0] << 16 | mcsbase[0] << 24;
        mcs_b = mcsbase[1] | mcsbase[1] << 8 |
                mcsbase[1] << 16 | mcsbase[1] << 24;
 
-       rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS03_MCS00, mcs_a);
-       rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS03_MCS00, mcs_b);
+       rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS03_MCS00,
+                        mcs_a + power_base->reg_0e10);
+       rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS03_MCS00,
+                        mcs_b + power_base->reg_083c);
 
-       rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS07_MCS04, mcs_a);
-       rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS07_MCS04, mcs_b);
+       rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS07_MCS04,
+                        mcs_a + power_base->reg_0e14);
+       rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS07_MCS04,
+                        mcs_b + power_base->reg_0848);
 
-       rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS11_MCS08, mcs_a);
-       rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS11_MCS08, mcs_b);
+       rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS11_MCS08,
+                        mcs_a + power_base->reg_0e18);
+       rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS11_MCS08,
+                        mcs_b + power_base->reg_084c);
 
-       rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS15_MCS12, mcs_a);
+       rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS15_MCS12,
+                        mcs_a + power_base->reg_0e1c);
        for (i = 0; i < 3; i++) {
                if (i != 2)
                        val8 = (mcsbase[0] > 8) ? (mcsbase[0] - 8) : 0;
@@ -2045,7 +2598,8 @@ rtl8723a_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
                        val8 = (mcsbase[0] > 6) ? (mcsbase[0] - 6) : 0;
                rtl8xxxu_write8(priv, REG_OFDM0_XC_TX_IQ_IMBALANCE + i, val8);
        }
-       rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS15_MCS12, mcs_b);
+       rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS15_MCS12,
+                        mcs_b + power_base->reg_0868);
        for (i = 0; i < 3; i++) {
                if (i != 2)
                        val8 = (mcsbase[1] > 8) ? (mcsbase[1] - 8) : 0;
@@ -2063,7 +2617,7 @@ rtl8723b_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
        int group, tx_idx;
 
        tx_idx = 0;
-       group = rtl8723b_channel_to_group(channel);
+       group = rtl8xxxu_gen2_channel_to_group(channel);
 
        cck = priv->cck_tx_power_index_B[group];
        val32 = rtl8xxxu_read32(priv, REG_TX_AGC_A_CCK1_MCS32);
@@ -2094,6 +2648,82 @@ rtl8723b_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
        rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS07_MCS04, mcs);
 }
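Both set_tx_power variants above pack one power index per rate into each
32-bit TX AGC register, four identical bytes at a time (the gen1 path then
adds the per-register power_base offsets on top). The packing step in
isolation, as a runnable snippet:

	#include <assert.h>
	#include <stdint.h>

	/* One index per rate, four rates per 32-bit TX AGC register */
	static uint32_t agc_word(uint8_t idx)
	{
		return idx | (uint32_t)idx << 8 |
		       (uint32_t)idx << 16 | (uint32_t)idx << 24;
	}

	int main(void)
	{
		assert(agc_word(0x2d) == 0x2d2d2d2d);
		return 0;
	}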
 
+static void
+rtl8192e_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
+{
+       u32 val32, ofdm, mcs;
+       u8 cck, ofdmbase, mcsbase;
+       int group, tx_idx;
+
+       tx_idx = 0;
+       group = rtl8xxxu_gen2_channel_to_group(channel);
+
+       cck = priv->cck_tx_power_index_A[group];
+
+       val32 = rtl8xxxu_read32(priv, REG_TX_AGC_A_CCK1_MCS32);
+       val32 &= 0xffff00ff;
+       val32 |= (cck << 8);
+       rtl8xxxu_write32(priv, REG_TX_AGC_A_CCK1_MCS32, val32);
+
+       val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11);
+       val32 &= 0xff;
+       val32 |= ((cck << 8) | (cck << 16) | (cck << 24));
+       rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11, val32);
+
+       ofdmbase = priv->ht40_1s_tx_power_index_A[group];
+       ofdmbase += priv->ofdm_tx_power_diff[tx_idx].a;
+       ofdm = ofdmbase | ofdmbase << 8 | ofdmbase << 16 | ofdmbase << 24;
+
+       rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE18_06, ofdm);
+       rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE54_24, ofdm);
+
+       mcsbase = priv->ht40_1s_tx_power_index_A[group];
+       if (ht40)
+               mcsbase += priv->ht40_tx_power_diff[tx_idx++].a;
+       else
+               mcsbase += priv->ht20_tx_power_diff[tx_idx++].a;
+       mcs = mcsbase | mcsbase << 8 | mcsbase << 16 | mcsbase << 24;
+
+       rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS03_MCS00, mcs);
+       rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS07_MCS04, mcs);
+       rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS11_MCS08, mcs);
+       rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS15_MCS12, mcs);
+
+       if (priv->tx_paths > 1) {
+               cck = priv->cck_tx_power_index_B[group];
+
+               val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK1_55_MCS32);
+               val32 &= 0xff;
+               val32 |= ((cck << 8) | (cck << 16) | (cck << 24));
+               rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK1_55_MCS32, val32);
+
+               val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11);
+               val32 &= 0xffffff00;
+               val32 |= cck;
+               rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11, val32);
+
+               ofdmbase = priv->ht40_1s_tx_power_index_B[group];
+               ofdmbase += priv->ofdm_tx_power_diff[tx_idx].b;
+               ofdm = ofdmbase | ofdmbase << 8 |
+                       ofdmbase << 16 | ofdmbase << 24;
+
+               rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE18_06, ofdm);
+               rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE54_24, ofdm);
+
+               mcsbase = priv->ht40_1s_tx_power_index_B[group];
+               if (ht40)
+                       mcsbase += priv->ht40_tx_power_diff[tx_idx++].b;
+               else
+                       mcsbase += priv->ht20_tx_power_diff[tx_idx++].b;
+               mcs = mcsbase | mcsbase << 8 | mcsbase << 16 | mcsbase << 24;
+
+               rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS03_MCS00, mcs);
+               rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS07_MCS04, mcs);
+               rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS11_MCS08, mcs);
+               rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS15_MCS12, mcs);
+       }
+}
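The CCK indexes here land in registers shared with other rate fields, hence
the masked read-modify-write pattern (keep 0xffff00ff, insert the index at
byte 1, and so on). A generic sketch of that pattern; set_byte() is
illustrative, not a driver helper:

	#include <assert.h>
	#include <stdint.h>

	/* Replace byte n of a register image, leaving other fields intact */
	static uint32_t set_byte(uint32_t reg, unsigned int n, uint8_t val)
	{
		reg &= ~(0xffu << (8 * n));
		reg |= (uint32_t)val << (8 * n);
		return reg;
	}

	int main(void)
	{
		/* Mirrors: val32 &= 0xffff00ff; val32 |= (cck << 8); */
		assert(set_byte(0x12345678, 1, 0x2d) == 0x12342d78);
		return 0;
	}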
+
 static void rtl8xxxu_set_linktype(struct rtl8xxxu_priv *priv,
                                  enum nl80211_iftype linktype)
 {
@@ -2199,11 +2829,11 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
        if (val32 & SYS_CFG_BT_FUNC) {
                if (priv->chip_cut >= 3) {
                        sprintf(priv->chip_name, "8723BU");
-                       priv->rtlchip = 0x8723b;
+                       priv->rtl_chip = RTL8723B;
                } else {
                        sprintf(priv->chip_name, "8723AU");
                        priv->usb_interrupts = 1;
-                       priv->rtlchip = 0x8723a;
+                       priv->rtl_chip = RTL8723A;
                }
 
                priv->rf_paths = 1;
@@ -2221,19 +2851,20 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
        } else if (val32 & SYS_CFG_TYPE_ID) {
                bonding = rtl8xxxu_read32(priv, REG_HPON_FSM);
                bonding &= HPON_FSM_BONDING_MASK;
-               if (priv->chip_cut >= 3) {
+               if (priv->fops->tx_desc_size ==
+                   sizeof(struct rtl8xxxu_txdesc40)) {
                        if (bonding == HPON_FSM_BONDING_1T2R) {
                                sprintf(priv->chip_name, "8191EU");
                                priv->rf_paths = 2;
                                priv->rx_paths = 2;
                                priv->tx_paths = 1;
-                               priv->rtlchip = 0x8191e;
+                               priv->rtl_chip = RTL8191E;
                        } else {
                                sprintf(priv->chip_name, "8192EU");
                                priv->rf_paths = 2;
                                priv->rx_paths = 2;
                                priv->tx_paths = 2;
-                               priv->rtlchip = 0x8192e;
+                               priv->rtl_chip = RTL8192E;
                        }
                } else if (bonding == HPON_FSM_BONDING_1T2R) {
                        sprintf(priv->chip_name, "8191CU");
@@ -2241,14 +2872,14 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
                        priv->rx_paths = 2;
                        priv->tx_paths = 1;
                        priv->usb_interrupts = 1;
-                       priv->rtlchip = 0x8191c;
+                       priv->rtl_chip = RTL8191C;
                } else {
                        sprintf(priv->chip_name, "8192CU");
                        priv->rf_paths = 2;
                        priv->rx_paths = 2;
                        priv->tx_paths = 2;
                        priv->usb_interrupts = 1;
-                       priv->rtlchip = 0x8192c;
+                       priv->rtl_chip = RTL8192C;
                }
                priv->has_wifi = 1;
        } else {
@@ -2256,15 +2887,15 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
                priv->rf_paths = 1;
                priv->rx_paths = 1;
                priv->tx_paths = 1;
-               priv->rtlchip = 0x8188c;
+               priv->rtl_chip = RTL8188C;
                priv->usb_interrupts = 1;
                priv->has_wifi = 1;
        }
 
-       switch (priv->rtlchip) {
-       case 0x8188e:
-       case 0x8192e:
-       case 0x8723b:
+       switch (priv->rtl_chip) {
+       case RTL8188E:
+       case RTL8192E:
+       case RTL8723B:
                switch (val32 & SYS_CFG_VENDOR_EXT_MASK) {
                case SYS_CFG_VENDOR_ID_TSMC:
                        sprintf(priv->chip_vendor, "TSMC");
@@ -2375,6 +3006,9 @@ static int rtl8723au_parse_efuse(struct rtl8xxxu_priv *priv)
                priv->has_xtalk = 1;
                priv->xtalk = priv->efuse_wifi.efuse8723.xtal_k & 0x3f;
        }
+
+       priv->power_base = &rtl8723a_power_base;
+
        dev_info(&priv->udev->dev, "Vendor: %.7s\n",
                 efuse->vendor_name);
        dev_info(&priv->udev->dev, "Product: %.41s\n",
@@ -2507,9 +3141,14 @@ static int rtl8192cu_parse_efuse(struct rtl8xxxu_priv *priv)
        dev_info(&priv->udev->dev, "Product: %.20s\n",
                 efuse->device_name);
 
+       priv->power_base = &rtl8192c_power_base;
+
        if (efuse->rf_regulatory & 0x20) {
                sprintf(priv->chip_name, "8188RU");
+               priv->rtl_chip = RTL8188R;
                priv->hi_pa = 1;
+               priv->no_pape = 1;
+               priv->power_base = &rtl8188r_power_base;
        }
 
        if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
@@ -2541,6 +3180,43 @@ static int rtl8192eu_parse_efuse(struct rtl8xxxu_priv *priv)
 
        ether_addr_copy(priv->mac_addr, efuse->mac_addr);
 
+       memcpy(priv->cck_tx_power_index_A, efuse->tx_power_index_A.cck_base,
+              sizeof(efuse->tx_power_index_A.cck_base));
+       memcpy(priv->cck_tx_power_index_B, efuse->tx_power_index_B.cck_base,
+              sizeof(efuse->tx_power_index_B.cck_base));
+
+       memcpy(priv->ht40_1s_tx_power_index_A,
+              efuse->tx_power_index_A.ht40_base,
+              sizeof(efuse->tx_power_index_A.ht40_base));
+       memcpy(priv->ht40_1s_tx_power_index_B,
+              efuse->tx_power_index_B.ht40_base,
+              sizeof(efuse->tx_power_index_B.ht40_base));
+
+       priv->ht20_tx_power_diff[0].a =
+               efuse->tx_power_index_A.ht20_ofdm_1s_diff.b;
+       priv->ht20_tx_power_diff[0].b =
+               efuse->tx_power_index_B.ht20_ofdm_1s_diff.b;
+
+       priv->ht40_tx_power_diff[0].a = 0;
+       priv->ht40_tx_power_diff[0].b = 0;
+
+       for (i = 1; i < RTL8723B_TX_COUNT; i++) {
+               priv->ofdm_tx_power_diff[i].a =
+                       efuse->tx_power_index_A.pwr_diff[i - 1].ofdm;
+               priv->ofdm_tx_power_diff[i].b =
+                       efuse->tx_power_index_B.pwr_diff[i - 1].ofdm;
+
+               priv->ht20_tx_power_diff[i].a =
+                       efuse->tx_power_index_A.pwr_diff[i - 1].ht20;
+               priv->ht20_tx_power_diff[i].b =
+                       efuse->tx_power_index_B.pwr_diff[i - 1].ht20;
+
+               priv->ht40_tx_power_diff[i].a =
+                       efuse->tx_power_index_A.pwr_diff[i - 1].ht40;
+               priv->ht40_tx_power_diff[i].b =
+                       efuse->tx_power_index_B.pwr_diff[i - 1].ht40;
+       }
+
        priv->has_xtalk = 1;
        priv->xtalk = priv->efuse_wifi.efuse8192eu.xtal_k & 0x3f;
 
@@ -2562,10 +3238,6 @@ static int rtl8192eu_parse_efuse(struct rtl8xxxu_priv *priv)
                                 raw[i + 6], raw[i + 7]);
                }
        }
-       /*
-        * Temporarily disable 8192eu support
-        */
-       return -EINVAL;
        return 0;
 }
 
@@ -2814,7 +3486,7 @@ static int rtl8xxxu_start_firmware(struct rtl8xxxu_priv *priv)
        /*
         * Init H2C command
         */
-       if (priv->rtlchip == 0x8723b)
+       if (priv->rtl_chip == RTL8723B)
                rtl8xxxu_write8(priv, REG_HMTFR, 0x0f);
 exit:
        return ret;
@@ -2997,7 +3669,7 @@ static int rtl8192cu_load_firmware(struct rtl8xxxu_priv *priv)
 
        if (!priv->vendor_umc)
                fw_name = "rtlwifi/rtl8192cufw_TMSC.bin";
-       else if (priv->chip_cut || priv->rtlchip == 0x8192c)
+       else if (priv->chip_cut || priv->rtl_chip == RTL8192C)
                fw_name = "rtlwifi/rtl8192cufw_B.bin";
        else
                fw_name = "rtlwifi/rtl8192cufw_A.bin";
@@ -3052,9 +3724,9 @@ static void rtl8723bu_phy_init_antenna_selection(struct rtl8xxxu_priv *priv)
 {
        u32 val32;
 
-       val32 = rtl8xxxu_read32(priv, 0x64);
+       val32 = rtl8xxxu_read32(priv, REG_PAD_CTRL1);
        val32 &= ~(BIT(20) | BIT(24));
-       rtl8xxxu_write32(priv, 0x64, val32);
+       rtl8xxxu_write32(priv, REG_PAD_CTRL1, val32);
 
        val32 = rtl8xxxu_read32(priv, REG_GPIO_MUXCFG);
        val32 &= ~BIT(4);
@@ -3087,8 +3759,9 @@ static void rtl8723bu_phy_init_antenna_selection(struct rtl8xxxu_priv *priv)
 }
 
 static int
-rtl8xxxu_init_mac(struct rtl8xxxu_priv *priv, struct rtl8xxxu_reg8val *array)
+rtl8xxxu_init_mac(struct rtl8xxxu_priv *priv)
 {
+       struct rtl8xxxu_reg8val *array = priv->fops->mactable;
        int i, ret;
        u16 reg;
        u8 val;
@@ -3103,12 +3776,13 @@ rtl8xxxu_init_mac(struct rtl8xxxu_priv *priv, struct rtl8xxxu_reg8val *array)
                ret = rtl8xxxu_write8(priv, reg, val);
                if (ret != 1) {
                        dev_warn(&priv->udev->dev,
-                                "Failed to initialize MAC\n");
+                                "Failed to initialize MAC (reg: %04x, val %02x)\n",
+                                reg, val);
                        return -EAGAIN;
                }
        }
 
-       if (priv->rtlchip != 0x8723b)
+       if (priv->rtl_chip != RTL8723B && priv->rtl_chip != RTL8192E)
                rtl8xxxu_write8(priv, REG_MAX_AGGR_NUM, 0x0a);
 
        return 0;
@@ -3140,50 +3814,30 @@ static int rtl8xxxu_init_phy_regs(struct rtl8xxxu_priv *priv,
        return 0;
 }
 
-/*
- * Most of this is black magic retrieved from the old rtl8723au driver
- */
-static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
+static void rtl8xxxu_gen1_init_phy_bb(struct rtl8xxxu_priv *priv)
 {
        u8 val8, ldoa15, ldov12d, lpldo, ldohci12;
        u16 val16;
        u32 val32;
 
-       /*
-        * Todo: The vendor driver maintains a table of PHY register
-        *       addresses, which is initialized here. Do we need this?
-        */
-
-       if (priv->rtlchip == 0x8723b) {
-               val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
-               val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB |
-                       SYS_FUNC_DIO_RF;
-               rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
-
-               rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00);
-       } else {
-               val8 = rtl8xxxu_read8(priv, REG_AFE_PLL_CTRL);
-               udelay(2);
-               val8 |= AFE_PLL_320_ENABLE;
-               rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL, val8);
-               udelay(2);
+       val8 = rtl8xxxu_read8(priv, REG_AFE_PLL_CTRL);
+       udelay(2);
+       val8 |= AFE_PLL_320_ENABLE;
+       rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL, val8);
+       udelay(2);
 
-               rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL + 1, 0xff);
-               udelay(2);
+       rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL + 1, 0xff);
+       udelay(2);
 
-               val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
-               val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB;
-               rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
-       }
+       val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+       val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB;
+       rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
 
-       if (priv->rtlchip != 0x8723b) {
-               /* AFE_XTAL_RF_GATE (bit 14) if addressing as 32 bit register */
-               val32 = rtl8xxxu_read32(priv, REG_AFE_XTAL_CTRL);
-               val32 &= ~AFE_XTAL_RF_GATE;
-               if (priv->has_bluetooth)
-                       val32 &= ~AFE_XTAL_BT_GATE;
-               rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, val32);
-       }
+       val32 = rtl8xxxu_read32(priv, REG_AFE_XTAL_CTRL);
+       val32 &= ~AFE_XTAL_RF_GATE;
+       if (priv->has_bluetooth)
+               val32 &= ~AFE_XTAL_BT_GATE;
+       rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, val32);
 
        /* 6. 0x1f[7:0] = 0x07 */
        val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB;
@@ -3193,45 +3847,112 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
                rtl8xxxu_init_phy_regs(priv, rtl8188ru_phy_1t_highpa_table);
        else if (priv->tx_paths == 2)
                rtl8xxxu_init_phy_regs(priv, rtl8192cu_phy_2t_init_table);
-       else if (priv->rtlchip == 0x8723b) {
-               /*
-                * Why?
-                */
-               rtl8xxxu_write8(priv, REG_SYS_FUNC, 0xe3);
-               rtl8xxxu_write8(priv, REG_AFE_XTAL_CTRL + 1, 0x80);
-               rtl8xxxu_init_phy_regs(priv, rtl8723b_phy_1t_init_table);
-       } else
+       else
                rtl8xxxu_init_phy_regs(priv, rtl8723a_phy_1t_init_table);
 
-
-       if (priv->rtlchip == 0x8188c && priv->hi_pa &&
+       if (priv->rtl_chip == RTL8188R && priv->hi_pa &&
            priv->vendor_umc && priv->chip_cut == 1)
                rtl8xxxu_write8(priv, REG_OFDM0_AGC_PARM1 + 2, 0x50);
 
-       if (priv->tx_paths == 1 && priv->rx_paths == 2) {
-               /*
-                * For 1T2R boards, patch the registers.
-                *
-                * It looks like 8191/2 1T2R boards use path B for TX
-                */
-               val32 = rtl8xxxu_read32(priv, REG_FPGA0_TX_INFO);
-               val32 &= ~(BIT(0) | BIT(1));
-               val32 |= BIT(1);
-               rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, val32);
+       if (priv->hi_pa)
+               rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_highpa_table);
+       else
+               rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_standard_table);
 
-               val32 = rtl8xxxu_read32(priv, REG_FPGA1_TX_INFO);
-               val32 &= ~0x300033;
-               val32 |= 0x200022;
-               rtl8xxxu_write32(priv, REG_FPGA1_TX_INFO, val32);
+       ldoa15 = LDOA15_ENABLE | LDOA15_OBUF;
+       ldov12d = LDOV12D_ENABLE | BIT(2) | (2 << LDOV12D_VADJ_SHIFT);
+       ldohci12 = 0x57;
+       lpldo = 1;
+       val32 = (lpldo << 24) | (ldohci12 << 16) | (ldov12d << 8) | ldoa15;
+       rtl8xxxu_write32(priv, REG_LDOA15_CTRL, val32);
+}
 
-               val32 = rtl8xxxu_read32(priv, REG_CCK0_AFE_SETTING);
-               val32 &= 0xff000000;
-               val32 |= 0x45000000;
-               rtl8xxxu_write32(priv, REG_CCK0_AFE_SETTING, val32);
+static void rtl8723bu_init_phy_bb(struct rtl8xxxu_priv *priv)
+{
+       u8 val8;
+       u16 val16;
 
-               val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE);
-               val32 &= ~(OFDM_RF_PATH_RX_MASK | OFDM_RF_PATH_TX_MASK);
-               val32 |= (OFDM_RF_PATH_RX_A | OFDM_RF_PATH_RX_B |
+       val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+       val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB | SYS_FUNC_DIO_RF;
+       rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+
+       rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00);
+
+       /* 6. 0x1f[7:0] = 0x07 */
+       val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB;
+       rtl8xxxu_write8(priv, REG_RF_CTRL, val8);
+
+       /* Why? */
+       rtl8xxxu_write8(priv, REG_SYS_FUNC, 0xe3);
+       rtl8xxxu_write8(priv, REG_AFE_XTAL_CTRL + 1, 0x80);
+       rtl8xxxu_init_phy_regs(priv, rtl8723b_phy_1t_init_table);
+
+       rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_8723bu_table);
+}
+
+static void rtl8192eu_init_phy_bb(struct rtl8xxxu_priv *priv)
+{
+       u8 val8;
+       u16 val16;
+
+       val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+       val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB | SYS_FUNC_DIO_RF;
+       rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+
+       /* 6. 0x1f[7:0] = 0x07 */
+       val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB;
+       rtl8xxxu_write8(priv, REG_RF_CTRL, val8);
+
+       val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+       val16 |= (SYS_FUNC_USBA | SYS_FUNC_USBD | SYS_FUNC_DIO_RF |
+                 SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB);
+       rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+       val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB;
+       rtl8xxxu_write8(priv, REG_RF_CTRL, val8);
+       rtl8xxxu_init_phy_regs(priv, rtl8192eu_phy_init_table);
+
+       if (priv->hi_pa)
+               rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_8192eu_highpa_table);
+       else
+               rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_8192eu_std_table);
+}
+
+/*
+ * Most of this is black magic retrieved from the old rtl8723au driver
+ */
+static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
+{
+       u8 val8;
+       u32 val32;
+
+       priv->fops->init_phy_bb(priv);
+
+       if (priv->tx_paths == 1 && priv->rx_paths == 2) {
+               /*
+                * For 1T2R boards, patch the registers.
+                *
+                * It looks like 8191/2 1T2R boards use path B for TX
+                */
+               val32 = rtl8xxxu_read32(priv, REG_FPGA0_TX_INFO);
+               val32 &= ~(BIT(0) | BIT(1));
+               val32 |= BIT(1);
+               rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, val32);
+
+               val32 = rtl8xxxu_read32(priv, REG_FPGA1_TX_INFO);
+               val32 &= ~0x300033;
+               val32 |= 0x200022;
+               rtl8xxxu_write32(priv, REG_FPGA1_TX_INFO, val32);
+
+               val32 = rtl8xxxu_read32(priv, REG_CCK0_AFE_SETTING);
+               val32 &= ~CCK0_AFE_RX_MASK;
+               val32 &= 0x00ffffff;
+               val32 |= 0x40000000;
+               val32 |= CCK0_AFE_RX_ANT_B;
+               rtl8xxxu_write32(priv, REG_CCK0_AFE_SETTING, val32);
+
+               val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE);
+               val32 &= ~(OFDM_RF_PATH_RX_MASK | OFDM_RF_PATH_TX_MASK);
+               val32 |= (OFDM_RF_PATH_RX_A | OFDM_RF_PATH_RX_B |
                          OFDM_RF_PATH_TX_B);
                rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, val32);
 
@@ -3266,13 +3987,6 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
                rtl8xxxu_write32(priv, REG_TX_TO_TX, val32);
        }
 
-       if (priv->rtlchip == 0x8723b)
-               rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_8723bu_table);
-       else if (priv->hi_pa)
-               rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_highpa_table);
-       else
-               rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_standard_table);
-
        if (priv->has_xtalk) {
                val32 = rtl8xxxu_read32(priv, REG_MAC_PHY_CTRL);
 
@@ -3283,16 +3997,8 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
                rtl8xxxu_write32(priv, REG_MAC_PHY_CTRL, val32);
        }
 
-       if (priv->rtlchip != 0x8723bu) {
-               ldoa15 = LDOA15_ENABLE | LDOA15_OBUF;
-               ldov12d = LDOV12D_ENABLE | BIT(2) | (2 << LDOV12D_VADJ_SHIFT);
-               ldohci12 = 0x57;
-               lpldo = 1;
-               val32 = (lpldo << 24) | (ldohci12 << 16) |
-                       (ldov12d << 8) | ldoa15;
-
-               rtl8xxxu_write32(priv, REG_LDOA15_CTRL, val32);
-       }
+       if (priv->rtl_chip == RTL8192E)
+               rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, 0x000f81fb);
 
        return 0;
 }
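With this refactor the chip-specific BB bring-up moves behind an init_phy_bb
hook and only the shared 1T2R fixups stay in the common routine. A
self-contained model of the dispatch pattern; the struct and function names
below are stand-ins, with only the init_phy_bb and tx_desc_size field names
taken from the patch:

	#include <stddef.h>
	#include <stdio.h>

	struct priv;	/* stand-in for struct rtl8xxxu_priv */

	struct chip_ops {
		void (*init_phy_bb)(struct priv *priv);
		size_t tx_desc_size;	/* e.g. 32- vs 40-byte TX descriptor */
	};

	struct priv {
		const struct chip_ops *fops;
	};

	static void gen1_init_phy_bb(struct priv *priv)
	{
		(void)priv;
		puts("gen1 BB init");
	}

	static const struct chip_ops gen1_ops = {
		.init_phy_bb	= gen1_init_phy_bb,
		.tx_desc_size	= 32,
	};

	int main(void)
	{
		struct priv p = { .fops = &gen1_ops };

		p.fops->init_phy_bb(&p);	/* as rtl8xxxu_init_phy_bb() does */
		return 0;
	}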
@@ -3410,6 +4116,77 @@ static int rtl8xxxu_init_phy_rf(struct rtl8xxxu_priv *priv,
        return 0;
 }
 
+static int rtl8723au_init_phy_rf(struct rtl8xxxu_priv *priv)
+{
+       int ret;
+
+       ret = rtl8xxxu_init_phy_rf(priv, rtl8723au_radioa_1t_init_table, RF_A);
+
+       /* Reduce 80M spur */
+       rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, 0x0381808d);
+       rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83);
+       rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff82);
+       rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83);
+
+       return ret;
+}
+
+static int rtl8723bu_init_phy_rf(struct rtl8xxxu_priv *priv)
+{
+       int ret;
+
+       ret = rtl8xxxu_init_phy_rf(priv, rtl8723bu_radioa_1t_init_table, RF_A);
+       /*
+        * PHY LCK
+        */
+       rtl8xxxu_write_rfreg(priv, RF_A, 0xb0, 0xdfbe0);
+       rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_MODE_AG, 0x8c01);
+       msleep(200);
+       rtl8xxxu_write_rfreg(priv, RF_A, 0xb0, 0xdffe0);
+
+       return ret;
+}
+
+#ifdef CONFIG_RTL8XXXU_UNTESTED
+static int rtl8192cu_init_phy_rf(struct rtl8xxxu_priv *priv)
+{
+       struct rtl8xxxu_rfregval *rftable;
+       int ret;
+
+       if (priv->rtl_chip == RTL8188R) {
+               rftable = rtl8188ru_radioa_1t_highpa_table;
+               ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
+       } else if (priv->rf_paths == 1) {
+               rftable = rtl8192cu_radioa_1t_init_table;
+               ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
+       } else {
+               rftable = rtl8192cu_radioa_2t_init_table;
+               ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
+               if (ret)
+                       goto exit;
+               rftable = rtl8192cu_radiob_2t_init_table;
+               ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_B);
+       }
+
+exit:
+       return ret;
+}
+#endif
+
+static int rtl8192eu_init_phy_rf(struct rtl8xxxu_priv *priv)
+{
+       int ret;
+
+       ret = rtl8xxxu_init_phy_rf(priv, rtl8192eu_radioa_init_table, RF_A);
+       if (ret)
+               goto exit;
+
+       ret = rtl8xxxu_init_phy_rf(priv, rtl8192eu_radiob_init_table, RF_B);
+
+exit:
+       return ret;
+}
+
 static int rtl8xxxu_llt_write(struct rtl8xxxu_priv *priv, u8 address, u8 data)
 {
        int ret = -EBUSY;
@@ -3818,8 +4595,8 @@ static bool rtl8xxxu_simularity_compare(struct rtl8xxxu_priv *priv,
        return false;
 }
 
-static bool rtl8723bu_simularity_compare(struct rtl8xxxu_priv *priv,
-                                        int result[][8], int c1, int c2)
+static bool rtl8xxxu_gen2_simularity_compare(struct rtl8xxxu_priv *priv,
+                                            int result[][8], int c1, int c2)
 {
        u32 i, j, diff, simubitmap, bound = 0;
        int candidate[2] = {-1, -1};    /* for path A and path B */
@@ -4389,137 +5166,424 @@ out:
        return result;
 }
 
-#ifdef RTL8723BU_PATH_B
-static int rtl8723bu_iqk_path_b(struct rtl8xxxu_priv *priv)
+static int rtl8192eu_iqk_path_a(struct rtl8xxxu_priv *priv)
 {
-       u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc, path_sel;
+       u32 reg_eac, reg_e94, reg_e9c;
        int result = 0;
 
-       path_sel = rtl8xxxu_read32(priv, REG_S0S1_PATH_SWITCH);
+       /*
+        * TX IQK
+        * PA/PAD controlled by 0x0
+        */
+       rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+       rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x00180);
+       rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
 
-       val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
-       val32 &= 0x000000ff;
-       rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+       /* Path A IQK setting */
+       rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x18008c1c);
+       rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c);
+       rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c);
+       rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c);
 
-       /* One shot, path B LOK & IQK */
-       rtl8xxxu_write32(priv, REG_IQK_AGC_CONT, 0x00000002);
-       rtl8xxxu_write32(priv, REG_IQK_AGC_CONT, 0x00000000);
+       rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82140303);
+       rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x68160000);
 
-       mdelay(1);
+       /* LO calibration setting */
+       rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x00462911);
+
+       /* One shot, path A LOK & IQK */
+       rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000);
+       rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+       mdelay(10);
 
        /* Check failed */
        reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
-       reg_eb4 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B);
-       reg_ebc = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B);
-       reg_ec4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_B_2);
-       reg_ecc = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_B_2);
+       reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A);
+       reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A);
 
-       if (!(reg_eac & BIT(31)) &&
-           ((reg_eb4 & 0x03ff0000) != 0x01420000) &&
-           ((reg_ebc & 0x03ff0000) != 0x00420000))
+       if (!(reg_eac & BIT(28)) &&
+           ((reg_e94 & 0x03ff0000) != 0x01420000) &&
+           ((reg_e9c & 0x03ff0000) != 0x00420000))
                result |= 0x01;
-       else
-               goto out;
 
-       if (!(reg_eac & BIT(30)) &&
-           (((reg_ec4 & 0x03ff0000) >> 16) != 0x132) &&
-           (((reg_ecc & 0x03ff0000) >> 16) != 0x36))
-               result |= 0x02;
-       else
-               dev_warn(&priv->udev->dev, "%s: Path B RX IQK failed!\n",
-                        __func__);
-out:
        return result;
 }
-#endif
 
-static void rtl8xxxu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
-                                    int result[][8], int t)
+static int rtl8192eu_rx_iqk_path_a(struct rtl8xxxu_priv *priv)
 {
-       struct device *dev = &priv->udev->dev;
-       u32 i, val32;
-       int path_a_ok, path_b_ok;
-       int retry = 2;
-       const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
-               REG_FPGA0_XCD_SWITCH_CTRL, REG_BLUETOOTH,
-               REG_RX_WAIT_CCA, REG_TX_CCK_RFON,
-               REG_TX_CCK_BBON, REG_TX_OFDM_RFON,
-               REG_TX_OFDM_BBON, REG_TX_TO_RX,
-               REG_TX_TO_TX, REG_RX_CCK,
-               REG_RX_OFDM, REG_RX_WAIT_RIFS,
-               REG_RX_TO_RX, REG_STANDBY,
-               REG_SLEEP, REG_PMPD_ANAEN
-       };
-       const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
-               REG_TXPAUSE, REG_BEACON_CTRL,
-               REG_BEACON_CTRL_1, REG_GPIO_MUXCFG
-       };
-       const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
-               REG_OFDM0_TRX_PATH_ENABLE, REG_OFDM0_TR_MUX_PAR,
-               REG_FPGA0_XCD_RF_SW_CTRL, REG_CONFIG_ANT_A, REG_CONFIG_ANT_B,
-               REG_FPGA0_XAB_RF_SW_CTRL, REG_FPGA0_XA_RF_INT_OE,
-               REG_FPGA0_XB_RF_INT_OE, REG_FPGA0_RF_MODE
-       };
-
-       /*
-        * Note: IQ calibration must be performed after loading
-        *       PHY_REG.txt , and radio_a, radio_b.txt
-        */
+       u32 reg_ea4, reg_eac, reg_e94, reg_e9c, val32;
+       int result = 0;
 
-       if (t == 0) {
-               /* Save ADDA parameters, turn Path A ADDA on */
-               rtl8xxxu_save_regs(priv, adda_regs, priv->adda_backup,
-                                  RTL8XXXU_ADDA_REGS);
-               rtl8xxxu_save_mac_regs(priv, iqk_mac_regs, priv->mac_backup);
-               rtl8xxxu_save_regs(priv, iqk_bb_regs,
-                                  priv->bb_backup, RTL8XXXU_BB_REGS);
-       }
+       /* Leave IQK mode */
+       rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00);
 
-       rtl8xxxu_path_adda_on(priv, adda_regs, true);
+       /* Enable path A PA in TX IQK mode */
+       rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, 0x800a0);
+       rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000);
+       rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0000f);
+       rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf117b);
 
-       if (t == 0) {
-               val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_HSSI_PARM1);
-               if (val32 & FPGA0_HSSI_PARM1_PI)
-                       priv->pi_enabled = 1;
-       }
+       /* PA/PAD controlled by 0x56, set to 0x0 */
+       rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x00980);
+       rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_56, 0x51000);
 
-       if (!priv->pi_enabled) {
-               /* Switch BB to PI mode to do IQ Calibration. */
-               rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM1, 0x01000100);
-               rtl8xxxu_write32(priv, REG_FPGA0_XB_HSSI_PARM1, 0x01000100);
-       }
+       /* Enter IQK mode */
+       rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
 
-       val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
-       val32 &= ~FPGA_RF_MODE_CCK;
-       rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+       /* TX IQK setting */
+       rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
+       rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
 
-       rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, 0x03a05600);
-       rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000800e4);
-       rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x22204000);
+       /* Path A IQK setting */
+       rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x18008c1c);
+       rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c);
+       rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c);
+       rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c);
 
-       val32 = rtl8xxxu_read32(priv, REG_FPGA0_XAB_RF_SW_CTRL);
-       val32 |= (FPGA0_RF_PAPE | (FPGA0_RF_PAPE << FPGA0_RF_BD_CTRL_SHIFT));
-       rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32);
+       rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82160c1f);
+       rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x68160c1f);
 
-       val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_RF_INT_OE);
-       val32 &= ~BIT(10);
-       rtl8xxxu_write32(priv, REG_FPGA0_XA_RF_INT_OE, val32);
-       val32 = rtl8xxxu_read32(priv, REG_FPGA0_XB_RF_INT_OE);
-       val32 &= ~BIT(10);
-       rtl8xxxu_write32(priv, REG_FPGA0_XB_RF_INT_OE, val32);
+       /* LO calibration setting */
+       rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a911);
 
-       if (priv->tx_paths > 1) {
-               rtl8xxxu_write32(priv, REG_FPGA0_XA_LSSI_PARM, 0x00010000);
-               rtl8xxxu_write32(priv, REG_FPGA0_XB_LSSI_PARM, 0x00010000);
-       }
+       /* One shot, path A LOK & IQK */
+       rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xfa000000);
+       rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
 
-       /* MAC settings */
-       rtl8xxxu_mac_calibration(priv, iqk_mac_regs, priv->mac_backup);
+       mdelay(10);
 
-       /* Page B init */
-       rtl8xxxu_write32(priv, REG_CONFIG_ANT_A, 0x00080000);
+       /* Check failed */
+       reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+       reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A);
+       reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A);
 
-       if (priv->tx_paths > 1)
+       if (!(reg_eac & BIT(28)) &&
+           ((reg_e94 & 0x03ff0000) != 0x01420000) &&
+           ((reg_e9c & 0x03ff0000) != 0x00420000)) {
+               result |= 0x01;
+       } else {
+               /* PA/PAD controlled by 0x0 */
+               rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+               rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x180);
+               goto out;
+       }
+
+       val32 = 0x80007c00 |
+               (reg_e94 & 0x03ff0000) | ((reg_e9c >> 16) & 0x03ff);
+       rtl8xxxu_write32(priv, REG_TX_IQK, val32);
+
+       /* Modify RX IQK mode table */
+       rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+
+       rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, 0x800a0);
+       rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000);
+       rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0000f);
+       rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7ffa);
+
+       /* PA/PAD controlled by 0x56, set to 0x0 */
+       rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x00980);
+       rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_56, 0x51000);
+
+       /* Enter IQK mode */
+       rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+
+       /* IQK setting */
+       rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+       /* Path A IQK setting */
+       rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x38008c1c);
+       rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x18008c1c);
+       rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c);
+       rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c);
+
+       rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82160c1f);
+       rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28160c1f);
+
+       /* LO calibration setting */
+       rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a891);
+
+       /* One shot, path A LOK & IQK */
+       rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xfa000000);
+       rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+       mdelay(10);
+
+       reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+       reg_ea4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_A_2);
+
+       rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+       rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x180);
+
+       if (!(reg_eac & BIT(27)) &&
+           ((reg_ea4 & 0x03ff0000) != 0x01320000) &&
+           ((reg_eac & 0x03ff0000) != 0x00360000))
+               result |= 0x02;
+       else
+               dev_warn(&priv->udev->dev, "%s: Path A RX IQK failed!\n",
+                        __func__);
+
+out:
+       return result;
+}
+
+static int rtl8192eu_iqk_path_b(struct rtl8xxxu_priv *priv)
+{
+       u32 reg_eac, reg_eb4, reg_ebc;
+       int result = 0;
+
+       rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+       rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_UNKNOWN_DF, 0x00180);
+       rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+
+       rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+       rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+
+       /* Path B IQK setting */
+       rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x38008c1c);
+       rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c);
+       rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x18008c1c);
+       rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c);
+
+       rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x821403e2);
+       rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x68160000);
+
+       /* LO calibration setting */
+       rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x00492911);
+
+       /* One shot, path B LOK & IQK */
+       rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xfa000000);
+       rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+       mdelay(1);
+
+       /* Check failed */
+       reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+       reg_eb4 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B);
+       reg_ebc = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B);
+
+       if (!(reg_eac & BIT(31)) &&
+           ((reg_eb4 & 0x03ff0000) != 0x01420000) &&
+           ((reg_ebc & 0x03ff0000) != 0x00420000))
+               result |= 0x01;
+       else
+               dev_warn(&priv->udev->dev, "%s: Path B IQK failed!\n",
+                        __func__);
+
+       return result;
+}
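Each IQK helper reports its outcome as a small bitmask (0x01 for the TX
step, 0x02 for RX) and the calibrate loops test for 0x01 or 0x03 under a
bounded retry. A compact standalone model of that convention, with run_iqk()
standing in for a helper such as rtl8192eu_rx_iqk_path_a():

	#include <stdio.h>

	#define IQK_TX_OK 0x01
	#define IQK_RX_OK 0x02

	/* Pretend the TX step passes first and RX only on the retry */
	static int run_iqk(int attempt)
	{
		return attempt ? (IQK_TX_OK | IQK_RX_OK) : IQK_TX_OK;
	}

	int main(void)
	{
		int i, result = 0;

		for (i = 0; i < 2 && result != 0x03; i++)	/* retry = 2 */
			result = run_iqk(i);

		printf("IQK result 0x%02x\n", (unsigned)result);
		return 0;
	}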
+
+static int rtl8192eu_rx_iqk_path_b(struct rtl8xxxu_priv *priv)
+{
+       u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc, val32;
+       int result = 0;
+
+       /* Leave IQK mode */
+       rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+
+       /* Enable path B PA in TX IQK mode */
+       rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_WE_LUT, 0x800a0);
+       rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_RCK_OS, 0x30000);
+       rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_TXPA_G1, 0x0000f);
+       rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_TXPA_G2, 0xf117b);
+
+       /* PA/PAD controlled by 0x56, set to 0x0 */
+       rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_UNKNOWN_DF, 0x00980);
+       rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_UNKNOWN_56, 0x51000);
+
+       /* Enter IQK mode */
+       rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+
+       /* TX IQK setting */
+       rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
+       rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+       /* Path B IQK setting */
+       rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x38008c1c);
+       rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c);
+       rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x18008c1c);
+       rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c);
+
+       rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x82160c1f);
+       rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x68160c1f);
+
+       /* LO calibration setting */
+       rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a911);
+
+       /* One shot, path B LOK & IQK */
+       rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xfa000000);
+       rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+       mdelay(10);
+
+       /* Check failed */
+       reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+       reg_eb4 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B);
+       reg_ebc = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B);
+
+       if (!(reg_eac & BIT(31)) &&
+           ((reg_eb4 & 0x03ff0000) != 0x01420000) &&
+           ((reg_ebc & 0x03ff0000) != 0x00420000)) {
+               result |= 0x01;
+       } else {
+               /*
+                * PA/PAD controlled by 0x0
+                * Vendor driver restores RF_A here which I believe is a bug
+                */
+               rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+               rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_UNKNOWN_DF, 0x180);
+               goto out;
+       }
+
+       val32 = 0x80007c00 |
+               (reg_eb4 & 0x03ff0000) | ((reg_ebc >> 16) & 0x03ff);
+       rtl8xxxu_write32(priv, REG_TX_IQK, val32);
+
+       /* Modify RX IQK mode table */
+       rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+
+       rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_WE_LUT, 0x800a0);
+       rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_RCK_OS, 0x30000);
+       rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_TXPA_G1, 0x0000f);
+       rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_TXPA_G2, 0xf7ffa);
+
+       /* PA/PAD controlled by 0x56, set to 0x0 */
+       rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_UNKNOWN_DF, 0x00980);
+       rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_UNKNOWN_56, 0x51000);
+
+       /* Enter IQK mode */
+       rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+
+       /* IQK setting */
+       rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+       /* Path B IQK setting */
+       rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x38008c1c);
+       rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c);
+       rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c);
+       rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x18008c1c);
+
+       rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82160c1f);
+       rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28160c1f);
+
+       /* LO calibration setting */
+       rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a891);
+
+       /* One shot, path B LOK & IQK */
+       rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xfa000000);
+       rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+       mdelay(10);
+
+       reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+       reg_ec4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_B_2);
+       reg_ecc = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_B_2);
+
+       rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+       rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_UNKNOWN_DF, 0x180);
+
+       if (!(reg_eac & BIT(30)) &&
+           ((reg_ec4 & 0x03ff0000) != 0x01320000) &&
+           ((reg_ecc & 0x03ff0000) != 0x00360000))
+               result |= 0x02;
+       else
+               dev_warn(&priv->udev->dev, "%s: Path B RX IQK failed!\n",
+                        __func__);
+
+out:
+       return result;
+}
+
+static void rtl8xxxu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
+                                    int result[][8], int t)
+{
+       struct device *dev = &priv->udev->dev;
+       u32 i, val32;
+       int path_a_ok, path_b_ok;
+       int retry = 2;
+       const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
+               REG_FPGA0_XCD_SWITCH_CTRL, REG_BLUETOOTH,
+               REG_RX_WAIT_CCA, REG_TX_CCK_RFON,
+               REG_TX_CCK_BBON, REG_TX_OFDM_RFON,
+               REG_TX_OFDM_BBON, REG_TX_TO_RX,
+               REG_TX_TO_TX, REG_RX_CCK,
+               REG_RX_OFDM, REG_RX_WAIT_RIFS,
+               REG_RX_TO_RX, REG_STANDBY,
+               REG_SLEEP, REG_PMPD_ANAEN
+       };
+       const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
+               REG_TXPAUSE, REG_BEACON_CTRL,
+               REG_BEACON_CTRL_1, REG_GPIO_MUXCFG
+       };
+       const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
+               REG_OFDM0_TRX_PATH_ENABLE, REG_OFDM0_TR_MUX_PAR,
+               REG_FPGA0_XCD_RF_SW_CTRL, REG_CONFIG_ANT_A, REG_CONFIG_ANT_B,
+               REG_FPGA0_XAB_RF_SW_CTRL, REG_FPGA0_XA_RF_INT_OE,
+               REG_FPGA0_XB_RF_INT_OE, REG_FPGA0_RF_MODE
+       };
+
+       /*
+        * Note: IQ calibration must be performed after loading
+        *       PHY_REG.txt, radio_a.txt and radio_b.txt
+        */
+
+       if (t == 0) {
+               /* Save ADDA parameters, turn Path A ADDA on */
+               rtl8xxxu_save_regs(priv, adda_regs, priv->adda_backup,
+                                  RTL8XXXU_ADDA_REGS);
+               rtl8xxxu_save_mac_regs(priv, iqk_mac_regs, priv->mac_backup);
+               rtl8xxxu_save_regs(priv, iqk_bb_regs,
+                                  priv->bb_backup, RTL8XXXU_BB_REGS);
+       }
+
+       rtl8xxxu_path_adda_on(priv, adda_regs, true);
+
+       if (t == 0) {
+               val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_HSSI_PARM1);
+               if (val32 & FPGA0_HSSI_PARM1_PI)
+                       priv->pi_enabled = 1;
+       }
+
+       if (!priv->pi_enabled) {
+               /* Switch BB to PI mode to do IQ Calibration. */
+               rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM1, 0x01000100);
+               rtl8xxxu_write32(priv, REG_FPGA0_XB_HSSI_PARM1, 0x01000100);
+       }
+
+       val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+       val32 &= ~FPGA_RF_MODE_CCK;
+       rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+       rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, 0x03a05600);
+       rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000800e4);
+       rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x22204000);
+
+       if (!priv->no_pape) {
+               val32 = rtl8xxxu_read32(priv, REG_FPGA0_XAB_RF_SW_CTRL);
+               val32 |= (FPGA0_RF_PAPE |
+                         (FPGA0_RF_PAPE << FPGA0_RF_BD_CTRL_SHIFT));
+               rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32);
+       }
+
+       val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_RF_INT_OE);
+       val32 &= ~BIT(10);
+       rtl8xxxu_write32(priv, REG_FPGA0_XA_RF_INT_OE, val32);
+       val32 = rtl8xxxu_read32(priv, REG_FPGA0_XB_RF_INT_OE);
+       val32 &= ~BIT(10);
+       rtl8xxxu_write32(priv, REG_FPGA0_XB_RF_INT_OE, val32);
+
+       if (priv->tx_paths > 1) {
+               rtl8xxxu_write32(priv, REG_FPGA0_XA_LSSI_PARM, 0x00010000);
+               rtl8xxxu_write32(priv, REG_FPGA0_XB_LSSI_PARM, 0x00010000);
+       }
+
+       /* MAC settings */
+       rtl8xxxu_mac_calibration(priv, iqk_mac_regs, priv->mac_backup);
+
+       /* Page B init */
+       rtl8xxxu_write32(priv, REG_CONFIG_ANT_A, 0x00080000);
+
+       if (priv->tx_paths > 1)
                rtl8xxxu_write32(priv, REG_CONFIG_ANT_B, 0x00080000);
 
        /* IQ calibration setting */
@@ -4692,20 +5756,6 @@ static void rtl8723bu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
        rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000800e4);
        rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x22204000);
 
-#ifdef RTL8723BU_PATH_B
-       /* Set RF mode to standby Path B */
-       if (priv->tx_paths > 1)
-               rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_AC, 0x10000);
-#endif
-
-#if 0
-       /* Page B init */
-       rtl8xxxu_write32(priv, REG_CONFIG_ANT_A, 0x0f600000);
-
-       if (priv->tx_paths > 1)
-               rtl8xxxu_write32(priv, REG_CONFIG_ANT_B, 0x0f600000);
-#endif
-
        /*
         * RX IQ calibration setting for 8723B D cut large current issue
         * when leaving IPS
@@ -4714,33 +5764,224 @@ static void rtl8723bu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
        val32 &= 0x000000ff;
        rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
 
-       val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT);
-       val32 |= 0x80000;
-       rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32);
+       val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT);
+       val32 |= 0x80000;
+       rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32);
+
+       rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000);
+       rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0001f);
+       rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7fb7);
+
+       val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED);
+       val32 |= 0x20;
+       rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED, val32);
+
+       rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_43, 0x60fbd);
+
+       for (i = 0; i < retry; i++) {
+               path_a_ok = rtl8723bu_iqk_path_a(priv);
+               if (path_a_ok == 0x01) {
+                       val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+                       val32 &= 0x000000ff;
+                       rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+                       val32 = rtl8xxxu_read32(priv,
+                                               REG_TX_POWER_BEFORE_IQK_A);
+                       result[t][0] = (val32 >> 16) & 0x3ff;
+                       val32 = rtl8xxxu_read32(priv,
+                                               REG_TX_POWER_AFTER_IQK_A);
+                       result[t][1] = (val32 >> 16) & 0x3ff;
+
+                       break;
+               }
+       }
+
+       if (!path_a_ok)
+               dev_dbg(dev, "%s: Path A TX IQK failed!\n", __func__);
+
+       for (i = 0; i < retry; i++) {
+               path_a_ok = rtl8723bu_rx_iqk_path_a(priv);
+               if (path_a_ok == 0x03) {
+                       val32 = rtl8xxxu_read32(priv,
+                                               REG_RX_POWER_BEFORE_IQK_A_2);
+                       result[t][2] = (val32 >> 16) & 0x3ff;
+                       val32 = rtl8xxxu_read32(priv,
+                                               REG_RX_POWER_AFTER_IQK_A_2);
+                       result[t][3] = (val32 >> 16) & 0x3ff;
+
+                       break;
+               }
+       }
+
+       if (!path_a_ok)
+               dev_dbg(dev, "%s: Path A RX IQK failed!\n", __func__);
+
+       if (priv->tx_paths > 1) {
+#if 1
+               dev_warn(dev, "%s: Path B not supported\n", __func__);
+#else
+
+               /*
+                * Path A into standby
+                */
+               val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+               val32 &= 0x000000ff;
+               rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+               rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, 0x10000);
+
+               val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+               val32 &= 0x000000ff;
+               val32 |= 0x80800000;
+               rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+               /* Turn Path B ADDA on */
+               rtl8xxxu_path_adda_on(priv, adda_regs, false);
+
+               for (i = 0; i < retry; i++) {
+                       path_b_ok = rtl8xxxu_iqk_path_b(priv);
+                       if (path_b_ok == 0x03) {
+                               val32 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B);
+                               result[t][4] = (val32 >> 16) & 0x3ff;
+                               val32 = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B);
+                               result[t][5] = (val32 >> 16) & 0x3ff;
+                               break;
+                       }
+               }
+
+               if (!path_b_ok)
+                       dev_dbg(dev, "%s: Path B IQK failed!\n", __func__);
+
+               for (i = 0; i < retry; i++) {
+                       path_b_ok = rtl8723bu_rx_iqk_path_b(priv);
+                       if (path_b_ok == 0x03) {
+                               val32 = rtl8xxxu_read32(priv,
+                                                       REG_RX_POWER_BEFORE_IQK_B_2);
+                               result[t][6] = (val32 >> 16) & 0x3ff;
+                               val32 = rtl8xxxu_read32(priv,
+                                                       REG_RX_POWER_AFTER_IQK_B_2);
+                               result[t][7] = (val32 >> 16) & 0x3ff;
+                               break;
+                       }
+               }
+
+               if (!path_b_ok)
+                       dev_dbg(dev, "%s: Path B RX IQK failed!\n", __func__);
+#endif
+       }
+
+       /* Back to BB mode, load original value */
+       val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+       val32 &= 0x000000ff;
+       rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+       if (t) {
+               /* Reload ADDA power saving parameters */
+               rtl8xxxu_restore_regs(priv, adda_regs, priv->adda_backup,
+                                     RTL8XXXU_ADDA_REGS);
+
+               /* Reload MAC parameters */
+               rtl8xxxu_restore_mac_regs(priv, iqk_mac_regs, priv->mac_backup);
+
+               /* Reload BB parameters */
+               rtl8xxxu_restore_regs(priv, iqk_bb_regs,
+                                     priv->bb_backup, RTL8XXXU_BB_REGS);
+
+               /* Restore RX initial gain */
+               val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1);
+               val32 &= 0xffffff00;
+               rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32 | 0x50);
+               rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32 | xa_agc);
+
+               if (priv->tx_paths > 1) {
+                       val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_AGC_CORE1);
+                       val32 &= 0xffffff00;
+                       rtl8xxxu_write32(priv, REG_OFDM0_XB_AGC_CORE1,
+                                        val32 | 0x50);
+                       rtl8xxxu_write32(priv, REG_OFDM0_XB_AGC_CORE1,
+                                        val32 | xb_agc);
+               }
+
+               /* Load 0xe30 IQC default value */
+               rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x01008c00);
+               rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x01008c00);
+       }
+}
+
+static void rtl8192eu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
+                                     int result[][8], int t)
+{
+       struct device *dev = &priv->udev->dev;
+       u32 i, val32;
+       int path_a_ok, path_b_ok;
+       int retry = 2;
+       const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
+               REG_FPGA0_XCD_SWITCH_CTRL, REG_BLUETOOTH,
+               REG_RX_WAIT_CCA, REG_TX_CCK_RFON,
+               REG_TX_CCK_BBON, REG_TX_OFDM_RFON,
+               REG_TX_OFDM_BBON, REG_TX_TO_RX,
+               REG_TX_TO_TX, REG_RX_CCK,
+               REG_RX_OFDM, REG_RX_WAIT_RIFS,
+               REG_RX_TO_RX, REG_STANDBY,
+               REG_SLEEP, REG_PMPD_ANAEN
+       };
+       const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
+               REG_TXPAUSE, REG_BEACON_CTRL,
+               REG_BEACON_CTRL_1, REG_GPIO_MUXCFG
+       };
+       const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
+               REG_OFDM0_TRX_PATH_ENABLE, REG_OFDM0_TR_MUX_PAR,
+               REG_FPGA0_XCD_RF_SW_CTRL, REG_CONFIG_ANT_A, REG_CONFIG_ANT_B,
+               REG_FPGA0_XAB_RF_SW_CTRL, REG_FPGA0_XA_RF_INT_OE,
+               REG_FPGA0_XB_RF_INT_OE, REG_CCK0_AFE_SETTING
+       };
+       u8 xa_agc = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1) & 0xff;
+       u8 xb_agc = rtl8xxxu_read32(priv, REG_OFDM0_XB_AGC_CORE1) & 0xff;
+
+       /*
+        * Note: IQ calibration must be performed after loading
+        *       PHY_REG.txt, radio_a.txt and radio_b.txt
+        */
+
+       if (t == 0) {
+               /* Save ADDA parameters, turn Path A ADDA on */
+               rtl8xxxu_save_regs(priv, adda_regs, priv->adda_backup,
+                                  RTL8XXXU_ADDA_REGS);
+               rtl8xxxu_save_mac_regs(priv, iqk_mac_regs, priv->mac_backup);
+               rtl8xxxu_save_regs(priv, iqk_bb_regs,
+                                  priv->bb_backup, RTL8XXXU_BB_REGS);
+       }
+
+       rtl8xxxu_path_adda_on(priv, adda_regs, true);
+
+       /* MAC settings */
+       rtl8xxxu_mac_calibration(priv, iqk_mac_regs, priv->mac_backup);
 
-       rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000);
-       rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0001f);
-       rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7fb7);
+       val32 = rtl8xxxu_read32(priv, REG_CCK0_AFE_SETTING);
+       val32 |= 0x0f000000;
+       rtl8xxxu_write32(priv, REG_CCK0_AFE_SETTING, val32);
 
-       val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED);
-       val32 |= 0x20;
-       rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED, val32);
+       rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, 0x03a05600);
+       rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000800e4);
+       rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x22208200);
 
-       rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_43, 0x60fbd);
+       val32 = rtl8xxxu_read32(priv, REG_FPGA0_XAB_RF_SW_CTRL);
+       val32 |= (FPGA0_RF_PAPE | (FPGA0_RF_PAPE << FPGA0_RF_BD_CTRL_SHIFT));
+       rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32);
 
-       for (i = 0; i < retry; i++) {
-               path_a_ok = rtl8723bu_iqk_path_a(priv);
-               if (path_a_ok == 0x01) {
-                       val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
-                       val32 &= 0x000000ff;
-                       rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+       val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_RF_INT_OE);
+       val32 |= BIT(10);
+       rtl8xxxu_write32(priv, REG_FPGA0_XA_RF_INT_OE, val32);
+       val32 = rtl8xxxu_read32(priv, REG_FPGA0_XB_RF_INT_OE);
+       val32 |= BIT(10);
+       rtl8xxxu_write32(priv, REG_FPGA0_XB_RF_INT_OE, val32);
 
-#if 0 /* Only needed in restore case, we may need this when going to suspend */
-                       priv->RFCalibrateInfo.TxLOK[RF_A] =
-                               rtl8xxxu_read_rfreg(priv, RF_A,
-                                                   RF6052_REG_TXM_IDAC);
-#endif
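+       /* 0x80800000 presumably switches the FPGA into IQK mode; it is
+        * cleared again ("back to BB mode") once calibration is done. */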
+       rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+       rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
+       rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
 
+       for (i = 0; i < retry; i++) {
+               path_a_ok = rtl8192eu_iqk_path_a(priv);
+               if (path_a_ok == 0x01) {
                        val32 = rtl8xxxu_read32(priv,
                                                REG_TX_POWER_BEFORE_IQK_A);
                        result[t][0] = (val32 >> 16) & 0x3ff;
@@ -4756,7 +5997,7 @@ static void rtl8723bu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
                dev_dbg(dev, "%s: Path A TX IQK failed!\n", __func__);
 
        for (i = 0; i < retry; i++) {
-               path_a_ok = rtl8723bu_rx_iqk_path_a(priv);
+               path_a_ok = rtl8192eu_rx_iqk_path_a(priv);
                if (path_a_ok == 0x03) {
                        val32 = rtl8xxxu_read32(priv,
                                                REG_RX_POWER_BEFORE_IQK_A_2);
@@ -4772,30 +6013,22 @@ static void rtl8723bu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
        if (!path_a_ok)
                dev_dbg(dev, "%s: Path A RX IQK failed!\n", __func__);
 
-       if (priv->tx_paths > 1) {
-#if 1
-               dev_warn(dev, "%s: Path B not supported\n", __func__);
-#else
-
-               /*
-                * Path A into standby
-                */
-               val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
-               val32 &= 0x000000ff;
-               rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+       if (priv->rf_paths > 1) {
+               /* Path A into standby */
+               rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
                rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, 0x10000);
-
-               val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
-               val32 &= 0x000000ff;
-               val32 |= 0x80800000;
-               rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+               rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
 
                /* Turn Path B ADDA on */
                rtl8xxxu_path_adda_on(priv, adda_regs, false);
 
+               rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+               rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
+               rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
                for (i = 0; i < retry; i++) {
-                       path_b_ok = rtl8xxxu_iqk_path_b(priv);
-                       if (path_b_ok == 0x03) {
+                       path_b_ok = rtl8192eu_iqk_path_b(priv);
+                       if (path_b_ok == 0x01) {
                                val32 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B);
                                result[t][4] = (val32 >> 16) & 0x3ff;
                                val32 = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B);
@@ -4808,7 +6041,7 @@ static void rtl8723bu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
                        dev_dbg(dev, "%s: Path B IQK failed!\n", __func__);
 
                for (i = 0; i < retry; i++) {
-                       path_b_ok = rtl8723bu_rx_iqk_path_b(priv);
+                       path_b_ok = rtl8192eu_rx_iqk_path_b(priv);
                        if (path_b_ok == 0x03) {
                                val32 = rtl8xxxu_read32(priv,
                                                        REG_RX_POWER_BEFORE_IQK_B_2);
@@ -4822,13 +6055,10 @@ static void rtl8723bu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
 
                if (!path_b_ok)
                        dev_dbg(dev, "%s: Path B RX IQK failed!\n", __func__);
-#endif
        }
 
        /* Back to BB mode, load original value */
-       val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
-       val32 &= 0x000000ff;
-       rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+       rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
 
        if (t) {
                /* Reload ADDA power saving parameters */
@@ -4848,7 +6078,7 @@ static void rtl8723bu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
                rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32 | 0x50);
                rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32 | xa_agc);
 
-               if (priv->tx_paths > 1) {
+               if (priv->rf_paths > 1) {
                        val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_AGC_CORE1);
                        val32 &= 0xffffff00;
                        rtl8xxxu_write32(priv, REG_OFDM0_XB_AGC_CORE1,
@@ -4877,7 +6107,7 @@ static void rtl8xxxu_prepare_calibrate(struct rtl8xxxu_priv *priv, u8 start)
        rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.bt_wlan_calibration));
 }
 
-static void rtl8723au_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
+static void rtl8xxxu_gen1_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
 {
        struct device *dev = &priv->udev->dev;
        int result[4][8];       /* last is final result */
@@ -4975,7 +6205,7 @@ static void rtl8723au_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
                rtl8xxxu_fill_iqk_matrix_b(priv, path_b_ok, result,
                                           candidate, (reg_ec4 == 0));
 
-       rtl8xxxu_save_regs(priv, rtl8723au_iqk_phy_iq_bb_reg,
+       rtl8xxxu_save_regs(priv, rtl8xxxu_iqk_phy_iq_bb_reg,
                           priv->bb_recovery_backup, RTL8XXXU_BB_REGS);
 
        rtl8xxxu_prepare_calibrate(priv, 0);
@@ -5007,7 +6237,8 @@ static void rtl8723bu_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
                rtl8723bu_phy_iqcalibrate(priv, result, i);
 
                if (i == 1) {
-                       simu = rtl8723bu_simularity_compare(priv, result, 0, 1);
+                       simu = rtl8xxxu_gen2_simularity_compare(priv,
+                                                               result, 0, 1);
                        if (simu) {
                                candidate = 0;
                                break;
@@ -5015,13 +6246,15 @@ static void rtl8723bu_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
                }
 
                if (i == 2) {
-                       simu = rtl8723bu_simularity_compare(priv, result, 0, 2);
+                       simu = rtl8xxxu_gen2_simularity_compare(priv,
+                                                               result, 0, 2);
                        if (simu) {
                                candidate = 0;
                                break;
                        }
 
-                       simu = rtl8723bu_simularity_compare(priv, result, 1, 2);
+                       simu = rtl8xxxu_gen2_simularity_compare(priv,
+                                                               result, 1, 2);
                        if (simu) {
                                candidate = 1;
                        } else {
@@ -5080,7 +6313,7 @@ static void rtl8723bu_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
                rtl8xxxu_fill_iqk_matrix_b(priv, path_b_ok, result,
                                           candidate, (reg_ec4 == 0));
 
-       rtl8xxxu_save_regs(priv, rtl8723au_iqk_phy_iq_bb_reg,
+       rtl8xxxu_save_regs(priv, rtl8xxxu_iqk_phy_iq_bb_reg,
                           priv->bb_recovery_backup, RTL8XXXU_BB_REGS);
 
        rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, bt_control);
@@ -5096,18 +6329,105 @@ static void rtl8723bu_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
        rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED, val32);
        rtl8xxxu_write_rfreg(priv, RF_A, 0x43, 0x300bd);
 
-       if (priv->rf_paths > 1) {
-               dev_dbg(dev, "%s: beware 2T not yet supported\n", __func__);
-#ifdef RTL8723BU_PATH_B
-               if (RF_Path == 0x0)     //S1
-                       ODM_SetIQCbyRFpath(pDM_Odm, 0);
-               else    //S0
-                       ODM_SetIQCbyRFpath(pDM_Odm, 1);
-#endif
-       }
+       if (priv->rf_paths > 1)
+               dev_dbg(dev, "%s: 8723BU 2T not supported\n", __func__);
+
        rtl8xxxu_prepare_calibrate(priv, 0);
 }
 
+static void rtl8192eu_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
+{
+       struct device *dev = &priv->udev->dev;
+       int result[4][8];       /* last is final result */
+       int i, candidate;
+       bool path_a_ok, path_b_ok;
+       u32 reg_e94, reg_e9c, reg_ea4, reg_eac;
+       u32 reg_eb4, reg_ebc, reg_ec4, reg_ecc;
+       bool simu;
+
+       memset(result, 0, sizeof(result));
+       candidate = -1;
+
+       path_a_ok = false;
+       path_b_ok = false;
+
+       for (i = 0; i < 3; i++) {
+               rtl8192eu_phy_iqcalibrate(priv, result, i);
+
+               if (i == 1) {
+                       simu = rtl8xxxu_gen2_simularity_compare(priv,
+                                                               result, 0, 1);
+                       if (simu) {
+                               candidate = 0;
+                               break;
+                       }
+               }
+
+               if (i == 2) {
+                       simu = rtl8xxxu_gen2_simularity_compare(priv,
+                                                               result, 0, 2);
+                       if (simu) {
+                               candidate = 0;
+                               break;
+                       }
+
+                       simu = rtl8xxxu_gen2_simularity_compare(priv,
+                                                               result, 1, 2);
+                       if (simu)
+                               candidate = 1;
+                       else
+                               candidate = 3;
+               }
+       }
+
+       for (i = 0; i < 4; i++) {
+               reg_e94 = result[i][0];
+               reg_e9c = result[i][1];
+               reg_ea4 = result[i][2];
+               reg_eac = result[i][3];
+               reg_eb4 = result[i][4];
+               reg_ebc = result[i][5];
+               reg_ec4 = result[i][6];
+               reg_ecc = result[i][7];
+       }
+
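+       /*
+        * result[x][0..3] hold the path A TX/RX before/after readings,
+        * [4..7] the path B ones. candidate >= 0 means two calibration
+        * runs matched; otherwise fall back to the defaults below.
+        */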
+       if (candidate >= 0) {
+               reg_e94 = result[candidate][0];
+               priv->rege94 = reg_e94;
+               reg_e9c = result[candidate][1];
+               priv->rege9c = reg_e9c;
+               reg_ea4 = result[candidate][2];
+               reg_eac = result[candidate][3];
+               reg_eb4 = result[candidate][4];
+               priv->regeb4 = reg_eb4;
+               reg_ebc = result[candidate][5];
+               priv->regebc = reg_ebc;
+               reg_ec4 = result[candidate][6];
+               reg_ecc = result[candidate][7];
+               dev_dbg(dev, "%s: candidate is %x\n", __func__, candidate);
+               dev_dbg(dev,
+                       "%s: e94 =%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x "
+                       "ecc=%x\n ", __func__, reg_e94, reg_e9c,
+                       reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc);
+               path_a_ok = true;
+               path_b_ok = true;
+       } else {
+               reg_e94 = reg_eb4 = priv->rege94 = priv->regeb4 = 0x100;
+               reg_e9c = reg_ebc = priv->rege9c = priv->regebc = 0x0;
+       }
+
+       if (reg_e94 && candidate >= 0)
+               rtl8xxxu_fill_iqk_matrix_a(priv, path_a_ok, result,
+                                          candidate, (reg_ea4 == 0));
+
+       if (priv->rf_paths > 1)
+               rtl8xxxu_fill_iqk_matrix_b(priv, path_b_ok, result,
+                                          candidate, (reg_ec4 == 0));
+
+       rtl8xxxu_save_regs(priv, rtl8xxxu_iqk_phy_iq_bb_reg,
+                          priv->bb_recovery_backup, RTL8XXXU_BB_REGS);
+}
+
 static void rtl8723a_phy_lc_calibrate(struct rtl8xxxu_priv *priv)
 {
        u32 val32;
@@ -5231,7 +6551,7 @@ static void rtl8xxxu_set_ampdu_min_space(struct rtl8xxxu_priv *priv, u8 density)
 static int rtl8xxxu_active_to_emu(struct rtl8xxxu_priv *priv)
 {
        u8 val8;
-       int count, ret;
+       int count, ret = 0;
 
        /* Start of rtl8723AU_card_enable_flow */
        /* Act to Cardemu sequence*/
@@ -5281,7 +6601,7 @@ static int rtl8723bu_active_to_emu(struct rtl8xxxu_priv *priv)
        u8 val8;
        u16 val16;
        u32 val32;
-       int count, ret;
+       int count, ret = 0;
 
        /* Turn off RF */
        rtl8xxxu_write8(priv, REG_RF_CTRL, 0);
@@ -5292,9 +6612,9 @@ static int rtl8723bu_active_to_emu(struct rtl8xxxu_priv *priv)
        rtl8xxxu_write16(priv, REG_GPIO_INTM, val16);
 
        /* Release WLON reset 0x04[16]= 1*/
-       val32 = rtl8xxxu_read32(priv, REG_GPIO_INTM);
+       val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
        val32 |= APS_FSMCO_WLON_RESET;
-       rtl8xxxu_write32(priv, REG_GPIO_INTM, val32);
+       rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
 
        /* 0x0005[1] = 1 turn off MAC by HW state machine*/
        val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
@@ -5338,7 +6658,7 @@ static int rtl8xxxu_active_to_lps(struct rtl8xxxu_priv *priv)
 {
        u8 val8;
        u8 val32;
-       int count, ret;
+       int count, ret = 0;
 
        rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
 
@@ -5756,6 +7076,50 @@ static int rtl8xxxu_flush_fifo(struct rtl8xxxu_priv *priv)
        return retval;
 }
 
+static void rtl8xxxu_gen1_usb_quirks(struct rtl8xxxu_priv *priv)
+{
+       /* Fix USB interface interference issue */
+       rtl8xxxu_write8(priv, 0xfe40, 0xe0);
+       rtl8xxxu_write8(priv, 0xfe41, 0x8d);
+       rtl8xxxu_write8(priv, 0xfe42, 0x80);
+       /*
+        * This sets TXDMA_OFFSET_DROP_DATA_EN (bit 9) as well as bits
+        * 8 and 5, for which I have found no documentation.
+        */
+       rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, 0xfd0320);
+
+       /*
+        * Solve too many protocol errors on the USB bus.
+        * Can't do this for 8188/8192 UMC A cut parts
+        */
+       if (priv->chip_cut || !priv->vendor_umc) {
+               rtl8xxxu_write8(priv, 0xfe40, 0xe6);
+               rtl8xxxu_write8(priv, 0xfe41, 0x94);
+               rtl8xxxu_write8(priv, 0xfe42, 0x80);
+
+               rtl8xxxu_write8(priv, 0xfe40, 0xe0);
+               rtl8xxxu_write8(priv, 0xfe41, 0x19);
+               rtl8xxxu_write8(priv, 0xfe42, 0x80);
+
+               rtl8xxxu_write8(priv, 0xfe40, 0xe5);
+               rtl8xxxu_write8(priv, 0xfe41, 0x91);
+               rtl8xxxu_write8(priv, 0xfe42, 0x80);
+
+               rtl8xxxu_write8(priv, 0xfe40, 0xe2);
+               rtl8xxxu_write8(priv, 0xfe41, 0x81);
+               rtl8xxxu_write8(priv, 0xfe42, 0x80);
+       }
+}
+
+static void rtl8xxxu_gen2_usb_quirks(struct rtl8xxxu_priv *priv)
+{
+       u32 val32;
+
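+       /* Unlike the gen1 parts, gen2 chips only need the TXDMA
+        * drop-data quirk; no 0xfe4x register pokes are required. */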
+       val32 = rtl8xxxu_read32(priv, REG_TXDMA_OFFSET_CHK);
+       val32 |= TXDMA_OFFSET_DROP_DATA_EN;
+       rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, val32);
+}
+
 static int rtl8723au_power_on(struct rtl8xxxu_priv *priv)
 {
        u8 val8;
@@ -5952,10 +7316,12 @@ static int rtl8192cu_power_on(struct rtl8xxxu_priv *priv)
                CR_SCHEDULE_ENABLE | CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE;
        rtl8xxxu_write16(priv, REG_CR, val16);
 
+       rtl8xxxu_write8(priv, 0xfe10, 0x19);
+
        /*
         * Workaround for 8188RU LNA power leakage problem.
         */
-       if (priv->rtlchip == 0x8188c && priv->hi_pa) {
+       if (priv->rtl_chip == RTL8188R) {
                val32 = rtl8xxxu_read32(priv, REG_FPGA0_XCD_RF_PARM);
                val32 &= ~BIT(1);
                rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_PARM, val32);
@@ -5965,6 +7331,41 @@ static int rtl8192cu_power_on(struct rtl8xxxu_priv *priv)
 
 #endif
 
+/*
+ * This is needed for 8723bu as well, presumably
+ */
+static void rtl8192e_crystal_afe_adjust(struct rtl8xxxu_priv *priv)
+{
+       u8 val8;
+       u32 val32;
+
+       /*
+        * 40MHz crystal source, MAC 0x28[2]=0
+        */
+       val8 = rtl8xxxu_read8(priv, REG_AFE_PLL_CTRL);
+       val8 &= 0xfb;
+       rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL, val8);
+
+       val32 = rtl8xxxu_read32(priv, REG_AFE_CTRL4);
+       val32 &= 0xfffffc7f;
+       rtl8xxxu_write32(priv, REG_AFE_CTRL4, val32);
+
+       /*
+        * 92e AFE parameter
+        * AFE PLL KVCO selection, MAC 0x28[6]=1
+        */
+       val8 = rtl8xxxu_read8(priv, REG_AFE_PLL_CTRL);
+       val8 &= 0xbf;
+       rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL, val8);
+
+       /*
+        * AFE PLL KVCO selection, MAC 0x78[21]=0
+        */
+       val32 = rtl8xxxu_read32(priv, REG_AFE_CTRL4);
+       val32 &= 0xffdfffff;
+       rtl8xxxu_write32(priv, REG_AFE_CTRL4, val32);
+}
+
 static int rtl8192eu_power_on(struct rtl8xxxu_priv *priv)
 {
        u16 val16;
@@ -5987,6 +7388,10 @@ static int rtl8192eu_power_on(struct rtl8xxxu_priv *priv)
                rtl8xxxu_write8(priv, REG_LDO_SW_CTRL, 0x83);
        }
 
+       /*
+        * Adjust AFE before enabling PLL
+        */
+       rtl8192e_crystal_afe_adjust(priv);
        rtl8192e_disabled_to_emu(priv);
 
        ret = rtl8192e_emu_to_active(priv);
@@ -6020,7 +7425,7 @@ static void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv)
        /*
         * Workaround for 8188RU LNA power leakage problem.
         */
-       if (priv->rtlchip == 0x8188c && priv->hi_pa) {
+       if (priv->rtl_chip == RTL8188R) {
                val32 = rtl8xxxu_read32(priv, REG_FPGA0_XCD_RF_PARM);
                val32 |= BIT(1);
                rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_PARM, val32);
@@ -6075,7 +7480,7 @@ static void rtl8723bu_power_off(struct rtl8xxxu_priv *priv)
        val8 &= ~TX_REPORT_CTRL_TIMER_ENABLE;
        rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8);
 
-       rtl8xxxu_write16(priv, REG_CR, 0x0000);
+       rtl8xxxu_write8(priv, REG_CR, 0x0000);
 
        rtl8xxxu_active_to_lps(priv);
 
@@ -6091,26 +7496,71 @@ static void rtl8723bu_power_off(struct rtl8xxxu_priv *priv)
        /* Reset MCU ready status */
        rtl8xxxu_write8(priv, REG_MCU_FW_DL, 0x00);
 
-       rtl8723bu_active_to_emu(priv);
-       rtl8xxxu_emu_to_disabled(priv);
-}
+       rtl8723bu_active_to_emu(priv);
+
+       val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+       val8 |= BIT(3); /* APS_FSMCO_HW_SUSPEND */
+       rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+       /* 0x48[16] = 1 to enable GPIO9 as EXT wakeup */
+       val8 = rtl8xxxu_read8(priv, REG_GPIO_INTM + 2);
+       val8 |= BIT(0);
+       rtl8xxxu_write8(priv, REG_GPIO_INTM + 2, val8);
+}
+
+#ifdef NEED_PS_TDMA
+static void rtl8723bu_set_ps_tdma(struct rtl8xxxu_priv *priv,
+                                 u8 arg1, u8 arg2, u8 arg3, u8 arg4, u8 arg5)
+{
+       struct h2c_cmd h2c;
+
+       memset(&h2c, 0, sizeof(struct h2c_cmd));
+       h2c.b_type_dma.cmd = H2C_8723B_B_TYPE_TDMA;
+       h2c.b_type_dma.data1 = arg1;
+       h2c.b_type_dma.data2 = arg2;
+       h2c.b_type_dma.data3 = arg3;
+       h2c.b_type_dma.data4 = arg4;
+       h2c.b_type_dma.data5 = arg5;
+       rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.b_type_dma));
+}
+#endif
+
+static void rtl8192e_enable_rf(struct rtl8xxxu_priv *priv)
+{
+       u32 val32;
+       u8 val8;
+
+       val8 = rtl8xxxu_read8(priv, REG_GPIO_MUXCFG);
+       val8 |= BIT(5);
+       rtl8xxxu_write8(priv, REG_GPIO_MUXCFG, val8);
+
+       /*
+        * WLAN action by PTA
+        */
+       rtl8xxxu_write8(priv, REG_WLAN_ACT_CONTROL_8723B, 0x04);
+
+       val32 = rtl8xxxu_read32(priv, REG_PWR_DATA);
+       val32 |= PWR_DATA_EEPRPAD_RFE_CTRL_EN;
+       rtl8xxxu_write32(priv, REG_PWR_DATA, val32);
+
+       val32 = rtl8xxxu_read32(priv, REG_RFE_BUFFER);
+       val32 |= (BIT(0) | BIT(1));
+       rtl8xxxu_write32(priv, REG_RFE_BUFFER, val32);
+
+       rtl8xxxu_write8(priv, REG_RFE_CTRL_ANTA_SRC, 0x77);
 
-#ifdef NEED_PS_TDMA
-static void rtl8723bu_set_ps_tdma(struct rtl8xxxu_priv *priv,
-                                 u8 arg1, u8 arg2, u8 arg3, u8 arg4, u8 arg5)
-{
-       struct h2c_cmd h2c;
+       val32 = rtl8xxxu_read32(priv, REG_LEDCFG0);
+       val32 &= ~BIT(24);
+       val32 |= BIT(23);
+       rtl8xxxu_write32(priv, REG_LEDCFG0, val32);
 
-       memset(&h2c, 0, sizeof(struct h2c_cmd));
-       h2c.b_type_dma.cmd = H2C_8723B_B_TYPE_TDMA;
-       h2c.b_type_dma.data1 = arg1;
-       h2c.b_type_dma.data2 = arg2;
-       h2c.b_type_dma.data3 = arg3;
-       h2c.b_type_dma.data4 = arg4;
-       h2c.b_type_dma.data5 = arg5;
-       rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.b_type_dma));
+       /*
+        * Fix external switch Main->S1, Aux->S0
+        */
+       val8 = rtl8xxxu_read8(priv, REG_PAD_CTRL1);
+       val8 &= ~BIT(0);
+       rtl8xxxu_write8(priv, REG_PAD_CTRL1, val8);
 }
-#endif
 
 static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv)
 {
@@ -6219,12 +7669,10 @@ static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv)
        rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.ignore_wlan));
 }
 
-static void rtl8723b_disable_rf(struct rtl8xxxu_priv *priv)
+static void rtl8xxxu_gen2_disable_rf(struct rtl8xxxu_priv *priv)
 {
        u32 val32;
 
-       rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
-
        val32 = rtl8xxxu_read32(priv, REG_RX_WAIT_CCA);
        val32 &= ~(BIT(22) | BIT(23));
        rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, val32);
@@ -6272,11 +7720,64 @@ static void rtl8723bu_init_statistics(struct rtl8xxxu_priv *priv)
        rtl8xxxu_write32(priv, REG_OFDM0_FA_RSTC, val32);
 }
 
+static void rtl8xxxu_old_init_queue_reserved_page(struct rtl8xxxu_priv *priv)
+{
+       u8 val8;
+       u32 val32;
+
+       if (priv->ep_tx_normal_queue)
+               val8 = TX_PAGE_NUM_NORM_PQ;
+       else
+               val8 = 0;
+
+       rtl8xxxu_write8(priv, REG_RQPN_NPQ, val8);
+
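+       /* RQPN_LOAD presumably tells the hardware to latch the new
+        * reserved page allocation when the register is written. */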
+       val32 = (TX_PAGE_NUM_PUBQ << RQPN_PUB_PQ_SHIFT) | RQPN_LOAD;
+
+       if (priv->ep_tx_high_queue)
+               val32 |= (TX_PAGE_NUM_HI_PQ << RQPN_HI_PQ_SHIFT);
+       if (priv->ep_tx_low_queue)
+               val32 |= (TX_PAGE_NUM_LO_PQ << RQPN_LO_PQ_SHIFT);
+
+       rtl8xxxu_write32(priv, REG_RQPN, val32);
+}
+
+static void rtl8xxxu_init_queue_reserved_page(struct rtl8xxxu_priv *priv)
+{
+       struct rtl8xxxu_fileops *fops = priv->fops;
+       u32 hq, lq, nq, eq, pubq;
+       u32 val32;
+
+       hq = 0;
+       lq = 0;
+       nq = 0;
+       eq = 0;
+       pubq = 0;
+
+       if (priv->ep_tx_high_queue)
+               hq = fops->page_num_hi;
+       if (priv->ep_tx_low_queue)
+               lq = fops->page_num_lo;
+       if (priv->ep_tx_normal_queue)
+               nq = fops->page_num_norm;
+
+       val32 = (nq << RQPN_NPQ_SHIFT) | (eq << RQPN_EPQ_SHIFT);
+       rtl8xxxu_write32(priv, REG_RQPN_NPQ, val32);
+
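+       /* Pages not reserved for the high/low/normal queues are left
+        * to the public pool. */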
+       pubq = fops->total_page_num - hq - lq - nq;
+
+       val32 = RQPN_LOAD;
+       val32 |= (hq << RQPN_HI_PQ_SHIFT);
+       val32 |= (lq << RQPN_LO_PQ_SHIFT);
+       val32 |= (pubq << RQPN_PUB_PQ_SHIFT);
+
+       rtl8xxxu_write32(priv, REG_RQPN, val32);
+}
+
 static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
 {
        struct rtl8xxxu_priv *priv = hw->priv;
        struct device *dev = &priv->udev->dev;
-       struct rtl8xxxu_rfregval *rftable;
        bool macpower;
        int ret;
        u8 val8;
@@ -6301,33 +7802,22 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
                goto exit;
        }
 
-       dev_dbg(dev, "%s: macpower %i\n", __func__, macpower);
        if (!macpower) {
-               ret = priv->fops->llt_init(priv, TX_TOTAL_PAGE_NUM);
-               if (ret) {
-                       dev_warn(dev, "%s: LLT table init failed\n", __func__);
-                       goto exit;
-               }
+               if (priv->fops->total_page_num)
+                       rtl8xxxu_init_queue_reserved_page(priv);
+               else
+                       rtl8xxxu_old_init_queue_reserved_page(priv);
+       }
 
-               /*
-                * Presumably this is for 8188EU as well
-                * Enable TX report and TX report timer
-                */
-               if (priv->rtlchip == 0x8723bu) {
-                       val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL);
-                       val8 |= TX_REPORT_CTRL_TIMER_ENABLE;
-                       rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8);
-                       /* Set MAX RPT MACID */
-                       rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL + 1, 0x02);
-                       /* TX report Timer. Unit: 32us */
-                       rtl8xxxu_write16(priv, REG_TX_REPORT_TIME, 0xcdf0);
+       ret = rtl8xxxu_init_queue_priority(priv);
+       dev_dbg(dev, "%s: init_queue_priority %i\n", __func__, ret);
+       if (ret)
+               goto exit;
 
-                       /* tmp ps ? */
-                       val8 = rtl8xxxu_read8(priv, 0xa3);
-                       val8 &= 0xf8;
-                       rtl8xxxu_write8(priv, 0xa3, val8);
-               }
-       }
+       /*
+        * Set RX page boundary
+        */
+       rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, priv->fops->trxff_boundary);
 
        ret = rtl8xxxu_download_firmware(priv);
        dev_dbg(dev, "%s: download_fiwmare %i\n", __func__, ret);
@@ -6338,41 +7828,10 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
        if (ret)
                goto exit;
 
-       /* Solve too many protocol error on USB bus */
-       /* Can't do this for 8188/8192 UMC A cut parts */
-       if (priv->rtlchip == 0x8723a ||
-           ((priv->rtlchip == 0x8192c || priv->rtlchip == 0x8191c ||
-             priv->rtlchip == 0x8188c) &&
-            (priv->chip_cut || !priv->vendor_umc))) {
-               rtl8xxxu_write8(priv, 0xfe40, 0xe6);
-               rtl8xxxu_write8(priv, 0xfe41, 0x94);
-               rtl8xxxu_write8(priv, 0xfe42, 0x80);
-
-               rtl8xxxu_write8(priv, 0xfe40, 0xe0);
-               rtl8xxxu_write8(priv, 0xfe41, 0x19);
-               rtl8xxxu_write8(priv, 0xfe42, 0x80);
-
-               rtl8xxxu_write8(priv, 0xfe40, 0xe5);
-               rtl8xxxu_write8(priv, 0xfe41, 0x91);
-               rtl8xxxu_write8(priv, 0xfe42, 0x80);
-
-               rtl8xxxu_write8(priv, 0xfe40, 0xe2);
-               rtl8xxxu_write8(priv, 0xfe41, 0x81);
-               rtl8xxxu_write8(priv, 0xfe42, 0x80);
-       }
-
-       if (priv->rtlchip == 0x8192e) {
-               rtl8xxxu_write32(priv, REG_HIMR0, 0x00);
-               rtl8xxxu_write32(priv, REG_HIMR1, 0x00);
-       }
-
        if (priv->fops->phy_init_antenna_selection)
                priv->fops->phy_init_antenna_selection(priv);
 
-       if (priv->rtlchip == 0x8723b)
-               ret = rtl8xxxu_init_mac(priv, rtl8723b_mac_init_table);
-       else
-               ret = rtl8xxxu_init_mac(priv, rtl8723a_mac_init_table);
+       ret = rtl8xxxu_init_mac(priv);
 
        dev_dbg(dev, "%s: init_mac %i\n", __func__, ret);
        if (ret)
@@ -6383,92 +7842,37 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
        if (ret)
                goto exit;
 
-       switch(priv->rtlchip) {
-       case 0x8723a:
-               rftable = rtl8723au_radioa_1t_init_table;
-               ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
-               break;
-       case 0x8723b:
-               rftable = rtl8723bu_radioa_1t_init_table;
-               ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
-               /*
-                * PHY LCK
-                */
-               rtl8xxxu_write_rfreg(priv, RF_A, 0xb0, 0xdfbe0);
-               rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_MODE_AG, 0x8c01);
-               msleep(200);
-               rtl8xxxu_write_rfreg(priv, RF_A, 0xb0, 0xdffe0);
-               break;
-       case 0x8188c:
-               if (priv->hi_pa)
-                       rftable = rtl8188ru_radioa_1t_highpa_table;
-               else
-                       rftable = rtl8192cu_radioa_1t_init_table;
-               ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
-               break;
-       case 0x8191c:
-               rftable = rtl8192cu_radioa_1t_init_table;
-               ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
-               break;
-       case 0x8192c:
-               rftable = rtl8192cu_radioa_2t_init_table;
-               ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
-               if (ret)
-                       break;
-               rftable = rtl8192cu_radiob_2t_init_table;
-               ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_B);
-               break;
-       default:
-               ret = -EINVAL;
-       }
-
+       ret = priv->fops->init_phy_rf(priv);
        if (ret)
                goto exit;
 
-       /*
-        * Chip specific quirks
-        */
-       if (priv->rtlchip == 0x8723a) {
-               /* Fix USB interface interference issue */
-               rtl8xxxu_write8(priv, 0xfe40, 0xe0);
-               rtl8xxxu_write8(priv, 0xfe41, 0x8d);
-               rtl8xxxu_write8(priv, 0xfe42, 0x80);
-               rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, 0xfd0320);
+       /* RFSW Control - clear bit 14 ?? */
+       if (priv->rtl_chip != RTL8723B && priv->rtl_chip != RTL8192E)
+               rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, 0x00000003);
 
-               /* Reduce 80M spur */
-               rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, 0x0381808d);
-               rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83);
-               rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff82);
-               rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83);
-       } else {
-               val32 = rtl8xxxu_read32(priv, REG_TXDMA_OFFSET_CHK);
-               val32 |= TXDMA_OFFSET_DROP_DATA_EN;
-               rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, val32);
+       val32 = FPGA0_RF_TRSW | FPGA0_RF_TRSWB | FPGA0_RF_ANTSW |
+               FPGA0_RF_ANTSWB |
+               ((FPGA0_RF_ANTSW | FPGA0_RF_ANTSWB) << FPGA0_RF_BD_CTRL_SHIFT);
+       if (!priv->no_pape) {
+               val32 |= (FPGA0_RF_PAPE |
+                         (FPGA0_RF_PAPE << FPGA0_RF_BD_CTRL_SHIFT));
        }
+       rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32);
 
-       if (!macpower) {
-               if (priv->ep_tx_normal_queue)
-                       val8 = TX_PAGE_NUM_NORM_PQ;
-               else
-                       val8 = 0;
-
-               rtl8xxxu_write8(priv, REG_RQPN_NPQ, val8);
-
-               val32 = (TX_PAGE_NUM_PUBQ << RQPN_NORM_PQ_SHIFT) | RQPN_LOAD;
-
-               if (priv->ep_tx_high_queue)
-                       val32 |= (TX_PAGE_NUM_HI_PQ << RQPN_HI_PQ_SHIFT);
-               if (priv->ep_tx_low_queue)
-                       val32 |= (TX_PAGE_NUM_LO_PQ << RQPN_LO_PQ_SHIFT);
-
-               rtl8xxxu_write32(priv, REG_RQPN, val32);
+       /* 0x860[6:5]= 00 - why? - this sets antenna B */
+       if (priv->rtl_chip != RTL8192E)
+               rtl8xxxu_write32(priv, REG_FPGA0_XA_RF_INT_OE, 0x66f60210);
 
+       if (!macpower) {
                /*
                 * Set TX buffer boundary
                 */
-               val8 = TX_TOTAL_PAGE_NUM + 1;
+               if (priv->rtl_chip == RTL8192E)
+                       val8 = TX_TOTAL_PAGE_NUM_8192E + 1;
+               else
+                       val8 = TX_TOTAL_PAGE_NUM + 1;
 
-               if (priv->rtlchip == 0x8723b)
+               if (priv->rtl_chip == RTL8723B)
                        val8 -= 1;
 
                rtl8xxxu_write8(priv, REG_TXPKTBUF_BCNQ_BDNY, val8);
@@ -6478,54 +7882,63 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
                rtl8xxxu_write8(priv, REG_TDECTRL + 1, val8);
        }
 
-       ret = rtl8xxxu_init_queue_priority(priv);
-       dev_dbg(dev, "%s: init_queue_priority %i\n", __func__, ret);
-       if (ret)
-               goto exit;
+       /*
+        * The vendor drivers set PBP for all devices, except 8192e.
+        * There is no explanation for this in any of the sources.
+        */
+       val8 = (priv->fops->pbp_rx << PBP_PAGE_SIZE_RX_SHIFT) |
+               (priv->fops->pbp_tx << PBP_PAGE_SIZE_TX_SHIFT);
+       if (priv->rtl_chip != RTL8192E)
+               rtl8xxxu_write8(priv, REG_PBP, val8);
 
-       /* RFSW Control - clear bit 14 ?? */
-       if (priv->rtlchip != 0x8723b)
-               rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, 0x00000003);
-       /* 0x07000760 */
-       val32 = FPGA0_RF_TRSW | FPGA0_RF_TRSWB | FPGA0_RF_ANTSW |
-               FPGA0_RF_ANTSWB | FPGA0_RF_PAPE |
-               ((FPGA0_RF_ANTSW | FPGA0_RF_ANTSWB | FPGA0_RF_PAPE) <<
-                FPGA0_RF_BD_CTRL_SHIFT);
-       rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32);
-       /* 0x860[6:5]= 00 - why? - this sets antenna B */
-       rtl8xxxu_write32(priv, REG_FPGA0_XA_RF_INT_OE, 0x66F60210);
+       dev_dbg(dev, "%s: macpower %i\n", __func__, macpower);
+       if (!macpower) {
+               ret = priv->fops->llt_init(priv, TX_TOTAL_PAGE_NUM);
+               if (ret) {
+                       dev_warn(dev, "%s: LLT table init failed\n", __func__);
+                       goto exit;
+               }
 
-       priv->rf_mode_ag[0] = rtl8xxxu_read_rfreg(priv, RF_A,
-                                                 RF6052_REG_MODE_AG);
+               /*
+                * Chip specific quirks
+                */
+               priv->fops->usb_quirks(priv);
 
-       /*
-        * Set RX page boundary
-        */
-       if (priv->rtlchip == 0x8723b)
-               rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x3f7f);
-       else
-               rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x27ff);
-       /*
-        * Transfer page size is always 128
-        */
-       if (priv->rtlchip == 0x8723b)
-               val8 = (PBP_PAGE_SIZE_256 << PBP_PAGE_SIZE_RX_SHIFT) |
-                       (PBP_PAGE_SIZE_256 << PBP_PAGE_SIZE_TX_SHIFT);
-       else
-               val8 = (PBP_PAGE_SIZE_128 << PBP_PAGE_SIZE_RX_SHIFT) |
-                       (PBP_PAGE_SIZE_128 << PBP_PAGE_SIZE_TX_SHIFT);
-       rtl8xxxu_write8(priv, REG_PBP, val8);
+               /*
+                * Presumably this is for 8188EU as well
+                * Enable TX report and TX report timer
+                */
+               if (priv->rtl_chip == RTL8723B) {
+                       val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL);
+                       val8 |= TX_REPORT_CTRL_TIMER_ENABLE;
+                       rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8);
+                       /* Set MAX RPT MACID */
+                       rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL + 1, 0x02);
+                       /* TX report Timer. Unit: 32us */
+                       rtl8xxxu_write16(priv, REG_TX_REPORT_TIME, 0xcdf0);
+
+                       /* tmp ps ? */
+                       val8 = rtl8xxxu_read8(priv, 0xa3);
+                       val8 &= 0xf8;
+                       rtl8xxxu_write8(priv, 0xa3, val8);
+               }
+       }
 
        /*
         * Unit in 8 bytes, not obvious what it is used for
         */
        rtl8xxxu_write8(priv, REG_RX_DRVINFO_SZ, 4);
 
-       /*
-        * Enable all interrupts - not obvious USB needs to do this
-        */
-       rtl8xxxu_write32(priv, REG_HISR, 0xffffffff);
-       rtl8xxxu_write32(priv, REG_HIMR, 0xffffffff);
+       if (priv->rtl_chip == RTL8192E) {
+               rtl8xxxu_write32(priv, REG_HIMR0, 0x00);
+               rtl8xxxu_write32(priv, REG_HIMR1, 0x00);
+       } else {
+               /*
+                * Enable all interrupts - not obvious USB needs to do this
+                */
+               rtl8xxxu_write32(priv, REG_HISR, 0xffffffff);
+               rtl8xxxu_write32(priv, REG_HIMR, 0xffffffff);
+       }
 
        rtl8xxxu_set_mac(priv);
        rtl8xxxu_set_linktype(priv, NL80211_IFTYPE_STATION);
@@ -6600,7 +8013,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
        /*
         * Initialize burst parameters
         */
-       if (priv->rtlchip == 0x8723b) {
+       if (priv->rtl_chip == RTL8723B) {
                /*
                 * For USB high speed set 512B packets
                 */
@@ -6651,9 +8064,11 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
        priv->fops->set_tx_power(priv, 1, false);
 
        /* Let the 8051 take control of antenna setting */
-       val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
-       val8 |= LEDCFG2_DPDT_SELECT;
-       rtl8xxxu_write8(priv, REG_LEDCFG2, val8);
+       if (priv->rtl_chip != RTL8192E) {
+               val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
+               val8 |= LEDCFG2_DPDT_SELECT;
+               rtl8xxxu_write8(priv, REG_LEDCFG2, val8);
+       }
 
        rtl8xxxu_write8(priv, REG_HWSEQ_CTRL, 0xff);
 
@@ -6665,6 +8080,20 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
        if (priv->fops->init_statistics)
                priv->fops->init_statistics(priv);
 
+       if (priv->rtl_chip == RTL8192E) {
+               /*
+                * 0x4c6[3] 1: RTS BW = Data BW
+                * 0: RTS BW depends on CCA / secondary CCA result.
+                */
+               val8 = rtl8xxxu_read8(priv, REG_QUEUE_CTRL);
+               val8 &= ~BIT(3);
+               rtl8xxxu_write8(priv, REG_QUEUE_CTRL, val8);
+               /*
+                * Reset USB mode switch setting
+                */
+               rtl8xxxu_write8(priv, REG_ACLK_MON, 0x00);
+       }
+
        rtl8723a_phy_lc_calibrate(priv);
 
        priv->fops->phy_iq_calibrate(priv);
@@ -6672,7 +8101,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
        /*
         * This should enable thermal meter
         */
-       if (priv->fops->has_s0s1)
+       if (priv->fops->tx_desc_size == sizeof(struct rtl8xxxu_txdesc40))
                rtl8xxxu_write_rfreg(priv,
                                     RF_A, RF6052_REG_T_METER_8723B, 0x37cf8);
        else
@@ -6682,7 +8111,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
        val8 = ((30000 + NAV_UPPER_UNIT - 1) / NAV_UPPER_UNIT);
        rtl8xxxu_write8(priv, REG_NAV_UPPER, val8);
 
-       if (priv->rtlchip == 0x8723a) {
+       if (priv->rtl_chip == RTL8723A) {
                /*
                 * 2011/03/09 MH debug only, UMC-B cut pass 2500 S5 test,
                 * but we need to find root cause.
@@ -6693,6 +8122,8 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
                        val32 |= FPGA_RF_MODE_CCK;
                        rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
                }
+       } else if (priv->rtl_chip == RTL8192E) {
+               rtl8xxxu_write8(priv, REG_USB_HRPWM, 0x00);
        }
 
        val32 = rtl8xxxu_read32(priv, REG_FWHW_TXQ_CTRL);
@@ -6700,17 +8131,20 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
        /* ack for xmit mgmt frames. */
        rtl8xxxu_write32(priv, REG_FWHW_TXQ_CTRL, val32);
 
+       if (priv->rtl_chip == RTL8192E) {
+               /*
+                * Fix LDPC rx hang issue.
+                */
+               val32 = rtl8xxxu_read32(priv, REG_AFE_MISC);
+               rtl8xxxu_write8(priv, REG_8192E_LDOV12_CTRL, 0x75);
+               val32 &= 0xfff00fff;
+               val32 |= 0x0007e000;
+               rtl8xxxu_write32(priv, REG_AFE_MISC, val32);
+       }
 exit:
        return ret;
 }
 
-static void rtl8xxxu_disable_device(struct ieee80211_hw *hw)
-{
-       struct rtl8xxxu_priv *priv = hw->priv;
-
-       priv->fops->power_off(priv);
-}
-
 static void rtl8xxxu_cam_write(struct rtl8xxxu_priv *priv,
                               struct ieee80211_key_conf *key, const u8 *mac)
 {
@@ -6775,8 +8209,8 @@ static void rtl8xxxu_sw_scan_complete(struct ieee80211_hw *hw,
        rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
 }
 
-static void rtl8723au_update_rate_mask(struct rtl8xxxu_priv *priv,
-                                      u32 ramask, int sgi)
+static void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv,
+                                     u32 ramask, int sgi)
 {
        struct h2c_cmd h2c;
 
@@ -6795,8 +8229,8 @@ static void rtl8723au_update_rate_mask(struct rtl8xxxu_priv *priv,
        rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.ramask));
 }
 
-static void rtl8723bu_update_rate_mask(struct rtl8xxxu_priv *priv,
-                                      u32 ramask, int sgi)
+static void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv,
+                                          u32 ramask, int sgi)
 {
        struct h2c_cmd h2c;
        u8 bw = 0;
@@ -6821,8 +8255,8 @@ static void rtl8723bu_update_rate_mask(struct rtl8xxxu_priv *priv,
        rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.b_macid_cfg));
 }
 
-static void rtl8723au_report_connect(struct rtl8xxxu_priv *priv,
-                                    u8 macid, bool connect)
+static void rtl8xxxu_gen1_report_connect(struct rtl8xxxu_priv *priv,
+                                        u8 macid, bool connect)
 {
        struct h2c_cmd h2c;
 
@@ -6838,8 +8272,8 @@ static void rtl8723au_report_connect(struct rtl8xxxu_priv *priv,
        rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.joinbss));
 }
 
-static void rtl8723bu_report_connect(struct rtl8xxxu_priv *priv,
-                                    u8 macid, bool connect)
+static void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
+                                        u8 macid, bool connect)
 {
        struct h2c_cmd h2c;
 
@@ -7014,7 +8448,7 @@ static u32 rtl8xxxu_queue_select(struct ieee80211_hw *hw, struct sk_buff *skb)
  * format. The descriptor checksum is still only calculated over the
  * initial 32 bytes of the descriptor!
  */
-static void rtl8xxxu_calc_tx_desc_csum(struct rtl8723au_tx_desc *tx_desc)
+static void rtl8xxxu_calc_tx_desc_csum(struct rtl8xxxu_txdesc32 *tx_desc)
 {
        __le16 *ptr = (__le16 *)tx_desc;
        u16 csum = 0;
@@ -7026,7 +8460,7 @@ static void rtl8xxxu_calc_tx_desc_csum(struct rtl8723au_tx_desc *tx_desc)
         */
        tx_desc->csum = cpu_to_le16(0);
 
-       for (i = 0; i < (sizeof(struct rtl8723au_tx_desc) / sizeof(u16)); i++)
+       for (i = 0; i < (sizeof(struct rtl8xxxu_txdesc32) / sizeof(u16)); i++)
                csum = csum ^ le16_to_cpu(ptr[i]);
 
        tx_desc->csum |= cpu_to_le16(csum);
@@ -7164,8 +8598,8 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info);
        struct rtl8xxxu_priv *priv = hw->priv;
-       struct rtl8723au_tx_desc *tx_desc;
-       struct rtl8723bu_tx_desc *tx_desc40;
+       struct rtl8xxxu_txdesc32 *tx_desc;
+       struct rtl8xxxu_txdesc40 *tx_desc40;
        struct rtl8xxxu_tx_urb *tx_urb;
        struct ieee80211_sta *sta = NULL;
        struct ieee80211_vif *vif = tx_info->control.vif;
@@ -7210,7 +8644,7 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
        if (control && control->sta)
                sta = control->sta;
 
-       tx_desc = (struct rtl8723au_tx_desc *)skb_push(skb, tx_desc_size);
+       tx_desc = (struct rtl8xxxu_txdesc32 *)skb_push(skb, tx_desc_size);
 
        memset(tx_desc, 0, tx_desc_size);
        tx_desc->pkt_size = cpu_to_le16(pktlen);
@@ -7267,37 +8701,35 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
                        tx_desc->txdw5 |= cpu_to_le32(0x0001ff00);
 
                tx_desc->txdw3 =
-                       cpu_to_le32((u32)seq_number << TXDESC_SEQ_SHIFT_8723A);
+                       cpu_to_le32((u32)seq_number << TXDESC32_SEQ_SHIFT);
 
                if (ampdu_enable)
-                       tx_desc->txdw1 |= cpu_to_le32(TXDESC_AGG_ENABLE_8723A);
+                       tx_desc->txdw1 |= cpu_to_le32(TXDESC32_AGG_ENABLE);
                else
-                       tx_desc->txdw1 |= cpu_to_le32(TXDESC_AGG_BREAK_8723A);
+                       tx_desc->txdw1 |= cpu_to_le32(TXDESC32_AGG_BREAK);
 
                if (ieee80211_is_mgmt(hdr->frame_control)) {
                        tx_desc->txdw5 = cpu_to_le32(tx_rate->hw_value);
                        tx_desc->txdw4 |=
-                               cpu_to_le32(TXDESC_USE_DRIVER_RATE_8723A);
+                               cpu_to_le32(TXDESC32_USE_DRIVER_RATE);
                        tx_desc->txdw5 |=
-                               cpu_to_le32(6 <<
-                                           TXDESC_RETRY_LIMIT_SHIFT_8723A);
+                               cpu_to_le32(6 << TXDESC32_RETRY_LIMIT_SHIFT);
                        tx_desc->txdw5 |=
-                               cpu_to_le32(TXDESC_RETRY_LIMIT_ENABLE_8723A);
+                               cpu_to_le32(TXDESC32_RETRY_LIMIT_ENABLE);
                }
 
                if (ieee80211_is_data_qos(hdr->frame_control))
-                       tx_desc->txdw4 |= cpu_to_le32(TXDESC_QOS_8723A);
+                       tx_desc->txdw4 |= cpu_to_le32(TXDESC32_QOS);
 
                if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE ||
                    (sta && vif && vif->bss_conf.use_short_preamble))
-                       tx_desc->txdw4 |=
-                               cpu_to_le32(TXDESC_SHORT_PREAMBLE_8723A);
+                       tx_desc->txdw4 |= cpu_to_le32(TXDESC32_SHORT_PREAMBLE);
 
                if (rate_flag & IEEE80211_TX_RC_SHORT_GI ||
                    (ieee80211_is_data_qos(hdr->frame_control) &&
                     sta && sta->ht_cap.cap &
                     (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))) {
-                       tx_desc->txdw5 |= cpu_to_le32(TXDESC_SHORT_GI);
+                       tx_desc->txdw5 |= cpu_to_le32(TXDESC32_SHORT_GI);
                }
 
                if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
@@ -7307,46 +8739,43 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
                         */
                        tx_desc->txdw4 |=
                                cpu_to_le32(DESC_RATE_24M <<
-                                           TXDESC_RTS_RATE_SHIFT_8723A);
+                                           TXDESC32_RTS_RATE_SHIFT);
                        tx_desc->txdw4 |=
-                               cpu_to_le32(TXDESC_RTS_CTS_ENABLE_8723A);
-                       tx_desc->txdw4 |=
-                               cpu_to_le32(TXDESC_HW_RTS_ENABLE_8723A);
+                               cpu_to_le32(TXDESC32_RTS_CTS_ENABLE);
+                       tx_desc->txdw4 |= cpu_to_le32(TXDESC32_HW_RTS_ENABLE);
                }
        } else {
-               tx_desc40 = (struct rtl8723bu_tx_desc *)tx_desc;
+               tx_desc40 = (struct rtl8xxxu_txdesc40 *)tx_desc;
 
                tx_desc40->txdw4 = cpu_to_le32(rate);
                if (ieee80211_is_data(hdr->frame_control)) {
                        tx_desc->txdw4 |=
                                cpu_to_le32(0x1f <<
-                                           TXDESC_DATA_RATE_FB_SHIFT_8723B);
+                                           TXDESC40_DATA_RATE_FB_SHIFT);
                }
 
                tx_desc40->txdw9 =
-                       cpu_to_le32((u32)seq_number << TXDESC_SEQ_SHIFT_8723B);
+                       cpu_to_le32((u32)seq_number << TXDESC40_SEQ_SHIFT);
 
                if (ampdu_enable)
-                       tx_desc40->txdw2 |=
-                               cpu_to_le32(TXDESC_AGG_ENABLE_8723B);
+                       tx_desc40->txdw2 |= cpu_to_le32(TXDESC40_AGG_ENABLE);
                else
-                       tx_desc40->txdw2 |= cpu_to_le32(TXDESC_AGG_BREAK_8723B);
+                       tx_desc40->txdw2 |= cpu_to_le32(TXDESC40_AGG_BREAK);
 
                if (ieee80211_is_mgmt(hdr->frame_control)) {
                        tx_desc40->txdw4 = cpu_to_le32(tx_rate->hw_value);
                        tx_desc40->txdw3 |=
-                               cpu_to_le32(TXDESC_USE_DRIVER_RATE_8723B);
+                               cpu_to_le32(TXDESC40_USE_DRIVER_RATE);
                        tx_desc40->txdw4 |=
-                               cpu_to_le32(6 <<
-                                           TXDESC_RETRY_LIMIT_SHIFT_8723B);
+                               cpu_to_le32(6 << TXDESC40_RETRY_LIMIT_SHIFT);
                        tx_desc40->txdw4 |=
-                               cpu_to_le32(TXDESC_RETRY_LIMIT_ENABLE_8723B);
+                               cpu_to_le32(TXDESC40_RETRY_LIMIT_ENABLE);
                }
 
                if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE ||
                    (sta && vif && vif->bss_conf.use_short_preamble))
                        tx_desc40->txdw5 |=
-                               cpu_to_le32(TXDESC_SHORT_PREAMBLE_8723B);
+                               cpu_to_le32(TXDESC40_SHORT_PREAMBLE);
 
                if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
                        /*
@@ -7355,11 +8784,9 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
                         */
                        tx_desc->txdw4 |=
                                cpu_to_le32(DESC_RATE_24M <<
-                                           TXDESC_RTS_RATE_SHIFT_8723B);
-                       tx_desc->txdw3 |=
-                               cpu_to_le32(TXDESC_RTS_CTS_ENABLE_8723B);
-                       tx_desc->txdw3 |=
-                               cpu_to_le32(TXDESC_HW_RTS_ENABLE_8723B);
+                                           TXDESC40_RTS_RATE_SHIFT);
+                       tx_desc->txdw3 |= cpu_to_le32(TXDESC40_RTS_CTS_ENABLE);
+                       tx_desc->txdw3 |= cpu_to_le32(TXDESC40_HW_RTS_ENABLE);
                }
        }
 
@@ -7499,15 +8926,22 @@ static void rtl8xxxu_rx_urb_work(struct work_struct *work)
        }
 }
 
-static int rtl8723au_parse_rx_desc(struct rtl8xxxu_priv *priv,
+static int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv,
                                   struct sk_buff *skb,
                                   struct ieee80211_rx_status *rx_status)
 {
-       struct rtl8xxxu_rx_desc *rx_desc = (struct rtl8xxxu_rx_desc *)skb->data;
+       struct rtl8xxxu_rxdesc16 *rx_desc =
+               (struct rtl8xxxu_rxdesc16 *)skb->data;
        struct rtl8723au_phy_stats *phy_stats;
+       __le32 *_rx_desc_le = (__le32 *)skb->data;
+       u32 *_rx_desc = (u32 *)skb->data;
        int drvinfo_sz, desc_shift;
+       int i;
+
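+       /* Convert the RX descriptor to host byte order in place
+        * before any of its fields are parsed. */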
+       for (i = 0; i < (sizeof(struct rtl8xxxu_rxdesc16) / sizeof(u32)); i++)
+               _rx_desc[i] = le32_to_cpu(_rx_desc_le[i]);
 
-       skb_pull(skb, sizeof(struct rtl8xxxu_rx_desc));
+       skb_pull(skb, sizeof(struct rtl8xxxu_rxdesc16));
 
        phy_stats = (struct rtl8723au_phy_stats *)skb->data;
 
@@ -7539,16 +8973,22 @@ static int rtl8723au_parse_rx_desc(struct rtl8xxxu_priv *priv,
        return RX_TYPE_DATA_PKT;
 }
 
-static int rtl8723bu_parse_rx_desc(struct rtl8xxxu_priv *priv,
+static int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv,
                                   struct sk_buff *skb,
                                   struct ieee80211_rx_status *rx_status)
 {
-       struct rtl8723bu_rx_desc *rx_desc =
-               (struct rtl8723bu_rx_desc *)skb->data;
+       struct rtl8xxxu_rxdesc24 *rx_desc =
+               (struct rtl8xxxu_rxdesc24 *)skb->data;
        struct rtl8723au_phy_stats *phy_stats;
+       __le32 *_rx_desc_le = (__le32 *)skb->data;
+       u32 *_rx_desc = (u32 *)skb->data;
        int drvinfo_sz, desc_shift;
+       int i;
+
+       for (i = 0; i < (sizeof(struct rtl8xxxu_rxdesc24) / sizeof(u32)); i++)
+               _rx_desc[i] = le32_to_cpu(_rx_desc_le[i]);
 
-       skb_pull(skb, sizeof(struct rtl8723bu_rx_desc));
+       skb_pull(skb, sizeof(struct rtl8xxxu_rxdesc24));
 
        phy_stats = (struct rtl8723au_phy_stats *)skb->data;
 
@@ -7640,12 +9080,7 @@ static void rtl8xxxu_rx_complete(struct urb *urb)
        struct sk_buff *skb = (struct sk_buff *)urb->context;
        struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
        struct device *dev = &priv->udev->dev;
-       __le32 *_rx_desc_le = (__le32 *)skb->data;
-       u32 *_rx_desc = (u32 *)skb->data;
-       int rx_type, i;
-
-       for (i = 0; i < (sizeof(struct rtl8xxxu_rx_desc) / sizeof(u32)); i++)
-               _rx_desc[i] = le32_to_cpu(_rx_desc_le[i]);
+       int rx_type;
 
        skb_put(skb, urb->actual_length);
 
@@ -7684,14 +9119,15 @@ static int rtl8xxxu_submit_rx_urb(struct rtl8xxxu_priv *priv,
 {
        struct sk_buff *skb;
        int skb_size;
-       int ret;
+       int ret, rx_desc_sz;
 
-       skb_size = sizeof(struct rtl8xxxu_rx_desc) + RTL_RX_BUFFER_SIZE;
+       rx_desc_sz = priv->fops->rx_desc_size;
+       skb_size = rx_desc_sz + RTL_RX_BUFFER_SIZE;
        skb = __netdev_alloc_skb(NULL, skb_size, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;
 
-       memset(skb->data, 0, sizeof(struct rtl8xxxu_rx_desc));
+       memset(skb->data, 0, rx_desc_sz);
        usb_fill_bulk_urb(&rx_urb->urb, priv->udev, priv->pipe_in, skb->data,
                          skb_size, rtl8xxxu_rx_complete, skb);
        usb_anchor_urb(&rx_urb->urb, &priv->rx_anchor);
@@ -8161,6 +9597,8 @@ static void rtl8xxxu_stop(struct ieee80211_hw *hw)
        if (priv->usb_interrupts)
                usb_kill_anchored_urbs(&priv->int_anchor);
 
+       rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
+
        priv->fops->disable_rf(priv);
 
        /*
@@ -8293,6 +9731,10 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
                if (id->idProduct == 0x7811)
                        untested = 0;
                break;
+       case 0x050d:
+               if (id->idProduct == 0x1004)
+                       untested = 0;
+               break;
        default:
                break;
        }
@@ -8385,7 +9827,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
                dev_info(&udev->dev, "Enabling HT_20_40 on the 2.4GHz band\n");
                sband->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
        }
-       hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
+       hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
 
        hw->wiphy->rts_threshold = 2347;
 
@@ -8421,13 +9863,14 @@ static void rtl8xxxu_disconnect(struct usb_interface *interface)
        hw = usb_get_intfdata(interface);
        priv = hw->priv;
 
-       rtl8xxxu_disable_device(hw);
+       ieee80211_unregister_hw(hw);
+
+       priv->fops->power_off(priv);
+
        usb_set_intfdata(interface, NULL);
 
        dev_info(&priv->udev->dev, "disconnecting\n");
 
-       ieee80211_unregister_hw(hw);
-
        kfree(priv->fw_data);
        mutex_destroy(&priv->usb_buf_mutex);
        mutex_destroy(&priv->h2c_mutex);
@@ -8443,22 +9886,30 @@ static struct rtl8xxxu_fileops rtl8723au_fops = {
        .power_off = rtl8xxxu_power_off,
        .reset_8051 = rtl8xxxu_reset_8051,
        .llt_init = rtl8xxxu_init_llt_table,
-       .phy_iq_calibrate = rtl8723au_phy_iq_calibrate,
-       .config_channel = rtl8723au_config_channel,
-       .parse_rx_desc = rtl8723au_parse_rx_desc,
-       .enable_rf = rtl8723a_enable_rf,
-       .disable_rf = rtl8723a_disable_rf,
-       .set_tx_power = rtl8723a_set_tx_power,
-       .update_rate_mask = rtl8723au_update_rate_mask,
-       .report_connect = rtl8723au_report_connect,
+       .init_phy_bb = rtl8xxxu_gen1_init_phy_bb,
+       .init_phy_rf = rtl8723au_init_phy_rf,
+       .phy_iq_calibrate = rtl8xxxu_gen1_phy_iq_calibrate,
+       .config_channel = rtl8xxxu_gen1_config_channel,
+       .parse_rx_desc = rtl8xxxu_parse_rxdesc16,
+       .enable_rf = rtl8xxxu_gen1_enable_rf,
+       .disable_rf = rtl8xxxu_gen1_disable_rf,
+       .usb_quirks = rtl8xxxu_gen1_usb_quirks,
+       .set_tx_power = rtl8xxxu_gen1_set_tx_power,
+       .update_rate_mask = rtl8xxxu_update_rate_mask,
+       .report_connect = rtl8xxxu_gen1_report_connect,
        .writeN_block_size = 1024,
        .mbox_ext_reg = REG_HMBOX_EXT_0,
        .mbox_ext_width = 2,
-       .tx_desc_size = sizeof(struct rtl8723au_tx_desc),
+       .tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
+       .rx_desc_size = sizeof(struct rtl8xxxu_rxdesc16),
        .adda_1t_init = 0x0b1b25a0,
        .adda_1t_path_on = 0x0bdb25a0,
        .adda_2t_path_on_a = 0x04db25a4,
        .adda_2t_path_on_b = 0x0b1b25a4,
+       .trxff_boundary = 0x27ff,
+       .pbp_rx = PBP_PAGE_SIZE_128,
+       .pbp_tx = PBP_PAGE_SIZE_128,
+       .mactable = rtl8xxxu_gen1_mac_init_table,
 };
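/*
 * Editorial note: with parse_rx_desc and rx_desc_size moved into the
 * per-device fileops, the USB completion path can stay generic. A
 * simplified sketch of the dispatch (error and C2H handling omitted;
 * not the literal driver code):
 */
static void rtl8xxxu_rx_dispatch(struct rtl8xxxu_priv *priv,
				 struct sk_buff *skb,
				 struct ieee80211_rx_status *rx_status)
{
	int rx_type = priv->fops->parse_rx_desc(priv, skb, rx_status);

	if (rx_type == RX_TYPE_DATA_PKT)
		ieee80211_rx_irqsafe(priv->hw, skb);
	else
		dev_kfree_skb(skb);
}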
 
 static struct rtl8xxxu_fileops rtl8723bu_fops = {
@@ -8468,26 +9919,34 @@ static struct rtl8xxxu_fileops rtl8723bu_fops = {
        .power_off = rtl8723bu_power_off,
        .reset_8051 = rtl8723bu_reset_8051,
        .llt_init = rtl8xxxu_auto_llt_table,
+       .init_phy_bb = rtl8723bu_init_phy_bb,
+       .init_phy_rf = rtl8723bu_init_phy_rf,
        .phy_init_antenna_selection = rtl8723bu_phy_init_antenna_selection,
        .phy_iq_calibrate = rtl8723bu_phy_iq_calibrate,
-       .config_channel = rtl8723bu_config_channel,
-       .parse_rx_desc = rtl8723bu_parse_rx_desc,
+       .config_channel = rtl8xxxu_gen2_config_channel,
+       .parse_rx_desc = rtl8xxxu_parse_rxdesc24,
        .init_aggregation = rtl8723bu_init_aggregation,
        .init_statistics = rtl8723bu_init_statistics,
        .enable_rf = rtl8723b_enable_rf,
-       .disable_rf = rtl8723b_disable_rf,
+       .disable_rf = rtl8xxxu_gen2_disable_rf,
+       .usb_quirks = rtl8xxxu_gen2_usb_quirks,
        .set_tx_power = rtl8723b_set_tx_power,
-       .update_rate_mask = rtl8723bu_update_rate_mask,
-       .report_connect = rtl8723bu_report_connect,
+       .update_rate_mask = rtl8xxxu_gen2_update_rate_mask,
+       .report_connect = rtl8xxxu_gen2_report_connect,
        .writeN_block_size = 1024,
        .mbox_ext_reg = REG_HMBOX_EXT0_8723B,
        .mbox_ext_width = 4,
-       .tx_desc_size = sizeof(struct rtl8723bu_tx_desc),
+       .tx_desc_size = sizeof(struct rtl8xxxu_txdesc40),
+       .rx_desc_size = sizeof(struct rtl8xxxu_rxdesc24),
        .has_s0s1 = 1,
        .adda_1t_init = 0x01c00014,
        .adda_1t_path_on = 0x01c00014,
        .adda_2t_path_on_a = 0x01c00014,
        .adda_2t_path_on_b = 0x01c00014,
+       .trxff_boundary = 0x3f7f,
+       .pbp_rx = PBP_PAGE_SIZE_256,
+       .pbp_tx = PBP_PAGE_SIZE_256,
+       .mactable = rtl8723b_mac_init_table,
 };
 
 #ifdef CONFIG_RTL8XXXU_UNTESTED
@@ -8499,22 +9958,30 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = {
        .power_off = rtl8xxxu_power_off,
        .reset_8051 = rtl8xxxu_reset_8051,
        .llt_init = rtl8xxxu_init_llt_table,
-       .phy_iq_calibrate = rtl8723au_phy_iq_calibrate,
-       .config_channel = rtl8723au_config_channel,
-       .parse_rx_desc = rtl8723au_parse_rx_desc,
-       .enable_rf = rtl8723a_enable_rf,
-       .disable_rf = rtl8723a_disable_rf,
-       .set_tx_power = rtl8723a_set_tx_power,
-       .update_rate_mask = rtl8723au_update_rate_mask,
-       .report_connect = rtl8723au_report_connect,
+       .init_phy_bb = rtl8xxxu_gen1_init_phy_bb,
+       .init_phy_rf = rtl8192cu_init_phy_rf,
+       .phy_iq_calibrate = rtl8xxxu_gen1_phy_iq_calibrate,
+       .config_channel = rtl8xxxu_gen1_config_channel,
+       .parse_rx_desc = rtl8xxxu_parse_rxdesc16,
+       .enable_rf = rtl8xxxu_gen1_enable_rf,
+       .disable_rf = rtl8xxxu_gen1_disable_rf,
+       .usb_quirks = rtl8xxxu_gen1_usb_quirks,
+       .set_tx_power = rtl8xxxu_gen1_set_tx_power,
+       .update_rate_mask = rtl8xxxu_update_rate_mask,
+       .report_connect = rtl8xxxu_gen1_report_connect,
        .writeN_block_size = 128,
        .mbox_ext_reg = REG_HMBOX_EXT_0,
        .mbox_ext_width = 2,
-       .tx_desc_size = sizeof(struct rtl8723au_tx_desc),
+       .tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
+       .rx_desc_size = sizeof(struct rtl8xxxu_rxdesc16),
        .adda_1t_init = 0x0b1b25a0,
        .adda_1t_path_on = 0x0bdb25a0,
        .adda_2t_path_on_a = 0x04db25a4,
        .adda_2t_path_on_b = 0x0b1b25a4,
+       .trxff_boundary = 0x27ff,
+       .pbp_rx = PBP_PAGE_SIZE_128,
+       .pbp_tx = PBP_PAGE_SIZE_128,
+       .mactable = rtl8xxxu_gen1_mac_init_table,
 };
 
 #endif
@@ -8526,23 +9993,33 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = {
        .power_off = rtl8xxxu_power_off,
        .reset_8051 = rtl8xxxu_reset_8051,
        .llt_init = rtl8xxxu_auto_llt_table,
-       .phy_iq_calibrate = rtl8723bu_phy_iq_calibrate,
-       .config_channel = rtl8723bu_config_channel,
-       .parse_rx_desc = rtl8723bu_parse_rx_desc,
-       .enable_rf = rtl8723b_enable_rf,
-       .disable_rf = rtl8723b_disable_rf,
-       .set_tx_power = rtl8723b_set_tx_power,
-       .update_rate_mask = rtl8723au_update_rate_mask,
-       .report_connect = rtl8723au_report_connect,
+       .init_phy_bb = rtl8192eu_init_phy_bb,
+       .init_phy_rf = rtl8192eu_init_phy_rf,
+       .phy_iq_calibrate = rtl8192eu_phy_iq_calibrate,
+       .config_channel = rtl8xxxu_gen2_config_channel,
+       .parse_rx_desc = rtl8xxxu_parse_rxdesc24,
+       .enable_rf = rtl8192e_enable_rf,
+       .disable_rf = rtl8xxxu_gen2_disable_rf,
+       .usb_quirks = rtl8xxxu_gen2_usb_quirks,
+       .set_tx_power = rtl8192e_set_tx_power,
+       .update_rate_mask = rtl8xxxu_gen2_update_rate_mask,
+       .report_connect = rtl8xxxu_gen2_report_connect,
        .writeN_block_size = 128,
        .mbox_ext_reg = REG_HMBOX_EXT0_8723B,
        .mbox_ext_width = 4,
-       .tx_desc_size = sizeof(struct rtl8723au_tx_desc),
-       .has_s0s1 = 1,
+       .tx_desc_size = sizeof(struct rtl8xxxu_txdesc40),
+       .rx_desc_size = sizeof(struct rtl8xxxu_rxdesc24),
+       .has_s0s1 = 0,
        .adda_1t_init = 0x0fc01616,
        .adda_1t_path_on = 0x0fc01616,
        .adda_2t_path_on_a = 0x0fc01616,
        .adda_2t_path_on_b = 0x0fc01616,
+       .trxff_boundary = 0x3cff,
+       .mactable = rtl8192e_mac_init_table,
+       .total_page_num = TX_TOTAL_PAGE_NUM_8192E,
+       .page_num_hi = TX_PAGE_NUM_HI_PQ_8192E,
+       .page_num_lo = TX_PAGE_NUM_LO_PQ_8192E,
+       .page_num_norm = TX_PAGE_NUM_NORM_PQ_8192E,
 };
 
 static struct usb_device_id dev_table[] = {
@@ -8567,6 +10044,9 @@ static struct usb_device_id dev_table[] = {
 /* Tested by Larry Finger */
 {USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0x7811, 0xff, 0xff, 0xff),
        .driver_info = (unsigned long)&rtl8192cu_fops},
+/* Tested by Andrea Merello */
+{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1004, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
 /* Currently untested 8188 series devices */
 {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8191, 0xff, 0xff, 0xff),
        .driver_info = (unsigned long)&rtl8192cu_fops},
@@ -8651,8 +10131,6 @@ static struct usb_device_id dev_table[] = {
 /* Currently untested 8192 series devices */
 {USB_DEVICE_AND_INTERFACE_INFO(0x04bb, 0x0950, 0xff, 0xff, 0xff),
        .driver_info = (unsigned long)&rtl8192cu_fops},
-{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1004, 0xff, 0xff, 0xff),
-       .driver_info = (unsigned long)&rtl8192cu_fops},
 {USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x2102, 0xff, 0xff, 0xff),
        .driver_info = (unsigned long)&rtl8192cu_fops},
 {USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x2103, 0xff, 0xff, 0xff),
@@ -8708,6 +10186,7 @@ static struct usb_driver rtl8xxxu_driver = {
        .probe = rtl8xxxu_probe,
        .disconnect = rtl8xxxu_disconnect,
        .id_table = dev_table,
+       .no_dynamic_id = 1,
        .disable_hub_initiated_lpm = 1,
 };
 
index 7b73654e1368104cd2ec8a87fcac0d593fd5160e..3e2643c79b56c84caabaf3257b6e1b7d5f5c5dfd 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 - 2015 Jes Sorensen <Jes.Sorensen@redhat.com>
+ * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
 #define REALTEK_USB_CMD_IDX            0x00
 
 #define TX_TOTAL_PAGE_NUM              0xf8
+#define TX_TOTAL_PAGE_NUM_8192E                0xf3
 /* (HPQ + LPQ + NPQ + PUBQ) = TX_TOTAL_PAGE_NUM */
 #define TX_PAGE_NUM_PUBQ               0xe7
 #define TX_PAGE_NUM_HI_PQ              0x0c
 #define TX_PAGE_NUM_LO_PQ              0x02
 #define TX_PAGE_NUM_NORM_PQ            0x02
 
+#define TX_PAGE_NUM_PUBQ_8192E         0xe7
+#define TX_PAGE_NUM_HI_PQ_8192E                0x08
+#define TX_PAGE_NUM_LO_PQ_8192E                0x0c
+#define TX_PAGE_NUM_NORM_PQ_8192E      0x00
+
 #define RTL_FW_PAGE_SIZE               4096
 #define RTL8XXXU_FIRMWARE_POLL_MAX     1000
 
 #define EFUSE_BT_MAP_LEN_8723A         1024
 #define EFUSE_MAX_WORD_UNIT            4
 
+enum rtl8xxxu_rtl_chip {
+       RTL8192S = 0x81920,
+       RTL8191S = 0x81910,
+       RTL8192C = 0x8192c,
+       RTL8191C = 0x8191c,
+       RTL8188C = 0x8188c,
+       RTL8188R = 0x81889,
+       RTL8192D = 0x8192d,
+       RTL8723A = 0x8723a,
+       RTL8188E = 0x8188e,
+       RTL8812  = 0x88120,
+       RTL8821  = 0x88210,
+       RTL8192E = 0x8192e,
+       RTL8191E = 0x8191e,
+       RTL8723B = 0x8723b,
+       RTL8814A = 0x8814a,
+       RTL8881A = 0x8881a,
+       RTL8821B = 0x8821b,
+       RTL8822B = 0x8822b,
+       RTL8703B = 0x8703b,
+       RTL8195A = 0x8195a,
+       RTL8188F = 0x8188f
+};
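/*
 * Editorial note: the chip enum replaces the old raw priv->rtlchip value
 * (see the rtl_chip member change further down), so generation checks
 * can compare against named constants. A hypothetical helper as a
 * sketch:
 */
static bool rtl8xxxu_is_gen2(enum rtl8xxxu_rtl_chip chip)
{
	/* the gen2 parts in this driver use the 40-byte TX descriptor */
	return chip == RTL8192E || chip == RTL8723B;
}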
+
 enum rtl8xxxu_rx_type {
        RX_TYPE_DATA_PKT = 0,
        RX_TYPE_C2H = 1,
        RX_TYPE_ERROR = -1
 };
 
-struct rtl8xxxu_rx_desc {
+struct rtl8xxxu_rxdesc16 {
 #ifdef __LITTLE_ENDIAN
        u32 pktlen:14;
        u32 crc32:1;
@@ -207,7 +237,7 @@ struct rtl8xxxu_rx_desc {
 #endif
 };
 
-struct rtl8723bu_rx_desc {
+struct rtl8xxxu_rxdesc24 {
 #ifdef __LITTLE_ENDIAN
        u32 pktlen:14;
        u32 crc32:1;
@@ -332,7 +362,7 @@ struct rtl8723bu_rx_desc {
        __le32 tsfl;
 };
 
-struct rtl8723au_tx_desc {
+struct rtl8xxxu_txdesc32 {
        __le16 pkt_size;
        u8 pkt_offset;
        u8 txdw0;
@@ -346,7 +376,7 @@ struct rtl8723au_tx_desc {
        __le16 txdw7;
 };
 
-struct rtl8723bu_tx_desc {
+struct rtl8xxxu_txdesc40 {
        __le16 pkt_size;
        u8 pkt_offset;
        u8 txdw0;
@@ -422,10 +452,10 @@ struct rtl8723bu_tx_desc {
  * aggregation enable and break respectively. For 8723bu, bits 0-7 are macid.
  */
 #define TXDESC_PKT_OFFSET_SZ           0
-#define TXDESC_AGG_ENABLE_8723A                BIT(5)
-#define TXDESC_AGG_BREAK_8723A         BIT(6)
-#define TXDESC_MACID_SHIFT_8723B       0
-#define TXDESC_MACID_MASK_8723B                0x00f0
+#define TXDESC32_AGG_ENABLE            BIT(5)
+#define TXDESC32_AGG_BREAK             BIT(6)
+#define TXDESC40_MACID_SHIFT           0
+#define TXDESC40_MACID_MASK            0x00f0
 #define TXDESC_QUEUE_SHIFT             8
 #define TXDESC_QUEUE_MASK              0x1f00
 #define TXDESC_QUEUE_BK                        0x2
@@ -437,9 +467,9 @@ struct rtl8723bu_tx_desc {
 #define TXDESC_QUEUE_MGNT              0x12
 #define TXDESC_QUEUE_CMD               0x13
 #define TXDESC_QUEUE_MAX               (TXDESC_QUEUE_CMD + 1)
-#define TXDESC_RDG_NAV_EXT_8723B       BIT(13)
-#define TXDESC_LSIG_TXOP_ENABLE_8723B  BIT(14)
-#define TXDESC_PIFS_8723B              BIT(15)
+#define TXDESC40_RDG_NAV_EXT           BIT(13)
+#define TXDESC40_LSIG_TXOP_ENABLE      BIT(14)
+#define TXDESC40_PIFS                  BIT(15)
 
 #define DESC_RATE_ID_SHIFT             16
 #define DESC_RATE_ID_MASK              0xf
@@ -451,71 +481,71 @@ struct rtl8723bu_tx_desc {
 #define TXDESC_HWPC                    BIT(31)
 
 /* Word 2 */
-#define TXDESC_PAID_SHIFT_8723B                0
-#define TXDESC_PAID_MASK_8723B         0x1ff
-#define TXDESC_CCA_RTS_SHIFT_8723B     10
-#define TXDESC_CCA_RTS_MASK_8723B      0xc00
-#define TXDESC_AGG_ENABLE_8723B                BIT(12)
-#define TXDESC_RDG_ENABLE_8723B                BIT(13)
-#define TXDESC_AGG_BREAK_8723B         BIT(16)
-#define TXDESC_MORE_FRAG_8723B         BIT(17)
-#define TXDESC_RAW_8723B               BIT(18)
-#define TXDESC_ACK_REPORT_8723A                BIT(19)
-#define TXDESC_SPE_RPT_8723B           BIT(19)
+#define TXDESC40_PAID_SHIFT            0
+#define TXDESC40_PAID_MASK             0x1ff
+#define TXDESC40_CCA_RTS_SHIFT         10
+#define TXDESC40_CCA_RTS_MASK          0xc00
+#define TXDESC40_AGG_ENABLE            BIT(12)
+#define TXDESC40_RDG_ENABLE            BIT(13)
+#define TXDESC40_AGG_BREAK             BIT(16)
+#define TXDESC40_MORE_FRAG             BIT(17)
+#define TXDESC40_RAW                   BIT(18)
+#define TXDESC32_ACK_REPORT            BIT(19)
+#define TXDESC40_SPE_RPT               BIT(19)
 #define TXDESC_AMPDU_DENSITY_SHIFT     20
-#define TXDESC_BT_INT_8723B            BIT(23)
-#define TXDESC_GID_8723B               BIT(24)
+#define TXDESC40_BT_INT                        BIT(23)
+#define TXDESC40_GID_SHIFT             24
 
 /* Word 3 */
-#define TXDESC_USE_DRIVER_RATE_8723B   BIT(8)
-#define TXDESC_CTS_SELF_ENABLE_8723B   BIT(11)
-#define TXDESC_RTS_CTS_ENABLE_8723B    BIT(12)
-#define TXDESC_HW_RTS_ENABLE_8723B     BIT(13)
-#define TXDESC_SEQ_SHIFT_8723A         16
-#define TXDESC_SEQ_MASK_8723A          0x0fff0000
+#define TXDESC40_USE_DRIVER_RATE       BIT(8)
+#define TXDESC40_CTS_SELF_ENABLE       BIT(11)
+#define TXDESC40_RTS_CTS_ENABLE                BIT(12)
+#define TXDESC40_HW_RTS_ENABLE         BIT(13)
+#define TXDESC32_SEQ_SHIFT             16
+#define TXDESC32_SEQ_MASK              0x0fff0000
 
 /* Word 4 */
-#define TXDESC_RTS_RATE_SHIFT_8723A    0
-#define TXDESC_RTS_RATE_MASK_8723A     0x3f
-#define TXDESC_QOS_8723A               BIT(6)
-#define TXDESC_HW_SEQ_ENABLE_8723A     BIT(7)
-#define TXDESC_USE_DRIVER_RATE_8723A   BIT(8)
+#define TXDESC32_RTS_RATE_SHIFT                0
+#define TXDESC32_RTS_RATE_MASK         0x3f
+#define TXDESC32_QOS                   BIT(6)
+#define TXDESC32_HW_SEQ_ENABLE         BIT(7)
+#define TXDESC32_USE_DRIVER_RATE       BIT(8)
 #define TXDESC_DISABLE_DATA_FB         BIT(10)
-#define TXDESC_CTS_SELF_ENABLE_8723A   BIT(11)
-#define TXDESC_RTS_CTS_ENABLE_8723A    BIT(12)
-#define TXDESC_HW_RTS_ENABLE_8723A     BIT(13)
+#define TXDESC32_CTS_SELF_ENABLE       BIT(11)
+#define TXDESC32_RTS_CTS_ENABLE                BIT(12)
+#define TXDESC32_HW_RTS_ENABLE         BIT(13)
 #define TXDESC_PRIME_CH_OFF_LOWER      BIT(20)
 #define TXDESC_PRIME_CH_OFF_UPPER      BIT(21)
-#define TXDESC_SHORT_PREAMBLE_8723A    BIT(24)
+#define TXDESC32_SHORT_PREAMBLE                BIT(24)
 #define TXDESC_DATA_BW                 BIT(25)
 #define TXDESC_RTS_DATA_BW             BIT(27)
 #define TXDESC_RTS_PRIME_CH_OFF_LOWER  BIT(28)
 #define TXDESC_RTS_PRIME_CH_OFF_UPPER  BIT(29)
-#define TXDESC_DATA_RATE_FB_SHIFT_8723B        8
-#define TXDESC_DATA_RATE_FB_MASK_8723B 0x00001f00
-#define TXDESC_RETRY_LIMIT_ENABLE_8723B        BIT(17)
-#define TXDESC_RETRY_LIMIT_SHIFT_8723B 18
-#define TXDESC_RETRY_LIMIT_MASK_8723B  0x00fc0000
-#define TXDESC_RTS_RATE_SHIFT_8723B    24
-#define TXDESC_RTS_RATE_MASK_8723B     0x3f000000
+#define TXDESC40_DATA_RATE_FB_SHIFT    8
+#define TXDESC40_DATA_RATE_FB_MASK     0x00001f00
+#define TXDESC40_RETRY_LIMIT_ENABLE    BIT(17)
+#define TXDESC40_RETRY_LIMIT_SHIFT     18
+#define TXDESC40_RETRY_LIMIT_MASK      0x00fc0000
+#define TXDESC40_RTS_RATE_SHIFT                24
+#define TXDESC40_RTS_RATE_MASK         0x3f000000
 
 /* Word 5 */
-#define TXDESC_SHORT_PREAMBLE_8723B    BIT(4)
-#define TXDESC_SHORT_GI                        BIT(6)
+#define TXDESC40_SHORT_PREAMBLE                BIT(4)
+#define TXDESC32_SHORT_GI              BIT(6)
 #define TXDESC_CCX_TAG                 BIT(7)
-#define TXDESC_RETRY_LIMIT_ENABLE_8723A        BIT(17)
-#define TXDESC_RETRY_LIMIT_SHIFT_8723A 18
-#define TXDESC_RETRY_LIMIT_MASK_8723A  0x00fc0000
+#define TXDESC32_RETRY_LIMIT_ENABLE    BIT(17)
+#define TXDESC32_RETRY_LIMIT_SHIFT     18
+#define TXDESC32_RETRY_LIMIT_MASK      0x00fc0000
 
 /* Word 6 */
 #define TXDESC_MAX_AGG_SHIFT           11
 
 /* Word 8 */
-#define TXDESC_HW_SEQ_ENABLE_8723B     BIT(15)
+#define TXDESC40_HW_SEQ_ENABLE         BIT(15)
 
 /* Word 9 */
-#define TXDESC_SEQ_SHIFT_8723B         12
-#define TXDESC_SEQ_MASK_8723B          0x00fff000
+#define TXDESC40_SEQ_SHIFT             12
+#define TXDESC40_SEQ_MASK              0x00fff000
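/*
 * Editorial note: the chip-suffixed names (_8723A/_8723B) become
 * TXDESC32_/TXDESC40_ prefixes above, tying each bit definition to the
 * 32- or 40-byte descriptor layout it belongs to rather than to a single
 * chip. Filling a sequence number then reads, as a sketch:
 *
 *	tx_desc->txdw3 |= cpu_to_le32(seq << TXDESC32_SEQ_SHIFT);
 *	tx_desc40->txdw9 |= cpu_to_le32(seq << TXDESC40_SEQ_SHIFT);
 */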
 
 struct phy_rx_agc_info {
 #ifdef __LITTLE_ENDIAN
@@ -599,6 +629,31 @@ struct rtl8xxxu_firmware_header {
        u8      data[0];
 };
 
+/*
+ * 8723au/8192cu/8188ru required base power index offset tables.
+ */
+struct rtl8xxxu_power_base {
+       u32 reg_0e00;
+       u32 reg_0e04;
+       u32 reg_0e08;
+       u32 reg_086c;
+
+       u32 reg_0e10;
+       u32 reg_0e14;
+       u32 reg_0e18;
+       u32 reg_0e1c;
+
+       u32 reg_0830;
+       u32 reg_0834;
+       u32 reg_0838;
+       u32 reg_086c_2;
+
+       u32 reg_083c;
+       u32 reg_0848;
+       u32 reg_084c;
+       u32 reg_0868;
+};
+
 /*
  * The 8723au has 3 channel groups: 1-3, 4-9, and 10-14
  */
@@ -763,55 +818,49 @@ struct rtl8192eu_efuse_tx_power {
        u8 cck_base[6];
        u8 ht40_base[5];
        struct rtl8723au_idx ht20_ofdm_1s_diff;
-       struct rtl8723au_idx ht40_ht20_2s_diff;
-       struct rtl8723au_idx ofdm_cck_2s_diff; /* not used */
-       struct rtl8723au_idx ht40_ht20_3s_diff;
-       struct rtl8723au_idx ofdm_cck_3s_diff; /* not used */
-       struct rtl8723au_idx ht40_ht20_4s_diff;
-       struct rtl8723au_idx ofdm_cck_4s_diff; /* not used */
+       struct rtl8723bu_pwr_idx pwr_diff[3];
+       u8 dummy5g[24]; /* max channel group (14) + power diff offset (10) */
 };
 
 struct rtl8192eu_efuse {
        __le16 rtl_id;
        u8 res0[0x0e];
        struct rtl8192eu_efuse_tx_power tx_power_index_A;       /* 0x10 */
-       struct rtl8192eu_efuse_tx_power tx_power_index_B;       /* 0x22 */
-       struct rtl8192eu_efuse_tx_power tx_power_index_C;       /* 0x34 */
-       struct rtl8192eu_efuse_tx_power tx_power_index_D;       /* 0x46 */
-       u8 res1[0x60];
+       struct rtl8192eu_efuse_tx_power tx_power_index_B;       /* 0x3a */
+       u8 res2[0x54];
        u8 channel_plan;                /* 0xb8 */
        u8 xtal_k;
        u8 thermal_meter;
        u8 iqk_lck;
        u8 pa_type;                     /* 0xbc */
        u8 lna_type_2g;                 /* 0xbd */
-       u8 res2[1];
+       u8 res3[1];
        u8 lna_type_5g;                 /* 0xbf */
-       u8 res13[1];
+       u8 res4[1];
        u8 rf_board_option;
        u8 rf_feature_option;
        u8 rf_bt_setting;
        u8 eeprom_version;
        u8 eeprom_customer_id;
-       u8 res3[3];
+       u8 res5[3];
        u8 rf_antenna_option;           /* 0xc9 */
-       u8 res4[6];
+       u8 res6[6];
        u8 vid;                         /* 0xd0 */
-       u8 res5[1];
+       u8 res7[1];
        u8 pid;                         /* 0xd2 */
-       u8 res6[1];
+       u8 res8[1];
        u8 usb_optional_function;
-       u8 res7[2];
+       u8 res9[2];
        u8 mac_addr[ETH_ALEN];          /* 0xd7 */
-       u8 res8[2];
+       u8 res10[2];
        u8 vendor_name[7];
-       u8 res9[2];
+       u8 res11[2];
        u8 device_name[0x0b];           /* 0xe8 */
-       u8 res10[2];
+       u8 res12[2];
        u8 serial[0x0b];                /* 0xf5 */
-       u8 res11[0x30];
+       u8 res13[0x30];
        u8 unknown[0x0d];               /* 0x130 */
-       u8 res12[0xc3];
+       u8 res14[0xc3];
 };
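/*
 * Editorial check on the new offsets: tx_power_index_B moves from 0x22
 * to 0x3a because tx_power_index_A now spans 0x2a bytes from its 0x10
 * base -- 6 (cck_base) + 5 (ht40_base) + 1 (ht20_ofdm_1s_diff) +
 * 3 * 2 (pwr_diff) + 24 (dummy5g) -- assuming the 1-byte rtl8723au_idx
 * and 2-byte rtl8723bu_pwr_idx layouts defined earlier in this header.
 * The old layout had four consecutive 0x12-byte blocks (0x10, 0x22,
 * 0x34, 0x46), matching the removed comments.
 */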
 
 struct rtl8xxxu_reg8val {
@@ -1177,6 +1226,7 @@ struct rtl8xxxu_priv {
        struct rtl8723au_idx ofdm_tx_power_diff[RTL8723B_TX_COUNT];
        struct rtl8723au_idx ht20_tx_power_diff[RTL8723B_TX_COUNT];
        struct rtl8723au_idx ht40_tx_power_diff[RTL8723B_TX_COUNT];
+       struct rtl8xxxu_power_base *power_base;
        u32 chip_cut:4;
        u32 rom_rev:4;
        u32 is_multi_func:1;
@@ -1204,7 +1254,6 @@ struct rtl8xxxu_priv {
        u8 rf_paths;
        u8 rx_paths;
        u8 tx_paths;
-       u32 rf_mode_ag[2];
        u32 rege94;
        u32 rege9c;
        u32 regeb4;
@@ -1236,8 +1285,9 @@ struct rtl8xxxu_priv {
        u32 mac_backup[RTL8XXXU_MAC_REGS];
        u32 bb_backup[RTL8XXXU_BB_REGS];
        u32 bb_recovery_backup[RTL8XXXU_BB_REGS];
-       u32 rtlchip;
+       enum rtl8xxxu_rtl_chip rtl_chip;
        u8 pi_enabled:1;
+       u8 no_pape:1;
        u8 int_buf[USB_INTR_CONTENT_LENGTH];
 };
 
@@ -1260,6 +1310,8 @@ struct rtl8xxxu_fileops {
        void (*power_off) (struct rtl8xxxu_priv *priv);
        void (*reset_8051) (struct rtl8xxxu_priv *priv);
        int (*llt_init) (struct rtl8xxxu_priv *priv, u8 last_tx_page);
+       void (*init_phy_bb) (struct rtl8xxxu_priv *priv);
+       int (*init_phy_rf) (struct rtl8xxxu_priv *priv);
        void (*phy_init_antenna_selection) (struct rtl8xxxu_priv *priv);
        void (*phy_iq_calibrate) (struct rtl8xxxu_priv *priv);
        void (*config_channel) (struct ieee80211_hw *hw);
@@ -1269,6 +1321,7 @@ struct rtl8xxxu_fileops {
        void (*init_statistics) (struct rtl8xxxu_priv *priv);
        void (*enable_rf) (struct rtl8xxxu_priv *priv);
        void (*disable_rf) (struct rtl8xxxu_priv *priv);
+       void (*usb_quirks) (struct rtl8xxxu_priv *priv);
        void (*set_tx_power) (struct rtl8xxxu_priv *priv, int channel,
                              bool ht40);
        void (*update_rate_mask) (struct rtl8xxxu_priv *priv,
@@ -1279,9 +1332,18 @@ struct rtl8xxxu_fileops {
        u16 mbox_ext_reg;
        char mbox_ext_width;
        char tx_desc_size;
+       char rx_desc_size;
        char has_s0s1;
        u32 adda_1t_init;
        u32 adda_1t_path_on;
        u32 adda_2t_path_on_a;
        u32 adda_2t_path_on_b;
+       u16 trxff_boundary;
+       u8 pbp_rx;
+       u8 pbp_tx;
+       struct rtl8xxxu_reg8val *mactable;
+       u8 total_page_num;
+       u8 page_num_hi;
+       u8 page_num_lo;
+       u8 page_num_norm;
 };
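/*
 * Editorial note: trxff_boundary and pbp_rx/pbp_tx are programmed once
 * during device init. A sketch of the consumer side; the register and
 * shift names are assumed from the companion rtl8xxxu_regs.h, and the
 * helper name is hypothetical:
 */
static void rtl8xxxu_init_fifo_layout(struct rtl8xxxu_priv *priv)
{
	struct rtl8xxxu_fileops *fops = priv->fops;
	u8 val8;

	rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, fops->trxff_boundary);

	val8 = (fops->pbp_rx << PBP_PAGE_SIZE_RX_SHIFT) |
	       (fops->pbp_tx << PBP_PAGE_SIZE_TX_SHIFT);
	rtl8xxxu_write8(priv, REG_PBP, val8);
}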
index e545e849f5a37c573ad31ca4344ce6434d22b062..b0e0c642302c48b5dcd2fdd8767b88f070104b67 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 - 2015 Jes Sorensen <Jes.Sorensen@redhat.com>
+ * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
 #define  AFE_XTAL_GATE_DIG             BIT(17)
 #define  AFE_XTAL_BT_GATE              BIT(20)
 
+/*
+ * 0x0028 is also known as REG_AFE_CTRL2 on 8723bu/8192eu
+ */
 #define REG_AFE_PLL_CTRL               0x0028
 #define  AFE_PLL_ENABLE                        BIT(0)
 #define  AFE_PLL_320_ENABLE            BIT(1)
                                                   control */
 #define  MULTI_GPS_FUNC_EN             BIT(22) /* GPS function enable */
 
+#define REG_AFE_CTRL4                  0x0078  /* 8192eu/8723bu */
 #define REG_LDO_SW_CTRL                        0x007c  /* 8192eu */
 
 #define REG_MCU_FW_DL                  0x0080
 #define REG_RQPN                       0x0200
 #define  RQPN_HI_PQ_SHIFT              0
 #define  RQPN_LO_PQ_SHIFT              8
-#define  RQPN_NORM_PQ_SHIFT            16
+#define  RQPN_PUB_PQ_SHIFT             16
 #define  RQPN_LOAD                     BIT(31)
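/*
 * Editorial note: the rename to RQPN_PUB_PQ_SHIFT reflects that bits
 * 16-23 hold the *public* queue page count, not a "normal" queue. A
 * sketch of loading the 8192E reserved-page split defined earlier in
 * this series (simplified; the normal-queue count is programmed via a
 * separate register in the real driver):
 *
 *	u32 val32 = (TX_PAGE_NUM_HI_PQ_8192E << RQPN_HI_PQ_SHIFT) |
 *		    (TX_PAGE_NUM_LO_PQ_8192E << RQPN_LO_PQ_SHIFT) |
 *		    (TX_PAGE_NUM_PUBQ_8192E << RQPN_PUB_PQ_SHIFT) |
 *		    RQPN_LOAD;
 *	rtl8xxxu_write32(priv, REG_RQPN, val32);
 */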
 
 #define REG_FIFOPAGE                   0x0204
 
 /*  spec version 11 */
 /* 0x0400 ~ 0x047F     Protocol Configuration */
-#define REG_VOQ_INFORMATION            0x0400
-#define REG_VIQ_INFORMATION            0x0404
-#define REG_BEQ_INFORMATION            0x0408
-#define REG_BKQ_INFORMATION            0x040c
-#define REG_MGQ_INFORMATION            0x0410
-#define REG_HGQ_INFORMATION            0x0414
-#define REG_BCNQ_INFORMATION           0x0418
+/* 8192c, 8192d */
+#define REG_VOQ_INFO                   0x0400
+#define REG_VIQ_INFO                   0x0404
+#define REG_BEQ_INFO                   0x0408
+#define REG_BKQ_INFO                   0x040c
+/* 8188e, 8723a, 8812a, 8821a, 8192e, 8723b */
+#define REG_Q0_INFO                    0x400
+#define REG_Q1_INFO                    0x404
+#define REG_Q2_INFO                    0x408
+#define REG_Q3_INFO                    0x40c
+
+#define REG_MGQ_INFO                   0x0410
+#define REG_HGQ_INFO                   0x0414
+#define REG_BCNQ_INFO                  0x0418
 
 #define REG_CPU_MGQ_INFORMATION                0x041c
 #define REG_FWHW_TXQ_CTRL              0x0420
 #define REG_DATA_SUBCHANNEL            0x0483
 /* 8723au */
 #define REG_INIDATA_RATE_SEL           0x0484
+/* MACID_SLEEP_1/3 for 8723b, 8192e, 8812a, 8821a */
+#define REG_MACID_SLEEP_3_8723B                0x0484
+#define REG_MACID_SLEEP_1_8723B                0x0488
 
 #define REG_POWER_STATUS               0x04a4
 #define REG_POWER_STAGE1               0x04b4
 #define REG_PKT_VO_VI_LIFE_TIME                0x04c0
 #define REG_PKT_BE_BK_LIFE_TIME                0x04c2
 #define REG_STBC_SETTING               0x04c4
+#define REG_QUEUE_CTRL                 0x04c6
 #define REG_HT_SINGLE_AMPDU_8723B      0x04c7
 #define REG_PROT_MODE_CTRL             0x04c8
 #define REG_MAX_AGGR_NUM               0x04ca
 #define REG_RTS_MAX_AGGR_NUM           0x04cb
 #define REG_BAR_MODE_CTRL              0x04cc
 #define REG_RA_TRY_RATE_AGG_LMT                0x04cf
+/* MACID_DROP for 8723a */
+#define REG_MACID_DROP_8723A           0x04d0
+/* EARLY_MODE_CONTROL 8188e */
+#define REG_EARLY_MODE_CONTROL_8188E   0x04d0
+/* MACID_SLEEP_2 for 8723b, 8192e, 8812a, 8821a */
+#define REG_MACID_SLEEP_2_8723B                0x04d0
+#define REG_MACID_SLEEP                        0x04d4
 #define REG_NQOS_SEQ                   0x04dc
 #define REG_QOS_SEQ                    0x04de
 #define REG_NEED_CPU_HANDLE            0x04e0
 #define  CCK0_SIDEBAND                 BIT(4)
 
 #define REG_CCK0_AFE_SETTING           0x0a04
+#define  CCK0_AFE_RX_MASK              0x0f000000
+#define  CCK0_AFE_RX_ANT_AB            BIT(24)
+#define  CCK0_AFE_RX_ANT_A             0
+#define  CCK0_AFE_RX_ANT_B             (BIT(24) | BIT(26))
 
 #define REG_CONFIG_ANT_A               0x0b68
 #define REG_CONFIG_ANT_B               0x0b6c
 #define  USB_HIMR_ROK                  BIT(0)  /*  Receive DMA OK Interrupt */
 
 #define REG_USB_SPECIAL_OPTION         0xfe55
+#define REG_USB_HRPWM                  0xfe58
 #define REG_USB_DMA_AGG_TO             0xfe5b
 #define REG_USB_AGG_TO                 0xfe5c
 #define REG_USB_AGG_TH                 0xfe5d
 #define RF6052_REG_T_METER_8723B       0x42
 #define RF6052_REG_UNKNOWN_43          0x43
 #define RF6052_REG_UNKNOWN_55          0x55
+#define RF6052_REG_UNKNOWN_56          0x56
 #define RF6052_REG_S0S1                        0xb0
 #define RF6052_REG_UNKNOWN_DF          0xdf
 #define RF6052_REG_UNKNOWN_ED          0xed
index 0517a4f2d3f2f0b602a6c51763a1666e03abce2b..c74eb139bfa14d5ea1e04c223f441d2de15d7b9b 100644
@@ -131,7 +131,7 @@ static struct ieee80211_rate rtl_ratetable_5g[] = {
 };
 
 static const struct ieee80211_supported_band rtl_band_2ghz = {
-       .band = IEEE80211_BAND_2GHZ,
+       .band = NL80211_BAND_2GHZ,
 
        .channels = rtl_channeltable_2g,
        .n_channels = ARRAY_SIZE(rtl_channeltable_2g),
@@ -143,7 +143,7 @@ static const struct ieee80211_supported_band rtl_band_2ghz = {
 };
 
 static struct ieee80211_supported_band rtl_band_5ghz = {
-       .band = IEEE80211_BAND_5GHZ,
+       .band = NL80211_BAND_5GHZ,
 
        .channels = rtl_channeltable_5g,
        .n_channels = ARRAY_SIZE(rtl_channeltable_5g),
@@ -197,7 +197,7 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
 
        ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
 
-       /*hw->wiphy->bands[IEEE80211_BAND_2GHZ]
+       /*hw->wiphy->bands[NL80211_BAND_2GHZ]
         *base on ant_num
         *rx_mask: RX mask
         *if rx_ant = 1 rx_mask[0]= 0xff;==>MCS0-MCS7
@@ -328,26 +328,26 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
            rtlhal->bandset == BAND_ON_BOTH) {
                /* 1: 2.4 G bands */
                /* <1> use  mac->bands as mem for hw->wiphy->bands */
-               sband = &(rtlmac->bands[IEEE80211_BAND_2GHZ]);
+               sband = &(rtlmac->bands[NL80211_BAND_2GHZ]);
 
-               /* <2> set hw->wiphy->bands[IEEE80211_BAND_2GHZ]
+               /* <2> set hw->wiphy->bands[NL80211_BAND_2GHZ]
                 * to default value(1T1R) */
-               memcpy(&(rtlmac->bands[IEEE80211_BAND_2GHZ]), &rtl_band_2ghz,
+               memcpy(&(rtlmac->bands[NL80211_BAND_2GHZ]), &rtl_band_2ghz,
                                sizeof(struct ieee80211_supported_band));
 
                /* <3> init ht cap base on ant_num */
                _rtl_init_hw_ht_capab(hw, &sband->ht_cap);
 
                /* <4> set mac->sband to wiphy->sband */
-               hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
+               hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
 
                /* 2: 5 G bands */
                /* <1> use  mac->bands as mem for hw->wiphy->bands */
-               sband = &(rtlmac->bands[IEEE80211_BAND_5GHZ]);
+               sband = &(rtlmac->bands[NL80211_BAND_5GHZ]);
 
-               /* <2> set hw->wiphy->bands[IEEE80211_BAND_5GHZ]
+               /* <2> set hw->wiphy->bands[NL80211_BAND_5GHZ]
                 * to default value(1T1R) */
-               memcpy(&(rtlmac->bands[IEEE80211_BAND_5GHZ]), &rtl_band_5ghz,
+               memcpy(&(rtlmac->bands[NL80211_BAND_5GHZ]), &rtl_band_5ghz,
                                sizeof(struct ieee80211_supported_band));
 
                /* <3> init ht cap base on ant_num */
@@ -355,15 +355,15 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
 
                _rtl_init_hw_vht_capab(hw, &sband->vht_cap);
                /* <4> set mac->sband to wiphy->sband */
-               hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
+               hw->wiphy->bands[NL80211_BAND_5GHZ] = sband;
        } else {
                if (rtlhal->current_bandtype == BAND_ON_2_4G) {
                        /* <1> use  mac->bands as mem for hw->wiphy->bands */
-                       sband = &(rtlmac->bands[IEEE80211_BAND_2GHZ]);
+                       sband = &(rtlmac->bands[NL80211_BAND_2GHZ]);
 
-                       /* <2> set hw->wiphy->bands[IEEE80211_BAND_2GHZ]
+                       /* <2> set hw->wiphy->bands[NL80211_BAND_2GHZ]
                         * to default value(1T1R) */
-                       memcpy(&(rtlmac->bands[IEEE80211_BAND_2GHZ]),
+                       memcpy(&(rtlmac->bands[NL80211_BAND_2GHZ]),
                               &rtl_band_2ghz,
                               sizeof(struct ieee80211_supported_band));
 
@@ -371,14 +371,14 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
                        _rtl_init_hw_ht_capab(hw, &sband->ht_cap);
 
                        /* <4> set mac->sband to wiphy->sband */
-                       hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
+                       hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
                } else if (rtlhal->current_bandtype == BAND_ON_5G) {
                        /* <1> use  mac->bands as mem for hw->wiphy->bands */
-                       sband = &(rtlmac->bands[IEEE80211_BAND_5GHZ]);
+                       sband = &(rtlmac->bands[NL80211_BAND_5GHZ]);
 
-                       /* <2> set hw->wiphy->bands[IEEE80211_BAND_5GHZ]
+                       /* <2> set hw->wiphy->bands[NL80211_BAND_5GHZ]
                         * to default value(1T1R) */
-                       memcpy(&(rtlmac->bands[IEEE80211_BAND_5GHZ]),
+                       memcpy(&(rtlmac->bands[NL80211_BAND_5GHZ]),
                               &rtl_band_5ghz,
                               sizeof(struct ieee80211_supported_band));
 
@@ -387,7 +387,7 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
 
                        _rtl_init_hw_vht_capab(hw, &sband->vht_cap);
                        /* <4> set mac->sband to wiphy->sband */
-                       hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
+                       hw->wiphy->bands[NL80211_BAND_5GHZ] = sband;
                } else {
                        RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Err BAND %d\n",
                                 rtlhal->current_bandtype);
@@ -861,7 +861,7 @@ static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw,
 
 /* mac80211's rate_idx is like this:
  *
- * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ
+ * 2.4G band:rx_status->band == NL80211_BAND_2GHZ
  *
  * B/G rate:
  * (rx_status->flag & RX_FLAG_HT) = 0,
@@ -871,7 +871,7 @@ static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw,
  * (rx_status->flag & RX_FLAG_HT) = 1,
  * DESC_RATEMCS0-->DESC_RATEMCS15 ==> idx is 0-->15
  *
- * 5G band:rx_status->band == IEEE80211_BAND_5GHZ
+ * 5G band:rx_status->band == NL80211_BAND_5GHZ
  * A rate:
  * (rx_status->flag & RX_FLAG_HT) = 0,
  * DESC_RATE6M-->DESC_RATE54M ==> idx is 0-->7,
@@ -958,7 +958,7 @@ int rtlwifi_rate_mapping(struct ieee80211_hw *hw, bool isht, bool isvht,
                return rate_idx;
        }
        if (false == isht) {
-               if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
+               if (NL80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
                        switch (desc_rate) {
                        case DESC_RATE1M:
                                rate_idx = 0;
index 451456835f8791283bd854703454136b0c1fd8b8..a30af6cc21f3c9a60d242a97d33a34794c296d25 100644
@@ -70,83 +70,83 @@ static u8 halbtc8192e2ant_btrssi_state(u8 level_num, u8 rssi_thresh,
        if (level_num == 2) {
                if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
                    (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                 "BT Rssi pre state = LOW\n");
+                       btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                   "BT Rssi pre state = LOW\n");
                        if (btrssi >= (rssi_thresh +
                                       BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
                                btrssi_state = BTC_RSSI_STATE_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "BT Rssi state switch to High\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "BT Rssi state switch to High\n");
                        } else {
                                btrssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "BT Rssi state stay at Low\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "BT Rssi state stay at Low\n");
                        }
                } else {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                 "BT Rssi pre state = HIGH\n");
+                       btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                   "BT Rssi pre state = HIGH\n");
                        if (btrssi < rssi_thresh) {
                                btrssi_state = BTC_RSSI_STATE_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "BT Rssi state switch to Low\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "BT Rssi state switch to Low\n");
                        } else {
                                btrssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "BT Rssi state stay at High\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "BT Rssi state stay at High\n");
                        }
                }
        } else if (level_num == 3) {
                if (rssi_thresh > rssi_thresh1) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                 "BT Rssi thresh error!!\n");
+                       btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                   "BT Rssi thresh error!!\n");
                        return coex_sta->pre_bt_rssi_state;
                }
 
                if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
                    (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                 "BT Rssi pre state = LOW\n");
+                       btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                   "BT Rssi pre state = LOW\n");
                        if (btrssi >= (rssi_thresh +
                                      BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
                                btrssi_state = BTC_RSSI_STATE_MEDIUM;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "BT Rssi state switch to Medium\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "BT Rssi state switch to Medium\n");
                        } else {
                                btrssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "BT Rssi state stay at Low\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "BT Rssi state stay at Low\n");
                        }
                } else if ((coex_sta->pre_bt_rssi_state ==
                            BTC_RSSI_STATE_MEDIUM) ||
                           (coex_sta->pre_bt_rssi_state ==
                            BTC_RSSI_STATE_STAY_MEDIUM)) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                 "[BTCoex], BT Rssi pre state = MEDIUM\n");
+                       btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                   "[BTCoex], BT Rssi pre state = MEDIUM\n");
                        if (btrssi >= (rssi_thresh1 +
                                       BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
                                btrssi_state = BTC_RSSI_STATE_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "BT Rssi state switch to High\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "BT Rssi state switch to High\n");
                        } else if (btrssi < rssi_thresh) {
                                btrssi_state = BTC_RSSI_STATE_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "BT Rssi state switch to Low\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "BT Rssi state switch to Low\n");
                        } else {
                                btrssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "BT Rssi state stay at Medium\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "BT Rssi state stay at Medium\n");
                        }
                } else {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                 "BT Rssi pre state = HIGH\n");
+                       btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                   "BT Rssi pre state = HIGH\n");
                        if (btrssi < rssi_thresh1) {
                                btrssi_state = BTC_RSSI_STATE_MEDIUM;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "BT Rssi state switch to Medium\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "BT Rssi state switch to Medium\n");
                        } else {
                                btrssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "BT Rssi state stay at High\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "BT Rssi state stay at High\n");
                        }
                }
        }
@@ -173,32 +173,28 @@ static u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist *btcoexist,
                        if (wifirssi >= (rssi_thresh +
                                         BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
                                wifirssi_state = BTC_RSSI_STATE_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "wifi RSSI state switch to High\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "wifi RSSI state switch to High\n");
                        } else {
                                wifirssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "wifi RSSI state stay at Low\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "wifi RSSI state stay at Low\n");
                        }
                } else {
                        if (wifirssi < rssi_thresh) {
                                wifirssi_state = BTC_RSSI_STATE_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "wifi RSSI state switch to Low\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "wifi RSSI state switch to Low\n");
                        } else {
                                wifirssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "wifi RSSI state stay at High\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "wifi RSSI state stay at High\n");
                        }
                }
        } else if (level_num == 3) {
                if (rssi_thresh > rssi_thresh1) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE,
-                                 "wifi RSSI thresh error!!\n");
+                       btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                   "wifi RSSI thresh error!!\n");
                        return coex_sta->pre_wifi_rssi_state[index];
                }
 
@@ -209,14 +205,12 @@ static u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist *btcoexist,
                        if (wifirssi >= (rssi_thresh +
                                         BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
                                wifirssi_state = BTC_RSSI_STATE_MEDIUM;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "wifi RSSI state switch to Medium\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "wifi RSSI state switch to Medium\n");
                        } else {
                                wifirssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "wifi RSSI state stay at Low\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "wifi RSSI state stay at Low\n");
                        }
                } else if ((coex_sta->pre_wifi_rssi_state[index] ==
                            BTC_RSSI_STATE_MEDIUM) ||
@@ -225,31 +219,26 @@ static u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist *btcoexist,
                        if (wifirssi >= (rssi_thresh1 +
                                         BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
                                wifirssi_state = BTC_RSSI_STATE_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "wifi RSSI state switch to High\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "wifi RSSI state switch to High\n");
                        } else if (wifirssi < rssi_thresh) {
                                wifirssi_state = BTC_RSSI_STATE_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "wifi RSSI state switch to Low\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "wifi RSSI state switch to Low\n");
                        } else {
                                wifirssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "wifi RSSI state stay at Medium\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "wifi RSSI state stay at Medium\n");
                        }
                } else {
                        if (wifirssi < rssi_thresh1) {
                                wifirssi_state = BTC_RSSI_STATE_MEDIUM;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "wifi RSSI state switch to Medium\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "wifi RSSI state switch to Medium\n");
                        } else {
                                wifirssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "wifi RSSI state stay at High\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "wifi RSSI state stay at High\n");
                        }
                }
        }
@@ -284,26 +273,26 @@ static void btc8192e2ant_monitor_bt_enable_dis(struct btc_coexist *btcoexist)
                bt_disabled = false;
                btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
                                   &bt_disabled);
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-                         "[BTCoex], BT is enabled !!\n");
+               btc_alg_dbg(ALGO_BT_MONITOR,
+                           "[BTCoex], BT is enabled !!\n");
        } else {
                bt_disable_cnt++;
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-                         "[BTCoex], bt all counters = 0, %d times!!\n",
-                         bt_disable_cnt);
+               btc_alg_dbg(ALGO_BT_MONITOR,
+                           "[BTCoex], bt all counters = 0, %d times!!\n",
+                           bt_disable_cnt);
                if (bt_disable_cnt >= 2) {
                        bt_disabled = true;
                        btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
                                           &bt_disabled);
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-                                 "[BTCoex], BT is disabled !!\n");
+                       btc_alg_dbg(ALGO_BT_MONITOR,
+                                   "[BTCoex], BT is disabled !!\n");
                }
        }
        if (pre_bt_disabled != bt_disabled) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-                         "[BTCoex], BT is from %s to %s!!\n",
-                         (pre_bt_disabled ? "disabled" : "enabled"),
-                         (bt_disabled ? "disabled" : "enabled"));
+               btc_alg_dbg(ALGO_BT_MONITOR,
+                           "[BTCoex], BT is from %s to %s!!\n",
+                           (pre_bt_disabled ? "disabled" : "enabled"),
+                           (bt_disabled ? "disabled" : "enabled"));
                pre_bt_disabled = bt_disabled;
        }
 }
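/*
 * Editorial note: every BTC_PRINT(BTC_MSG_ALGORITHM, flag, ...) in this
 * file collapses into btc_alg_dbg(flag, ...). The wrapper's shape is
 * essentially the following sketch -- the in-tree definition lives in
 * the btcoexist headers and may differ in detail:
 *
 *	#define btc_alg_dbg(dbgflag, fmt, ...)			\
 *		BTC_PRINT(BTC_MSG_ALGORITHM, dbgflag,		\
 *			  fmt, ##__VA_ARGS__)
 */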
@@ -499,12 +488,12 @@ static void halbtc8192e2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
        coex_sta->low_priority_tx = reg_lp_tx;
        coex_sta->low_priority_rx = reg_lp_rx;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-                 "[BTCoex] High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
-                 reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-                 "[BTCoex] Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
-                 reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
+       btc_alg_dbg(ALGO_BT_MONITOR,
+                   "[BTCoex] High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
+                   reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
+       btc_alg_dbg(ALGO_BT_MONITOR,
+                   "[BTCoex] Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
+                   reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
 
        /* reset counter */
        btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
@@ -518,9 +507,9 @@ static void halbtc8192e2ant_querybt_info(struct btc_coexist *btcoexist)
 
        h2c_parameter[0] |= BIT0;       /* trigger */
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
-                 h2c_parameter[0]);
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+                   h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
 }
@@ -592,8 +581,8 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
        btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson);
 
        if (!bt_link_info->bt_link_exist) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "No BT link exists!!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "No BT link exists!!!\n");
                return algorithm;
        }
 
@@ -608,27 +597,27 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
 
        if (numdiffprofile == 1) {
                if (bt_link_info->sco_exist) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "SCO only\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "SCO only\n");
                        algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
                } else {
                        if (bt_link_info->hid_exist) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "HID only\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "HID only\n");
                                algorithm = BT_8192E_2ANT_COEX_ALGO_HID;
                        } else if (bt_link_info->a2dp_exist) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "A2DP only\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "A2DP only\n");
                                algorithm = BT_8192E_2ANT_COEX_ALGO_A2DP;
                        } else if (bt_link_info->pan_exist) {
                                if (bt_hson) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "PAN(HS) only\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "PAN(HS) only\n");
                                        algorithm =
                                                BT_8192E_2ANT_COEX_ALGO_PANHS;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "PAN(EDR) only\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "PAN(EDR) only\n");
                                        algorithm =
                                                BT_8192E_2ANT_COEX_ALGO_PANEDR;
                                }
@@ -637,21 +626,21 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
        } else if (numdiffprofile == 2) {
                if (bt_link_info->sco_exist) {
                        if (bt_link_info->hid_exist) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "SCO + HID\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "SCO + HID\n");
                                algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
                        } else if (bt_link_info->a2dp_exist) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "SCO + A2DP ==> SCO\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "SCO + A2DP ==> SCO\n");
                                algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
                        } else if (bt_link_info->pan_exist) {
                                if (bt_hson) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "SCO + PAN(HS)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "SCO + PAN(HS)\n");
                                        algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "SCO + PAN(EDR)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "SCO + PAN(EDR)\n");
                                        algorithm =
                                                BT_8192E_2ANT_COEX_ALGO_SCO_PAN;
                                }
@@ -660,38 +649,38 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
                        if (bt_link_info->hid_exist &&
                            bt_link_info->a2dp_exist) {
                                if (stack_info->num_of_hid >= 2) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "HID*2 + A2DP\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "HID*2 + A2DP\n");
                                        algorithm =
                                        BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "HID + A2DP\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "HID + A2DP\n");
                                        algorithm =
                                            BT_8192E_2ANT_COEX_ALGO_HID_A2DP;
                                }
                        } else if (bt_link_info->hid_exist &&
                                   bt_link_info->pan_exist) {
                                if (bt_hson) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "HID + PAN(HS)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "HID + PAN(HS)\n");
                                        algorithm = BT_8192E_2ANT_COEX_ALGO_HID;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "HID + PAN(EDR)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "HID + PAN(EDR)\n");
                                        algorithm =
                                            BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
                                }
                        } else if (bt_link_info->pan_exist &&
                                   bt_link_info->a2dp_exist) {
                                if (bt_hson) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "A2DP + PAN(HS)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "A2DP + PAN(HS)\n");
                                        algorithm =
                                            BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "A2DP + PAN(EDR)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "A2DP + PAN(EDR)\n");
                                        algorithm =
                                            BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP;
                                }
@@ -701,30 +690,30 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
                if (bt_link_info->sco_exist) {
                        if (bt_link_info->hid_exist &&
                            bt_link_info->a2dp_exist) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "SCO + HID + A2DP ==> HID\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "SCO + HID + A2DP ==> HID\n");
                                algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
                        } else if (bt_link_info->hid_exist &&
                                   bt_link_info->pan_exist) {
                                if (bt_hson) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "SCO + HID + PAN(HS)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "SCO + HID + PAN(HS)\n");
                                        algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "SCO + HID + PAN(EDR)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "SCO + HID + PAN(EDR)\n");
                                        algorithm =
                                                BT_8192E_2ANT_COEX_ALGO_SCO_PAN;
                                }
                        } else if (bt_link_info->pan_exist &&
                                   bt_link_info->a2dp_exist) {
                                if (bt_hson) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "SCO + A2DP + PAN(HS)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "SCO + A2DP + PAN(HS)\n");
                                        algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "SCO + A2DP + PAN(EDR)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "SCO + A2DP + PAN(EDR)\n");
                                        algorithm =
                                            BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
                                }
@@ -734,13 +723,13 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
                            bt_link_info->pan_exist &&
                            bt_link_info->a2dp_exist) {
                                if (bt_hson) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "HID + A2DP + PAN(HS)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "HID + A2DP + PAN(HS)\n");
                                        algorithm =
                                            BT_8192E_2ANT_COEX_ALGO_HID_A2DP;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "HID + A2DP + PAN(EDR)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "HID + A2DP + PAN(EDR)\n");
                                        algorithm =
                                        BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
                                }
@@ -752,12 +741,12 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
                            bt_link_info->pan_exist &&
                            bt_link_info->a2dp_exist) {
                                if (bt_hson) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "ErrorSCO+HID+A2DP+PAN(HS)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "Error!!! SCO+HID+A2DP+PAN(HS)\n");
 
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "SCO+HID+A2DP+PAN(EDR)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "SCO+HID+A2DP+PAN(EDR)\n");
                                        algorithm =
                                            BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
                                }
@@ -778,10 +767,10 @@ static void halbtc8192e2ant_setfw_dac_swinglevel(struct btc_coexist *btcoexist,
         */
        h2c_parameter[0] = dac_swinglvl;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swinglvl);
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swinglvl);
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
 }
@@ -793,9 +782,9 @@ static void halbtc8192e2ant_set_fwdec_btpwr(struct btc_coexist *btcoexist,
 
        h2c_parameter[0] = dec_btpwr_lvl;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex] decrease Bt Power level = %d, FW write 0x62 = 0x%x\n",
-                 dec_btpwr_lvl, h2c_parameter[0]);
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex] decrease Bt Power level = %d, FW write 0x62 = 0x%x\n",
+                   dec_btpwr_lvl, h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
 }
@@ -803,15 +792,15 @@ static void halbtc8192e2ant_set_fwdec_btpwr(struct btc_coexist *btcoexist,
 static void halbtc8192e2ant_dec_btpwr(struct btc_coexist *btcoexist,
                                      bool force_exec, u8 dec_btpwr_lvl)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-                 "[BTCoex], %s Dec BT power level = %d\n",
-                 (force_exec ? "force to" : ""), dec_btpwr_lvl);
+       btc_alg_dbg(ALGO_TRACE_FW,
+                   "[BTCoex], %s Dec BT power level = %d\n",
+                   (force_exec ? "force to" : ""), dec_btpwr_lvl);
        coex_dm->cur_dec_bt_pwr = dec_btpwr_lvl;
 
        if (!force_exec) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], preBtDecPwrLvl=%d, curBtDecPwrLvl=%d\n",
-                         coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], preBtDecPwrLvl=%d, curBtDecPwrLvl=%d\n",
+                           coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
        }
        halbtc8192e2ant_set_fwdec_btpwr(btcoexist, coex_dm->cur_dec_bt_pwr);
 
@@ -828,10 +817,10 @@ static void halbtc8192e2ant_set_bt_autoreport(struct btc_coexist *btcoexist,
        if (enable_autoreport)
                h2c_parameter[0] |= BIT0;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
-                 (enable_autoreport ? "Enabled!!" : "Disabled!!"),
-                 h2c_parameter[0]);
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
+                   (enable_autoreport ? "Enabled!!" : "Disabled!!"),
+                   h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
 }
@@ -840,17 +829,17 @@ static void halbtc8192e2ant_bt_autoreport(struct btc_coexist *btcoexist,
                                          bool force_exec,
                                          bool enable_autoreport)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-                 "[BTCoex], %s BT Auto report = %s\n",
-                 (force_exec ? "force to" : ""),
-                 ((enable_autoreport) ? "Enabled" : "Disabled"));
+       btc_alg_dbg(ALGO_TRACE_FW,
+                   "[BTCoex], %s BT Auto report = %s\n",
+                   (force_exec ? "force to" : ""),
+                   ((enable_autoreport) ? "Enabled" : "Disabled"));
        coex_dm->cur_bt_auto_report = enable_autoreport;
 
        if (!force_exec) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex] bPreBtAutoReport=%d, bCurBtAutoReport=%d\n",
-                         coex_dm->pre_bt_auto_report,
-                         coex_dm->cur_bt_auto_report);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex] bPreBtAutoReport=%d, bCurBtAutoReport=%d\n",
+                           coex_dm->pre_bt_auto_report,
+                           coex_dm->cur_bt_auto_report);
 
                if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
                        return;
@@ -864,16 +853,16 @@ static void halbtc8192e2ant_bt_autoreport(struct btc_coexist *btcoexist,
 static void halbtc8192e2ant_fw_dac_swinglvl(struct btc_coexist *btcoexist,
                                            bool force_exec, u8 fw_dac_swinglvl)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-                 "[BTCoex], %s set FW Dac Swing level = %d\n",
-                 (force_exec ? "force to" : ""), fw_dac_swinglvl);
+       btc_alg_dbg(ALGO_TRACE_FW,
+                   "[BTCoex], %s set FW Dac Swing level = %d\n",
+                   (force_exec ? "force to" : ""), fw_dac_swinglvl);
        coex_dm->cur_fw_dac_swing_lvl = fw_dac_swinglvl;
 
        if (!force_exec) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex] preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
-                         coex_dm->pre_fw_dac_swing_lvl,
-                         coex_dm->cur_fw_dac_swing_lvl);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex] preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
+                           coex_dm->pre_fw_dac_swing_lvl,
+                           coex_dm->cur_fw_dac_swing_lvl);
 
                if (coex_dm->pre_fw_dac_swing_lvl ==
                    coex_dm->cur_fw_dac_swing_lvl)
@@ -891,8 +880,8 @@ static void btc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
 {
        if (rx_rf_shrink_on) {
                /* Shrink RF Rx LPF corner */
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                         "[BTCoex], Shrink RF Rx LPF corner!!\n");
+               btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                           "[BTCoex], Shrink RF Rx LPF corner!!\n");
                btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
                                          0xfffff, 0xffffc);
        } else {
@@ -900,8 +889,8 @@ static void btc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
                 * After initialization, we can use coex_dm->btRf0x1eBackup
                 */
                if (btcoexist->initilized) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                                 "[BTCoex], Resume RF Rx LPF corner!!\n");
+                       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                                   "[BTCoex], Resume RF Rx LPF corner!!\n");
                        btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
                                                  0xfffff,
                                                  coex_dm->bt_rf0x1e_backup);
@@ -912,17 +901,17 @@ static void btc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
 static void halbtc8192e2ant_rf_shrink(struct btc_coexist *btcoexist,
                                      bool force_exec, bool rx_rf_shrink_on)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-                 "[BTCoex], %s turn Rx RF Shrink = %s\n",
-                 (force_exec ? "force to" : ""),
-                 ((rx_rf_shrink_on) ? "ON" : "OFF"));
+       btc_alg_dbg(ALGO_TRACE_SW,
+                   "[BTCoex], %s turn Rx RF Shrink = %s\n",
+                   (force_exec ? "force to" : ""),
+                   ((rx_rf_shrink_on) ? "ON" : "OFF"));
        coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
 
        if (!force_exec) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-                         "[BTCoex]bPreRfRxLpfShrink=%d,bCurRfRxLpfShrink=%d\n",
-                         coex_dm->pre_rf_rx_lpf_shrink,
-                         coex_dm->cur_rf_rx_lpf_shrink);
+               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+                           "[BTCoex]bPreRfRxLpfShrink=%d,bCurRfRxLpfShrink=%d\n",
+                           coex_dm->pre_rf_rx_lpf_shrink,
+                           coex_dm->cur_rf_rx_lpf_shrink);
 
                if (coex_dm->pre_rf_rx_lpf_shrink ==
                    coex_dm->cur_rf_rx_lpf_shrink)
@@ -939,8 +928,8 @@ static void halbtc8192e2ant_set_dac_swingreg(struct btc_coexist *btcoexist,
 {
        u8 val = (u8)level;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                 "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                   "[BTCoex], Write SwDacSwing = 0x%x\n", level);
        btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val);
 }
 
@@ -958,22 +947,22 @@ static void halbtc8192e2ant_DacSwing(struct btc_coexist *btcoexist,
                                     bool force_exec, bool dac_swingon,
                                     u32 dac_swinglvl)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-                 "[BTCoex], %s turn DacSwing=%s, dac_swinglvl = 0x%x\n",
-                 (force_exec ? "force to" : ""),
-                 ((dac_swingon) ? "ON" : "OFF"), dac_swinglvl);
+       btc_alg_dbg(ALGO_TRACE_SW,
+                   "[BTCoex], %s turn DacSwing=%s, dac_swinglvl = 0x%x\n",
+                   (force_exec ? "force to" : ""),
+                   ((dac_swingon) ? "ON" : "OFF"), dac_swinglvl);
        coex_dm->cur_dac_swing_on = dac_swingon;
        coex_dm->cur_dac_swing_lvl = dac_swinglvl;
 
        if (!force_exec) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-                         "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl = 0x%x, ",
-                         coex_dm->pre_dac_swing_on,
-                         coex_dm->pre_dac_swing_lvl);
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-                         "bCurDacSwingOn=%d, curDacSwingLvl = 0x%x\n",
-                         coex_dm->cur_dac_swing_on,
-                         coex_dm->cur_dac_swing_lvl);
+               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+                           "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl = 0x%x, ",
+                           coex_dm->pre_dac_swing_on,
+                           coex_dm->pre_dac_swing_lvl);
+               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+                           "bCurDacSwingOn=%d, curDacSwingLvl = 0x%x\n",
+                           coex_dm->cur_dac_swing_on,
+                           coex_dm->cur_dac_swing_lvl);
 
                if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
                    (coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl))
@@ -991,8 +980,8 @@ static void halbtc8192e2ant_set_agc_table(struct btc_coexist *btcoexist,
 {
        /* BB AGC Gain Table */
        if (agc_table_en) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                         "[BTCoex], BB Agc Table On!\n");
+               btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                           "[BTCoex], BB Agc Table On!\n");
                btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x0a1A0001);
                btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x091B0001);
                btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x081C0001);
@@ -1000,8 +989,8 @@ static void halbtc8192e2ant_set_agc_table(struct btc_coexist *btcoexist,
                btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x061E0001);
                btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x051F0001);
        } else {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                         "[BTCoex], BB Agc Table Off!\n");
+               btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                           "[BTCoex], BB Agc Table Off!\n");
                btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001);
                btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001);
                btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001);
@@ -1014,16 +1003,17 @@ static void halbtc8192e2ant_set_agc_table(struct btc_coexist *btcoexist,
 static void halbtc8192e2ant_AgcTable(struct btc_coexist *btcoexist,
                                     bool force_exec, bool agc_table_en)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-                 "[BTCoex], %s %s Agc Table\n",
-                 (force_exec ? "force to" : ""),
-                 ((agc_table_en) ? "Enable" : "Disable"));
+       btc_alg_dbg(ALGO_TRACE_SW,
+                   "[BTCoex], %s %s Agc Table\n",
+                   (force_exec ? "force to" : ""),
+                   ((agc_table_en) ? "Enable" : "Disable"));
        coex_dm->cur_agc_table_en = agc_table_en;
 
        if (!force_exec) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-                         "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
-                         coex_dm->pre_agc_table_en, coex_dm->cur_agc_table_en);
+               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+                           "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
+                           coex_dm->pre_agc_table_en,
+                           coex_dm->cur_agc_table_en);
 
                if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en)
                        return;
@@ -1037,20 +1027,20 @@ static void halbtc8192e2ant_set_coex_table(struct btc_coexist *btcoexist,
                                           u32 val0x6c0, u32 val0x6c4,
                                           u32 val0x6c8, u8 val0x6cc)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                 "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
+       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                   "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
        btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                 "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
+       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                   "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
        btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                 "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
+       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                   "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
        btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                 "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
+       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                   "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
        btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
 }
 
@@ -1059,30 +1049,30 @@ static void halbtc8192e2ant_coex_table(struct btc_coexist *btcoexist,
                                       u32 val0x6c0, u32 val0x6c4,
                                       u32 val0x6c8, u8 val0x6cc)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-                 "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, ",
-                 (force_exec ? "force to" : ""), val0x6c0);
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-                 "0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
-                 val0x6c4, val0x6c8, val0x6cc);
+       btc_alg_dbg(ALGO_TRACE_SW,
+                   "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, ",
+                   (force_exec ? "force to" : ""), val0x6c0);
+       btc_alg_dbg(ALGO_TRACE_SW,
+                   "0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
+                   val0x6c4, val0x6c8, val0x6cc);
        coex_dm->cur_val0x6c0 = val0x6c0;
        coex_dm->cur_val0x6c4 = val0x6c4;
        coex_dm->cur_val0x6c8 = val0x6c8;
        coex_dm->cur_val0x6cc = val0x6cc;
 
        if (!force_exec) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-                         "[BTCoex], preVal0x6c0 = 0x%x, preVal0x6c4 = 0x%x, ",
-                         coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4);
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-                         "preVal0x6c8 = 0x%x, preVal0x6cc = 0x%x !!\n",
-                         coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-                         "[BTCoex], curVal0x6c0 = 0x%x, curVal0x6c4 = 0x%x,\n",
-                         coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4);
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-                         "curVal0x6c8 = 0x%x, curVal0x6cc = 0x%x !!\n",
-                         coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
+               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+                           "[BTCoex], preVal0x6c0 = 0x%x, preVal0x6c4 = 0x%x, ",
+                           coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4);
+               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+                           "preVal0x6c8 = 0x%x, preVal0x6cc = 0x%x !!\n",
+                           coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
+               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+                           "[BTCoex], curVal0x6c0 = 0x%x, curVal0x6c4 = 0x%x\n",
+                           coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4);
+               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+                           "curVal0x6c8 = 0x%x, curVal0x6cc = 0x%x !!\n",
+                           coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
 
                if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
                    (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
@@ -1136,9 +1126,9 @@ static void halbtc8192e2ant_set_fw_ignore_wlanact(struct btc_coexist *btcoexist,
        if (enable)
                h2c_parameter[0] |= BIT0; /* function enable */
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex]set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
-                 h2c_parameter[0]);
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex]set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
+                   h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
 }
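
Every halbtc8192e2ant_set_fw*() helper converted in this file has the shape
shown above: pack a small host-to-card (H2C) parameter buffer, trace it via
btc_alg_dbg(ALGO_TRACE_FW_EXEC, ...), then hand it to the BT firmware
through the btc_fill_h2c() callback. A minimal sketch of that shape -- the
0x63 opcode and BIT0 enable flag are taken from the hunk above, the rest
from the surrounding code:

        static void sketch_set_fw_ignore_wlanact(struct btc_coexist *btcoexist,
                                                 bool enable)
        {
                u8 h2c_parameter[1] = {0};

                if (enable)
                        h2c_parameter[0] |= BIT0;       /* function enable */

                btc_alg_dbg(ALGO_TRACE_FW_EXEC,
                            "[BTCoex]set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
                            h2c_parameter[0]);

                /* one-byte H2C command 0x63 carried to the BT firmware */
                btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
        }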
@@ -1146,18 +1136,18 @@ static void halbtc8192e2ant_set_fw_ignore_wlanact(struct btc_coexist *btcoexist,
 static void halbtc8192e2ant_IgnoreWlanAct(struct btc_coexist *btcoexist,
                                          bool force_exec, bool enable)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-                 "[BTCoex], %s turn Ignore WlanAct %s\n",
-                 (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+       btc_alg_dbg(ALGO_TRACE_FW,
+                   "[BTCoex], %s turn Ignore WlanAct %s\n",
+                   (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
        coex_dm->cur_ignore_wlan_act = enable;
 
        if (!force_exec) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], bPreIgnoreWlanAct = %d ",
-                         coex_dm->pre_ignore_wlan_act);
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "bCurIgnoreWlanAct = %d!!\n",
-                         coex_dm->cur_ignore_wlan_act);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], bPreIgnoreWlanAct = %d ",
+                           coex_dm->pre_ignore_wlan_act);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "bCurIgnoreWlanAct = %d!!\n",
+                           coex_dm->cur_ignore_wlan_act);
 
                if (coex_dm->pre_ignore_wlan_act ==
                    coex_dm->cur_ignore_wlan_act)
@@ -1185,11 +1175,11 @@ static void halbtc8192e2ant_SetFwPstdma(struct btc_coexist *btcoexist, u8 byte1,
        coex_dm->ps_tdma_para[3] = byte4;
        coex_dm->ps_tdma_para[4] = byte5;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
-                 h2c_parameter[0],
-                 h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
-                 h2c_parameter[3] << 8 | h2c_parameter[4]);
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
+                   h2c_parameter[0],
+                   h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
+                   h2c_parameter[3] << 8 | h2c_parameter[4]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
 }
@@ -1213,20 +1203,20 @@ static void btc8192e2ant_sw_mec2(struct btc_coexist *btcoexist,
 static void halbtc8192e2ant_ps_tdma(struct btc_coexist *btcoexist,
                                    bool force_exec, bool turn_on, u8 type)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-                 "[BTCoex], %s turn %s PS TDMA, type=%d\n",
-                 (force_exec ? "force to" : ""),
-                 (turn_on ? "ON" : "OFF"), type);
+       btc_alg_dbg(ALGO_TRACE_FW,
+                   "[BTCoex], %s turn %s PS TDMA, type=%d\n",
+                   (force_exec ? "force to" : ""),
+                   (turn_on ? "ON" : "OFF"), type);
        coex_dm->cur_ps_tdma_on = turn_on;
        coex_dm->cur_ps_tdma = type;
 
        if (!force_exec) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
-                         coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
-                         coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
+                           coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
+                           coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
 
                if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
                    (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1353,8 +1343,8 @@ static void halbtc8192e2ant_set_switch_sstype(struct btc_coexist *btcoexist,
        u8 mimops = BTC_MIMO_PS_DYNAMIC;
        u32 disra_mask = 0x0;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                 "[BTCoex], REAL set SS Type = %d\n", sstype);
+       btc_alg_dbg(ALGO_TRACE,
+                   "[BTCoex], REAL set SS Type = %d\n", sstype);
 
        disra_mask = halbtc8192e2ant_decidera_mask(btcoexist, sstype,
                                                   coex_dm->curra_masktype);
@@ -1386,9 +1376,9 @@ static void halbtc8192e2ant_set_switch_sstype(struct btc_coexist *btcoexist,
 static void halbtc8192e2ant_switch_sstype(struct btc_coexist *btcoexist,
                                          bool force_exec, u8 new_sstype)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                 "[BTCoex], %s Switch SS Type = %d\n",
-                 (force_exec ? "force to" : ""), new_sstype);
+       btc_alg_dbg(ALGO_TRACE,
+                   "[BTCoex], %s Switch SS Type = %d\n",
+                   (force_exec ? "force to" : ""), new_sstype);
        coex_dm->cur_sstype = new_sstype;
 
        if (!force_exec) {
@@ -1469,8 +1459,8 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
                btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
                                   &low_pwr_disable);
 
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], Wifi non-connected idle!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], Wifi non-connected idle!!\n");
 
                if ((BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE ==
                     coex_dm->bt_status) ||
@@ -1506,8 +1496,8 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
                                           BTC_SET_ACT_DISABLE_LOW_POWER,
                                           &low_pwr_disable);
 
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "Wifi connected + BT non connected-idle!!\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "Wifi connected + BT non connected-idle!!\n");
 
                        halbtc8192e2ant_switch_sstype(btcoexist,
                                                      NORMAL_EXEC, 2);
@@ -1534,8 +1524,8 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
 
                        if (bt_hson)
                                return false;
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "Wifi connected + BT connected-idle!!\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "Wifi connected + BT connected-idle!!\n");
 
                        halbtc8192e2ant_switch_sstype(btcoexist,
                                                      NORMAL_EXEC, 2);
@@ -1560,12 +1550,12 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
                                           &low_pwr_disable);
 
                        if (wifi_busy) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "Wifi Connected-Busy + BT Busy!!\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "Wifi Connected-Busy + BT Busy!!\n");
                                common = false;
                        } else {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "Wifi Connected-Idle + BT Busy!!\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "Wifi Connected-Idle + BT Busy!!\n");
 
                                halbtc8192e2ant_switch_sstype(btcoexist,
                                                              NORMAL_EXEC, 1);
@@ -1592,9 +1582,8 @@ static void btc8192e_int1(struct btc_coexist *btcoexist, bool tx_pause,
                          int result)
 {
        if (tx_pause) {
-               BTC_PRINT(BTC_MSG_ALGORITHM,
-                         ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], TxPause = 1\n");
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], TxPause = 1\n");
 
                if (coex_dm->cur_ps_tdma == 71) {
                        halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
@@ -1689,9 +1678,8 @@ static void btc8192e_int1(struct btc_coexist *btcoexist, bool tx_pause,
                        }
                }
        } else {
-               BTC_PRINT(BTC_MSG_ALGORITHM,
-                         ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], TxPause = 0\n");
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], TxPause = 0\n");
                if (coex_dm->cur_ps_tdma == 5) {
                        halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
                                                true, 71);
@@ -1795,9 +1783,8 @@ static void btc8192e_int2(struct btc_coexist *btcoexist, bool tx_pause,
                          int result)
 {
        if (tx_pause) {
-               BTC_PRINT(BTC_MSG_ALGORITHM,
-                         ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], TxPause = 1\n");
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], TxPause = 1\n");
                if (coex_dm->cur_ps_tdma == 1) {
                        halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
                                                true, 6);
@@ -1886,9 +1873,8 @@ static void btc8192e_int2(struct btc_coexist *btcoexist, bool tx_pause,
                        }
                }
        } else {
-               BTC_PRINT(BTC_MSG_ALGORITHM,
-                         ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], TxPause = 0\n");
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], TxPause = 0\n");
                if (coex_dm->cur_ps_tdma == 5) {
                        halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
                                                true, 2);
@@ -1983,9 +1969,8 @@ static void btc8192e_int3(struct btc_coexist *btcoexist, bool tx_pause,
                          int result)
 {
        if (tx_pause) {
-               BTC_PRINT(BTC_MSG_ALGORITHM,
-                         ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], TxPause = 1\n");
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], TxPause = 1\n");
                if (coex_dm->cur_ps_tdma == 1) {
                        halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
                                                true, 7);
@@ -2074,9 +2059,8 @@ static void btc8192e_int3(struct btc_coexist *btcoexist, bool tx_pause,
                        }
                }
        } else {
-               BTC_PRINT(BTC_MSG_ALGORITHM,
-                         ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], TxPause = 0\n");
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], TxPause = 0\n");
                if (coex_dm->cur_ps_tdma == 5) {
                        halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
                                                true, 3);
@@ -2178,13 +2162,13 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
        int result;
        u8 retry_cnt = 0;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-                 "[BTCoex], TdmaDurationAdjust()\n");
+       btc_alg_dbg(ALGO_TRACE_FW,
+                   "[BTCoex], TdmaDurationAdjust()\n");
 
        if (!coex_dm->auto_tdma_adjust) {
                coex_dm->auto_tdma_adjust = true;
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], first run TdmaDurationAdjust()!!\n");
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], first run TdmaDurationAdjust()!!\n");
                if (sco_hid) {
                        if (tx_pause) {
                                if (max_interval == 1) {
@@ -2288,11 +2272,11 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
        } else {
                /* acquire the BT TRx retry count from BT_Info byte2 */
                retry_cnt = coex_sta->bt_retry_cnt;
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], retry_cnt = %d\n", retry_cnt);
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_cnt=%d\n",
-                         up, dn, m, n, wait_cnt);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], retry_cnt = %d\n", retry_cnt);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_cnt=%d\n",
+                           up, dn, m, n, wait_cnt);
                result = 0;
                wait_cnt++;
                /* no retry in the last 2-second duration */
@@ -2309,9 +2293,8 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                                up = 0;
                                dn = 0;
                                result = 1;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_TRACE_FW_DETAIL,
-                                         "[BTCoex]Increase wifi duration!!\n");
+                               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                                           "[BTCoex]Increase wifi duration!!\n");
                        }
                } else if (retry_cnt <= 3) {
                        up--;
@@ -2334,9 +2317,8 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                                dn = 0;
                                wait_cnt = 0;
                                result = -1;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_TRACE_FW_DETAIL,
-                                         "Reduce wifi duration for retry<3\n");
+                               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                                           "Reduce wifi duration for retry<3\n");
                        }
                } else {
                        if (wait_cnt == 1)
@@ -2352,12 +2334,12 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                        dn = 0;
                        wait_cnt = 0;
                        result = -1;
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                                 "Decrease wifi duration for retryCounter>3!!\n");
+                       btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                                   "Decrease wifi duration for retryCounter>3!!\n");
                }
 
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], max Interval = %d\n", max_interval);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], max Interval = %d\n", max_interval);
                if (max_interval == 1)
                        btc8192e_int1(btcoexist, tx_pause, result);
                else if (max_interval == 2)
@@ -2373,11 +2355,11 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
        if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) {
                bool scan = false, link = false, roam = false;
 
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], PsTdma type dismatch!!!, ");
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "curPsTdma=%d, recordPsTdma=%d\n",
-                         coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], PsTdma type mismatch!!!, ");
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "curPsTdma=%d, recordPsTdma=%d\n",
+                           coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
 
                btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
                btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2388,9 +2370,8 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                                                true,
                                                coex_dm->tdma_adj_type);
                else
-                       BTC_PRINT(BTC_MSG_ALGORITHM,
-                                 ALGO_TRACE_FW_DETAIL,
-                                 "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
+                       btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                                   "[BTCoex], roaming/link/scan is in progress, will adjust next time!!!\n");
        }
 }
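
Between these hunks the function accumulates a retry-count hysteresis: each
2-second poll of coex_sta->bt_retry_cnt either builds evidence that the air
is clean (up) or congested (dn), and the verdict is dispatched to
btc8192e_int1/2/3() according to max_interval, as the last hunk shows. A
condensed sketch of that decision -- the fixed thresholds here are
illustrative only, standing in for the adaptive m/n/wait_cnt bookkeeping in
the source:

        /* +1 grows the WiFi TDMA slot, -1 shrinks it, 0 leaves it alone */
        static int tdma_adjust_direction(u8 retry_cnt, u8 *up, u8 *dn)
        {
                if (retry_cnt == 0) {           /* no BT retries in last 2 s */
                        (*up)++;
                        *dn = 0;
                        if (*up >= 3) {         /* several clean polls in a row */
                                *up = 0;
                                return 1;       /* increase wifi duration */
                        }
                } else if (retry_cnt <= 3) {    /* light interference */
                        if (*up > 0)
                                (*up)--;
                        (*dn)++;
                        if (*dn >= 2) {         /* repeated light retries */
                                *up = 0;
                                *dn = 0;
                                return -1;      /* reduce wifi duration */
                        }
                } else {                        /* heavy retries */
                        *up = 0;
                        *dn = 0;
                        return -1;              /* shrink immediately */
                }
                return 0;
        }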
 
@@ -2594,8 +2575,8 @@ static void halbtc8192e2ant_action_a2dp(struct btc_coexist *btcoexist)
             btrssi_state == BTC_RSSI_STATE_STAY_LOW) &&
            (wifirssi_state == BTC_RSSI_STATE_LOW ||
             wifirssi_state == BTC_RSSI_STATE_STAY_LOW)) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], A2dp, wifi/bt rssi both LOW!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], A2dp, wifi/bt rssi both LOW!!\n");
                long_dist = true;
        }
        if (long_dist) {
@@ -3100,105 +3081,105 @@ static void halbtc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
 {
        u8 algorithm = 0;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                 "[BTCoex], RunCoexistMechanism()===>\n");
+       btc_alg_dbg(ALGO_TRACE,
+                   "[BTCoex], RunCoexistMechanism()===>\n");
 
        if (btcoexist->manual_control) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], return for Manual CTRL <===\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], return for Manual CTRL <===\n");
                return;
        }
 
        if (coex_sta->under_ips) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], wifi is under IPS !!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], wifi is under IPS !!!\n");
                return;
        }
 
        algorithm = halbtc8192e2ant_action_algorithm(btcoexist);
        if (coex_sta->c2h_bt_inquiry_page &&
            (BT_8192E_2ANT_COEX_ALGO_PANHS != algorithm)) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], BT is under inquiry/page scan !!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], BT is under inquiry/page scan !!\n");
                halbtc8192e2ant_action_bt_inquiry(btcoexist);
                return;
        }
 
        coex_dm->cur_algorithm = algorithm;
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                 "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
+       btc_alg_dbg(ALGO_TRACE,
+                   "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
 
        if (halbtc8192e2ant_is_common_action(btcoexist)) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], Action 2-Ant common.\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], Action 2-Ant common\n");
                coex_dm->auto_tdma_adjust = false;
        } else {
                if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex] preAlgorithm=%d, curAlgorithm=%d\n",
-                                 coex_dm->pre_algorithm,
-                                 coex_dm->cur_algorithm);
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex] preAlgorithm=%d, curAlgorithm=%d\n",
+                                   coex_dm->pre_algorithm,
+                                   coex_dm->cur_algorithm);
                        coex_dm->auto_tdma_adjust = false;
                }
                switch (coex_dm->cur_algorithm) {
                case BT_8192E_2ANT_COEX_ALGO_SCO:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "Action 2-Ant, algorithm = SCO.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "Action 2-Ant, algorithm = SCO\n");
                        halbtc8192e2ant_action_sco(btcoexist);
                        break;
                case BT_8192E_2ANT_COEX_ALGO_SCO_PAN:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "Action 2-Ant, algorithm = SCO+PAN(EDR).\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "Action 2-Ant, algorithm = SCO+PAN(EDR)\n");
                        halbtc8192e2ant_action_sco_pan(btcoexist);
                        break;
                case BT_8192E_2ANT_COEX_ALGO_HID:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "Action 2-Ant, algorithm = HID.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "Action 2-Ant, algorithm = HID\n");
                        halbtc8192e2ant_action_hid(btcoexist);
                        break;
                case BT_8192E_2ANT_COEX_ALGO_A2DP:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "Action 2-Ant, algorithm = A2DP.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "Action 2-Ant, algorithm = A2DP\n");
                        halbtc8192e2ant_action_a2dp(btcoexist);
                        break;
                case BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "Action 2-Ant, algorithm = A2DP+PAN(HS).\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
                        halbtc8192e2ant_action_a2dp_pan_hs(btcoexist);
                        break;
                case BT_8192E_2ANT_COEX_ALGO_PANEDR:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "Action 2-Ant, algorithm = PAN(EDR).\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "Action 2-Ant, algorithm = PAN(EDR)\n");
                        halbtc8192e2ant_action_pan_edr(btcoexist);
                        break;
                case BT_8192E_2ANT_COEX_ALGO_PANHS:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "Action 2-Ant, algorithm = HS mode.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "Action 2-Ant, algorithm = HS mode\n");
                        halbtc8192e2ant_action_pan_hs(btcoexist);
                        break;
                case BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "Action 2-Ant, algorithm = PAN+A2DP.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "Action 2-Ant, algorithm = PAN+A2DP\n");
                        halbtc8192e2ant_action_pan_edr_a2dp(btcoexist);
                        break;
                case BT_8192E_2ANT_COEX_ALGO_PANEDR_HID:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "Action 2-Ant, algorithm = PAN(EDR)+HID.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "Action 2-Ant, algorithm = PAN(EDR)+HID\n");
                        halbtc8192e2ant_action_pan_edr_hid(btcoexist);
                        break;
                case BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "Action 2-Ant, algorithm = HID+A2DP+PAN.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "Action 2-Ant, algorithm = HID+A2DP+PAN\n");
                        btc8192e2ant_action_hid_a2dp_pan_edr(btcoexist);
                        break;
                case BT_8192E_2ANT_COEX_ALGO_HID_A2DP:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "Action 2-Ant, algorithm = HID+A2DP.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "Action 2-Ant, algorithm = HID+A2DP\n");
                        halbtc8192e2ant_action_hid_a2dp(btcoexist);
                        break;
                default:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "Action 2-Ant, algorithm = unknown!!\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "Action 2-Ant, algorithm = unknown!!\n");
                        /* halbtc8192e2ant_coex_alloff(btcoexist); */
                        break;
                }
@@ -3212,8 +3193,8 @@ static void halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist,
        u16 u16tmp = 0;
        u8 u8tmp = 0;
 
-       BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                 "[BTCoex], 2Ant Init HW Config!!\n");
+       btc_iface_dbg(INTF_INIT,
+                     "[BTCoex], 2Ant Init HW Config!!\n");
 
        if (backup) {
                /* backup rf 0x1e value */
@@ -3296,8 +3277,8 @@ void ex_halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist)
 
 void ex_halbtc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist)
 {
-       BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                 "[BTCoex], Coex Mechanism Init!!\n");
+       btc_iface_dbg(INTF_INIT,
+                     "[BTCoex], Coex Mechanism Init!!\n");
        halbtc8192e2ant_init_coex_dm(btcoexist);
 }
 
@@ -3525,13 +3506,13 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
 void ex_halbtc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
 {
        if (BTC_IPS_ENTER == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], IPS ENTER notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], IPS ENTER notify\n");
                coex_sta->under_ips = true;
                halbtc8192e2ant_coex_alloff(btcoexist);
        } else if (BTC_IPS_LEAVE == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], IPS LEAVE notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], IPS LEAVE notify\n");
                coex_sta->under_ips = false;
        }
 }
@@ -3539,12 +3520,12 @@ void ex_halbtc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
 void ex_halbtc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
 {
        if (BTC_LPS_ENABLE == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], LPS ENABLE notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], LPS ENABLE notify\n");
                coex_sta->under_lps = true;
        } else if (BTC_LPS_DISABLE == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], LPS DISABLE notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], LPS DISABLE notify\n");
                coex_sta->under_lps = false;
        }
 }
@@ -3552,21 +3533,21 @@ void ex_halbtc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
 void ex_halbtc8192e2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
 {
        if (BTC_SCAN_START == type)
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], SCAN START notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], SCAN START notify\n");
        else if (BTC_SCAN_FINISH == type)
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], SCAN FINISH notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], SCAN FINISH notify\n");
 }
 
 void ex_halbtc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
 {
        if (BTC_ASSOCIATE_START == type)
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], CONNECT START notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], CONNECT START notify\n");
        else if (BTC_ASSOCIATE_FINISH == type)
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], CONNECT FINISH notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], CONNECT FINISH notify\n");
 }
 
 void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
@@ -3582,11 +3563,11 @@ void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
                return;
 
        if (BTC_MEDIA_CONNECT == type)
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], MEDIA connect notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], MEDIA connect notify\n");
        else
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], MEDIA disconnect notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], MEDIA disconnect notify\n");
 
        /* only 2.4G we need to inform bt the chnl mask */
        btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL,
@@ -3606,10 +3587,10 @@ void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
        coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
        coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], FW write 0x66 = 0x%x\n",
-                 h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
-                 h2c_parameter[2]);
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], FW write 0x66 = 0x%x\n",
+                   h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+                   h2c_parameter[2]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
 }
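
The trace line in the hunk above folds the three-byte channel-info payload into a single word purely for logging. A worked example with hypothetical payload bytes:

	/* Hypothetical values, shown only to illustrate the trace format. */
	u8 h2c_parameter[3] = { 0x20, 0x01, 0x30 };
	u32 logged = h2c_parameter[0] << 16 |
		     h2c_parameter[1] << 8 |
		     h2c_parameter[2];	/* 0x200130 -> "FW write 0x66 = 0x200130" */
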
@@ -3618,8 +3599,8 @@ void ex_halbtc8192e2ant_special_packet_notify(struct btc_coexist *btcoexist,
                                              u8 type)
 {
        if (type == BTC_PACKET_DHCP)
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], DHCP Packet notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], DHCP Packet notify\n");
 }
 
 void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
@@ -3637,19 +3618,19 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
                rsp_source = BT_INFO_SRC_8192E_2ANT_WIFI_FW;
        coex_sta->bt_info_c2h_cnt[rsp_source]++;
 
-       BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                 "[BTCoex], Bt info[%d], length=%d, hex data = [",
-                 rsp_source, length);
+       btc_iface_dbg(INTF_NOTIFY,
+                     "[BTCoex], Bt info[%d], length=%d, hex data = [",
+                     rsp_source, length);
        for (i = 0; i < length; i++) {
                coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
                if (i == 1)
                        bt_info = tmp_buf[i];
                if (i == length-1)
-                       BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                                 "0x%02x]\n", tmp_buf[i]);
+                       btc_iface_dbg(INTF_NOTIFY,
+                                     "0x%02x]\n", tmp_buf[i]);
                else
-                       BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                                 "0x%02x, ", tmp_buf[i]);
+                       btc_iface_dbg(INTF_NOTIFY,
+                                     "0x%02x, ", tmp_buf[i]);
        }
 
        if (BT_INFO_SRC_8192E_2ANT_WIFI_FW != rsp_source) {
@@ -3666,8 +3647,8 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
                 * because bt is reset and loss of the info.
                 */
                if ((coex_sta->bt_info_ext & BIT1)) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "bit1, send wifi BW&Chnl to BT!!\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "bit1, send wifi BW&Chnl to BT!!\n");
                        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
                                           &wifi_connected);
                        if (wifi_connected)
@@ -3683,8 +3664,8 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
                if ((coex_sta->bt_info_ext & BIT3)) {
                        if (!btcoexist->manual_control &&
                            !btcoexist->stop_coex_dm) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "bit3, BT NOT ignore Wlan active!\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "bit3, BT NOT ignore Wlan active!\n");
                                halbtc8192e2ant_IgnoreWlanAct(btcoexist,
                                                              FORCE_EXEC,
                                                              false);
@@ -3742,25 +3723,25 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
 
        if (!(bt_info&BT_INFO_8192E_2ANT_B_CONNECTION)) {
                coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE;
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], BT Non-Connected idle!!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], BT Non-Connected idle!!!\n");
        } else if (bt_info == BT_INFO_8192E_2ANT_B_CONNECTION) {
                coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE;
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], bt_infoNotify(), BT Connected-idle!!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], bt_infoNotify(), BT Connected-idle!!!\n");
        } else if ((bt_info&BT_INFO_8192E_2ANT_B_SCO_ESCO) ||
                   (bt_info&BT_INFO_8192E_2ANT_B_SCO_BUSY)) {
                coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_SCO_BUSY;
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], bt_infoNotify(), BT SCO busy!!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], bt_infoNotify(), BT SCO busy!!!\n");
        } else if (bt_info&BT_INFO_8192E_2ANT_B_ACL_BUSY) {
                coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_ACL_BUSY;
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], bt_infoNotify(), BT ACL busy!!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], bt_infoNotify(), BT ACL busy!!!\n");
        } else {
                coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_MAX;
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex]bt_infoNotify(), BT Non-Defined state!!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex]bt_infoNotify(), BT Non-Defined state!!!\n");
        }
 
        if ((BT_8192E_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
@@ -3788,7 +3769,7 @@ void ex_halbtc8192e2ant_stack_operation_notify(struct btc_coexist *btcoexist,
 
 void ex_halbtc8192e2ant_halt_notify(struct btc_coexist *btcoexist)
 {
-       BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n");
+       btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Halt notify\n");
 
        halbtc8192e2ant_IgnoreWlanAct(btcoexist, FORCE_EXEC, true);
        ex_halbtc8192e2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
@@ -3801,29 +3782,29 @@ void ex_halbtc8192e2ant_periodical(struct btc_coexist *btcoexist)
        struct btc_board_info *board_info = &btcoexist->board_info;
        struct btc_stack_info *stack_info = &btcoexist->stack_info;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                 "=======================Periodical=======================\n");
+       btc_alg_dbg(ALGO_TRACE,
+                   "=======================Periodical=======================\n");
        if (dis_ver_info_cnt <= 5) {
                dis_ver_info_cnt += 1;
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                         "************************************************\n");
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                         "Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
-                         board_info->pg_ant_num, board_info->btdm_ant_num,
-                         board_info->btdm_ant_pos);
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                         "BT stack/ hci ext ver = %s / %d\n",
-                         ((stack_info->profile_notified) ? "Yes" : "No"),
-                         stack_info->hci_version);
+               btc_iface_dbg(INTF_INIT,
+                             "************************************************\n");
+               btc_iface_dbg(INTF_INIT,
+                             "Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+                             board_info->pg_ant_num, board_info->btdm_ant_num,
+                             board_info->btdm_ant_pos);
+               btc_iface_dbg(INTF_INIT,
+                             "BT stack/ hci ext ver = %s / %d\n",
+                             ((stack_info->profile_notified) ? "Yes" : "No"),
+                             stack_info->hci_version);
                btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
                                   &bt_patch_ver);
                btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                         "CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
-                         glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
-                         fw_ver, bt_patch_ver, bt_patch_ver);
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                         "************************************************\n");
+               btc_iface_dbg(INTF_INIT,
+                             "CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+                             glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
+                             fw_ver, bt_patch_ver, bt_patch_ver);
+               btc_iface_dbg(INTF_INIT,
+                             "************************************************\n");
        }
 
 #if (BT_AUTO_REPORT_ONLY_8192E_2ANT == 0)
index 7e239d3cea268b739bdfdc6add736d9f33342178..16add42a62af7e0d824f98a4a4984a63452d41b5 100644 (file)
@@ -74,28 +74,28 @@ static u8 halbtc8723b1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
                        if (bt_rssi >= rssi_thresh +
                                        BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
                                bt_rssi_state = BTC_RSSI_STATE_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state switch to High\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state switch to High\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state stay at Low\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state stay at Low\n");
                        }
                } else {
                        if (bt_rssi < rssi_thresh) {
                                bt_rssi_state = BTC_RSSI_STATE_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state switch to Low\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state switch to Low\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state stay at High\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state stay at High\n");
                        }
                }
        } else if (level_num == 3) {
                if (rssi_thresh > rssi_thresh1) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                 "[BTCoex], BT Rssi thresh error!!\n");
+                       btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                   "[BTCoex], BT Rssi thresh error!!\n");
                        return coex_sta->pre_bt_rssi_state;
                }
 
@@ -104,12 +104,12 @@ static u8 halbtc8723b1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
                        if (bt_rssi >= rssi_thresh +
                                        BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
                                bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state switch to Medium\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state switch to Medium\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state stay at Low\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state stay at Low\n");
                        }
                } else if ((coex_sta->pre_bt_rssi_state ==
                                        BTC_RSSI_STATE_MEDIUM) ||
@@ -118,26 +118,26 @@ static u8 halbtc8723b1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
                        if (bt_rssi >= rssi_thresh1 +
                                        BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
                                bt_rssi_state = BTC_RSSI_STATE_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state switch to High\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state switch to High\n");
                        } else if (bt_rssi < rssi_thresh) {
                                bt_rssi_state = BTC_RSSI_STATE_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state switch to Low\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state switch to Low\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state stay at Medium\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state stay at Medium\n");
                        }
                } else {
                        if (bt_rssi < rssi_thresh1) {
                                bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state switch to Medium\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state switch to Medium\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state stay at High\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state stay at High\n");
                        }
                }
        }
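
The level_num == 2 branch above implements a simple hysteresis: a LOW state is only promoted once the RSSI clears the threshold plus BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT, while demotion happens as soon as it drops below the bare threshold, so a signal hovering at the boundary cannot flap between states. Reduced to a stand-alone helper (illustrative name, not part of the driver):

	static u8 rssi_state_2level(u8 prev, int rssi, int thresh, int tol)
	{
		if (prev == BTC_RSSI_STATE_LOW ||
		    prev == BTC_RSSI_STATE_STAY_LOW)
			/* promote only past thresh + tolerance */
			return (rssi >= thresh + tol) ?
				BTC_RSSI_STATE_HIGH : BTC_RSSI_STATE_STAY_LOW;

		/* demote as soon as we fall under the bare threshold */
		return (rssi < thresh) ?
			BTC_RSSI_STATE_LOW : BTC_RSSI_STATE_STAY_HIGH;
	}

The three-level variant in the same hunks applies the identical promote/demote asymmetry twice, once per threshold.
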
@@ -165,32 +165,28 @@ static u8 halbtc8723b1ant_wifi_rssi_state(struct btc_coexist *btcoexist,
                        if (wifi_rssi >= rssi_thresh +
                                        BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
                                wifi_rssi_state = BTC_RSSI_STATE_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state switch to High\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state switch to High\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state stay at Low\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state stay at Low\n");
                        }
                } else {
                        if (wifi_rssi < rssi_thresh) {
                                wifi_rssi_state = BTC_RSSI_STATE_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state switch to Low\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state switch to Low\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state stay at High\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state stay at High\n");
                        }
                }
        } else if (level_num == 3) {
                if (rssi_thresh > rssi_thresh1) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE,
-                                 "[BTCoex], wifi RSSI thresh error!!\n");
+                       btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                   "[BTCoex], wifi RSSI thresh error!!\n");
                        return coex_sta->pre_wifi_rssi_state[index];
                }
 
@@ -201,14 +197,12 @@ static u8 halbtc8723b1ant_wifi_rssi_state(struct btc_coexist *btcoexist,
                        if (wifi_rssi >= rssi_thresh +
                                         BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
                                wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state switch to Medium\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state switch to Medium\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state stay at Low\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state stay at Low\n");
                        }
                } else if ((coex_sta->pre_wifi_rssi_state[index] ==
                                                BTC_RSSI_STATE_MEDIUM) ||
@@ -217,31 +211,26 @@ static u8 halbtc8723b1ant_wifi_rssi_state(struct btc_coexist *btcoexist,
                        if (wifi_rssi >= rssi_thresh1 +
                                         BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
                                wifi_rssi_state = BTC_RSSI_STATE_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state switch to High\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state switch to High\n");
                        } else if (wifi_rssi < rssi_thresh) {
                                wifi_rssi_state = BTC_RSSI_STATE_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state switch to Low\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state switch to Low\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state stay at Medium\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state stay at Medium\n");
                        }
                } else {
                        if (wifi_rssi < rssi_thresh1) {
                                wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state switch to Medium\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state switch to Medium\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state stay at High\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state stay at High\n");
                        }
                }
        }
@@ -435,9 +424,9 @@ static void halbtc8723b1ant_query_bt_info(struct btc_coexist *btcoexist)
 
        h2c_parameter[0] |= BIT0;       /* trigger*/
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
-                 h2c_parameter[0]);
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+                   h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
 }
@@ -532,8 +521,8 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
        btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
 
        if (!bt_link_info->bt_link_exist) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], No BT link exists!!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], No BT link exists!!!\n");
                return algorithm;
        }
 
@@ -548,27 +537,27 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
 
        if (numdiffprofile == 1) {
                if (bt_link_info->sco_exist) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], BT Profile = SCO only\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], BT Profile = SCO only\n");
                        algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
                } else {
                        if (bt_link_info->hid_exist) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "[BTCoex], BT Profile = HID only\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "[BTCoex], BT Profile = HID only\n");
                                algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
                        } else if (bt_link_info->a2dp_exist) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "[BTCoex], BT Profile = A2DP only\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "[BTCoex], BT Profile = A2DP only\n");
                                algorithm = BT_8723B_1ANT_COEX_ALGO_A2DP;
                        } else if (bt_link_info->pan_exist) {
                                if (bt_hs_on) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], BT Profile = PAN(HS) only\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], BT Profile = PAN(HS) only\n");
                                        algorithm =
                                                BT_8723B_1ANT_COEX_ALGO_PANHS;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], BT Profile = PAN(EDR) only\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], BT Profile = PAN(EDR) only\n");
                                        algorithm =
                                                BT_8723B_1ANT_COEX_ALGO_PANEDR;
                                }
@@ -577,21 +566,21 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
        } else if (numdiffprofile == 2) {
                if (bt_link_info->sco_exist) {
                        if (bt_link_info->hid_exist) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "[BTCoex], BT Profile = SCO + HID\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "[BTCoex], BT Profile = SCO + HID\n");
                                algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
                        } else if (bt_link_info->a2dp_exist) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n");
                                algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
                        } else if (bt_link_info->pan_exist) {
                                if (bt_hs_on) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], BT Profile = SCO + PAN(HS)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], BT Profile = SCO + PAN(HS)\n");
                                        algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], BT Profile = SCO + PAN(EDR)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], BT Profile = SCO + PAN(EDR)\n");
                                        algorithm =
                                            BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
                                }
@@ -599,32 +588,32 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
                } else {
                        if (bt_link_info->hid_exist &&
                            bt_link_info->a2dp_exist) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "[BTCoex], BT Profile = HID + A2DP\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "[BTCoex], BT Profile = HID + A2DP\n");
                                algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
                        } else if (bt_link_info->hid_exist &&
                                   bt_link_info->pan_exist) {
                                if (bt_hs_on) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], BT Profile = HID + PAN(HS)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], BT Profile = HID + PAN(HS)\n");
                                        algorithm =
                                            BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], BT Profile = HID + PAN(EDR)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], BT Profile = HID + PAN(EDR)\n");
                                        algorithm =
                                            BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
                                }
                        } else if (bt_link_info->pan_exist &&
                                   bt_link_info->a2dp_exist) {
                                if (bt_hs_on) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], BT Profile = A2DP + PAN(HS)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], BT Profile = A2DP + PAN(HS)\n");
                                        algorithm =
                                            BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], BT Profile = A2DP + PAN(EDR)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], BT Profile = A2DP + PAN(EDR)\n");
                                        algorithm =
                                            BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP;
                                }
@@ -634,31 +623,31 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
                if (bt_link_info->sco_exist) {
                        if (bt_link_info->hid_exist &&
                            bt_link_info->a2dp_exist) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n");
                                algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
                        } else if (bt_link_info->hid_exist &&
                                   bt_link_info->pan_exist) {
                                if (bt_hs_on) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n");
                                        algorithm =
                                            BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n");
                                        algorithm =
                                            BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
                                }
                        } else if (bt_link_info->pan_exist &&
                                   bt_link_info->a2dp_exist) {
                                if (bt_hs_on) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n");
                                        algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n");
                                        algorithm =
                                            BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
                                }
@@ -668,13 +657,13 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
                            bt_link_info->pan_exist &&
                            bt_link_info->a2dp_exist) {
                                if (bt_hs_on) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n");
                                        algorithm =
                                            BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n");
                                        algorithm =
                                            BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR;
                                }
@@ -686,11 +675,11 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
                            bt_link_info->pan_exist &&
                            bt_link_info->a2dp_exist) {
                                if (bt_hs_on) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n");
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
                                        algorithm =
                                            BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
                                }
@@ -717,9 +706,9 @@ static void btc8723b1ant_set_sw_pen_tx_rate_adapt(struct btc_coexist *btcoexist,
                h2c_parameter[5] = 0xf9;  /*MCS5 or OFDM36 */
        }
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], set WiFi Low-Penalty Retry: %s",
-                 (low_penalty_ra ? "ON!!" : "OFF!!"));
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], set WiFi Low-Penalty Retry: %s",
+                   (low_penalty_ra ? "ON!!" : "OFF!!"));
 
        btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
 }
@@ -743,20 +732,20 @@ static void halbtc8723b1ant_set_coex_table(struct btc_coexist *btcoexist,
                                           u32 val0x6c0, u32 val0x6c4,
                                           u32 val0x6c8, u8 val0x6cc)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                 "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
+       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                   "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
        btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                 "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
+       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                   "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
        btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                 "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
+       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                   "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
        btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                 "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
+       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                   "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
        btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
 }
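
halbtc8723b1ant_set_coex_table() above is the raw writer for the four arbitration registers (0x6c0/0x6c4/0x6c8 as 4-byte writes, 0x6cc as a 1-byte write); callers normally go through the cached halbtc8723b1ant_coex_table() wrapper in the next hunk. A hypothetical invocation, with illustrative values not taken from this driver:

	/* Values are placeholders for illustration only. */
	halbtc8723b1ant_coex_table(btcoexist, NORMAL_EXEC,
				   0x55555555, 0xaaaaaaaa, 0xffffff, 0x3);
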
 
@@ -765,10 +754,10 @@ static void halbtc8723b1ant_coex_table(struct btc_coexist *btcoexist,
                                       u32 val0x6c4, u32 val0x6c8,
                                       u8 val0x6cc)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-                 "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6cc = 0x%x\n",
-                 (force_exec ? "force to" : ""),
-                 val0x6c0, val0x6c4, val0x6cc);
+       btc_alg_dbg(ALGO_TRACE_SW,
+                   "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6cc = 0x%x\n",
+                   (force_exec ? "force to" : ""),
+                   val0x6c0, val0x6c4, val0x6cc);
        coex_dm->cur_val0x6c0 = val0x6c0;
        coex_dm->cur_val0x6c4 = val0x6c4;
        coex_dm->cur_val0x6c8 = val0x6c8;
@@ -839,9 +828,9 @@ static void halbtc8723b1ant_SetFwIgnoreWlanAct(struct btc_coexist *btcoexist,
        if (enable)
                h2c_parameter[0] |= BIT0;       /* function enable */
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
-                 h2c_parameter[0]);
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
+                   h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
 }
@@ -849,16 +838,16 @@ static void halbtc8723b1ant_SetFwIgnoreWlanAct(struct btc_coexist *btcoexist,
 static void halbtc8723b1ant_ignore_wlan_act(struct btc_coexist *btcoexist,
                                            bool force_exec, bool enable)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-                 "[BTCoex], %s turn Ignore WlanAct %s\n",
-                 (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+       btc_alg_dbg(ALGO_TRACE_FW,
+                   "[BTCoex], %s turn Ignore WlanAct %s\n",
+                   (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
        coex_dm->cur_ignore_wlan_act = enable;
 
        if (!force_exec) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
-                         coex_dm->pre_ignore_wlan_act,
-                         coex_dm->cur_ignore_wlan_act);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
+                           coex_dm->pre_ignore_wlan_act,
+                           coex_dm->cur_ignore_wlan_act);
 
                if (coex_dm->pre_ignore_wlan_act ==
                    coex_dm->cur_ignore_wlan_act)
@@ -882,8 +871,8 @@ static void halbtc8723b1ant_set_fw_ps_tdma(struct btc_coexist *btcoexist,
 
        if (ap_enable) {
                if ((byte1 & BIT4) && !(byte1 & BIT5)) {
-                       BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                                 "[BTCoex], FW for 1Ant AP mode\n");
+                       btc_iface_dbg(INTF_NOTIFY,
+                                     "[BTCoex], FW for 1Ant AP mode\n");
                        real_byte1 &= ~BIT4;
                        real_byte1 |= BIT5;
 
@@ -904,13 +893,13 @@ static void halbtc8723b1ant_set_fw_ps_tdma(struct btc_coexist *btcoexist,
        coex_dm->ps_tdma_para[3] = byte4;
        coex_dm->ps_tdma_para[4] = real_byte5;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
-                 h2c_parameter[0],
-                 h2c_parameter[1] << 24 |
-                 h2c_parameter[2] << 16 |
-                 h2c_parameter[3] << 8 |
-                 h2c_parameter[4]);
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
+                   h2c_parameter[0],
+                   h2c_parameter[1] << 24 |
+                   h2c_parameter[2] << 16 |
+                   h2c_parameter[3] << 8 |
+                   h2c_parameter[4]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
 }
@@ -929,22 +918,22 @@ static void halbtc8723b1ant_LpsRpwm(struct btc_coexist *btcoexist,
                                    bool force_exec,
                                    u8 lps_val, u8 rpwm_val)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-                 "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
-                 (force_exec ? "force to" : ""), lps_val, rpwm_val);
+       btc_alg_dbg(ALGO_TRACE_FW,
+                   "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
+                   (force_exec ? "force to" : ""), lps_val, rpwm_val);
        coex_dm->cur_lps = lps_val;
        coex_dm->cur_rpwm = rpwm_val;
 
        if (!force_exec) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], LPS-RxBeaconMode = 0x%x , LPS-RPWM = 0x%x!!\n",
-                         coex_dm->cur_lps, coex_dm->cur_rpwm);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], LPS-RxBeaconMode = 0x%x , LPS-RPWM = 0x%x!!\n",
+                           coex_dm->cur_lps, coex_dm->cur_rpwm);
 
                if ((coex_dm->pre_lps == coex_dm->cur_lps) &&
                    (coex_dm->pre_rpwm == coex_dm->cur_rpwm)) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                                 "[BTCoex], LPS-RPWM_Last = 0x%x , LPS-RPWM_Now = 0x%x!!\n",
-                                 coex_dm->pre_rpwm, coex_dm->cur_rpwm);
+                       btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                                   "[BTCoex], LPS-RPWM_Last = 0x%x , LPS-RPWM_Now = 0x%x!!\n",
+                                   coex_dm->pre_rpwm, coex_dm->cur_rpwm);
 
                        return;
                }
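
The LpsRpwm hunk above shows the caching convention used by nearly every setter in this file: each parameter keeps a pre_* and cur_* copy, and without FORCE_EXEC the hardware/firmware write is skipped when nothing changed. Stripped of the tracing, the skeleton looks roughly like this (not verbatim driver code):

	coex_dm->cur_lps = lps_val;
	coex_dm->cur_rpwm = rpwm_val;
	if (!force_exec &&
	    coex_dm->pre_lps == coex_dm->cur_lps &&
	    coex_dm->pre_rpwm == coex_dm->cur_rpwm)
		return;			/* no change: skip the write */
	/* delegate to the set-level helper, then latch the new state */
	halbtc8723b1ant_set_lps_rpwm(btcoexist, lps_val, rpwm_val);
	coex_dm->pre_lps = coex_dm->cur_lps;
	coex_dm->pre_rpwm = coex_dm->cur_rpwm;
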
@@ -958,8 +947,8 @@ static void halbtc8723b1ant_LpsRpwm(struct btc_coexist *btcoexist,
 static void halbtc8723b1ant_sw_mechanism(struct btc_coexist *btcoexist,
                                         bool low_penalty_ra)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-                 "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
+       btc_alg_dbg(ALGO_BT_MONITOR,
+                   "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
 
        halbtc8723b1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
 }
@@ -1174,13 +1163,13 @@ static void halbtc8723b1ant_ps_tdma(struct btc_coexist *btcoexist,
 
        if (!force_exec) {
                if (coex_dm->cur_ps_tdma_on)
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                                 "[BTCoex], ******** TDMA(on, %d) *********\n",
-                                 coex_dm->cur_ps_tdma);
+                       btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                                   "[BTCoex], ******** TDMA(on, %d) *********\n",
+                                   coex_dm->cur_ps_tdma);
                else
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                                 "[BTCoex], ******** TDMA(off, %d) ********\n",
-                                 coex_dm->cur_ps_tdma);
+                       btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                                   "[BTCoex], ******** TDMA(off, %d) ********\n",
+                                   coex_dm->cur_ps_tdma);
 
                if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
                    (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1394,45 +1383,45 @@ static bool halbtc8723b1ant_is_common_action(struct btc_coexist *btcoexist)
 
        if (!wifi_connected &&
            BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
                halbtc8723b1ant_sw_mechanism(btcoexist, false);
                commom = true;
        } else if (wifi_connected &&
                   (BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
                    coex_dm->bt_status)) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], Wifi connected + BT non connected-idle!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], Wifi connected + BT non connected-idle!!\n");
                halbtc8723b1ant_sw_mechanism(btcoexist, false);
                commom = true;
        } else if (!wifi_connected &&
                   (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
                    coex_dm->bt_status)) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
                halbtc8723b1ant_sw_mechanism(btcoexist, false);
                commom = true;
        } else if (wifi_connected &&
                   (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
                    coex_dm->bt_status)) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], Wifi connected + BT connected-idle!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], Wifi connected + BT connected-idle!!\n");
                halbtc8723b1ant_sw_mechanism(btcoexist, false);
                commom = true;
        } else if (!wifi_connected &&
                   (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE !=
                    coex_dm->bt_status)) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         ("[BTCoex], Wifi non connected-idle + BT Busy!!\n"));
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], Wifi non connected-idle + BT Busy!!\n");
                halbtc8723b1ant_sw_mechanism(btcoexist, false);
                commom = true;
        } else {
                if (wifi_busy)
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
                else
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
 
                commom = false;
        }
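
The hunks below touch btc8723b1ant_tdma_dur_adj_for_acl(), which tunes the WiFi share of the PS-TDMA cycle from the BT retry counter: clean polls vote to widen the WiFi slot, noisy polls vote to shrink it, and consecutive-sample counters damp the reaction, as the trace strings in the hunks show. A loose stand-alone reduction of that voting (thresholds simplified; the helper itself is hypothetical):

	/* Returns +1 to widen the wifi slot, -1 to shrink it, 0 to hold. */
	static int tdma_vote(u8 retry_count, int *up, int *dn)
	{
		if (retry_count == 0) {		/* clean BT interval */
			if (++(*up) >= 2) {
				*up = 0;
				*dn = 0;
				return 1;
			}
		} else if (retry_count <= 3) {	/* mildly noisy */
			*up = 0;
			if (++(*dn) >= 2) {
				*dn = 0;
				return -1;
			}
		} else {			/* badly noisy: react at once */
			*up = 0;
			*dn = 0;
			return -1;
		}
		return 0;
	}
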
@@ -1451,8 +1440,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
        u8 retry_count = 0, bt_info_ext;
        bool wifi_busy = false;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-                 "[BTCoex], TdmaDurationAdjustForAcl()\n");
+       btc_alg_dbg(ALGO_TRACE_FW,
+                   "[BTCoex], TdmaDurationAdjustForAcl()\n");
 
        if (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY == wifi_status)
                wifi_busy = true;
@@ -1481,8 +1470,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
 
        if (!coex_dm->auto_tdma_adjust) {
                coex_dm->auto_tdma_adjust = true;
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], first run TdmaDurationAdjust()!!\n");
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], first run TdmaDurationAdjust()!!\n");
 
                halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
                coex_dm->tdma_adj_type = 2;
@@ -1513,9 +1502,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
                                up = 0;
                                dn = 0;
                                result = 1;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_TRACE_FW_DETAIL,
-                                         "[BTCoex], Increase wifi duration!!\n");
+                               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                                           "[BTCoex], Increase wifi duration!!\n");
                        }
                } else if (retry_count <= 3) {
                        up--;
@@ -1538,9 +1526,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
                                dn = 0;
                                wait_count = 0;
                                result = -1;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_TRACE_FW_DETAIL,
-                                         "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
+                               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                                           "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
                        }
                } else {
                        if (wait_count == 1)
@@ -1556,8 +1543,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
                        dn = 0;
                        wait_count = 0;
                        result = -1;
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                                 "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
+                       btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                                   "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
                }
 
                if (result == -1) {
@@ -1602,9 +1589,9 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
                        }
                } else {          /*no change */
                        /*if busy / idle change */
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                                 "[BTCoex],********* TDMA(on, %d) ********\n",
-                                 coex_dm->cur_ps_tdma);
+                       btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                                   "[BTCoex],********* TDMA(on, %d) ********\n",
+                                   coex_dm->cur_ps_tdma);
                }
 
                if (coex_dm->cur_ps_tdma != 1 && coex_dm->cur_ps_tdma != 2 &&
@@ -2010,15 +1997,15 @@ static void halbtc8723b1ant_action_wifi_connected(struct btc_coexist *btcoexist)
        bool scan = false, link = false, roam = false;
        bool under_4way = false, ap_enable = false;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                 "[BTCoex], CoexForWifiConnect()===>\n");
+       btc_alg_dbg(ALGO_TRACE,
+                   "[BTCoex], CoexForWifiConnect()===>\n");
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
                           &under_4way);
        if (under_4way) {
                halbtc8723b1ant_action_wifi_connected_special_packet(btcoexist);
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n");
                return;
        }
 
@@ -2032,8 +2019,8 @@ static void halbtc8723b1ant_action_wifi_connected(struct btc_coexist *btcoexist)
                else
                        halbtc8723b1ant_action_wifi_connected_special_packet(
                                                                     btcoexist);
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
                return;
        }
 
@@ -2102,58 +2089,58 @@ static void btc8723b1ant_run_sw_coex_mech(struct btc_coexist *btcoexist)
        if (!halbtc8723b1ant_is_common_action(btcoexist)) {
                switch (coex_dm->cur_algorithm) {
                case BT_8723B_1ANT_COEX_ALGO_SCO:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action algorithm = SCO.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action algorithm = SCO\n");
                        halbtc8723b1ant_action_sco(btcoexist);
                        break;
                case BT_8723B_1ANT_COEX_ALGO_HID:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action algorithm = HID.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action algorithm = HID\n");
                        halbtc8723b1ant_action_hid(btcoexist);
                        break;
                case BT_8723B_1ANT_COEX_ALGO_A2DP:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action algorithm = A2DP.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action algorithm = A2DP\n");
                        halbtc8723b1ant_action_a2dp(btcoexist);
                        break;
                case BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action algorithm = A2DP+PAN(HS).\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action algorithm = A2DP+PAN(HS)\n");
                        halbtc8723b1ant_action_a2dp_pan_hs(btcoexist);
                        break;
                case BT_8723B_1ANT_COEX_ALGO_PANEDR:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action algorithm = PAN(EDR).\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action algorithm = PAN(EDR)\n");
                        halbtc8723b1ant_action_pan_edr(btcoexist);
                        break;
                case BT_8723B_1ANT_COEX_ALGO_PANHS:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action algorithm = HS mode.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action algorithm = HS mode\n");
                        halbtc8723b1ant_action_pan_hs(btcoexist);
                        break;
                case BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action algorithm = PAN+A2DP.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action algorithm = PAN+A2DP\n");
                        halbtc8723b1ant_action_pan_edr_a2dp(btcoexist);
                        break;
                case BT_8723B_1ANT_COEX_ALGO_PANEDR_HID:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action algorithm = PAN(EDR)+HID.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action algorithm = PAN(EDR)+HID\n");
                        halbtc8723b1ant_action_pan_edr_hid(btcoexist);
                        break;
                case BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action algorithm = HID+A2DP+PAN.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action algorithm = HID+A2DP+PAN\n");
                        btc8723b1ant_action_hid_a2dp_pan_edr(btcoexist);
                        break;
                case BT_8723B_1ANT_COEX_ALGO_HID_A2DP:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action algorithm = HID+A2DP.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action algorithm = HID+A2DP\n");
                        halbtc8723b1ant_action_hid_a2dp(btcoexist);
                        break;
                default:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action algorithm = coexist All Off!!\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action algorithm = coexist All Off!!\n");
                        break;
                }
                coex_dm->pre_algorithm = coex_dm->cur_algorithm;
@@ -2171,24 +2158,24 @@ static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
        u32 wifi_link_status = 0;
        u32 num_of_wifi_link = 0;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                 "[BTCoex], RunCoexistMechanism()===>\n");
+       btc_alg_dbg(ALGO_TRACE,
+                   "[BTCoex], RunCoexistMechanism()===>\n");
 
        if (btcoexist->manual_control) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
                return;
        }
 
        if (btcoexist->stop_coex_dm) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
                return;
        }
 
        if (coex_sta->under_ips) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], wifi is under IPS !!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], wifi is under IPS !!!\n");
                return;
        }
 
@@ -2267,8 +2254,8 @@ static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
        if (!wifi_connected) {
                bool scan = false, link = false, roam = false;
 
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], wifi is non connected-idle !!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], wifi is non connected-idle !!!\n");
 
                btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
                btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2305,8 +2292,8 @@ static void halbtc8723b1ant_init_hw_config(struct btc_coexist *btcoexist,
        u8 u8tmp = 0;
        u32 cnt_bt_cal_chk = 0;
 
-       BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                 "[BTCoex], 1Ant Init HW Config!!\n");
+       btc_iface_dbg(INTF_INIT,
+                     "[BTCoex], 1Ant Init HW Config!!\n");
 
        if (backup) {/* backup rf 0x1e value */
                coex_dm->backup_arfr_cnt1 =
@@ -2333,14 +2320,14 @@ static void halbtc8723b1ant_init_hw_config(struct btc_coexist *btcoexist,
                u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x49d);
                cnt_bt_cal_chk++;
                if (u32tmp & BIT0) {
-                       BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                                 "[BTCoex], ########### BT calibration(cnt=%d) ###########\n",
-                                 cnt_bt_cal_chk);
+                       btc_iface_dbg(INTF_INIT,
+                                     "[BTCoex], ########### BT calibration(cnt=%d) ###########\n",
+                                     cnt_bt_cal_chk);
                        mdelay(50);
                } else {
-                       BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                                 "[BTCoex], ********** BT NOT calibration (cnt=%d)**********\n",
-                                 cnt_bt_cal_chk);
+                       btc_iface_dbg(INTF_INIT,
+                                     "[BTCoex], ********** BT NOT calibration (cnt=%d)**********\n",
+                                     cnt_bt_cal_chk);
                        break;
                }
        }
@@ -2383,8 +2370,8 @@ void ex_halbtc8723b1ant_init_hwconfig(struct btc_coexist *btcoexist)
 
 void ex_halbtc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist)
 {
-       BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                 "[BTCoex], Coex Mechanism Init!!\n");
+       btc_iface_dbg(INTF_INIT,
+                     "[BTCoex], Coex Mechanism Init!!\n");
 
        btcoexist->stop_coex_dm = false;
 
@@ -2677,8 +2664,8 @@ void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
                return;
 
        if (BTC_IPS_ENTER == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], IPS ENTER notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], IPS ENTER notify\n");
                coex_sta->under_ips = true;
 
                halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT,
@@ -2689,8 +2676,8 @@ void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
                                                     NORMAL_EXEC, 0);
                halbtc8723b1ant_wifi_off_hw_cfg(btcoexist);
        } else if (BTC_IPS_LEAVE == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], IPS LEAVE notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], IPS LEAVE notify\n");
                coex_sta->under_ips = false;
 
                halbtc8723b1ant_init_hw_config(btcoexist, false);
@@ -2705,12 +2692,12 @@ void ex_halbtc8723b1ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
                return;
 
        if (BTC_LPS_ENABLE == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], LPS ENABLE notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], LPS ENABLE notify\n");
                coex_sta->under_lps = true;
        } else if (BTC_LPS_DISABLE == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], LPS DISABLE notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], LPS DISABLE notify\n");
                coex_sta->under_lps = false;
        }
 }
@@ -2753,15 +2740,15 @@ void ex_halbtc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
        }
 
        if (BTC_SCAN_START == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], SCAN START notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], SCAN START notify\n");
                if (!wifi_connected)    /* non-connected scan */
                        btc8723b1ant_action_wifi_not_conn_scan(btcoexist);
                else    /* wifi is connected */
                        btc8723b1ant_action_wifi_conn_scan(btcoexist);
        } else if (BTC_SCAN_FINISH == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], SCAN FINISH notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], SCAN FINISH notify\n");
                if (!wifi_connected)    /* non-connected scan */
                        btc8723b1ant_action_wifi_not_conn(btcoexist);
                else
@@ -2802,12 +2789,12 @@ void ex_halbtc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
        }
 
        if (BTC_ASSOCIATE_START == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], CONNECT START notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], CONNECT START notify\n");
                btc8723b1ant_act_wifi_not_conn_asso_auth(btcoexist);
        } else if (BTC_ASSOCIATE_FINISH == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], CONNECT FINISH notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], CONNECT FINISH notify\n");
 
                btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
                                   &wifi_connected);
@@ -2830,11 +2817,11 @@ void ex_halbtc8723b1ant_media_status_notify(struct btc_coexist *btcoexist,
                return;
 
        if (BTC_MEDIA_CONNECT == type)
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], MEDIA connect notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], MEDIA connect notify\n");
        else
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], MEDIA disconnect notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], MEDIA disconnect notify\n");
 
        /* only 2.4G we need to inform bt the chnl mask */
        btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL,
@@ -2855,10 +2842,10 @@ void ex_halbtc8723b1ant_media_status_notify(struct btc_coexist *btcoexist,
        coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
        coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], FW write 0x66 = 0x%x\n",
-                 h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
-                 h2c_parameter[2]);
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], FW write 0x66 = 0x%x\n",
+                   h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+                   h2c_parameter[2]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
 }
@@ -2900,8 +2887,8 @@ void ex_halbtc8723b1ant_special_packet_notify(struct btc_coexist *btcoexist,
 
        if (BTC_PACKET_DHCP == type ||
            BTC_PACKET_EAPOL == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], special Packet(%d) notify\n", type);
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], special Packet(%d) notify\n", type);
                halbtc8723b1ant_action_wifi_connected_special_packet(btcoexist);
        }
 }
@@ -2921,19 +2908,19 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
                rsp_source = BT_INFO_SRC_8723B_1ANT_WIFI_FW;
        coex_sta->bt_info_c2h_cnt[rsp_source]++;
 
-       BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                 "[BTCoex], Bt info[%d], length=%d, hex data = [",
-                 rsp_source, length);
+       btc_iface_dbg(INTF_NOTIFY,
+                     "[BTCoex], Bt info[%d], length=%d, hex data = [",
+                     rsp_source, length);
        for (i = 0; i < length; i++) {
                coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
                if (i == 1)
                        bt_info = tmp_buf[i];
                if (i == length - 1)
-                       BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                                 "0x%02x]\n", tmp_buf[i]);
+                       btc_iface_dbg(INTF_NOTIFY,
+                                     "0x%02x]\n", tmp_buf[i]);
                else
-                       BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                                 "0x%02x, ", tmp_buf[i]);
+                       btc_iface_dbg(INTF_NOTIFY,
+                                     "0x%02x, ", tmp_buf[i]);
        }
 
        if (BT_INFO_SRC_8723B_1ANT_WIFI_FW != rsp_source) {
@@ -2950,8 +2937,8 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
                 * because bt is reset and loss of the info.
                 */
                if (coex_sta->bt_info_ext & BIT1) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
                        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
                                           &wifi_connected);
                        if (wifi_connected)
@@ -2965,8 +2952,8 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
                if (coex_sta->bt_info_ext & BIT3) {
                        if (!btcoexist->manual_control &&
                            !btcoexist->stop_coex_dm) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "[BTCoex], BT ext info bit3 check, set BT NOT ignore Wlan active!!\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "[BTCoex], BT ext info bit3 check, set BT NOT ignore Wlan active!!\n");
                                halbtc8723b1ant_ignore_wlan_act(btcoexist,
                                                                FORCE_EXEC,
                                                                false);
@@ -3021,30 +3008,30 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
 
        if (!(bt_info&BT_INFO_8723B_1ANT_B_CONNECTION)) {
                coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE;
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], BtInfoNotify(), BT Non-Connected idle!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], BtInfoNotify(), BT Non-Connected idle!\n");
        /* connection exists but no busy */
        } else if (bt_info == BT_INFO_8723B_1ANT_B_CONNECTION) {
                coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE;
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
        } else if ((bt_info & BT_INFO_8723B_1ANT_B_SCO_ESCO) ||
                (bt_info & BT_INFO_8723B_1ANT_B_SCO_BUSY)) {
                coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_SCO_BUSY;
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
        } else if (bt_info & BT_INFO_8723B_1ANT_B_ACL_BUSY) {
                if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY != coex_dm->bt_status)
                        coex_dm->auto_tdma_adjust = false;
 
                coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_ACL_BUSY;
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
        } else {
                coex_dm->bt_status =
                        BT_8723B_1ANT_BT_STATUS_MAX;
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], BtInfoNotify(), BT Non-Defined state!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], BtInfoNotify(), BT Non-Defined state!!\n");
        }
 
        if ((BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
@@ -3060,7 +3047,7 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
 
 void ex_halbtc8723b1ant_halt_notify(struct btc_coexist *btcoexist)
 {
-       BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n");
+       btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Halt notify\n");
 
        btcoexist->stop_coex_dm = true;
 
@@ -3078,11 +3065,11 @@ void ex_halbtc8723b1ant_halt_notify(struct btc_coexist *btcoexist)
 
 void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
 {
-       BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Pnp notify\n");
+       btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Pnp notify\n");
 
        if (BTC_WIFI_PNP_SLEEP == pnp_state) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], Pnp notify to SLEEP\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], Pnp notify to SLEEP\n");
                btcoexist->stop_coex_dm = true;
                halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT, false,
                                           true);
@@ -3092,8 +3079,8 @@ void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
                halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
                halbtc8723b1ant_wifi_off_hw_cfg(btcoexist);
        } else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], Pnp notify to WAKE UP\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], Pnp notify to WAKE UP\n");
                btcoexist->stop_coex_dm = false;
                halbtc8723b1ant_init_hw_config(btcoexist, false);
                halbtc8723b1ant_init_coex_dm(btcoexist);
@@ -3103,8 +3090,8 @@ void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
 
 void ex_halbtc8723b1ant_coex_dm_reset(struct btc_coexist *btcoexist)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                 "[BTCoex], *****************Coex DM Reset****************\n");
+       btc_alg_dbg(ALGO_TRACE,
+                   "[BTCoex], *****************Coex DM Reset****************\n");
 
        halbtc8723b1ant_init_hw_config(btcoexist, false);
        btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
@@ -3119,31 +3106,31 @@ void ex_halbtc8723b1ant_periodical(struct btc_coexist *btcoexist)
        static u8 dis_ver_info_cnt;
        u32 fw_ver = 0, bt_patch_ver = 0;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                 "[BTCoex], ==========================Periodical===========================\n");
+       btc_alg_dbg(ALGO_TRACE,
+                   "[BTCoex], ==========================Periodical===========================\n");
 
        if (dis_ver_info_cnt <= 5) {
                dis_ver_info_cnt += 1;
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                         "[BTCoex], ****************************************************************\n");
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                         "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
-                         board_info->pg_ant_num, board_info->btdm_ant_num,
-                         board_info->btdm_ant_pos);
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                         "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
-                         ((stack_info->profile_notified) ? "Yes" : "No"),
-                         stack_info->hci_version);
+               btc_iface_dbg(INTF_INIT,
+                             "[BTCoex], ****************************************************************\n");
+               btc_iface_dbg(INTF_INIT,
+                             "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+                             board_info->pg_ant_num, board_info->btdm_ant_num,
+                             board_info->btdm_ant_pos);
+               btc_iface_dbg(INTF_INIT,
+                             "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
+                             stack_info->profile_notified ? "Yes" : "No",
+                             stack_info->hci_version);
                btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
                                   &bt_patch_ver);
                btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                         "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
-                         glcoex_ver_date_8723b_1ant,
-                         glcoex_ver_8723b_1ant, fw_ver,
-                         bt_patch_ver, bt_patch_ver);
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                         "[BTCoex], ****************************************************************\n");
+               btc_iface_dbg(INTF_INIT,
+                             "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+                             glcoex_ver_date_8723b_1ant,
+                             glcoex_ver_8723b_1ant, fw_ver,
+                             bt_patch_ver, bt_patch_ver);
+               btc_iface_dbg(INTF_INIT,
+                             "[BTCoex], ****************************************************************\n");
        }
 
 #if (BT_AUTO_REPORT_ONLY_8723B_1ANT == 0)
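
The hunks above are a mechanical conversion: every BTC_PRINT(BTC_MSG_ALGORITHM, flag, ...) call becomes btc_alg_dbg(flag, ...), and every BTC_PRINT(BTC_MSG_INTERFACE, flag, ...) call becomes btc_iface_dbg(flag, ...), so the message class is carried by the macro name instead of an extra argument. The definitions of the new helpers are not part of this file's diff; a minimal sketch of what such wrappers could look like, assuming a per-class btc_dbg_type[] bitmask gates each trace flag (the array name and the exact gating are assumptions here, not taken from this diff):

	/* Hypothetical sketch only -- the real macros live in the
	 * btcoexist headers elsewhere in this series and may differ.
	 */
	extern u32 btc_dbg_type[];	/* assumed: enabled-trace bitmask per message class */

	#define btc_alg_dbg(dbg_flag, fmt, ...)					\
		do {								\
			if (btc_dbg_type[BTC_MSG_ALGORITHM] & (dbg_flag))	\
				printk(KERN_DEBUG fmt, ##__VA_ARGS__);		\
		} while (0)

	#define btc_iface_dbg(dbg_flag, fmt, ...)				\
		do {								\
			if (btc_dbg_type[BTC_MSG_INTERFACE] & (dbg_flag))	\
				printk(KERN_DEBUG fmt, ##__VA_ARGS__);		\
		} while (0)

Dropping the class argument is what lets most of the multi-line BTC_PRINT call sites above collapse onto fewer, shorter lines while leaving the format strings and trace flags untouched.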
index c43ab59a690a85abd19798af9e7a110fb194543b..5f488ecaef70b890ba049f6a00e82a76b130d8ee 100644
@@ -72,32 +72,28 @@ static u8 btc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
                        if (bt_rssi >= rssi_thresh +
                                       BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
                                bt_rssi_state = BTC_RSSI_STATE_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state "
-                                         "switch to High\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state switch to High\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state "
-                                         "stay at Low\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state stay at Low\n");
                        }
                } else {
                        if (bt_rssi < rssi_thresh) {
                                bt_rssi_state = BTC_RSSI_STATE_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state "
-                                         "switch to Low\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state switch to Low\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state "
-                                         "stay at High\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state stay at High\n");
                        }
                }
        } else if (level_num == 3) {
                if (rssi_thresh > rssi_thresh1) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                 "[BTCoex], BT Rssi thresh error!!\n");
+                       btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                   "[BTCoex], BT Rssi thresh error!!\n");
                        return coex_sta->pre_bt_rssi_state;
                }
 
@@ -106,14 +102,12 @@ static u8 btc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
                        if (bt_rssi >= rssi_thresh +
                                       BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
                                bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state "
-                                         "switch to Medium\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state switch to Medium\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state "
-                                         "stay at Low\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state stay at Low\n");
                        }
                } else if ((coex_sta->pre_bt_rssi_state ==
                                                BTC_RSSI_STATE_MEDIUM) ||
@@ -122,31 +116,26 @@ static u8 btc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
                        if (bt_rssi >= rssi_thresh1 +
                                       BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
                                bt_rssi_state = BTC_RSSI_STATE_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state "
-                                         "switch to High\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state switch to High\n");
                        } else if (bt_rssi < rssi_thresh) {
                                bt_rssi_state = BTC_RSSI_STATE_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state "
-                                         "switch to Low\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state switch to Low\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state "
-                                         "stay at Medium\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state stay at Medium\n");
                        }
                } else {
                        if (bt_rssi < rssi_thresh1) {
                                bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state "
-                                         "switch to Medium\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state switch to Medium\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state "
-                                         "stay at High\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state stay at High\n");
                        }
                }
        }
@@ -173,36 +162,28 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
                        if (wifi_rssi >= rssi_thresh +
                                         BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
                                wifi_rssi_state = BTC_RSSI_STATE_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state "
-                                         "switch to High\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state switch to High\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state "
-                                         "stay at Low\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state stay at Low\n");
                        }
                } else {
                        if (wifi_rssi < rssi_thresh) {
                                wifi_rssi_state = BTC_RSSI_STATE_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state "
-                                         "switch to Low\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state switch to Low\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state "
-                                         "stay at High\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state stay at High\n");
                        }
                }
        } else if (level_num == 3) {
                if (rssi_thresh > rssi_thresh1) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE,
-                                 "[BTCoex], wifi RSSI thresh error!!\n");
+                       btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                   "[BTCoex], wifi RSSI thresh error!!\n");
                        return coex_sta->pre_wifi_rssi_state[index];
                }
 
@@ -213,16 +194,12 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
                        if (wifi_rssi >= rssi_thresh +
                                        BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
                                wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state "
-                                         "switch to Medium\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state switch to Medium\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state "
-                                         "stay at Low\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state stay at Low\n");
                        }
                } else if ((coex_sta->pre_wifi_rssi_state[index] ==
                                                BTC_RSSI_STATE_MEDIUM) ||
@@ -231,36 +208,26 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
                        if (wifi_rssi >= rssi_thresh1 +
                                         BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
                                wifi_rssi_state = BTC_RSSI_STATE_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state "
-                                         "switch to High\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state switch to High\n");
                        } else if (wifi_rssi < rssi_thresh) {
                                wifi_rssi_state = BTC_RSSI_STATE_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state "
-                                         "switch to Low\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state switch to Low\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state "
-                                         "stay at Medium\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state stay at Medium\n");
                        }
                } else {
                        if (wifi_rssi < rssi_thresh1) {
                                wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state "
-                                         "switch to Medium\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state switch to Medium\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state "
-                                         "stay at High\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state stay at High\n");
                        }
                }
        }
@@ -292,12 +259,12 @@ static void btc8723b2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
        coex_sta->low_priority_tx = reg_lp_tx;
        coex_sta->low_priority_rx = reg_lp_rx;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-                 "[BTCoex], High Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
-                 reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-                 "[BTCoex], Low Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
-                 reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
+       btc_alg_dbg(ALGO_BT_MONITOR,
+                   "[BTCoex], High Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+                   reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
+       btc_alg_dbg(ALGO_BT_MONITOR,
+                   "[BTCoex], Low Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+                   reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
 
        /* reset counter */
        btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
@@ -311,9 +278,9 @@ static void btc8723b2ant_query_bt_info(struct btc_coexist *btcoexist)
 
        h2c_parameter[0] |= BIT0;       /* trigger */
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
-                 h2c_parameter[0]);
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+                   h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
 }
@@ -427,8 +394,8 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
        btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
 
        if (!bt_link_info->bt_link_exist) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], No BT link exists!!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], No BT link exists!!!\n");
                return algorithm;
        }
 
@@ -443,27 +410,27 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
 
        if (num_of_diff_profile == 1) {
                if (bt_link_info->sco_exist) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], SCO only\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], SCO only\n");
                        algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
                } else {
                        if (bt_link_info->hid_exist) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "[BTCoex], HID only\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "[BTCoex], HID only\n");
                                algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
                        } else if (bt_link_info->a2dp_exist) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "[BTCoex], A2DP only\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "[BTCoex], A2DP only\n");
                                algorithm = BT_8723B_2ANT_COEX_ALGO_A2DP;
                        } else if (bt_link_info->pan_exist) {
                                if (bt_hs_on) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], PAN(HS) only\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], PAN(HS) only\n");
                                        algorithm =
                                                BT_8723B_2ANT_COEX_ALGO_PANHS;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], PAN(EDR) only\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], PAN(EDR) only\n");
                                        algorithm =
                                                BT_8723B_2ANT_COEX_ALGO_PANEDR;
                                }
@@ -472,21 +439,21 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
        } else if (num_of_diff_profile == 2) {
                if (bt_link_info->sco_exist) {
                        if (bt_link_info->hid_exist) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "[BTCoex], SCO + HID\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "[BTCoex], SCO + HID\n");
                                algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
                        } else if (bt_link_info->a2dp_exist) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "[BTCoex], SCO + A2DP ==> SCO\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "[BTCoex], SCO + A2DP ==> SCO\n");
                                algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
                        } else if (bt_link_info->pan_exist) {
                                if (bt_hs_on) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], SCO + PAN(HS)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], SCO + PAN(HS)\n");
                                        algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], SCO + PAN(EDR)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], SCO + PAN(EDR)\n");
                                        algorithm =
                                            BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
                                }
@@ -494,31 +461,31 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
                } else {
                        if (bt_link_info->hid_exist &&
                            bt_link_info->a2dp_exist) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "[BTCoex], HID + A2DP\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "[BTCoex], HID + A2DP\n");
                                algorithm = BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
                        } else if (bt_link_info->hid_exist &&
                                   bt_link_info->pan_exist) {
                                if (bt_hs_on) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], HID + PAN(HS)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], HID + PAN(HS)\n");
                                        algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], HID + PAN(EDR)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], HID + PAN(EDR)\n");
                                        algorithm =
                                            BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
                                }
                        } else if (bt_link_info->pan_exist &&
                                   bt_link_info->a2dp_exist) {
                                if (bt_hs_on) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], A2DP + PAN(HS)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], A2DP + PAN(HS)\n");
                                        algorithm =
                                            BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex],A2DP + PAN(EDR)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex],A2DP + PAN(EDR)\n");
                                        algorithm =
                                            BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP;
                                }
@@ -528,37 +495,32 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
                if (bt_link_info->sco_exist) {
                        if (bt_link_info->hid_exist &&
                            bt_link_info->a2dp_exist) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "[BTCoex], SCO + HID + A2DP"
-                                         " ==> HID\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "[BTCoex], SCO + HID + A2DP ==> HID\n");
                                algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
                        } else if (bt_link_info->hid_exist &&
                                   bt_link_info->pan_exist) {
                                if (bt_hs_on) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], SCO + HID + "
-                                                 "PAN(HS)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], SCO + HID + PAN(HS)\n");
                                        algorithm =
                                            BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], SCO + HID + "
-                                                 "PAN(EDR)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], SCO + HID + PAN(EDR)\n");
                                        algorithm =
                                            BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
                                }
                        } else if (bt_link_info->pan_exist &&
                                   bt_link_info->a2dp_exist) {
                                if (bt_hs_on) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], SCO + A2DP + "
-                                                 "PAN(HS)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], SCO + A2DP + PAN(HS)\n");
                                        algorithm =
                                            BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], SCO + A2DP + "
-                                                 "PAN(EDR) ==> HID\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n");
                                        algorithm =
                                            BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
                                }
@@ -568,15 +530,13 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
                            bt_link_info->pan_exist &&
                            bt_link_info->a2dp_exist) {
                                if (bt_hs_on) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], HID + A2DP + "
-                                                 "PAN(HS)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], HID + A2DP + PAN(HS)\n");
                                        algorithm =
                                            BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], HID + A2DP + "
-                                                 "PAN(EDR)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], HID + A2DP + PAN(EDR)\n");
                                        algorithm =
                                        BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
                                }
@@ -588,13 +548,11 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
                            bt_link_info->pan_exist &&
                            bt_link_info->a2dp_exist) {
                                if (bt_hs_on) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], Error!!! SCO + HID"
-                                                 " + A2DP + PAN(HS)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n");
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], SCO + HID + A2DP +"
-                                                 " PAN(EDR)==>PAN(EDR)+HID\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
                                        algorithm =
                                            BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
                                }
@@ -624,17 +582,15 @@ static bool btc8723b_need_dec_pwr(struct btc_coexist *btcoexist)
        if (wifi_connected) {
                if (bt_hs_on) {
                        if (bt_hs_rssi > 37) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-                                         "[BTCoex], Need to decrease bt "
-                                         "power for HS mode!!\n");
+                               btc_alg_dbg(ALGO_TRACE_FW,
+                                           "[BTCoex], Need to decrease bt power for HS mode!!\n");
                                ret = true;
                        }
                } else {
                        if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
                            (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-                                         "[BTCoex], Need to decrease bt "
-                                         "power for Wifi is connected!!\n");
+                               btc_alg_dbg(ALGO_TRACE_FW,
+                                           "[BTCoex], Need to decrease bt power for Wifi is connected!!\n");
                                ret = true;
                        }
                }
@@ -653,10 +609,10 @@ static void btc8723b2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist,
         */
        h2c_parameter[0] = dac_swing_lvl;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl);
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]);
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl);
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
 }
@@ -671,9 +627,9 @@ static void btc8723b2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
        if (dec_bt_pwr)
                h2c_parameter[0] |= BIT1;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], decrease Bt Power : %s, FW write 0x62=0x%x\n",
-                 (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], decrease Bt Power : %s, FW write 0x62=0x%x\n",
+                   (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
 }
@@ -681,15 +637,15 @@ static void btc8723b2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
 static void btc8723b2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
                                    bool force_exec, bool dec_bt_pwr)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-                 "[BTCoex], %s Dec BT power = %s\n",
-                 (force_exec ? "force to" : ""), (dec_bt_pwr ? "ON" : "OFF"));
+       btc_alg_dbg(ALGO_TRACE_FW,
+                   "[BTCoex], %s Dec BT power = %s\n",
+                   force_exec ? "force to" : "", dec_bt_pwr ? "ON" : "OFF");
        coex_dm->cur_dec_bt_pwr = dec_bt_pwr;
 
        if (!force_exec) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n",
-                         coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n",
+                           coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
 
                if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr)
                        return;
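
The force_exec plumbing above recurs for every tunable in this file: record the
requested value in a cur_* field and, unless the caller forces execution, skip the
firmware write when it matches the pre_* value. Condensed sketch of the pattern
(field names are from the hunk above; the pre_* update after the write happens
outside the visible context and is assumed here):

	coex_dm->cur_dec_bt_pwr = dec_bt_pwr;
	if (!force_exec &&
	    coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr)
		return;		/* unchanged -> no H2C write needed */
	btc8723b2ant_set_fw_dec_bt_pwr(btcoexist, dec_bt_pwr);
	coex_dm->pre_dec_bt_pwr = coex_dm->cur_dec_bt_pwr;	/* assumed */
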
@@ -702,17 +658,16 @@ static void btc8723b2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
 static void btc8723b2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
                                          bool force_exec, u8 fw_dac_swing_lvl)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-                 "[BTCoex], %s set FW Dac Swing level = %d\n",
-                 (force_exec ? "force to" : ""), fw_dac_swing_lvl);
+       btc_alg_dbg(ALGO_TRACE_FW,
+                   "[BTCoex], %s set FW Dac Swing level = %d\n",
+                   (force_exec ? "force to" : ""), fw_dac_swing_lvl);
        coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;
 
        if (!force_exec) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], preFwDacSwingLvl=%d, "
-                         "curFwDacSwingLvl=%d\n",
-                         coex_dm->pre_fw_dac_swing_lvl,
-                         coex_dm->cur_fw_dac_swing_lvl);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
+                           coex_dm->pre_fw_dac_swing_lvl,
+                           coex_dm->cur_fw_dac_swing_lvl);
 
                if (coex_dm->pre_fw_dac_swing_lvl ==
                   coex_dm->cur_fw_dac_swing_lvl)
@@ -729,16 +684,16 @@ static void btc8723b2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
 {
        if (rx_rf_shrink_on) {
                /* Shrink RF Rx LPF corner */
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                         "[BTCoex], Shrink RF Rx LPF corner!!\n");
+               btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                           "[BTCoex], Shrink RF Rx LPF corner!!\n");
                btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
                                          0xfffff, 0xffffc);
        } else {
                /* Resume RF Rx LPF corner */
                /* After initialization, we can use coex_dm->bt_rf0x1e_backup */
                if (btcoexist->initilized) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                                 "[BTCoex], Resume RF Rx LPF corner!!\n");
+                       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                                   "[BTCoex], Resume RF Rx LPF corner!!\n");
                        btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
                                                  0xfffff,
                                                  coex_dm->bt_rf0x1e_backup);
@@ -749,18 +704,17 @@ static void btc8723b2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
 static void btc8723b2ant_rf_shrink(struct btc_coexist *btcoexist,
                                   bool force_exec, bool rx_rf_shrink_on)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-                 "[BTCoex], %s turn Rx RF Shrink = %s\n",
-                 (force_exec ? "force to" : ""), (rx_rf_shrink_on ?
-                 "ON" : "OFF"));
+       btc_alg_dbg(ALGO_TRACE_SW,
+                   "[BTCoex], %s turn Rx RF Shrink = %s\n",
+                   (force_exec ? "force to" : ""), (rx_rf_shrink_on ?
+                                                    "ON" : "OFF"));
        coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
 
        if (!force_exec) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-                         "[BTCoex], bPreRfRxLpfShrink=%d, "
-                         "bCurRfRxLpfShrink=%d\n",
-                         coex_dm->pre_rf_rx_lpf_shrink,
-                         coex_dm->cur_rf_rx_lpf_shrink);
+               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+                           "[BTCoex], bPreRfRxLpfShrink=%d, bCurRfRxLpfShrink=%d\n",
+                           coex_dm->pre_rf_rx_lpf_shrink,
+                           coex_dm->cur_rf_rx_lpf_shrink);
 
                if (coex_dm->pre_rf_rx_lpf_shrink ==
                    coex_dm->cur_rf_rx_lpf_shrink)
@@ -788,9 +742,9 @@ static void btc8723b_set_penalty_txrate(struct btc_coexist *btcoexist,
                h2c_parameter[5] = 0xf9;  /*MCS5 or OFDM36*/
        }
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], set WiFi Low-Penalty Retry: %s",
-                 (low_penalty_ra ? "ON!!" : "OFF!!"));
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], set WiFi Low-Penalty Retry: %s",
+                   (low_penalty_ra ? "ON!!" : "OFF!!"));
 
        btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
 }
@@ -799,18 +753,17 @@ static void btc8723b2ant_low_penalty_ra(struct btc_coexist *btcoexist,
                                        bool force_exec, bool low_penalty_ra)
 {
        /*return; */
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-                 "[BTCoex], %s turn LowPenaltyRA = %s\n",
-                 (force_exec ? "force to" : ""), (low_penalty_ra ?
-                 "ON" : "OFF"));
+       btc_alg_dbg(ALGO_TRACE_SW,
+                   "[BTCoex], %s turn LowPenaltyRA = %s\n",
+                   (force_exec ? "force to" : ""), (low_penalty_ra ?
+                                                    "ON" : "OFF"));
        coex_dm->cur_low_penalty_ra = low_penalty_ra;
 
        if (!force_exec) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-                         "[BTCoex], bPreLowPenaltyRa=%d, "
-                         "bCurLowPenaltyRa=%d\n",
-                         coex_dm->pre_low_penalty_ra,
-                         coex_dm->cur_low_penalty_ra);
+               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+                           "[BTCoex], bPreLowPenaltyRa=%d, bCurLowPenaltyRa=%d\n",
+                           coex_dm->pre_low_penalty_ra,
+                           coex_dm->cur_low_penalty_ra);
 
                if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
                        return;
@@ -824,8 +777,8 @@ static void btc8723b2ant_set_dac_swing_reg(struct btc_coexist *btcoexist,
                                           u32 level)
 {
        u8 val = (u8) level;
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                 "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                   "[BTCoex], Write SwDacSwing = 0x%x\n", level);
        btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val);
 }
 
@@ -843,20 +796,20 @@ static void btc8723b2ant_dac_swing(struct btc_coexist *btcoexist,
                                   bool force_exec, bool dac_swing_on,
                                   u32 dac_swing_lvl)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-                 "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n",
-                 (force_exec ? "force to" : ""),
-                 (dac_swing_on ? "ON" : "OFF"), dac_swing_lvl);
+       btc_alg_dbg(ALGO_TRACE_SW,
+                   "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n",
+                   (force_exec ? "force to" : ""),
+                   (dac_swing_on ? "ON" : "OFF"), dac_swing_lvl);
        coex_dm->cur_dac_swing_on = dac_swing_on;
        coex_dm->cur_dac_swing_lvl = dac_swing_lvl;
 
        if (!force_exec) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-                         "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x,"
-                         " bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n",
-                         coex_dm->pre_dac_swing_on, coex_dm->pre_dac_swing_lvl,
-                         coex_dm->cur_dac_swing_on,
-                         coex_dm->cur_dac_swing_lvl);
+               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+                           "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x, bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n",
+                           coex_dm->pre_dac_swing_on,
+                           coex_dm->pre_dac_swing_lvl,
+                           coex_dm->cur_dac_swing_on,
+                           coex_dm->cur_dac_swing_lvl);
 
                if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
                    (coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl))
@@ -877,8 +830,8 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
 
        /*  BB AGC Gain Table */
        if (agc_table_en) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                         "[BTCoex], BB Agc Table On!\n");
+               btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                           "[BTCoex], BB Agc Table On!\n");
                btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6e1A0001);
                btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6d1B0001);
                btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6c1C0001);
@@ -887,8 +840,8 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
                btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x691F0001);
                btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x68200001);
        } else {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                         "[BTCoex], BB Agc Table Off!\n");
+               btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                           "[BTCoex], BB Agc Table Off!\n");
                btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001);
                btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001);
                btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001);
@@ -901,15 +854,15 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
        /* RF Gain */
        btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x02000);
        if (agc_table_en) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                         "[BTCoex], Agc Table On!\n");
+               btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                           "[BTCoex], Agc Table On!\n");
                btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
                                          0xfffff, 0x38fff);
                btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
                                          0xfffff, 0x38ffe);
        } else {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                         "[BTCoex], Agc Table Off!\n");
+               btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                           "[BTCoex], Agc Table Off!\n");
                btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
                                          0xfffff, 0x380c3);
                btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
@@ -920,15 +873,15 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
        btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xed, 0xfffff, 0x1);
 
        if (agc_table_en) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                         "[BTCoex], Agc Table On!\n");
+               btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                           "[BTCoex], Agc Table On!\n");
                btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
                                          0xfffff, 0x38fff);
                btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
                                          0xfffff, 0x38ffe);
        } else {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                         "[BTCoex], Agc Table Off!\n");
+               btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                           "[BTCoex], Agc Table Off!\n");
                btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
                                          0xfffff, 0x380c3);
                btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
@@ -946,16 +899,17 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
 static void btc8723b2ant_agc_table(struct btc_coexist *btcoexist,
                                   bool force_exec, bool agc_table_en)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-                 "[BTCoex], %s %s Agc Table\n",
-                 (force_exec ? "force to" : ""),
-                 (agc_table_en ? "Enable" : "Disable"));
+       btc_alg_dbg(ALGO_TRACE_SW,
+                   "[BTCoex], %s %s Agc Table\n",
+                   (force_exec ? "force to" : ""),
+                   (agc_table_en ? "Enable" : "Disable"));
        coex_dm->cur_agc_table_en = agc_table_en;
 
        if (!force_exec) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-                         "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
-                         coex_dm->pre_agc_table_en, coex_dm->cur_agc_table_en);
+               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+                           "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
+                           coex_dm->pre_agc_table_en,
+                           coex_dm->cur_agc_table_en);
 
                if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en)
                        return;
@@ -969,20 +923,20 @@ static void btc8723b2ant_set_coex_table(struct btc_coexist *btcoexist,
                                        u32 val0x6c0, u32 val0x6c4,
                                        u32 val0x6c8, u8 val0x6cc)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                 "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0);
+       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                   "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0);
        btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                 "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4);
+       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                   "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4);
        btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                 "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8);
+       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                   "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8);
        btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                 "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc);
+       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                   "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc);
        btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
 }
 
@@ -991,29 +945,24 @@ static void btc8723b2ant_coex_table(struct btc_coexist *btcoexist,
                                    u32 val0x6c4, u32 val0x6c8,
                                    u8 val0x6cc)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-                 "[BTCoex], %s write Coex Table 0x6c0=0x%x,"
-                 " 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
-                 (force_exec ? "force to" : ""), val0x6c0,
-                 val0x6c4, val0x6c8, val0x6cc);
+       btc_alg_dbg(ALGO_TRACE_SW,
+                   "[BTCoex], %s write Coex Table 0x6c0=0x%x, 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
+                   force_exec ? "force to" : "",
+                   val0x6c0, val0x6c4, val0x6c8, val0x6cc);
        coex_dm->cur_val0x6c0 = val0x6c0;
        coex_dm->cur_val0x6c4 = val0x6c4;
        coex_dm->cur_val0x6c8 = val0x6c8;
        coex_dm->cur_val0x6cc = val0x6cc;
 
        if (!force_exec) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-                         "[BTCoex], preVal0x6c0=0x%x, "
-                         "preVal0x6c4=0x%x, preVal0x6c8=0x%x, "
-                         "preVal0x6cc=0x%x !!\n",
-                         coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4,
-                         coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-                         "[BTCoex], curVal0x6c0=0x%x, "
-                         "curVal0x6c4=0x%x, curVal0x6c8=0x%x, "
-                         "curVal0x6cc=0x%x !!\n",
-                         coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4,
-                         coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
+               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+                           "[BTCoex], preVal0x6c0=0x%x, preVal0x6c4=0x%x, preVal0x6c8=0x%x, preVal0x6cc=0x%x !!\n",
+                           coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4,
+                           coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
+               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+                           "[BTCoex], curVal0x6c0=0x%x, curVal0x6c4=0x%x, curVal0x6c8=0x%x, curVal0x6cc=0x%x !!\n",
+                           coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4,
+                           coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
 
                if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
                    (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
@@ -1099,9 +1048,9 @@ static void btc8723b2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
        if (enable)
                h2c_parameter[0] |= BIT0;/* function enable*/
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], set FW for BT Ignore Wlan_Act, "
-                 "FW write 0x63=0x%x\n", h2c_parameter[0]);
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63=0x%x\n",
+                   h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
 }
@@ -1109,17 +1058,16 @@ static void btc8723b2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
 static void btc8723b2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
                                         bool force_exec, bool enable)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-                 "[BTCoex], %s turn Ignore WlanAct %s\n",
-                 (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+       btc_alg_dbg(ALGO_TRACE_FW,
+                   "[BTCoex], %s turn Ignore WlanAct %s\n",
+                   (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
        coex_dm->cur_ignore_wlan_act = enable;
 
        if (!force_exec) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], bPreIgnoreWlanAct = %d, "
-                         "bCurIgnoreWlanAct = %d!!\n",
-                         coex_dm->pre_ignore_wlan_act,
-                         coex_dm->cur_ignore_wlan_act);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
+                           coex_dm->pre_ignore_wlan_act,
+                           coex_dm->cur_ignore_wlan_act);
 
                if (coex_dm->pre_ignore_wlan_act ==
                    coex_dm->cur_ignore_wlan_act)
@@ -1147,11 +1095,11 @@ static void btc8723b2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1,
        coex_dm->ps_tdma_para[3] = byte4;
        coex_dm->ps_tdma_para[4] = byte5;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n",
-                 h2c_parameter[0],
-                 h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
-                 h2c_parameter[3] << 8 | h2c_parameter[4]);
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n",
+                   h2c_parameter[0],
+                   h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
+                   h2c_parameter[3] << 8 | h2c_parameter[4]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
 }
@@ -1203,7 +1151,6 @@ static void btc8723b2ant_set_ant_path(struct btc_coexist *btcoexist,
 
                /* Force GNT_BT to low */
                btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x0);
-               btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
 
                if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) {
                        /* tell firmware "no antenna inverse" */
@@ -1211,19 +1158,25 @@ static void btc8723b2ant_set_ant_path(struct btc_coexist *btcoexist,
                        h2c_parameter[1] = 1;  /* ext switch type */
                        btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
                                                h2c_parameter);
+                       btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
                } else {
                        /* tell firmware "antenna inverse" */
                        h2c_parameter[0] = 1;
                        h2c_parameter[1] = 1;  /* ext switch type */
                        btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
                                                h2c_parameter);
+                       btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280);
                }
        }
 
        /* ext switch setting */
        if (use_ext_switch) {
                /* fixed internal switch S1->WiFi, S0->BT */
-               btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
+               if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
+                       btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
+               else
+                       btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280);
+
                switch (antpos_type) {
                case BTC_ANT_WIFI_AT_MAIN:
                        /* ext switch main at wifi */
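
This is the one hunk that changes behavior rather than just log macros: the
unconditional write of 0x0 to register 0x948 is replaced by a write keyed on the
antenna position, applied in both the internal- and external-switch paths. The new
policy, with values taken verbatim from the hunk (what 0x948 selects is an
assumption based on the surrounding antenna-path code):

	/* presumed antenna/RF path select */
	if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
		btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
	else
		btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280);
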
@@ -1255,20 +1208,20 @@ static void btc8723b2ant_set_ant_path(struct btc_coexist *btcoexist,
 static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec,
                                 bool turn_on, u8 type)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-                 "[BTCoex], %s turn %s PS TDMA, type=%d\n",
-                 (force_exec ? "force to" : ""),
-                 (turn_on ? "ON" : "OFF"), type);
+       btc_alg_dbg(ALGO_TRACE_FW,
+                   "[BTCoex], %s turn %s PS TDMA, type=%d\n",
+                   (force_exec ? "force to" : ""),
+                   (turn_on ? "ON" : "OFF"), type);
        coex_dm->cur_ps_tdma_on = turn_on;
        coex_dm->cur_ps_tdma = type;
 
        if (!force_exec) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
-                         coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
-                         coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
+                           coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
+                           coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
 
                if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
                    (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1466,8 +1419,8 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
                btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
                                   &low_pwr_disable);
 
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], Wifi non-connected idle!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], Wifi non-connected idle!!\n");
 
                btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
                                          0x0);
@@ -1490,9 +1443,8 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
                                           BTC_SET_ACT_DISABLE_LOW_POWER,
                                           &low_pwr_disable);
 
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Wifi connected + "
-                                 "BT non connected-idle!!\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Wifi connected + BT non connected-idle!!\n");
 
                        btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
                                                  0xfffff, 0x0);
@@ -1518,9 +1470,8 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
 
                        if (bt_hs_on)
                                return false;
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Wifi connected + "
-                                 "BT connected-idle!!\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Wifi connected + BT connected-idle!!\n");
 
                        btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
                                                  0xfffff, 0x0);
@@ -1544,17 +1495,15 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
                                           &low_pwr_disable);
 
                        if (wifi_busy) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "[BTCoex], Wifi Connected-Busy + "
-                                         "BT Busy!!\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
                                common = false;
                        } else {
                                if (bt_hs_on)
                                        return false;
 
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "[BTCoex], Wifi Connected-Idle + "
-                                         "BT Busy!!\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
 
                                btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A,
                                                          0x1, 0xfffff, 0x0);
@@ -1592,9 +1541,8 @@ static void set_tdma_int1(struct btc_coexist *btcoexist, bool tx_pause,
 {
        /* Set PS TDMA for max interval == 1 */
        if (tx_pause) {
-               BTC_PRINT(BTC_MSG_ALGORITHM,
-                         ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], TxPause = 1\n");
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], TxPause = 1\n");
 
                if (coex_dm->cur_ps_tdma == 71) {
                        btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
@@ -1690,9 +1638,8 @@ static void set_tdma_int1(struct btc_coexist *btcoexist, bool tx_pause,
                        }
                }
        } else {
-               BTC_PRINT(BTC_MSG_ALGORITHM,
-                         ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], TxPause = 0\n");
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], TxPause = 0\n");
                if (coex_dm->cur_ps_tdma == 5) {
                        btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 71);
                        coex_dm->tdma_adj_type = 71;
@@ -1790,9 +1737,8 @@ static void set_tdma_int2(struct btc_coexist *btcoexist, bool tx_pause,
 {
        /* Set PS TDMA for max interval == 2 */
        if (tx_pause) {
-               BTC_PRINT(BTC_MSG_ALGORITHM,
-                         ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], TxPause = 1\n");
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], TxPause = 1\n");
                if (coex_dm->cur_ps_tdma == 1) {
                        btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6);
                        coex_dm->tdma_adj_type = 6;
@@ -1873,9 +1819,8 @@ static void set_tdma_int2(struct btc_coexist *btcoexist, bool tx_pause,
                        }
                }
        } else {
-               BTC_PRINT(BTC_MSG_ALGORITHM,
-                         ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], TxPause = 0\n");
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], TxPause = 0\n");
                if (coex_dm->cur_ps_tdma == 5) {
                        btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
                        coex_dm->tdma_adj_type = 2;
@@ -1963,9 +1908,8 @@ static void set_tdma_int3(struct btc_coexist *btcoexist, bool tx_pause,
 {
        /* Set PS TDMA for max interval == 3 */
        if (tx_pause) {
-               BTC_PRINT(BTC_MSG_ALGORITHM,
-                         ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], TxPause = 1\n");
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], TxPause = 1\n");
                if (coex_dm->cur_ps_tdma == 1) {
                        btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7);
                        coex_dm->tdma_adj_type = 7;
@@ -2046,9 +1990,8 @@ static void set_tdma_int3(struct btc_coexist *btcoexist, bool tx_pause,
                        }
                }
        } else {
-               BTC_PRINT(BTC_MSG_ALGORITHM,
-                         ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], TxPause = 0\n");
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], TxPause = 0\n");
                if (coex_dm->cur_ps_tdma == 5) {
                        btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
                        coex_dm->tdma_adj_type = 3;
@@ -2140,13 +2083,13 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
        s32 result;
        u8 retry_count = 0;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-                 "[BTCoex], TdmaDurationAdjust()\n");
+       btc_alg_dbg(ALGO_TRACE_FW,
+                   "[BTCoex], TdmaDurationAdjust()\n");
 
        if (!coex_dm->auto_tdma_adjust) {
                coex_dm->auto_tdma_adjust = true;
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], first run TdmaDurationAdjust()!!\n");
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], first run TdmaDurationAdjust()!!\n");
                if (sco_hid) {
                        if (tx_pause) {
                                if (max_interval == 1) {
@@ -2250,11 +2193,11 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
        } else {
                /* acquire the BT TRx retry count from BT_Info byte2 */
                retry_count = coex_sta->bt_retry_cnt;
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], retry_count = %d\n", retry_count);
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_count=%d\n",
-                         up, dn, m, n, wait_count);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], retry_count = %d\n", retry_count);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_count=%d\n",
+                           up, dn, m, n, wait_count);
                result = 0;
                wait_count++;
                 /* no retry in the last 2-second duration */
@@ -2271,10 +2214,8 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                                up = 0;
                                dn = 0;
                                result = 1;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_TRACE_FW_DETAIL,
-                                         "[BTCoex], Increase wifi "
-                                         "duration!!\n");
+                               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                                           "[BTCoex], Increase wifi duration!!\n");
                        } /* <= 3 retries in the last 2-second duration */
                } else if (retry_count <= 3) {
                        up--;
@@ -2297,10 +2238,8 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                                dn = 0;
                                wait_count = 0;
                                result = -1;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_TRACE_FW_DETAIL,
-                                         "[BTCoex], Decrease wifi duration "
-                                         "for retry_counter<3!!\n");
+                               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                                           "[BTCoex], Decrease wifi duration for retry_counter<3!!\n");
                        }
                } else {
                        if (wait_count == 1)
@@ -2316,13 +2255,12 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                        dn = 0;
                        wait_count = 0;
                        result = -1;
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                                 "[BTCoex], Decrease wifi duration "
-                                 "for retry_counter>3!!\n");
+                       btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                                   "[BTCoex], Decrease wifi duration for retry_counter>3!!\n");
                }
 
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], max Interval = %d\n", max_interval);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], max Interval = %d\n", max_interval);
                if (max_interval == 1)
                        set_tdma_int1(btcoexist, tx_pause, result);
                else if (max_interval == 2)
@@ -2336,10 +2274,9 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
         */
        if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) {
                bool scan = false, link = false, roam = false;
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], PsTdma type dismatch!!!, "
-                         "curPsTdma=%d, recordPsTdma=%d\n",
-                         coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], PsTdma type dismatch!!!, curPsTdma=%d, recordPsTdma=%d\n",
+                           coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
 
                btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
                btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2349,9 +2286,8 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                        btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
                                             coex_dm->tdma_adj_type);
                else
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                                 "[BTCoex], roaming/link/scan is under"
-                                 " progress, will adjust next time!!!\n");
+                       btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                                   "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
        }
 }
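
For orientation: the duration-adjust routine above is a feedback loop keyed on the
BT retry count reported in BT_Info. Sustained zero-retry intervals widen the wifi
TDMA slot; retries shrink it. With the up/dn/m/n hysteresis bookkeeping elided (it
is only partly visible in this hunk), the classification reduces to roughly:

	/* rough sketch; the real code requires several consecutive
	 * quiet intervals before acting on result = 1 */
	if (retry_count == 0)
		result = 1;	/* BT quiet: increase wifi duration */
	else if (retry_count <= 3)
		result = -1;	/* a few retries: decrease wifi duration */
	else
		result = -1;	/* many retries: decrease immediately */
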
 
@@ -2989,27 +2925,26 @@ static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
 {
        u8 algorithm = 0;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                 "[BTCoex], RunCoexistMechanism()===>\n");
+       btc_alg_dbg(ALGO_TRACE,
+                   "[BTCoex], RunCoexistMechanism()===>\n");
 
        if (btcoexist->manual_control) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], RunCoexistMechanism(), "
-                         "return for Manual CTRL <===\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
                return;
        }
 
        if (coex_sta->under_ips) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], wifi is under IPS !!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], wifi is under IPS !!!\n");
                return;
        }
 
        algorithm = btc8723b2ant_action_algorithm(btcoexist);
        if (coex_sta->c2h_bt_inquiry_page &&
            (BT_8723B_2ANT_COEX_ALGO_PANHS != algorithm)) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], BT is under inquiry/page scan !!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], BT is under inquiry/page scan !!\n");
                btc8723b2ant_action_bt_inquiry(btcoexist);
                return;
        } else {
@@ -3021,84 +2956,75 @@ static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
        }
 
        coex_dm->cur_algorithm = algorithm;
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, "[BTCoex], Algorithm = %d\n",
-                 coex_dm->cur_algorithm);
+       btc_alg_dbg(ALGO_TRACE, "[BTCoex], Algorithm = %d\n",
+                   coex_dm->cur_algorithm);
 
        if (btc8723b2ant_is_common_action(btcoexist)) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], Action 2-Ant common.\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], Action 2-Ant common\n");
                coex_dm->auto_tdma_adjust = false;
        } else {
                if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], preAlgorithm=%d, "
-                                 "curAlgorithm=%d\n", coex_dm->pre_algorithm,
-                                 coex_dm->cur_algorithm);
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], preAlgorithm=%d, curAlgorithm=%d\n",
+                                   coex_dm->pre_algorithm,
+                                   coex_dm->cur_algorithm);
                        coex_dm->auto_tdma_adjust = false;
                }
                switch (coex_dm->cur_algorithm) {
                case BT_8723B_2ANT_COEX_ALGO_SCO:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action 2-Ant, algorithm = SCO.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action 2-Ant, algorithm = SCO\n");
                        btc8723b2ant_action_sco(btcoexist);
                        break;
                case BT_8723B_2ANT_COEX_ALGO_HID:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action 2-Ant, algorithm = HID.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action 2-Ant, algorithm = HID\n");
                        btc8723b2ant_action_hid(btcoexist);
                        break;
                case BT_8723B_2ANT_COEX_ALGO_A2DP:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action 2-Ant, "
-                                 "algorithm = A2DP.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action 2-Ant, algorithm = A2DP\n");
                        btc8723b2ant_action_a2dp(btcoexist);
                        break;
                case BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action 2-Ant, "
-                                 "algorithm = A2DP+PAN(HS).\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
                        btc8723b2ant_action_a2dp_pan_hs(btcoexist);
                        break;
                case BT_8723B_2ANT_COEX_ALGO_PANEDR:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action 2-Ant, "
-                                 "algorithm = PAN(EDR).\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)\n");
                        btc8723b2ant_action_pan_edr(btcoexist);
                        break;
                case BT_8723B_2ANT_COEX_ALGO_PANHS:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action 2-Ant, "
-                                 "algorithm = HS mode.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action 2-Ant, algorithm = HS mode\n");
                        btc8723b2ant_action_pan_hs(btcoexist);
-                               break;
+                       break;
                case BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action 2-Ant, "
-                                 "algorithm = PAN+A2DP.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP\n");
                        btc8723b2ant_action_pan_edr_a2dp(btcoexist);
                        break;
                case BT_8723B_2ANT_COEX_ALGO_PANEDR_HID:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action 2-Ant, "
-                                 "algorithm = PAN(EDR)+HID.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID\n");
                        btc8723b2ant_action_pan_edr_hid(btcoexist);
                        break;
                case BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action 2-Ant, "
-                                 "algorithm = HID+A2DP+PAN.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN\n");
                        btc8723b2ant_action_hid_a2dp_pan_edr(btcoexist);
                        break;
                case BT_8723B_2ANT_COEX_ALGO_HID_A2DP:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action 2-Ant, "
-                                 "algorithm = HID+A2DP.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action 2-Ant, algorithm = HID+A2DP\n");
                        btc8723b2ant_action_hid_a2dp(btcoexist);
                        break;
                default:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action 2-Ant, "
-                                 "algorithm = coexist All Off!!\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n");
                        btc8723b2ant_coex_alloff(btcoexist);
                        break;
                }
@@ -3126,8 +3052,8 @@ void ex_btc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist)
 {
        u8 u8tmp = 0;
 
-       BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                 "[BTCoex], 2Ant Init HW Config!!\n");
+       btc_iface_dbg(INTF_INIT,
+                     "[BTCoex], 2Ant Init HW Config!!\n");
        coex_dm->bt_rf0x1e_backup =
                btcoexist->btc_get_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff);
 
@@ -3152,8 +3078,8 @@ void ex_btc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist)
 
 void ex_btc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist)
 {
-       BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                 "[BTCoex], Coex Mechanism Init!!\n");
+       btc_iface_dbg(INTF_INIT,
+                     "[BTCoex], Coex Mechanism Init!!\n");
        btc8723b2ant_init_coex_dm(btcoexist);
 }
 
@@ -3388,15 +3314,15 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
 void ex_btc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
 {
        if (BTC_IPS_ENTER == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], IPS ENTER notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], IPS ENTER notify\n");
                coex_sta->under_ips = true;
                btc8723b2ant_wifioff_hwcfg(btcoexist);
                btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
                btc8723b2ant_coex_alloff(btcoexist);
        } else if (BTC_IPS_LEAVE == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], IPS LEAVE notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], IPS LEAVE notify\n");
                coex_sta->under_ips = false;
                ex_btc8723b2ant_init_hwconfig(btcoexist);
                btc8723b2ant_init_coex_dm(btcoexist);
@@ -3407,12 +3333,12 @@ void ex_btc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
 void ex_btc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
 {
        if (BTC_LPS_ENABLE == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], LPS ENABLE notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], LPS ENABLE notify\n");
                coex_sta->under_lps = true;
        } else if (BTC_LPS_DISABLE == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], LPS DISABLE notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], LPS DISABLE notify\n");
                coex_sta->under_lps = false;
        }
 }
@@ -3420,21 +3346,21 @@ void ex_btc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
 void ex_btc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
 {
        if (BTC_SCAN_START == type)
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], SCAN START notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], SCAN START notify\n");
        else if (BTC_SCAN_FINISH == type)
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], SCAN FINISH notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], SCAN FINISH notify\n");
 }
 
 void ex_btc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
 {
        if (BTC_ASSOCIATE_START == type)
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], CONNECT START notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], CONNECT START notify\n");
        else if (BTC_ASSOCIATE_FINISH == type)
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], CONNECT FINISH notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], CONNECT FINISH notify\n");
 }
 
 void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist,
@@ -3445,11 +3371,11 @@ void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist,
        u8 wifi_central_chnl;
 
        if (BTC_MEDIA_CONNECT == type)
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], MEDIA connect notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], MEDIA connect notify\n");
        else
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], MEDIA disconnect notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], MEDIA disconnect notify\n");
 
        /* only for 2.4G do we need to inform bt of the chnl mask */
        btcoexist->btc_get(btcoexist,
@@ -3470,10 +3396,10 @@ void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist,
        coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
        coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], FW write 0x66=0x%x\n",
-                 h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
-                 h2c_parameter[2]);
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], FW write 0x66=0x%x\n",
+                   h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+                   h2c_parameter[2]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
 }
@@ -3482,8 +3408,8 @@ void ex_btc8723b2ant_special_packet_notify(struct btc_coexist *btcoexist,
                                           u8 type)
 {
        if (type == BTC_PACKET_DHCP)
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], DHCP Packet notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], DHCP Packet notify\n");
 }
 
 void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
@@ -3501,25 +3427,24 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
                rsp_source = BT_INFO_SRC_8723B_2ANT_WIFI_FW;
        coex_sta->bt_info_c2h_cnt[rsp_source]++;
 
-       BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                 "[BTCoex], Bt info[%d], length=%d, hex data=[",
-                 rsp_source, length);
+       btc_iface_dbg(INTF_NOTIFY,
+                     "[BTCoex], Bt info[%d], length=%d, hex data=[",
+                     rsp_source, length);
        for (i = 0; i < length; i++) {
                coex_sta->bt_info_c2h[rsp_source][i] = tmpbuf[i];
                if (i == 1)
                        bt_info = tmpbuf[i];
                if (i == length-1)
-                       BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                                 "0x%02x]\n", tmpbuf[i]);
+                       btc_iface_dbg(INTF_NOTIFY,
+                                     "0x%02x]\n", tmpbuf[i]);
                else
-                       BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                                 "0x%02x, ", tmpbuf[i]);
+                       btc_iface_dbg(INTF_NOTIFY,
+                                     "0x%02x, ", tmpbuf[i]);
        }
 
        if (btcoexist->manual_control) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], BtInfoNotify(), "
-                         "return for Manual CTRL<===\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], BtInfoNotify(), return for Manual CTRL<===\n");
                return;
        }
 
@@ -3537,9 +3462,8 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
                     because bt was reset and lost the info.
                 */
                if ((coex_sta->bt_info_ext & BIT1)) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], BT ext info bit1 check,"
-                                 " send wifi BW&Chnl to BT!!\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
                        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
                                           &wifi_connected);
                        if (wifi_connected)
@@ -3553,9 +3477,8 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
                }
 
                if ((coex_sta->bt_info_ext & BIT3)) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], BT ext info bit3 check, "
-                                 "set BT NOT to ignore Wlan active!!\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
                        btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC,
                                                     false);
                } else {
@@ -3608,28 +3531,26 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
 
        if (!(bt_info & BT_INFO_8723B_2ANT_B_CONNECTION)) {
                coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE;
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], BtInfoNotify(), "
-                         "BT Non-Connected idle!!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
        /* connection exists but no busy */
        } else if (bt_info == BT_INFO_8723B_2ANT_B_CONNECTION) {
                coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE;
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
        } else if ((bt_info & BT_INFO_8723B_2ANT_B_SCO_ESCO) ||
                   (bt_info & BT_INFO_8723B_2ANT_B_SCO_BUSY)) {
                coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_SCO_BUSY;
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
        } else if (bt_info&BT_INFO_8723B_2ANT_B_ACL_BUSY) {
                coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_ACL_BUSY;
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
        } else {
                coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_MAX;
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], BtInfoNotify(), "
-                         "BT Non-Defined state!!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
        }
 
        if ((BT_8723B_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
@@ -3652,7 +3573,7 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
 
 void ex_btc8723b2ant_halt_notify(struct btc_coexist *btcoexist)
 {
-       BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n");
+       btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Halt notify\n");
 
        btc8723b2ant_wifioff_hwcfg(btcoexist);
        btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
@@ -3666,33 +3587,31 @@ void ex_btc8723b2ant_periodical(struct btc_coexist *btcoexist)
        static u8 dis_ver_info_cnt;
        u32 fw_ver = 0, bt_patch_ver = 0;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                 "[BTCoex], =========================="
-                 "Periodical===========================\n");
+       btc_alg_dbg(ALGO_TRACE,
+                   "[BTCoex], ==========================Periodical===========================\n");
 
        if (dis_ver_info_cnt <= 5) {
                dis_ver_info_cnt += 1;
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                         "[BTCoex], ****************************"
-                         "************************************\n");
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                         "[BTCoex], Ant PG Num/ Ant Mech/ "
-                         "Ant Pos = %d/ %d/ %d\n", board_info->pg_ant_num,
-                         board_info->btdm_ant_num, board_info->btdm_ant_pos);
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                         "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
-                         ((stack_info->profile_notified) ? "Yes" : "No"),
-                         stack_info->hci_version);
+               btc_iface_dbg(INTF_INIT,
+                             "[BTCoex], ****************************************************************\n");
+               btc_iface_dbg(INTF_INIT,
+                             "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+                             board_info->pg_ant_num,
+                             board_info->btdm_ant_num,
+                             board_info->btdm_ant_pos);
+               btc_iface_dbg(INTF_INIT,
+                             "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
+                             stack_info->profile_notified ? "Yes" : "No",
+                             stack_info->hci_version);
                btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
                                   &bt_patch_ver);
                btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                         "[BTCoex], CoexVer/ fw_ver/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
-                         glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
-                         fw_ver, bt_patch_ver, bt_patch_ver);
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                         "[BTCoex], *****************************"
-                         "***********************************\n");
+               btc_iface_dbg(INTF_INIT,
+                             "[BTCoex], CoexVer/ fw_ver/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+                             glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
+                             fw_ver, bt_patch_ver, bt_patch_ver);
+               btc_iface_dbg(INTF_INIT,
+                             "[BTCoex], ****************************************************************\n");
        }
 
 #if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 0)
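
The conversions above all replace the two-argument BTC_PRINT(BTC_MSG_*, level, ...) macro with per-category wrappers, so each call site drops its first BTC_MSG_* argument and the rest of the hunk is a mechanical reflow. A minimal sketch of what such wrappers could look like, assuming a btc_dbg_type[] gating array indexed by message category (the exact definitions belong to the btcoexist headers and may differ):

        /* Sketch only: btc_dbg_type[] and the BTC_MSG_* indices are
         * inferred from the old BTC_PRINT call sites, not copied from
         * the real header.
         */
        #define btc_alg_dbg(dbg_flag, fmt, ...)                         \
        do {                                                            \
                if (btc_dbg_type[BTC_MSG_ALGORITHM] & (dbg_flag))       \
                        printk(KERN_DEBUG fmt, ##__VA_ARGS__);          \
        } while (0)

        #define btc_iface_dbg(dbg_flag, fmt, ...)                       \
        do {                                                            \
                if (btc_dbg_type[BTC_MSG_INTERFACE] & (dbg_flag))       \
                        printk(KERN_DEBUG fmt, ##__VA_ARGS__);          \
        } while (0)

With the category baked into the macro name, the formerly split format strings can also be rejoined onto single lines, which is why several hunks collapse two-piece strings into one.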
index 9cecf174a37d18bde51434e2604e0fbe1653b519..3ce47c70bfa49935cc1c98140a9e2d1827c6a959 100644
@@ -76,28 +76,28 @@ static u8 halbtc8821a1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
                        if (bt_rssi >= (rssi_thresh +
                                        BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
                                bt_rssi_state = BTC_RSSI_STATE_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state switch to High\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state switch to High\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state stay at Low\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state stay at Low\n");
                        }
                } else {
                        if (bt_rssi < rssi_thresh) {
                                bt_rssi_state = BTC_RSSI_STATE_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state switch to Low\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state switch to Low\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state stay at High\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state stay at High\n");
                        }
                }
        } else if (level_num == 3) {
                if (rssi_thresh > rssi_thresh1) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                 "[BTCoex], BT Rssi thresh error!!\n");
+                       btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                   "[BTCoex], BT Rssi thresh error!!\n");
                        return coex_sta->pre_bt_rssi_state;
                }
 
@@ -106,12 +106,12 @@ static u8 halbtc8821a1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
                        if (bt_rssi >= (rssi_thresh +
                                        BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
                                bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state switch to Medium\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state switch to Medium\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state stay at Low\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state stay at Low\n");
                        }
                } else if ((coex_sta->pre_bt_rssi_state ==
                           BTC_RSSI_STATE_MEDIUM) ||
@@ -120,26 +120,26 @@ static u8 halbtc8821a1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
                        if (bt_rssi >= (rssi_thresh1 +
                                        BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
                                bt_rssi_state = BTC_RSSI_STATE_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state switch to High\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state switch to High\n");
                        } else if (bt_rssi < rssi_thresh) {
                                bt_rssi_state = BTC_RSSI_STATE_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state switch to Low\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state switch to Low\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state stay at Medium\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state stay at Medium\n");
                        }
                } else {
                        if (bt_rssi < rssi_thresh1) {
                                bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state switch to Medium\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state switch to Medium\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state stay at High\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state stay at High\n");
                        }
                }
        }
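
The level_num == 2 branch above is a textbook hysteresis: leaving the Low state requires the RSSI to clear rssi_thresh plus BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT, while falling back only requires dropping below the bare threshold, so the reported state cannot oscillate on a signal hovering at the boundary. A self-contained sketch of the same two-level pattern, with invented names for illustration:

        enum rssi_state { RSSI_LOW, RSSI_HIGH };

        /* Two-level hysteresis: 'tol' widens the upward transition
         * only, mirroring the driver's threshold-plus-tolerance test.
         */
        static enum rssi_state rssi_update(enum rssi_state prev,
                                           int rssi, int thresh, int tol)
        {
                if (prev == RSSI_LOW)
                        return rssi >= thresh + tol ? RSSI_HIGH : RSSI_LOW;
                return rssi < thresh ? RSSI_LOW : RSSI_HIGH;
        }

The three-level variant in the following hunk works the same way with a second threshold, which is why it first rejects rssi_thresh > rssi_thresh1 as a configuration error.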
@@ -165,32 +165,28 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist,
                        if (wifi_rssi >=
                            (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
                                wifi_rssi_state = BTC_RSSI_STATE_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state switch to High\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state switch to High\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state stay at Low\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state stay at Low\n");
                        }
                } else {
                        if (wifi_rssi < rssi_thresh) {
                                wifi_rssi_state = BTC_RSSI_STATE_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state switch to Low\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state switch to Low\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state stay at High\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state stay at High\n");
                        }
                }
        } else if (level_num == 3) {
                if (rssi_thresh > rssi_thresh1) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE,
-                                 "[BTCoex], wifi RSSI thresh error!!\n");
+                       btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                   "[BTCoex], wifi RSSI thresh error!!\n");
                        return coex_sta->pre_wifi_rssi_state[index];
                }
 
@@ -201,14 +197,12 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist,
                        if (wifi_rssi >=
                            (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
                                wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state switch to Medium\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state switch to Medium\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state stay at Low\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state stay at Low\n");
                        }
                } else if ((coex_sta->pre_wifi_rssi_state[index] ==
                        BTC_RSSI_STATE_MEDIUM) ||
@@ -218,31 +212,26 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist,
                            (rssi_thresh1 +
                             BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
                                wifi_rssi_state = BTC_RSSI_STATE_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state switch to High\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state switch to High\n");
                        } else if (wifi_rssi < rssi_thresh) {
                                wifi_rssi_state = BTC_RSSI_STATE_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state switch to Low\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state switch to Low\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state stay at Medium\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state stay at Medium\n");
                        }
                } else {
                        if (wifi_rssi < rssi_thresh1) {
                                wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state switch to Medium\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state switch to Medium\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state stay at High\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state stay at High\n");
                        }
                }
        }
@@ -431,9 +420,9 @@ static void halbtc8821a1ant_query_bt_info(struct btc_coexist *btcoexist)
 
        h2c_parameter[0] |= BIT0;       /* trigger*/
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
-                 h2c_parameter[0]);
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+                   h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
 }
@@ -504,8 +493,8 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
        btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
 
        if (!bt_link_info->bt_link_exist) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], No BT link exists!!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], No BT link exists!!!\n");
                return algorithm;
        }
 
@@ -520,26 +509,26 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
 
        if (num_of_diff_profile == 1) {
                if (bt_link_info->sco_exist) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], BT Profile = SCO only\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], BT Profile = SCO only\n");
                        algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
                } else {
                        if (bt_link_info->hid_exist) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "[BTCoex], BT Profile = HID only\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "[BTCoex], BT Profile = HID only\n");
                                algorithm = BT_8821A_1ANT_COEX_ALGO_HID;
                        } else if (bt_link_info->a2dp_exist) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "[BTCoex], BT Profile = A2DP only\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "[BTCoex], BT Profile = A2DP only\n");
                                algorithm = BT_8821A_1ANT_COEX_ALGO_A2DP;
                        } else if (bt_link_info->pan_exist) {
                                if (bt_hs_on) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], BT Profile = PAN(HS) only\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], BT Profile = PAN(HS) only\n");
                                        algorithm = BT_8821A_1ANT_COEX_ALGO_PANHS;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], BT Profile = PAN(EDR) only\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], BT Profile = PAN(EDR) only\n");
                                        algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR;
                                }
                        }
@@ -547,50 +536,50 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
        } else if (num_of_diff_profile == 2) {
                if (bt_link_info->sco_exist) {
                        if (bt_link_info->hid_exist) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "[BTCoex], BT Profile = SCO + HID\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "[BTCoex], BT Profile = SCO + HID\n");
                                algorithm = BT_8821A_1ANT_COEX_ALGO_HID;
                        } else if (bt_link_info->a2dp_exist) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n");
                                algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
                        } else if (bt_link_info->pan_exist) {
                                if (bt_hs_on) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], BT Profile = SCO + PAN(HS)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], BT Profile = SCO + PAN(HS)\n");
                                        algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], BT Profile = SCO + PAN(EDR)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], BT Profile = SCO + PAN(EDR)\n");
                                        algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
                                }
                        }
                } else {
                        if (bt_link_info->hid_exist &&
                            bt_link_info->a2dp_exist) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "[BTCoex], BT Profile = HID + A2DP\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "[BTCoex], BT Profile = HID + A2DP\n");
                                algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
                        } else if (bt_link_info->hid_exist &&
                                   bt_link_info->pan_exist) {
                                if (bt_hs_on) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], BT Profile = HID + PAN(HS)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], BT Profile = HID + PAN(HS)\n");
                                        algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], BT Profile = HID + PAN(EDR)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], BT Profile = HID + PAN(EDR)\n");
                                        algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
                                }
                        } else if (bt_link_info->pan_exist &&
                                   bt_link_info->a2dp_exist) {
                                if (bt_hs_on) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], BT Profile = A2DP + PAN(HS)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], BT Profile = A2DP + PAN(HS)\n");
                                        algorithm = BT_8821A_1ANT_COEX_ALGO_A2DP_PANHS;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], BT Profile = A2DP + PAN(EDR)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], BT Profile = A2DP + PAN(EDR)\n");
                                        algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_A2DP;
                                }
                        }
@@ -599,29 +588,29 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
                if (bt_link_info->sco_exist) {
                        if (bt_link_info->hid_exist &&
                            bt_link_info->a2dp_exist) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n");
                                algorithm = BT_8821A_1ANT_COEX_ALGO_HID;
                        } else if (bt_link_info->hid_exist &&
                                bt_link_info->pan_exist) {
                                if (bt_hs_on) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n");
                                        algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n");
                                        algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
                                }
                        } else if (bt_link_info->pan_exist &&
                                bt_link_info->a2dp_exist) {
                                if (bt_hs_on) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n");
                                        algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n");
                                        algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
                                }
                        }
@@ -630,12 +619,12 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
                            bt_link_info->pan_exist &&
                            bt_link_info->a2dp_exist) {
                                if (bt_hs_on) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n");
                                        algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n");
                                        algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP_PANEDR;
                                }
                        }
@@ -646,12 +635,12 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
                            bt_link_info->pan_exist &&
                            bt_link_info->a2dp_exist) {
                                if (bt_hs_on) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n");
 
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
                                        algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
                                }
                        }
@@ -670,10 +659,10 @@ static void halbtc8821a1ant_set_bt_auto_report(struct btc_coexist *btcoexist,
        if (enable_auto_report)
                h2c_parameter[0] |= BIT0;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
-                 (enable_auto_report ? "Enabled!!" : "Disabled!!"),
-                  h2c_parameter[0]);
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
+                   (enable_auto_report ? "Enabled!!" : "Disabled!!"),
+                   h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
 }
@@ -682,17 +671,16 @@ static void halbtc8821a1ant_bt_auto_report(struct btc_coexist *btcoexist,
                                           bool force_exec,
                                           bool enable_auto_report)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM,
-                 ALGO_TRACE_FW, "[BTCoex], %s BT Auto report = %s\n",
-                 (force_exec ? "force to" : ""), ((enable_auto_report) ?
-                 "Enabled" : "Disabled"));
+       btc_alg_dbg(ALGO_TRACE_FW, "[BTCoex], %s BT Auto report = %s\n",
+                   (force_exec ? "force to" : ""), ((enable_auto_report) ?
+                                                    "Enabled" : "Disabled"));
        coex_dm->cur_bt_auto_report = enable_auto_report;
 
        if (!force_exec) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n",
-                         coex_dm->pre_bt_auto_report,
-                         coex_dm->cur_bt_auto_report);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n",
+                           coex_dm->pre_bt_auto_report,
+                           coex_dm->cur_bt_auto_report);
 
                if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
                        return;
@@ -718,9 +706,9 @@ static void btc8821a1ant_set_sw_pen_tx_rate(struct btc_coexist *btcoexist,
                h2c_parameter[5] = 0xf9;        /*MCS5 or OFDM36*/
        }
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], set WiFi Low-Penalty Retry: %s",
-                 (low_penalty_ra ? "ON!!" : "OFF!!"));
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], set WiFi Low-Penalty Retry: %s",
+                   (low_penalty_ra ? "ON!!" : "OFF!!"));
 
        btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
 }
@@ -743,20 +731,20 @@ static void halbtc8821a1ant_set_coex_table(struct btc_coexist *btcoexist,
                                           u32 val0x6c0, u32 val0x6c4,
                                           u32 val0x6c8, u8 val0x6cc)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                 "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
+       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                   "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
        btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                 "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
+       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                   "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
        btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                 "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
+       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                   "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
        btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                 "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
+       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                   "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
        btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
 }
 
@@ -764,10 +752,10 @@ static void halbtc8821a1ant_coex_table(struct btc_coexist *btcoexist,
                                       bool force_exec, u32 val0x6c0,
                                       u32 val0x6c4, u32 val0x6c8, u8 val0x6cc)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-                 "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
-                 (force_exec ? "force to" : ""), val0x6c0, val0x6c4,
-                 val0x6c8, val0x6cc);
+       btc_alg_dbg(ALGO_TRACE_SW,
+                   "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
+                   (force_exec ? "force to" : ""), val0x6c0, val0x6c4,
+                   val0x6c8, val0x6cc);
        coex_dm->cur_val_0x6c0 = val0x6c0;
        coex_dm->cur_val_0x6c4 = val0x6c4;
        coex_dm->cur_val_0x6c8 = val0x6c8;
@@ -839,9 +827,9 @@ static void btc8821a1ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
        if (enable)
                h2c_parameter[0] |= BIT0;       /* function enable*/
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
-                 h2c_parameter[0]);
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
+                   h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
 }
@@ -849,16 +837,16 @@ static void btc8821a1ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
 static void halbtc8821a1ant_ignore_wlan_act(struct btc_coexist *btcoexist,
                                            bool force_exec, bool enable)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-                 "[BTCoex], %s turn Ignore WlanAct %s\n",
-                 (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+       btc_alg_dbg(ALGO_TRACE_FW,
+                   "[BTCoex], %s turn Ignore WlanAct %s\n",
+                   (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
        coex_dm->cur_ignore_wlan_act = enable;
 
        if (!force_exec) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
-                         coex_dm->pre_ignore_wlan_act,
-                         coex_dm->cur_ignore_wlan_act);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
+                           coex_dm->pre_ignore_wlan_act,
+                           coex_dm->cur_ignore_wlan_act);
 
                if (coex_dm->pre_ignore_wlan_act ==
                    coex_dm->cur_ignore_wlan_act)
@@ -887,13 +875,13 @@ static void halbtc8821a1ant_set_fw_pstdma(struct btc_coexist *btcoexist,
        coex_dm->ps_tdma_para[3] = byte4;
        coex_dm->ps_tdma_para[4] = byte5;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
-                 h2c_parameter[0],
-                 h2c_parameter[1]<<24 |
-                 h2c_parameter[2]<<16 |
-                 h2c_parameter[3]<<8 |
-                 h2c_parameter[4]);
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
+                   h2c_parameter[0],
+                   h2c_parameter[1] << 24 |
+                   h2c_parameter[2] << 16 |
+                   h2c_parameter[3] << 8 |
+                   h2c_parameter[4]);
        btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
 }
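
The PS-TDMA hunk above shifts bytes 1..4 of the five-byte command into one 32-bit value purely so the debug line can print the whole command as "0x%x%08x". A stand-alone illustration of that formatting, with invented parameter values:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                /* Example values only; the real parameters come from
                 * the coexistence state machine.
                 */
                uint8_t h2c[5] = { 0x51, 0x15, 0x03, 0x10, 0x11 };
                uint32_t tail = (uint32_t)h2c[1] << 24 |
                                (uint32_t)h2c[2] << 16 |
                                (uint32_t)h2c[3] << 8 |
                                h2c[4];

                printf("PS-TDMA H2C cmd = 0x%x%08x\n", h2c[0], tail);
                return 0;
        }

Printing the tail with %08x keeps its leading zeros, so the output reads as the exact byte sequence handed to btc_fill_h2c.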
 
@@ -910,22 +898,22 @@ static void halbtc8821a1ant_set_lps_rpwm(struct btc_coexist *btcoexist,
 static void halbtc8821a1ant_lps_rpwm(struct btc_coexist *btcoexist,
                                     bool force_exec, u8 lps_val, u8 rpwm_val)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-                 "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
-                 (force_exec ? "force to" : ""), lps_val, rpwm_val);
+       btc_alg_dbg(ALGO_TRACE_FW,
+                   "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
+                   (force_exec ? "force to" : ""), lps_val, rpwm_val);
        coex_dm->cur_lps = lps_val;
        coex_dm->cur_rpwm = rpwm_val;
 
        if (!force_exec) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], LPS-RxBeaconMode = 0x%x, LPS-RPWM = 0x%x!!\n",
-                         coex_dm->cur_lps, coex_dm->cur_rpwm);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], LPS-RxBeaconMode = 0x%x, LPS-RPWM = 0x%x!!\n",
+                           coex_dm->cur_lps, coex_dm->cur_rpwm);
 
                if ((coex_dm->pre_lps == coex_dm->cur_lps) &&
                    (coex_dm->pre_rpwm == coex_dm->cur_rpwm)) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                                 "[BTCoex], LPS-RPWM_Last = 0x%x, LPS-RPWM_Now = 0x%x!!\n",
-                                 coex_dm->pre_rpwm, coex_dm->cur_rpwm);
+                       btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                                   "[BTCoex], LPS-RPWM_Last = 0x%x, LPS-RPWM_Now = 0x%x!!\n",
+                                   coex_dm->pre_rpwm, coex_dm->cur_rpwm);
 
                        return;
                }
@@ -939,8 +927,8 @@ static void halbtc8821a1ant_lps_rpwm(struct btc_coexist *btcoexist,
 static void halbtc8821a1ant_sw_mechanism(struct btc_coexist *btcoexist,
                                         bool low_penalty_ra)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-                 "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
+       btc_alg_dbg(ALGO_BT_MONITOR,
+                   "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
 
        halbtc8821a1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
 }
@@ -1036,13 +1024,13 @@ static void halbtc8821a1ant_ps_tdma(struct btc_coexist *btcoexist,
 
        if (!force_exec) {
                if (coex_dm->cur_ps_tdma_on) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                                 "[BTCoex], ********** TDMA(on, %d) **********\n",
-                                 coex_dm->cur_ps_tdma);
+                       btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                                   "[BTCoex], ********** TDMA(on, %d) **********\n",
+                                   coex_dm->cur_ps_tdma);
                } else {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                                 "[BTCoex], ********** TDMA(off, %d) **********\n",
-                                 coex_dm->cur_ps_tdma);
+                       btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                                   "[BTCoex], ********** TDMA(off, %d) **********\n",
+                                   coex_dm->cur_ps_tdma);
                }
                if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
                    (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1253,50 +1241,50 @@ static bool halbtc8821a1ant_is_common_action(struct btc_coexist *btcoexist)
        if (!wifi_connected &&
            BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
            coex_dm->bt_status) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
                halbtc8821a1ant_sw_mechanism(btcoexist, false);
 
                common = true;
        } else if (wifi_connected &&
                   (BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
                    coex_dm->bt_status)) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], Wifi connected + BT non connected-idle!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], Wifi connected + BT non connected-idle!!\n");
                halbtc8821a1ant_sw_mechanism(btcoexist, false);
 
                common = true;
        } else if (!wifi_connected &&
                   (BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE ==
                    coex_dm->bt_status)) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
                halbtc8821a1ant_sw_mechanism(btcoexist, false);
 
                common = true;
        } else if (wifi_connected &&
                   (BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE ==
                   coex_dm->bt_status)) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], Wifi connected + BT connected-idle!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], Wifi connected + BT connected-idle!!\n");
                halbtc8821a1ant_sw_mechanism(btcoexist, false);
 
                common = true;
        } else if (!wifi_connected &&
                   (BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE !=
                    coex_dm->bt_status)) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], Wifi non connected-idle + BT Busy!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], Wifi non connected-idle + BT Busy!!\n");
                halbtc8821a1ant_sw_mechanism(btcoexist, false);
 
                common = true;
        } else {
                if (wifi_busy) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
                } else {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
                }
 
                common = false;
@@ -1313,8 +1301,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
        long                    result;
        u8                      retry_count = 0, bt_info_ext;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-                 "[BTCoex], TdmaDurationAdjustForAcl()\n");
+       btc_alg_dbg(ALGO_TRACE_FW,
+                   "[BTCoex], TdmaDurationAdjustForAcl()\n");
 
        if ((BT_8821A_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN ==
             wifi_status) ||
@@ -1342,8 +1330,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
 
        if (!coex_dm->auto_tdma_adjust) {
                coex_dm->auto_tdma_adjust = true;
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], first run TdmaDurationAdjust()!!\n");
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], first run TdmaDurationAdjust()!!\n");
 
                halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
                coex_dm->tdma_adj_type = 2;
@@ -1378,9 +1366,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
                                up = 0;
                                dn = 0;
                                result = 1;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_TRACE_FW_DETAIL,
-                                         "[BTCoex], Increase wifi duration!!\n");
+                               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                                           "[BTCoex], Increase wifi duration!!\n");
                        }
                } else if (retry_count <= 3) {
                        /* <=3 retry in the last 2-second duration*/
@@ -1410,9 +1397,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
                                dn = 0;
                                wait_count = 0;
                                result = -1;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_TRACE_FW_DETAIL,
-                                         "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
+                               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                                           "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
                        }
                } else {
                        /* retry count > 3, if retry count > 3 happens once,
@@ -1433,8 +1419,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
                        dn = 0;
                        wait_count = 0;
                        result = -1;
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                                 "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
+                       btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                                   "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
                }
 
                if (result == -1) {
@@ -1479,9 +1465,9 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
                        }
                } else {
                        /*no change*/
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                                 "[BTCoex], ********** TDMA(on, %d) **********\n",
-                               coex_dm->cur_ps_tdma);
+                       btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                                   "[BTCoex], ********** TDMA(on, %d) **********\n",
+                                   coex_dm->cur_ps_tdma);
                }
 
                if (coex_dm->cur_ps_tdma != 1 &&
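
btc8821a1ant_tdma_dur_adj() above is a slow feedback loop driven by the BT retry counter sampled every 2 seconds: a long run of clean windows widens the WiFi slot, a burst of retries shrinks it again. A simplified sketch of that up/down logic, with invented names (the real function also tracks wait_count and decays the counters asymmetrically):

        /* Returns +1 to widen the wifi slot, -1 to shrink it, 0 to
         * keep it. Simplified from the driver's up/dn bookkeeping.
         */
        static int tdma_feedback(int retry_count, int *up, int *dn)
        {
                if (retry_count == 0) {         /* clean 2 s window */
                        if (++(*up) >= 10) {
                                *up = 0;
                                *dn = 0;
                                return 1;
                        }
                } else {
                        *up = 0;                /* streak broken */
                        if (retry_count > 3 || ++(*dn) >= 2) {
                                *dn = 0;
                                return -1;
                        }
                }
                return 0;
        }

This corresponds to the trace messages in the hunk: "Increase wifi duration!!" on a clean streak, and the two "Decrease wifi duration for retryCounter..." branches otherwise.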
@@ -1603,27 +1589,27 @@ static void btc8821a1ant_mon_bt_en_dis(struct btc_coexist *btcoexist)
                bt_disabled = false;
                btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
                                   &bt_disabled);
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-                         "[BTCoex], BT is enabled !!\n");
+               btc_alg_dbg(ALGO_BT_MONITOR,
+                           "[BTCoex], BT is enabled !!\n");
        } else {
                bt_disable_cnt++;
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-                         "[BTCoex], bt all counters = 0, %d times!!\n",
-                         bt_disable_cnt);
+               btc_alg_dbg(ALGO_BT_MONITOR,
+                           "[BTCoex], bt all counters = 0, %d times!!\n",
+                           bt_disable_cnt);
                if (bt_disable_cnt >= 2) {
                        bt_disabled = true;
                        btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
                                           &bt_disabled);
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-                                 "[BTCoex], BT is disabled !!\n");
+                       btc_alg_dbg(ALGO_BT_MONITOR,
+                                   "[BTCoex], BT is disabled !!\n");
                        halbtc8821a1ant_action_wifi_only(btcoexist);
                }
        }
        if (pre_bt_disabled != bt_disabled) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-                         "[BTCoex], BT is from %s to %s!!\n",
-                       (pre_bt_disabled ? "disabled" : "enabled"),
-                       (bt_disabled ? "disabled" : "enabled"));
+               btc_alg_dbg(ALGO_BT_MONITOR,
+                           "[BTCoex], BT is from %s to %s!!\n",
+                           (pre_bt_disabled ? "disabled" : "enabled"),
+                           (bt_disabled ? "disabled" : "enabled"));
                pre_bt_disabled = bt_disabled;
                if (bt_disabled) {
                        btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS,
@@ -1897,15 +1883,15 @@ static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist)
        bool    scan = false, link = false, roam = false;
        bool    under_4way = false;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                 "[BTCoex], CoexForWifiConnect()===>\n");
+       btc_alg_dbg(ALGO_TRACE,
+                   "[BTCoex], CoexForWifiConnect()===>\n");
 
        btcoexist->btc_get(btcoexist,
                 BTC_GET_BL_WIFI_4_WAY_PROGRESS, &under_4way);
        if (under_4way) {
                btc8821a1ant_act_wifi_conn_sp_pkt(btcoexist);
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n");
                return;
        }
 
@@ -1914,8 +1900,8 @@ static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist)
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
        if (scan || link || roam) {
                halbtc8821a1ant_action_wifi_connected_scan(btcoexist);
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
                return;
        }
 
@@ -1976,58 +1962,58 @@ static void btc8821a1ant_run_sw_coex_mech(struct btc_coexist *btcoexist)
        if (!halbtc8821a1ant_is_common_action(btcoexist)) {
                switch (coex_dm->cur_algorithm) {
                case BT_8821A_1ANT_COEX_ALGO_SCO:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action algorithm = SCO.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action algorithm = SCO\n");
                        halbtc8821a1ant_action_sco(btcoexist);
                        break;
                case BT_8821A_1ANT_COEX_ALGO_HID:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action algorithm = HID.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action algorithm = HID\n");
                        halbtc8821a1ant_action_hid(btcoexist);
                        break;
                case BT_8821A_1ANT_COEX_ALGO_A2DP:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action algorithm = A2DP.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action algorithm = A2DP\n");
                        halbtc8821a1ant_action_a2dp(btcoexist);
                        break;
                case BT_8821A_1ANT_COEX_ALGO_A2DP_PANHS:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action algorithm = A2DP+PAN(HS).\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action algorithm = A2DP+PAN(HS)\n");
                        halbtc8821a1ant_action_a2dp_pan_hs(btcoexist);
                        break;
                case BT_8821A_1ANT_COEX_ALGO_PANEDR:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action algorithm = PAN(EDR).\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action algorithm = PAN(EDR)\n");
                        halbtc8821a1ant_action_pan_edr(btcoexist);
                        break;
                case BT_8821A_1ANT_COEX_ALGO_PANHS:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action algorithm = HS mode.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action algorithm = HS mode\n");
                        halbtc8821a1ant_action_pan_hs(btcoexist);
                        break;
                case BT_8821A_1ANT_COEX_ALGO_PANEDR_A2DP:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action algorithm = PAN+A2DP.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action algorithm = PAN+A2DP\n");
                        halbtc8821a1ant_action_pan_edr_a2dp(btcoexist);
                        break;
                case BT_8821A_1ANT_COEX_ALGO_PANEDR_HID:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action algorithm = PAN(EDR)+HID.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action algorithm = PAN(EDR)+HID\n");
                        halbtc8821a1ant_action_pan_edr_hid(btcoexist);
                        break;
                case BT_8821A_1ANT_COEX_ALGO_HID_A2DP_PANEDR:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action algorithm = HID+A2DP+PAN.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action algorithm = HID+A2DP+PAN\n");
                        btc8821a1ant_action_hid_a2dp_pan_edr(btcoexist);
                        break;
                case BT_8821A_1ANT_COEX_ALGO_HID_A2DP:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action algorithm = HID+A2DP.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action algorithm = HID+A2DP\n");
                        halbtc8821a1ant_action_hid_a2dp(btcoexist);
                        break;
                default:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action algorithm = coexist All Off!!\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action algorithm = coexist All Off!!\n");
                        /*halbtc8821a1ant_coex_all_off(btcoexist);*/
                        break;
                }
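
Every hunk in this patch makes the same substitution: the old variadic BTC_PRINT(component, level, fmt, ...) tracing macro becomes btc_alg_dbg() or btc_iface_dbg(), with the message component (BTC_MSG_ALGORITHM or BTC_MSG_INTERFACE) baked into the macro name, so each call site passes only the sub-level flag and the format string. The new helpers are defined in the btcoexist headers, outside this excerpt; what follows is only a minimal sketch of what such wrappers could look like, assuming a per-component btc_dbg_type[] level mask (names and values here are illustrative, not the committed definitions):

/*
 * Illustrative sketch only -- the real btc_alg_dbg()/btc_iface_dbg()
 * definitions live in the btcoexist headers and may differ in detail.
 */
#include <linux/kernel.h>		/* printk(), KERN_DEBUG */

enum btc_msg_type {			/* values illustrative */
	BTC_MSG_INTERFACE = 0,
	BTC_MSG_ALGORITHM = 1,
	BTC_MSG_MAX,
};

extern u32 btc_dbg_type[BTC_MSG_MAX];	/* per-component level mask */

#define btc_alg_dbg(dbgflag, fmt, ...)					\
do {									\
	if (unlikely(btc_dbg_type[BTC_MSG_ALGORITHM] & (dbgflag)))	\
		printk(KERN_DEBUG fmt, ##__VA_ARGS__);			\
} while (0)

#define btc_iface_dbg(dbgflag, fmt, ...)				\
do {									\
	if (unlikely(btc_dbg_type[BTC_MSG_INTERFACE] & (dbgflag)))	\
		printk(KERN_DEBUG fmt, ##__VA_ARGS__);			\
} while (0)
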
@@ -2045,31 +2031,31 @@ static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
        u8      wifi_rssi_state = BTC_RSSI_STATE_HIGH;
        bool    wifi_under_5g = false;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                 "[BTCoex], RunCoexistMechanism()===>\n");
+       btc_alg_dbg(ALGO_TRACE,
+                   "[BTCoex], RunCoexistMechanism()===>\n");
 
        if (btcoexist->manual_control) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
                return;
        }
 
        if (btcoexist->stop_coex_dm) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
                return;
        }
 
        if (coex_sta->under_ips) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], wifi is under IPS !!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], wifi is under IPS !!!\n");
                return;
        }
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
        if (wifi_under_5g) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
                halbtc8821a1ant_coex_under_5g(btcoexist);
                return;
        }
@@ -2135,8 +2121,8 @@ static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
        if (!wifi_connected) {
                bool    scan = false, link = false, roam = false;
 
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], wifi is non connected-idle !!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], wifi is non connected-idle !!!\n");
 
                btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
                btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2168,8 +2154,8 @@ static void halbtc8821a1ant_init_hw_config(struct btc_coexist *btcoexist,
        u8      u1_tmp = 0;
        bool    wifi_under_5g = false;
 
-       BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                 "[BTCoex], 1Ant Init HW Config!!\n");
+       btc_iface_dbg(INTF_INIT,
+                     "[BTCoex], 1Ant Init HW Config!!\n");
 
        if (back_up) {
                coex_dm->backup_arfr_cnt1 = btcoexist->btc_read_4byte(btcoexist,
@@ -2220,8 +2206,8 @@ void ex_halbtc8821a1ant_init_hwconfig(struct btc_coexist *btcoexist)
 
 void ex_halbtc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist)
 {
-       BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                 "[BTCoex], Coex Mechanism Init!!\n");
+       btc_iface_dbg(INTF_INIT,
+                     "[BTCoex], Coex Mechanism Init!!\n");
 
        btcoexist->stop_coex_dm = false;
 
@@ -2515,8 +2501,8 @@ void ex_halbtc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
                return;
 
        if (BTC_IPS_ENTER == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], IPS ENTER notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], IPS ENTER notify\n");
                coex_sta->under_ips = true;
                halbtc8821a1ant_set_ant_path(btcoexist,
                                             BTC_ANT_PATH_BT, false, true);
@@ -2525,8 +2511,8 @@ void ex_halbtc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
                halbtc8821a1ant_coex_table_with_type(btcoexist,
                                                     NORMAL_EXEC, 0);
        } else if (BTC_IPS_LEAVE == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], IPS LEAVE notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], IPS LEAVE notify\n");
                coex_sta->under_ips = false;
 
                halbtc8821a1ant_run_coexist_mechanism(btcoexist);
@@ -2539,12 +2525,12 @@ void ex_halbtc8821a1ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
                return;
 
        if (BTC_LPS_ENABLE == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], LPS ENABLE notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], LPS ENABLE notify\n");
                coex_sta->under_Lps = true;
        } else if (BTC_LPS_DISABLE == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], LPS DISABLE notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], LPS DISABLE notify\n");
                coex_sta->under_Lps = false;
        }
 }
@@ -2574,8 +2560,8 @@ void ex_halbtc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
        }
 
        if (BTC_SCAN_START == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], SCAN START notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], SCAN START notify\n");
                if (!wifi_connected) {
                        /* non-connected scan*/
                        btc8821a1ant_act_wifi_not_conn_scan(btcoexist);
@@ -2584,8 +2570,8 @@ void ex_halbtc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
                        halbtc8821a1ant_action_wifi_connected_scan(btcoexist);
                }
        } else if (BTC_SCAN_FINISH == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], SCAN FINISH notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], SCAN FINISH notify\n");
                if (!wifi_connected) {
                        /* non-connected scan*/
                        halbtc8821a1ant_action_wifi_not_connected(btcoexist);
@@ -2614,12 +2600,12 @@ void ex_halbtc8821a1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
        }
 
        if (BTC_ASSOCIATE_START == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], CONNECT START notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], CONNECT START notify\n");
                btc8821a1ant_act_wifi_not_conn_scan(btcoexist);
        } else if (BTC_ASSOCIATE_FINISH == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], CONNECT FINISH notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], CONNECT FINISH notify\n");
 
                btcoexist->btc_get(btcoexist,
                         BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
@@ -2645,11 +2631,11 @@ void ex_halbtc8821a1ant_media_status_notify(struct btc_coexist *btcoexist,
                return;
 
        if (BTC_MEDIA_CONNECT == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], MEDIA connect notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], MEDIA connect notify\n");
        } else {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], MEDIA disconnect notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], MEDIA disconnect notify\n");
        }
 
        /* only 2.4G we need to inform bt the chnl mask*/
@@ -2672,9 +2658,11 @@ void ex_halbtc8821a1ant_media_status_notify(struct btc_coexist *btcoexist,
        coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
        coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], FW write 0x66 = 0x%x\n",
-                 h2c_parameter[0]<<16|h2c_parameter[1]<<8|h2c_parameter[2]);
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], FW write 0x66 = 0x%x\n",
+                   h2c_parameter[0] << 16 |
+                   h2c_parameter[1] << 8 |
+                   h2c_parameter[2]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
 }
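
The debug line above logs the three wifi-channel-info bytes packed big-endian into a single value; the same three bytes are then handed to btc_fill_h2c() as host-to-card (H2C) command 0x66. A standalone sketch of that packing, with sample channel values that are purely illustrative:

#include <stdio.h>
#include <stdint.h>

/* Pack the three channel-info bytes exactly as the debug print does:
 * byte 0 in bits 23..16, byte 1 in bits 15..8, byte 2 in bits 7..0.
 */
static uint32_t pack_h2c_0x66(const uint8_t p[3])
{
	return (uint32_t)p[0] << 16 | (uint32_t)p[1] << 8 | p[2];
}

int main(void)
{
	uint8_t h2c_parameter[3] = { 0x01, 0x0b, 0x14 };	/* sample values */

	printf("[BTCoex], FW write 0x66 = 0x%x\n",
	       (unsigned)pack_h2c_0x66(h2c_parameter));
	return 0;
}
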
@@ -2702,8 +2690,8 @@ void ex_halbtc8821a1ant_special_packet_notify(struct btc_coexist *btcoexist,
 
        if (BTC_PACKET_DHCP == type ||
            BTC_PACKET_EAPOL == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], special Packet(%d) notify\n", type);
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], special Packet(%d) notify\n", type);
                btc8821a1ant_act_wifi_conn_sp_pkt(btcoexist);
        }
 }
@@ -2727,19 +2715,19 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
                rsp_source = BT_INFO_SRC_8821A_1ANT_WIFI_FW;
        coex_sta->bt_info_c2h_cnt[rsp_source]++;
 
-       BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                 "[BTCoex], Bt info[%d], length = %d, hex data = [",
-                 rsp_source, length);
+       btc_iface_dbg(INTF_NOTIFY,
+                     "[BTCoex], Bt info[%d], length = %d, hex data = [",
+                     rsp_source, length);
        for (i = 0; i < length; i++) {
                coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
                if (i == 1)
                        bt_info = tmp_buf[i];
                if (i == length-1) {
-                       BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                                 "0x%02x]\n", tmp_buf[i]);
+                       btc_iface_dbg(INTF_NOTIFY,
+                                     "0x%02x]\n", tmp_buf[i]);
                } else {
-                       BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                                 "0x%02x, ", tmp_buf[i]);
+                       btc_iface_dbg(INTF_NOTIFY,
+                                     "0x%02x, ", tmp_buf[i]);
                }
        }
 
@@ -2756,8 +2744,8 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
                /* Here we need to resend some wifi info to BT*/
                /* because bt is reset and loss of the info.*/
                if (coex_sta->bt_info_ext & BIT1) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
                        btcoexist->btc_get(btcoexist,
                                           BTC_GET_BL_WIFI_CONNECTED,
                                           &wifi_connected);
@@ -2773,8 +2761,8 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
                if ((coex_sta->bt_info_ext & BIT3) && !wifi_under_5g) {
                        if (!btcoexist->manual_control &&
                            !btcoexist->stop_coex_dm) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
                                halbtc8821a1ant_ignore_wlan_act(btcoexist,
                                                                FORCE_EXEC,
                                                                false);
@@ -2782,8 +2770,8 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
                }
 #if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 0)
                if (!(coex_sta->bt_info_ext & BIT4)) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], BT ext info bit4 check, set BT to enable Auto Report!!\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], BT ext info bit4 check, set BT to enable Auto Report!!\n");
                        halbtc8821a1ant_bt_auto_report(btcoexist,
                                                       FORCE_EXEC, true);
                }
@@ -2828,28 +2816,28 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
 
        if (!(bt_info&BT_INFO_8821A_1ANT_B_CONNECTION)) {
                coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE;
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
        } else if (bt_info == BT_INFO_8821A_1ANT_B_CONNECTION) {
                /* connection exists but no busy*/
                coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE;
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
        } else if ((bt_info&BT_INFO_8821A_1ANT_B_SCO_ESCO) ||
                (bt_info&BT_INFO_8821A_1ANT_B_SCO_BUSY)) {
                coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_SCO_BUSY;
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
        } else if (bt_info&BT_INFO_8821A_1ANT_B_ACL_BUSY) {
                if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY != coex_dm->bt_status)
                        coex_dm->auto_tdma_adjust = false;
                coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_ACL_BUSY;
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
        } else {
                coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_MAX;
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
        }
 
        if ((BT_8821A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
@@ -2866,8 +2854,8 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
 
 void ex_halbtc8821a1ant_halt_notify(struct btc_coexist *btcoexist)
 {
-       BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                 "[BTCoex], Halt notify\n");
+       btc_iface_dbg(INTF_NOTIFY,
+                     "[BTCoex], Halt notify\n");
 
        btcoexist->stop_coex_dm = true;
 
@@ -2885,20 +2873,20 @@ void ex_halbtc8821a1ant_halt_notify(struct btc_coexist *btcoexist)
 
 void ex_halbtc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
 {
-       BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                 "[BTCoex], Pnp notify\n");
+       btc_iface_dbg(INTF_NOTIFY,
+                     "[BTCoex], Pnp notify\n");
 
        if (BTC_WIFI_PNP_SLEEP == pnp_state) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], Pnp notify to SLEEP\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], Pnp notify to SLEEP\n");
                btcoexist->stop_coex_dm = true;
                halbtc8821a1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
                halbtc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
                                                 0x0, 0x0);
                halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 9);
        } else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], Pnp notify to WAKE UP\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], Pnp notify to WAKE UP\n");
                btcoexist->stop_coex_dm = false;
                halbtc8821a1ant_init_hw_config(btcoexist, false);
                halbtc8821a1ant_init_coex_dm(btcoexist);
@@ -2914,33 +2902,33 @@ ex_halbtc8821a1ant_periodical(
        struct btc_board_info *board_info = &btcoexist->board_info;
        struct btc_stack_info *stack_info = &btcoexist->stack_info;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                 "[BTCoex], ==========================Periodical===========================\n");
+       btc_alg_dbg(ALGO_TRACE,
+                   "[BTCoex], ==========================Periodical===========================\n");
 
        if (dis_ver_info_cnt <= 5) {
                dis_ver_info_cnt += 1;
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                         "[BTCoex], ****************************************************************\n");
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                         "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
-                         board_info->pg_ant_num,
-                         board_info->btdm_ant_num,
-                         board_info->btdm_ant_pos);
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                         "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
-                         ((stack_info->profile_notified) ? "Yes" : "No"),
-                         stack_info->hci_version);
+               btc_iface_dbg(INTF_INIT,
+                             "[BTCoex], ****************************************************************\n");
+               btc_iface_dbg(INTF_INIT,
+                             "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+                             board_info->pg_ant_num,
+                             board_info->btdm_ant_num,
+                             board_info->btdm_ant_pos);
+               btc_iface_dbg(INTF_INIT,
+                             "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
+                             stack_info->profile_notified ? "Yes" : "No",
+                             stack_info->hci_version);
                btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
                                   &bt_patch_ver);
                btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                         "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
-                       glcoex_ver_date_8821a_1ant,
-                       glcoex_ver_8821a_1ant,
-                       fw_ver, bt_patch_ver,
-                       bt_patch_ver);
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                         "[BTCoex], ****************************************************************\n");
+               btc_iface_dbg(INTF_INIT,
+                             "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+                             glcoex_ver_date_8821a_1ant,
+                             glcoex_ver_8821a_1ant,
+                             fw_ver, bt_patch_ver,
+                             bt_patch_ver);
+               btc_iface_dbg(INTF_INIT,
+                             "[BTCoex], ****************************************************************\n");
        }
 
 #if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 0)
index 044d914291c02b6f518611a5ad8223ef98cc6035..81f843bba77160db94b5467920e3100db299b9c4 100644 (file)
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
@@ -80,28 +80,28 @@ static u8 halbtc8821a2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
                                   BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT;
                        if (bt_rssi >= tmp) {
                                bt_rssi_state = BTC_RSSI_STATE_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state switch to High\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state switch to High\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state stay at Low\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state stay at Low\n");
                        }
                } else {
                        if (bt_rssi < rssi_thresh) {
                                bt_rssi_state = BTC_RSSI_STATE_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state switch to Low\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state switch to Low\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state stay at High\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state stay at High\n");
                        }
                }
        } else if (level_num == 3) {
                if (rssi_thresh > rssi_thresh1) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                 "[BTCoex], BT Rssi thresh error!!\n");
+                       btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                   "[BTCoex], BT Rssi thresh error!!\n");
                        return coex_sta->pre_bt_rssi_state;
                }
 
@@ -110,12 +110,12 @@ static u8 halbtc8821a2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
                        if (bt_rssi >=
                            (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
                                bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state switch to Medium\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state switch to Medium\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state stay at Low\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state stay at Low\n");
                        }
                } else if ((coex_sta->pre_bt_rssi_state ==
                           BTC_RSSI_STATE_MEDIUM) ||
@@ -125,26 +125,26 @@ static u8 halbtc8821a2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
                            (rssi_thresh1 +
                             BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
                                bt_rssi_state = BTC_RSSI_STATE_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state switch to High\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state switch to High\n");
                        } else if (bt_rssi < rssi_thresh) {
                                bt_rssi_state = BTC_RSSI_STATE_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state switch to Low\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state switch to Low\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state stay at Medium\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state stay at Medium\n");
                        }
                } else {
                        if (bt_rssi < rssi_thresh1) {
                                bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state switch to Medium\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state switch to Medium\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-                                         "[BTCoex], BT Rssi state stay at High\n");
+                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
+                                           "[BTCoex], BT Rssi state stay at High\n");
                        }
                }
        }
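
The RSSI state machines being converted in these hunks implement plain hysteresis: a reading only promotes the state once it clears the threshold plus BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT, while demotion happens as soon as it drops below the bare threshold, so a signal hovering near the boundary does not flap between states. A minimal two-level sketch of the same idea (the driver additionally distinguishes "switch to" from "stay at" states and supports a three-level variant; the names below are hypothetical):

enum rssi_state { RSSI_LOW, RSSI_HIGH };

/* Two-level hysteresis as in halbtc8821a2ant_bt_rssi_state():
 * leaving LOW requires thresh + tol, leaving HIGH only < thresh.
 */
static enum rssi_state rssi_update(enum rssi_state prev, int rssi,
				   int thresh, int tol)
{
	if (prev == RSSI_LOW)
		return rssi >= thresh + tol ? RSSI_HIGH : RSSI_LOW;
	return rssi < thresh ? RSSI_LOW : RSSI_HIGH;
}
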
@@ -171,32 +171,28 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
                        if (wifi_rssi >=
                            (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
                                wifi_rssi_state = BTC_RSSI_STATE_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state switch to High\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state switch to High\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state stay at Low\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state stay at Low\n");
                        }
                } else {
                        if (wifi_rssi < rssi_thresh) {
                                wifi_rssi_state = BTC_RSSI_STATE_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state switch to Low\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state switch to Low\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state stay at High\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state stay at High\n");
                        }
                }
        } else if (level_num == 3) {
                if (rssi_thresh > rssi_thresh1) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE,
-                                 "[BTCoex], wifi RSSI thresh error!!\n");
+                       btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                   "[BTCoex], wifi RSSI thresh error!!\n");
                        return coex_sta->pre_wifi_rssi_state[index];
                }
 
@@ -207,14 +203,12 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
                        if (wifi_rssi >=
                            (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
                                wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state switch to Medium\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state switch to Medium\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state stay at Low\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state stay at Low\n");
                        }
                } else if ((coex_sta->pre_wifi_rssi_state[index] ==
                           BTC_RSSI_STATE_MEDIUM) ||
@@ -223,31 +217,26 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
                        if (wifi_rssi >= (rssi_thresh1 +
                            BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
                                wifi_rssi_state = BTC_RSSI_STATE_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state switch to High\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state switch to High\n");
                        } else if (wifi_rssi < rssi_thresh) {
                                wifi_rssi_state = BTC_RSSI_STATE_LOW;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state switch to Low\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state switch to Low\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state stay at Medium\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state stay at Medium\n");
                        }
                } else {
                        if (wifi_rssi < rssi_thresh1) {
                                wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state switch to Medium\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state switch to Medium\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_WIFI_RSSI_STATE,
-                                         "[BTCoex], wifi RSSI state stay at High\n");
+                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+                                           "[BTCoex], wifi RSSI state stay at High\n");
                        }
                }
        }
@@ -279,26 +268,26 @@ static void btc8821a2ant_mon_bt_en_dis(struct btc_coexist *btcoexist)
                bt_disabled = false;
                btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
                                   &bt_disabled);
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-                         "[BTCoex], BT is enabled !!\n");
+               btc_alg_dbg(ALGO_BT_MONITOR,
+                           "[BTCoex], BT is enabled !!\n");
        } else {
                bt_disable_cnt++;
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-                         "[BTCoex], bt all counters = 0, %d times!!\n",
-                         bt_disable_cnt);
+               btc_alg_dbg(ALGO_BT_MONITOR,
+                           "[BTCoex], bt all counters = 0, %d times!!\n",
+                           bt_disable_cnt);
                if (bt_disable_cnt >= 2) {
                        bt_disabled = true;
                        btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
                                           &bt_disabled);
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-                                 "[BTCoex], BT is disabled !!\n");
+                       btc_alg_dbg(ALGO_BT_MONITOR,
+                                   "[BTCoex], BT is disabled !!\n");
                }
        }
        if (pre_bt_disabled != bt_disabled) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-                         "[BTCoex], BT is from %s to %s!!\n",
-                         (pre_bt_disabled ? "disabled" : "enabled"),
-                         (bt_disabled ? "disabled" : "enabled"));
+               btc_alg_dbg(ALGO_BT_MONITOR,
+                           "[BTCoex], BT is from %s to %s!!\n",
+                           (pre_bt_disabled ? "disabled" : "enabled"),
+                           (bt_disabled ? "disabled" : "enabled"));
                pre_bt_disabled = bt_disabled;
        }
 }
@@ -324,12 +313,12 @@ static void halbtc8821a2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
        coex_sta->low_priority_tx = reg_lp_tx;
        coex_sta->low_priority_rx = reg_lp_rx;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-                 "[BTCoex], High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
-                 reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-                 "[BTCoex], Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
-                 reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
+       btc_alg_dbg(ALGO_BT_MONITOR,
+                   "[BTCoex], High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
+                   reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
+       btc_alg_dbg(ALGO_BT_MONITOR,
+                   "[BTCoex], Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
+                   reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
 
        /* reset counter */
        btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
@@ -343,9 +332,9 @@ static void halbtc8821a2ant_query_bt_info(struct btc_coexist *btcoexist)
 
        h2c_parameter[0] |= BIT0;       /* trigger */
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
-                 h2c_parameter[0]);
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+                   h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
 }
@@ -368,8 +357,8 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
                stack_info->bt_link_exist = coex_sta->bt_link_exist;
 
        if (!coex_sta->bt_link_exist) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], No profile exists!!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], No profile exists!!!\n");
                return algorithm;
        }
 
@@ -384,26 +373,26 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
 
        if (num_of_diff_profile == 1) {
                if (coex_sta->sco_exist) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], SCO only\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], SCO only\n");
                        algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
                } else {
                        if (coex_sta->hid_exist) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "[BTCoex], HID only\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "[BTCoex], HID only\n");
                                algorithm = BT_8821A_2ANT_COEX_ALGO_HID;
                        } else if (coex_sta->a2dp_exist) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "[BTCoex], A2DP only\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "[BTCoex], A2DP only\n");
                                algorithm = BT_8821A_2ANT_COEX_ALGO_A2DP;
                        } else if (coex_sta->pan_exist) {
                                if (bt_hs_on) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], PAN(HS) only\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], PAN(HS) only\n");
                                        algorithm = BT_8821A_2ANT_COEX_ALGO_PANHS;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], PAN(EDR) only\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], PAN(EDR) only\n");
                                        algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR;
                                }
                        }
@@ -411,50 +400,50 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
        } else if (num_of_diff_profile == 2) {
                if (coex_sta->sco_exist) {
                        if (coex_sta->hid_exist) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "[BTCoex], SCO + HID\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "[BTCoex], SCO + HID\n");
                                algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
                        } else if (coex_sta->a2dp_exist) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "[BTCoex], SCO + A2DP ==> SCO\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "[BTCoex], SCO + A2DP ==> SCO\n");
                                algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
                        } else if (coex_sta->pan_exist) {
                                if (bt_hs_on) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], SCO + PAN(HS)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], SCO + PAN(HS)\n");
                                        algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], SCO + PAN(EDR)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], SCO + PAN(EDR)\n");
                                        algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
                                }
                        }
                } else {
                        if (coex_sta->hid_exist &&
                            coex_sta->a2dp_exist) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "[BTCoex], HID + A2DP\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "[BTCoex], HID + A2DP\n");
                                algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP;
                        } else if (coex_sta->hid_exist &&
                                coex_sta->pan_exist) {
                                if (bt_hs_on) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], HID + PAN(HS)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], HID + PAN(HS)\n");
                                        algorithm =  BT_8821A_2ANT_COEX_ALGO_HID;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], HID + PAN(EDR)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], HID + PAN(EDR)\n");
                                        algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
                                }
                        } else if (coex_sta->pan_exist &&
                                coex_sta->a2dp_exist) {
                                if (bt_hs_on) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], A2DP + PAN(HS)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], A2DP + PAN(HS)\n");
                                        algorithm = BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], A2DP + PAN(EDR)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], A2DP + PAN(EDR)\n");
                                        algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP;
                                }
                        }
@@ -463,29 +452,29 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
                if (coex_sta->sco_exist) {
                        if (coex_sta->hid_exist &&
                            coex_sta->a2dp_exist) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                         "[BTCoex], SCO + HID + A2DP ==> HID\n");
+                               btc_alg_dbg(ALGO_TRACE,
+                                           "[BTCoex], SCO + HID + A2DP ==> HID\n");
                                algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
                        } else if (coex_sta->hid_exist &&
                                coex_sta->pan_exist) {
                                if (bt_hs_on) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], SCO + HID + PAN(HS)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], SCO + HID + PAN(HS)\n");
                                        algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], SCO + HID + PAN(EDR)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], SCO + HID + PAN(EDR)\n");
                                        algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
                                }
                        } else if (coex_sta->pan_exist &&
                                   coex_sta->a2dp_exist) {
                                if (bt_hs_on) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], SCO + A2DP + PAN(HS)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], SCO + A2DP + PAN(HS)\n");
                                        algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n");
                                        algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
                                }
                        }
@@ -494,12 +483,12 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
                            coex_sta->pan_exist &&
                            coex_sta->a2dp_exist) {
                                if (bt_hs_on) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], HID + A2DP + PAN(HS)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], HID + A2DP + PAN(HS)\n");
                                        algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP;
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], HID + A2DP + PAN(EDR)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], HID + A2DP + PAN(EDR)\n");
                                        algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
                                }
                        }
@@ -510,12 +499,12 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
                            coex_sta->pan_exist &&
                            coex_sta->a2dp_exist) {
                                if (bt_hs_on) {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n");
 
                                } else {
-                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                                 "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
+                                       btc_alg_dbg(ALGO_TRACE,
+                                                   "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
                                        algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
                                }
                        }
@@ -544,15 +533,15 @@ static bool halbtc8821a2ant_need_to_dec_bt_pwr(struct btc_coexist *btcoexist)
        if (wifi_connected) {
                if (bt_hs_on) {
                        if (bt_hs_rssi > 37) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-                                         "[BTCoex], Need to decrease bt power for HS mode!!\n");
+                               btc_alg_dbg(ALGO_TRACE_FW,
+                                           "[BTCoex], Need to decrease bt power for HS mode!!\n");
                                ret = true;
                        }
                } else {
                        if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
                            (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-                                         "[BTCoex], Need to decrease bt power for Wifi is connected!!\n");
+                               btc_alg_dbg(ALGO_TRACE_FW,
+                                           "[BTCoex], Need to decrease bt power for Wifi is connected!!\n");
                                ret = true;
                        }
                }
@@ -570,10 +559,10 @@ static void btc8821a2ant_set_fw_dac_swing_lev(struct btc_coexist *btcoexist,
         */
        h2c_parameter[0] = dac_swing_lvl;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swing_lvl);
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swing_lvl);
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
 }
@@ -588,9 +577,9 @@ static void halbtc8821a2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
        if (dec_bt_pwr)
                h2c_parameter[0] |= BIT1;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], decrease Bt Power : %s, FW write 0x62 = 0x%x\n",
-                 (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], decrease Bt Power : %s, FW write 0x62 = 0x%x\n",
+                   (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
 }
@@ -598,16 +587,16 @@ static void halbtc8821a2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
 static void halbtc8821a2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
                                       bool force_exec, bool dec_bt_pwr)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-                 "[BTCoex], %s Dec BT power = %s\n",
-                 (force_exec ? "force to" : ""),
-                 ((dec_bt_pwr) ? "ON" : "OFF"));
+       btc_alg_dbg(ALGO_TRACE_FW,
+                   "[BTCoex], %s Dec BT power = %s\n",
+                   (force_exec ? "force to" : ""),
+                   ((dec_bt_pwr) ? "ON" : "OFF"));
        coex_dm->cur_dec_bt_pwr = dec_bt_pwr;
 
        if (!force_exec) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], pre_dec_bt_pwr = %d, cur_dec_bt_pwr = %d\n",
-                         coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], pre_dec_bt_pwr = %d, cur_dec_bt_pwr = %d\n",
+                           coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
 
                if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr)
                        return;
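
halbtc8821a2ant_dec_bt_pwr() above and the other set-wrappers below share one idiom: record the requested value in a cur_* field and, unless force_exec is set, skip the firmware write when it equals the previously programmed pre_* value. A generic sketch of that cache-and-compare pattern (all names hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* Remember the last value actually programmed and only touch the
 * firmware when it changes, unless the caller forces the write.
 */
struct cached_bool {
	bool pre, cur;
};

static void set_dec_bt_pwr(struct cached_bool *c, bool force_exec, bool on)
{
	c->cur = on;
	if (!force_exec && c->pre == c->cur)
		return;			/* nothing changed, skip the write */
	printf("FW write: dec_bt_pwr = %s\n", on ? "ON" : "OFF");
	c->pre = c->cur;		/* remember what we programmed */
}
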
@@ -627,10 +616,10 @@ static void btc8821a2ant_set_fw_bt_lna_constr(struct btc_coexist *btcoexist,
        if (bt_lna_cons_on)
                h2c_parameter[1] |= BIT0;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], set BT LNA Constrain: %s, FW write 0x69 = 0x%x\n",
-                 (bt_lna_cons_on ? "ON!!" : "OFF!!"),
-                 h2c_parameter[0]<<8|h2c_parameter[1]);
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], set BT LNA Constrain: %s, FW write 0x69 = 0x%x\n",
+                   bt_lna_cons_on ? "ON!!" : "OFF!!",
+                   h2c_parameter[0] << 8 | h2c_parameter[1]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x69, 2, h2c_parameter);
 }
@@ -638,17 +627,17 @@ static void btc8821a2ant_set_fw_bt_lna_constr(struct btc_coexist *btcoexist,
 static void btc8821a2_set_bt_lna_const(struct btc_coexist *btcoexist,
                                       bool force_exec, bool bt_lna_cons_on)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-                 "[BTCoex], %s BT Constrain = %s\n",
-                 (force_exec ? "force" : ""),
-                 ((bt_lna_cons_on) ? "ON" : "OFF"));
+       btc_alg_dbg(ALGO_TRACE_FW,
+                   "[BTCoex], %s BT Constrain = %s\n",
+                   (force_exec ? "force" : ""),
+                   ((bt_lna_cons_on) ? "ON" : "OFF"));
        coex_dm->cur_bt_lna_constrain = bt_lna_cons_on;
 
        if (!force_exec) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], pre_bt_lna_constrain = %d,cur_bt_lna_constrain = %d\n",
-                         coex_dm->pre_bt_lna_constrain,
-                         coex_dm->cur_bt_lna_constrain);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], pre_bt_lna_constrain = %d,cur_bt_lna_constrain = %d\n",
+                           coex_dm->pre_bt_lna_constrain,
+                           coex_dm->cur_bt_lna_constrain);
 
                if (coex_dm->pre_bt_lna_constrain ==
                    coex_dm->cur_bt_lna_constrain)
@@ -669,10 +658,10 @@ static void halbtc8821a2ant_set_fw_bt_psd_mode(struct btc_coexist *btcoexist,
 
        h2c_parameter[1] = bt_psd_mode;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], set BT PSD mode = 0x%x, FW write 0x69 = 0x%x\n",
-                 h2c_parameter[1],
-                 h2c_parameter[0]<<8|h2c_parameter[1]);
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], set BT PSD mode = 0x%x, FW write 0x69 = 0x%x\n",
+                   h2c_parameter[1],
+                   h2c_parameter[0] << 8 | h2c_parameter[1]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x69, 2, h2c_parameter);
 }
@@ -680,15 +669,15 @@ static void halbtc8821a2ant_set_fw_bt_psd_mode(struct btc_coexist *btcoexist,
 static void halbtc8821a2ant_set_bt_psd_mode(struct btc_coexist *btcoexist,
                                            bool force_exec, u8 bt_psd_mode)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-                 "[BTCoex], %s BT PSD mode = 0x%x\n",
-                 (force_exec ? "force" : ""), bt_psd_mode);
+       btc_alg_dbg(ALGO_TRACE_FW,
+                   "[BTCoex], %s BT PSD mode = 0x%x\n",
+                   (force_exec ? "force" : ""), bt_psd_mode);
        coex_dm->cur_bt_psd_mode = bt_psd_mode;
 
        if (!force_exec) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], pre_bt_psd_mode = 0x%x, cur_bt_psd_mode = 0x%x\n",
-                         coex_dm->pre_bt_psd_mode, coex_dm->cur_bt_psd_mode);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], pre_bt_psd_mode = 0x%x, cur_bt_psd_mode = 0x%x\n",
+                           coex_dm->pre_bt_psd_mode, coex_dm->cur_bt_psd_mode);
 
                if (coex_dm->pre_bt_psd_mode == coex_dm->cur_bt_psd_mode)
                        return;
@@ -709,10 +698,10 @@ static void halbtc8821a2ant_set_bt_auto_report(struct btc_coexist *btcoexist,
        if (enable_auto_report)
                h2c_parameter[0] |= BIT0;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
-                 (enable_auto_report ? "Enabled!!" : "Disabled!!"),
-                 h2c_parameter[0]);
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
+                   (enable_auto_report ? "Enabled!!" : "Disabled!!"),
+                   h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
 }
@@ -721,17 +710,17 @@ static void halbtc8821a2ant_bt_auto_report(struct btc_coexist *btcoexist,
                                           bool force_exec,
                                           bool enable_auto_report)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-                 "[BTCoex], %s BT Auto report = %s\n",
-                 (force_exec ? "force to" : ""),
-                 ((enable_auto_report) ? "Enabled" : "Disabled"));
+       btc_alg_dbg(ALGO_TRACE_FW,
+                   "[BTCoex], %s BT Auto report = %s\n",
+                   (force_exec ? "force to" : ""),
+                   ((enable_auto_report) ? "Enabled" : "Disabled"));
        coex_dm->cur_bt_auto_report = enable_auto_report;
 
        if (!force_exec) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n",
-                         coex_dm->pre_bt_auto_report,
-                         coex_dm->cur_bt_auto_report);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n",
+                           coex_dm->pre_bt_auto_report,
+                           coex_dm->cur_bt_auto_report);
 
                if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
                        return;
@@ -746,16 +735,16 @@ static void halbtc8821a2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
                                             bool force_exec,
                                             u8 fw_dac_swing_lvl)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-                 "[BTCoex], %s set FW Dac Swing level = %d\n",
-                 (force_exec ? "force to" : ""), fw_dac_swing_lvl);
+       btc_alg_dbg(ALGO_TRACE_FW,
+                   "[BTCoex], %s set FW Dac Swing level = %d\n",
+                   (force_exec ? "force to" : ""), fw_dac_swing_lvl);
        coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;
 
        if (!force_exec) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], pre_fw_dac_swing_lvl = %d, cur_fw_dac_swing_lvl = %d\n",
-                         coex_dm->pre_fw_dac_swing_lvl,
-                         coex_dm->cur_fw_dac_swing_lvl);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], pre_fw_dac_swing_lvl = %d, cur_fw_dac_swing_lvl = %d\n",
+                           coex_dm->pre_fw_dac_swing_lvl,
+                           coex_dm->cur_fw_dac_swing_lvl);
 
                if (coex_dm->pre_fw_dac_swing_lvl ==
                    coex_dm->cur_fw_dac_swing_lvl)
@@ -773,8 +762,8 @@ static void btc8821a2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
 {
        if (rx_rf_shrink_on) {
                /* Shrink RF Rx LPF corner */
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                         "[BTCoex], Shrink RF Rx LPF corner!!\n");
+               btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                           "[BTCoex], Shrink RF Rx LPF corner!!\n");
                btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
                                          0xfffff, 0xffffc);
        } else {
@@ -782,8 +771,8 @@ static void btc8821a2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
                 * After initialized, we can use coex_dm->bt_rf0x1e_backup
                 */
                if (btcoexist->initilized) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                                 "[BTCoex], Resume RF Rx LPF corner!!\n");
+                       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                                   "[BTCoex], Resume RF Rx LPF corner!!\n");
                        btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A,
                                                  0x1e, 0xfffff,
                                                   coex_dm->bt_rf0x1e_backup);
@@ -794,17 +783,17 @@ static void btc8821a2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
 static void halbtc8821a2ant_RfShrink(struct btc_coexist *btcoexist,
                                     bool force_exec, bool rx_rf_shrink_on)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-                 "[BTCoex], %s turn Rx RF Shrink = %s\n",
-                 (force_exec ? "force to" : ""),
-                 ((rx_rf_shrink_on) ? "ON" : "OFF"));
+       btc_alg_dbg(ALGO_TRACE_SW,
+                   "[BTCoex], %s turn Rx RF Shrink = %s\n",
+                   (force_exec ? "force to" : ""),
+                   ((rx_rf_shrink_on) ? "ON" : "OFF"));
        coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
 
        if (!force_exec) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-                         "[BTCoex], pre_rf_rx_lpf_shrink = %d, cur_rf_rx_lpf_shrink = %d\n",
-                         coex_dm->pre_rf_rx_lpf_shrink,
-                         coex_dm->cur_rf_rx_lpf_shrink);
+               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+                           "[BTCoex], pre_rf_rx_lpf_shrink = %d, cur_rf_rx_lpf_shrink = %d\n",
+                           coex_dm->pre_rf_rx_lpf_shrink,
+                           coex_dm->cur_rf_rx_lpf_shrink);
 
                if (coex_dm->pre_rf_rx_lpf_shrink ==
                    coex_dm->cur_rf_rx_lpf_shrink)
@@ -835,9 +824,9 @@ static void btc8821a2ant_SetSwPenTxRateAdapt(struct btc_coexist *btcoexist,
                h2c_parameter[5] = 0xf9;
        }
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], set WiFi Low-Penalty Retry: %s",
-                 (low_penalty_ra ? "ON!!" : "OFF!!"));
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], set WiFi Low-Penalty Retry: %s",
+                   (low_penalty_ra ? "ON!!" : "OFF!!"));
 
        btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
 }
@@ -846,17 +835,17 @@ static void halbtc8821a2ant_low_penalty_ra(struct btc_coexist *btcoexist,
                                           bool force_exec, bool low_penalty_ra)
 {
        /*return;*/
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-                 "[BTCoex], %s turn LowPenaltyRA = %s\n",
-                 (force_exec ? "force to" : ""),
-                 ((low_penalty_ra) ? "ON" : "OFF"));
+       btc_alg_dbg(ALGO_TRACE_SW,
+                   "[BTCoex], %s turn LowPenaltyRA = %s\n",
+                   (force_exec ? "force to" : ""),
+                   ((low_penalty_ra) ? "ON" : "OFF"));
        coex_dm->cur_low_penalty_ra = low_penalty_ra;
 
        if (!force_exec) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-                         "[BTCoex], pre_low_penalty_ra = %d, cur_low_penalty_ra = %d\n",
-                         coex_dm->pre_low_penalty_ra,
-                         coex_dm->cur_low_penalty_ra);
+               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+                           "[BTCoex], pre_low_penalty_ra = %d, cur_low_penalty_ra = %d\n",
+                           coex_dm->pre_low_penalty_ra,
+                           coex_dm->cur_low_penalty_ra);
 
                if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
                        return;
@@ -872,8 +861,8 @@ static void halbtc8821a2ant_set_dac_swing_reg(struct btc_coexist *btcoexist,
 {
        u8 val = (u8)level;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                 "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                   "[BTCoex], Write SwDacSwing = 0x%x\n", level);
        btcoexist->btc_write_1byte_bitmask(btcoexist, 0xc5b, 0x3e, val);
 }
 
@@ -891,21 +880,21 @@ static void halbtc8821a2ant_dac_swing(struct btc_coexist *btcoexist,
                                      bool force_exec, bool dac_swing_on,
                                      u32 dac_swing_lvl)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-                 "[BTCoex], %s turn DacSwing = %s, dac_swing_lvl = 0x%x\n",
-                 (force_exec ? "force to" : ""),
-                 ((dac_swing_on) ? "ON" : "OFF"),
-                 dac_swing_lvl);
+       btc_alg_dbg(ALGO_TRACE_SW,
+                   "[BTCoex], %s turn DacSwing = %s, dac_swing_lvl = 0x%x\n",
+                   (force_exec ? "force to" : ""),
+                   ((dac_swing_on) ? "ON" : "OFF"),
+                   dac_swing_lvl);
        coex_dm->cur_dac_swing_on = dac_swing_on;
        coex_dm->cur_dac_swing_lvl = dac_swing_lvl;
 
        if (!force_exec) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-                         "[BTCoex], pre_dac_swing_on = %d, pre_dac_swing_lvl = 0x%x, cur_dac_swing_on = %d, cur_dac_swing_lvl = 0x%x\n",
-                         coex_dm->pre_dac_swing_on,
-                         coex_dm->pre_dac_swing_lvl,
-                         coex_dm->cur_dac_swing_on,
-                         coex_dm->cur_dac_swing_lvl);
+               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+                           "[BTCoex], pre_dac_swing_on = %d, pre_dac_swing_lvl = 0x%x, cur_dac_swing_on = %d, cur_dac_swing_lvl = 0x%x\n",
+                           coex_dm->pre_dac_swing_on,
+                           coex_dm->pre_dac_swing_lvl,
+                           coex_dm->cur_dac_swing_on,
+                           coex_dm->cur_dac_swing_lvl);
 
                if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
                    (coex_dm->pre_dac_swing_lvl ==
@@ -924,12 +913,12 @@ static void halbtc8821a2ant_set_adc_back_off(struct btc_coexist *btcoexist,
                                             bool adc_back_off)
 {
        if (adc_back_off) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                         "[BTCoex], BB BackOff Level On!\n");
+               btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                           "[BTCoex], BB BackOff Level On!\n");
                btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x3);
        } else {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                         "[BTCoex], BB BackOff Level Off!\n");
+               btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                           "[BTCoex], BB BackOff Level Off!\n");
                btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x1);
        }
 }
@@ -937,16 +926,17 @@ static void halbtc8821a2ant_set_adc_back_off(struct btc_coexist *btcoexist,
 static void halbtc8821a2ant_adc_back_off(struct btc_coexist *btcoexist,
                                         bool force_exec, bool adc_back_off)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-                 "[BTCoex], %s turn AdcBackOff = %s\n",
-                 (force_exec ? "force to" : ""),
-                 ((adc_back_off) ? "ON" : "OFF"));
+       btc_alg_dbg(ALGO_TRACE_SW,
+                   "[BTCoex], %s turn AdcBackOff = %s\n",
+                   (force_exec ? "force to" : ""),
+                   ((adc_back_off) ? "ON" : "OFF"));
        coex_dm->cur_adc_back_off = adc_back_off;
 
        if (!force_exec) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-                         "[BTCoex], pre_adc_back_off = %d, cur_adc_back_off = %d\n",
-                         coex_dm->pre_adc_back_off, coex_dm->cur_adc_back_off);
+               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+                           "[BTCoex], pre_adc_back_off = %d, cur_adc_back_off = %d\n",
+                           coex_dm->pre_adc_back_off,
+                           coex_dm->cur_adc_back_off);
 
                if (coex_dm->pre_adc_back_off == coex_dm->cur_adc_back_off)
                        return;
@@ -960,20 +950,20 @@ static void halbtc8821a2ant_set_coex_table(struct btc_coexist *btcoexist,
                                           u32 val0x6c0, u32 val0x6c4,
                                           u32 val0x6c8, u8 val0x6cc)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                 "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
+       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                   "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
        btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                 "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
+       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                   "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
        btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                 "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
+       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                   "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
        btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-                 "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
+       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+                   "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
        btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
 }
 
@@ -981,28 +971,28 @@ static void halbtc8821a2ant_coex_table(struct btc_coexist *btcoexist,
                                       bool force_exec, u32 val0x6c0,
                                       u32 val0x6c4, u32 val0x6c8, u8 val0x6cc)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-                 "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
-                 (force_exec ? "force to" : ""),
-                 val0x6c0, val0x6c4, val0x6c8, val0x6cc);
+       btc_alg_dbg(ALGO_TRACE_SW,
+                   "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
+                   (force_exec ? "force to" : ""),
+                   val0x6c0, val0x6c4, val0x6c8, val0x6cc);
        coex_dm->cur_val0x6c0 = val0x6c0;
        coex_dm->cur_val0x6c4 = val0x6c4;
        coex_dm->cur_val0x6c8 = val0x6c8;
        coex_dm->cur_val0x6cc = val0x6cc;
 
        if (!force_exec) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-                         "[BTCoex], pre_val0x6c0 = 0x%x, pre_val0x6c4 = 0x%x, pre_val0x6c8 = 0x%x, pre_val0x6cc = 0x%x !!\n",
-                         coex_dm->pre_val0x6c0,
-                         coex_dm->pre_val0x6c4,
-                         coex_dm->pre_val0x6c8,
-                         coex_dm->pre_val0x6cc);
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-                         "[BTCoex], cur_val0x6c0 = 0x%x, cur_val0x6c4 = 0x%x, cur_val0x6c8 = 0x%x, cur_val0x6cc = 0x%x !!\n",
-                         coex_dm->cur_val0x6c0,
-                         coex_dm->cur_val0x6c4,
-                         coex_dm->cur_val0x6c8,
-                         coex_dm->cur_val0x6cc);
+               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+                           "[BTCoex], pre_val0x6c0 = 0x%x, pre_val0x6c4 = 0x%x, pre_val0x6c8 = 0x%x, pre_val0x6cc = 0x%x !!\n",
+                           coex_dm->pre_val0x6c0,
+                           coex_dm->pre_val0x6c4,
+                           coex_dm->pre_val0x6c8,
+                           coex_dm->pre_val0x6cc);
+               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+                           "[BTCoex], cur_val0x6c0 = 0x%x, cur_val0x6c4 = 0x%x, cur_val0x6c8 = 0x%x, cur_val0x6cc = 0x%x !!\n",
+                           coex_dm->cur_val0x6c0,
+                           coex_dm->cur_val0x6c4,
+                           coex_dm->cur_val0x6c8,
+                           coex_dm->cur_val0x6cc);
 
                if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
                    (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
@@ -1027,9 +1017,9 @@ static void halbtc8821a2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoex,
        if (enable)
                h2c_parameter[0] |= BIT0;/* function enable */
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
-                 h2c_parameter[0]);
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
+                   h2c_parameter[0]);
 
        btcoex->btc_fill_h2c(btcoex, 0x63, 1, h2c_parameter);
 }
@@ -1037,16 +1027,16 @@ static void halbtc8821a2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoex,
 static void halbtc8821a2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
                                            bool force_exec, bool enable)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-                 "[BTCoex], %s turn Ignore WlanAct %s\n",
-                 (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+       btc_alg_dbg(ALGO_TRACE_FW,
+                   "[BTCoex], %s turn Ignore WlanAct %s\n",
+                   (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
        coex_dm->cur_ignore_wlan_act = enable;
 
        if (!force_exec) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
-                         coex_dm->pre_ignore_wlan_act,
-                         coex_dm->cur_ignore_wlan_act);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
+                           coex_dm->pre_ignore_wlan_act,
+                           coex_dm->cur_ignore_wlan_act);
 
                if (coex_dm->pre_ignore_wlan_act ==
                    coex_dm->cur_ignore_wlan_act)
@@ -1075,13 +1065,13 @@ static void halbtc8821a2ant_set_fw_pstdma(struct btc_coexist *btcoexist,
        coex_dm->ps_tdma_para[3] = byte4;
        coex_dm->ps_tdma_para[4] = byte5;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
-                 h2c_parameter[0],
-                 h2c_parameter[1]<<24|
-                 h2c_parameter[2]<<16|
-                 h2c_parameter[3]<<8|
-                 h2c_parameter[4]);
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
+                   h2c_parameter[0],
+                   h2c_parameter[1] << 24 |
+                   h2c_parameter[2] << 16 |
+                   h2c_parameter[3] << 8 |
+                   h2c_parameter[4]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
 }
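
The log line above folds h2c_parameter[1..4] into one 32-bit value, most significant byte first, so the whole 5-byte payload prints as a single hex string. A self-contained check of that packing (payload values made up for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t h2c[5] = { 0xe3, 0x1a, 0x1a, 0xe1, 0x90 };
        uint32_t tail = (uint32_t)h2c[1] << 24 |
                        (uint32_t)h2c[2] << 16 |
                        (uint32_t)h2c[3] << 8 |
                        (uint32_t)h2c[4];

        /* Mirrors the "FW write 0x60(5bytes) = 0x%x%08x" format above. */
        printf("FW write 0x60(5bytes) = 0x%x%08x\n",
               h2c[0], (unsigned int)tail);
        return 0;
}
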
@@ -1175,20 +1165,20 @@ static void halbtc8821a2ant_set_ant_path(struct btc_coexist *btcoexist,
 static void halbtc8821a2ant_ps_tdma(struct btc_coexist *btcoexist,
                                    bool force_exec, bool turn_on, u8 type)
 {
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-                 "[BTCoex], %s turn %s PS TDMA, type = %d\n",
-                 (force_exec ? "force to" : ""), (turn_on ? "ON" : "OFF"),
-                 type);
+       btc_alg_dbg(ALGO_TRACE_FW,
+                   "[BTCoex], %s turn %s PS TDMA, type = %d\n",
+                   (force_exec ? "force to" : ""), (turn_on ? "ON" : "OFF"),
+                   type);
        coex_dm->cur_ps_tdma_on = turn_on;
        coex_dm->cur_ps_tdma = type;
 
        if (!force_exec) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], pre_ps_tdma_on = %d, cur_ps_tdma_on = %d!!\n",
-                         coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], pre_ps_tdma = %d, cur_ps_tdma = %d!!\n",
-                         coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], pre_ps_tdma_on = %d, cur_ps_tdma_on = %d!!\n",
+                           coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], pre_ps_tdma = %d, cur_ps_tdma = %d!!\n",
+                           coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
 
                if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
                    (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1374,8 +1364,8 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
                btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
                                   &low_pwr_disable);
 
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], Wifi IPS + BT IPS!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], Wifi IPS + BT IPS!!\n");
 
                halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
                halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
@@ -1392,13 +1382,13 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
                                   &low_pwr_disable);
 
                if (wifi_busy) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Wifi Busy + BT IPS!!\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Wifi Busy + BT IPS!!\n");
                        halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
                                                false, 1);
                } else {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Wifi LPS + BT IPS!!\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Wifi LPS + BT IPS!!\n");
                        halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
                                                false, 1);
                }
@@ -1416,8 +1406,8 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
                btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
                                   &low_pwr_disable);
 
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], Wifi IPS + BT LPS!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], Wifi IPS + BT LPS!!\n");
 
                halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
                halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
@@ -1433,13 +1423,13 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
                        BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable);
 
                if (wifi_busy) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Wifi Busy + BT LPS!!\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Wifi Busy + BT LPS!!\n");
                        halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
                                                false, 1);
                } else {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Wifi LPS + BT LPS!!\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Wifi LPS + BT LPS!!\n");
                        halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
                                                false, 1);
                }
@@ -1458,8 +1448,8 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
                btcoexist->btc_set(btcoexist,
                        BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable);
 
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], Wifi IPS + BT Busy!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], Wifi IPS + BT Busy!!\n");
 
                halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
                halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
@@ -1478,12 +1468,12 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
                                   &low_pwr_disable);
 
                if (wifi_busy) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Wifi Busy + BT Busy!!\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Wifi Busy + BT Busy!!\n");
                        common = false;
                } else {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Wifi LPS + BT Busy!!\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Wifi LPS + BT Busy!!\n");
                        halbtc8821a2ant_ps_tdma(btcoexist,
                                                NORMAL_EXEC, true, 21);
 
@@ -1505,8 +1495,8 @@ static void btc8821a2_int1(struct btc_coexist *btcoexist, bool tx_pause,
                           int result)
 {
        if (tx_pause) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], TxPause = 1\n");
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], TxPause = 1\n");
 
                if (coex_dm->cur_ps_tdma == 71) {
                        halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
@@ -1601,8 +1591,8 @@ static void btc8821a2_int1(struct btc_coexist *btcoexist, bool tx_pause,
                        }
                }
        } else {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], TxPause = 0\n");
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], TxPause = 0\n");
                if (coex_dm->cur_ps_tdma == 5) {
                        halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
                                                true, 71);
@@ -1706,8 +1696,8 @@ static void btc8821a2_int2(struct btc_coexist *btcoexist, bool tx_pause,
                           int result)
 {
        if (tx_pause) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], TxPause = 1\n");
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], TxPause = 1\n");
                if (coex_dm->cur_ps_tdma == 1) {
                        halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
                                                true, 6);
@@ -1796,8 +1786,8 @@ static void btc8821a2_int2(struct btc_coexist *btcoexist, bool tx_pause,
                        }
                }
        } else {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], TxPause = 0\n");
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], TxPause = 0\n");
                if (coex_dm->cur_ps_tdma == 5) {
                        halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
                                                true, 2);
@@ -1892,8 +1882,8 @@ static void btc8821a2_int3(struct btc_coexist *btcoexist, bool tx_pause,
                           int result)
 {
        if (tx_pause) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], TxPause = 1\n");
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], TxPause = 1\n");
                if (coex_dm->cur_ps_tdma == 1) {
                        halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
                                                true, 7);
@@ -1982,8 +1972,8 @@ static void btc8821a2_int3(struct btc_coexist *btcoexist, bool tx_pause,
                        }
                }
        } else {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], TxPause = 0\n");
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], TxPause = 0\n");
                if (coex_dm->cur_ps_tdma == 5) {
                        halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
                                                true, 3);
@@ -2085,13 +2075,13 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
        int             result;
        u8              retry_count = 0;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-                 "[BTCoex], TdmaDurationAdjust()\n");
+       btc_alg_dbg(ALGO_TRACE_FW,
+                   "[BTCoex], TdmaDurationAdjust()\n");
 
        if (coex_dm->reset_tdma_adjust) {
                coex_dm->reset_tdma_adjust = false;
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], first run TdmaDurationAdjust()!!\n");
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], first run TdmaDurationAdjust()!!\n");
                if (sco_hid) {
                        if (tx_pause) {
                                if (max_interval == 1) {
@@ -2195,11 +2185,11 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
        } else {
                /* acquire the BT TRx retry count from BT_Info byte2 */
                retry_count = coex_sta->bt_retry_cnt;
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], retry_count = %d\n", retry_count);
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], up = %d, dn = %d, m = %d, n = %d, wait_count = %d\n",
-                         (int)up, (int)dn, (int)m, (int)n, (int)wait_count);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], retry_count = %d\n", retry_count);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], up = %d, dn = %d, m = %d, n = %d, wait_count = %d\n",
+                           (int)up, (int)dn, (int)m, (int)n, (int)wait_count);
                result = 0;
                wait_count++;
 
@@ -2220,9 +2210,8 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
                                up = 0;
                                dn = 0;
                                result = 1;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_TRACE_FW_DETAIL,
-                                         "[BTCoex], Increase wifi duration!!\n");
+                               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                                           "[BTCoex], Increase wifi duration!!\n");
                        }
                } else if (retry_count <= 3) {
                        /* <=3 retry in the last 2-second duration */
@@ -2251,9 +2240,8 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
                                dn = 0;
                                wait_count = 0;
                                result = -1;
-                               BTC_PRINT(BTC_MSG_ALGORITHM,
-                                         ALGO_TRACE_FW_DETAIL,
-                                         "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
+                               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                                           "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
                        }
                } else {
                        /* retry count > 3, if retry count > 3 happens once,
@@ -2274,12 +2262,12 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
                        dn = 0;
                        wait_count = 0;
                        result = -1;
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                                 "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
+                       btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                                   "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
                }
 
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], max Interval = %d\n", max_interval);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], max Interval = %d\n", max_interval);
                if (max_interval == 1)
                        btc8821a2_int1(btcoexist, tx_pause, result);
                else if (max_interval == 2)
@@ -2295,9 +2283,9 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
        if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) {
                bool    scan = false, link = false, roam = false;
 
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                         "[BTCoex], PsTdma type dismatch!!!, cur_ps_tdma = %d, recordPsTdma = %d\n",
-                         coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
+               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                           "[BTCoex], PsTdma type dismatch!!!, cur_ps_tdma = %d, recordPsTdma = %d\n",
+                           coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
 
                btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
                btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2307,8 +2295,8 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
                        halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
                                                coex_dm->tdma_adj_type);
                } else {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-                                 "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
+                       btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+                                   "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
                }
        }
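
Stripped of the up/dn/wait_count debouncing, the adjustment above reduces to a direction decision on the BT retry counter. The thresholds below come from the code; the compressed single-function framing is an assumption:

#include <stdio.h>

/* result > 0: widen the WiFi slot; result < 0: narrow it. */
static int tdma_adjust_direction(unsigned int retry_count)
{
        if (retry_count == 0)
                return 1;       /* BT quiet: give WiFi more airtime */
        /*
         * Retries seen: give BT more airtime.  The driver debounces
         * counts of 1..3 with the up/dn/wait_count counters, while
         * counts above 3 act at once.
         */
        return -1;
}

int main(void)
{
        printf("retry 0 -> %d, retry 2 -> %d, retry 5 -> %d\n",
               tdma_adjust_direction(0), tdma_adjust_direction(2),
               tdma_adjust_direction(5));
        return 0;
}
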
 
@@ -3183,8 +3171,8 @@ static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
        u8      algorithm = 0;
 
        if (btcoexist->manual_control) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], Manual control!!!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], Manual control!!!\n");
                return;
        }
 
@@ -3192,8 +3180,8 @@ static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
                BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
 
        if (wifi_under_5g) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], RunCoexistMechanism(), run 5G coex setting!!<===\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], RunCoexistMechanism(), run 5G coex setting!!<===\n");
                halbtc8821a2ant_coex_under_5g(btcoexist);
                return;
        }
@@ -3201,81 +3189,82 @@ static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
        algorithm = halbtc8821a2ant_action_algorithm(btcoexist);
        if (coex_sta->c2h_bt_inquiry_page &&
            (BT_8821A_2ANT_COEX_ALGO_PANHS != algorithm)) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], BT is under inquiry/page scan !!\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], BT is under inquiry/page scan !!\n");
                halbtc8821a2ant_bt_inquiry_page(btcoexist);
                return;
        }
 
        coex_dm->cur_algorithm = algorithm;
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                 "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
+       btc_alg_dbg(ALGO_TRACE,
+                   "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
 
        if (halbtc8821a2ant_is_common_action(btcoexist)) {
-               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                         "[BTCoex], Action 2-Ant common.\n");
+               btc_alg_dbg(ALGO_TRACE,
+                           "[BTCoex], Action 2-Ant common\n");
                coex_dm->reset_tdma_adjust = true;
        } else {
                if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], pre_algorithm = %d, cur_algorithm = %d\n",
-                       coex_dm->pre_algorithm, coex_dm->cur_algorithm);
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], pre_algorithm = %d, cur_algorithm = %d\n",
+                                   coex_dm->pre_algorithm,
+                                   coex_dm->cur_algorithm);
                        coex_dm->reset_tdma_adjust = true;
                }
                switch (coex_dm->cur_algorithm) {
                case BT_8821A_2ANT_COEX_ALGO_SCO:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action 2-Ant, algorithm = SCO.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action 2-Ant, algorithm = SCO\n");
                        halbtc8821a2ant_action_sco(btcoexist);
                        break;
                case BT_8821A_2ANT_COEX_ALGO_HID:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action 2-Ant, algorithm = HID.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action 2-Ant, algorithm = HID\n");
                        halbtc8821a2ant_action_hid(btcoexist);
                        break;
                case BT_8821A_2ANT_COEX_ALGO_A2DP:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action 2-Ant, algorithm = A2DP.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action 2-Ant, algorithm = A2DP\n");
                        halbtc8821a2ant_action_a2dp(btcoexist);
                        break;
                case BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS).\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
                        halbtc8821a2ant_action_a2dp_pan_hs(btcoexist);
                        break;
                case BT_8821A_2ANT_COEX_ALGO_PANEDR:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action 2-Ant, algorithm = PAN(EDR).\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)\n");
                        halbtc8821a2ant_action_pan_edr(btcoexist);
                        break;
                case BT_8821A_2ANT_COEX_ALGO_PANHS:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action 2-Ant, algorithm = HS mode.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action 2-Ant, algorithm = HS mode\n");
                        halbtc8821a2ant_action_pan_hs(btcoexist);
                        break;
                case BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP\n");
                        halbtc8821a2ant_action_pan_edr_a2dp(btcoexist);
                        break;
                case BT_8821A_2ANT_COEX_ALGO_PANEDR_HID:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID\n");
                        halbtc8821a2ant_action_pan_edr_hid(btcoexist);
                        break;
                case BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN\n");
                        btc8821a2ant_act_hid_a2dp_pan_edr(btcoexist);
                        break;
                case BT_8821A_2ANT_COEX_ALGO_HID_A2DP:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action 2-Ant, algorithm = HID+A2DP.\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action 2-Ant, algorithm = HID+A2DP\n");
                        halbtc8821a2ant_action_hid_a2dp(btcoexist);
                        break;
                default:
-                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                                 "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n");
+                       btc_alg_dbg(ALGO_TRACE,
+                                   "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n");
                        halbtc8821a2ant_coex_all_off(btcoexist);
                        break;
                }
@@ -3294,8 +3283,8 @@ void ex_halbtc8821a2ant_init_hwconfig(struct btc_coexist *btcoexist)
 {
        u8 u1tmp = 0;
 
-       BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                 "[BTCoex], 2Ant Init HW Config!!\n");
+       btc_iface_dbg(INTF_INIT,
+                     "[BTCoex], 2Ant Init HW Config!!\n");
 
        /* backup rf 0x1e value */
        coex_dm->bt_rf0x1e_backup =
@@ -3328,8 +3317,8 @@ ex_halbtc8821a2ant_init_coex_dm(
        struct btc_coexist *btcoexist
        )
 {
-       BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                 "[BTCoex], Coex Mechanism Init!!\n");
+       btc_iface_dbg(INTF_INIT,
+                     "[BTCoex], Coex Mechanism Init!!\n");
 
        halbtc8821a2ant_init_coex_dm(btcoexist);
 }
@@ -3574,13 +3563,13 @@ ex_halbtc8821a2ant_display_coex_info(
 void ex_halbtc8821a2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
 {
        if (BTC_IPS_ENTER == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], IPS ENTER notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], IPS ENTER notify\n");
                coex_sta->under_ips = true;
                halbtc8821a2ant_coex_all_off(btcoexist);
        } else if (BTC_IPS_LEAVE == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], IPS LEAVE notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], IPS LEAVE notify\n");
                coex_sta->under_ips = false;
                /*halbtc8821a2ant_init_coex_dm(btcoexist);*/
        }
@@ -3589,12 +3578,12 @@ void ex_halbtc8821a2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
 void ex_halbtc8821a2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
 {
        if (BTC_LPS_ENABLE == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], LPS ENABLE notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], LPS ENABLE notify\n");
                coex_sta->under_lps = true;
        } else if (BTC_LPS_DISABLE == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], LPS DISABLE notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], LPS DISABLE notify\n");
                coex_sta->under_lps = false;
        }
 }
@@ -3602,22 +3591,22 @@ void ex_halbtc8821a2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
 void ex_halbtc8821a2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
 {
        if (BTC_SCAN_START == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], SCAN START notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], SCAN START notify\n");
        } else if (BTC_SCAN_FINISH == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], SCAN FINISH notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], SCAN FINISH notify\n");
        }
 }
 
 void ex_halbtc8821a2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
 {
        if (BTC_ASSOCIATE_START == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], CONNECT START notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], CONNECT START notify\n");
        } else if (BTC_ASSOCIATE_FINISH == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], CONNECT FINISH notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], CONNECT FINISH notify\n");
        }
 }
 
@@ -3629,11 +3618,11 @@ void ex_halbtc8821a2ant_media_status_notify(struct btc_coexist *btcoexist,
        u8      wifi_central_chnl;
 
        if (BTC_MEDIA_CONNECT == type) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], MEDIA connect notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], MEDIA connect notify\n");
        } else {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], MEDIA disconnect notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], MEDIA disconnect notify\n");
        }
 
        /* only 2.4G we need to inform bt the chnl mask*/
@@ -3654,9 +3643,11 @@ void ex_halbtc8821a2ant_media_status_notify(struct btc_coexist *btcoexist,
        coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
        coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-                 "[BTCoex], FW write 0x66 = 0x%x\n",
-               h2c_parameter[0]<<16|h2c_parameter[1]<<8|h2c_parameter[2]);
+       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+                   "[BTCoex], FW write 0x66 = 0x%x\n",
+                   h2c_parameter[0] << 16 |
+                   h2c_parameter[1] << 8 |
+                   h2c_parameter[2]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
 }
@@ -3664,8 +3655,8 @@ void ex_halbtc8821a2ant_media_status_notify(struct btc_coexist *btcoexist,
 void ex_halbtc8821a2ant_special_packet_notify(struct btc_coexist *btcoexist,
                                              u8 type) {
        if (type == BTC_PACKET_DHCP) {
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                         "[BTCoex], DHCP Packet notify\n");
+               btc_iface_dbg(INTF_NOTIFY,
+                             "[BTCoex], DHCP Packet notify\n");
        }
 }
 
@@ -3685,19 +3676,19 @@ void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
                rsp_source = BT_INFO_SRC_8821A_2ANT_WIFI_FW;
        coex_sta->bt_info_c2h_cnt[rsp_source]++;
 
-       BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                 "[BTCoex], Bt info[%d], length = %d, hex data = [",
-                 rsp_source, length);
+       btc_iface_dbg(INTF_NOTIFY,
+                     "[BTCoex], Bt info[%d], length = %d, hex data = [",
+                     rsp_source, length);
        for (i = 0; i < length; i++) {
                coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
                if (i == 1)
                        bt_info = tmp_buf[i];
                if (i == length-1) {
-                       BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                                 "0x%02x]\n", tmp_buf[i]);
+                       btc_iface_dbg(INTF_NOTIFY,
+                                     "0x%02x]\n", tmp_buf[i]);
                } else {
-                       BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                                 "0x%02x, ", tmp_buf[i]);
+                       btc_iface_dbg(INTF_NOTIFY,
+                                     "0x%02x, ", tmp_buf[i]);
                }
        }
 
@@ -3823,8 +3814,8 @@ void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
 
 void ex_halbtc8821a2ant_halt_notify(struct btc_coexist *btcoexist)
 {
-       BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-                 "[BTCoex], Halt notify\n");
+       btc_iface_dbg(INTF_NOTIFY,
+                     "[BTCoex], Halt notify\n");
 
        halbtc8821a2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
        ex_halbtc8821a2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
@@ -3837,31 +3828,31 @@ void ex_halbtc8821a2ant_periodical(struct btc_coexist *btcoexist)
        struct btc_board_info *board_info = &btcoexist->board_info;
        struct btc_stack_info *stack_info = &btcoexist->stack_info;
 
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                 "[BTCoex], ==========================Periodical===========================\n");
+       btc_alg_dbg(ALGO_TRACE,
+                   "[BTCoex], ==========================Periodical===========================\n");
 
        if (dis_ver_info_cnt <= 5) {
                dis_ver_info_cnt += 1;
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                         "[BTCoex], ****************************************************************\n");
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                         "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
-                         board_info->pg_ant_num,
-                         board_info->btdm_ant_num,
-                         board_info->btdm_ant_pos);
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                         "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
-                         ((stack_info->profile_notified) ? "Yes" : "No"),
-                         stack_info->hci_version);
+               btc_iface_dbg(INTF_INIT,
+                             "[BTCoex], ****************************************************************\n");
+               btc_iface_dbg(INTF_INIT,
+                             "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+                             board_info->pg_ant_num,
+                             board_info->btdm_ant_num,
+                             board_info->btdm_ant_pos);
+               btc_iface_dbg(INTF_INIT,
+                             "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
+                             stack_info->profile_notified ? "Yes" : "No",
+                             stack_info->hci_version);
                btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
                                   &bt_patch_ver);
                btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                         "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
-                         glcoex_ver_date_8821a_2ant, glcoex_ver_8821a_2ant,
-                         fw_ver, bt_patch_ver, bt_patch_ver);
-               BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-                         "[BTCoex], ****************************************************************\n");
+               btc_iface_dbg(INTF_INIT,
+                             "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+                             glcoex_ver_date_8821a_2ant, glcoex_ver_8821a_2ant,
+                             fw_ver, bt_patch_ver, bt_patch_ver);
+               btc_iface_dbg(INTF_INIT,
+                             "[BTCoex], ****************************************************************\n");
        }
 
        halbtc8821a2ant_query_bt_info(btcoexist);
index b2791c893417b071782dbb317f6baae53c0ecf74..b660c214dc717b086de802f6ae4ada8b4b05dfe8 100644 (file)
@@ -141,8 +141,8 @@ static u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist)
 
        if (rtlphy->current_channel != 0)
                chnl = rtlphy->current_channel;
-       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-                 "static halbtc_get_wifi_central_chnl:%d\n", chnl);
+       btc_alg_dbg(ALGO_TRACE,
+                   "static halbtc_get_wifi_central_chnl:%d\n", chnl);
        return chnl;
 }
 
@@ -965,13 +965,38 @@ void exhalbtc_set_chip_type(u8 chip_type)
        }
 }
 
-void exhalbtc_set_ant_num(u8 type, u8 ant_num)
+void exhalbtc_set_ant_num(struct rtl_priv *rtlpriv, u8 type, u8 ant_num)
 {
        if (BT_COEX_ANT_TYPE_PG == type) {
                gl_bt_coexist.board_info.pg_ant_num = ant_num;
                gl_bt_coexist.board_info.btdm_ant_num = ant_num;
+               /* The antenna position:
+                * Main (default) or Aux for pg_ant_num = 2 && btdm_ant_num = 1.
+                * The antenna position should be determined by
+                * the auto-detect mechanism.
+                * The following assumes Main; it must be
+                * revised once the auto-detect mechanism
+                * is ready.
+                */
+               if ((gl_bt_coexist.board_info.pg_ant_num == 2) &&
+                   (gl_bt_coexist.board_info.btdm_ant_num == 1))
+                       gl_bt_coexist.board_info.btdm_ant_pos =
+                                                      BTC_ANTENNA_AT_MAIN_PORT;
+               else
+                       gl_bt_coexist.board_info.btdm_ant_pos =
+                                                      BTC_ANTENNA_AT_MAIN_PORT;
        } else if (BT_COEX_ANT_TYPE_ANTDIV == type) {
                gl_bt_coexist.board_info.btdm_ant_num = ant_num;
+               gl_bt_coexist.board_info.btdm_ant_pos =
+                                                      BTC_ANTENNA_AT_MAIN_PORT;
+       } else if (type == BT_COEX_ANT_TYPE_DETECTED) {
+               gl_bt_coexist.board_info.btdm_ant_num = ant_num;
+               if (rtlpriv->cfg->mod_params->ant_sel == 1)
+                       gl_bt_coexist.board_info.btdm_ant_pos =
+                               BTC_ANTENNA_AT_AUX_PORT;
+               else
+                       gl_bt_coexist.board_info.btdm_ant_pos =
+                               BTC_ANTENNA_AT_MAIN_PORT;
        }
 }
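
Note: in the BT_COEX_ANT_TYPE_PG case above, both branches of the if/else deliberately pick the main port; the aux branch is a placeholder until the auto-detect mechanism lands. Condensed to a sketch (the helper name ant_pos_for() is hypothetical; the constants are taken from the hunk), the selection policy is:

	/* Sketch only: effective antenna-position policy of exhalbtc_set_ant_num() */
	static u8 ant_pos_for(u8 type, int ant_sel)
	{
		if (type == BT_COEX_ANT_TYPE_DETECTED && ant_sel == 1)
			return BTC_ANTENNA_AT_AUX_PORT;
		return BTC_ANTENNA_AT_MAIN_PORT;	/* PG and ANTDIV: main for now */
	}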
 
index 0a903ea179ef5df67954b1c33c97a57a5c82eebe..3cbe34c535ec4c1ce4cc3fe2857ea169f890566f 100644 (file)
@@ -116,12 +116,17 @@ extern u32 btc_dbg_type[];
 #define                WIFI_P2P_GO_CONNECTED                   BIT3
 #define                WIFI_P2P_GC_CONNECTED                   BIT4
 
-#define        BTC_PRINT(dbgtype, dbgflag, printstr, ...)              \
-       do {                                                    \
-               if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) {\
-                       printk(printstr, ##__VA_ARGS__);        \
-               }                                               \
-       } while (0)
+#define        btc_alg_dbg(dbgflag, fmt, ...)                                  \
+do {                                                                   \
+       if (unlikely(btc_dbg_type[BTC_MSG_ALGORITHM] & dbgflag))        \
+               printk(KERN_DEBUG fmt, ##__VA_ARGS__);                  \
+} while (0)
+#define        btc_iface_dbg(dbgflag, fmt, ...)                                \
+do {                                                                   \
+       if (unlikely(btc_dbg_type[BTC_MSG_INTERFACE] & dbgflag))        \
+               printk(KERN_DEBUG fmt, ##__VA_ARGS__);                  \
+} while (0)
+
 
 #define        BTC_RSSI_HIGH(_rssi_)   \
        ((_rssi_ == BTC_RSSI_STATE_HIGH ||      \
@@ -535,7 +540,7 @@ void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version);
 void exhalbtc_update_min_bt_rssi(char bt_rssi);
 void exhalbtc_set_bt_exist(bool bt_exist);
 void exhalbtc_set_chip_type(u8 chip_type);
-void exhalbtc_set_ant_num(u8 type, u8 ant_num);
+void exhalbtc_set_ant_num(struct rtl_priv *rtlpriv, u8 type, u8 ant_num);
 void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist);
 void exhalbtc_signal_compensation(struct btc_coexist *btcoexist,
                                  u8 *rssi_wifi, u8 *rssi_bt);
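
Note: fixing the message class (BTC_MSG_ALGORITHM / BTC_MSG_INTERFACE) inside the two new wrappers lets every call site drop its first argument, and the wrappers add the explicit KERN_DEBUG level that the bare printk() in BTC_PRINT lacked. Before/after at a call site (taken from the hunks above):

	/* before */
	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, "[BTCoex], Periodical\n");
	/* after */
	btc_alg_dbg(ALGO_TRACE, "[BTCoex], Periodical\n");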
index b9b0cb7af8eace6e357378729e5d8d8628cb282a..d3fd9211b3a48fd1c8b495069fccbb0085dbb140 100644 (file)
@@ -72,7 +72,10 @@ void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv)
                 __func__, bt_type);
        exhalbtc_set_chip_type(bt_type);
 
-       exhalbtc_set_ant_num(BT_COEX_ANT_TYPE_PG, ant_num);
+       if (rtlpriv->cfg->mod_params->ant_sel == 1)
+               exhalbtc_set_ant_num(rtlpriv, BT_COEX_ANT_TYPE_DETECTED, 1);
+       else
+               exhalbtc_set_ant_num(rtlpriv, BT_COEX_ANT_TYPE_PG, ant_num);
 }
 
 void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv)
index 283d608b9973490e06367f0f0d74a1840f0a21db..1ac41b8bd19a5188f068bcb3ec6b6b90096f961e 100644 (file)
@@ -359,30 +359,28 @@ static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
        bool find_buddy_priv = false;
-       struct rtl_priv *tpriv = NULL;
+       struct rtl_priv *tpriv;
        struct rtl_pci_priv *tpcipriv = NULL;
 
        if (!list_empty(&rtlpriv->glb_var->glb_priv_list)) {
                list_for_each_entry(tpriv, &rtlpriv->glb_var->glb_priv_list,
                                    list) {
-                       if (tpriv) {
-                               tpcipriv = (struct rtl_pci_priv *)tpriv->priv;
-                               RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
-                                        "pcipriv->ndis_adapter.funcnumber %x\n",
-                                       pcipriv->ndis_adapter.funcnumber);
-                               RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
-                                        "tpcipriv->ndis_adapter.funcnumber %x\n",
-                                       tpcipriv->ndis_adapter.funcnumber);
-
-                               if ((pcipriv->ndis_adapter.busnumber ==
-                                    tpcipriv->ndis_adapter.busnumber) &&
-                                   (pcipriv->ndis_adapter.devnumber ==
-                                   tpcipriv->ndis_adapter.devnumber) &&
-                                   (pcipriv->ndis_adapter.funcnumber !=
-                                   tpcipriv->ndis_adapter.funcnumber)) {
-                                       find_buddy_priv = true;
-                                       break;
-                               }
+                       tpcipriv = (struct rtl_pci_priv *)tpriv->priv;
+                       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                                "pcipriv->ndis_adapter.funcnumber %x\n",
+                               pcipriv->ndis_adapter.funcnumber);
+                       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                                "tpcipriv->ndis_adapter.funcnumber %x\n",
+                               tpcipriv->ndis_adapter.funcnumber);
+
+                       if ((pcipriv->ndis_adapter.busnumber ==
+                            tpcipriv->ndis_adapter.busnumber) &&
+                           (pcipriv->ndis_adapter.devnumber ==
+                           tpcipriv->ndis_adapter.devnumber) &&
+                           (pcipriv->ndis_adapter.funcnumber !=
+                           tpcipriv->ndis_adapter.funcnumber)) {
+                               find_buddy_priv = true;
+                               break;
                        }
                }
        }
@@ -1213,7 +1211,8 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
        /*Tx/Rx related var */
        _rtl_pci_init_trx_var(hw);
 
-       /*IBSS*/ mac->beacon_interval = 100;
+       /*IBSS*/
+       mac->beacon_interval = 100;
 
        /*AMPDU*/
        mac->min_space_cfg = 0;
index 5be34118e0af22b69392f5caf07f9eef5672c7fe..3524441fd51658fb8a0048ff799141c24c549c87 100644 (file)
@@ -154,13 +154,13 @@ static bool _rtl_is_radar_freq(u16 center_freq)
 static void _rtl_reg_apply_beaconing_flags(struct wiphy *wiphy,
                                           enum nl80211_reg_initiator initiator)
 {
-       enum ieee80211_band band;
+       enum nl80211_band band;
        struct ieee80211_supported_band *sband;
        const struct ieee80211_reg_rule *reg_rule;
        struct ieee80211_channel *ch;
        unsigned int i;
 
-       for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+       for (band = 0; band < NUM_NL80211_BANDS; band++) {
 
                if (!wiphy->bands[band])
                        continue;
@@ -210,9 +210,9 @@ static void _rtl_reg_apply_active_scan_flags(struct wiphy *wiphy,
        struct ieee80211_channel *ch;
        const struct ieee80211_reg_rule *reg_rule;
 
-       if (!wiphy->bands[IEEE80211_BAND_2GHZ])
+       if (!wiphy->bands[NL80211_BAND_2GHZ])
                return;
-       sband = wiphy->bands[IEEE80211_BAND_2GHZ];
+       sband = wiphy->bands[NL80211_BAND_2GHZ];
 
        /*
         *If no country IE has been received always enable active scan
@@ -262,10 +262,10 @@ static void _rtl_reg_apply_radar_flags(struct wiphy *wiphy)
        struct ieee80211_channel *ch;
        unsigned int i;
 
-       if (!wiphy->bands[IEEE80211_BAND_5GHZ])
+       if (!wiphy->bands[NL80211_BAND_5GHZ])
                return;
 
-       sband = wiphy->bands[IEEE80211_BAND_5GHZ];
+       sband = wiphy->bands[NL80211_BAND_5GHZ];
 
        for (i = 0; i < sband->n_channels; i++) {
                ch = &sband->channels[i];
@@ -301,12 +301,12 @@ static void _rtl_reg_apply_world_flags(struct wiphy *wiphy,
 
 static void _rtl_dump_channel_map(struct wiphy *wiphy)
 {
-       enum ieee80211_band band;
+       enum nl80211_band band;
        struct ieee80211_supported_band *sband;
        struct ieee80211_channel *ch;
        unsigned int i;
 
-       for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+       for (band = 0; band < NUM_NL80211_BANDS; band++) {
                if (!wiphy->bands[band])
                        continue;
                sband = wiphy->bands[band];
index ce4da9d79fbd9e44d55eb298379c7de4a28c0eee..db9a7829d568757dcfb5a1d93b08ea96c17a7f80 100644 (file)
@@ -1137,7 +1137,7 @@ void rtl88e_dm_check_txpower_tracking(struct ieee80211_hw *hw)
        } else {
                RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
                         "Schedule TxPowerTracking !!\n");
-                               dm_txpower_track_cb_therm(hw);
+               dm_txpower_track_cb_therm(hw);
                rtlpriv->dm.tm_trigger = 0;
        }
 }
index a2bb02c7b837679455daf529fc84b5214068a33b..416a9ba6382e8040875ee742cd942335c149a2e6 100644 (file)
@@ -1903,8 +1903,7 @@ static void _rtl88e_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
        } else {
                rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
        }
-RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
-
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
 }
 
 static void _rtl88e_phy_set_rfpath_switch(struct ieee80211_hw *hw,
index 03cbe4cf110b5f42deabe86397fff9fa41101974..316be5ff69ca80fd0a988ab732d2f596a933e0ec 100644 (file)
@@ -240,7 +240,7 @@ static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
        ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD);
        falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
 
-        ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, MASKDWORD);
+       ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, MASKDWORD);
        falsealm_cnt->cnt_fast_fsync_fail = (ret_value & 0xffff);
        falsealm_cnt->cnt_sb_search_fail = ((ret_value & 0xffff0000) >> 16);
 
index 24eff8ea4c2e2e28ef8d77136f412d3b4661b0fa..35e6bf7e233d6d71fc359f9649d10a15ac9d284a 100644 (file)
@@ -368,7 +368,7 @@ bool rtl92ee_rx_query_desc(struct ieee80211_hw *hw,
        status->decrypted = !GET_RX_DESC_SWDEC(pdesc);
        status->rate = (u8)GET_RX_DESC_RXMCS(pdesc);
        status->isampdu = (bool)(GET_RX_DESC_PAGGR(pdesc) == 1);
-               status->timestamp_low = GET_RX_DESC_TSFL(pdesc);
+       status->timestamp_low = GET_RX_DESC_TSFL(pdesc);
        status->is_cck = RTL92EE_RX_HAL_IS_CCK_RATE(status->rate);
 
        status->macid = GET_RX_DESC_MACID(pdesc);
index 4b4612fe2fdbdf318bad0b0a6e36873a6340d60b..881821f4e243878ae75c32728a326c67c70eb81c 100644 (file)
@@ -645,7 +645,7 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
                                 rtlpriv->psc.state_inap);
                        ppsc->last_sleep_jiffies = jiffies;
                        _rtl92se_phy_set_rf_sleep(hw);
-           break;
+                       break;
        default:
                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
                         "switch case not processed\n");
index 00a0531cc5f49a84637f222332a58450ec118ef9..44de695dc9992bdc869bff48110f8443dd395e34 100644 (file)
@@ -134,9 +134,9 @@ static bool rtl8723e_dm_bt_need_to_dec_bt_pwr(struct ieee80211_hw *hw)
        if (mgnt_link_status_query(hw) == RT_MEDIA_CONNECT) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
                        "Need to decrease bt power\n");
-                       rtlpriv->btcoexist.cstate |=
-                       BT_COEX_STATE_DEC_BT_POWER;
-                       return true;
+               rtlpriv->btcoexist.cstate |=
+               BT_COEX_STATE_DEC_BT_POWER;
+               return true;
        }
 
        rtlpriv->btcoexist.cstate &= ~BT_COEX_STATE_DEC_BT_POWER;
index c983d2fe147f2bf1da4923f2ddc8f2bbbfcd90de..5a3df9198ddf7455c68045d8a51aac4b81789bbb 100644 (file)
@@ -2684,6 +2684,7 @@ void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
                                              bool auto_load_fail, u8 *hwinfo)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params;
        u8 value;
        u32 tmpu_32;
 
@@ -2702,6 +2703,10 @@ void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
                rtlpriv->btcoexist.btc_info.ant_num = ANT_X2;
        }
 
+       /* override ant_num / ant_path */
+       if (mod_params->ant_sel)
+               rtlpriv->btcoexist.btc_info.ant_num =
+                       (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1);
 }
 
 void rtl8723be_bt_reg_init(struct ieee80211_hw *hw)
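
Note: this override interacts with the rtl_btc_init_hal_vars() hunk earlier in the series. When ant_sel=1, the HAL forces BT_COEX_ANT_TYPE_DETECTED with a single antenna, while the EFUSE-reported ant_num here becomes ANT_X2; ant_sel=2 gives ANT_X1, and 0 (the default) leaves the EFUSE value untouched.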
index b7b73cbe346da9484601a23bfde76e026fdeceac..445f681d08c03aa36748f97d3e3c5b170d820614 100644 (file)
@@ -1723,8 +1723,8 @@ static u8 _rtl8723be_phy_path_a_rx_iqk(struct ieee80211_hw *hw)
 
        /* Allen 20131125 */
        tmp = (reg_eac & 0x03FF0000) >> 16;
-               if ((tmp & 0x200) > 0)
-                       tmp = 0x400 - tmp;
+       if ((tmp & 0x200) > 0)
+               tmp = 0x400 - tmp;
        /* if Tx is OK, check whether Rx is OK */
        if (!(reg_eac & BIT(27)) &&
            (((reg_ea4 & 0x03FF0000) >> 16) != 0x132) &&
@@ -2301,8 +2301,7 @@ static void _rtl8723be_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
        } else {
                rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
        }
-RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
-
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
 }
 
 static void _rtl8723be_phy_set_rfpath_switch(struct ieee80211_hw *hw,
@@ -2606,8 +2605,7 @@ static bool _rtl8723be_phy_set_rf_power_state(struct ieee80211_hw *hw,
                                         "IPS Set eRf nic enable\n");
                                rtstatus = rtl_ps_enable_nic(hw);
                        } while (!rtstatus && (initializecount < 10));
-                               RT_CLEAR_PS_LEVEL(ppsc,
-                                                 RT_RF_OFF_LEVL_HALT_NIC);
+                       RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
                } else {
                        RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
                                 "Set ERFON sleeped:%d ms\n",
index 5ed4492d3c806abca911f95d8d589280853aef96..97f5a0377e7a05f38cd1211646fa75d514a61992 100644 (file)
@@ -303,8 +303,8 @@ static void _rtl8723be_get_txpower_writeval_by_regulatory(
                                           [chnlgroup][index + (rf ? 8 : 0)] &
                                              (0x7f << (i * 8))) >> (i * 8));
 
-                                       if (pwr_diff_limit[i] > pwr_diff)
-                                               pwr_diff_limit[i] = pwr_diff;
+                               if (pwr_diff_limit[i] > pwr_diff)
+                                       pwr_diff_limit[i] = pwr_diff;
                        }
 
                        customer_limit = (pwr_diff_limit[3] << 24) |
index a78eaeda000801c05dfbe6b6962784cfc3915e40..2101793438ed1832e1faf8e28cb97ac65f0f7824 100644 (file)
@@ -273,6 +273,7 @@ static struct rtl_mod_params rtl8723be_mod_params = {
        .msi_support = false,
        .disable_watchdog = false,
        .debug = DBG_EMERG,
+       .ant_sel = 0,
 };
 
 static struct rtl_hal_cfg rtl8723be_hal_cfg = {
@@ -394,6 +395,7 @@ module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444);
 module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444);
 module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog,
                   bool, 0444);
+module_param_named(ant_sel, rtl8723be_mod_params.ant_sel, int, 0444);
 MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
 MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
 MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
@@ -402,6 +404,7 @@ MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n");
 MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
 MODULE_PARM_DESC(disable_watchdog,
                 "Set to 1 to disable the watchdog (default 0)\n");
+MODULE_PARM_DESC(ant_sel, "Set to 1 or 2 to force antenna number (default 0)\n");
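
Note: with the parameter wired into rtl8723be_mod_params, antenna selection can be forced at module load time, e.g. "modprobe rtl8723be ant_sel=1" (illustrative invocation; 0 remains the auto default). The 0444 permission also exposes the chosen value read-only under /sys/module/rtl8723be/parameters/ant_sel.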
 
 static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
 
index 95dcbff4673b1490c72fec5ee9bc55bcfa5f7c20..17a681788611a7019ec4d11a88b5914cf1006f4d 100644 (file)
@@ -1957,9 +1957,9 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
                                rtldm->swing_idx_ofdm_base[p] =
                                        rtldm->swing_idx_ofdm[p];
 
-                       RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
-                                "pDM_Odm->RFCalibrateInfo.ThermalValue =%d ThermalValue= %d\n",
-                                rtldm->thermalvalue, thermal_value);
+               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                        "pDM_Odm->RFCalibrateInfo.ThermalValue =%d ThermalValue= %d\n",
+                        rtldm->thermalvalue, thermal_value);
                /*Record last Power Tracking Thermal Value*/
                rtldm->thermalvalue = thermal_value;
        }
@@ -2488,9 +2488,9 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
                for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
                        rtldm->swing_idx_ofdm_base[p] = rtldm->swing_idx_ofdm[p];
 
-                       RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
-                                "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
-                                rtldm->thermalvalue, thermal_value);
+               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                        "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
+                        rtldm->thermalvalue, thermal_value);
                /*Record last Power Tracking Thermal Value*/
                rtldm->thermalvalue = thermal_value;
        }
index 74165b3eb362ceae01eaded6d600d2b64518235a..0c3b9ce86e2e516152c6feabbac68d41ad162af9 100644 (file)
@@ -418,9 +418,9 @@ u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8      band,
                        out = 0x16A; /* -3 dB */
                }
        } else {
-           u32 swing = 0, swing_a = 0, swing_b = 0;
+               u32 swing = 0, swing_a = 0, swing_b = 0;
 
-           if (band == BAND_ON_2_4G) {
+               if (band == BAND_ON_2_4G) {
                        if (reg_swing_2g == auto_temp) {
                                efuse_shadow_read(hw, 1, 0xC6, (u32 *)&swing);
                                swing = (swing == 0xFF) ? 0x00 : swing;
@@ -514,7 +514,7 @@ u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8      band,
 
        RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
                 "<=== PHY_GetTxBBSwing_8812A, out = 0x%X\n", out);
-        return out;
+       return out;
 }
 
 void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
@@ -959,7 +959,7 @@ static void _rtl8821ae_phy_store_txpower_by_rate_base(struct ieee80211_hw *hw)
 static void _phy_convert_txpower_dbm_to_relative_value(u32 *data, u8 start,
                                                u8 end, u8 base_val)
 {
-       char i = 0;
+       int i;
        u8 temp_value = 0;
        u32 temp_data = 0;
 
index 554d81420f19ecbc598c995dec3bbab86c8558e5..11d9c2307e2f6e0109365f979c5118b0c2ac5274 100644 (file)
@@ -1359,7 +1359,7 @@ struct rtl_mac {
        u32 tx_ss_num;
        u32 rx_ss_num;
 
-       struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+       struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
        struct ieee80211_hw *hw;
        struct ieee80211_vif *vif;
        enum nl80211_iftype opmode;
@@ -2246,6 +2246,9 @@ struct rtl_mod_params {
 
        /* default 0: 1 means do not disable interrupts */
        bool int_clear;
+
+       /* select antenna */
+       int ant_sel;
 };
 
 struct rtl_hal_usbint_cfg {
@@ -2867,7 +2870,7 @@ value to host byte ordering.*/
        (ppsc->cur_ps_level |= _ps_flg)
 
 #define container_of_dwork_rtl(x, y, z) \
-       container_of(container_of(x, struct delayed_work, work), y, z)
+       container_of(to_delayed_work(x), y, z)
 
 #define FILL_OCTET_STRING(_os, _octet, _len)   \
                (_os).octet = (u8 *)(_octet);           \
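
Note: the container_of_dwork_rtl() simplification leans on the generic kernel helper; for reference, to_delayed_work() in <linux/workqueue.h> is exactly the open-coded container_of() it replaces:

	static inline struct delayed_work *to_delayed_work(struct work_struct *work)
	{
		return container_of(work, struct delayed_work, work);
	}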
index a13d1f2b5912e09e31a4e1b2834ee5eb2d9825bc..569918c485b454378106eb1a6abc80469a8043c9 100644 (file)
@@ -1291,7 +1291,7 @@ static int set_channel(struct usbnet *usbdev, int channel)
                return 0;
 
        dsconfig = 1000 *
-               ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
+               ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ);
 
        len = sizeof(config);
        ret = rndis_query_oid(usbdev,
@@ -3476,7 +3476,7 @@ static int rndis_wlan_bind(struct usbnet *usbdev, struct usb_interface *intf)
        priv->band.n_channels = ARRAY_SIZE(rndis_channels);
        priv->band.bitrates = priv->rates;
        priv->band.n_bitrates = ARRAY_SIZE(rndis_rates);
-       wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+       wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
        wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
 
        memcpy(priv->cipher_suites, rndis_cipher_suites,
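
Note: the IEEE80211_BAND_* to NL80211_BAND_* edits here and in the remaining drivers are mechanical. Before its removal, enum ieee80211_band simply aliased the nl80211 values, so the rename is behavior-neutral; an abridged sketch of the old definition (60 GHz entry omitted):

	enum ieee80211_band {
		IEEE80211_BAND_2GHZ = NL80211_BAND_2GHZ,
		IEEE80211_BAND_5GHZ = NL80211_BAND_5GHZ,
		IEEE80211_NUM_BANDS
	};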
index 4df992de7d0731508a6c42fccf51096928156e9e..dbb23899ddcb336827a3b0e5b255db9e16d2604d 100644 (file)
 #include "rsi_common.h"
 
 static const struct ieee80211_channel rsi_2ghz_channels[] = {
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2412,
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2412,
          .hw_value = 1 }, /* Channel 1 */
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2417,
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2417,
          .hw_value = 2 }, /* Channel 2 */
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2422,
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2422,
          .hw_value = 3 }, /* Channel 3 */
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2427,
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2427,
          .hw_value = 4 }, /* Channel 4 */
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2432,
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2432,
          .hw_value = 5 }, /* Channel 5 */
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2437,
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2437,
          .hw_value = 6 }, /* Channel 6 */
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2442,
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2442,
          .hw_value = 7 }, /* Channel 7 */
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2447,
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2447,
          .hw_value = 8 }, /* Channel 8 */
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2452,
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2452,
          .hw_value = 9 }, /* Channel 9 */
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2457,
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2457,
          .hw_value = 10 }, /* Channel 10 */
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2462,
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2462,
          .hw_value = 11 }, /* Channel 11 */
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2467,
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2467,
          .hw_value = 12 }, /* Channel 12 */
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2472,
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2472,
          .hw_value = 13 }, /* Channel 13 */
-       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2484,
+       { .band = NL80211_BAND_2GHZ, .center_freq = 2484,
          .hw_value = 14 }, /* Channel 14 */
 };
 
 static const struct ieee80211_channel rsi_5ghz_channels[] = {
-       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5180,
+       { .band = NL80211_BAND_5GHZ, .center_freq = 5180,
          .hw_value = 36,  }, /* Channel 36 */
-       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5200,
+       { .band = NL80211_BAND_5GHZ, .center_freq = 5200,
          .hw_value = 40, }, /* Channel 40 */
-       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5220,
+       { .band = NL80211_BAND_5GHZ, .center_freq = 5220,
          .hw_value = 44, }, /* Channel 44 */
-       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5240,
+       { .band = NL80211_BAND_5GHZ, .center_freq = 5240,
          .hw_value = 48, }, /* Channel 48 */
-       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5260,
+       { .band = NL80211_BAND_5GHZ, .center_freq = 5260,
          .hw_value = 52, }, /* Channel 52 */
-       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5280,
+       { .band = NL80211_BAND_5GHZ, .center_freq = 5280,
          .hw_value = 56, }, /* Channel 56 */
-       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5300,
+       { .band = NL80211_BAND_5GHZ, .center_freq = 5300,
          .hw_value = 60, }, /* Channel 60 */
-       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5320,
+       { .band = NL80211_BAND_5GHZ, .center_freq = 5320,
          .hw_value = 64, }, /* Channel 64 */
-       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5500,
+       { .band = NL80211_BAND_5GHZ, .center_freq = 5500,
          .hw_value = 100, }, /* Channel 100 */
-       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5520,
+       { .band = NL80211_BAND_5GHZ, .center_freq = 5520,
          .hw_value = 104, }, /* Channel 104 */
-       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5540,
+       { .band = NL80211_BAND_5GHZ, .center_freq = 5540,
          .hw_value = 108, }, /* Channel 108 */
-       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5560,
+       { .band = NL80211_BAND_5GHZ, .center_freq = 5560,
          .hw_value = 112, }, /* Channel 112 */
-       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5580,
+       { .band = NL80211_BAND_5GHZ, .center_freq = 5580,
          .hw_value = 116, }, /* Channel 116 */
-       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5600,
+       { .band = NL80211_BAND_5GHZ, .center_freq = 5600,
          .hw_value = 120, }, /* Channel 120 */
-       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5620,
+       { .band = NL80211_BAND_5GHZ, .center_freq = 5620,
          .hw_value = 124, }, /* Channel 124 */
-       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5640,
+       { .band = NL80211_BAND_5GHZ, .center_freq = 5640,
          .hw_value = 128, }, /* Channel 128 */
-       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5660,
+       { .band = NL80211_BAND_5GHZ, .center_freq = 5660,
          .hw_value = 132, }, /* Channel 132 */
-       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5680,
+       { .band = NL80211_BAND_5GHZ, .center_freq = 5680,
          .hw_value = 136, }, /* Channel 136 */
-       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5700,
+       { .band = NL80211_BAND_5GHZ, .center_freq = 5700,
          .hw_value = 140, }, /* Channel 140 */
-       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5745,
+       { .band = NL80211_BAND_5GHZ, .center_freq = 5745,
          .hw_value = 149, }, /* Channel 149 */
-       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5765,
+       { .band = NL80211_BAND_5GHZ, .center_freq = 5765,
          .hw_value = 153, }, /* Channel 153 */
-       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5785,
+       { .band = NL80211_BAND_5GHZ, .center_freq = 5785,
          .hw_value = 157, }, /* Channel 157 */
-       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5805,
+       { .band = NL80211_BAND_5GHZ, .center_freq = 5805,
          .hw_value = 161, }, /* Channel 161 */
-       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5825,
+       { .band = NL80211_BAND_5GHZ, .center_freq = 5825,
          .hw_value = 165, }, /* Channel 165 */
 };
 
@@ -150,12 +150,12 @@ static void rsi_register_rates_channels(struct rsi_hw *adapter, int band)
        struct ieee80211_supported_band *sbands = &adapter->sbands[band];
        void *channels = NULL;
 
-       if (band == IEEE80211_BAND_2GHZ) {
+       if (band == NL80211_BAND_2GHZ) {
                channels = kmalloc(sizeof(rsi_2ghz_channels), GFP_KERNEL);
                memcpy(channels,
                       rsi_2ghz_channels,
                       sizeof(rsi_2ghz_channels));
-               sbands->band = IEEE80211_BAND_2GHZ;
+               sbands->band = NL80211_BAND_2GHZ;
                sbands->n_channels = ARRAY_SIZE(rsi_2ghz_channels);
                sbands->bitrates = rsi_rates;
                sbands->n_bitrates = ARRAY_SIZE(rsi_rates);
@@ -164,7 +164,7 @@ static void rsi_register_rates_channels(struct rsi_hw *adapter, int band)
                memcpy(channels,
                       rsi_5ghz_channels,
                       sizeof(rsi_5ghz_channels));
-               sbands->band = IEEE80211_BAND_5GHZ;
+               sbands->band = NL80211_BAND_5GHZ;
                sbands->n_channels = ARRAY_SIZE(rsi_5ghz_channels);
                sbands->bitrates = &rsi_rates[4];
                sbands->n_bitrates = ARRAY_SIZE(rsi_rates) - 4;
@@ -775,7 +775,7 @@ static int rsi_mac80211_set_rate_mask(struct ieee80211_hw *hw,
 {
        struct rsi_hw *adapter = hw->priv;
        struct rsi_common *common = adapter->priv;
-       enum ieee80211_band band = hw->conf.chandef.chan->band;
+       enum nl80211_band band = hw->conf.chandef.chan->band;
 
        mutex_lock(&common->mutex);
        common->fixedrate_mask[band] = 0;
@@ -999,8 +999,8 @@ static int rsi_mac80211_sta_remove(struct ieee80211_hw *hw,
 
        mutex_lock(&common->mutex);
        /* Resetting all the fields to default values */
-       common->bitrate_mask[IEEE80211_BAND_2GHZ] = 0;
-       common->bitrate_mask[IEEE80211_BAND_5GHZ] = 0;
+       common->bitrate_mask[NL80211_BAND_2GHZ] = 0;
+       common->bitrate_mask[NL80211_BAND_5GHZ] = 0;
        common->min_rate = 0xffff;
        common->vif_info[0].is_ht = false;
        common->vif_info[0].sgi = false;
@@ -1070,8 +1070,8 @@ int rsi_mac80211_attach(struct rsi_common *common)
        hw->max_rate_tries = MAX_RETRIES;
 
        hw->max_tx_aggregation_subframes = 6;
-       rsi_register_rates_channels(adapter, IEEE80211_BAND_2GHZ);
-       rsi_register_rates_channels(adapter, IEEE80211_BAND_5GHZ);
+       rsi_register_rates_channels(adapter, NL80211_BAND_2GHZ);
+       rsi_register_rates_channels(adapter, NL80211_BAND_5GHZ);
        hw->rate_control_algorithm = "AARF";
 
        SET_IEEE80211_PERM_ADDR(hw, common->mac_addr);
@@ -1087,10 +1087,10 @@ int rsi_mac80211_attach(struct rsi_common *common)
 
        wiphy->available_antennas_rx = 1;
        wiphy->available_antennas_tx = 1;
-       wiphy->bands[IEEE80211_BAND_2GHZ] =
-               &adapter->sbands[IEEE80211_BAND_2GHZ];
-       wiphy->bands[IEEE80211_BAND_5GHZ] =
-               &adapter->sbands[IEEE80211_BAND_5GHZ];
+       wiphy->bands[NL80211_BAND_2GHZ] =
+               &adapter->sbands[NL80211_BAND_2GHZ];
+       wiphy->bands[NL80211_BAND_5GHZ] =
+               &adapter->sbands[NL80211_BAND_5GHZ];
 
        status = ieee80211_register_hw(hw);
        if (status)
index e43b59d5b53bc76c0a3e52bfab5e6bd757334682..40658b62d07780ea1f1aa39cecf59673ca73c16e 100644 (file)
@@ -210,7 +210,7 @@ static u16 mcs[] = {13, 26, 39, 52, 78, 104, 117, 130};
  */
 static void rsi_set_default_parameters(struct rsi_common *common)
 {
-       common->band = IEEE80211_BAND_2GHZ;
+       common->band = NL80211_BAND_2GHZ;
        common->channel_width = BW_20MHZ;
        common->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
        common->channel = 1;
@@ -655,7 +655,7 @@ int rsi_set_vap_capabilities(struct rsi_common *common, enum opmode mode)
        vap_caps->rts_threshold = cpu_to_le16(common->rts_threshold);
        vap_caps->default_mgmt_rate = cpu_to_le32(RSI_RATE_6);
 
-       if (common->band == IEEE80211_BAND_5GHZ) {
+       if (common->band == NL80211_BAND_5GHZ) {
                vap_caps->default_ctrl_rate = cpu_to_le32(RSI_RATE_6);
                if (conf_is_ht40(&common->priv->hw->conf)) {
                        vap_caps->default_ctrl_rate |=
@@ -872,7 +872,7 @@ int rsi_band_check(struct rsi_common *common)
        else
                common->channel_width = BW_40MHZ;
 
-       if (common->band == IEEE80211_BAND_2GHZ) {
+       if (common->band == NL80211_BAND_2GHZ) {
                if (common->channel_width)
                        common->endpoint = EP_2GHZ_40MHZ;
                else
@@ -1046,7 +1046,7 @@ static int rsi_send_auto_rate_request(struct rsi_common *common)
        if (common->channel_width == BW_40MHZ)
                auto_rate->desc_word[7] |= cpu_to_le16(1);
 
-       if (band == IEEE80211_BAND_2GHZ) {
+       if (band == NL80211_BAND_2GHZ) {
                min_rate = RSI_RATE_1;
                rate_table_offset = 0;
        } else {
index 702593f199971a89c227b3015b243efb203920c4..02920c93e82d837fb72460ab89d81bdab5f17124 100644 (file)
 int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb)
 {
        struct rsi_hw *adapter = common->priv;
-       struct ieee80211_hdr *tmp_hdr = NULL;
+       struct ieee80211_hdr *tmp_hdr;
        struct ieee80211_tx_info *info;
        struct skb_info *tx_params;
-       struct ieee80211_bss_conf *bss = NULL;
-       int status = -EINVAL;
+       struct ieee80211_bss_conf *bss;
+       int status;
        u8 ieee80211_size = MIN_802_11_HDR_LEN;
-       u8 extnd_size = 0;
+       u8 extnd_size;
        __le16 *frame_desc;
-       u16 seq_num = 0;
+       u16 seq_num;
 
        info = IEEE80211_SKB_CB(skb);
        bss = &info->control.vif->bss_conf;
        tx_params = (struct skb_info *)info->driver_data;
 
-       if (!bss->assoc)
+       if (!bss->assoc) {
+               status = -EINVAL;
                goto err;
+       }
 
        tmp_hdr = (struct ieee80211_hdr *)&skb->data[0];
        seq_num = (le16_to_cpu(tmp_hdr->seq_ctrl) >> 4);
@@ -123,15 +125,15 @@ int rsi_send_mgmt_pkt(struct rsi_common *common,
                      struct sk_buff *skb)
 {
        struct rsi_hw *adapter = common->priv;
-       struct ieee80211_hdr *wh = NULL;
+       struct ieee80211_hdr *wh;
        struct ieee80211_tx_info *info;
-       struct ieee80211_bss_conf *bss = NULL;
+       struct ieee80211_bss_conf *bss;
        struct ieee80211_hw *hw = adapter->hw;
        struct ieee80211_conf *conf = &hw->conf;
        struct skb_info *tx_params;
        int status = -E2BIG;
-       __le16 *msg = NULL;
-       u8 extnd_size = 0;
+       __le16 *msg;
+       u8 extnd_size;
        u8 vap_id = 0;
 
        info = IEEE80211_SKB_CB(skb);
@@ -182,7 +184,7 @@ int rsi_send_mgmt_pkt(struct rsi_common *common,
        if (wh->addr1[0] & BIT(0))
                msg[3] |= cpu_to_le16(RSI_BROADCAST_PKT);
 
-       if (common->band == IEEE80211_BAND_2GHZ)
+       if (common->band == NL80211_BAND_2GHZ)
                msg[4] = cpu_to_le16(RSI_11B_MODE);
        else
                msg[4] = cpu_to_le16((RSI_RATE_6 & 0x0f) | RSI_11G_MODE);
index 5baed945f60e2cb276eeeaa7a520156132af4a55..dcd095787166e3aa0cf6de80054e2ee5e6134d77 100644 (file)
@@ -211,7 +211,7 @@ struct rsi_hw {
        struct ieee80211_hw *hw;
        struct ieee80211_vif *vifs[RSI_MAX_VIFS];
        struct ieee80211_tx_queue_params edca_params[NUM_EDCA_QUEUES];
-       struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+       struct ieee80211_supported_band sbands[NUM_NL80211_BANDS];
 
        struct device *device;
        u8 sc_nvifs;
index 0e51e27d2e3f1bba627d8b9fd5f7c68be7bff662..dc478cedbde0d30f4588634b05fc529976e4ce9d 100644 (file)
@@ -102,7 +102,7 @@ static struct ieee80211_rate cw1200_mcs_rates[] = {
 
 
 #define CHAN2G(_channel, _freq, _flags) {                      \
-       .band                   = IEEE80211_BAND_2GHZ,          \
+       .band                   = NL80211_BAND_2GHZ,            \
        .center_freq            = (_freq),                      \
        .hw_value               = (_channel),                   \
        .flags                  = (_flags),                     \
@@ -111,7 +111,7 @@ static struct ieee80211_rate cw1200_mcs_rates[] = {
 }
 
 #define CHAN5G(_channel, _flags) {                             \
-       .band                   = IEEE80211_BAND_5GHZ,          \
+       .band                   = NL80211_BAND_5GHZ,            \
        .center_freq    = 5000 + (5 * (_channel)),              \
        .hw_value               = (_channel),                   \
        .flags                  = (_flags),                     \
@@ -311,12 +311,12 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr,
 
        hw->sta_data_size = sizeof(struct cw1200_sta_priv);
 
-       hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &cw1200_band_2ghz;
+       hw->wiphy->bands[NL80211_BAND_2GHZ] = &cw1200_band_2ghz;
        if (have_5ghz)
-               hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &cw1200_band_5ghz;
+               hw->wiphy->bands[NL80211_BAND_5GHZ] = &cw1200_band_5ghz;
 
        /* Channel params have to be cleared before registering wiphy again */
-       for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+       for (band = 0; band < NUM_NL80211_BANDS; band++) {
                struct ieee80211_supported_band *sband = hw->wiphy->bands[band];
                if (!sband)
                        continue;
index bff81b8d4164dbc219d468ffa314c710a547badf..983788156bb054046d99644c0298083f26943c6f 100644 (file)
@@ -402,7 +402,7 @@ void cw1200_probe_work(struct work_struct *work)
        }
        wsm = (struct wsm_tx *)frame.skb->data;
        scan.max_tx_rate = wsm->max_tx_rate;
-       scan.band = (priv->channel->band == IEEE80211_BAND_5GHZ) ?
+       scan.band = (priv->channel->band == NL80211_BAND_5GHZ) ?
                WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G;
        if (priv->join_status == CW1200_JOIN_STATUS_STA ||
            priv->join_status == CW1200_JOIN_STATUS_IBSS) {
index d0ddcde6c695fb6c5efd9b2283c76e98328fdbdf..daf06a4f842ed2e787e41390ab92d1364569d860 100644 (file)
@@ -1278,7 +1278,7 @@ static void cw1200_do_join(struct cw1200_common *priv)
        join.dtim_period = priv->join_dtim_period;
 
        join.channel_number = priv->channel->hw_value;
-       join.band = (priv->channel->band == IEEE80211_BAND_5GHZ) ?
+       join.band = (priv->channel->band == NL80211_BAND_5GHZ) ?
                WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G;
 
        memcpy(join.bssid, bssid, sizeof(join.bssid));
@@ -1462,7 +1462,7 @@ int cw1200_enable_listening(struct cw1200_common *priv)
        };
 
        if (priv->channel) {
-               start.band = priv->channel->band == IEEE80211_BAND_5GHZ ?
+               start.band = priv->channel->band == NL80211_BAND_5GHZ ?
                             WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G;
                start.channel_number = priv->channel->hw_value;
        } else {
@@ -2315,7 +2315,7 @@ static int cw1200_start_ap(struct cw1200_common *priv)
        struct wsm_start start = {
                .mode = priv->vif->p2p ?
                                WSM_START_MODE_P2P_GO : WSM_START_MODE_AP,
-               .band = (priv->channel->band == IEEE80211_BAND_5GHZ) ?
+               .band = (priv->channel->band == NL80211_BAND_5GHZ) ?
                                WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G,
                .channel_number = priv->channel->hw_value,
                .beacon_interval = conf->beacon_int,
index d28bd49cb5fd16132623630542788b448b2e8542..3d170287cd0b9df0428d8ec559f2f0f7e5c09a27 100644 (file)
@@ -1079,7 +1079,7 @@ void cw1200_rx_cb(struct cw1200_common *priv,
 
        hdr->band = ((arg->channel_number & 0xff00) ||
                     (arg->channel_number > 14)) ?
-                       IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
+                       NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
        hdr->freq = ieee80211_channel_to_frequency(
                        arg->channel_number,
                        hdr->band);
index 9e0ca3048657533df52055ac8c30e01006e95048..680d60eabc75de4b50a44a7bb6395fb020fa4b83 100644 (file)
@@ -849,9 +849,9 @@ static int wsm_startup_indication(struct cw1200_common *priv,
 
        /* Disable unsupported frequency bands */
        if (!(priv->wsm_caps.fw_cap & 0x1))
-               priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = NULL;
+               priv->hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
        if (!(priv->wsm_caps.fw_cap & 0x2))
-               priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
+               priv->hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL;
 
        priv->firmware_ready = 1;
        wake_up(&priv->wsm_startup_done);
index cd4777954f87f6ca79b81b7564df21791bc85a7d..56384a4e2a35de916294e00fb440de372faf9bde 100644 (file)
@@ -1482,7 +1482,7 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
        wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
                                         BIT(NL80211_IFTYPE_ADHOC);
        wl->hw->wiphy->max_scan_ssids = 1;
-       wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1251_band_2ghz;
+       wl->hw->wiphy->bands[NL80211_BAND_2GHZ] = &wl1251_band_2ghz;
 
        wl->hw->queues = 4;
 
index b9e27b98bbc928ac2c7eb7dd9e48053c87ef60bf..fa01b0a0f312abd20092478e6aa768997f326d26 100644 (file)
@@ -32,7 +32,7 @@ void wl1251_elp_work(struct work_struct *work)
        struct delayed_work *dwork;
        struct wl1251 *wl;
 
-       dwork = container_of(work, struct delayed_work, work);
+       dwork = to_delayed_work(work);
        wl = container_of(dwork, struct wl1251, elp_work);
 
        wl1251_debug(DEBUG_PSM, "elp work");
index cde0eaf99714b4054176bd7a3cad9e850d97abea..a27d4c22b6e88aaa1058d42b2accf9760a639272 100644 (file)
@@ -53,7 +53,7 @@ static void wl1251_rx_status(struct wl1251 *wl,
 
        memset(status, 0, sizeof(struct ieee80211_rx_status));
 
-       status->band = IEEE80211_BAND_2GHZ;
+       status->band = NL80211_BAND_2GHZ;
        status->mactime = desc->timestamp;
 
        /*
index a0d6cccc56f3b62c92e5a62fe0e852055c74e58f..58b9d3c3a8332c5360b855deff5d9b00e874c8e3 100644 (file)
@@ -469,8 +469,8 @@ static const u8 wl12xx_rate_to_idx_5ghz[] = {
 };
 
 static const u8 *wl12xx_band_rate_to_idx[] = {
-       [IEEE80211_BAND_2GHZ] = wl12xx_rate_to_idx_2ghz,
-       [IEEE80211_BAND_5GHZ] = wl12xx_rate_to_idx_5ghz
+       [NL80211_BAND_2GHZ] = wl12xx_rate_to_idx_2ghz,
+       [NL80211_BAND_5GHZ] = wl12xx_rate_to_idx_5ghz
 };
 
 enum wl12xx_hw_rates {
@@ -1827,8 +1827,8 @@ static int wl12xx_setup(struct wl1271 *wl)
        wl->fw_status_priv_len = 0;
        wl->stats.fw_stats_len = sizeof(struct wl12xx_acx_statistics);
        wl->ofdm_only_ap = true;
-       wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ, &wl12xx_ht_cap);
-       wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ, &wl12xx_ht_cap);
+       wlcore_set_ht_cap(wl, NL80211_BAND_2GHZ, &wl12xx_ht_cap);
+       wlcore_set_ht_cap(wl, NL80211_BAND_5GHZ, &wl12xx_ht_cap);
        wl12xx_conf_init(wl);
 
        if (!fref_param) {
index ebed13af9852beac98f26392c3a98e8ab57ff01f..8d475393f9e33ae986a51d9b0281a3dc35de8d22 100644 (file)
@@ -27,7 +27,7 @@
 static int wl1271_get_scan_channels(struct wl1271 *wl,
                                    struct cfg80211_scan_request *req,
                                    struct basic_scan_channel_params *channels,
-                                   enum ieee80211_band band, bool passive)
+                                   enum nl80211_band band, bool passive)
 {
        struct conf_scan_settings *c = &wl->conf.scan;
        int i, j;
@@ -92,7 +92,7 @@ static int wl1271_get_scan_channels(struct wl1271 *wl,
 #define WL1271_NOTHING_TO_SCAN 1
 
 static int wl1271_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-                           enum ieee80211_band band,
+                           enum nl80211_band band,
                            bool passive, u32 basic_rate)
 {
        struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
@@ -144,12 +144,12 @@ static int wl1271_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif,
        cmd->params.tid_trigger = CONF_TX_AC_ANY_TID;
        cmd->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;
 
-       if (band == IEEE80211_BAND_2GHZ)
+       if (band == NL80211_BAND_2GHZ)
                cmd->params.band = WL1271_SCAN_BAND_2_4_GHZ;
        else
                cmd->params.band = WL1271_SCAN_BAND_5_GHZ;
 
-       if (wl->scan.ssid_len && wl->scan.ssid) {
+       if (wl->scan.ssid_len) {
                cmd->params.ssid_len = wl->scan.ssid_len;
                memcpy(cmd->params.ssid, wl->scan.ssid, wl->scan.ssid_len);
        }
@@ -218,7 +218,7 @@ out:
 void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        int ret = 0;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        u32 rate, mask;
 
        switch (wl->scan.state) {
@@ -226,7 +226,7 @@ void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif)
                break;
 
        case WL1271_SCAN_STATE_2GHZ_ACTIVE:
-               band = IEEE80211_BAND_2GHZ;
+               band = NL80211_BAND_2GHZ;
                mask = wlvif->bitrate_masks[band];
                if (wl->scan.req->no_cck) {
                        mask &= ~CONF_TX_CCK_RATES;
@@ -243,7 +243,7 @@ void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif)
                break;
 
        case WL1271_SCAN_STATE_2GHZ_PASSIVE:
-               band = IEEE80211_BAND_2GHZ;
+               band = NL80211_BAND_2GHZ;
                mask = wlvif->bitrate_masks[band];
                if (wl->scan.req->no_cck) {
                        mask &= ~CONF_TX_CCK_RATES;
@@ -263,7 +263,7 @@ void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif)
                break;
 
        case WL1271_SCAN_STATE_5GHZ_ACTIVE:
-               band = IEEE80211_BAND_5GHZ;
+               band = NL80211_BAND_5GHZ;
                rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
                ret = wl1271_scan_send(wl, wlvif, band, false, rate);
                if (ret == WL1271_NOTHING_TO_SCAN) {
@@ -274,7 +274,7 @@ void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif)
                break;
 
        case WL1271_SCAN_STATE_5GHZ_PASSIVE:
-               band = IEEE80211_BAND_5GHZ;
+               band = NL80211_BAND_5GHZ;
                rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
                ret = wl1271_scan_send(wl, wlvif, band, true, rate);
                if (ret == WL1271_NOTHING_TO_SCAN) {
@@ -378,7 +378,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
        wl12xx_adjust_channels(cfg, cfg_channels);
 
        if (!force_passive && cfg->active[0]) {
-               u8 band = IEEE80211_BAND_2GHZ;
+               u8 band = NL80211_BAND_2GHZ;
                ret = wl12xx_cmd_build_probe_req(wl, wlvif,
                                                 wlvif->role_id, band,
                                                 req->ssids[0].ssid,
@@ -395,7 +395,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
        }
 
        if (!force_passive && cfg->active[1]) {
-               u8 band = IEEE80211_BAND_5GHZ;
+               u8 band = NL80211_BAND_5GHZ;
                ret = wl12xx_cmd_build_probe_req(wl, wlvif,
                                                 wlvif->role_id, band,
                                                 req->ssids[0].ssid,
index a8d176ddc73c3c8e6a6f5f7adab2ec9ea7e1c924..63e95ba744fd4166fa82bd2a116d16937214fb78 100644 (file)
@@ -48,10 +48,10 @@ int wl18xx_cmd_channel_switch(struct wl1271 *wl,
        cmd->stop_tx = ch_switch->block_tx;
 
        switch (ch_switch->chandef.chan->band) {
-       case IEEE80211_BAND_2GHZ:
+       case NL80211_BAND_2GHZ:
                cmd->band = WLCORE_BAND_2_4GHZ;
                break;
-       case IEEE80211_BAND_5GHZ:
+       case NL80211_BAND_5GHZ:
                cmd->band = WLCORE_BAND_5GHZ;
                break;
        default:
@@ -187,7 +187,7 @@ int wl18xx_cmd_set_cac(struct wl1271 *wl, struct wl12xx_vif *wlvif, bool start)
 
        cmd->role_id = wlvif->role_id;
        cmd->channel = wlvif->channel;
-       if (wlvif->band == IEEE80211_BAND_5GHZ)
+       if (wlvif->band == NL80211_BAND_5GHZ)
                cmd->band = WLCORE_BAND_5GHZ;
        cmd->bandwidth = wlcore_get_native_channel_type(wlvif->channel_type);
 
index ff6e46dd61f8df450a6b2d15aa67dab0385be2e7..ef811848d1412433f87a3459cee6e2af664b0b5d 100644 (file)
@@ -64,13 +64,13 @@ static int wlcore_smart_config_sync_event(struct wl1271 *wl, u8 sync_channel,
                                          u8 sync_band)
 {
        struct sk_buff *skb;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        int freq;
 
        if (sync_band == WLCORE_BAND_5GHZ)
-               band = IEEE80211_BAND_5GHZ;
+               band = NL80211_BAND_5GHZ;
        else
-               band = IEEE80211_BAND_2GHZ;
+               band = NL80211_BAND_2GHZ;
 
        freq = ieee80211_channel_to_frequency(sync_channel, band);
 
index 1bf26cc7374e01b3cbbd7cf8e30fc69f9be5454b..ae47c79cb9b6c22bd4ba9e7327d97df675ea4e41 100644 (file)
@@ -137,8 +137,8 @@ static const u8 wl18xx_rate_to_idx_5ghz[] = {
 };
 
 static const u8 *wl18xx_band_rate_to_idx[] = {
-       [IEEE80211_BAND_2GHZ] = wl18xx_rate_to_idx_2ghz,
-       [IEEE80211_BAND_5GHZ] = wl18xx_rate_to_idx_5ghz
+       [NL80211_BAND_2GHZ] = wl18xx_rate_to_idx_2ghz,
+       [NL80211_BAND_5GHZ] = wl18xx_rate_to_idx_5ghz
 };
 
 enum wl18xx_hw_rates {
@@ -1302,12 +1302,12 @@ static u32 wl18xx_ap_get_mimo_wide_rate_mask(struct wl1271 *wl,
                wl1271_debug(DEBUG_ACX, "using wide channel rate mask");
 
                /* sanity check - we don't support this */
-               if (WARN_ON(wlvif->band != IEEE80211_BAND_5GHZ))
+               if (WARN_ON(wlvif->band != NL80211_BAND_5GHZ))
                        return 0;
 
                return CONF_TX_RATE_USE_WIDE_CHAN;
        } else if (wl18xx_is_mimo_supported(wl) &&
-                  wlvif->band == IEEE80211_BAND_2GHZ) {
+                  wlvif->band == NL80211_BAND_2GHZ) {
                wl1271_debug(DEBUG_ACX, "using MIMO rate mask");
                /*
                 * we don't care about HT channel here - if a peer doesn't
@@ -1996,24 +1996,24 @@ static int wl18xx_setup(struct wl1271 *wl)
                 * siso40.
                 */
                if (wl18xx_is_mimo_supported(wl))
-                       wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
+                       wlcore_set_ht_cap(wl, NL80211_BAND_2GHZ,
                                          &wl18xx_mimo_ht_cap_2ghz);
                else
-                       wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
+                       wlcore_set_ht_cap(wl, NL80211_BAND_2GHZ,
                                          &wl18xx_siso40_ht_cap_2ghz);
 
                /* 5Ghz is always wide */
-               wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ,
+               wlcore_set_ht_cap(wl, NL80211_BAND_5GHZ,
                                  &wl18xx_siso40_ht_cap_5ghz);
        } else if (priv->conf.ht.mode == HT_MODE_WIDE) {
-               wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
+               wlcore_set_ht_cap(wl, NL80211_BAND_2GHZ,
                                  &wl18xx_siso40_ht_cap_2ghz);
-               wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ,
+               wlcore_set_ht_cap(wl, NL80211_BAND_5GHZ,
                                  &wl18xx_siso40_ht_cap_5ghz);
        } else if (priv->conf.ht.mode == HT_MODE_SISO20) {
-               wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
+               wlcore_set_ht_cap(wl, NL80211_BAND_2GHZ,
                                  &wl18xx_siso20_ht_cap);
-               wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ,
+               wlcore_set_ht_cap(wl, NL80211_BAND_5GHZ,
                                  &wl18xx_siso20_ht_cap);
        }
 
index bc15aa2c3efad73582de0fab4a98f6836280272c..4e522154435463e47606453363ffdf8b2cf8b34f 100644 (file)
@@ -110,7 +110,7 @@ static int wl18xx_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 
        /* TODO: per-band ies? */
        if (cmd->active[0]) {
-               u8 band = IEEE80211_BAND_2GHZ;
+               u8 band = NL80211_BAND_2GHZ;
                ret = wl12xx_cmd_build_probe_req(wl, wlvif,
                                 cmd->role_id, band,
                                 req->ssids ? req->ssids[0].ssid : NULL,
@@ -127,7 +127,7 @@ static int wl18xx_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif,
        }
 
        if (cmd->active[1] || cmd->dfs) {
-               u8 band = IEEE80211_BAND_5GHZ;
+               u8 band = NL80211_BAND_5GHZ;
                ret = wl12xx_cmd_build_probe_req(wl, wlvif,
                                 cmd->role_id, band,
                                 req->ssids ? req->ssids[0].ssid : NULL,
@@ -253,7 +253,7 @@ int wl18xx_scan_sched_scan_config(struct wl1271 *wl,
        cmd->terminate_on_report = 0;
 
        if (cmd->active[0]) {
-               u8 band = IEEE80211_BAND_2GHZ;
+               u8 band = NL80211_BAND_2GHZ;
                ret = wl12xx_cmd_build_probe_req(wl, wlvif,
                                 cmd->role_id, band,
                                 req->ssids ? req->ssids[0].ssid : NULL,
@@ -270,7 +270,7 @@ int wl18xx_scan_sched_scan_config(struct wl1271 *wl,
        }
 
        if (cmd->active[1] || cmd->dfs) {
-               u8 band = IEEE80211_BAND_5GHZ;
+               u8 band = NL80211_BAND_5GHZ;
                ret = wl12xx_cmd_build_probe_req(wl, wlvif,
                                 cmd->role_id, band,
                                 req->ssids ? req->ssids[0].ssid : NULL,
index 3406ffb53325c16ed47271feb470da95f0f0965e..ebaf66ef3f84435388df178c5932070c51443c6e 100644 (file)
@@ -43,7 +43,7 @@ void wl18xx_get_last_tx_rate(struct wl1271 *wl, struct ieee80211_vif *vif,
 
        if (fw_rate <= CONF_HW_RATE_INDEX_54MBPS) {
                rate->idx = fw_rate;
-               if (band == IEEE80211_BAND_5GHZ)
+               if (band == NL80211_BAND_5GHZ)
                        rate->idx -= CONF_HW_RATE_INDEX_6MBPS;
                rate->flags = 0;
        } else {
index f01d24baff7cf00712cf07667f9b046b3d00dab2..33153565ad6261ed908c8bb4c9a6f2ece45207cb 100644 (file)
@@ -423,7 +423,7 @@ EXPORT_SYMBOL_GPL(wlcore_get_native_channel_type);
 
 static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
                                     struct wl12xx_vif *wlvif,
-                                    enum ieee80211_band band,
+                                    enum nl80211_band band,
                                     int channel)
 {
        struct wl12xx_cmd_role_start *cmd;
@@ -438,7 +438,7 @@ static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
        wl1271_debug(DEBUG_CMD, "cmd role start dev %d", wlvif->dev_role_id);
 
        cmd->role_id = wlvif->dev_role_id;
-       if (band == IEEE80211_BAND_5GHZ)
+       if (band == NL80211_BAND_5GHZ)
                cmd->band = WLCORE_BAND_5GHZ;
        cmd->channel = channel;
 
@@ -524,7 +524,7 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
        wl1271_debug(DEBUG_CMD, "cmd role start sta %d", wlvif->role_id);
 
        cmd->role_id = wlvif->role_id;
-       if (wlvif->band == IEEE80211_BAND_5GHZ)
+       if (wlvif->band == NL80211_BAND_5GHZ)
                cmd->band = WLCORE_BAND_5GHZ;
        cmd->channel = wlvif->channel;
        cmd->sta.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
@@ -693,10 +693,10 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
        cmd->ap.local_rates = cpu_to_le32(supported_rates);
 
        switch (wlvif->band) {
-       case IEEE80211_BAND_2GHZ:
+       case NL80211_BAND_2GHZ:
                cmd->band = WLCORE_BAND_2_4GHZ;
                break;
-       case IEEE80211_BAND_5GHZ:
+       case NL80211_BAND_5GHZ:
                cmd->band = WLCORE_BAND_5GHZ;
                break;
        default:
@@ -773,7 +773,7 @@ int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif)
        wl1271_debug(DEBUG_CMD, "cmd role start ibss %d", wlvif->role_id);
 
        cmd->role_id = wlvif->role_id;
-       if (wlvif->band == IEEE80211_BAND_5GHZ)
+       if (wlvif->band == NL80211_BAND_5GHZ)
                cmd->band = WLCORE_BAND_5GHZ;
        cmd->channel = wlvif->channel;
        cmd->ibss.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
@@ -1164,7 +1164,7 @@ int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
        }
 
        rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
-       if (band == IEEE80211_BAND_2GHZ)
+       if (band == NL80211_BAND_2GHZ)
                ret = wl1271_cmd_template_set(wl, role_id,
                                              template_id_2_4,
                                              skb->data, skb->len, 0, rate);
@@ -1195,7 +1195,7 @@ struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
        wl1271_debug(DEBUG_SCAN, "set ap probe request template");
 
        rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[wlvif->band]);
-       if (wlvif->band == IEEE80211_BAND_2GHZ)
+       if (wlvif->band == NL80211_BAND_2GHZ)
                ret = wl1271_cmd_template_set(wl, wlvif->role_id,
                                              CMD_TEMPL_CFG_PROBE_REQ_2_4,
                                              skb->data, skb->len, 0, rate);
@@ -1628,19 +1628,19 @@ out:
        return ret;
 }
 
-static int wlcore_get_reg_conf_ch_idx(enum ieee80211_band band, u16 ch)
+static int wlcore_get_reg_conf_ch_idx(enum nl80211_band band, u16 ch)
 {
        /*
         * map the given band/channel to the respective predefined
         * bit expected by the fw
         */
        switch (band) {
-       case IEEE80211_BAND_2GHZ:
+       case NL80211_BAND_2GHZ:
                /* channels 1..14 are mapped to 0..13 */
                if (ch >= 1 && ch <= 14)
                        return ch - 1;
                break;
-       case IEEE80211_BAND_5GHZ:
+       case NL80211_BAND_5GHZ:
                switch (ch) {
                case 8 ... 16:
                        /* channels 8,12,16 are mapped to 18,19,20 */
@@ -1670,7 +1670,7 @@ static int wlcore_get_reg_conf_ch_idx(enum ieee80211_band band, u16 ch)
 }
 
 void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
-                                    enum ieee80211_band band)
+                                    enum nl80211_band band)
 {
        int ch_bit_idx = 0;
 
@@ -1699,7 +1699,7 @@ int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl)
 
        memset(tmp_ch_bitmap, 0, sizeof(tmp_ch_bitmap));
 
-       for (b = IEEE80211_BAND_2GHZ; b <= IEEE80211_BAND_5GHZ; b++) {
+       for (b = NL80211_BAND_2GHZ; b <= NL80211_BAND_5GHZ; b++) {
                band = wiphy->bands[b];
                for (i = 0; i < band->n_channels; i++) {
                        struct ieee80211_channel *channel = &band->channels[i];
@@ -1851,7 +1851,7 @@ out:
 }
 
 static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-                         u8 role_id, enum ieee80211_band band, u8 channel)
+                         u8 role_id, enum nl80211_band band, u8 channel)
 {
        struct wl12xx_cmd_roc *cmd;
        int ret = 0;
@@ -1870,10 +1870,10 @@ static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
        cmd->role_id = role_id;
        cmd->channel = channel;
        switch (band) {
-       case IEEE80211_BAND_2GHZ:
+       case NL80211_BAND_2GHZ:
                cmd->band = WLCORE_BAND_2_4GHZ;
                break;
-       case IEEE80211_BAND_5GHZ:
+       case NL80211_BAND_5GHZ:
                cmd->band = WLCORE_BAND_5GHZ;
                break;
        default:
@@ -1925,7 +1925,7 @@ out:
 }
 
 int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id,
-              enum ieee80211_band band, u8 channel)
+              enum nl80211_band band, u8 channel)
 {
        int ret = 0;
 
@@ -1995,7 +1995,7 @@ out:
 
 /* start dev role and roc on its channel */
 int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-                    enum ieee80211_band band, int channel)
+                    enum nl80211_band band, int channel)
 {
        int ret;
 
index e28e2f2303ce9d0ed26c00db7e913c8698ce341c..52c3b486046186f18c4391875d4668d183120877 100644 (file)
@@ -40,7 +40,7 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-                    enum ieee80211_band band, int channel);
+                    enum nl80211_band band, int channel);
 int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
 int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf,
@@ -83,14 +83,14 @@ int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 int wl12xx_cmd_set_peer_state(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                              u8 hlid);
 int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id,
-              enum ieee80211_band band, u8 channel);
+              enum nl80211_band band, u8 channel);
 int wl12xx_croc(struct wl1271 *wl, u8 role_id);
 int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                        struct ieee80211_sta *sta, u8 hlid);
 int wl12xx_cmd_remove_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                           u8 hlid);
 void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
-                                    enum ieee80211_band band);
+                                    enum nl80211_band band);
 int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl);
 int wlcore_cmd_generic_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                           u8 feature, u8 enable, u8 value);
index dde36203ca426d04bb31fc963adb635981a10cf4..10fd24c28ece332f9dd5da7fc5ae9f32f2b727db 100644 (file)
@@ -243,7 +243,7 @@ static void wl12xx_tx_watchdog_work(struct work_struct *work)
        struct delayed_work *dwork;
        struct wl1271 *wl;
 
-       dwork = container_of(work, struct delayed_work, work);
+       dwork = to_delayed_work(work);
        wl = container_of(dwork, struct wl1271, tx_watchdog_work);
 
        mutex_lock(&wl->mutex);
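
to_delayed_work() is a thin wrapper around container_of(work, struct delayed_work, work), so these conversions across the delayed-work handlers are behavior-neutral. A standalone sketch of the container_of arithmetic the helper hides; the two structs are cut-down stand-ins for the kernel's work types:

    #include <stdio.h>
    #include <stddef.h>

    /* Cut-down stand-ins for the kernel's work_struct/delayed_work. */
    struct work_struct { int pending; };
    struct delayed_work { struct work_struct work; long timer_expires; };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* What the kernel's to_delayed_work() expands to. */
    static struct delayed_work *to_delayed_work(struct work_struct *work)
    {
            return container_of(work, struct delayed_work, work);
    }

    int main(void)
    {
            struct delayed_work dwork = { { 0 }, 42 };
            struct work_struct *work = &dwork.work; /* what a handler gets */

            printf("expires=%ld\n", to_delayed_work(work)->timer_expires);
            return 0;
    }
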
@@ -1930,7 +1930,7 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
        if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
                wlcore_enable_interrupts(wl);
 
-       wl->band = IEEE80211_BAND_2GHZ;
+       wl->band = NL80211_BAND_2GHZ;
 
        wl->rx_counter = 0;
        wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
@@ -2011,7 +2011,7 @@ static void wlcore_channel_switch_work(struct work_struct *work)
        struct wl12xx_vif *wlvif;
        int ret;
 
-       dwork = container_of(work, struct delayed_work, work);
+       dwork = to_delayed_work(work);
        wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
        wl = wlvif->wl;
 
@@ -2047,7 +2047,7 @@ static void wlcore_connection_loss_work(struct work_struct *work)
        struct ieee80211_vif *vif;
        struct wl12xx_vif *wlvif;
 
-       dwork = container_of(work, struct delayed_work, work);
+       dwork = to_delayed_work(work);
        wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
        wl = wlvif->wl;
 
@@ -2076,7 +2076,7 @@ static void wlcore_pending_auth_complete_work(struct work_struct *work)
        unsigned long time_spare;
        int ret;
 
-       dwork = container_of(work, struct delayed_work, work);
+       dwork = to_delayed_work(work);
        wlvif = container_of(dwork, struct wl12xx_vif,
                             pending_auth_complete_work);
        wl = wlvif->wl;
@@ -2240,8 +2240,8 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
                wlvif->rate_set = CONF_TX_ENABLED_RATES;
        }
 
-       wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
-       wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
+       wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
+       wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
        wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
 
        /*
@@ -2330,7 +2330,7 @@ power_off:
         * 11a channels if not supported
         */
        if (!wl->enable_11a)
-               wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
+               wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
 
        wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
                     wl->enable_11a ? "" : "not ");
@@ -5588,7 +5588,7 @@ static void wlcore_roc_complete_work(struct work_struct *work)
        struct wl1271 *wl;
        int ret;
 
-       dwork = container_of(work, struct delayed_work, work);
+       dwork = to_delayed_work(work);
        wl = container_of(dwork, struct wl1271, roc_complete_work);
 
        ret = wlcore_roc_completed(wl);
@@ -5871,7 +5871,7 @@ static const struct ieee80211_ops wl1271_ops = {
 };
 
 
-u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
+u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
 {
        u8 idx;
 
@@ -6096,21 +6096,21 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
         * We keep local copies of the band structs because we need to
         * modify them on a per-device basis.
         */
-       memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
+       memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
               sizeof(wl1271_band_2ghz));
-       memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
-              &wl->ht_cap[IEEE80211_BAND_2GHZ],
+       memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
+              &wl->ht_cap[NL80211_BAND_2GHZ],
               sizeof(*wl->ht_cap));
-       memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
+       memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
               sizeof(wl1271_band_5ghz));
-       memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
-              &wl->ht_cap[IEEE80211_BAND_5GHZ],
+       memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
+              &wl->ht_cap[NL80211_BAND_5GHZ],
               sizeof(*wl->ht_cap));
 
-       wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
-               &wl->bands[IEEE80211_BAND_2GHZ];
-       wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
-               &wl->bands[IEEE80211_BAND_5GHZ];
+       wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
+               &wl->bands[NL80211_BAND_2GHZ];
+       wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
+               &wl->bands[NL80211_BAND_5GHZ];
 
        /*
         * allow 4 queues per mac address we support +
@@ -6205,7 +6205,7 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
        wl->channel = 0;
        wl->rx_counter = 0;
        wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
-       wl->band = IEEE80211_BAND_2GHZ;
+       wl->band = NL80211_BAND_2GHZ;
        wl->channel_type = NL80211_CHAN_NO_HT;
        wl->flags = 0;
        wl->sg_enabled = true;
index 4cd316e614665d4260370dfe945b9a629323f084..b36133b739cb383e36af616afd890e528feeca4c 100644 (file)
@@ -38,7 +38,7 @@ void wl1271_elp_work(struct work_struct *work)
        struct wl12xx_vif *wlvif;
        int ret;
 
-       dwork = container_of(work, struct delayed_work, work);
+       dwork = to_delayed_work(work);
        wl = container_of(dwork, struct wl1271, elp_work);
 
        wl1271_debug(DEBUG_PSM, "elp work");
@@ -202,7 +202,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                 * enable beacon early termination.
                 * Not relevant for 5GHz and for high rates.
                 */
-               if ((wlvif->band == IEEE80211_BAND_2GHZ) &&
+               if ((wlvif->band == NL80211_BAND_2GHZ) &&
                    (wlvif->basic_rate < CONF_HW_BIT_RATE_9MBPS)) {
                        ret = wl1271_acx_bet_enable(wl, wlvif, true);
                        if (ret < 0)
@@ -213,7 +213,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                wl1271_debug(DEBUG_PSM, "leaving psm");
 
                /* disable beacon early termination */
-               if ((wlvif->band == IEEE80211_BAND_2GHZ) &&
+               if ((wlvif->band == NL80211_BAND_2GHZ) &&
                    (wlvif->basic_rate < CONF_HW_BIT_RATE_9MBPS)) {
                        ret = wl1271_acx_bet_enable(wl, wlvif, false);
                        if (ret < 0)
index 34e7e938ede4a8cf1d61220df6f97147c6ec020b..c9bd294a0aa65b2f294c4168a725f579ab9b4068 100644 (file)
@@ -64,9 +64,9 @@ static void wl1271_rx_status(struct wl1271 *wl,
        memset(status, 0, sizeof(struct ieee80211_rx_status));
 
        if ((desc->flags & WL1271_RX_DESC_BAND_MASK) == WL1271_RX_DESC_BAND_BG)
-               status->band = IEEE80211_BAND_2GHZ;
+               status->band = NL80211_BAND_2GHZ;
        else
-               status->band = IEEE80211_BAND_5GHZ;
+               status->band = NL80211_BAND_5GHZ;
 
        status->rate_idx = wlcore_rate_to_idx(wl, desc->rate, status->band);
 
index f5a7087cfb97831fea0296186d59b3acbc0d7ac5..57c0565637d688786d06412e10d516396b5b1669 100644 (file)
@@ -146,7 +146,7 @@ struct wl1271_rx_descriptor {
 } __packed;
 
 int wlcore_rx(struct wl1271 *wl, struct wl_fw_status *status);
-u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
+u8 wl1271_rate_to_idx(int rate, enum nl80211_band band);
 int wl1271_rx_filter_enable(struct wl1271 *wl,
                            int index, bool enable,
                            struct wl12xx_rx_filter *filter);
index 1e3d51cd673ab6d2be132c6fda17f698547b3ff7..23343643207ad53989219c39d1010144bc25814f 100644 (file)
@@ -38,7 +38,7 @@ void wl1271_scan_complete_work(struct work_struct *work)
        struct wl12xx_vif *wlvif;
        int ret;
 
-       dwork = container_of(work, struct delayed_work, work);
+       dwork = to_delayed_work(work);
        wl = container_of(dwork, struct wl1271, scan_complete_work);
 
        wl1271_debug(DEBUG_SCAN, "Scanning complete");
@@ -164,7 +164,7 @@ wlcore_scan_get_channels(struct wl1271 *wl,
                struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
                u32 delta_per_probe;
 
-               if (band == IEEE80211_BAND_5GHZ)
+               if (band == NL80211_BAND_5GHZ)
                        delta_per_probe = c->dwell_time_delta_per_probe_5;
                else
                        delta_per_probe = c->dwell_time_delta_per_probe;
@@ -215,7 +215,7 @@ wlcore_scan_get_channels(struct wl1271 *wl,
                        channels[j].channel = req_channels[i]->hw_value;
 
                        if (n_pactive_ch &&
-                           (band == IEEE80211_BAND_2GHZ) &&
+                           (band == NL80211_BAND_2GHZ) &&
                            (channels[j].channel >= 12) &&
                            (channels[j].channel <= 14) &&
                            (flags & IEEE80211_CHAN_NO_IR) &&
@@ -266,7 +266,7 @@ wlcore_set_scan_chan_params(struct wl1271 *wl,
                                         n_channels,
                                         n_ssids,
                                         cfg->channels_2,
-                                        IEEE80211_BAND_2GHZ,
+                                        NL80211_BAND_2GHZ,
                                         false, true, 0,
                                         MAX_CHANNELS_2GHZ,
                                         &n_pactive_ch,
@@ -277,7 +277,7 @@ wlcore_set_scan_chan_params(struct wl1271 *wl,
                                         n_channels,
                                         n_ssids,
                                         cfg->channels_2,
-                                        IEEE80211_BAND_2GHZ,
+                                        NL80211_BAND_2GHZ,
                                         false, false,
                                         cfg->passive[0],
                                         MAX_CHANNELS_2GHZ,
@@ -289,7 +289,7 @@ wlcore_set_scan_chan_params(struct wl1271 *wl,
                                         n_channels,
                                         n_ssids,
                                         cfg->channels_5,
-                                        IEEE80211_BAND_5GHZ,
+                                        NL80211_BAND_5GHZ,
                                         false, true, 0,
                                         wl->max_channels_5,
                                         &n_pactive_ch,
@@ -300,7 +300,7 @@ wlcore_set_scan_chan_params(struct wl1271 *wl,
                                         n_channels,
                                         n_ssids,
                                         cfg->channels_5,
-                                        IEEE80211_BAND_5GHZ,
+                                        NL80211_BAND_5GHZ,
                                         true, true,
                                         cfg->passive[1],
                                         wl->max_channels_5,
@@ -312,7 +312,7 @@ wlcore_set_scan_chan_params(struct wl1271 *wl,
                                         n_channels,
                                         n_ssids,
                                         cfg->channels_5,
-                                        IEEE80211_BAND_5GHZ,
+                                        NL80211_BAND_5GHZ,
                                         false, false,
                                         cfg->passive[1] + cfg->dfs,
                                         wl->max_channels_5,
index f0ac36139bcc1419a3963a5048e8c3013fce5cd4..c1b8e4e9d70b2d3db4ea4d321d18ad5e798f1e02 100644 (file)
@@ -453,7 +453,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 }
 
 u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
-                               enum ieee80211_band rate_band)
+                               enum nl80211_band rate_band)
 {
        struct ieee80211_supported_band *band;
        u32 enabled_rates = 0;
index 79cb3ff8b71f576aef0913f0a8828a95e3ede130..e2ba62d92d7a5971bfeb06900c20c18bea0e3e04 100644 (file)
@@ -246,9 +246,9 @@ int wlcore_tx_complete(struct wl1271 *wl);
 void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 void wl12xx_tx_reset(struct wl1271 *wl);
 void wl1271_tx_flush(struct wl1271 *wl);
-u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band);
+u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band);
 u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
-                               enum ieee80211_band rate_band);
+                               enum nl80211_band rate_band);
 u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set);
 u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                      struct sk_buff *skb, struct ieee80211_sta *sta);
index 72c31a8edcfb30490963d6700ae0f7104c4358e2..8f28aa02230cb559efb3cf478af76ebd5d0e75c1 100644 (file)
@@ -342,7 +342,7 @@ struct wl1271 {
        struct wl12xx_vif *sched_vif;
 
        /* The current band */
-       enum ieee80211_band band;
+       enum nl80211_band band;
 
        struct completion *elp_compl;
        struct delayed_work elp_work;
@@ -517,7 +517,7 @@ void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                              struct wl1271_station *wl_sta, bool in_conn);
 
 static inline void
-wlcore_set_ht_cap(struct wl1271 *wl, enum ieee80211_band band,
+wlcore_set_ht_cap(struct wl1271 *wl, enum nl80211_band band,
                  struct ieee80211_sta_ht_cap *ht_cap)
 {
        memcpy(&wl->ht_cap[band], ht_cap, sizeof(*ht_cap));
index 27c56876b2c13821606a662a3d06a6ae4e24ccdb..5c4199f3a19a9b41b7e6917614a1af4d91866d53 100644 (file)
@@ -392,7 +392,7 @@ struct wl12xx_vif {
        u8 ssid_len;
 
        /* The current band */
-       enum ieee80211_band band;
+       enum nl80211_band band;
        int channel;
        enum nl80211_channel_type channel_type;
 
index d5c371d77ddf238d00e6ce7c3e46e2c6a4a6136b..13fd734b61ece4b4b7756da70dbda1e664665db0 100644 (file)
@@ -1287,7 +1287,7 @@ static void wl3501_tx_timeout(struct net_device *dev)
                printk(KERN_ERR "%s: Error %d resetting card on Tx timeout!\n",
                       dev->name, rc);
        else {
-               dev->trans_start = jiffies; /* prevent tx timeout */
+               netif_trans_update(dev); /* prevent tx timeout */
                netif_wake_queue(dev);
        }
 }
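
netif_trans_update() replaces the open-coded dev->trans_start = jiffies writes seen here: the per-queue trans_start is what the transmit watchdog actually checks, so drivers go through the helper instead of writing the device field directly. A simplified sketch of the idea; the structs and the jiffies counter are illustrative stand-ins, not the real netdevice layout:

    #include <stdio.h>

    static unsigned long jiffies;          /* stand-in tick counter */

    /* Illustrative stand-ins; the real helper updates the trans_start
     * of the device's queue 0. */
    struct netdev_queue { unsigned long trans_start; };
    struct net_device  { struct netdev_queue txq[1]; };

    static void netif_trans_update(struct net_device *dev)
    {
            dev->txq[0].trans_start = jiffies; /* re-arm watchdog deadline */
    }

    int main(void)
    {
            struct net_device dev = { { { 0 } } };

            jiffies = 1000;
            netif_trans_update(&dev);  /* instead of dev->trans_start = jiffies */
            printf("trans_start=%lu\n", dev.txq[0].trans_start);
            return 0;
    }
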
@@ -1454,7 +1454,7 @@ static int wl3501_get_freq(struct net_device *dev, struct iw_request_info *info,
        struct wl3501_card *this = netdev_priv(dev);
 
        wrqu->freq.m = 100000 *
-               ieee80211_channel_to_frequency(this->chan, IEEE80211_BAND_2GHZ);
+               ieee80211_channel_to_frequency(this->chan, NL80211_BAND_2GHZ);
        wrqu->freq.e = 1;
        return 0;
 }
index 6f5c793a7855e90f7255633e15baa0608676c957..dea049b2556f337ed1f1504cef7e834882035707 100644 (file)
@@ -845,7 +845,7 @@ static void zd1201_tx_timeout(struct net_device *dev)
        usb_unlink_urb(zd->tx_urb);
        dev->stats.tx_errors++;
        /* Restart the timeout to quiet the watchdog: */
-       dev->trans_start = jiffies; /* prevent tx timeout */
+       netif_trans_update(dev); /* prevent tx timeout */
 }
 
 static int zd1201_set_mac_address(struct net_device *dev, void *p)
index e539d9b1b562687d559f4a81f9e16eced68076bb..3e37a045f7025fa39cf55f55cc1d59d26b8feee9 100644 (file)
@@ -1068,7 +1068,7 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
        }
 
        stats.freq = zd_channels[_zd_chip_get_channel(&mac->chip) - 1].center_freq;
-       stats.band = IEEE80211_BAND_2GHZ;
+       stats.band = NL80211_BAND_2GHZ;
        stats.signal = zd_check_signal(hw, status->signal_strength);
 
        rate = zd_rx_rate(buffer, status);
@@ -1395,7 +1395,7 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
        mac->band.n_channels = ARRAY_SIZE(zd_channels);
        mac->band.channels = mac->channels;
 
-       hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &mac->band;
+       hw->wiphy->bands[NL80211_BAND_2GHZ] = &mac->band;
 
        ieee80211_hw_set(hw, MFP_CAPABLE);
        ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
index c32cbb59360088f520b930b9487cc7b3ce48d9d5..f068b6513cd286659b1e468e726c45d868a05b7b 100644 (file)
@@ -1204,7 +1204,7 @@ static int btt_rw_page(struct block_device *bdev, sector_t sector,
 {
        struct btt *btt = bdev->bd_disk->private_data;
 
-       btt_do_bvec(btt, NULL, page, PAGE_CACHE_SIZE, 0, rw, sector);
+       btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, rw, sector);
        page_endio(page, rw & WRITE, 0);
        return 0;
 }
index fc82743aefb6574f6a1e47ecd68e1d0b1557507f..19f822d7f65274cb3e4a737240b0bdbfd12a8548 100644 (file)
@@ -407,7 +407,7 @@ static const struct nd_cmd_desc __nd_cmd_dimm_descs[] = {
        [ND_CMD_IMPLEMENTED] = { },
        [ND_CMD_SMART] = {
                .out_num = 2,
-               .out_sizes = { 4, 8, },
+               .out_sizes = { 4, 128, },
        },
        [ND_CMD_SMART_THRESHOLD] = {
                .out_num = 2,
index 79646d0c3277d61ad0b6d901b8d3da5809f2c9bf..182a93fe37128d87d53b3c77d121ca28bb3ec8fd 100644 (file)
@@ -417,8 +417,8 @@ static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
                set_badblock(bb, start_sector, num_sectors);
 }
 
-static void namespace_add_poison(struct list_head *poison_list,
-               struct badblocks *bb, struct resource *res)
+static void badblocks_populate(struct list_head *poison_list,
+               struct badblocks *bb, const struct resource *res)
 {
        struct nd_poison *pl;
 
@@ -460,36 +460,35 @@ static void namespace_add_poison(struct list_head *poison_list,
 }
 
 /**
- * nvdimm_namespace_add_poison() - Convert a list of poison ranges to badblocks
- * @ndns:      the namespace containing poison ranges
- * @bb:                badblocks instance to populate
- * @offset:    offset at the start of the namespace before 'sector 0'
+ * nvdimm_badblocks_populate() - Convert a list of poison ranges to badblocks
+ * @region: parent region of the range to interrogate
+ * @bb: badblocks instance to populate
+ * @res: resource range to consider
  *
- * The poison list generated during NFIT initialization may contain multiple,
- * possibly overlapping ranges in the SPA (System Physical Address) space.
- * Compare each of these ranges to the namespace currently being initialized,
- * and add badblocks to the gendisk for all matching sub-ranges
+ * The poison list generated during bus initialization may contain
+ * multiple, possibly overlapping physical address ranges.  Compare each
+ * of these ranges to the resource range currently being initialized,
+ * and add badblocks entries for all matching sub-ranges
  */
-void nvdimm_namespace_add_poison(struct nd_namespace_common *ndns,
-               struct badblocks *bb, resource_size_t offset)
+void nvdimm_badblocks_populate(struct nd_region *nd_region,
+               struct badblocks *bb, const struct resource *res)
 {
-       struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
-       struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
        struct nvdimm_bus *nvdimm_bus;
        struct list_head *poison_list;
-       struct resource res = {
-               .start = nsio->res.start + offset,
-               .end = nsio->res.end,
-       };
 
-       nvdimm_bus = to_nvdimm_bus(nd_region->dev.parent);
+       if (!is_nd_pmem(&nd_region->dev)) {
+               dev_WARN_ONCE(&nd_region->dev, 1,
+                               "%s only valid for pmem regions\n", __func__);
+               return;
+       }
+       nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
        poison_list = &nvdimm_bus->poison_list;
 
        nvdimm_bus_lock(&nvdimm_bus->dev);
-       namespace_add_poison(poison_list, bb, &res);
+       badblocks_populate(poison_list, bb, res);
        nvdimm_bus_unlock(&nvdimm_bus->dev);
 }
-EXPORT_SYMBOL_GPL(nvdimm_namespace_add_poison);
+EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
 
 static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
 {
index 1799bd97a9ce5783a3cb73883a6328c8bf11fc02..875c524fafb0ed2235a9724517601bf45107c80a 100644 (file)
@@ -266,8 +266,8 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns);
 int nvdimm_namespace_detach_btt(struct nd_namespace_common *ndns);
 const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
                char *name);
-void nvdimm_namespace_add_poison(struct nd_namespace_common *ndns,
-               struct badblocks *bb, resource_size_t offset);
+void nvdimm_badblocks_populate(struct nd_region *nd_region,
+               struct badblocks *bb, const struct resource *res);
 int nd_blk_region_init(struct nd_region *nd_region);
 void __nd_iostat_start(struct bio *bio, unsigned long *start);
 static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
index 254d3bc13f70e22dccbf19709d3e35de8cf78315..e071e214febadae71d2d369319b76cd5b6b1a851 100644 (file)
@@ -376,7 +376,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
        } else {
                /* from init we validate */
                if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
-                       return -EINVAL;
+                       return -ENODEV;
        }
 
        if (nd_pfn->align > nvdimm_namespace_capacity(ndns)) {
index cc31c6f1f88e6138f22f8002f70fceb036667179..5101f3ab4f296a2edc0a290fc4ab87a3ba12f9bb 100644 (file)
@@ -103,6 +103,20 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
                        flush_dcache_page(page);
                }
        } else {
+               /*
+                * Note that we write the data both before and after
+                * clearing poison.  The write before clear poison
+                * handles situations where the latest written data is
+                * preserved and the clear poison operation simply marks
+                * the address range as valid without changing the data.
+                * In this case application software can assume that an
+                * interrupted write will either return the new good
+                * data or an error.
+                *
+                * However, if pmem_clear_poison() leaves the data in an
+                * indeterminate state we need to perform the write
+                * after clear poison.
+                */
                flush_dcache_page(page);
                memcpy_to_pmem(pmem_addr, mem + off, len);
                if (unlikely(bad_pmem)) {
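
The comment added above spells out the ordering contract: data is written once before any poison clearing, so an interrupted write reads back as either the new data or an error, and is written again afterwards in case the clear operation leaves the range indeterminate. A standalone sketch of that write/clear/write sequence; memcpy and the clear_poison stub stand in for memcpy_to_pmem() and the real clear-error path:

    #include <stdio.h>
    #include <string.h>
    #include <stdbool.h>

    static char pmem[16];

    /* Stub: the real clear-error operation may scramble the range. */
    static void clear_poison(void) { memset(pmem, 0, sizeof(pmem)); }

    static void pmem_write(const char *src, size_t len, bool bad_pmem)
    {
            memcpy(pmem, src, len);          /* write #1: old-good-or-error */
            if (bad_pmem) {
                    clear_poison();          /* may leave data indeterminate */
                    memcpy(pmem, src, len);  /* write #2: restore the data */
            }
    }

    int main(void)
    {
            pmem_write("hello", 6, true);
            printf("%s\n", pmem);
            return 0;
    }
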
@@ -151,7 +165,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
        struct pmem_device *pmem = bdev->bd_disk->private_data;
        int rc;
 
-       rc = pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector);
+       rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, rw, sector);
        if (rw & WRITE)
                wmb_pmem();
 
@@ -244,7 +258,9 @@ static void pmem_detach_disk(struct pmem_device *pmem)
 static int pmem_attach_disk(struct device *dev,
                struct nd_namespace_common *ndns, struct pmem_device *pmem)
 {
+       struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
        int nid = dev_to_node(dev);
+       struct resource bb_res;
        struct gendisk *disk;
 
        blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
@@ -271,8 +287,17 @@ static int pmem_attach_disk(struct device *dev,
        devm_exit_badblocks(dev, &pmem->bb);
        if (devm_init_badblocks(dev, &pmem->bb))
                return -ENOMEM;
-       nvdimm_namespace_add_poison(ndns, &pmem->bb, pmem->data_offset);
-
+       bb_res.start = nsio->res.start + pmem->data_offset;
+       bb_res.end = nsio->res.end;
+       if (is_nd_pfn(dev)) {
+               struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+               struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+
+               bb_res.start += __le32_to_cpu(pfn_sb->start_pad);
+               bb_res.end -= __le32_to_cpu(pfn_sb->end_trunc);
+       }
+       nvdimm_badblocks_populate(to_nd_region(dev->parent), &pmem->bb,
+                       &bb_res);
        disk->bb = &pmem->bb;
        add_disk(disk);
        revalidate_disk(disk);
@@ -372,10 +397,17 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
         */
        start += start_pad;
        npfns = (pmem->size - start_pad - end_trunc - SZ_8K) / SZ_4K;
-       if (nd_pfn->mode == PFN_MODE_PMEM)
-               offset = ALIGN(start + SZ_8K + 64 * npfns, nd_pfn->align)
+       if (nd_pfn->mode == PFN_MODE_PMEM) {
+               unsigned long memmap_size;
+
+               /*
+                * vmemmap_populate_hugepages() allocates the memmap array in
+                * HPAGE_SIZE chunks.
+                */
+               memmap_size = ALIGN(64 * npfns, HPAGE_SIZE);
+               offset = ALIGN(start + SZ_8K + memmap_size, nd_pfn->align)
                        - start;
-       else if (nd_pfn->mode == PFN_MODE_RAM)
+       } else if (nd_pfn->mode == PFN_MODE_RAM)
                offset = ALIGN(start + SZ_8K, nd_pfn->align) - start;
        else
                goto err;
@@ -553,7 +585,7 @@ static int nd_pmem_probe(struct device *dev)
        ndns->rw_bytes = pmem_rw_bytes;
        if (devm_init_badblocks(dev, &pmem->bb))
                return -ENOMEM;
-       nvdimm_namespace_add_poison(ndns, &pmem->bb, 0);
+       nvdimm_badblocks_populate(nd_region, &pmem->bb, &nsio->res);
 
        if (is_nd_btt(dev)) {
                /* btt allocates its own request_queue */
@@ -595,14 +627,25 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
 {
        struct pmem_device *pmem = dev_get_drvdata(dev);
        struct nd_namespace_common *ndns = pmem->ndns;
+       struct nd_region *nd_region = to_nd_region(dev->parent);
+       struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+       struct resource res = {
+               .start = nsio->res.start + pmem->data_offset,
+               .end = nsio->res.end,
+       };
 
        if (event != NVDIMM_REVALIDATE_POISON)
                return;
 
-       if (is_nd_btt(dev))
-               nvdimm_namespace_add_poison(ndns, &pmem->bb, 0);
-       else
-               nvdimm_namespace_add_poison(ndns, &pmem->bb, pmem->data_offset);
+       if (is_nd_pfn(dev)) {
+               struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+               struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+
+               res.start += __le32_to_cpu(pfn_sb->start_pad);
+               res.end -= __le32_to_cpu(pfn_sb->end_trunc);
+       }
+
+       nvdimm_badblocks_populate(nd_region, &pmem->bb, &res);
 }
 
 MODULE_ALIAS("pmem");
index 24ccda303efb21daa2bf982021655db286ffff4b..4fd733ff72b1cb7cf38023701d640a059b4e4670 100644 (file)
@@ -1478,8 +1478,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
        if (result > 0) {
                dev_err(dev->ctrl.device,
                        "Could not set queue count (%d)\n", result);
-               nr_io_queues = 0;
-               result = 0;
+               return 0;
        }
 
        if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) {
@@ -1513,7 +1512,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
         * If we enable msix early due to not intx, disable it again before
         * setting up the full range we need.
         */
-       if (!pdev->irq)
+       if (pdev->msi_enabled)
+               pci_disable_msi(pdev);
+       else if (pdev->msix_enabled)
                pci_disable_msix(pdev);
 
        for (i = 0; i < nr_io_queues; i++)
@@ -1696,7 +1697,6 @@ static int nvme_pci_enable(struct nvme_dev *dev)
        if (pci_enable_device_mem(pdev))
                return result;
 
-       dev->entry[0].vector = pdev->irq;
        pci_set_master(pdev);
 
        if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) &&
@@ -1709,13 +1709,18 @@ static int nvme_pci_enable(struct nvme_dev *dev)
        }
 
        /*
-        * Some devices don't advertse INTx interrupts, pre-enable a single
-        * MSIX vec for setup. We'll adjust this later.
+        * Some devices and/or platforms don't advertise or work with INTx
+        * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll
+        * adjust this later.
         */
-       if (!pdev->irq) {
-               result = pci_enable_msix(pdev, dev->entry, 1);
-               if (result < 0)
-                       goto disable;
+       if (pci_enable_msix(pdev, dev->entry, 1)) {
+               pci_enable_msi(pdev);
+               dev->entry[0].vector = pdev->irq;
+       }
+
+       if (!dev->entry[0].vector) {
+               result = -ENODEV;
+               goto disable;
        }
 
        cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
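
Interrupt setup at probe time now follows a fallback chain instead of assuming INTx works: try a single MSI-X vector, fall back to MSI, and fail the probe only if neither yields a vector. A rough standalone sketch of that chain; the enable functions are stubs standing in for the PCI API, and the real code records the vector in dev->entry[0]:

    #include <stdio.h>
    #include <errno.h>

    /* Stubs standing in for pci_enable_msix()/pci_enable_msi(). */
    static int enable_msix(void) { return -ENOSYS; } /* pretend MSI-X fails */
    static int enable_msi(void)  { return 0; }

    static int setup_one_vector(int *vector)
    {
            if (enable_msix() == 0)
                    *vector = 1;          /* got an MSI-X vector */
            else if (enable_msi() == 0)
                    *vector = 2;          /* fell back to MSI */
            else
                    *vector = 0;

            return *vector ? 0 : -ENODEV; /* no usable vector: fail probe */
    }

    int main(void)
    {
            int vec, rc = setup_one_vector(&vec);

            printf("rc=%d vector=%d\n", rc, vec);
            return 0;
    }
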
@@ -1859,6 +1864,9 @@ static void nvme_reset_work(struct work_struct *work)
        if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
                nvme_dev_disable(dev, false);
 
+       if (test_bit(NVME_CTRL_REMOVING, &dev->flags))
+               goto out;
+
        set_bit(NVME_CTRL_RESETTING, &dev->flags);
 
        result = nvme_pci_enable(dev);
@@ -2078,11 +2086,10 @@ static void nvme_remove(struct pci_dev *pdev)
 {
        struct nvme_dev *dev = pci_get_drvdata(pdev);
 
-       del_timer_sync(&dev->watchdog_timer);
-
        set_bit(NVME_CTRL_REMOVING, &dev->flags);
        pci_set_drvdata(pdev, NULL);
        flush_work(&dev->async_work);
+       flush_work(&dev->reset_work);
        flush_work(&dev->scan_work);
        nvme_remove_namespaces(&dev->ctrl);
        nvme_uninit_ctrl(&dev->ctrl);
index 8ba19bba31569f22c6529a739fa1d30adf5c2f0f..2bb3c5799ac4b0146b2760d8823fb1eb28a6bdaa 100644 (file)
@@ -94,7 +94,7 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
        if (ret)
                goto close_banks;
 
-       while (val_size) {
+       while (val_size >= reg_size) {
                if ((offset < OCOTP_DATA_OFFSET) || (offset % 16)) {
                        /* fill up non-data register */
                        *buf = 0;
@@ -103,7 +103,7 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
                }
 
                buf++;
-               val_size--;
+               val_size -= reg_size;
                offset += reg_size;
        }
 
index 8453f08d2ef41b2c8c93596b5a825f24562f5ff2..e051e1b57609e45afa170f1d92cde38c7beefe5e 100644 (file)
@@ -41,8 +41,8 @@ static int of_get_phy_id(struct device_node *device, u32 *phy_id)
        return -EINVAL;
 }
 
-static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *child,
-                                  u32 addr)
+static void of_mdiobus_register_phy(struct mii_bus *mdio,
+                                   struct device_node *child, u32 addr)
 {
        struct phy_device *phy;
        bool is_c45;
@@ -56,8 +56,8 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi
                phy = phy_device_create(mdio, addr, phy_id, 0, NULL);
        else
                phy = get_phy_device(mdio, addr, is_c45);
-       if (IS_ERR_OR_NULL(phy))
-               return 1;
+       if (IS_ERR(phy))
+               return;
 
        rc = irq_of_parse_and_map(child, 0);
        if (rc > 0) {
@@ -81,25 +81,22 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi
        if (rc) {
                phy_device_free(phy);
                of_node_put(child);
-               return 1;
+               return;
        }
 
        dev_dbg(&mdio->dev, "registered phy %s at address %i\n",
                child->name, addr);
-
-       return 0;
 }
 
-static int of_mdiobus_register_device(struct mii_bus *mdio,
-                                     struct device_node *child,
-                                     u32 addr)
+static void of_mdiobus_register_device(struct mii_bus *mdio,
+                                      struct device_node *child, u32 addr)
 {
        struct mdio_device *mdiodev;
        int rc;
 
        mdiodev = mdio_device_create(mdio, addr);
        if (IS_ERR(mdiodev))
-               return 1;
+               return;
 
        /* Associate the OF node with the device structure so it
         * can be looked up later.
@@ -112,13 +109,11 @@ static int of_mdiobus_register_device(struct mii_bus *mdio,
        if (rc) {
                mdio_device_free(mdiodev);
                of_node_put(child);
-               return 1;
+               return;
        }
 
        dev_dbg(&mdio->dev, "registered mdio device %s at address %i\n",
                child->name, addr);
-
-       return 0;
 }
 
 int of_mdio_parse_addr(struct device *dev, const struct device_node *np)
@@ -214,6 +209,10 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
        bool scanphys = false;
        int addr, rc;
 
+       /* Do not continue if the node is disabled */
+       if (!of_device_is_available(np))
+               return -ENODEV;
+
        /* Mask out all PHYs from auto probing.  Instead the PHYs listed in
         * the device tree are populated after the bus has been registered */
        mdio->phy_mask = ~0;
index b48ac6300c792d4336487f841a2267a5b883c6f5..a0e5260bd006df0818d4ef2bd9296b50152bf1ac 100644 (file)
@@ -239,8 +239,8 @@ static int oprofilefs_fill_super(struct super_block *sb, void *data, int silent)
 {
        struct inode *root_inode;
 
-       sb->s_blocksize = PAGE_CACHE_SIZE;
-       sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+       sb->s_blocksize = PAGE_SIZE;
+       sb->s_blocksize_bits = PAGE_SHIFT;
        sb->s_magic = OPROFILEFS_MAGIC;
        sb->s_op = &s_ops;
        sb->s_time_gran = 1;
index 01b9d0a00abcb06186bdb80d44c5aa9e5f05838c..d11cdbb8fba3edab6d0bfc69490c72b0c40394dd 100644 (file)
@@ -275,6 +275,19 @@ ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void
 }
 EXPORT_SYMBOL(pci_write_vpd);
 
+/**
+ * pci_set_vpd_size - Set size of Vital Product Data space
+ * @dev:       pci device struct
+ * @len:       size of vpd space
+ */
+int pci_set_vpd_size(struct pci_dev *dev, size_t len)
+{
+       if (!dev->vpd || !dev->vpd->ops)
+               return -ENODEV;
+       return dev->vpd->ops->set_size(dev, len);
+}
+EXPORT_SYMBOL(pci_set_vpd_size);
+
 #define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1)
 
 /**
@@ -498,9 +511,23 @@ out:
        return ret ? ret : count;
 }
 
+static int pci_vpd_set_size(struct pci_dev *dev, size_t len)
+{
+       struct pci_vpd *vpd = dev->vpd;
+
+       if (len == 0 || len > PCI_VPD_MAX_SIZE)
+               return -EIO;
+
+       vpd->valid = 1;
+       vpd->len = len;
+
+       return 0;
+}
+
 static const struct pci_vpd_ops pci_vpd_ops = {
        .read = pci_vpd_read,
        .write = pci_vpd_write,
+       .set_size = pci_vpd_set_size,
 };
 
 static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
@@ -533,9 +560,24 @@ static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
        return ret;
 }
 
+static int pci_vpd_f0_set_size(struct pci_dev *dev, size_t len)
+{
+       struct pci_dev *tdev = pci_get_slot(dev->bus,
+                                           PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+       int ret;
+
+       if (!tdev)
+               return -ENODEV;
+
+       ret = pci_set_vpd_size(tdev, len);
+       pci_dev_put(tdev);
+       return ret;
+}
+
 static const struct pci_vpd_ops pci_vpd_f0_ops = {
        .read = pci_vpd_f0_read,
        .write = pci_vpd_f0_write,
+       .set_size = pci_vpd_f0_set_size,
 };
 
 int pci_vpd_init(struct pci_dev *dev)
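
The new set_size hook extends the existing pci_vpd_ops table: each access variant (plain and the function-0 quirk) supplies its own callback, and the exported pci_set_vpd_size() dispatches only after guarding the ops pointer, as the hunks above show. A standalone sketch of that ops-table-with-guard pattern; the structs and the size check are simplified stand-ins:

    #include <stdio.h>
    #include <stddef.h>
    #include <errno.h>

    /* Simplified stand-ins for pci_dev/pci_vpd. */
    struct dev;
    struct vpd_ops { int (*set_size)(struct dev *d, size_t len); };
    struct vpd     { const struct vpd_ops *ops; size_t len; };
    struct dev     { struct vpd *vpd; };

    static int plain_set_size(struct dev *d, size_t len)
    {
            if (len == 0)
                    return -EIO;          /* reject nonsense sizes */
            d->vpd->len = len;
            return 0;
    }

    static const struct vpd_ops plain_ops = { .set_size = plain_set_size };

    /* Mirrors pci_set_vpd_size(): guard the ops pointer, then dispatch. */
    static int set_vpd_size(struct dev *d, size_t len)
    {
            if (!d->vpd || !d->vpd->ops || !d->vpd->ops->set_size)
                    return -ENODEV;
            return d->vpd->ops->set_size(d, len);
    }

    int main(void)
    {
            struct vpd v = { &plain_ops, 0 };
            struct dev d = { &v };

            printf("rc=%d len=%zu\n", set_vpd_size(&d, 256), v.len);
            return 0;
    }
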
index eb5a2755a1646649a7e1aaea263a1cf118a45af4..2f817fa4c661e72274e873c38f460cefe04faa79 100644 (file)
@@ -32,7 +32,7 @@
 #define to_imx6_pcie(x)        container_of(x, struct imx6_pcie, pp)
 
 struct imx6_pcie {
-       struct gpio_desc        *reset_gpio;
+       int                     reset_gpio;
        struct clk              *pcie_bus;
        struct clk              *pcie_phy;
        struct clk              *pcie;
@@ -309,10 +309,10 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
        usleep_range(200, 500);
 
        /* Some boards don't have PCIe reset GPIO. */
-       if (imx6_pcie->reset_gpio) {
-               gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 0);
+       if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+               gpio_set_value_cansleep(imx6_pcie->reset_gpio, 0);
                msleep(100);
-               gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 1);
+               gpio_set_value_cansleep(imx6_pcie->reset_gpio, 1);
        }
        return 0;
 
@@ -523,6 +523,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
 {
        struct imx6_pcie *imx6_pcie;
        struct pcie_port *pp;
+       struct device_node *np = pdev->dev.of_node;
        struct resource *dbi_base;
        struct device_node *node = pdev->dev.of_node;
        int ret;
@@ -544,8 +545,15 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
                return PTR_ERR(pp->dbi_base);
 
        /* Fetch GPIOs */
-       imx6_pcie->reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
-                                                       GPIOD_OUT_LOW);
+       imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+       if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+               ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
+                                           GPIOF_OUT_INIT_LOW, "PCIe reset");
+               if (ret) {
+                       dev_err(&pdev->dev, "unable to get reset gpio\n");
+                       return ret;
+               }
+       }
 
        /* Fetch clocks */
        imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
index e982010f0ed19e0d0379da4a1c02334a1db9f175..342b6918bbde8534b0f89bade814624316a54e9d 100644 (file)
@@ -636,7 +636,7 @@ static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
        u8 *data = (u8 *) buf;
 
        /* Several chips lock up trying to read undefined config space */
-       if (security_capable(filp->f_cred, &init_user_ns, CAP_SYS_ADMIN) == 0)
+       if (file_ns_capable(filp, &init_user_ns, CAP_SYS_ADMIN))
                size = dev->cfg_size;
        else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
                size = 128;
index d0fb93481573ad3282e917e09d4852c723ecbd15..a814bbb80fcb3d1ddbf35508b7dcf5808f3d0e5d 100644 (file)
@@ -97,6 +97,7 @@ static inline bool pci_has_subordinate(struct pci_dev *pci_dev)
 struct pci_vpd_ops {
        ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
        ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
+       int (*set_size)(struct pci_dev *dev, size_t len);
 };
 
 struct pci_vpd {
index 4c2fa05b4589e2095db8430b77fe338077a6c7d7..944674ee34644fa2583f2193ad738a4b5cbe4912 100644 (file)
@@ -56,6 +56,7 @@ struct db1x_pcmcia_sock {
        int     stschg_irq;     /* card-status-change irq */
        int     card_irq;       /* card irq */
        int     eject_irq;      /* db1200/pb1200 have these */
+       int     insert_gpio;    /* db1000 carddetect gpio */
 
 #define BOARD_TYPE_DEFAULT     0       /* most boards */
 #define BOARD_TYPE_DB1200      1       /* IRQs aren't gpios */
@@ -83,7 +84,7 @@ static int db1200_card_inserted(struct db1x_pcmcia_sock *sock)
 /* carddetect gpio: low-active */
 static int db1000_card_inserted(struct db1x_pcmcia_sock *sock)
 {
-       return !gpio_get_value(irq_to_gpio(sock->insert_irq));
+       return !gpio_get_value(sock->insert_gpio);
 }
 
 static int db1x_card_inserted(struct db1x_pcmcia_sock *sock)
@@ -457,9 +458,15 @@ static int db1x_pcmcia_socket_probe(struct platform_device *pdev)
        r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "card");
        sock->card_irq = r ? r->start : 0;
 
-       /* insert: irq which triggers on card insertion/ejection */
+       /* insert: irq which triggers on card insertion/ejection
+        * BIG FAT NOTE: on DB1000/1100/1500/1550 we pass a GPIO here!
+        */
        r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "insert");
        sock->insert_irq = r ? r->start : -1;
+       if (sock->board_type == BOARD_TYPE_DEFAULT) {
+               sock->insert_gpio = r ? r->start : -1;
+               sock->insert_irq = r ? gpio_to_irq(r->start) : -1;
+       }
 
        /* stschg: irq which trigger on card status change (optional) */
        r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "stschg");
index 32346b5a8a119789c1857c90ec13902616ace318..f70090897fdf19c9777c332401dd01de7b1efc52 100644 (file)
@@ -737,8 +737,19 @@ static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
                        break;
                case CPU_PM_EXIT:
                case CPU_PM_ENTER_FAILED:
-                        /* Restore and enable the counter */
-                       armpmu_start(event, PERF_EF_RELOAD);
+                        /*
+                         * Restore and enable the counter.
+                         * armpmu_start() indirectly calls
+                         *
+                         * perf_event_update_userpage()
+                         *
+                         * that requires RCU read locking to be functional,
+                         * wrap the call within RCU_NONIDLE to make the
+                         * RCU subsystem aware this cpu is not idle from
+                         * an RCU perspective for the armpmu_start() call
+                         * duration.
+                         */
+                       RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
                        break;
                default:
                        break;
index 77e2d02e6bee228ac033a28cccea90047439ed86..793ecb6d87bcaa2a56a914b2745bea6453929637 100644 (file)
@@ -86,6 +86,9 @@ static int rockchip_dp_phy_probe(struct platform_device *pdev)
        if (!np)
                return -ENODEV;
 
+       if (!dev->parent || !dev->parent->of_node)
+               return -ENODEV;
+
        dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
        if (IS_ERR(dp))
                return -ENOMEM;
@@ -104,9 +107,9 @@ static int rockchip_dp_phy_probe(struct platform_device *pdev)
                return ret;
        }
 
-       dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+       dp->grf = syscon_node_to_regmap(dev->parent->of_node);
        if (IS_ERR(dp->grf)) {
-               dev_err(dev, "rk3288-dp needs rockchip,grf property\n");
+               dev_err(dev, "rk3288-dp needs the General Register Files syscon\n");
                return PTR_ERR(dp->grf);
        }
 
index 887b4c27195f430766030e088b33ea2b037a7967..6ebcf3e41c467233c7d198cc72a7b93a4f7fe4b1 100644 (file)
@@ -176,7 +176,10 @@ static int rockchip_emmc_phy_probe(struct platform_device *pdev)
        struct regmap *grf;
        unsigned int reg_offset;
 
-       grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf");
+       if (!dev->parent || !dev->parent->of_node)
+               return -ENODEV;
+
+       grf = syscon_node_to_regmap(dev->parent->of_node);
        if (IS_ERR(grf)) {
                dev_err(dev, "Missing rockchip,grf property\n");
                return PTR_ERR(grf);
index debe1219d76d61b1c3908bf4820de8644dd65d9c..fc8cbf6117234ba31541b3f0153d0d23e8bc644b 100644 (file)
@@ -2,6 +2,7 @@ config PINCTRL_IMX
        bool
        select PINMUX
        select PINCONF
+       select REGMAP
 
 config PINCTRL_IMX1_CORE
        bool
index 46210512d8ecff75cac22f306928798af8436454..9cfa544072b5a357251889328e2bff7e576ed88c 100644 (file)
@@ -762,19 +762,18 @@ int imx_pinctrl_probe(struct platform_device *pdev,
 
        if (of_property_read_bool(dev_np, "fsl,input-sel")) {
                np = of_parse_phandle(dev_np, "fsl,input-sel", 0);
-               if (np) {
-                       ipctl->input_sel_base = of_iomap(np, 0);
-                       if (IS_ERR(ipctl->input_sel_base)) {
-                               of_node_put(np);
-                               dev_err(&pdev->dev,
-                                       "iomuxc input select base address not found\n");
-                               return PTR_ERR(ipctl->input_sel_base);
-                       }
-               } else {
+               if (!np) {
                        dev_err(&pdev->dev, "iomuxc fsl,input-sel property not found\n");
                        return -EINVAL;
                }
+
+               ipctl->input_sel_base = of_iomap(np, 0);
                of_node_put(np);
+               if (!ipctl->input_sel_base) {
+                       dev_err(&pdev->dev,
+                               "iomuxc input select base address not found\n");
+                       return -ENOMEM;
+               }
        }
 
        imx_pinctrl_desc.name = dev_name(&pdev->dev);
index 85536b467c25b50f43e19171996e009352ccb682..6c2c816f8e5f49dfa8bee2727ccd5d115cfdd643 100644 (file)
@@ -665,6 +665,35 @@ static void intel_gpio_irq_ack(struct irq_data *d)
        spin_unlock(&pctrl->lock);
 }
 
+static void intel_gpio_irq_enable(struct irq_data *d)
+{
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+       struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
+       const struct intel_community *community;
+       unsigned pin = irqd_to_hwirq(d);
+       unsigned long flags;
+
+       spin_lock_irqsave(&pctrl->lock, flags);
+
+       community = intel_get_community(pctrl, pin);
+       if (community) {
+               unsigned padno = pin_to_padno(community, pin);
+               unsigned gpp_size = community->gpp_size;
+               unsigned gpp_offset = padno % gpp_size;
+               unsigned gpp = padno / gpp_size;
+               u32 value;
+
+               /* Clear interrupt status first to avoid unexpected interrupt */
+               writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4);
+
+               value = readl(community->regs + community->ie_offset + gpp * 4);
+               value |= BIT(gpp_offset);
+               writel(value, community->regs + community->ie_offset + gpp * 4);
+       }
+
+       spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
 static void intel_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -741,8 +770,9 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned type)
                value |= PADCFG0_RXINV;
        } else if (type & IRQ_TYPE_EDGE_RISING) {
                value |= PADCFG0_RXEVCFG_EDGE << PADCFG0_RXEVCFG_SHIFT;
-       } else if (type & IRQ_TYPE_LEVEL_LOW) {
-               value |= PADCFG0_RXINV;
+       } else if (type & IRQ_TYPE_LEVEL_MASK) {
+               if (type & IRQ_TYPE_LEVEL_LOW)
+                       value |= PADCFG0_RXINV;
        } else {
                value |= PADCFG0_RXEVCFG_DISABLED << PADCFG0_RXEVCFG_SHIFT;
        }
@@ -852,6 +882,7 @@ static irqreturn_t intel_gpio_irq(int irq, void *data)
 
 static struct irq_chip intel_gpio_irqchip = {
        .name = "intel-gpio",
+       .irq_enable = intel_gpio_irq_enable,
        .irq_ack = intel_gpio_irq_ack,
        .irq_mask = intel_gpio_irq_mask,
        .irq_unmask = intel_gpio_irq_unmask,
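
The new irq_enable hook acks the pending-status register (GPI_IS, write-1-to-clear) before setting the enable bit, so an event latched while the line was masked cannot fire spuriously the moment it is enabled. A standalone sketch of that clear-then-enable ordering on a pair of fake registers:

    #include <stdio.h>
    #include <stdint.h>

    /* Fake register pair standing in for GPI_IS and the IE register. */
    static uint32_t gpi_is, gpi_ie;

    /* Model write-1-to-clear: writing a set bit clears it in hardware. */
    static void w1c(uint32_t *reg, uint32_t val) { *reg &= ~val; }

    static void irq_enable(unsigned int bit)
    {
            w1c(&gpi_is, 1u << bit); /* drop stale pending status first */
            gpi_ie |= 1u << bit;     /* only then unmask the line */
    }

    int main(void)
    {
            gpi_is = 1u << 3;        /* interrupt latched while masked */
            irq_enable(3);
            printf("IS=%#x IE=%#x\n", (unsigned)gpi_is, (unsigned)gpi_ie);
            return 0;
    }
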
index 2bbe6f7964a79d9099bd512f2ac430f0b2d96861..6ab8c3ccdeea4cd8b82504f707382f4a6e3c65ce 100644 (file)
@@ -1004,7 +1004,8 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
        struct mtk_pinctrl *pctl = dev_get_drvdata(chip->parent);
        int eint_num, virq, eint_offset;
        unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask, dbnc;
-       static const unsigned int dbnc_arr[] = {0 , 1, 16, 32, 64, 128, 256};
+       static const unsigned int debounce_time[] = {500, 1000, 16000, 32000, 64000,
+                                               128000, 256000};
        const struct mtk_desc_pin *pin;
        struct irq_data *d;
 
@@ -1022,9 +1023,9 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
        if (!mtk_eint_can_en_debounce(pctl, eint_num))
                return -ENOSYS;
 
-       dbnc = ARRAY_SIZE(dbnc_arr);
-       for (i = 0; i < ARRAY_SIZE(dbnc_arr); i++) {
-               if (debounce <= dbnc_arr[i]) {
+       dbnc = ARRAY_SIZE(debounce_time);
+       for (i = 0; i < ARRAY_SIZE(debounce_time); i++) {
+               if (debounce <= debounce_time[i]) {
                        dbnc = i;
                        break;
                }
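
Beyond the rename, the fix expresses the table entries in microseconds (500us through 256ms) so they compare correctly against the microsecond debounce argument; the loop then picks the smallest supported setting that covers the request. A standalone sketch of that lookup, with the values copied from the hunk:

    #include <stdio.h>

    /* Supported debounce settings in microseconds, as in the fixed table. */
    static const unsigned int debounce_time[] = {
            500, 1000, 16000, 32000, 64000, 128000, 256000,
    };
    #define N_DBNC (sizeof(debounce_time) / sizeof(debounce_time[0]))

    /* Index of the smallest setting >= the request, or N_DBNC if none. */
    static unsigned int pick_debounce(unsigned int debounce_us)
    {
            unsigned int i;

            for (i = 0; i < N_DBNC; i++)
                    if (debounce_us <= debounce_time[i])
                            return i;
            return N_DBNC;
    }

    int main(void)
    {
            printf("20ms -> index %u\n", pick_debounce(20000)); /* 3 (32ms) */
            return 0;
    }
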
index 352406108fa0a3572aa41043a79042c73ec5d1cf..c8969dd494497c88d75d1525e1f7f646998bb388 100644 (file)
@@ -990,7 +990,7 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
                int val;
 
                if (pull)
-                       pullidx = data_out ? 1 : 2;
+                       pullidx = data_out ? 2 : 1;
 
                seq_printf(s, " gpio-%-3d (%-20.20s) in  %s %s",
                           gpio,
index 856f736cb1a6f0c3da0635da891965831a0f5b0f..2673cd9d106ef60e7d2a6417b5cf6599e7655f47 100644 (file)
@@ -469,27 +469,27 @@ static const char * const pistachio_mips_pll_lock_groups[] = {
        "mfio83",
 };
 
-static const char * const pistachio_sys_pll_lock_groups[] = {
+static const char * const pistachio_audio_pll_lock_groups[] = {
        "mfio84",
 };
 
-static const char * const pistachio_wifi_pll_lock_groups[] = {
+static const char * const pistachio_rpu_v_pll_lock_groups[] = {
        "mfio85",
 };
 
-static const char * const pistachio_bt_pll_lock_groups[] = {
+static const char * const pistachio_rpu_l_pll_lock_groups[] = {
        "mfio86",
 };
 
-static const char * const pistachio_rpu_v_pll_lock_groups[] = {
+static const char * const pistachio_sys_pll_lock_groups[] = {
        "mfio87",
 };
 
-static const char * const pistachio_rpu_l_pll_lock_groups[] = {
+static const char * const pistachio_wifi_pll_lock_groups[] = {
        "mfio88",
 };
 
-static const char * const pistachio_audio_pll_lock_groups[] = {
+static const char * const pistachio_bt_pll_lock_groups[] = {
        "mfio89",
 };
 
@@ -559,12 +559,12 @@ enum pistachio_mux_option {
        PISTACHIO_FUNCTION_DREQ4,
        PISTACHIO_FUNCTION_DREQ5,
        PISTACHIO_FUNCTION_MIPS_PLL_LOCK,
+       PISTACHIO_FUNCTION_AUDIO_PLL_LOCK,
+       PISTACHIO_FUNCTION_RPU_V_PLL_LOCK,
+       PISTACHIO_FUNCTION_RPU_L_PLL_LOCK,
        PISTACHIO_FUNCTION_SYS_PLL_LOCK,
        PISTACHIO_FUNCTION_WIFI_PLL_LOCK,
        PISTACHIO_FUNCTION_BT_PLL_LOCK,
-       PISTACHIO_FUNCTION_RPU_V_PLL_LOCK,
-       PISTACHIO_FUNCTION_RPU_L_PLL_LOCK,
-       PISTACHIO_FUNCTION_AUDIO_PLL_LOCK,
        PISTACHIO_FUNCTION_DEBUG_RAW_CCA_IND,
        PISTACHIO_FUNCTION_DEBUG_ED_SEC20_CCA_IND,
        PISTACHIO_FUNCTION_DEBUG_ED_SEC40_CCA_IND,
@@ -620,12 +620,12 @@ static const struct pistachio_function pistachio_functions[] = {
        FUNCTION(dreq4),
        FUNCTION(dreq5),
        FUNCTION(mips_pll_lock),
+       FUNCTION(audio_pll_lock),
+       FUNCTION(rpu_v_pll_lock),
+       FUNCTION(rpu_l_pll_lock),
        FUNCTION(sys_pll_lock),
        FUNCTION(wifi_pll_lock),
        FUNCTION(bt_pll_lock),
-       FUNCTION(rpu_v_pll_lock),
-       FUNCTION(rpu_l_pll_lock),
-       FUNCTION(audio_pll_lock),
        FUNCTION(debug_raw_cca_ind),
        FUNCTION(debug_ed_sec20_cca_ind),
        FUNCTION(debug_ed_sec40_cca_ind),
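
[Editor's note] Reading the three parallel hunks in this file together, the reorder appears functional rather than cosmetic: the group tables, the PISTACHIO_FUNCTION_* enum, and pistachio_functions[] must stay in a single order because the enum value is the index used at mux-selection time. The corrected pad-to-function mapping, taken from the group hunks above:

/*
 *   mfio84  audio_pll_lock      mfio87  sys_pll_lock
 *   mfio85  rpu_v_pll_lock      mfio88  wifi_pll_lock
 *   mfio86  rpu_l_pll_lock      mfio89  bt_pll_lock
 */
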
index fb126d56ad40d4c381230978f4da49baca049d40..cf9bafa10acfb51ad42af96cf6580d6de48c1b1d 100644 (file)
@@ -1280,9 +1280,9 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
 
                /* Parse pins in each row from LSB */
                while (mask) {
-                       bit_pos = ffs(mask);
+                       bit_pos = __ffs(mask);
                        pin_num_from_lsb = bit_pos / pcs->bits_per_pin;
-                       mask_pos = ((pcs->fmask) << (bit_pos - 1));
+                       mask_pos = ((pcs->fmask) << bit_pos);
                        val_pos = val & mask_pos;
                        submask = mask & mask_pos;
 
@@ -1852,7 +1852,7 @@ static int pcs_probe(struct platform_device *pdev)
        ret = of_property_read_u32(np, "pinctrl-single,function-mask",
                                   &pcs->fmask);
        if (!ret) {
-               pcs->fshift = ffs(pcs->fmask) - 1;
+               pcs->fshift = __ffs(pcs->fmask);
                pcs->fmax = pcs->fmask >> pcs->fshift;
        } else {
                /* If mask property doesn't exist, function mux is invalid. */
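
[Editor's note] Both hunks in this file are the same one-based/zero-based mix-up: ffs() numbers bits from 1 (returning 0 when no bit is set), while __ffs() numbers from 0 (and is undefined for 0). A worked comparison:

/*
 *   ffs(0x10)   == 5        ffs(0)   == 0
 *   __ffs(0x10) == 4        __ffs(0) is undefined
 *
 * With one bit per pin and a row mask of 0x10 (pin 4), the old
 *   pin_num_from_lsb = ffs(mask) / pcs->bits_per_pin = 5
 * addressed pin 5 instead of pin 4.  Using __ffs() everywhere removes
 * the need to remember a "- 1" at each use, which is exactly the
 * subtraction the old mask_pos/fshift code carried around.
 */
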
index 412c6b78140aa72918619f55743f8ac5666d6ffc..a13f2b6f6fc0b4572a61d889922498027accfd49 100644 (file)
@@ -1573,6 +1573,22 @@ static int xway_gpio_dir_out(struct gpio_chip *chip, unsigned int pin, int val)
        return 0;
 }
 
+/*
+ * gpiolib gpiod_to_irq callback function.
+ * Returns the mapped IRQ (external interrupt) number for a given GPIO pin.
+ */
+static int xway_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+       struct ltq_pinmux_info *info = dev_get_drvdata(chip->parent);
+       int i;
+
+       for (i = 0; i < info->num_exin; i++)
+               if (info->exin[i] == offset)
+                       return ltq_eiu_get_irq(i);
+
+       return -1;
+}
+
 static struct gpio_chip xway_chip = {
        .label = "gpio-xway",
        .direction_input = xway_gpio_dir_in,
@@ -1581,6 +1597,7 @@ static struct gpio_chip xway_chip = {
        .set = xway_gpio_set,
        .request = gpiochip_generic_request,
        .free = gpiochip_generic_free,
+       .to_irq = xway_gpio_to_irq,
        .base = -1,
 };
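
[Editor's note] For context, a hypothetical consumer of the new .to_irq hook; the "wake" consumer name and wake_handler are invented for illustration:

#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>

static irqreturn_t wake_handler(int irq, void *dev_id);	/* hypothetical */

static int request_wake_irq(struct device *dev)
{
	struct gpio_desc *gpio = devm_gpiod_get(dev, "wake", GPIOD_IN);
	int irq;

	if (IS_ERR(gpio))
		return PTR_ERR(gpio);

	/* Routed through xway_gpio_to_irq(); -1 if no EIU mapping exists */
	irq = gpiod_to_irq(gpio);
	if (irq < 0)
		return irq;

	return devm_request_irq(dev, irq, wake_handler, IRQF_TRIGGER_RISING,
				"wake", dev);
}
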
 
index b5d81ced6ce65b56487971e27c50728a2ab562ab..b68ae424cee247d51541d4ceb36aaa8c8c9132f6 100644 (file)
@@ -237,7 +237,7 @@ DECLARE_QCA_GPIO_PINS(99);
                .pins = gpio##id##_pins,                \
                .npins = (unsigned)ARRAY_SIZE(gpio##id##_pins), \
                .funcs = (int[]){                       \
-                       qca_mux_NA, /* gpio mode */     \
+                       qca_mux_gpio, /* gpio mode */   \
                        qca_mux_##f1,                   \
                        qca_mux_##f2,                   \
                        qca_mux_##f3,                   \
@@ -254,11 +254,11 @@ DECLARE_QCA_GPIO_PINS(99);
                        qca_mux_##f14                   \
                },                                      \
                .nfuncs = 15,                           \
-               .ctl_reg = 0x1000 + 0x10 * id,          \
-               .io_reg = 0x1004 + 0x10 * id,           \
-               .intr_cfg_reg = 0x1008 + 0x10 * id,     \
-               .intr_status_reg = 0x100c + 0x10 * id,  \
-               .intr_target_reg = 0x400 + 0x4 * id,    \
+               .ctl_reg = 0x0 + 0x1000 * id,           \
+               .io_reg = 0x4 + 0x1000 * id,            \
+               .intr_cfg_reg = 0x8 + 0x1000 * id,      \
+               .intr_status_reg = 0xc + 0x1000 * id,   \
+               .intr_target_reg = 0x8 + 0x1000 * id,   \
                .mux_bit = 2,                   \
                .pull_bit = 0,                  \
                .drv_bit = 6,                   \
@@ -414,7 +414,7 @@ static const struct msm_pinctrl_soc_data ipq4019_pinctrl = {
        .nfunctions = ARRAY_SIZE(ipq4019_functions),
        .groups = ipq4019_groups,
        .ngroups = ARRAY_SIZE(ipq4019_groups),
-       .ngpios = 70,
+       .ngpios = 100,
 };
 
 static int ipq4019_pinctrl_probe(struct platform_device *pdev)
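
[Editor's note] The register arithmetic is easier to sanity-check with a concrete pin. Using the new macros for GPIO 2:

/*
 *   ctl_reg         = 0x0 + 0x1000 * 2 = 0x2000
 *   io_reg          = 0x4 + 0x1000 * 2 = 0x2004
 *   intr_cfg_reg    = 0x8 + 0x1000 * 2 = 0x2008
 *   intr_status_reg = 0xc + 0x1000 * 2 = 0x200c
 *
 * i.e. one 4 KiB tile per pin, instead of the 16-byte stride the driver
 * used before; together with ngpios 70 -> 100 this matches a 100-pin TLMM.
 */
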
index dc3609f0c60b6b50d10c76bba38b4f73952d5c1e..ee0c1f2567d9f3eec5fe11401ff4d3a4384b7cfc 100644 (file)
@@ -546,7 +546,9 @@ static int sh_pfc_probe(struct platform_device *pdev)
                        return ret;
        }
 
-       pinctrl_provide_dummies();
+       /* Enable dummy states for those platforms without pinctrl support */
+       if (!of_have_populated_dt())
+               pinctrl_provide_dummies();
 
        ret = sh_pfc_init_ranges(pfc);
        if (ret < 0)
index 00265f0435a77d83998bd6db377a76a3c4a5ed3a..8b381d69df8632c806f45bde5a5d1e6645b35d40 100644 (file)
@@ -485,6 +485,7 @@ static const struct sunxi_pinctrl_desc sun8i_a33_pinctrl_data = {
        .pins = sun8i_a33_pins,
        .npins = ARRAY_SIZE(sun8i_a33_pins),
        .irq_banks = 2,
+       .irq_bank_base = 1,
 };
 
 static int sun8i_a33_pinctrl_probe(struct platform_device *pdev)
index 12a1dfabb1af3c56cf6abf0e0b3c52044bd3c45d..3b017dbd289cad993c6b6882c153364bed53d6ff 100644 (file)
@@ -579,7 +579,7 @@ static void sunxi_pinctrl_irq_release_resources(struct irq_data *d)
 static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
 {
        struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
-       u32 reg = sunxi_irq_cfg_reg(d->hwirq);
+       u32 reg = sunxi_irq_cfg_reg(d->hwirq, pctl->desc->irq_bank_base);
        u8 index = sunxi_irq_cfg_offset(d->hwirq);
        unsigned long flags;
        u32 regval;
@@ -626,7 +626,8 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
 static void sunxi_pinctrl_irq_ack(struct irq_data *d)
 {
        struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
-       u32 status_reg = sunxi_irq_status_reg(d->hwirq);
+       u32 status_reg = sunxi_irq_status_reg(d->hwirq,
+                                             pctl->desc->irq_bank_base);
        u8 status_idx = sunxi_irq_status_offset(d->hwirq);
 
        /* Clear the IRQ */
@@ -636,7 +637,7 @@ static void sunxi_pinctrl_irq_ack(struct irq_data *d)
 static void sunxi_pinctrl_irq_mask(struct irq_data *d)
 {
        struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
-       u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
+       u32 reg = sunxi_irq_ctrl_reg(d->hwirq, pctl->desc->irq_bank_base);
        u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
        unsigned long flags;
        u32 val;
@@ -653,7 +654,7 @@ static void sunxi_pinctrl_irq_mask(struct irq_data *d)
 static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
 {
        struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
-       u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
+       u32 reg = sunxi_irq_ctrl_reg(d->hwirq, pctl->desc->irq_bank_base);
        u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
        unsigned long flags;
        u32 val;
@@ -745,7 +746,7 @@ static void sunxi_pinctrl_irq_handler(struct irq_desc *desc)
        if (bank == pctl->desc->irq_banks)
                return;
 
-       reg = sunxi_irq_status_reg_from_bank(bank);
+       reg = sunxi_irq_status_reg_from_bank(bank, pctl->desc->irq_bank_base);
        val = readl(pctl->membase + reg);
 
        if (val) {
@@ -1024,9 +1025,11 @@ int sunxi_pinctrl_init(struct platform_device *pdev,
 
        for (i = 0; i < pctl->desc->irq_banks; i++) {
                /* Mask and clear all IRQs before registering a handler */
-               writel(0, pctl->membase + sunxi_irq_ctrl_reg_from_bank(i));
+               writel(0, pctl->membase + sunxi_irq_ctrl_reg_from_bank(i,
+                                               pctl->desc->irq_bank_base));
                writel(0xffffffff,
-                       pctl->membase + sunxi_irq_status_reg_from_bank(i));
+                      pctl->membase + sunxi_irq_status_reg_from_bank(i,
+                                               pctl->desc->irq_bank_base));
 
                irq_set_chained_handler_and_data(pctl->irq[i],
                                                 sunxi_pinctrl_irq_handler,
index e248e81a0f9e0f8446cbd634040c93632b5e1268..0afce1ab12d039515a1ea40f66bdb3806ca9f5ca 100644 (file)
@@ -97,6 +97,7 @@ struct sunxi_pinctrl_desc {
        int                             npins;
        unsigned                        pin_base;
        unsigned                        irq_banks;
+       unsigned                        irq_bank_base;
        bool                            irq_read_needs_mux;
 };
 
@@ -233,12 +234,12 @@ static inline u32 sunxi_pull_offset(u16 pin)
        return pin_num * PULL_PINS_BITS;
 }
 
-static inline u32 sunxi_irq_cfg_reg(u16 irq)
+static inline u32 sunxi_irq_cfg_reg(u16 irq, unsigned bank_base)
 {
        u8 bank = irq / IRQ_PER_BANK;
        u8 reg = (irq % IRQ_PER_BANK) / IRQ_CFG_IRQ_PER_REG * 0x04;
 
-       return IRQ_CFG_REG + bank * IRQ_MEM_SIZE + reg;
+       return IRQ_CFG_REG + (bank_base + bank) * IRQ_MEM_SIZE + reg;
 }
 
 static inline u32 sunxi_irq_cfg_offset(u16 irq)
@@ -247,16 +248,16 @@ static inline u32 sunxi_irq_cfg_offset(u16 irq)
        return irq_num * IRQ_CFG_IRQ_BITS;
 }
 
-static inline u32 sunxi_irq_ctrl_reg_from_bank(u8 bank)
+static inline u32 sunxi_irq_ctrl_reg_from_bank(u8 bank, unsigned bank_base)
 {
-       return IRQ_CTRL_REG + bank * IRQ_MEM_SIZE;
+       return IRQ_CTRL_REG + (bank_base + bank) * IRQ_MEM_SIZE;
 }
 
-static inline u32 sunxi_irq_ctrl_reg(u16 irq)
+static inline u32 sunxi_irq_ctrl_reg(u16 irq, unsigned bank_base)
 {
        u8 bank = irq / IRQ_PER_BANK;
 
-       return sunxi_irq_ctrl_reg_from_bank(bank);
+       return sunxi_irq_ctrl_reg_from_bank(bank, bank_base);
 }
 
 static inline u32 sunxi_irq_ctrl_offset(u16 irq)
@@ -265,16 +266,16 @@ static inline u32 sunxi_irq_ctrl_offset(u16 irq)
        return irq_num * IRQ_CTRL_IRQ_BITS;
 }
 
-static inline u32 sunxi_irq_status_reg_from_bank(u8 bank)
+static inline u32 sunxi_irq_status_reg_from_bank(u8 bank, unsigned bank_base)
 {
-       return IRQ_STATUS_REG + bank * IRQ_MEM_SIZE;
+       return IRQ_STATUS_REG + (bank_base + bank) * IRQ_MEM_SIZE;
 }
 
-static inline u32 sunxi_irq_status_reg(u16 irq)
+static inline u32 sunxi_irq_status_reg(u16 irq, unsigned bank_base)
 {
        u8 bank = irq / IRQ_PER_BANK;
 
-       return sunxi_irq_status_reg_from_bank(bank);
+       return sunxi_irq_status_reg_from_bank(bank, bank_base);
 }
 
 static inline u32 sunxi_irq_status_offset(u16 irq)
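
[Editor's note] To see what irq_bank_base buys, here is the new offset computation worked for hwirq 0 on the A33, assuming the driver's usual constants (IRQ_CFG_REG = 0x200, IRQ_MEM_SIZE = 0x20):

/*
 * sunxi_irq_cfg_reg(irq = 0, bank_base = 1):
 *   bank = 0 / IRQ_PER_BANK            = 0
 *   reg  = 0x200 + (1 + 0) * 0x20 + 0  = 0x220
 *
 * hwirq 0 now addresses the hardware's second register bank, where the
 * A33's first usable IRQ bank lives; previously the same hwirq computed
 * 0x200 and poked a bank this SoC does not expose.
 */
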
index 9973cebb4d6fe7ac98e8551f35b4ca459d8f7c25..07462d79d04000685c946a19a9cef1d90526400d 100644 (file)
@@ -309,8 +309,7 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
                 * much memory to the process.
                 */
                down_read(&current->mm->mmap_sem);
-               ret = get_user_pages(current, current->mm, address, 1,
-                                    !is_write, 0, &page, NULL);
+               ret = get_user_pages(address, 1, !is_write, 0, &page, NULL);
                up_read(&current->mm->mmap_sem);
                if (ret < 0)
                        break;
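
[Editor's note] This hunk (and the rio_mport one later in this section) tracks an mm API change in this kernel generation: get_user_pages() now implicitly operates on current and current->mm, so the task and mm arguments are gone. A hedged summary; callers that genuinely need a foreign mm were expected to move to get_user_pages_remote():

/*
 * old: get_user_pages(tsk, mm, start, nr_pages, write, force, pages, vmas)
 * new: get_user_pages(start, nr_pages, write, force, pages, vmas)
 *
 * The caller still holds mmap_sem for read around the call, as above.
 */
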
index 10ce6cba4455c206e8ab23906db520b880d4a95a..09356684c32f6a78f03dbc5ef1dd7addc828bf08 100644 (file)
@@ -127,8 +127,10 @@ static int lis3lv02d_acpi_read(struct lis3lv02d *lis3, int reg, u8 *ret)
        arg0.integer.value = reg;
 
        status = acpi_evaluate_integer(dev->handle, "ALRD", &args, &lret);
+       if (ACPI_FAILURE(status))
+               return -EINVAL;
        *ret = lret;
-       return (status != AE_OK) ? -EINVAL : 0;
+       return 0;
 }
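
[Editor's note] The reordering here is not cosmetic:

/*
 * lret is only meaningful when the ALRD method evaluated successfully;
 * the old code stored it into *ret before inspecting status, so callers
 * could consume an uninitialized value on ACPI failure.  The output is
 * now published only on the success path.
 */
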
 
 /**
@@ -173,6 +175,7 @@ static int lis3lv02d_dmi_matched(const struct dmi_system_id *dmi)
 DEFINE_CONV(normal, 1, 2, 3);
 DEFINE_CONV(y_inverted, 1, -2, 3);
 DEFINE_CONV(x_inverted, -1, 2, 3);
+DEFINE_CONV(x_inverted_usd, -1, 2, -3);
 DEFINE_CONV(z_inverted, 1, 2, -3);
 DEFINE_CONV(xy_swap, 2, 1, 3);
 DEFINE_CONV(xy_rotated_left, -2, 1, 3);
@@ -236,6 +239,7 @@ static const struct dmi_system_id lis3lv02d_dmi_ids[] = {
        AXIS_DMI_MATCH("HP8710", "HP Compaq 8710", y_inverted),
        AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted),
        AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left),
+       AXIS_DMI_MATCH("HPB440G3", "HP ProBook 440 G3", x_inverted_usd),
        AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
        AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
        AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
index f93abc8c1424ad956f62b8dbb5a6dadffd6b4c5b..a818db6aa08f22dfaf8eca3a5b02049a31363cbd 100644 (file)
@@ -91,6 +91,8 @@ static int intel_hid_pl_resume_handler(struct device *device)
 }
 
 static const struct dev_pm_ops intel_hid_pl_pm_ops = {
+       .freeze  = intel_hid_pl_suspend_handler,
+       .restore  = intel_hid_pl_resume_handler,
        .suspend  = intel_hid_pl_suspend_handler,
        .resume  = intel_hid_pl_resume_handler,
 };
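
[Editor's note] A hedged summary of the dev_pm_ops callback map this relies on:

/*
 *   .suspend / .resume  - suspend-to-RAM entry and exit
 *   .freeze  / .restore - hibernation image creation / resume from image
 *
 * Reusing the S3 handlers for the hibernation pair gives the driver
 * coverage it previously lacked: before this, freeze/restore were NULL
 * and the power-button state was not reprogrammed across hibernate.
 */
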
index 3fb1d85c70a89d8d89ab700627691f70a63d03c1..6f497e80c9df220df88563737f33ff701569f31b 100644 (file)
@@ -687,8 +687,8 @@ static int ipc_plat_get_res(struct platform_device *pdev)
        ipcdev.acpi_io_size = size;
        dev_info(&pdev->dev, "io res: %pR\n", res);
 
-       /* This is index 0 to cover BIOS data register */
        punit_res = punit_res_array;
+       /* This is index 0 to cover BIOS data register */
        res = platform_get_resource(pdev, IORESOURCE_MEM,
                                    PLAT_RESOURCE_BIOS_DATA_INDEX);
        if (!res) {
@@ -698,55 +698,51 @@ static int ipc_plat_get_res(struct platform_device *pdev)
        *punit_res = *res;
        dev_info(&pdev->dev, "punit BIOS data res: %pR\n", res);
 
+       /* This is index 1 to cover BIOS interface register */
        res = platform_get_resource(pdev, IORESOURCE_MEM,
                                    PLAT_RESOURCE_BIOS_IFACE_INDEX);
        if (!res) {
                dev_err(&pdev->dev, "Failed to get res of punit BIOS iface\n");
                return -ENXIO;
        }
-       /* This is index 1 to cover BIOS interface register */
        *++punit_res = *res;
        dev_info(&pdev->dev, "punit BIOS interface res: %pR\n", res);
 
+       /* This is index 2 to cover ISP data register, optional */
        res = platform_get_resource(pdev, IORESOURCE_MEM,
                                    PLAT_RESOURCE_ISP_DATA_INDEX);
-       if (!res) {
-               dev_err(&pdev->dev, "Failed to get res of punit ISP data\n");
-               return -ENXIO;
+       ++punit_res;
+       if (res) {
+               *punit_res = *res;
+               dev_info(&pdev->dev, "punit ISP data res: %pR\n", res);
        }
-       /* This is index 2 to cover ISP data register */
-       *++punit_res = *res;
-       dev_info(&pdev->dev, "punit ISP data res: %pR\n", res);
 
+       /* This is index 3 to cover ISP interface register, optional */
        res = platform_get_resource(pdev, IORESOURCE_MEM,
                                    PLAT_RESOURCE_ISP_IFACE_INDEX);
-       if (!res) {
-               dev_err(&pdev->dev, "Failed to get res of punit ISP iface\n");
-               return -ENXIO;
+       ++punit_res;
+       if (res) {
+               *punit_res = *res;
+               dev_info(&pdev->dev, "punit ISP interface res: %pR\n", res);
        }
-       /* This is index 3 to cover ISP interface register */
-       *++punit_res = *res;
-       dev_info(&pdev->dev, "punit ISP interface res: %pR\n", res);
 
+       /* This is index 4 to cover GTD data register, optional */
        res = platform_get_resource(pdev, IORESOURCE_MEM,
                                    PLAT_RESOURCE_GTD_DATA_INDEX);
-       if (!res) {
-               dev_err(&pdev->dev, "Failed to get res of punit GTD data\n");
-               return -ENXIO;
+       ++punit_res;
+       if (res) {
+               *punit_res = *res;
+               dev_info(&pdev->dev, "punit GTD data res: %pR\n", res);
        }
-       /* This is index 4 to cover GTD data register */
-       *++punit_res = *res;
-       dev_info(&pdev->dev, "punit GTD data res: %pR\n", res);
 
+       /* This is index 5 to cover GTD interface register, optional */
        res = platform_get_resource(pdev, IORESOURCE_MEM,
                                    PLAT_RESOURCE_GTD_IFACE_INDEX);
-       if (!res) {
-               dev_err(&pdev->dev, "Failed to get res of punit GTD iface\n");
-               return -ENXIO;
+       ++punit_res;
+       if (res) {
+               *punit_res = *res;
+               dev_info(&pdev->dev, "punit GTD interface res: %pR\n", res);
        }
-       /* This is index 5 to cover GTD interface register */
-       *++punit_res = *res;
-       dev_info(&pdev->dev, "punit GTD interface res: %pR\n", res);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM,
                                    PLAT_RESOURCE_IPC_INDEX);
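
[Editor's note] The subtle point in this rewrite is the cursor discipline: once the ISP/GTD resources become optional, the destination pointer must advance on every index even when a resource is absent, or every later entry lands one slot too early. A simplified sketch of the walk (the real code still hard-fails on the two mandatory BIOS resources):

#include <linux/platform_device.h>

static void collect_punit_res(struct platform_device *pdev,
			      struct resource *table, int count)
{
	struct resource *res;
	int i;

	for (i = 0; i < count; i++, table++) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (res)
			*table = *res;	/* absent optional slots stay zeroed */
	}
}
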
index bd875409a02dd3fbc13f8b61d372604e62f8abf1..a47a41fc10ad77c427158811857da4bc21cd747f 100644 (file)
@@ -227,6 +227,11 @@ static int intel_punit_get_bars(struct platform_device *pdev)
        struct resource *res;
        void __iomem *addr;
 
+       /*
+        * The following resources are required
+        * - BIOS_IPC BASE_DATA
+        * - BIOS_IPC BASE_IFACE
+        */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        addr = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(addr))
@@ -239,29 +244,40 @@ static int intel_punit_get_bars(struct platform_device *pdev)
                return PTR_ERR(addr);
        punit_ipcdev->base[BIOS_IPC][BASE_IFACE] = addr;
 
+       /*
+        * The following resources are optional
+        * - ISPDRIVER_IPC BASE_DATA
+        * - ISPDRIVER_IPC BASE_IFACE
+        * - GTDRIVER_IPC BASE_DATA
+        * - GTDRIVER_IPC BASE_IFACE
+        */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
-       addr = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(addr))
-               return PTR_ERR(addr);
-       punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
+       if (res) {
+               addr = devm_ioremap_resource(&pdev->dev, res);
+               if (!IS_ERR(addr))
+                       punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
+       }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
-       addr = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(addr))
-               return PTR_ERR(addr);
-       punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
+       if (res) {
+               addr = devm_ioremap_resource(&pdev->dev, res);
+               if (!IS_ERR(addr))
+                       punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
+       }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
-       addr = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(addr))
-               return PTR_ERR(addr);
-       punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
+       if (res) {
+               addr = devm_ioremap_resource(&pdev->dev, res);
+               if (!IS_ERR(addr))
+                       punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
+       }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 5);
-       addr = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(addr))
-               return PTR_ERR(addr);
-       punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
+       if (res) {
+               addr = devm_ioremap_resource(&pdev->dev, res);
+               if (!IS_ERR(addr))
+                       punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
+       }
 
        return 0;
 }
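
[Editor's note] Why the guard is on res rather than on the ioremap result:

/*
 * devm_ioremap_resource() treats a NULL resource as invalid and returns
 * ERR_PTR(-EINVAL), so probing would still have failed for absent
 * optional BARs had only the IS_ERR check been kept.  Testing res first
 * leaves the corresponding base[][] slot NULL, which users of these
 * mailboxes must treat as "feature absent".
 */
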
index 397119f83e82384913a305065a70f1ca7ae815d8..781bd10ca7ac5ba147e4e718d1d54577a7626e33 100644 (file)
@@ -659,7 +659,7 @@ static int telemetry_plt_update_events(struct telemetry_evtconfig pss_evtconfig,
 static int telemetry_plt_set_sampling_period(u8 pss_period, u8 ioss_period)
 {
        u32 telem_ctrl = 0;
-       int ret;
+       int ret = 0;
 
        mutex_lock(&(telm_conf->telem_lock));
        if (ioss_period) {
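
[Editor's note] A hedged reading of this one-liner, since the tail of the function is not shown in the hunk:

/*
 * If both pss_period and ioss_period are zero, neither conditional
 * branch assigns ret, and the function returned whatever happened to be
 * on the stack.  Initializing ret to 0 makes the "nothing to do" call
 * report success.
 */
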
index e305ab541a2227e1f94ff3e80a0da369ed385946..9255ff3ee81ac74e5fc269f256aed955132e730d 100644 (file)
@@ -7972,10 +7972,12 @@ static int fan_get_status_safe(u8 *status)
                fan_update_desired_level(s);
        mutex_unlock(&fan_mutex);
 
+       if (rc)
+               return rc;
        if (status)
                *status = s;
 
-       return rc;
+       return 0;
 }
 
 static int fan_get_speed(unsigned int *speed)
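
[Editor's note] Same pattern as the lis3lv02d hunk earlier in this section: an output parameter was written before the status code was checked.

/*
 * s is only valid when the EC fan read succeeded; previously *status
 * was assigned unconditionally and rc returned afterwards, so on a read
 * failure callers could act on an uninitialized fan state.  *status is
 * now published only when rc == 0.
 */
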
index df1f1a76a8629fdf4c1b709ff519040ffad18552..01e12d221a8b9b819b2ff396f2231a0a51492b76 100644 (file)
@@ -135,7 +135,7 @@ MODULE_LICENSE("GPL");
 /* Field definitions */
 #define HCI_ACCEL_MASK                 0x7fff
 #define HCI_HOTKEY_DISABLE             0x0b
-#define HCI_HOTKEY_ENABLE              0x01
+#define HCI_HOTKEY_ENABLE              0x09
 #define HCI_HOTKEY_SPECIAL_FUNCTIONS   0x10
 #define HCI_LCD_BRIGHTNESS_BITS                3
 #define HCI_LCD_BRIGHTNESS_SHIFT       (16-HCI_LCD_BRIGHTNESS_BITS)
index cdfd01f0adb84f96a1bacf391a28419fe6564d4d..8fad0a7044d3d332b8733eaff65d2fb33dd46e61 100644 (file)
@@ -1091,6 +1091,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
        RAPL_CPU(0x3f, rapl_defaults_hsw_server),/* Haswell servers */
        RAPL_CPU(0x4f, rapl_defaults_hsw_server),/* Broadwell servers */
        RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
+       RAPL_CPU(0x46, rapl_defaults_core),/* Haswell */
        RAPL_CPU(0x47, rapl_defaults_core),/* Broadwell-H */
        RAPL_CPU(0x4E, rapl_defaults_core),/* Skylake */
        RAPL_CPU(0x4C, rapl_defaults_cht),/* Braswell/Cherryview */
index 7225ac6b3df5be2faa7382662bbb708286813b8a..fad968eb75f628a8a40abeb62ccc08c61b3d435c 100644 (file)
@@ -392,7 +392,7 @@ static const struct regmap_config fsl_pwm_regmap_config = {
 
        .max_register = FTM_PWMLOAD,
        .volatile_reg = fsl_pwm_volatile_reg,
-       .cache_type = REGCACHE_RBTREE,
+       .cache_type = REGCACHE_FLAT,
 };
 
 static int fsl_pwm_probe(struct platform_device *pdev)
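
[Editor's note] A note on the cache swap; the motivation is inferred, since the changelog is not shown here:

/*
 * regmap cache tradeoff:
 *   REGCACHE_RBTREE - dynamic nodes, suited to large sparse register maps
 *   REGCACHE_FLAT   - one array slot per register up to max_register,
 *                     allocation-free O(1) lookups
 *
 * The FTM block is small and dense (max_register = FTM_PWMLOAD), so a
 * flat cache fits in a tiny array and avoids rbtree node management on
 * register-access paths.
 */
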
index 9607bc8264600e68fecfc403b6867d73435e3d77..e165b7ce29d7dda18c6d84306a73cdba28818b6c 100644 (file)
@@ -126,7 +126,7 @@ struct rio_mport_mapping {
        struct list_head node;
        struct mport_dev *md;
        enum rio_mport_map_dir dir;
-       u32 rioid;
+       u16 rioid;
        u64 rio_addr;
        dma_addr_t phys_addr; /* for mmap */
        void *virt_addr; /* kernel address, for dma_free_coherent */
@@ -137,7 +137,7 @@ struct rio_mport_mapping {
 
 struct rio_mport_dma_map {
        int valid;
-       uint64_t length;
+       u64 length;
        void *vaddr;
        dma_addr_t paddr;
 };
@@ -208,7 +208,7 @@ struct mport_cdev_priv {
        struct kfifo            event_fifo;
        wait_queue_head_t       event_rx_wait;
        spinlock_t              fifo_lock;
-       unsigned int            event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */
+       u32                     event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */
 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
        struct dma_chan         *dmach;
        struct list_head        async_list;
@@ -276,7 +276,8 @@ static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
                return -EFAULT;
 
        if ((maint_io.offset % 4) ||
-           (maint_io.length == 0) || (maint_io.length % 4))
+           (maint_io.length == 0) || (maint_io.length % 4) ||
+           (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
                return -EINVAL;
 
        buffer = vmalloc(maint_io.length);
@@ -298,7 +299,8 @@ static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
                offset += 4;
        }
 
-       if (unlikely(copy_to_user(maint_io.buffer, buffer, maint_io.length)))
+       if (unlikely(copy_to_user((void __user *)(uintptr_t)maint_io.buffer,
+                                  buffer, maint_io.length)))
                ret = -EFAULT;
 out:
        vfree(buffer);
@@ -319,7 +321,8 @@ static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg,
                return -EFAULT;
 
        if ((maint_io.offset % 4) ||
-           (maint_io.length == 0) || (maint_io.length % 4))
+           (maint_io.length == 0) || (maint_io.length % 4) ||
+           (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
                return -EINVAL;
 
        buffer = vmalloc(maint_io.length);
@@ -327,7 +330,8 @@ static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg,
                return -ENOMEM;
        length = maint_io.length;
 
-       if (unlikely(copy_from_user(buffer, maint_io.buffer, length))) {
+       if (unlikely(copy_from_user(buffer,
+                       (void __user *)(uintptr_t)maint_io.buffer, length))) {
                ret = -EFAULT;
                goto out;
        }
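
[Editor's note] Two distinct hardenings meet in these maint_rd/maint_wr hunks. First, offset and length both come from userspace, so their sum is now bounded against the maintenance window (RIO_MAINT_SPACE_SZ) before any copy. Second, the double cast is the standard idiom for user pointers carried as u64 in an ioctl ABI:

/*
 * The ABI stores the buffer address as a u64 so 32-bit and 64-bit
 * userlands share one struct layout; converting back goes through
 * uintptr_t so the narrowing is well-defined on 32-bit kernels.
 */
void __user *ubuf = (void __user *)(uintptr_t)maint_io.buffer;

if (copy_to_user(ubuf, buffer, maint_io.length))
	ret = -EFAULT;
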
@@ -360,7 +364,7 @@ out:
  */
 static int
 rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp,
-                                 u32 rioid, u64 raddr, u32 size,
+                                 u16 rioid, u64 raddr, u32 size,
                                  dma_addr_t *paddr)
 {
        struct rio_mport *mport = md->mport;
@@ -369,7 +373,7 @@ rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp,
 
        rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size);
 
-       map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL);
+       map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (map == NULL)
                return -ENOMEM;
 
@@ -394,7 +398,7 @@ err_map_outb:
 
 static int
 rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp,
-                              u32 rioid, u64 raddr, u32 size,
+                              u16 rioid, u64 raddr, u32 size,
                               dma_addr_t *paddr)
 {
        struct rio_mport_mapping *map;
@@ -433,7 +437,7 @@ static int rio_mport_obw_map(struct file *filp, void __user *arg)
        dma_addr_t paddr;
        int ret;
 
-       if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_mmap))))
+       if (unlikely(copy_from_user(&map, arg, sizeof(map))))
                return -EFAULT;
 
        rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx",
@@ -448,7 +452,7 @@ static int rio_mport_obw_map(struct file *filp, void __user *arg)
 
        map.handle = paddr;
 
-       if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_mmap))))
+       if (unlikely(copy_to_user(arg, &map, sizeof(map))))
                return -EFAULT;
        return 0;
 }
@@ -469,7 +473,7 @@ static int rio_mport_obw_free(struct file *filp, void __user *arg)
        if (!md->mport->ops->unmap_outb)
                return -EPROTONOSUPPORT;
 
-       if (copy_from_user(&handle, arg, sizeof(u64)))
+       if (copy_from_user(&handle, arg, sizeof(handle)))
                return -EFAULT;
 
        rmcd_debug(OBW, "h=0x%llx", handle);
@@ -498,9 +502,9 @@ static int rio_mport_obw_free(struct file *filp, void __user *arg)
 static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg)
 {
        struct mport_dev *md = priv->md;
-       uint16_t hdid;
+       u16 hdid;
 
-       if (copy_from_user(&hdid, arg, sizeof(uint16_t)))
+       if (copy_from_user(&hdid, arg, sizeof(hdid)))
                return -EFAULT;
 
        md->mport->host_deviceid = hdid;
@@ -520,9 +524,9 @@ static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg)
 static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg)
 {
        struct mport_dev *md = priv->md;
-       uint32_t comptag;
+       u32 comptag;
 
-       if (copy_from_user(&comptag, arg, sizeof(uint32_t)))
+       if (copy_from_user(&comptag, arg, sizeof(comptag)))
                return -EFAULT;
 
        rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag);
@@ -837,7 +841,7 @@ err_out:
  * @xfer: data transfer descriptor structure
  */
 static int
-rio_dma_transfer(struct file *filp, uint32_t transfer_mode,
+rio_dma_transfer(struct file *filp, u32 transfer_mode,
                 enum rio_transfer_sync sync, enum dma_data_direction dir,
                 struct rio_transfer_io *xfer)
 {
@@ -875,7 +879,7 @@ rio_dma_transfer(struct file *filp, uint32_t transfer_mode,
                unsigned long offset;
                long pinned;
 
-               offset = (unsigned long)xfer->loc_addr & ~PAGE_MASK;
+               offset = (unsigned long)(uintptr_t)xfer->loc_addr & ~PAGE_MASK;
                nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT;
 
                page_list = kmalloc_array(nr_pages,
@@ -886,7 +890,7 @@ rio_dma_transfer(struct file *filp, uint32_t transfer_mode,
                }
 
                down_read(&current->mm->mmap_sem);
-               pinned = get_user_pages(current, current->mm,
+               pinned = get_user_pages(
                                (unsigned long)xfer->loc_addr & PAGE_MASK,
                                nr_pages, dir == DMA_FROM_DEVICE, 0,
                                page_list, NULL);
@@ -1015,19 +1019,20 @@ static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
        if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction))))
                return -EFAULT;
 
-       if (transaction.count != 1)
+       if (transaction.count != 1) /* only single transfer for now */
                return -EINVAL;
 
        if ((transaction.transfer_mode &
             priv->md->properties.transfer_mode) == 0)
                return -ENODEV;
 
-       transfer = vmalloc(transaction.count * sizeof(struct rio_transfer_io));
+       transfer = vmalloc(transaction.count * sizeof(*transfer));
        if (!transfer)
                return -ENOMEM;
 
-       if (unlikely(copy_from_user(transfer, transaction.block,
-             transaction.count * sizeof(struct rio_transfer_io)))) {
+       if (unlikely(copy_from_user(transfer,
+                                   (void __user *)(uintptr_t)transaction.block,
+                                   transaction.count * sizeof(*transfer)))) {
                ret = -EFAULT;
                goto out_free;
        }
@@ -1038,8 +1043,9 @@ static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
                ret = rio_dma_transfer(filp, transaction.transfer_mode,
                        transaction.sync, dir, &transfer[i]);
 
-       if (unlikely(copy_to_user(transaction.block, transfer,
-             transaction.count * sizeof(struct rio_transfer_io))))
+       if (unlikely(copy_to_user((void __user *)(uintptr_t)transaction.block,
+                                 transfer,
+                                 transaction.count * sizeof(*transfer))))
                ret = -EFAULT;
 
 out_free:
@@ -1129,11 +1135,11 @@ err_tmo:
 }
 
 static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp,
-                       uint64_t size, struct rio_mport_mapping **mapping)
+                       u64 size, struct rio_mport_mapping **mapping)
 {
        struct rio_mport_mapping *map;
 
-       map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL);
+       map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (map == NULL)
                return -ENOMEM;
 
@@ -1165,7 +1171,7 @@ static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
        struct rio_mport_mapping *mapping = NULL;
        int ret;
 
-       if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_dma_mem))))
+       if (unlikely(copy_from_user(&map, arg, sizeof(map))))
                return -EFAULT;
 
        ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping);
@@ -1174,7 +1180,7 @@ static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
 
        map.dma_handle = mapping->phys_addr;
 
-       if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_dma_mem)))) {
+       if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
                mutex_lock(&md->buf_mutex);
                kref_put(&mapping->ref, mport_release_mapping);
                mutex_unlock(&md->buf_mutex);
@@ -1192,7 +1198,7 @@ static int rio_mport_free_dma(struct file *filp, void __user *arg)
        int ret = -EFAULT;
        struct rio_mport_mapping *map, *_map;
 
-       if (copy_from_user(&handle, arg, sizeof(u64)))
+       if (copy_from_user(&handle, arg, sizeof(handle)))
                return -EFAULT;
        rmcd_debug(EXIT, "filp=%p", filp);
 
@@ -1242,14 +1248,18 @@ static int rio_mport_free_dma(struct file *filp, void __user *arg)
 
 static int
 rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp,
-                               u64 raddr, u32 size,
+                               u64 raddr, u64 size,
                                struct rio_mport_mapping **mapping)
 {
        struct rio_mport *mport = md->mport;
        struct rio_mport_mapping *map;
        int ret;
 
-       map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL);
+       /* rio_map_inb_region() accepts u32 size */
+       if (size > 0xffffffff)
+               return -EINVAL;
+
+       map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (map == NULL)
                return -ENOMEM;
 
@@ -1262,7 +1272,7 @@ rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp,
 
        if (raddr == RIO_MAP_ANY_ADDR)
                raddr = map->phys_addr;
-       ret = rio_map_inb_region(mport, map->phys_addr, raddr, size, 0);
+       ret = rio_map_inb_region(mport, map->phys_addr, raddr, (u32)size, 0);
        if (ret < 0)
                goto err_map_inb;
 
@@ -1288,7 +1298,7 @@ err_dma_alloc:
 
 static int
 rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp,
-                             u64 raddr, u32 size,
+                             u64 raddr, u64 size,
                              struct rio_mport_mapping **mapping)
 {
        struct rio_mport_mapping *map;
@@ -1331,7 +1341,7 @@ static int rio_mport_map_inbound(struct file *filp, void __user *arg)
 
        if (!md->mport->ops->map_inb)
                return -EPROTONOSUPPORT;
-       if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_mmap))))
+       if (unlikely(copy_from_user(&map, arg, sizeof(map))))
                return -EFAULT;
 
        rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);
@@ -1344,7 +1354,7 @@ static int rio_mport_map_inbound(struct file *filp, void __user *arg)
        map.handle = mapping->phys_addr;
        map.rio_addr = mapping->rio_addr;
 
-       if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_mmap)))) {
+       if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
                /* Delete mapping if it was created by this request */
                if (ret == 0 && mapping->filp == filp) {
                        mutex_lock(&md->buf_mutex);
@@ -1375,7 +1385,7 @@ static int rio_mport_inbound_free(struct file *filp, void __user *arg)
        if (!md->mport->ops->unmap_inb)
                return -EPROTONOSUPPORT;
 
-       if (copy_from_user(&handle, arg, sizeof(u64)))
+       if (copy_from_user(&handle, arg, sizeof(handle)))
                return -EFAULT;
 
        mutex_lock(&md->buf_mutex);
@@ -1401,7 +1411,7 @@ static int rio_mport_inbound_free(struct file *filp, void __user *arg)
 static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg)
 {
        struct mport_dev *md = priv->md;
-       uint32_t port_idx = md->mport->index;
+       u32 port_idx = md->mport->index;
 
        rmcd_debug(MPORT, "port_index=%d", port_idx);
 
@@ -1451,7 +1461,7 @@ static void rio_mport_doorbell_handler(struct rio_mport *mport, void *dev_id,
        handled = 0;
        spin_lock(&data->db_lock);
        list_for_each_entry(db_filter, &data->doorbells, data_node) {
-               if (((db_filter->filter.rioid == 0xffffffff ||
+               if (((db_filter->filter.rioid == RIO_INVALID_DESTID ||
                      db_filter->filter.rioid == src)) &&
                      info >= db_filter->filter.low &&
                      info <= db_filter->filter.high) {
@@ -1525,6 +1535,9 @@ static int rio_mport_remove_db_filter(struct mport_cdev_priv *priv,
        if (copy_from_user(&filter, arg, sizeof(filter)))
                return -EFAULT;
 
+       if (filter.low > filter.high)
+               return -EINVAL;
+
        spin_lock_irqsave(&priv->md->db_lock, flags);
        list_for_each_entry(db_filter, &priv->db_filters, priv_node) {
                if (db_filter->filter.rioid == filter.rioid &&
@@ -1737,10 +1750,10 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
                return -EEXIST;
        }
 
-       size = sizeof(struct rio_dev);
+       size = sizeof(*rdev);
        mport = md->mport;
-       destid = (u16)dev_info.destid;
-       hopcount = (u8)dev_info.hopcount;
+       destid = dev_info.destid;
+       hopcount = dev_info.hopcount;
 
        if (rio_mport_read_config_32(mport, destid, hopcount,
                                     RIO_PEF_CAR, &rval))
@@ -1872,8 +1885,8 @@ static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg)
                do {
                        rdev = rio_get_comptag(dev_info.comptag, rdev);
                        if (rdev && rdev->dev.parent == &mport->net->dev &&
-                           rdev->destid == (u16)dev_info.destid &&
-                           rdev->hopcount == (u8)dev_info.hopcount)
+                           rdev->destid == dev_info.destid &&
+                           rdev->hopcount == dev_info.hopcount)
                                break;
                } while (rdev);
        }
@@ -2146,8 +2159,8 @@ static long mport_cdev_ioctl(struct file *filp,
                return maint_port_idx_get(data, (void __user *)arg);
        case RIO_MPORT_GET_PROPERTIES:
                md->properties.hdid = md->mport->host_deviceid;
-               if (copy_to_user((void __user *)arg, &(data->md->properties),
-                                sizeof(data->md->properties)))
+               if (copy_to_user((void __user *)arg, &(md->properties),
+                                sizeof(md->properties)))
                        return -EFAULT;
                return 0;
        case RIO_ENABLE_DOORBELL_RANGE:
@@ -2159,11 +2172,11 @@ static long mport_cdev_ioctl(struct file *filp,
        case RIO_DISABLE_PORTWRITE_RANGE:
                return rio_mport_remove_pw_filter(data, (void __user *)arg);
        case RIO_SET_EVENT_MASK:
-               data->event_mask = arg;
+               data->event_mask = (u32)arg;
                return 0;
        case RIO_GET_EVENT_MASK:
                if (copy_to_user((void __user *)arg, &data->event_mask,
-                                   sizeof(data->event_mask)))
+                                   sizeof(u32)))
                        return -EFAULT;
                return 0;
        case RIO_MAP_OUTBOUND:
@@ -2374,7 +2387,7 @@ static ssize_t mport_write(struct file *filp, const char __user *buf,
                        return -EINVAL;
 
                ret = rio_mport_send_doorbell(mport,
-                                             (u16)event.u.doorbell.rioid,
+                                             event.u.doorbell.rioid,
                                              event.u.doorbell.payload);
                if (ret < 0)
                        return ret;
@@ -2421,7 +2434,7 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
        struct mport_dev *md;
        struct rio_mport_attr attr;
 
-       md = kzalloc(sizeof(struct mport_dev), GFP_KERNEL);
+       md = kzalloc(sizeof(*md), GFP_KERNEL);
        if (!md) {
                rmcd_error("Unable allocate a device object");
                return NULL;
@@ -2470,7 +2483,7 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
        /* The transfer_mode property will be returned through mport query
         * interface
         */
-#ifdef CONFIG_PPC /* for now: only on Freescale's SoCs */
+#ifdef CONFIG_FSL_RIO /* for now: only on Freescale's SoCs */
        md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED;
 #else
        md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER;
@@ -2669,9 +2682,9 @@ static int __init mport_init(void)
 
        /* Create device class needed by udev */
        dev_class = class_create(THIS_MODULE, DRV_NAME);
-       if (!dev_class) {
+       if (IS_ERR(dev_class)) {
                rmcd_error("Unable to create " DRV_NAME " class");
-               return -EINVAL;
+               return PTR_ERR(dev_class);
        }
 
        ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME);
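
[Editor's note] class_create() follows the ERR_PTR convention: it encodes the errno in the pointer and never returns NULL, so the old !dev_class test could not fire. The same NULL-versus-ERR_PTR confusion is fixed for syscon_regmap_lookup_by_phandle() in the st_rproc hunk just below. The corrected shape, with a placeholder class name:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>

static int create_example_class(void)
{
	struct class *cls = class_create(THIS_MODULE, "example");

	if (IS_ERR(cls))
		return PTR_ERR(cls);	/* e.g. -ENOMEM, recovered from the pointer */

	class_destroy(cls);
	return 0;
}
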
index 6bb04d453247ced25ed92e854888d6854b50f882..6f056caa8a56c6ab22ad949db5c43d9fbe8e6844 100644 (file)
@@ -189,9 +189,9 @@ static int st_rproc_parse_dt(struct platform_device *pdev)
        }
 
        ddata->boot_base = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
-       if (!ddata->boot_base) {
+       if (IS_ERR(ddata->boot_base)) {
                dev_err(dev, "Boot base not found\n");
-               return -EINVAL;
+               return PTR_ERR(ddata->boot_base);
        }
 
        err = of_property_read_u32_index(np, "st,syscfg", 1,
index b2156ee5bae1a50f72b8cbf69d68741ef0082671..ecb7dbae9be9daa0ee96ede34a446a78e8e7debd 100644 (file)
@@ -863,7 +863,7 @@ out:
  * A user-initiated temperature conversion is not started by this function,
  * so the temperature is updated once every 64 seconds.
  */
-static int ds3231_hwmon_read_temp(struct device *dev, s16 *mC)
+static int ds3231_hwmon_read_temp(struct device *dev, s32 *mC)
 {
        struct ds1307 *ds1307 = dev_get_drvdata(dev);
        u8 temp_buf[2];
@@ -892,7 +892,7 @@ static ssize_t ds3231_hwmon_show_temp(struct device *dev,
                                struct device_attribute *attr, char *buf)
 {
        int ret;
-       s16 temp;
+       s32 temp;
 
        ret = ds3231_hwmon_read_temp(dev, &temp);
        if (ret)
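
[Editor's note] The s16 -> s32 widening is a plain range problem, going by the DS3231's documented span:

/*
 * The part reports -128 .. +127.75 degC and this driver converts to
 * millidegrees for hwmon, so the extremes are +/-127750 m degC, well
 * outside the +/-32767 range of an s16.  Readings above roughly
 * 32.7 degC would have wrapped negative.
 */
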
@@ -1531,7 +1531,7 @@ read_rtc:
                return PTR_ERR(ds1307->rtc);
        }
 
-       if (ds1307_can_wakeup_device) {
+       if (ds1307_can_wakeup_device && ds1307->client->irq <= 0) {
                /* Disable request for an IRQ */
                want_irq = false;
                dev_info(&client->dev, "'wakeup-source' is set, request for an IRQ is disabled!\n");
index 1bce9cf51b1e917632fe97bcbf636293e5442adc..b83908670a9ab0a229029353b669cbd82956eca2 100644 (file)
@@ -756,15 +756,16 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
        blk_cleanup_queue(dev_info->dcssblk_queue);
        dev_info->gd->queue = NULL;
        put_disk(dev_info->gd);
-       device_unregister(&dev_info->dev);
 
        /* unload all related segments */
        list_for_each_entry(entry, &dev_info->seg_list, lh)
                segment_unload(entry->segment_name);
 
-       put_device(&dev_info->dev);
        up_write(&dcssblk_devices_sem);
 
+       device_unregister(&dev_info->dev);
+       put_device(&dev_info->dev);
+
        rc = count;
 out_buf:
        kfree(local_buf);
index 75d9896deccb96da19432dcf7b96d339651d625e..e6f54d3b89690613e94d388f1e325adfd32b25ad 100644 (file)
@@ -303,7 +303,7 @@ static void scm_blk_request(struct request_queue *rq)
                if (req->cmd_type != REQ_TYPE_FS) {
                        blk_start_request(req);
                        blk_dump_rq_flags(req, KMSG_COMPONENT " bad request");
-                       blk_end_request_all(req, -EIO);
+                       __blk_end_request_all(req, -EIO);
                        continue;
                }
 
index 648cb86afd427776abc0d109f0982284d094699a..ea607a4a1bddaf3e41165aebed1fd787b87d754e 100644 (file)
@@ -56,6 +56,7 @@ static int sclp_ctl_ioctl_sccb(void __user *user_area)
 {
        struct sclp_ctl_sccb ctl_sccb;
        struct sccb_header *sccb;
+       unsigned long copied;
        int rc;
 
        if (copy_from_user(&ctl_sccb, user_area, sizeof(ctl_sccb)))
@@ -65,14 +66,15 @@ static int sclp_ctl_ioctl_sccb(void __user *user_area)
        sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sccb)
                return -ENOMEM;
-       if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sizeof(*sccb))) {
+       copied = PAGE_SIZE -
+               copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), PAGE_SIZE);
+       if (offsetof(struct sccb_header, length) +
+           sizeof(sccb->length) > copied || sccb->length > copied) {
                rc = -EFAULT;
                goto out_free;
        }
-       if (sccb->length > PAGE_SIZE || sccb->length < 8)
-               return -EINVAL;
-       if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sccb->length)) {
-               rc = -EFAULT;
+       if (sccb->length < 8) {
+               rc = -EINVAL;
                goto out_free;
        }
        rc = sclp_sync_request(ctl_sccb.cmdw, sccb);
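
[Editor's note] Two problems the rewrite addresses at once:

/*
 * 1. The old -EINVAL path returned directly, leaking the zeroed page
 *    (every other exit goes through out_free).
 * 2. The SCCB was fetched from userspace twice (header first, then
 *    again for sccb->length bytes), so the validated length could
 *    differ from the data actually used.  Copying the whole page once
 *    and checking the header against the number of bytes successfully
 *    copied removes the double fetch.
 */
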
index c3e22523faf36b2b21aa83dae388d2f2abf17206..ad17fc5883f61f61bc509f6d95754c881fca1c24 100644 (file)
@@ -642,7 +642,7 @@ static void ctcmpc_send_sweep_req(struct channel *rch)
 
        kfree(header);
 
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
        skb_queue_tail(&ch->sweep_queue, sweep_skb);
 
        fsm_addtimer(&ch->sweep_timer, 100, CTC_EVENT_RSWEEP_TIMER, ch);
@@ -911,7 +911,7 @@ static int ctcm_tx(struct sk_buff *skb, struct net_device *dev)
        if (ctcm_test_and_set_busy(dev))
                return NETDEV_TX_BUSY;
 
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
        if (ctcm_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0)
                return NETDEV_TX_BUSY;
        return NETDEV_TX_OK;
@@ -994,7 +994,7 @@ static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
                                        goto done;
        }
 
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
        if (ctcmpc_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0) {
                CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
                        "%s(%s): device error - dropped",
index edf16bfba8eeafa78a41eea97a3ac0664cbd3e9f..c103fc7efe9fd4384edaa6365d83805267733d07 100644 (file)
@@ -671,7 +671,7 @@ static void ctcmpc_send_sweep_resp(struct channel *rch)
 
        kfree(header);
 
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
        skb_queue_tail(&ch->sweep_queue, sweep_skb);
 
        fsm_addtimer(&ch->sweep_timer, 100, CTC_EVENT_RSWEEP_TIMER, ch);
index 0ba3a2f81750f6785bdd107613d9a98069e88662..b0e8ffdf864b049c1d72797c039b91a6e29d373e 100644 (file)
@@ -1407,7 +1407,7 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
                IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
                return NETDEV_TX_BUSY;
        }
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
        rc = netiucv_transmit_skb(privptr->conn, skb);
        netiucv_clear_busy(dev);
        return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
index 787153764120cc8db03ef577cb485ffa765b7735..b7b74776e2ff581922b6b070554c62f4a9e35dbc 100644 (file)
@@ -3481,7 +3481,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
                }
        }
 
-       queue->card->dev->trans_start = jiffies;
+       netif_trans_update(queue->card->dev);
        if (queue->card->options.performance_stats) {
                queue->card->perf_stats.outbound_do_qdio_cnt++;
                queue->card->perf_stats.outbound_do_qdio_start_time =
index 21a67ed047e8741889ee2a254989f55d095ddbd8..ff6caab8cc8b7c7105d465963b4f84579d1c771e 100644 (file)
@@ -452,10 +452,11 @@ static int aac_slave_configure(struct scsi_device *sdev)
                else if (depth < 2)
                        depth = 2;
                scsi_change_queue_depth(sdev, depth);
-       } else
+       } else {
                scsi_change_queue_depth(sdev, 1);
 
                sdev->tagged_supported = 1;
+       }
 
        return 0;
 }
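
[Editor's note] A classic dangling-statement bug, fixed by adding the braces:

/*
 * Without braces only the first statement binds to the else, so the
 * second ran on every path despite its indentation:
 *
 *	} else
 *		scsi_change_queue_depth(sdev, 1);
 *
 *		sdev->tagged_supported = 1;	// executed unconditionally
 */
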
index f3bb7af4e984ed50471aa75de673c4287d297503..ead83a24bcd1ea000b89e3fb6e6b3b0531c588d3 100644 (file)
@@ -688,6 +688,7 @@ static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr,
 {
        struct flowi6 fl;
 
+       memset(&fl, 0, sizeof(fl));
        if (saddr)
                memcpy(&fl.saddr, saddr, sizeof(struct in6_addr));
        if (daddr)
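
[Editor's note] Why the memset matters here:

/*
 * A stack-allocated struct flowi6 starts with indeterminate contents,
 * and the subsequent route lookup keys off more than the two addresses
 * copied in (protocol, ports, flow label, interface index).  Without
 * zeroing first, the selected route could vary with stack garbage.
 */
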
index 35968bdb48666e9603de837aefecd687ed5cbcea..8fb9643fe6e31f90cb774407dbb617d39a0dd913 100644 (file)
@@ -289,7 +289,7 @@ static void context_reset(struct afu_cmd *cmd)
                atomic64_set(&afu->room, room);
                if (room)
                        goto write_rrin;
-               udelay(nretry);
+               udelay(1 << nretry);
        } while (nretry++ < MC_ROOM_RETRY_CNT);
 
        pr_err("%s: no cmd_room to send reset\n", __func__);
@@ -303,7 +303,7 @@ write_rrin:
                if (rrin != 0x1)
                        break;
                /* Double delay each time */
-               udelay(2 << nretry);
+               udelay(1 << nretry);
        } while (nretry++ < MC_ROOM_RETRY_CNT);
 }
 
@@ -338,7 +338,7 @@ retry:
                        atomic64_set(&afu->room, room);
                        if (room)
                                goto write_ioarrin;
-                       udelay(nretry);
+                       udelay(1 << nretry);
                } while (nretry++ < MC_ROOM_RETRY_CNT);
 
                dev_err(dev, "%s: no cmd_room to send 0x%X\n",
@@ -352,7 +352,7 @@ retry:
                 * afu->room.
                 */
                if (nretry++ < MC_ROOM_RETRY_CNT) {
-                       udelay(nretry);
+                       udelay(1 << nretry);
                        goto retry;
                }
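
[Editor's note] These udelay changes unify three retry loops on one exponential backoff. The per-attempt delays, in microseconds:

/*
 *   nretry:           0    1    2    3    4  ...
 *   udelay(nretry)    0    1    2    3    4      (old: first retry waits 0 us)
 *   udelay(2 << n)    2    4    8   16   32      (old rrin loop, 2x offset)
 *   udelay(1 << n)    1    2    4    8   16      (new: uniform exponential)
 */
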
 
@@ -683,28 +683,23 @@ static void stop_afu(struct cxlflash_cfg *cfg)
 }
 
 /**
- * term_mc() - terminates the master context
+ * term_intr() - disables all AFU interrupts
  * @cfg:       Internal structure associated with the host.
  * @level:     Depth of allocation, where to begin waterfall tear down.
  *
  * Safe to call with AFU/MC in partially allocated/initialized state.
  */
-static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level)
+static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level)
 {
-       int rc = 0;
        struct afu *afu = cfg->afu;
        struct device *dev = &cfg->dev->dev;
 
        if (!afu || !cfg->mcctx) {
-               dev_err(dev, "%s: returning from term_mc with NULL afu or MC\n",
-                      __func__);
+               dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
                return;
        }
 
        switch (level) {
-       case UNDO_START:
-               rc = cxl_stop_context(cfg->mcctx);
-               BUG_ON(rc);
        case UNMAP_THREE:
                cxl_unmap_afu_irq(cfg->mcctx, 3, afu);
        case UNMAP_TWO:
@@ -713,9 +708,34 @@ static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level)
                cxl_unmap_afu_irq(cfg->mcctx, 1, afu);
        case FREE_IRQ:
                cxl_free_afu_irqs(cfg->mcctx);
-       case RELEASE_CONTEXT:
-               cfg->mcctx = NULL;
+               /* fall through */
+       case UNDO_NOOP:
+               /* No action required */
+               break;
+       }
+}
+
+/**
+ * term_mc() - terminates the master context
+ * @cfg:       Internal structure associated with the host.
+ * @level:     Depth of allocation, where to begin waterfall tear down.
+ *
+ * Safe to call with AFU/MC in partially allocated/initialized state.
+ */
+static void term_mc(struct cxlflash_cfg *cfg)
+{
+       int rc = 0;
+       struct afu *afu = cfg->afu;
+       struct device *dev = &cfg->dev->dev;
+
+       if (!afu || !cfg->mcctx) {
+               dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
+               return;
        }
+
+       rc = cxl_stop_context(cfg->mcctx);
+       WARN_ON(rc);
+       cfg->mcctx = NULL;
 }
 
 /**
@@ -726,10 +746,20 @@ static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level)
  */
 static void term_afu(struct cxlflash_cfg *cfg)
 {
+       /*
+        * Tear down is carefully orchestrated to ensure
+        * no interrupts can come in when the problem state
+        * area is unmapped.
+        *
+        * 1) Disable all AFU interrupts
+        * 2) Unmap the problem state area
+        * 3) Stop the master context
+        */
+       term_intr(cfg, UNMAP_THREE);
        if (cfg->afu)
                stop_afu(cfg);
 
-       term_mc(cfg, UNDO_START);
+       term_mc(cfg);
 
        pr_debug("%s: returning\n", __func__);
 }
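
[Editor's note] The term_mc() split separates "stop the context" from the waterfall that unwinds interrupt setup, so teardown can be ordered as the new comment describes. The waterfall itself relies on deliberate switch fall-through; a minimal sketch of the shape, with hypothetical unmap()/free_irqs() helpers:

enum undo_level { UNDO_NOOP = 0, FREE_IRQ, UNMAP_ONE, UNMAP_TWO, UNMAP_THREE };

static void unwind(enum undo_level level)
{
	switch (level) {
	case UNMAP_THREE:
		unmap(3);		/* fall through */
	case UNMAP_TWO:
		unmap(2);		/* fall through */
	case UNMAP_ONE:
		unmap(1);		/* fall through */
	case FREE_IRQ:
		free_irqs();		/* fall through */
	case UNDO_NOOP:
		break;			/* entering here undoes nothing */
	}
}
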
@@ -1597,41 +1627,24 @@ static int start_afu(struct cxlflash_cfg *cfg)
 }
 
 /**
- * init_mc() - create and register as the master context
+ * init_intr() - setup interrupt handlers for the master context
  * @cfg:       Internal structure associated with the host.
  *
  * Return: 0 on success, -errno on failure
  */
-static int init_mc(struct cxlflash_cfg *cfg)
+static enum undo_level init_intr(struct cxlflash_cfg *cfg,
+                                struct cxl_context *ctx)
 {
-       struct cxl_context *ctx;
-       struct device *dev = &cfg->dev->dev;
        struct afu *afu = cfg->afu;
+       struct device *dev = &cfg->dev->dev;
        int rc = 0;
-       enum undo_level level;
-
-       ctx = cxl_get_context(cfg->dev);
-       if (unlikely(!ctx))
-               return -ENOMEM;
-       cfg->mcctx = ctx;
-
-       /* Set it up as a master with the CXL */
-       cxl_set_master(ctx);
-
-       /* During initialization reset the AFU to start from a clean slate */
-       rc = cxl_afu_reset(cfg->mcctx);
-       if (unlikely(rc)) {
-               dev_err(dev, "%s: initial AFU reset failed rc=%d\n",
-                       __func__, rc);
-               level = RELEASE_CONTEXT;
-               goto out;
-       }
+       enum undo_level level = UNDO_NOOP;
 
        rc = cxl_allocate_afu_irqs(ctx, 3);
        if (unlikely(rc)) {
                dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n",
                        __func__, rc);
-               level = RELEASE_CONTEXT;
+               level = UNDO_NOOP;
                goto out;
        }
 
@@ -1661,8 +1674,47 @@ static int init_mc(struct cxlflash_cfg *cfg)
                level = UNMAP_TWO;
                goto out;
        }
+out:
+       return level;
+}
 
-       rc = 0;
+/**
+ * init_mc() - create and register as the master context
+ * @cfg:       Internal structure associated with the host.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int init_mc(struct cxlflash_cfg *cfg)
+{
+       struct cxl_context *ctx;
+       struct device *dev = &cfg->dev->dev;
+       int rc = 0;
+       enum undo_level level;
+
+       ctx = cxl_get_context(cfg->dev);
+       if (unlikely(!ctx)) {
+               rc = -ENOMEM;
+               goto ret;
+       }
+       cfg->mcctx = ctx;
+
+       /* Set it up as a master with the CXL */
+       cxl_set_master(ctx);
+
+       /* During initialization reset the AFU to start from a clean slate */
+       rc = cxl_afu_reset(cfg->mcctx);
+       if (unlikely(rc)) {
+               dev_err(dev, "%s: initial AFU reset failed rc=%d\n",
+                       __func__, rc);
+               goto ret;
+       }
+
+       level = init_intr(cfg, ctx);
+       if (unlikely(level)) {
+               dev_err(dev, "%s: setting up interrupts failed rc=%d\n",
+                       __func__, rc);
+               goto out;
+       }
 
        /* This performs the equivalent of the CXL_IOCTL_START_WORK.
         * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
@@ -1678,7 +1730,7 @@ ret:
        pr_debug("%s: returning rc=%d\n", __func__, rc);
        return rc;
 out:
-       term_mc(cfg, level);
+       term_intr(cfg, level);
        goto ret;
 }
 
@@ -1751,7 +1803,8 @@ out:
 err2:
        kref_put(&afu->mapcount, afu_unmap);
 err1:
-       term_mc(cfg, UNDO_START);
+       term_intr(cfg, UNMAP_THREE);
+       term_mc(cfg);
        goto out;
 }
 
@@ -2488,8 +2541,7 @@ static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
                if (unlikely(rc))
                        dev_err(dev, "%s: Failed to mark user contexts!(%d)\n",
                                __func__, rc);
-               stop_afu(cfg);
-               term_mc(cfg, UNDO_START);
+               term_afu(cfg);
                return PCI_ERS_RESULT_NEED_RESET;
        case pci_channel_io_perm_failure:
                cfg->state = STATE_FAILTERM;
index 0faed422c7f47eec6ac8f474cb85f7e61dd6ed1d..eb9d8f730b38ec34e86c777d560f3762b48bf3d5 100644 (file)
 #define WWPN_BUF_LEN   (WWPN_LEN + 1)
 
 enum undo_level {
-       RELEASE_CONTEXT = 0,
+       UNDO_NOOP = 0,
        FREE_IRQ,
        UNMAP_ONE,
        UNMAP_TWO,
-       UNMAP_THREE,
-       UNDO_START
+       UNMAP_THREE
 };
 
 struct dev_dependent_vals {
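The term_intr()/term_mc() split leans on the cascading style of the undo_level
enum: entering the switch at a deeper level falls through every shallower
teardown step, so a caller only names how far setup got before failing. A
minimal userspace sketch of the idea (term_intr_sketch() and its printf
placeholders are illustrative stand-ins, not driver code):

    #include <stdio.h>

    enum undo_level { UNDO_NOOP = 0, FREE_IRQ, UNMAP_ONE, UNMAP_TWO, UNMAP_THREE };

    /* Entering at a deeper level falls through all shallower steps. */
    static void term_intr_sketch(enum undo_level level)
    {
            switch (level) {
            case UNMAP_THREE:
                    printf("unmap problem-state irq 3\n");  /* fall through */
            case UNMAP_TWO:
                    printf("unmap problem-state irq 2\n");  /* fall through */
            case UNMAP_ONE:
                    printf("unmap problem-state irq 1\n");  /* fall through */
            case FREE_IRQ:
                    printf("free AFU irqs\n");              /* fall through */
            default:
                    break;
            }
    }

    int main(void)
    {
            term_intr_sketch(UNMAP_TWO);    /* undoes maps 2 and 1, then frees irqs */
            return 0;
    }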
index a404a41e871c23b18aa4ea10d83af322f9107b40..8eaed0522aa36e83ddbe259a4fe2293c5ac61579 100644 (file)
@@ -1112,9 +1112,9 @@ static void alua_bus_detach(struct scsi_device *sdev)
        h->sdev = NULL;
        spin_unlock(&h->pg_lock);
        if (pg) {
-               spin_lock(&pg->lock);
+               spin_lock_irq(&pg->lock);
                list_del_rcu(&h->node);
-               spin_unlock(&pg->lock);
+               spin_unlock_irq(&pg->lock);
                kref_put(&pg->kref, release_port_group);
        }
        sdev->handler_data = NULL;
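The switch to spin_lock_irq() here presumably exists because pg->lock is also
taken from a context that runs with interrupts disabled; taking it with
interrupts enabled risks a self-deadlock if an interrupt fires while the lock
is held. A schematic sketch of the _irq variant's contract (the helpers below
are invented stand-ins, not the kernel primitives):

    #include <stdio.h>

    static int irqs_on = 1;
    static int lock_held;

    /* spin_lock_irq(): disable local interrupts, then take the lock, so
     * an interrupt handler wanting the same lock cannot preempt the holder. */
    static void spin_lock_irq_sketch(void)
    {
            irqs_on = 0;            /* local_irq_disable() */
            lock_held = 1;          /* acquire the spinlock */
    }

    static void spin_unlock_irq_sketch(void)
    {
            lock_held = 0;          /* release the spinlock */
            irqs_on = 1;            /* local_irq_enable() */
    }

    int main(void)
    {
            spin_lock_irq_sketch();
            printf("critical section: lock %d, irqs %s\n",
                   lock_held, irqs_on ? "on" : "off");
            spin_unlock_irq_sketch();
            return 0;
    }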
index e4db5fb3239af700ad60fdf2e21cdb35267cc861..8c44b9c424afec43c10daeba23c16e96b20e127b 100644 (file)
@@ -5030,7 +5030,7 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
 static int
 _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
 {
-       int r, i;
+       int r, i, index;
        unsigned long   flags;
        u32 reply_address;
        u16 smid;
@@ -5039,8 +5039,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
        struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
        u8 hide_flag;
        struct adapter_reply_queue *reply_q;
-       long reply_post_free;
-       u32 reply_post_free_sz, index = 0;
+       Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;
 
        dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
            __func__));
@@ -5124,27 +5123,27 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
                _base_assign_reply_queues(ioc);
 
        /* initialize Reply Post Free Queue */
-       reply_post_free_sz = ioc->reply_post_queue_depth *
-           sizeof(Mpi2DefaultReplyDescriptor_t);
-       reply_post_free = (long)ioc->reply_post[index].reply_post_free;
+       index = 0;
+       reply_post_free_contig = ioc->reply_post[0].reply_post_free;
        list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+               /*
+                * If RDPQ is enabled, switch to the next allocation.
+                * Otherwise advance within the contiguous region.
+                */
+               if (ioc->rdpq_array_enable) {
+                       reply_q->reply_post_free =
+                               ioc->reply_post[index++].reply_post_free;
+               } else {
+                       reply_q->reply_post_free = reply_post_free_contig;
+                       reply_post_free_contig += ioc->reply_post_queue_depth;
+               }
+
                reply_q->reply_post_host_index = 0;
-               reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
-                   reply_post_free;
                for (i = 0; i < ioc->reply_post_queue_depth; i++)
                        reply_q->reply_post_free[i].Words =
                            cpu_to_le64(ULLONG_MAX);
                if (!_base_is_controller_msix_enabled(ioc))
                        goto skip_init_reply_post_free_queue;
-               /*
-                * If RDPQ is enabled, switch to the next allocation.
-                * Otherwise advance within the contiguous region.
-                */
-               if (ioc->rdpq_array_enable)
-                       reply_post_free = (long)
-                           ioc->reply_post[++index].reply_post_free;
-               else
-                       reply_post_free += reply_post_free_sz;
        }
  skip_init_reply_post_free_queue:
 
index b1bf42b93fccb23249e4341e3ce56f6b8c84c5d5..1deb6adc411f795833a444a84b1e80e9f9629c4d 100644 (file)
@@ -784,8 +784,9 @@ void scsi_attach_vpd(struct scsi_device *sdev)
        int pg83_supported = 0;
        unsigned char __rcu *vpd_buf, *orig_vpd_buf = NULL;
 
-       if (sdev->skip_vpd_pages)
+       if (!scsi_device_supports_vpd(sdev))
                return;
+
 retry_pg0:
        vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
        if (!vpd_buf)
index 92ffd2406f97d72fdf08f12ffb07ef2d53627409..2b642b145be1b8a6be08e7eb3829eb6ee357eb4f 100644 (file)
@@ -81,6 +81,7 @@ const char *scsi_host_state_name(enum scsi_host_state state)
        return name;
 }
 
+#ifdef CONFIG_SCSI_DH
 static const struct {
        unsigned char   value;
        char            *name;
@@ -94,7 +95,7 @@ static const struct {
        { SCSI_ACCESS_STATE_TRANSITIONING, "transitioning" },
 };
 
-const char *scsi_access_state_name(unsigned char state)
+static const char *scsi_access_state_name(unsigned char state)
 {
        int i;
        char *name = NULL;
@@ -107,6 +108,7 @@ const char *scsi_access_state_name(unsigned char state)
        }
        return name;
 }
+#endif
 
 static int check_set(unsigned long long *val, char *src)
 {
@@ -226,7 +228,7 @@ show_shost_state(struct device *dev, struct device_attribute *attr, char *buf)
 }
 
 /* DEVICE_ATTR(state) clashes with dev_attr_state for sdev */
-struct device_attribute dev_attr_hstate =
+static struct device_attribute dev_attr_hstate =
        __ATTR(state, S_IRUGO | S_IWUSR, show_shost_state, store_shost_state);
 
 static ssize_t
@@ -401,7 +403,7 @@ static struct attribute *scsi_sysfs_shost_attrs[] = {
        NULL
 };
 
-struct attribute_group scsi_shost_attr_group = {
+static struct attribute_group scsi_shost_attr_group = {
        .attrs =        scsi_sysfs_shost_attrs,
 };
 
index 5a5457ac9cdb4560a29be20d28f3a31b4556b578..f52b74cf8d1e691a10676f00c2012f809cc940f8 100644 (file)
@@ -1275,18 +1275,19 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
        struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
        struct scsi_device *sdp = sdkp->device;
        struct Scsi_Host *host = sdp->host;
+       sector_t capacity = logical_to_sectors(sdp, sdkp->capacity);
        int diskinfo[4];
 
        /* default to most commonly used values */
-        diskinfo[0] = 0x40;    /* 1 << 6 */
-               diskinfo[1] = 0x20;     /* 1 << 5 */
-               diskinfo[2] = sdkp->capacity >> 11;
-       
+       diskinfo[0] = 0x40;     /* 1 << 6 */
+       diskinfo[1] = 0x20;     /* 1 << 5 */
+       diskinfo[2] = capacity >> 11;
+
        /* override with calculated, extended default, or driver values */
        if (host->hostt->bios_param)
-               host->hostt->bios_param(sdp, bdev, sdkp->capacity, diskinfo);
+               host->hostt->bios_param(sdp, bdev, capacity, diskinfo);
        else
-               scsicam_bios_param(bdev, sdkp->capacity, diskinfo);
+               scsicam_bios_param(bdev, capacity, diskinfo);
 
        geo->heads = diskinfo[0];
        geo->sectors = diskinfo[1];
@@ -2337,14 +2338,6 @@ got_data:
        if (sdkp->capacity > 0xffffffff)
                sdp->use_16_for_rw = 1;
 
-       /* Rescale capacity to 512-byte units */
-       if (sector_size == 4096)
-               sdkp->capacity <<= 3;
-       else if (sector_size == 2048)
-               sdkp->capacity <<= 2;
-       else if (sector_size == 1024)
-               sdkp->capacity <<= 1;
-
        blk_queue_physical_block_size(sdp->request_queue,
                                      sdkp->physical_block_size);
        sdkp->device->sector_size = sector_size;
@@ -2795,28 +2788,6 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
                sdkp->ws10 = 1;
 }
 
-static int sd_try_extended_inquiry(struct scsi_device *sdp)
-{
-       /* Attempt VPD inquiry if the device blacklist explicitly calls
-        * for it.
-        */
-       if (sdp->try_vpd_pages)
-               return 1;
-       /*
-        * Although VPD inquiries can go to SCSI-2 type devices,
-        * some USB ones crash on receiving them, and the pages
-        * we currently ask for are for SPC-3 and beyond
-        */
-       if (sdp->scsi_level > SCSI_SPC_2 && !sdp->skip_vpd_pages)
-               return 1;
-       return 0;
-}
-
-static inline u32 logical_to_sectors(struct scsi_device *sdev, u32 blocks)
-{
-       return blocks << (ilog2(sdev->sector_size) - 9);
-}
-
 /**
  *     sd_revalidate_disk - called the first time a new disk is seen,
  *     performs disk spin up, read_capacity, etc.
@@ -2856,7 +2827,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
        if (sdkp->media_present) {
                sd_read_capacity(sdkp, buffer);
 
-               if (sd_try_extended_inquiry(sdp)) {
+               if (scsi_device_supports_vpd(sdp)) {
                        sd_read_block_provisioning(sdkp);
                        sd_read_block_limits(sdkp);
                        sd_read_block_characteristics(sdkp);
@@ -2891,7 +2862,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
        if (sdkp->opt_xfer_blocks &&
            sdkp->opt_xfer_blocks <= dev_max &&
            sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
-           sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE)
+           sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_SIZE)
                rw_max = q->limits.io_opt =
                        sdkp->opt_xfer_blocks * sdp->sector_size;
        else
@@ -2900,7 +2871,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
        /* Combine with controller limits */
        q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
 
-       set_capacity(disk, sdkp->capacity);
+       set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity));
        sd_config_write_same(sdkp);
        kfree(buffer);
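The removed sd_try_extended_inquiry() logic now lives behind the shared
scsi_device_supports_vpd() predicate, so sd and scsi_attach_vpd() agree on
when VPD pages are safe to fetch. A self-contained sketch of the same
decision (struct scsi_dev is simplified, and SCSI_SPC_2's value below is a
stand-in for the scsi.h constant):

    #include <stdbool.h>
    #include <stdio.h>

    #define SCSI_SPC_2 5    /* stand-in for the include/scsi/scsi.h constant */

    struct scsi_dev {
            unsigned int try_vpd_pages : 1;         /* blacklist: force VPD on */
            unsigned int skip_vpd_pages : 1;        /* blacklist: force VPD off */
            int scsi_level;
    };

    static bool supports_vpd(const struct scsi_dev *sdev)
    {
            if (sdev->try_vpd_pages)                /* explicit opt-in wins */
                    return true;
            /* otherwise require SPC-3 or later and no explicit opt-out */
            return sdev->scsi_level > SCSI_SPC_2 && !sdev->skip_vpd_pages;
    }

    int main(void)
    {
            struct scsi_dev quirky_usb = { .skip_vpd_pages = 1, .scsi_level = 6 };

            printf("quirky usb: %s\n", supports_vpd(&quirky_usb) ? "VPD" : "no VPD");
            return 0;
    }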
 
index 5f2a84aff29fb9cbd003e5bbe6996355a5c07075..654630bb7d0edeb48438654e47d8a60151d2c87b 100644 (file)
@@ -65,7 +65,7 @@ struct scsi_disk {
        struct device   dev;
        struct gendisk  *disk;
        atomic_t        openers;
-       sector_t        capacity;       /* size in 512-byte sectors */
+       sector_t        capacity;       /* size in logical blocks */
        u32             max_xfer_blocks;
        u32             opt_xfer_blocks;
        u32             max_ws_blocks;
@@ -146,6 +146,11 @@ static inline int scsi_medium_access_command(struct scsi_cmnd *scmd)
        return 0;
 }
 
+static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blocks)
+{
+       return blocks << (ilog2(sdev->sector_size) - 9);
+}
+
 /*
  * A DIF-capable target device can be formatted with different
  * protection schemes.  Currently 0 through 3 are defined:
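With sdkp->capacity now held in logical blocks, every consumer converts at the
point of use through logical_to_sectors(), shifting by ilog2(sector_size) - 9
to reach 512-byte sectors; the old ad-hoc rescaling in read_capacity goes
away. A userspace check of the arithmetic (ilog2_u32() is a hypothetical
helper standing in for the kernel's ilog2()):

    #include <stdio.h>
    #include <stdint.h>

    /* stand-in for the kernel's ilog2() on power-of-two inputs */
    static unsigned int ilog2_u32(uint32_t v)
    {
            unsigned int r = 0;

            while (v >>= 1)
                    r++;
            return r;
    }

    /* capacity is kept in logical blocks; convert to 512-byte sectors on use */
    static uint64_t logical_to_sectors(uint32_t sector_size, uint64_t blocks)
    {
            return blocks << (ilog2_u32(sector_size) - 9);
    }

    int main(void)
    {
            /* 1000 blocks of 4096 bytes = 8000 sectors of 512 bytes */
            printf("%llu\n", (unsigned long long)logical_to_sectors(4096, 1000));
            return 0;
    }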
index 71c5138ddf9463cc9c7d3523dbd7ea8a698e3519..dbf1882cfbacd03efca9aa9f096cc0dcc5ae40e0 100644 (file)
@@ -4941,7 +4941,7 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
  out_unmap:
        if (res > 0) {
                for (j=0; j < res; j++)
-                       page_cache_release(pages[j]);
+                       put_page(pages[j]);
                res = 0;
        }
        kfree(pages);
@@ -4963,7 +4963,7 @@ static int sgl_unmap_user_pages(struct st_buffer *STbp,
                /* FIXME: cache flush missing for rw==READ
                 * FIXME: call the correct reference counting function
                 */
-               page_cache_release(page);
+               put_page(page);
        }
        kfree(STbp->mapped_pages);
        STbp->mapped_pages = NULL;
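These st changes belong to the tree-wide sweep dropping the page-cache
aliases: page_cache_release() had long been a thin alias for put_page(), so
the substitution is purely mechanical. A toy model of the relationship
(struct page and its refcount are simplified here):

    #include <stdio.h>

    struct page { int refcount; };

    static void put_page(struct page *p)
    {
            if (--p->refcount == 0)
                    printf("last reference dropped, page freed\n");
    }

    /* page_cache_release() was only ever an alias for put_page() */
    #define page_cache_release(p) put_page(p)

    int main(void)
    {
            struct page pg = { .refcount = 2 };

            page_cache_release(&pg);        /* old spelling */
            put_page(&pg);                  /* new spelling, same operation */
            return 0;
    }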
index 57e781c71e6776223b7f13fecaa3243572302cf7..837effe199071bcb319a525e4c9498b4cf2225ba 100644 (file)
@@ -491,13 +491,14 @@ static int scpsys_probe(struct platform_device *pdev)
                genpd->dev_ops.active_wakeup = scpsys_active_wakeup;
 
                /*
-                * With CONFIG_PM disabled turn on all domains to make the
-                * hardware usable.
+                * Initially turn on all domains to make the domains usable
+                * with !CONFIG_PM and to get the hardware in sync with the
+                * software.  The unused domains will be switched off during
+                * late_init time.
                 */
-               if (!IS_ENABLED(CONFIG_PM))
-                       genpd->power_on(genpd);
+               genpd->power_on(genpd);
 
-               pm_genpd_init(genpd, NULL, true);
+               pm_genpd_init(genpd, NULL, false);
        }
 
        /*
index e7a19be87c38053f1742c785a7bfbf9d17113bfc..50769078e72e98316f5ea19efd69de536be91a72 100644 (file)
@@ -211,11 +211,15 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
                         struct spi_transfer *transfer)
 {
        struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
-       unsigned int bpw = transfer->bits_per_word;
+       unsigned int bpw;
 
        if (!master->dma_rx)
                return false;
 
+       if (!transfer)
+               return false;
+
+       bpw = transfer->bits_per_word;
        if (!bpw)
                bpw = spi->bits_per_word;
 
@@ -333,8 +337,9 @@ static void __maybe_unused mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
 static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
                struct spi_imx_config *config)
 {
-       u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0;
+       u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
        u32 clk = config->speed_hz, delay, reg;
+       u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
 
        /*
         * The hardware seems to have a race condition when changing modes. The
@@ -358,13 +363,20 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
 
        if (config->mode & SPI_CPHA)
                cfg |= MX51_ECSPI_CONFIG_SCLKPHA(config->cs);
+       else
+               cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(config->cs);
 
        if (config->mode & SPI_CPOL) {
                cfg |= MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
                cfg |= MX51_ECSPI_CONFIG_SCLKCTL(config->cs);
+       } else {
+               cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
+               cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(config->cs);
        }
        if (config->mode & SPI_CS_HIGH)
                cfg |= MX51_ECSPI_CONFIG_SSBPOL(config->cs);
+       else
+               cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(config->cs);
 
        if (spi_imx->usedma)
                ctrl |= MX51_ECSPI_CTRL_SMC;
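Seeding cfg from readl() instead of 0, and pairing every set with a clear,
turns mx51_ecspi_config() into a true read-modify-write: mode bits left over
from a previous device on another chip select are cleared rather than
inherited. A sketch of the pattern (the bit positions in SCLKPHA()/SCLKPOL()
are invented for illustration):

    #include <stdio.h>
    #include <stdint.h>

    /* invented bit positions, for illustration only */
    #define SCLKPHA(cs)     (1u << ((cs) + 0))
    #define SCLKPOL(cs)     (1u << ((cs) + 4))

    static uint32_t config_mode(uint32_t cfg, int cs, int cpha, int cpol)
    {
            /* pair every set with a clear so stale bits cannot survive */
            if (cpha)
                    cfg |= SCLKPHA(cs);
            else
                    cfg &= ~SCLKPHA(cs);
            if (cpol)
                    cfg |= SCLKPOL(cs);
            else
                    cfg &= ~SCLKPOL(cs);
            return cfg;
    }

    int main(void)
    {
            uint32_t cfg = 0xffffffff;              /* pretend stale hardware state */

            cfg = config_mode(cfg, 0, 0, 0);        /* SPI mode 0 on chip select 0 */
            printf("cfg = 0x%08x\n", (unsigned int)cfg);
            return 0;
    }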
index 0caa3c8bef46c46e0ed66bf89f518cc5c5236449..43a02e377b3b168339013d797f8da7c150959c58 100644 (file)
@@ -423,16 +423,12 @@ static void omap2_mcspi_tx_dma(struct spi_device *spi,
 
        if (mcspi_dma->dma_tx) {
                struct dma_async_tx_descriptor *tx;
-               struct scatterlist sg;
 
                dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
 
-               sg_init_table(&sg, 1);
-               sg_dma_address(&sg) = xfer->tx_dma;
-               sg_dma_len(&sg) = xfer->len;
-
-               tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1,
-               DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+               tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, xfer->tx_sg.sgl,
+                                            xfer->tx_sg.nents, DMA_MEM_TO_DEV,
+                                            DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
                if (tx) {
                        tx->callback = omap2_mcspi_tx_callback;
                        tx->callback_param = spi;
@@ -478,20 +474,15 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
 
        if (mcspi_dma->dma_rx) {
                struct dma_async_tx_descriptor *tx;
-               struct scatterlist sg;
 
                dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
 
                if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0)
                        dma_count -= es;
 
-               sg_init_table(&sg, 1);
-               sg_dma_address(&sg) = xfer->rx_dma;
-               sg_dma_len(&sg) = dma_count;
-
-               tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1,
-                               DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT |
-                               DMA_CTRL_ACK);
+               tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, xfer->rx_sg.sgl,
+                                            xfer->rx_sg.nents, DMA_DEV_TO_MEM,
+                                            DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
                if (tx) {
                        tx->callback = omap2_mcspi_rx_callback;
                        tx->callback_param = spi;
@@ -505,8 +496,6 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
        omap2_mcspi_set_dma_req(spi, 1, 1);
 
        wait_for_completion(&mcspi_dma->dma_rx_completion);
-       dma_unmap_single(mcspi->dev, xfer->rx_dma, count,
-                        DMA_FROM_DEVICE);
 
        if (mcspi->fifo_depth > 0)
                return count;
@@ -619,8 +608,6 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
 
        if (tx != NULL) {
                wait_for_completion(&mcspi_dma->dma_tx_completion);
-               dma_unmap_single(mcspi->dev, xfer->tx_dma, xfer->len,
-                                DMA_TO_DEVICE);
 
                if (mcspi->fifo_depth > 0) {
                        irqstat_reg = mcspi->base + OMAP2_MCSPI_IRQSTATUS;
@@ -1087,6 +1074,16 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
                gpio_free(spi->cs_gpio);
 }
 
+static bool omap2_mcspi_can_dma(struct spi_master *master,
+                               struct spi_device *spi,
+                               struct spi_transfer *xfer)
+{
+       if (xfer->len < DMA_MIN_BYTES)
+               return false;
+
+       return true;
+}
+
 static int omap2_mcspi_work_one(struct omap2_mcspi *mcspi,
                struct spi_device *spi, struct spi_transfer *t)
 {
@@ -1268,32 +1265,6 @@ static int omap2_mcspi_transfer_one(struct spi_master *master,
                return -EINVAL;
        }
 
-       if (len < DMA_MIN_BYTES)
-               goto skip_dma_map;
-
-       if (mcspi_dma->dma_tx && tx_buf != NULL) {
-               t->tx_dma = dma_map_single(mcspi->dev, (void *) tx_buf,
-                               len, DMA_TO_DEVICE);
-               if (dma_mapping_error(mcspi->dev, t->tx_dma)) {
-                       dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
-                                       'T', len);
-                       return -EINVAL;
-               }
-       }
-       if (mcspi_dma->dma_rx && rx_buf != NULL) {
-               t->rx_dma = dma_map_single(mcspi->dev, rx_buf, t->len,
-                               DMA_FROM_DEVICE);
-               if (dma_mapping_error(mcspi->dev, t->rx_dma)) {
-                       dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
-                                       'R', len);
-                       if (tx_buf != NULL)
-                               dma_unmap_single(mcspi->dev, t->tx_dma,
-                                               len, DMA_TO_DEVICE);
-                       return -EINVAL;
-               }
-       }
-
-skip_dma_map:
        return omap2_mcspi_work_one(mcspi, spi, t);
 }
 
@@ -1377,6 +1348,7 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
        master->transfer_one = omap2_mcspi_transfer_one;
        master->set_cs = omap2_mcspi_set_cs;
        master->cleanup = omap2_mcspi_cleanup;
+       master->can_dma = omap2_mcspi_can_dma;
        master->dev.of_node = node;
        master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ;
        master->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15;
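Registering a can_dma() callback hands buffer mapping to the SPI core: when
the callback returns true, the core builds the xfer->tx_sg/rx_sg scatterlists
itself, which is why the driver's dma_map_single()/dma_unmap_single() calls
disappear above. The callback itself reduces to a size threshold; a sketch
(the DMA_MIN_BYTES value below is illustrative, the real one lives in the
driver):

    #include <stdbool.h>
    #include <stdio.h>

    #define DMA_MIN_BYTES 160       /* illustrative threshold */

    struct transfer { unsigned int len; };

    /* the core consults this and, on true, builds the sg lists itself */
    static bool can_dma(const struct transfer *xfer)
    {
            return xfer->len >= DMA_MIN_BYTES;
    }

    int main(void)
    {
            struct transfer small = { .len = 16 }, big = { .len = 4096 };

            printf("small: %s, big: %s\n",
                   can_dma(&small) ? "DMA" : "PIO",
                   can_dma(&big) ? "DMA" : "PIO");
            return 0;
    }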
index 8f50a4020f6fff6f7c1d7e1c1f43964c734e5c3f..6c6c0013ec7a92732d2540ade876c561359f8d70 100644 (file)
@@ -534,7 +534,7 @@ static void rockchip_spi_config(struct rockchip_spi *rs)
        if (WARN_ON(rs->speed > MAX_SCLK_OUT))
                rs->speed = MAX_SCLK_OUT;
 
-       /* the minimum divsor is 2 */
+       /* the minimum divisor is 2 */
        if (rs->max_freq < 2 * rs->speed) {
                clk_set_rate(rs->spiclk, 2 * rs->speed);
                rs->max_freq = clk_get_rate(rs->spiclk);
@@ -730,23 +730,27 @@ static int rockchip_spi_probe(struct platform_device *pdev)
        master->transfer_one = rockchip_spi_transfer_one;
        master->handle_err = rockchip_spi_handle_err;
 
-       rs->dma_tx.ch = dma_request_slave_channel(rs->dev, "tx");
-       if (IS_ERR_OR_NULL(rs->dma_tx.ch)) {
+       rs->dma_tx.ch = dma_request_chan(rs->dev, "tx");
+       if (IS_ERR(rs->dma_tx.ch)) {
                /* Check tx to see if we need defer probing driver */
                if (PTR_ERR(rs->dma_tx.ch) == -EPROBE_DEFER) {
                        ret = -EPROBE_DEFER;
                        goto err_get_fifo_len;
                }
                dev_warn(rs->dev, "Failed to request TX DMA channel\n");
+               rs->dma_tx.ch = NULL;
        }
 
-       rs->dma_rx.ch = dma_request_slave_channel(rs->dev, "rx");
-       if (!rs->dma_rx.ch) {
-               if (rs->dma_tx.ch) {
+       rs->dma_rx.ch = dma_request_chan(rs->dev, "rx");
+       if (IS_ERR(rs->dma_rx.ch)) {
+               if (PTR_ERR(rs->dma_rx.ch) == -EPROBE_DEFER) {
                        dma_release_channel(rs->dma_tx.ch);
                        rs->dma_tx.ch = NULL;
+                       ret = -EPROBE_DEFER;
+                       goto err_get_fifo_len;
                }
                dev_warn(rs->dev, "Failed to request RX DMA channel\n");
+               rs->dma_rx.ch = NULL;
        }
 
        if (rs->dma_tx.ch && rs->dma_rx.ch) {
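dma_request_chan() differs from the old dma_request_slave_channel() in that it
never returns NULL: the result is either a valid channel or an error pointer,
so IS_ERR() plus a PTR_ERR() == -EPROBE_DEFER check is the right probe-time
pattern. A userspace emulation (ERR_PTR()/IS_ERR() are re-implemented here
only for the sketch, and EPROBE_DEFER's value is assumed to match the
kernel's):

    #include <stdio.h>

    #define EPROBE_DEFER 517        /* assumed to match the kernel's value */

    /* userspace re-implementations of the kernel's error-pointer helpers */
    static void *ERR_PTR(long err) { return (void *)err; }
    static long PTR_ERR(const void *p) { return (long)p; }
    static int IS_ERR(const void *p)
    {
            return (unsigned long)p >= (unsigned long)-4095;
    }

    /* like dma_request_chan(): returns a channel or an error pointer, never NULL */
    static void *fake_request_chan(int fail)
    {
            return fail ? ERR_PTR(-EPROBE_DEFER) : (void *)0x1000;
    }

    int main(void)
    {
            void *ch = fake_request_chan(1);

            if (IS_ERR(ch)) {
                    if (PTR_ERR(ch) == -EPROBE_DEFER)
                            printf("defer the probe and retry later\n");
                    else
                            printf("no DMA channel, fall back to PIO\n");
                    ch = NULL;      /* record the absence explicitly */
            }
            return 0;
    }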
index de2f2f90d7995a52ce4f803cf41ba32992e2664b..0239b45eed928697d9ccb10c2a83c1ef51958c16 100644 (file)
@@ -1209,7 +1209,7 @@ static void spi_pump_messages(struct kthread_work *work)
        struct spi_master *master =
                container_of(work, struct spi_master, pump_messages);
 
-       __spi_pump_messages(master, true, false);
+       __spi_pump_messages(master, true, master->bus_lock_flag);
 }
 
 static int spi_init_queue(struct spi_master *master)
@@ -2853,7 +2853,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message,
  */
 int spi_sync(struct spi_device *spi, struct spi_message *message)
 {
-       return __spi_sync(spi, message, 0);
+       return __spi_sync(spi, message, spi->master->bus_lock_flag);
 }
 EXPORT_SYMBOL_GPL(spi_sync);
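Passing master->bus_lock_flag instead of a literal 0 makes spi_sync() and the
message pump honor a bus lock taken via spi_bus_lock(): while the flag is set,
synchronous calls must use the already-locked path rather than contend for the
bus mutex again. Schematically (struct bus and sync_sketch() are invented for
the sketch):

    #include <stdio.h>

    struct bus { int bus_lock_flag; };      /* set while spi_bus_lock() is held */

    static void sync_sketch(const struct bus *b)
    {
            /* a literal 0 here would ignore an active bus lock entirely */
            if (b->bus_lock_flag)
                    printf("bus already locked: use the locked path\n");
            else
                    printf("take the bus mutex for this message\n");
    }

    int main(void)
    {
            struct bus b = { .bus_lock_flag = 1 };

            sync_sketch(&b);
            return 0;
    }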
 
index cf84581287b91251d3b6d116d920fc0a098b6819..5bac28a3944e2da485d191c83bc0a0bf637f23ba 100644 (file)
@@ -30,6 +30,8 @@ source "drivers/staging/wlan-ng/Kconfig"
 
 source "drivers/staging/comedi/Kconfig"
 
+source "drivers/staging/olpc_dcon/Kconfig"
+
 source "drivers/staging/rtl8192u/Kconfig"
 
 source "drivers/staging/rtl8192e/Kconfig"
index 7d6448d2046499c6335f55c4122649c01d64fd8f..a954242b0f2c2036dff9f6180b8491505d9f90e0 100644 (file)
@@ -4,6 +4,7 @@ obj-y                           += media/
 obj-$(CONFIG_SLICOSS)          += slicoss/
 obj-$(CONFIG_PRISM2_USB)       += wlan-ng/
 obj-$(CONFIG_COMEDI)           += comedi/
+obj-$(CONFIG_FB_OLPC_DCON)     += olpc_dcon/
 obj-$(CONFIG_RTL8192U)         += rtl8192u/
 obj-$(CONFIG_RTL8192E)         += rtl8192e/
 obj-$(CONFIG_R8712U)           += rtl8712/
index dab4862611549815e945a4f1ef8ca39f9df2355e..13335437c69cfbd5e08a8a805a4d2a2318bf391e 100644 (file)
@@ -88,7 +88,7 @@ do {                                                              \
 } while (0)
 
 #ifndef LIBCFS_VMALLOC_SIZE
-#define LIBCFS_VMALLOC_SIZE    (2 << PAGE_CACHE_SHIFT) /* 2 pages */
+#define LIBCFS_VMALLOC_SIZE    (2 << PAGE_SHIFT) /* 2 pages */
 #endif
 
 #define LIBCFS_ALLOC_PRE(size, mask)                                       \
index 0f2fd79e5ec8bef0bdab7c4f1dc77bff0b06e2f7..837eb22749c38c1d95f749e08d05e5bb41f98e7b 100644 (file)
@@ -57,7 +57,7 @@
 #include "../libcfs_cpu.h"
 #endif
 
-#define CFS_PAGE_MASK             (~((__u64)PAGE_CACHE_SIZE-1))
+#define CFS_PAGE_MASK             (~((__u64)PAGE_SIZE-1))
 #define page_index(p)       ((p)->index)
 
 #define memory_pressure_get() (current->flags & PF_MEMALLOC)
@@ -67,7 +67,7 @@
 #if BITS_PER_LONG == 32
 /* limit to lowmem on 32-bit systems */
 #define NUM_CACHEPAGES \
-       min(totalram_pages, 1UL << (30 - PAGE_CACHE_SHIFT) * 3 / 4)
+       min(totalram_pages, 1UL << (30 - PAGE_SHIFT) * 3 / 4)
 #else
 #define NUM_CACHEPAGES totalram_pages
 #endif
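PAGE_CACHE_SIZE and PAGE_CACHE_SHIFT were exact aliases of PAGE_SIZE and
PAGE_SHIFT, so this sweep across libcfs/lnet/lustre is a pure rename:
expressions like totalram_pages >> (20 - PAGE_SHIFT) keep converting between
pages and megabytes unchanged. The shift identity, checked in plain C
(PAGE_SHIFT pinned to 12 for the sketch):

    #include <stdio.h>

    #define PAGE_SHIFT 12                   /* assume 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
            unsigned long mb = 40;
            unsigned long pages = mb << (20 - PAGE_SHIFT);  /* MB -> pages */
            unsigned long back = pages >> (20 - PAGE_SHIFT);/* pages -> MB */

            printf("%lu MB = %lu pages of %lu bytes (round trip: %lu MB)\n",
                   mb, pages, PAGE_SIZE, back);
            return 0;
    }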
index 08f193c341c54f04a96274db7abe89c6d48d80f1..1c679cb727859f81e3ef5b79f28445f3119679cb 100644 (file)
@@ -514,7 +514,7 @@ typedef struct {
        /**
         * Starting offset of the fragment within the page. Note that the
         * end of the fragment must not pass the end of the page; i.e.,
-        * kiov_len + kiov_offset <= PAGE_CACHE_SIZE.
+        * kiov_len + kiov_offset <= PAGE_SIZE.
         */
        unsigned int     kiov_offset;
 } lnet_kiov_t;
index 3e1f24e77f644b142698a64822301c5bf3af4d1e..d4ce06d0aeebbe30041b3be0d71c320d07c03f20 100644 (file)
@@ -291,7 +291,7 @@ ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
 
        for (nob = i = 0; i < niov; i++) {
                if ((kiov[i].kiov_offset && i > 0) ||
-                   (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_CACHE_SIZE && i < niov - 1))
+                   (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_SIZE && i < niov - 1))
                        return NULL;
 
                pages[i] = kiov[i].kiov_page;
index c90e5102fe06542e2c8a66c1a3435049b1dd7d6f..c3d628bac5b810db548c902663508ea5ed3746a7 100644 (file)
@@ -517,7 +517,7 @@ int libcfs_debug_init(unsigned long bufsize)
                max = TCD_MAX_PAGES;
        } else {
                max = max / num_possible_cpus();
-               max <<= (20 - PAGE_CACHE_SHIFT);
+               max <<= (20 - PAGE_SHIFT);
        }
        rc = cfs_tracefile_init(max);
 
index ec3bc04bd89f2250889218ff0d5188a34d057698..244eb89eef689fd124e964ba0b4ca29d2ce033ce 100644 (file)
@@ -182,7 +182,7 @@ cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
        if (tcd->tcd_cur_pages > 0) {
                __LASSERT(!list_empty(&tcd->tcd_pages));
                tage = cfs_tage_from_list(tcd->tcd_pages.prev);
-               if (tage->used + len <= PAGE_CACHE_SIZE)
+               if (tage->used + len <= PAGE_SIZE)
                        return tage;
        }
 
@@ -260,7 +260,7 @@ static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd,
         * from here: this will lead to infinite recursion.
         */
 
-       if (len > PAGE_CACHE_SIZE) {
+       if (len > PAGE_SIZE) {
                pr_err("cowardly refusing to write %lu bytes in a page\n", len);
                return NULL;
        }
@@ -349,7 +349,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
        for (i = 0; i < 2; i++) {
                tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
                if (!tage) {
-                       if (needed + known_size > PAGE_CACHE_SIZE)
+                       if (needed + known_size > PAGE_SIZE)
                                mask |= D_ERROR;
 
                        cfs_trace_put_tcd(tcd);
@@ -360,7 +360,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
                string_buf = (char *)page_address(tage->page) +
                                        tage->used + known_size;
 
-               max_nob = PAGE_CACHE_SIZE - tage->used - known_size;
+               max_nob = PAGE_SIZE - tage->used - known_size;
                if (max_nob <= 0) {
                        printk(KERN_EMERG "negative max_nob: %d\n",
                               max_nob);
@@ -424,7 +424,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
        __LASSERT(debug_buf == string_buf);
 
        tage->used += needed;
-       __LASSERT(tage->used <= PAGE_CACHE_SIZE);
+       __LASSERT(tage->used <= PAGE_SIZE);
 
 console:
        if ((mask & libcfs_printk) == 0) {
@@ -835,7 +835,7 @@ EXPORT_SYMBOL(cfs_trace_copyout_string);
 
 int cfs_trace_allocate_string_buffer(char **str, int nob)
 {
-       if (nob > 2 * PAGE_CACHE_SIZE)      /* string must be "sensible" */
+       if (nob > 2 * PAGE_SIZE)            /* string must be "sensible" */
                return -EINVAL;
 
        *str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO);
@@ -951,7 +951,7 @@ int cfs_trace_set_debug_mb(int mb)
        }
 
        mb /= num_possible_cpus();
-       pages = mb << (20 - PAGE_CACHE_SHIFT);
+       pages = mb << (20 - PAGE_SHIFT);
 
        cfs_tracefile_write_lock();
 
@@ -977,7 +977,7 @@ int cfs_trace_get_debug_mb(void)
 
        cfs_tracefile_read_unlock();
 
-       return (total_pages >> (20 - PAGE_CACHE_SHIFT)) + 1;
+       return (total_pages >> (20 - PAGE_SHIFT)) + 1;
 }
 
 static int tracefiled(void *arg)
index 4c77f9044dd35d1228f64827f8df7d61620006ba..ac84e7f4c859e917ca186e1aac7ad2bf2dec48e7 100644 (file)
@@ -87,7 +87,7 @@ void libcfs_unregister_panic_notifier(void);
 extern int  libcfs_panic_in_progress;
 int cfs_trace_max_debug_mb(void);
 
-#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT))
+#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT))
 #define TCD_STOCK_PAGES (TCD_MAX_PAGES)
 #define CFS_TRACEFILE_SIZE (500 << 20)
 
@@ -96,7 +96,7 @@ int cfs_trace_max_debug_mb(void);
 /*
  * Private declare for tracefile
  */
-#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT))
+#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT))
 #define TCD_STOCK_PAGES (TCD_MAX_PAGES)
 
 #define CFS_TRACEFILE_SIZE (500 << 20)
@@ -257,7 +257,7 @@ do {                                                                    \
 do {                                                               \
        __LASSERT(tage);                                        \
        __LASSERT(tage->page);                            \
-       __LASSERT(tage->used <= PAGE_CACHE_SIZE);                        \
+       __LASSERT(tage->used <= PAGE_SIZE);                      \
        __LASSERT(page_count(tage->page) > 0);                \
 } while (0)
 
index c74514f99f90ddfadf198f7c9b3800cd458f7ff5..75d31217bf92423b04d47e622eb47e47d87f225b 100644 (file)
@@ -139,7 +139,7 @@ lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
                for (i = 0; i < (int)niov; i++) {
                        /* We take the page pointer on trust */
                        if (lmd->md_iov.kiov[i].kiov_offset +
-                           lmd->md_iov.kiov[i].kiov_len > PAGE_CACHE_SIZE)
+                           lmd->md_iov.kiov[i].kiov_len > PAGE_SIZE)
                                return -EINVAL; /* invalid length */
 
                        total_length += lmd->md_iov.kiov[i].kiov_len;
index 0009a8de77d577fc17167ea35ecb0fbdcacfd6dc..f19aa9320e3401dac60f537931caaf539df4bfa5 100644 (file)
@@ -549,12 +549,12 @@ lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
                if (len <= frag_len) {
                        dst->kiov_len = len;
                        LASSERT(dst->kiov_offset + dst->kiov_len
-                                       <= PAGE_CACHE_SIZE);
+                                       <= PAGE_SIZE);
                        return niov;
                }
 
                dst->kiov_len = frag_len;
-               LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE);
+               LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_SIZE);
 
                len -= frag_len;
                dst++;
@@ -887,7 +887,7 @@ lnet_msg2bufpool(lnet_msg_t *msg)
        rbp = &the_lnet.ln_rtrpools[cpt][0];
 
        LASSERT(msg->msg_len <= LNET_MTU);
-       while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_CACHE_SIZE) {
+       while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) {
                rbp++;
                LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
        }
index cc0c2753dd63f10e8ef8e78120663b5a9d1d0640..891fd59401d7eedbba81e19e5939d7c7cbccb4d6 100644 (file)
@@ -166,9 +166,9 @@ lnet_ipif_enumerate(char ***namesp)
        nalloc = 16;    /* first guess at max interfaces */
        toobig = 0;
        for (;;) {
-               if (nalloc * sizeof(*ifr) > PAGE_CACHE_SIZE) {
+               if (nalloc * sizeof(*ifr) > PAGE_SIZE) {
                        toobig = 1;
-                       nalloc = PAGE_CACHE_SIZE / sizeof(*ifr);
+                       nalloc = PAGE_SIZE / sizeof(*ifr);
                        CWARN("Too many interfaces: only enumerating first %d\n",
                              nalloc);
                }
index 61459cf9d58fa7121099f6b9716a8be7416a6955..b01dc424c514e0d5d48ca0f5e8dfafaf94dd7899 100644 (file)
@@ -27,8 +27,8 @@
 #define LNET_NRB_SMALL_PAGES   1
 #define LNET_NRB_LARGE_MIN     256     /* min value for each CPT */
 #define LNET_NRB_LARGE         (LNET_NRB_LARGE_MIN * 4)
-#define LNET_NRB_LARGE_PAGES   ((LNET_MTU + PAGE_CACHE_SIZE - 1) >> \
-                                PAGE_CACHE_SHIFT)
+#define LNET_NRB_LARGE_PAGES   ((LNET_MTU + PAGE_SIZE - 1) >> \
+                                PAGE_SHIFT)
 
 static char *forwarding = "";
 module_param(forwarding, charp, 0444);
@@ -1338,7 +1338,7 @@ lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt)
                        return NULL;
                }
 
-               rb->rb_kiov[i].kiov_len = PAGE_CACHE_SIZE;
+               rb->rb_kiov[i].kiov_len = PAGE_SIZE;
                rb->rb_kiov[i].kiov_offset = 0;
                rb->rb_kiov[i].kiov_page = page;
        }
index eebc92412061fef6589c076b38bf62432222e220..dcb6e506f592a0b5cf784fce5caf8431995e0243 100644 (file)
@@ -90,7 +90,7 @@ brw_client_init(sfw_test_instance_t *tsi)
                 * NB: this is not going to work for variable page size,
                 * but we have to keep it for compatibility
                 */
-               len = npg * PAGE_CACHE_SIZE;
+               len = npg * PAGE_SIZE;
 
        } else {
                test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
@@ -104,7 +104,7 @@ brw_client_init(sfw_test_instance_t *tsi)
                opc = breq->blk_opc;
                flags = breq->blk_flags;
                len = breq->blk_len;
-               npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+               npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        }
 
        if (npg > LNET_MAX_IOV || npg <= 0)
@@ -167,13 +167,13 @@ brw_fill_page(struct page *pg, int pattern, __u64 magic)
 
        if (pattern == LST_BRW_CHECK_SIMPLE) {
                memcpy(addr, &magic, BRW_MSIZE);
-               addr += PAGE_CACHE_SIZE - BRW_MSIZE;
+               addr += PAGE_SIZE - BRW_MSIZE;
                memcpy(addr, &magic, BRW_MSIZE);
                return;
        }
 
        if (pattern == LST_BRW_CHECK_FULL) {
-               for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++)
+               for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++)
                        memcpy(addr + i * BRW_MSIZE, &magic, BRW_MSIZE);
                return;
        }
@@ -198,7 +198,7 @@ brw_check_page(struct page *pg, int pattern, __u64 magic)
                if (data != magic)
                        goto bad_data;
 
-               addr += PAGE_CACHE_SIZE - BRW_MSIZE;
+               addr += PAGE_SIZE - BRW_MSIZE;
                data = *((__u64 *)addr);
                if (data != magic)
                        goto bad_data;
@@ -207,7 +207,7 @@ brw_check_page(struct page *pg, int pattern, __u64 magic)
        }
 
        if (pattern == LST_BRW_CHECK_FULL) {
-               for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++) {
+               for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++) {
                        data = *(((__u64 *)addr) + i);
                        if (data != magic)
                                goto bad_data;
@@ -278,7 +278,7 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu,
                opc = breq->blk_opc;
                flags = breq->blk_flags;
                npg = breq->blk_npg;
-               len = npg * PAGE_CACHE_SIZE;
+               len = npg * PAGE_SIZE;
 
        } else {
                test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
@@ -292,7 +292,7 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu,
                opc = breq->blk_opc;
                flags = breq->blk_flags;
                len = breq->blk_len;
-               npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+               npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        }
 
        rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc);
@@ -463,10 +463,10 @@ brw_server_handle(struct srpc_server_rpc *rpc)
                        reply->brw_status = EINVAL;
                        return 0;
                }
-               npg = reqst->brw_len >> PAGE_CACHE_SHIFT;
+               npg = reqst->brw_len >> PAGE_SHIFT;
 
        } else {
-               npg = (reqst->brw_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+               npg = (reqst->brw_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        }
 
        replymsg->msg_ses_feats = reqstmsg->msg_ses_feats;
index 5c7cb72eac9a5b4ff59162b06aacd8352a26f38c..79ee6c0bf7c14aa0c21060c2fd10efe5007c9566 100644 (file)
@@ -743,7 +743,7 @@ static int lst_test_add_ioctl(lstio_test_args_t *args)
        if (args->lstio_tes_param &&
            (args->lstio_tes_param_len <= 0 ||
             args->lstio_tes_param_len >
-            PAGE_CACHE_SIZE - sizeof(lstcon_test_t)))
+            PAGE_SIZE - sizeof(lstcon_test_t)))
                return -EINVAL;
 
        LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1);
@@ -819,7 +819,7 @@ lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_hdr *hdr)
 
        opc = data->ioc_u32[0];
 
-       if (data->ioc_plen1 > PAGE_CACHE_SIZE)
+       if (data->ioc_plen1 > PAGE_SIZE)
                return -EINVAL;
 
        LIBCFS_ALLOC(buf, data->ioc_plen1);
index bcd78888f9cc3381b99e7bfb93d1b3fdc7f60869..35a227d0c6577ab79555487b52a05a70668ba0e1 100644 (file)
@@ -786,8 +786,8 @@ lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
        test_bulk_req_t *brq = &req->tsr_u.bulk_v0;
 
        brq->blk_opc = param->blk_opc;
-       brq->blk_npg = (param->blk_size + PAGE_CACHE_SIZE - 1) /
-                       PAGE_CACHE_SIZE;
+       brq->blk_npg = (param->blk_size + PAGE_SIZE - 1) /
+                       PAGE_SIZE;
        brq->blk_flags = param->blk_flags;
 
        return 0;
@@ -822,7 +822,7 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
        if (transop == LST_TRANS_TSBCLIADD) {
                npg = sfw_id_pages(test->tes_span);
                nob = !(feats & LST_FEAT_BULK_LEN) ?
-                     npg * PAGE_CACHE_SIZE :
+                     npg * PAGE_SIZE :
                      sizeof(lnet_process_id_packed_t) * test->tes_span;
        }
 
@@ -851,8 +851,8 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
                        LASSERT(nob > 0);
 
                        len = !(feats & LST_FEAT_BULK_LEN) ?
-                             PAGE_CACHE_SIZE :
-                             min_t(int, nob, PAGE_CACHE_SIZE);
+                             PAGE_SIZE :
+                             min_t(int, nob, PAGE_SIZE);
                        nob -= len;
 
                        bulk->bk_iovs[i].kiov_offset = 0;
index 926c3970c498e1b461739b0c10808bb487a1fc46..e2c53239936684efb1ac6bd3862939deb11a4851 100644 (file)
@@ -1161,7 +1161,7 @@ sfw_add_test(struct srpc_server_rpc *rpc)
                int len;
 
                if (!(sn->sn_features & LST_FEAT_BULK_LEN)) {
-                       len = npg * PAGE_CACHE_SIZE;
+                       len = npg * PAGE_SIZE;
 
                } else {
                        len = sizeof(lnet_process_id_packed_t) *
index 69be7d6f48fa9173ad3510fc56e4e4b9e45eb680..7d7748d96332613d4b27aaba5f49cb59a5f9ddc5 100644 (file)
@@ -90,7 +90,7 @@ void srpc_set_counters(const srpc_counters_t *cnt)
 static int
 srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob)
 {
-       nob = min_t(int, nob, PAGE_CACHE_SIZE);
+       nob = min_t(int, nob, PAGE_SIZE);
 
        LASSERT(nob > 0);
        LASSERT(i >= 0 && i < bk->bk_niov);
index 288522d4d7b94fa5f42f326b522e3c33af888d8b..e689ca1846e191f9013cb4129c5ce75a65759310 100644 (file)
@@ -390,10 +390,10 @@ typedef struct sfw_test_instance {
        } tsi_u;
 } sfw_test_instance_t;
 
-/* XXX: trailing (PAGE_CACHE_SIZE % sizeof(lnet_process_id_t)) bytes at
- * the end of pages are not used */
+/* XXX: trailing (PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at the end of
+ * pages are not used */
 #define SFW_MAX_CONCUR    LST_MAX_CONCUR
-#define SFW_ID_PER_PAGE    (PAGE_CACHE_SIZE / sizeof(lnet_process_id_packed_t))
+#define SFW_ID_PER_PAGE    (PAGE_SIZE / sizeof(lnet_process_id_packed_t))
 #define SFW_MAX_NDESTS    (LNET_MAX_IOV * SFW_ID_PER_PAGE)
 #define sfw_id_pages(n)    (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE)
 
index 33e0b99e1fb4712db6b9797dc3d0f295eb0f7343..c6c7f54637fb2a35b97c1fe3b7d0e9cdb73517b9 100644 (file)
@@ -52,7 +52,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
                return;
 
        if (PagePrivate(page))
-               page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
+               page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
 
        cancel_dirty_page(page);
        ClearPageMappedToDisk(page);
index b5088b13a3051b902fa450af6be0ae9ba4f61fe9..242bb1ef62453a3b022cfdbd558a48170aab2dde 100644 (file)
@@ -1118,7 +1118,7 @@ struct lu_context_key {
        {                                                        \
                type *value;                                  \
                                                                  \
-               CLASSERT(PAGE_CACHE_SIZE >= sizeof (*value));       \
+               CLASSERT(PAGE_SIZE >= sizeof (*value));       \
                                                                  \
                value = kzalloc(sizeof(*value), GFP_NOFS);      \
                if (!value)                             \
index da8bc6eadd13fe0f5067f1eef6dddd4810b0f989..5aae1d06a5fa9a7cb80f1469485eaedc616392f7 100644 (file)
@@ -1022,16 +1022,16 @@ static inline int lu_dirent_size(struct lu_dirent *ent)
  * MDS_READPAGE page size
  *
  * This is the directory page size packed in MDS_READPAGE RPC.
- * It's different than PAGE_CACHE_SIZE because the client needs to
+ * It's different than PAGE_SIZE because the client needs to
  * access the struct lu_dirpage header packed at the beginning of
  * the "page" and without this there isn't any way to know find the
- * lu_dirpage header is if client and server PAGE_CACHE_SIZE differ.
+ * lu_dirpage header is if client and server PAGE_SIZE differ.
  */
 #define LU_PAGE_SHIFT 12
 #define LU_PAGE_SIZE  (1UL << LU_PAGE_SHIFT)
 #define LU_PAGE_MASK  (~(LU_PAGE_SIZE - 1))
 
-#define LU_PAGE_COUNT (1 << (PAGE_CACHE_SHIFT - LU_PAGE_SHIFT))
+#define LU_PAGE_COUNT (1 << (PAGE_SHIFT - LU_PAGE_SHIFT))
 
 /** @} lu_dir */
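The LU_PAGE constants pin the directory wire format to 4 KiB regardless of the
client's PAGE_SIZE: LU_PAGE_COUNT is how many wire-format directory pages fit
in one cache page, which is what lmv_adjust_dirpages() iterates over on
large-page clients. A quick check (both shift values are assumed for
illustration):

    #include <stdio.h>

    #define PAGE_SHIFT    12        /* client page size, 4 KiB here */
    #define LU_PAGE_SHIFT 12        /* wire format is fixed at 4 KiB */
    #define LU_PAGE_COUNT (1 << (PAGE_SHIFT - LU_PAGE_SHIFT))

    int main(void)
    {
            /* on a 64 KiB-page client (PAGE_SHIFT = 16) this would print 16 */
            printf("lu_dirpages per cache page: %d\n", LU_PAGE_COUNT);
            return 0;
    }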
 
index df94f9f3bef27dcb9f0ab712ad6b1e02f50afa0d..af77eb359c434333476e47bac1a6f87815fcfd35 100644 (file)
@@ -155,12 +155,12 @@ static inline void mdc_update_max_ea_from_body(struct obd_export *exp,
                if (cli->cl_max_mds_easize < body->max_mdsize) {
                        cli->cl_max_mds_easize = body->max_mdsize;
                        cli->cl_default_mds_easize =
-                           min_t(__u32, body->max_mdsize, PAGE_CACHE_SIZE);
+                           min_t(__u32, body->max_mdsize, PAGE_SIZE);
                }
                if (cli->cl_max_mds_cookiesize < body->max_cookiesize) {
                        cli->cl_max_mds_cookiesize = body->max_cookiesize;
                        cli->cl_default_mds_cookiesize =
-                           min_t(__u32, body->max_cookiesize, PAGE_CACHE_SIZE);
+                           min_t(__u32, body->max_cookiesize, PAGE_SIZE);
                }
        }
 }
index 4fa1a18b7d1510d3a924a60136a8b95c33478bae..69586a522eb7fa152b368fe8a791752fbd71ac41 100644 (file)
  */
 #define PTLRPC_MAX_BRW_BITS    (LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS)
 #define PTLRPC_MAX_BRW_SIZE    (1 << PTLRPC_MAX_BRW_BITS)
-#define PTLRPC_MAX_BRW_PAGES   (PTLRPC_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
+#define PTLRPC_MAX_BRW_PAGES   (PTLRPC_MAX_BRW_SIZE >> PAGE_SHIFT)
 
 #define ONE_MB_BRW_SIZE                (1 << LNET_MTU_BITS)
 #define MD_MAX_BRW_SIZE                (1 << LNET_MTU_BITS)
-#define MD_MAX_BRW_PAGES       (MD_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
+#define MD_MAX_BRW_PAGES       (MD_MAX_BRW_SIZE >> PAGE_SHIFT)
 #define DT_MAX_BRW_SIZE                PTLRPC_MAX_BRW_SIZE
-#define DT_MAX_BRW_PAGES       (DT_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
+#define DT_MAX_BRW_PAGES       (DT_MAX_BRW_SIZE >> PAGE_SHIFT)
 #define OFD_MAX_BRW_SIZE       (1 << LNET_MTU_BITS)
 
 /* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */
 # if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
 #  error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
 # endif
-# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE))
-#  error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE"
+# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_SIZE))
+#  error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_SIZE"
 # endif
 # if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT)
 #  error "PTLRPC_MAX_BRW_SIZE too big"
index 4a0f2e8b19f6cad6f056853429cde1be0ac9c357..4264d97650ecf4776bfe785cdf80627940d68ef2 100644 (file)
@@ -272,7 +272,7 @@ struct client_obd {
        int              cl_grant_shrink_interval; /* seconds */
 
        /* A chunk is an optimal size used by osc_extent to determine
-        * the extent size. A chunk is max(PAGE_CACHE_SIZE, OST block size)
+        * the extent size. A chunk is max(PAGE_SIZE, OST block size)
         */
        int               cl_chunkbits;
        int               cl_chunk;
@@ -1318,7 +1318,7 @@ bad_format:
 
 static inline int cli_brw_size(struct obd_device *obd)
 {
-       return obd->u.cli.cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
+       return obd->u.cli.cl_max_pages_per_rpc << PAGE_SHIFT;
 }
 
 #endif /* __OBD_H */
index 225262fa67b6cb354d2f4107ec21365ef8636134..f8ee3a3254bab69e4f08f4cb1aec469cd14cb22d 100644 (file)
@@ -500,7 +500,7 @@ extern char obd_jobid_var[];
 
 #ifdef POISON_BULK
 #define POISON_PAGE(page, val) do {              \
-       memset(kmap(page), val, PAGE_CACHE_SIZE); \
+       memset(kmap(page), val, PAGE_SIZE); \
        kunmap(page);                             \
 } while (0)
 #else
index aced41ab93a144e5b72c9f8a605f5c6c579538c2..96141d17d07f596274748cef1da4410b03f9328c 100644 (file)
@@ -758,9 +758,9 @@ int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
                                 * --bug 17336
                                 */
                                loff_t size = cl_isize_read(inode);
-                               loff_t cur_index = start >> PAGE_CACHE_SHIFT;
+                               loff_t cur_index = start >> PAGE_SHIFT;
                                loff_t size_index = (size - 1) >>
-                                                   PAGE_CACHE_SHIFT;
+                                                   PAGE_SHIFT;
 
                                if ((size == 0 && cur_index != 0) ||
                                    size_index < cur_index)
index b586d5a88d0043279e5d2b51d4e375b58a040b3e..7dd7df59aa1f2848c731027147384eb66c799c72 100644 (file)
@@ -307,8 +307,8 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
        cli->cl_avail_grant = 0;
        /* FIXME: Should limit this for the sum of all cl_dirty_max. */
        cli->cl_dirty_max = OSC_MAX_DIRTY_DEFAULT * 1024 * 1024;
-       if (cli->cl_dirty_max >> PAGE_CACHE_SHIFT > totalram_pages / 8)
-               cli->cl_dirty_max = totalram_pages << (PAGE_CACHE_SHIFT - 3);
+       if (cli->cl_dirty_max >> PAGE_SHIFT > totalram_pages / 8)
+               cli->cl_dirty_max = totalram_pages << (PAGE_SHIFT - 3);
        INIT_LIST_HEAD(&cli->cl_cache_waiters);
        INIT_LIST_HEAD(&cli->cl_loi_ready_list);
        INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list);
@@ -353,15 +353,15 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
         * In the future this should likely be increased. LU-1431
         */
        cli->cl_max_pages_per_rpc = min_t(int, PTLRPC_MAX_BRW_PAGES,
-                                         LNET_MTU >> PAGE_CACHE_SHIFT);
+                                         LNET_MTU >> PAGE_SHIFT);
 
        if (!strcmp(name, LUSTRE_MDC_NAME)) {
                cli->cl_max_rpcs_in_flight = MDC_MAX_RIF_DEFAULT;
-       } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 128 /* MB */) {
+       } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 128 /* MB */) {
                cli->cl_max_rpcs_in_flight = 2;
-       } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 256 /* MB */) {
+       } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 256 /* MB */) {
                cli->cl_max_rpcs_in_flight = 3;
-       } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 512 /* MB */) {
+       } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 512 /* MB */) {
                cli->cl_max_rpcs_in_flight = 4;
        } else {
                cli->cl_max_rpcs_in_flight = OSC_MAX_RIF_DEFAULT;
index 3e937b0502035d6083df20e807dfa7aca3175e5f..b913ba9cf97c5327dabfb5289c3c281f11c2e647 100644 (file)
 /*
  * 50 ldlm locks for 1MB of RAM.
  */
-#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_CACHE_SHIFT)) * 50)
+#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_SHIFT)) * 50)
 
 /*
  * Maximal possible grant step plan in %.
index c7904a96f9af55cb78b898b992a8d56bf4675e68..74e193e52cd635dbaa871df22be98e727e4247f9 100644 (file)
@@ -546,7 +546,7 @@ static inline int ldlm_req_handles_avail(int req_size, int off)
 {
        int avail;
 
-       avail = min_t(int, LDLM_MAXREQSIZE, PAGE_CACHE_SIZE - 512) - req_size;
+       avail = min_t(int, LDLM_MAXREQSIZE, PAGE_SIZE - 512) - req_size;
        if (likely(avail >= 0))
                avail /= (int)sizeof(struct lustre_handle);
        else
index 4e0a3e5833308215cc1c862ed540eda59519d75b..e4c82883e580973c14d8c39c08980fecd408e2fc 100644 (file)
  * a header lu_dirpage which describes the start/end hash, and whether this
  * page is empty (contains no dir entry) or its hash collides with the next page.
  * After client receives reply, several pages will be integrated into dir page
- * in PAGE_CACHE_SIZE (if PAGE_CACHE_SIZE greater than LU_PAGE_SIZE), and the
- * lu_dirpage for this integrated page will be adjusted. See
- * lmv_adjust_dirpages().
+ * in PAGE_SIZE (if PAGE_SIZE is greater than LU_PAGE_SIZE), and the lu_dirpage
+ * for this integrated page will be adjusted. See lmv_adjust_dirpages().
  *
  */
 
@@ -153,7 +152,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
        struct page **page_pool;
        struct page *page;
        struct lu_dirpage *dp;
-       int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> PAGE_CACHE_SHIFT;
+       int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> PAGE_SHIFT;
        int nrdpgs = 0; /* number of pages read actually */
        int npages;
        int i;
@@ -193,8 +192,8 @@ static int ll_dir_filler(void *_hash, struct page *page0)
                if (body->valid & OBD_MD_FLSIZE)
                        cl_isize_write(inode, body->size);
 
-               nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_CACHE_SIZE-1)
-                        >> PAGE_CACHE_SHIFT;
+               nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_SIZE-1)
+                        >> PAGE_SHIFT;
                SetPageUptodate(page0);
        }
        unlock_page(page0);
@@ -209,7 +208,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
                page = page_pool[i];
 
                if (rc < 0 || i >= nrdpgs) {
-                       page_cache_release(page);
+                       put_page(page);
                        continue;
                }
 
@@ -230,7 +229,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
                        CDEBUG(D_VFSTRACE, "page %lu add to page cache failed: %d\n",
                               offset, ret);
                }
-               page_cache_release(page);
+               put_page(page);
        }
 
        if (page_pool != &page0)
@@ -247,7 +246,7 @@ void ll_release_page(struct page *page, int remove)
                        truncate_complete_page(page->mapping, page);
                unlock_page(page);
        }
-       page_cache_release(page);
+       put_page(page);
 }
 
 /*
@@ -273,7 +272,7 @@ static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash,
        if (found > 0 && !radix_tree_exceptional_entry(page)) {
                struct lu_dirpage *dp;
 
-               page_cache_get(page);
+               get_page(page);
                spin_unlock_irq(&mapping->tree_lock);
                /*
                 * In contrast to find_lock_page() we are sure that directory
@@ -313,7 +312,7 @@ static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash,
                                page = NULL;
                        }
                } else {
-                       page_cache_release(page);
+                       put_page(page);
                        page = ERR_PTR(-EIO);
                }
 
@@ -1507,7 +1506,7 @@ skip_lmm:
                        st.st_gid     = body->gid;
                        st.st_rdev    = body->rdev;
                        st.st_size    = body->size;
-                       st.st_blksize = PAGE_CACHE_SIZE;
+                       st.st_blksize = PAGE_SIZE;
                        st.st_blocks  = body->blocks;
                        st.st_atime   = body->atime;
                        st.st_mtime   = body->mtime;
index 3e1572cb457b9077b726044d4b18f2b60be1a51b..e3c0f1dd4d3130471cddea4a547f3e3773ae755f 100644 (file)
@@ -310,10 +310,10 @@ static inline struct ll_inode_info *ll_i2info(struct inode *inode)
 /* default to about 40meg of readahead on a given system.  That much tied
  * up in 512k readahead requests serviced at 40ms each is about 1GB/s.
  */
-#define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - PAGE_CACHE_SHIFT))
+#define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - PAGE_SHIFT))
 
 /* default to read-ahead full files smaller than 2MB on the second read */
-#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_CACHE_SHIFT))
+#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_SHIFT))
 
 enum ra_stat {
        RA_STAT_HIT = 0,
@@ -975,13 +975,13 @@ struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
 static inline void ll_invalidate_page(struct page *vmpage)
 {
        struct address_space *mapping = vmpage->mapping;
-       loff_t offset = vmpage->index << PAGE_CACHE_SHIFT;
+       loff_t offset = vmpage->index << PAGE_SHIFT;
 
        LASSERT(PageLocked(vmpage));
        if (!mapping)
                return;
 
-       ll_teardown_mmaps(mapping, offset, offset + PAGE_CACHE_SIZE);
+       ll_teardown_mmaps(mapping, offset, offset + PAGE_SIZE);
        truncate_complete_page(mapping, vmpage);
 }
 
index 6d6bb33e36557352fcbe8b9a16cd9ec7c5eaa39e..b57a992688a87bb8cabdc88cb5a254eebe57f135 100644 (file)
@@ -85,7 +85,7 @@ static struct ll_sb_info *ll_init_sbi(struct super_block *sb)
 
        si_meminfo(&si);
        pages = si.totalram - si.totalhigh;
-       if (pages >> (20 - PAGE_CACHE_SHIFT) < 512)
+       if (pages >> (20 - PAGE_SHIFT) < 512)
                lru_page_max = pages / 2;
        else
                lru_page_max = (pages / 4) * 3;
@@ -272,12 +272,12 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
            valid != CLIENT_CONNECT_MDT_REQD) {
                char *buf;
 
-               buf = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+               buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
                if (!buf) {
                        err = -ENOMEM;
                        goto out_md_fid;
                }
-               obd_connect_flags2str(buf, PAGE_CACHE_SIZE,
+               obd_connect_flags2str(buf, PAGE_SIZE,
                                      valid ^ CLIENT_CONNECT_MDT_REQD, ",");
                LCONSOLE_ERROR_MSG(0x170, "Server %s does not support feature(s) needed for correct operation of this client (%s). Please upgrade server or downgrade client.\n",
                                   sbi->ll_md_exp->exp_obd->obd_name, buf);
@@ -335,7 +335,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
        if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
                sbi->ll_md_brw_size = data->ocd_brw_size;
        else
-               sbi->ll_md_brw_size = PAGE_CACHE_SIZE;
+               sbi->ll_md_brw_size = PAGE_SIZE;
 
        if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK) {
                LCONSOLE_INFO("Layout lock feature supported.\n");
index 69445a9f201196b257ea91c325494c184ba3cdc8..5b484e62ffd0a9e7babfd38201e5bc68f5cc814b 100644 (file)
@@ -58,7 +58,7 @@ void policy_from_vma(ldlm_policy_data_t *policy,
                     size_t count)
 {
        policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
-                                (vma->vm_pgoff << PAGE_CACHE_SHIFT);
+                                (vma->vm_pgoff << PAGE_SHIFT);
        policy->l_extent.end = (policy->l_extent.start + count - 1) |
                               ~CFS_PAGE_MASK;
 }
@@ -321,7 +321,7 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
 
                vmpage = vio->u.fault.ft_vmpage;
                if (result != 0 && vmpage) {
-                       page_cache_release(vmpage);
+                       put_page(vmpage);
                        vmf->page = NULL;
                }
        }
@@ -360,7 +360,7 @@ restart:
                lock_page(vmpage);
                if (unlikely(!vmpage->mapping)) { /* unlucky */
                        unlock_page(vmpage);
-                       page_cache_release(vmpage);
+                       put_page(vmpage);
                        vmf->page = NULL;
 
                        if (!printed && ++count > 16) {
@@ -457,7 +457,7 @@ int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
        LASSERTF(last > first, "last %llu first %llu\n", last, first);
        if (mapping_mapped(mapping)) {
                rc = 0;
-               unmap_mapping_range(mapping, first + PAGE_CACHE_SIZE - 1,
+               unmap_mapping_range(mapping, first + PAGE_SIZE - 1,
                                    last - first + 1, 0);
        }
 
index b725fc16cf49fab8f8d32beb418ce7e3f7ac2faf..f169c0db63b44a93017f418a1f699528d572e3af 100644 (file)
@@ -218,7 +218,7 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
                offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset;
                bio_for_each_segment(bvec, bio, iter) {
                        BUG_ON(bvec.bv_offset != 0);
-                       BUG_ON(bvec.bv_len != PAGE_CACHE_SIZE);
+                       BUG_ON(bvec.bv_len != PAGE_SIZE);
 
                        pages[page_count] = bvec.bv_page;
                        offsets[page_count] = offset;
@@ -232,7 +232,7 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
                        (rw == WRITE) ? LPROC_LL_BRW_WRITE : LPROC_LL_BRW_READ,
                        page_count);
 
-       pvec->ldp_size = page_count << PAGE_CACHE_SHIFT;
+       pvec->ldp_size = page_count << PAGE_SHIFT;
        pvec->ldp_nr = page_count;
 
        /* FIXME: in ll_direct_rw_pages, it has to allocate many cl_page{}s to
@@ -507,7 +507,7 @@ static int loop_set_fd(struct lloop_device *lo, struct file *unused,
 
        set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
 
-       lo->lo_blocksize = PAGE_CACHE_SIZE;
+       lo->lo_blocksize = PAGE_SIZE;
        lo->lo_device = bdev;
        lo->lo_flags = lo_flags;
        lo->lo_backing_file = file;
@@ -525,11 +525,11 @@ static int loop_set_fd(struct lloop_device *lo, struct file *unused,
        lo->lo_queue->queuedata = lo;
 
        /* queue parameters */
-       CLASSERT(PAGE_CACHE_SIZE < (1 << (sizeof(unsigned short) * 8)));
+       CLASSERT(PAGE_SIZE < (1 << (sizeof(unsigned short) * 8)));
        blk_queue_logical_block_size(lo->lo_queue,
-                                    (unsigned short)PAGE_CACHE_SIZE);
+                                    (unsigned short)PAGE_SIZE);
        blk_queue_max_hw_sectors(lo->lo_queue,
-                                LLOOP_MAX_SEGMENTS << (PAGE_CACHE_SHIFT - 9));
+                                LLOOP_MAX_SEGMENTS << (PAGE_SHIFT - 9));
        blk_queue_max_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS);
 
        set_capacity(disks[lo->lo_number], size);
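
Two unit details in the hunk above are worth spelling out: the CLASSERT guards the cast to unsigned short in blk_queue_logical_block_size(), so PAGE_SIZE must fit in 16 bits (true for 4 KB through 32 KB pages, not for 64 KB), and `<< (PAGE_SHIFT - 9)` converts a page count into 512-byte sectors. A sketch with assumed values (EX_MAX_SEGMENTS stands in for LLOOP_MAX_SEGMENTS, whose real value is defined elsewhere in the driver):

#define EX_PAGE_SHIFT		12	/* assumed 4 KB pages */
#define EX_MAX_SEGMENTS		16	/* hypothetical segment limit */

/* 512-byte sectors per page: 1 << (12 - 9) == 8 */
#define EX_SECTORS_PER_PAGE	(1 << (EX_PAGE_SHIFT - 9))

/* 16 pages * 8 sectors == 128 sectors == 64 KB per request */
#define EX_MAX_HW_SECTORS	(EX_MAX_SEGMENTS << (EX_PAGE_SHIFT - 9))
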
index 45941a6600fed443b2bd50f5ee0d76accffdb467..27ab1261400e4f454e58a26bec9c6a3c53039af7 100644 (file)
@@ -233,7 +233,7 @@ static ssize_t max_read_ahead_mb_show(struct kobject *kobj,
        pages_number = sbi->ll_ra_info.ra_max_pages;
        spin_unlock(&sbi->ll_lock);
 
-       mult = 1 << (20 - PAGE_CACHE_SHIFT);
+       mult = 1 << (20 - PAGE_SHIFT);
        return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
 }
 
@@ -251,12 +251,12 @@ static ssize_t max_read_ahead_mb_store(struct kobject *kobj,
        if (rc)
                return rc;
 
-       pages_number *= 1 << (20 - PAGE_CACHE_SHIFT); /* MB -> pages */
+       pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */
 
        if (pages_number > totalram_pages / 2) {
 
                CERROR("can't set file readahead more than %lu MB\n",
-                      totalram_pages >> (20 - PAGE_CACHE_SHIFT + 1)); /*1/2 of RAM*/
+                      totalram_pages >> (20 - PAGE_SHIFT + 1)); /*1/2 of RAM*/
                return -ERANGE;
        }
 
@@ -281,7 +281,7 @@ static ssize_t max_read_ahead_per_file_mb_show(struct kobject *kobj,
        pages_number = sbi->ll_ra_info.ra_max_pages_per_file;
        spin_unlock(&sbi->ll_lock);
 
-       mult = 1 << (20 - PAGE_CACHE_SHIFT);
+       mult = 1 << (20 - PAGE_SHIFT);
        return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
 }
 
@@ -326,7 +326,7 @@ static ssize_t max_read_ahead_whole_mb_show(struct kobject *kobj,
        pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages;
        spin_unlock(&sbi->ll_lock);
 
-       mult = 1 << (20 - PAGE_CACHE_SHIFT);
+       mult = 1 << (20 - PAGE_SHIFT);
        return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
 }
 
@@ -349,7 +349,7 @@ static ssize_t max_read_ahead_whole_mb_store(struct kobject *kobj,
         */
        if (pages_number > sbi->ll_ra_info.ra_max_pages_per_file) {
                CERROR("can't set max_read_ahead_whole_mb more than max_read_ahead_per_file_mb: %lu\n",
-                      sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_CACHE_SHIFT));
+                      sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_SHIFT));
                return -ERANGE;
        }
 
@@ -366,7 +366,7 @@ static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
        struct super_block     *sb    = m->private;
        struct ll_sb_info      *sbi   = ll_s2sbi(sb);
        struct cl_client_cache *cache = &sbi->ll_cache;
-       int shift = 20 - PAGE_CACHE_SHIFT;
+       int shift = 20 - PAGE_SHIFT;
        int max_cached_mb;
        int unused_mb;
 
@@ -405,7 +405,7 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
                return -EFAULT;
        kernbuf[count] = 0;
 
-       mult = 1 << (20 - PAGE_CACHE_SHIFT);
+       mult = 1 << (20 - PAGE_SHIFT);
        buffer += lprocfs_find_named_value(kernbuf, "max_cached_mb:", &count) -
                  kernbuf;
        rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
@@ -415,7 +415,7 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
        if (pages_number < 0 || pages_number > totalram_pages) {
                CERROR("%s: can't set max cache more than %lu MB\n",
                       ll_get_fsname(sb, NULL, 0),
-                      totalram_pages >> (20 - PAGE_CACHE_SHIFT));
+                      totalram_pages >> (20 - PAGE_SHIFT));
                return -ERANGE;
        }
 
index 34614acf3f8ea77a81c484032e3a9d6f8026d2b1..edab6c5b7e5002ef6e1642a6b7b135fa4f6dc9de 100644 (file)
@@ -146,10 +146,10 @@ static struct ll_cl_context *ll_cl_init(struct file *file,
                 */
                io->ci_lockreq = CILR_NEVER;
 
-               pos = vmpage->index << PAGE_CACHE_SHIFT;
+               pos = vmpage->index << PAGE_SHIFT;
 
                /* Create a temp IO to serve write. */
-               result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_CACHE_SIZE);
+               result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_SIZE);
                if (result == 0) {
                        cio->cui_fd = LUSTRE_FPRIVATE(file);
                        cio->cui_iter = NULL;
@@ -498,7 +498,7 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
                }
                if (rc != 1)
                        unlock_page(vmpage);
-               page_cache_release(vmpage);
+               put_page(vmpage);
        } else {
                which = RA_STAT_FAILED_GRAB_PAGE;
                msg   = "g_c_p_n failed";
@@ -521,13 +521,13 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
  * striped over, rather than having a constant value for all files here.
  */
 
-/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_CACHE_SHIFT)).
+/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_SHIFT)).
  * Temporarily set RAS_INCREASE_STEP to 1MB. After 4MB RPC is enabled
  * by default, this should be adjusted corresponding with max_read_ahead_mb
  * and max_read_ahead_per_file_mb otherwise the readahead budget can be used
  * up quickly which will affect read performance significantly. See LU-2816
  */
-#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_CACHE_SHIFT)
+#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_SHIFT)
 
 static inline int stride_io_mode(struct ll_readahead_state *ras)
 {
@@ -739,7 +739,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
                        end = rpc_boundary;
 
                /* Truncate RA window to end of file */
-               end = min(end, (unsigned long)((kms - 1) >> PAGE_CACHE_SHIFT));
+               end = min(end, (unsigned long)((kms - 1) >> PAGE_SHIFT));
 
                ras->ras_next_readahead = max(end, end + 1);
                RAS_CDEBUG(ras);
@@ -776,7 +776,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
        if (reserved != 0)
                ll_ra_count_put(ll_i2sbi(inode), reserved);
 
-       if (ra_end == end + 1 && ra_end == (kms >> PAGE_CACHE_SHIFT))
+       if (ra_end == end + 1 && ra_end == (kms >> PAGE_SHIFT))
                ll_ra_stats_inc(mapping, RA_STAT_EOF);
 
        /* if we didn't get to the end of the region we reserved from
@@ -985,8 +985,8 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
        if (ras->ras_requests == 2 && !ras->ras_request_index) {
                __u64 kms_pages;
 
-               kms_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
-                           PAGE_CACHE_SHIFT;
+               kms_pages = (i_size_read(inode) + PAGE_SIZE - 1) >>
+                           PAGE_SHIFT;
 
                CDEBUG(D_READA, "kmsp %llu mwp %lu mp %lu\n", kms_pages,
                       ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages_per_file);
@@ -1173,7 +1173,7 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
                 * PageWriteback or clean the page.
                 */
                result = cl_sync_file_range(inode, offset,
-                                           offset + PAGE_CACHE_SIZE - 1,
+                                           offset + PAGE_SIZE - 1,
                                            CL_FSYNC_LOCAL, 1);
                if (result > 0) {
                        /* actually we may have written more than one page.
@@ -1211,7 +1211,7 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
        int ignore_layout = 0;
 
        if (wbc->range_cyclic) {
-               start = mapping->writeback_index << PAGE_CACHE_SHIFT;
+               start = mapping->writeback_index << PAGE_SHIFT;
                end = OBD_OBJECT_EOF;
        } else {
                start = wbc->range_start;
@@ -1241,7 +1241,7 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) {
                if (end == OBD_OBJECT_EOF)
                        end = i_size_read(inode);
-               mapping->writeback_index = (end >> PAGE_CACHE_SHIFT) + 1;
+               mapping->writeback_index = (end >> PAGE_SHIFT) + 1;
        }
        return result;
 }
index 7a5db67bc68085e7749ec9238c01bda3c822ba33..69aa15e8e3efadee0c4a705a133848288884c5b9 100644 (file)
@@ -87,7 +87,7 @@ static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
         * below because they are run with page locked and all our io is
         * happening with locked page too
         */
-       if (offset == 0 && length == PAGE_CACHE_SIZE) {
+       if (offset == 0 && length == PAGE_SIZE) {
                env = cl_env_get(&refcheck);
                if (!IS_ERR(env)) {
                        inode = vmpage->mapping->host;
@@ -193,8 +193,8 @@ static inline int ll_get_user_pages(int rw, unsigned long user_addr,
                return -EFBIG;
        }
 
-       *max_pages = (user_addr + size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-       *max_pages -= user_addr >> PAGE_CACHE_SHIFT;
+       *max_pages = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       *max_pages -= user_addr >> PAGE_SHIFT;
 
        *pages = libcfs_kvzalloc(*max_pages * sizeof(**pages), GFP_NOFS);
        if (*pages) {
@@ -217,7 +217,7 @@ static void ll_free_user_pages(struct page **pages, int npages, int do_dirty)
        for (i = 0; i < npages; i++) {
                if (do_dirty)
                        set_page_dirty_lock(pages[i]);
-               page_cache_release(pages[i]);
+               put_page(pages[i]);
        }
        kvfree(pages);
 }
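
The *max_pages computation above rounds the end of the user buffer up to a page boundary, shifts both ends down to page indexes, and subtracts. Worked example under an assumed 4 KB page size (hypothetical helper, same arithmetic):

#define EX_PAGE_SHIFT	12
#define EX_PAGE_SIZE	(1UL << EX_PAGE_SHIFT)

static unsigned long ex_pages_spanned(unsigned long user_addr,
				      unsigned long size)
{
	/* index one past the last touched page, then the first page */
	unsigned long last = (user_addr + size + EX_PAGE_SIZE - 1)
				>> EX_PAGE_SHIFT;
	unsigned long first = user_addr >> EX_PAGE_SHIFT;

	return last - first;
}

/* ex_pages_spanned(0x1ff0, 32) == 2: a 32-byte buffer starting 16 bytes
 * before the 0x2000 boundary straddles two pages.
 */
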
@@ -357,7 +357,7 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
  * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc.
  */
 #define MAX_DIO_SIZE ((KMALLOC_MAX_SIZE / sizeof(struct brw_page) *      \
-                      PAGE_CACHE_SIZE) & ~(DT_MAX_BRW_SIZE - 1))
+                      PAGE_SIZE) & ~(DT_MAX_BRW_SIZE - 1))
 static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
                               loff_t file_offset)
 {
@@ -382,8 +382,8 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
        CDEBUG(D_VFSTRACE,
               "VFS Op:inode=%lu/%u(%p), size=%zd (max %lu), offset=%lld=%llx, pages %zd (max %lu)\n",
               inode->i_ino, inode->i_generation, inode, count, MAX_DIO_SIZE,
-              file_offset, file_offset, count >> PAGE_CACHE_SHIFT,
-              MAX_DIO_SIZE >> PAGE_CACHE_SHIFT);
+              file_offset, file_offset, count >> PAGE_SHIFT,
+              MAX_DIO_SIZE >> PAGE_SHIFT);
 
        /* Check that all user buffers are aligned as well */
        if (iov_iter_alignment(iter) & ~CFS_PAGE_MASK)
@@ -432,8 +432,8 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
                         * page worth of page pointers = 4MB on i386.
                         */
                        if (result == -ENOMEM &&
-                           size > (PAGE_CACHE_SIZE / sizeof(*pages)) *
-                                  PAGE_CACHE_SIZE) {
+                           size > (PAGE_SIZE / sizeof(*pages)) *
+                           PAGE_SIZE) {
                                size = ((((size / 2) - 1) |
                                         ~CFS_PAGE_MASK) + 1) &
                                        CFS_PAGE_MASK;
@@ -474,10 +474,10 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
                          loff_t pos, unsigned len, unsigned flags,
                          struct page **pagep, void **fsdata)
 {
-       pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+       pgoff_t index = pos >> PAGE_SHIFT;
        struct page *page;
        int rc;
-       unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+       unsigned from = pos & (PAGE_SIZE - 1);
 
        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page)
@@ -488,7 +488,7 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
        rc = ll_prepare_write(file, page, from, from + len);
        if (rc) {
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
        }
        return rc;
 }
@@ -497,12 +497,12 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
 {
-       unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+       unsigned from = pos & (PAGE_SIZE - 1);
        int rc;
 
        rc = ll_commit_write(file, page, from, from + copied);
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 
        return rc ?: copied;
 }
index fb0c26ee7ff3a58d6adc3c25adfc2cfd61c09169..85a835976174e7aa9be7459514f823070fd4e39b 100644 (file)
@@ -512,9 +512,9 @@ static int vvp_io_read_start(const struct lu_env *env,
                vio->cui_ra_window_set = 1;
                bead->lrr_start = cl_index(obj, pos);
                /*
-                * XXX: explicit PAGE_CACHE_SIZE
+                * XXX: explicit PAGE_SIZE
                 */
-               bead->lrr_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1);
+               bead->lrr_count = cl_index(obj, tot + PAGE_SIZE - 1);
                ll_ra_read_in(file, bead);
        }
 
@@ -959,7 +959,7 @@ static int vvp_io_prepare_write(const struct lu_env *env,
                 * We're completely overwriting an existing page, so _don't_
                 * set it up to date until commit_write
                 */
-               if (from == 0 && to == PAGE_CACHE_SIZE) {
+               if (from == 0 && to == PAGE_SIZE) {
                        CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n");
                        POISON_PAGE(page, 0x11);
                } else
@@ -1022,7 +1022,7 @@ static int vvp_io_commit_write(const struct lu_env *env,
                        set_page_dirty(vmpage);
                        vvp_write_pending(cl2ccc(obj), cp);
                } else if (result == -EDQUOT) {
-                       pgoff_t last_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
+                       pgoff_t last_index = i_size_read(inode) >> PAGE_SHIFT;
                        bool need_clip = true;
 
                        /*
@@ -1040,7 +1040,7 @@ static int vvp_io_commit_write(const struct lu_env *env,
                         * being.
                         */
                        if (last_index > pg->cp_index) {
-                               to = PAGE_CACHE_SIZE;
+                               to = PAGE_SIZE;
                                need_clip = false;
                        } else if (last_index == pg->cp_index) {
                                int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;
index 850bae73407513deecd367e54ade48617abfe0aa..33ca3eb34965ebe87a1644d70af073a06b5d88a4 100644 (file)
@@ -57,7 +57,7 @@ static void vvp_page_fini_common(struct ccc_page *cp)
        struct page *vmpage = cp->cpg_page;
 
        LASSERT(vmpage);
-       page_cache_release(vmpage);
+       put_page(vmpage);
 }
 
 static void vvp_page_fini(const struct lu_env *env,
@@ -164,12 +164,12 @@ static int vvp_page_unmap(const struct lu_env *env,
        LASSERT(vmpage);
        LASSERT(PageLocked(vmpage));
 
-       offset = vmpage->index << PAGE_CACHE_SHIFT;
+       offset = vmpage->index << PAGE_SHIFT;
 
        /*
         * XXX is it safe to call this with the page lock held?
         */
-       ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_CACHE_SIZE);
+       ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_SIZE);
        return 0;
 }
 
@@ -537,7 +537,7 @@ int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
        CLOBINVRNT(env, obj, ccc_object_invariant(obj));
 
        cpg->cpg_page = vmpage;
-       page_cache_get(vmpage);
+       get_page(vmpage);
 
        INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
        if (page->cp_type == CPT_CACHEABLE) {
index 0f776cf8a5aa37e237516237b6ada6da3823b393..9abb7c2b92315d17dd8ba670f9c43876846683d6 100644 (file)
@@ -2017,7 +2017,7 @@ static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid,
  * |s|e|f|p|ent| 0 | ... | 0 |
  * '-----------------   -----'
  *
- * However, on hosts where the native VM page size (PAGE_CACHE_SIZE) is
+ * However, on hosts where the native VM page size (PAGE_SIZE) is
  * larger than LU_PAGE_SIZE, a single host page may contain multiple
  * lu_dirpages. After reading the lu_dirpages from the MDS, the
  * ldp_hash_end of the first lu_dirpage refers to the one immediately
@@ -2048,7 +2048,7 @@ static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid,
  * - Adjust the lde_reclen of the ending entry of each lu_dirpage to span
  *   to the first entry of the next lu_dirpage.
  */
-#if PAGE_CACHE_SIZE > LU_PAGE_SIZE
+#if PAGE_SIZE > LU_PAGE_SIZE
 static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
 {
        int i;
@@ -2101,7 +2101,7 @@ static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
 }
 #else
 #define lmv_adjust_dirpages(pages, ncfspgs, nlupgs) do {} while (0)
-#endif /* PAGE_CACHE_SIZE > LU_PAGE_SIZE */
+#endif /* PAGE_SIZE > LU_PAGE_SIZE */
 
 static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
                        struct page **pages, struct ptlrpc_request **request)
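
The block comment spanning these lmv hunks explains why lmv_adjust_dirpages() exists: lu_dirpage is a fixed LU_PAGE_SIZE (4 KB) unit on the wire, so a host with larger VM pages receives several of them packed into each struct page and must stitch their hash ranges together. A rough sketch of walking the sub-pages, assuming the little-endian ldp_hash_end field named in the comment (illustration only, not the driver's actual fixup logic):

#if PAGE_SIZE > LU_PAGE_SIZE
static void ex_walk_lu_dirpages(struct page *host_page)
{
	struct lu_dirpage *dp = kmap(host_page);
	int i;

	for (i = 0; i < PAGE_SIZE / LU_PAGE_SIZE; i++) {
		/* hash range covered by this 4 KB sub-page */
		__u64 hash_end = le64_to_cpu(dp->ldp_hash_end);

		/* ... visit entries, stop at the true end-of-directory ... */

		dp = (struct lu_dirpage *)((char *)dp + LU_PAGE_SIZE);
		(void)hash_end;
	}
	kunmap(host_page);
}
#endif
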
@@ -2110,7 +2110,7 @@ static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
        struct lmv_obd          *lmv = &obd->u.lmv;
        __u64                   offset = op_data->op_offset;
        int                     rc;
-       int                     ncfspgs; /* pages read in PAGE_CACHE_SIZE */
+       int                     ncfspgs; /* pages read in PAGE_SIZE */
        int                     nlupgs; /* pages read in LU_PAGE_SIZE */
        struct lmv_tgt_desc     *tgt;
 
@@ -2129,8 +2129,8 @@ static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
        if (rc != 0)
                return rc;
 
-       ncfspgs = ((*request)->rq_bulk->bd_nob_transferred + PAGE_CACHE_SIZE - 1)
-                >> PAGE_CACHE_SHIFT;
+       ncfspgs = ((*request)->rq_bulk->bd_nob_transferred + PAGE_SIZE - 1)
+                >> PAGE_SHIFT;
        nlupgs = (*request)->rq_bulk->bd_nob_transferred >> LU_PAGE_SHIFT;
        LASSERT(!((*request)->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK));
        LASSERT(ncfspgs > 0 && ncfspgs <= op_data->op_npages);
index 55dd8ef9525ba3bedbbc7df00390f8c6739fe68d..b91d3ff18b02d2dc93913b204756aed8c45f4b3b 100644 (file)
@@ -1002,10 +1002,10 @@ restart_bulk:
 
        /* NB req now owns desc and will free it when it gets freed */
        for (i = 0; i < op_data->op_npages; i++)
-               ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);
+               ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE);
 
        mdc_readdir_pack(req, op_data->op_offset,
-                        PAGE_CACHE_SIZE * op_data->op_npages,
+                        PAGE_SIZE * op_data->op_npages,
                         &op_data->op_fid1);
 
        ptlrpc_request_set_replen(req);
@@ -1037,7 +1037,7 @@ restart_bulk:
        if (req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK) {
                CERROR("Unexpected # bytes transferred: %d (%ld expected)\n",
                       req->rq_bulk->bd_nob_transferred,
-                      PAGE_CACHE_SIZE * op_data->op_npages);
+                      PAGE_SIZE * op_data->op_npages);
                ptlrpc_req_finished(req);
                return -EPROTO;
        }
index b7dc87248032fcbf1afea9975193810109c4ca0a..3924b095bfb01923987cd06b9b683c534b0ff9bd 100644 (file)
@@ -1113,7 +1113,7 @@ static int mgc_import_event(struct obd_device *obd,
 }
 
 enum {
-       CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_CACHE_SHIFT),
+       CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_SHIFT),
        CONFIG_READ_NRPAGES      = 4
 };
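
The `1 << (20 - PAGE_SHIFT)` idiom in CONFIG_READ_NRPAGES_INIT is the number of pages per megabyte, and it recurs all through this series wherever megabytes and page counts are converted. Sanity check assuming 4 KB pages:

/* With PAGE_SHIFT == 12: 1 << (20 - 12) == 256 pages per MB,
 * so CONFIG_READ_NRPAGES_INIT above is simply 1 MB expressed in pages.
 */
#define EX_PAGES_PER_MB		(1 << (20 - 12))	/* 256 */
#define EX_MB_TO_PAGES(mb)	((mb) << (20 - 12))
#define EX_PAGES_TO_MB(pages)	((pages) >> (20 - 12))
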
 
@@ -1137,19 +1137,19 @@ static int mgc_apply_recover_logs(struct obd_device *mgc,
        LASSERT(cfg->cfg_instance);
        LASSERT(cfg->cfg_sb == cfg->cfg_instance);
 
-       inst = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+       inst = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!inst)
                return -ENOMEM;
 
-       pos = snprintf(inst, PAGE_CACHE_SIZE, "%p", cfg->cfg_instance);
-       if (pos >= PAGE_CACHE_SIZE) {
+       pos = snprintf(inst, PAGE_SIZE, "%p", cfg->cfg_instance);
+       if (pos >= PAGE_SIZE) {
                kfree(inst);
                return -E2BIG;
        }
 
        ++pos;
        buf   = inst + pos;
-       bufsz = PAGE_CACHE_SIZE - pos;
+       bufsz = PAGE_SIZE - pos;
 
        while (datalen > 0) {
                int   entry_len = sizeof(*entry);
@@ -1181,7 +1181,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc,
                /* Keep this swab for normal mixed endian handling. LU-1644 */
                if (mne_swab)
                        lustre_swab_mgs_nidtbl_entry(entry);
-               if (entry->mne_length > PAGE_CACHE_SIZE) {
+               if (entry->mne_length > PAGE_SIZE) {
                        CERROR("MNE too large (%u)\n", entry->mne_length);
                        break;
                }
@@ -1371,7 +1371,7 @@ again:
        }
        body->mcb_offset = cfg->cfg_last_idx + 1;
        body->mcb_type   = cld->cld_type;
-       body->mcb_bits   = PAGE_CACHE_SHIFT;
+       body->mcb_bits   = PAGE_SHIFT;
        body->mcb_units  = nrpages;
 
        /* allocate bulk transfer descriptor */
@@ -1383,7 +1383,7 @@ again:
        }
 
        for (i = 0; i < nrpages; i++)
-               ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);
+               ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE);
 
        ptlrpc_request_set_replen(req);
        rc = ptlrpc_queue_wait(req);
@@ -1411,7 +1411,7 @@ again:
                goto out;
        }
 
-       if (ealen > nrpages << PAGE_CACHE_SHIFT) {
+       if (ealen > nrpages << PAGE_SHIFT) {
                rc = -EINVAL;
                goto out;
        }
@@ -1439,7 +1439,7 @@ again:
 
                ptr = kmap(pages[i]);
                rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset, ptr,
-                                            min_t(int, ealen, PAGE_CACHE_SIZE),
+                                            min_t(int, ealen, PAGE_SIZE),
                                             mne_swab);
                kunmap(pages[i]);
                if (rc2 < 0) {
@@ -1448,7 +1448,7 @@ again:
                        break;
                }
 
-               ealen -= PAGE_CACHE_SIZE;
+               ealen -= PAGE_SIZE;
        }
 
 out:
index 231a2f26c693f9ef17c070d384ad0a28d27fb069..394580016638dbc889059754b96e350640f64af7 100644 (file)
@@ -1477,7 +1477,7 @@ loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
        /*
         * XXX for now.
         */
-       return (loff_t)idx << PAGE_CACHE_SHIFT;
+       return (loff_t)idx << PAGE_SHIFT;
 }
 EXPORT_SYMBOL(cl_offset);
 
@@ -1489,13 +1489,13 @@ pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
        /*
         * XXX for now.
         */
-       return offset >> PAGE_CACHE_SHIFT;
+       return offset >> PAGE_SHIFT;
 }
 EXPORT_SYMBOL(cl_index);
 
 int cl_page_size(const struct cl_object *obj)
 {
-       return 1 << PAGE_CACHE_SHIFT;
+       return 1 << PAGE_SHIFT;
 }
 EXPORT_SYMBOL(cl_page_size);
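
cl_offset(), cl_index() and cl_page_size() are the canonical byte-offset/page-index helpers, and after this rename they reduce to plain shifts by PAGE_SHIFT. A self-contained sketch of the round-trip property, assuming 4 KB pages:

static inline long long ex_offset(unsigned long idx)
{
	return (long long)idx << 12;	/* page index -> byte offset */
}

static inline unsigned long ex_index(long long off)
{
	return off >> 12;		/* byte offset -> page index */
}

/* ex_index(ex_offset(5)) == 5, and ex_offset(ex_index(4097)) == 4096:
 * converting an offset to an index and back rounds it down to a page
 * boundary.
 */
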
 
index 1a938e1376f94df46e2b66366fdfc655b35f1c17..c2cf015962dd41c1a7c3856b10425c8a413c89cf 100644 (file)
@@ -461,9 +461,9 @@ static int obd_init_checks(void)
                CWARN("LPD64 wrong length! strlen(%s)=%d != 2\n", buf, len);
                ret = -EINVAL;
        }
-       if ((u64val & ~CFS_PAGE_MASK) >= PAGE_CACHE_SIZE) {
+       if ((u64val & ~CFS_PAGE_MASK) >= PAGE_SIZE) {
                CWARN("mask failed: u64val %llu >= %llu\n", u64val,
-                     (__u64)PAGE_CACHE_SIZE);
+                     (__u64)PAGE_SIZE);
                ret = -EINVAL;
        }
 
@@ -509,7 +509,7 @@ static int __init obdclass_init(void)
         * For clients with less memory, a larger fraction is needed
         * for other purposes (mostly for BGL).
         */
-       if (totalram_pages <= 512 << (20 - PAGE_CACHE_SHIFT))
+       if (totalram_pages <= 512 << (20 - PAGE_SHIFT))
                obd_max_dirty_pages = totalram_pages / 4;
        else
                obd_max_dirty_pages = totalram_pages / 2;
index 9496c09b2b69cf9d51c0b3fb35beb62f085870d2..b41b65e2f021f38b501b0fcef03bb9e193c31b8a 100644 (file)
@@ -47,7 +47,6 @@
 #include "../../include/lustre/lustre_idl.h"
 
 #include <linux/fs.h>
-#include <linux/pagemap.h> /* for PAGE_CACHE_SIZE */
 
 void obdo_refresh_inode(struct inode *dst, struct obdo *src, u32 valid)
 {
@@ -71,8 +70,8 @@ void obdo_refresh_inode(struct inode *dst, struct obdo *src, u32 valid)
        if (valid & OBD_MD_FLBLKSZ && src->o_blksize > (1 << dst->i_blkbits))
                dst->i_blkbits = ffs(src->o_blksize) - 1;
 
-       if (dst->i_blkbits < PAGE_CACHE_SHIFT)
-               dst->i_blkbits = PAGE_CACHE_SHIFT;
+       if (dst->i_blkbits < PAGE_SHIFT)
+               dst->i_blkbits = PAGE_SHIFT;
 
        /* allocation of space */
        if (valid & OBD_MD_FLBLOCKS && src->o_blocks > dst->i_blocks)
index fd333b9e968c235d09726a0c208e6c0bcbdd9511..e6bf414a44446cc8f27b87c54a244563b9102f56 100644 (file)
@@ -100,7 +100,7 @@ static ssize_t max_dirty_mb_show(struct kobject *kobj, struct attribute *attr,
                                 char *buf)
 {
        return sprintf(buf, "%ul\n",
-                       obd_max_dirty_pages / (1 << (20 - PAGE_CACHE_SHIFT)));
+                       obd_max_dirty_pages / (1 << (20 - PAGE_SHIFT)));
 }
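
One nit visible in the context above: the format string "%ul" is the conversion "%u" followed by a literal 'l', so the value prints with a stray trailing character; "%lu" is presumably what was intended (a later cleanup, not part of this diff). Demonstration:

#include <stdio.h>

int main(void)
{
	printf("%ul\n", 40u);	/* prints "40l": %u plus a literal 'l' */
	printf("%lu\n", 40ul);	/* prints "40": the intended form */
	return 0;
}
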
 
 static ssize_t max_dirty_mb_store(struct kobject *kobj, struct attribute *attr,
@@ -113,14 +113,14 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj, struct attribute *attr,
        if (rc)
                return rc;
 
-       val *= 1 << (20 - PAGE_CACHE_SHIFT); /* convert to pages */
+       val *= 1 << (20 - PAGE_SHIFT); /* convert to pages */
 
        if (val > ((totalram_pages / 10) * 9)) {
                /* Somebody wants to assign too much memory to dirty pages */
                return -EINVAL;
        }
 
-       if (val < 4 << (20 - PAGE_CACHE_SHIFT)) {
+       if (val < 4 << (20 - PAGE_SHIFT)) {
                /* Less than 4 Mb for dirty cache is also bad */
                return -EINVAL;
        }
index 65a4746c89ca1e9565a764be954558c88d74314a..978568ada8e93025c4877681edef57f993d67d38 100644 (file)
@@ -840,8 +840,8 @@ static int lu_htable_order(void)
 
 #if BITS_PER_LONG == 32
        /* limit hashtable size for lowmem systems to low RAM */
-       if (cache_size > 1 << (30 - PAGE_CACHE_SHIFT))
-               cache_size = 1 << (30 - PAGE_CACHE_SHIFT) * 3 / 4;
+       if (cache_size > 1 << (30 - PAGE_SHIFT))
+               cache_size = 1 << (30 - PAGE_SHIFT) * 3 / 4;
 #endif
 
        /* clear off unreasonable cache setting. */
@@ -853,7 +853,7 @@ static int lu_htable_order(void)
                lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
        }
        cache_size = cache_size / 100 * lu_cache_percent *
-               (PAGE_CACHE_SIZE / 1024);
+               (PAGE_SIZE / 1024);
 
        for (bits = 1; (1 << bits) < cache_size; ++bits) {
                ;
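
One parse subtlety in the 32-bit clamp above: `<<` binds more loosely than `*` and `/` in C, so `1 << (30 - PAGE_SHIFT) * 3 / 4` evaluates as `1 << (((30 - PAGE_SHIFT) * 3) / 4)`, not as three quarters of the 1 GB page count. With assumed 4 KB pages:

/* 1 << (30 - 12) * 3 / 4   ==  1 << ((18 * 3) / 4)  ==  1 << 13  ==  8192
 * (1 << (30 - 12)) * 3 / 4 ==  262144 * 3 / 4       ==  196608
 *
 * The second line is presumably the intended "3/4 of low RAM" reading;
 * a parenthesized form expressing it would be:
 */
unsigned long ex_cache_cap = (1UL << (30 - 12)) / 4 * 3;	/* 196608 */
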
index 64ffe243f87013e0e52be3e5fa2903aba4291816..1e83669c204d988341651b0355fde627d4b00f1d 100644 (file)
@@ -278,7 +278,7 @@ static void echo_page_fini(const struct lu_env *env,
        struct page *vmpage      = ep->ep_vmpage;
 
        atomic_dec(&eco->eo_npages);
-       page_cache_release(vmpage);
+       put_page(vmpage);
 }
 
 static int echo_page_prep(const struct lu_env *env,
@@ -373,7 +373,7 @@ static int echo_page_init(const struct lu_env *env, struct cl_object *obj,
        struct echo_object *eco = cl2echo_obj(obj);
 
        ep->ep_vmpage = vmpage;
-       page_cache_get(vmpage);
+       get_page(vmpage);
        mutex_init(&ep->ep_lock);
        cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops);
        atomic_inc(&eco->eo_npages);
@@ -1138,7 +1138,7 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
        LASSERT(rc == 0);
 
        rc = cl_echo_enqueue0(env, eco, offset,
-                             offset + npages * PAGE_CACHE_SIZE - 1,
+                             offset + npages * PAGE_SIZE - 1,
                              rw == READ ? LCK_PR : LCK_PW, &lh.cookie,
                              CEF_NEVER);
        if (rc < 0)
@@ -1311,11 +1311,11 @@ echo_client_page_debug_setup(struct page *page, int rw, u64 id,
        int      delta;
 
        /* no partial pages on the client */
-       LASSERT(count == PAGE_CACHE_SIZE);
+       LASSERT(count == PAGE_SIZE);
 
        addr = kmap(page);
 
-       for (delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
+       for (delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
                if (rw == OBD_BRW_WRITE) {
                        stripe_off = offset + delta;
                        stripe_id = id;
@@ -1341,11 +1341,11 @@ static int echo_client_page_debug_check(struct page *page, u64 id,
        int     rc2;
 
        /* no partial pages on the client */
-       LASSERT(count == PAGE_CACHE_SIZE);
+       LASSERT(count == PAGE_SIZE);
 
        addr = kmap(page);
 
-       for (rc = delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
+       for (rc = delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
                stripe_off = offset + delta;
                stripe_id = id;
 
@@ -1391,7 +1391,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
                return -EINVAL;
 
        /* XXX think again with misaligned I/O */
-       npages = count >> PAGE_CACHE_SHIFT;
+       npages = count >> PAGE_SHIFT;
 
        if (rw == OBD_BRW_WRITE)
                brw_flags = OBD_BRW_ASYNC;
@@ -1408,7 +1408,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
 
        for (i = 0, pgp = pga, off = offset;
             i < npages;
-            i++, pgp++, off += PAGE_CACHE_SIZE) {
+            i++, pgp++, off += PAGE_SIZE) {
 
                LASSERT(!pgp->pg);      /* for cleanup */
 
@@ -1418,7 +1418,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
                        goto out;
 
                pages[i] = pgp->pg;
-               pgp->count = PAGE_CACHE_SIZE;
+               pgp->count = PAGE_SIZE;
                pgp->off = off;
                pgp->flag = brw_flags;
 
@@ -1473,8 +1473,8 @@ static int echo_client_prep_commit(const struct lu_env *env,
        if (count <= 0 || (count & (~CFS_PAGE_MASK)) != 0)
                return -EINVAL;
 
-       npages = batch >> PAGE_CACHE_SHIFT;
-       tot_pages = count >> PAGE_CACHE_SHIFT;
+       npages = batch >> PAGE_SHIFT;
+       tot_pages = count >> PAGE_SHIFT;
 
        lnb = kcalloc(npages, sizeof(struct niobuf_local), GFP_NOFS);
        rnb = kcalloc(npages, sizeof(struct niobuf_remote), GFP_NOFS);
@@ -1497,9 +1497,9 @@ static int echo_client_prep_commit(const struct lu_env *env,
                if (tot_pages < npages)
                        npages = tot_pages;
 
-               for (i = 0; i < npages; i++, off += PAGE_CACHE_SIZE) {
+               for (i = 0; i < npages; i++, off += PAGE_SIZE) {
                        rnb[i].offset = off;
-                       rnb[i].len = PAGE_CACHE_SIZE;
+                       rnb[i].len = PAGE_SIZE;
                        rnb[i].flags = brw_flags;
                }
 
@@ -1878,7 +1878,7 @@ static int __init obdecho_init(void)
 {
        LCONSOLE_INFO("Echo OBD driver; http://www.lustre.org/\n");
 
-       LASSERT(PAGE_CACHE_SIZE % OBD_ECHO_BLOCK_SIZE == 0);
+       LASSERT(PAGE_SIZE % OBD_ECHO_BLOCK_SIZE == 0);
 
        return echo_client_init();
 }
index 57c43c506ef228e5eb4142c6f78d7f495421f9f3..a3358c39b2f16f86bd5e6d9616b1655ec22666f9 100644 (file)
@@ -162,15 +162,15 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj,
        if (rc)
                return rc;
 
-       pages_number *= 1 << (20 - PAGE_CACHE_SHIFT); /* MB -> pages */
+       pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */
 
        if (pages_number <= 0 ||
-           pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_CACHE_SHIFT) ||
+           pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_SHIFT) ||
            pages_number > totalram_pages / 4) /* 1/4 of RAM */
                return -ERANGE;
 
        client_obd_list_lock(&cli->cl_loi_list_lock);
-       cli->cl_dirty_max = (u32)(pages_number << PAGE_CACHE_SHIFT);
+       cli->cl_dirty_max = (u32)(pages_number << PAGE_SHIFT);
        osc_wake_cache_waiters(cli);
        client_obd_list_unlock(&cli->cl_loi_list_lock);
 
@@ -182,7 +182,7 @@ static int osc_cached_mb_seq_show(struct seq_file *m, void *v)
 {
        struct obd_device *dev = m->private;
        struct client_obd *cli = &dev->u.cli;
-       int shift = 20 - PAGE_CACHE_SHIFT;
+       int shift = 20 - PAGE_SHIFT;
 
        seq_printf(m,
                   "used_mb: %d\n"
@@ -211,7 +211,7 @@ static ssize_t osc_cached_mb_seq_write(struct file *file,
                return -EFAULT;
        kernbuf[count] = 0;
 
-       mult = 1 << (20 - PAGE_CACHE_SHIFT);
+       mult = 1 << (20 - PAGE_SHIFT);
        buffer += lprocfs_find_named_value(kernbuf, "used_mb:", &count) -
                  kernbuf;
        rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
@@ -569,12 +569,12 @@ static ssize_t max_pages_per_rpc_store(struct kobject *kobj,
 
        /* if the max_pages is specified in bytes, convert to pages */
        if (val >= ONE_MB_BRW_SIZE)
-               val >>= PAGE_CACHE_SHIFT;
+               val >>= PAGE_SHIFT;
 
-       chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_CACHE_SHIFT)) - 1);
+       chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1);
        /* max_pages_per_rpc must be chunk aligned */
        val = (val + ~chunk_mask) & chunk_mask;
-       if (val == 0 || val > ocd->ocd_brw_size >> PAGE_CACHE_SHIFT) {
+       if (val == 0 || val > ocd->ocd_brw_size >> PAGE_SHIFT) {
                return -ERANGE;
        }
        client_obd_list_lock(&cli->cl_loi_list_lock);
index 63363111380cbdfdb500067d4d9b7a800fcb3c65..5f25bf83dcfc8ac198cbc6274211d364ba0e032f 100644 (file)
@@ -544,7 +544,7 @@ static int osc_extent_merge(const struct lu_env *env, struct osc_extent *cur,
                return -ERANGE;
 
        LASSERT(cur->oe_osclock == victim->oe_osclock);
-       ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_CACHE_SHIFT;
+       ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_SHIFT;
        chunk_start = cur->oe_start >> ppc_bits;
        chunk_end = cur->oe_end >> ppc_bits;
        if (chunk_start != (victim->oe_end >> ppc_bits) + 1 &&
@@ -647,8 +647,8 @@ static struct osc_extent *osc_extent_find(const struct lu_env *env,
        lock = cl_lock_at_pgoff(env, osc2cl(obj), index, NULL, 1, 0);
        LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);
 
-       LASSERT(cli->cl_chunkbits >= PAGE_CACHE_SHIFT);
-       ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
+       LASSERT(cli->cl_chunkbits >= PAGE_SHIFT);
+       ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
        chunk_mask = ~((1 << ppc_bits) - 1);
        chunksize = 1 << cli->cl_chunkbits;
        chunk = index >> ppc_bits;
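
osc extents are kept aligned to RPC "chunks": cl_chunkbits is log2 of the chunk size, so ppc_bits is log2 of the pages per chunk and shifting a page index right by it yields the chunk number. Worked example with assumed sizes (4 KB pages, 64 KB chunks):

static void ex_chunk_math(void)
{
	int ppc_bits = 16 - 12;		/* log2(pages per chunk) == 4 */
	unsigned long chunk_mask = ~((1UL << ppc_bits) - 1);	/* ~15 */
	unsigned long index = 37;
	unsigned long chunk = index >> ppc_bits;	/* 37 / 16 == 2 */
	unsigned long chunk_start = chunk << ppc_bits;	/* page 32 */

	/* chunk 2 covers page indexes 32..47, i.e. one 64 KB chunk */
	(void)chunk_mask;
	(void)chunk_start;
}
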
@@ -871,8 +871,8 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
 
        if (!sent) {
                lost_grant = ext->oe_grants;
-       } else if (blocksize < PAGE_CACHE_SIZE &&
-                  last_count != PAGE_CACHE_SIZE) {
+       } else if (blocksize < PAGE_SIZE &&
+                  last_count != PAGE_SIZE) {
                /* For short writes we shouldn't count parts of pages that
                 * span a whole chunk on the OST side, or our accounting goes
                 * wrong.  Should match the code in filter_grant_check.
@@ -884,7 +884,7 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
                if (end)
                        count += blocksize - end;
 
-               lost_grant = PAGE_CACHE_SIZE - count;
+               lost_grant = PAGE_SIZE - count;
        }
        if (ext->oe_grants > 0)
                osc_free_grant(cli, nr_pages, lost_grant);
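
The short-write branch above gives back grant the OST will never charge: when the OST blocksize is smaller than PAGE_SIZE and only part of the last page was written, the server accounts whole blocks, so the client keeps the write rounded out to blocksize and returns the remainder of the page as lost_grant. Numeric sketch under assumed sizes, mirroring the rounding but not the driver's exact variables:

#define EX_PAGE_SIZE	4096U		/* assumed page size */

static unsigned int ex_lost_grant(unsigned int start, unsigned int end_excl,
				  unsigned int blocksize)
{
	/* round the written byte range out to whole OST blocks;
	 * blocksize is assumed to be a power of two
	 */
	unsigned int from = start & ~(blocksize - 1);
	unsigned int to = (end_excl + blocksize - 1) & ~(blocksize - 1);

	return EX_PAGE_SIZE - (to - from);	/* grant handed back */
}

/* ex_lost_grant(100, 1600, 1024) == 4096 - 2048 == 2048: a write touching
 * bytes 100..1599 occupies two 1 KB blocks, so half the page's grant is
 * returned.
 */
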
@@ -967,7 +967,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
        struct osc_async_page *oap;
        struct osc_async_page *tmp;
        int pages_in_chunk = 0;
-       int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
+       int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
        __u64 trunc_chunk = trunc_index >> ppc_bits;
        int grants = 0;
        int nr_pages = 0;
@@ -1125,7 +1125,7 @@ static int osc_extent_make_ready(const struct lu_env *env,
        if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) {
                last->oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE);
                LASSERT(last->oap_count > 0);
-               LASSERT(last->oap_page_off + last->oap_count <= PAGE_CACHE_SIZE);
+               LASSERT(last->oap_page_off + last->oap_count <= PAGE_SIZE);
                last->oap_async_flags |= ASYNC_COUNT_STABLE;
        }
 
@@ -1134,7 +1134,7 @@ static int osc_extent_make_ready(const struct lu_env *env,
         */
        list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
                if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) {
-                       oap->oap_count = PAGE_CACHE_SIZE - oap->oap_page_off;
+                       oap->oap_count = PAGE_SIZE - oap->oap_page_off;
                        oap->oap_async_flags |= ASYNC_COUNT_STABLE;
                }
        }
@@ -1158,7 +1158,7 @@ static int osc_extent_expand(struct osc_extent *ext, pgoff_t index, int *grants)
        struct osc_object *obj = ext->oe_obj;
        struct client_obd *cli = osc_cli(obj);
        struct osc_extent *next;
-       int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
+       int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
        pgoff_t chunk = index >> ppc_bits;
        pgoff_t end_chunk;
        pgoff_t end_index;
@@ -1293,9 +1293,9 @@ static int osc_refresh_count(const struct lu_env *env,
                return 0;
        else if (cl_offset(obj, page->cp_index + 1) > kms)
                /* catch sub-page write at end of file */
-               return kms % PAGE_CACHE_SIZE;
+               return kms % PAGE_SIZE;
        else
-               return PAGE_CACHE_SIZE;
+               return PAGE_SIZE;
 }
 
 static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
@@ -1376,10 +1376,10 @@ static void osc_consume_write_grant(struct client_obd *cli,
        assert_spin_locked(&cli->cl_loi_list_lock.lock);
        LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
        atomic_inc(&obd_dirty_pages);
-       cli->cl_dirty += PAGE_CACHE_SIZE;
+       cli->cl_dirty += PAGE_SIZE;
        pga->flag |= OBD_BRW_FROM_GRANT;
        CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n",
-              PAGE_CACHE_SIZE, pga, pga->pg);
+              PAGE_SIZE, pga, pga->pg);
        osc_update_next_shrink(cli);
 }
 
@@ -1396,11 +1396,11 @@ static void osc_release_write_grant(struct client_obd *cli,
 
        pga->flag &= ~OBD_BRW_FROM_GRANT;
        atomic_dec(&obd_dirty_pages);
-       cli->cl_dirty -= PAGE_CACHE_SIZE;
+       cli->cl_dirty -= PAGE_SIZE;
        if (pga->flag & OBD_BRW_NOCACHE) {
                pga->flag &= ~OBD_BRW_NOCACHE;
                atomic_dec(&obd_dirty_transit_pages);
-               cli->cl_dirty_transit -= PAGE_CACHE_SIZE;
+               cli->cl_dirty_transit -= PAGE_SIZE;
        }
 }
 
@@ -1456,7 +1456,7 @@ static void osc_unreserve_grant(struct client_obd *cli,
  * used, we should return these grants to OST. There're two cases where grants
  * can be lost:
  * 1. truncate;
- * 2. blocksize at OST is less than PAGE_CACHE_SIZE and a partial page was
+ * 2. blocksize at OST is less than PAGE_SIZE and a partial page was
  *    written. In this case OST may use less chunks to serve this partial
  *    write. OSTs don't actually know the page size on the client side. so
  *    clients have to calculate lost grant by the blocksize on the OST.
@@ -1469,7 +1469,7 @@ static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
 
        client_obd_list_lock(&cli->cl_loi_list_lock);
        atomic_sub(nr_pages, &obd_dirty_pages);
-       cli->cl_dirty -= nr_pages << PAGE_CACHE_SHIFT;
+       cli->cl_dirty -= nr_pages << PAGE_SHIFT;
        cli->cl_lost_grant += lost_grant;
        if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) {
                /* borrow some grant from truncate to avoid the case that
@@ -1512,11 +1512,11 @@ static int osc_enter_cache_try(struct client_obd *cli,
        if (rc < 0)
                return 0;
 
-       if (cli->cl_dirty + PAGE_CACHE_SIZE <= cli->cl_dirty_max &&
+       if (cli->cl_dirty + PAGE_SIZE <= cli->cl_dirty_max &&
            atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) {
                osc_consume_write_grant(cli, &oap->oap_brw_page);
                if (transient) {
-                       cli->cl_dirty_transit += PAGE_CACHE_SIZE;
+                       cli->cl_dirty_transit += PAGE_SIZE;
                        atomic_inc(&obd_dirty_transit_pages);
                        oap->oap_brw_flags |= OBD_BRW_NOCACHE;
                }
@@ -1562,7 +1562,7 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
         * of queued writes and create a discontiguous rpc stream
         */
        if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) ||
-           cli->cl_dirty_max < PAGE_CACHE_SIZE     ||
+           cli->cl_dirty_max < PAGE_SIZE     ||
            cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync) {
                rc = -EDQUOT;
                goto out;
@@ -1632,7 +1632,7 @@ void osc_wake_cache_waiters(struct client_obd *cli)
 
                ocw->ocw_rc = -EDQUOT;
                /* we can't dirty more */
-               if ((cli->cl_dirty + PAGE_CACHE_SIZE > cli->cl_dirty_max) ||
+               if ((cli->cl_dirty + PAGE_SIZE > cli->cl_dirty_max) ||
                    (atomic_read(&obd_dirty_pages) + 1 >
                     obd_max_dirty_pages)) {
                        CDEBUG(D_CACHE, "no dirty room: dirty: %ld osc max %ld, sys max %d\n",
index d720b1a1c18c3ef9bebef107fb697b80a30e31fd..ce9ddd515f6450f40dc86e57f486223cb5a6ecb0 100644 (file)
@@ -410,7 +410,7 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
        int result;
 
        opg->ops_from = 0;
-       opg->ops_to = PAGE_CACHE_SIZE;
+       opg->ops_to = PAGE_SIZE;
 
        result = osc_prep_async_page(osc, opg, vmpage,
                                     cl_offset(obj, page->cp_index));
@@ -487,9 +487,9 @@ static atomic_t osc_lru_waiters = ATOMIC_INIT(0);
 /* LRU pages are freed in batch mode. OSC should at least free this
  * number of pages to avoid running out of LRU budget, and..
  */
-static const int lru_shrink_min = 2 << (20 - PAGE_CACHE_SHIFT);  /* 2M */
+static const int lru_shrink_min = 2 << (20 - PAGE_SHIFT);  /* 2M */
 /* free this number at most otherwise it will take too long time to finish. */
-static const int lru_shrink_max = 32 << (20 - PAGE_CACHE_SHIFT); /* 32M */
+static const int lru_shrink_max = 32 << (20 - PAGE_SHIFT); /* 32M */
 
 /* Check if we can free LRU slots from this OSC. If there exists LRU waiters,
  * we should free slots aggressively. In this way, slots are freed in a steady
index 74805f1ae8885efe60feb7ab4dbc4dc79c6aaec1..30526ebcad04e52e8e6830fc2d1952c19e3ebd69 100644 (file)
@@ -826,7 +826,7 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
                oa->o_undirty = 0;
        } else {
                long max_in_flight = (cli->cl_max_pages_per_rpc <<
-                                     PAGE_CACHE_SHIFT)*
+                                     PAGE_SHIFT)*
                                     (cli->cl_max_rpcs_in_flight + 1);
                oa->o_undirty = max(cli->cl_dirty_max, max_in_flight);
        }
@@ -909,11 +909,11 @@ static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
 static int osc_shrink_grant(struct client_obd *cli)
 {
        __u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
-                            (cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT);
+                            (cli->cl_max_pages_per_rpc << PAGE_SHIFT);
 
        client_obd_list_lock(&cli->cl_loi_list_lock);
        if (cli->cl_avail_grant <= target_bytes)
-               target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
+               target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
        client_obd_list_unlock(&cli->cl_loi_list_lock);
 
        return osc_shrink_grant_to_target(cli, target_bytes);
@@ -929,8 +929,8 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
         * We don't want to shrink below a single RPC, as that will negatively
         * impact block allocation and long-term performance.
         */
-       if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT)
-               target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
+       if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_SHIFT)
+               target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
 
        if (target_bytes >= cli->cl_avail_grant) {
                client_obd_list_unlock(&cli->cl_loi_list_lock);
@@ -978,7 +978,7 @@ static int osc_should_shrink_grant(struct client_obd *client)
                 * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
                 * Keep comment here so that it can be found by searching.
                 */
-               int brw_size = client->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
+               int brw_size = client->cl_max_pages_per_rpc << PAGE_SHIFT;
 
                if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
                    client->cl_avail_grant > brw_size)
@@ -1052,7 +1052,7 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
        }
 
        /* determine the appropriate chunk size used by osc_extent. */
-       cli->cl_chunkbits = max_t(int, PAGE_CACHE_SHIFT, ocd->ocd_blocksize);
+       cli->cl_chunkbits = max_t(int, PAGE_SHIFT, ocd->ocd_blocksize);
        client_obd_list_unlock(&cli->cl_loi_list_lock);
 
        CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld chunk bits: %d\n",
@@ -1317,9 +1317,9 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
                LASSERT(pg->count > 0);
                /* make sure there is no gap in the middle of page array */
                LASSERTF(page_count == 1 ||
-                        (ergo(i == 0, poff + pg->count == PAGE_CACHE_SIZE) &&
+                        (ergo(i == 0, poff + pg->count == PAGE_SIZE) &&
                          ergo(i > 0 && i < page_count - 1,
-                              poff == 0 && pg->count == PAGE_CACHE_SIZE)   &&
+                              poff == 0 && pg->count == PAGE_SIZE)   &&
                          ergo(i == page_count - 1, poff == 0)),
                         "i: %d/%d pg: %p off: %llu, count: %u\n",
                         i, page_count, pg, pg->off, pg->count);
@@ -1877,7 +1877,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
                                                oap->oap_count;
                        else
                                LASSERT(oap->oap_page_off + oap->oap_count ==
-                                       PAGE_CACHE_SIZE);
+                                       PAGE_SIZE);
                }
        }
 
@@ -1993,7 +1993,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
                tmp->oap_request = ptlrpc_request_addref(req);
 
        client_obd_list_lock(&cli->cl_loi_list_lock);
-       starting_offset >>= PAGE_CACHE_SHIFT;
+       starting_offset >>= PAGE_SHIFT;
        if (cmd == OBD_BRW_READ) {
                cli->cl_r_in_flight++;
                lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
@@ -2790,12 +2790,12 @@ out:
                                                CFS_PAGE_MASK;
 
                if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <=
-                   fm_key->fiemap.fm_start + PAGE_CACHE_SIZE - 1)
+                   fm_key->fiemap.fm_start + PAGE_SIZE - 1)
                        policy.l_extent.end = OBD_OBJECT_EOF;
                else
                        policy.l_extent.end = (fm_key->fiemap.fm_start +
                                fm_key->fiemap.fm_length +
-                               PAGE_CACHE_SIZE - 1) & CFS_PAGE_MASK;
+                               PAGE_SIZE - 1) & CFS_PAGE_MASK;
 
                ostid_build_res_name(&fm_key->oa.o_oi, &res_id);
                mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
index 1b7673eec4d710e853b79bbb82a5d6aa4cd373be..cf3ac8eee9ee476cdb45a5650af25df08a5268e3 100644 (file)
@@ -174,12 +174,12 @@ void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
        LASSERT(page);
        LASSERT(pageoffset >= 0);
        LASSERT(len > 0);
-       LASSERT(pageoffset + len <= PAGE_CACHE_SIZE);
+       LASSERT(pageoffset + len <= PAGE_SIZE);
 
        desc->bd_nob += len;
 
        if (pin)
-               page_cache_get(page);
+               get_page(page);
 
        ptlrpc_add_bulk_page(desc, page, pageoffset, len);
 }
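
The `pin` flag above decides whether the bulk descriptor takes its own reference on each page; every reference taken here must be dropped by the unpin loop in __ptlrpc_free_bulk() in the next hunk. The pairing, reduced to a sketch:

struct ex_bulk {
	struct page *pages[16];
	int count;
};

static void ex_attach_pinned(struct ex_bulk *b, struct page *page)
{
	get_page(page);			/* descriptor now owns a reference */
	b->pages[b->count++] = page;
}

static void ex_free_pinned(struct ex_bulk *b)
{
	int i;

	for (i = 0; i < b->count; i++)
		put_page(b->pages[i]);	/* drop the reference taken above */
}
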
@@ -206,7 +206,7 @@ void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin)
 
        if (unpin) {
                for (i = 0; i < desc->bd_iov_count; i++)
-                       page_cache_release(desc->bd_iov[i].kiov_page);
+                       put_page(desc->bd_iov[i].kiov_page);
        }
 
        kfree(desc);
index b4eddf29126986cfbb9114bc63e5b06871a9784e..cd94fed0ffdfd4ea37f4b9cf3936ef37c7fd0112 100644 (file)
@@ -1092,7 +1092,7 @@ finish:
 
                if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
                        cli->cl_max_pages_per_rpc =
-                               min(ocd->ocd_brw_size >> PAGE_CACHE_SHIFT,
+                               min(ocd->ocd_brw_size >> PAGE_SHIFT,
                                    cli->cl_max_pages_per_rpc);
                else if (imp->imp_connect_op == MDS_CONNECT ||
                         imp->imp_connect_op == MGS_CONNECT)
index cee04efb6fb51d7d1bc82bda63d4d6d63548715c..c95a91ce26c9ba78582f0822a6cfb3838d48b2ca 100644 (file)
@@ -308,7 +308,7 @@ ptlrpc_lprocfs_req_history_max_seq_write(struct file *file,
         * hose a kernel by allowing the request history to grow too
         * far.
         */
-       bufpages = (svc->srv_buf_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+       bufpages = (svc->srv_buf_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (val > totalram_pages / (2 * bufpages))
                return -ERANGE;
 
@@ -1226,7 +1226,7 @@ int lprocfs_wr_import(struct file *file, const char __user *buffer,
        const char prefix[] = "connection=";
        const int prefix_len = sizeof(prefix) - 1;
 
-       if (count > PAGE_CACHE_SIZE - 1 || count <= prefix_len)
+       if (count > PAGE_SIZE - 1 || count <= prefix_len)
                return -EINVAL;
 
        kbuf = kzalloc(count + 1, GFP_NOFS);
index 5f27d9c2e4efb973e5bad114c6c84e3a9b5ca0e0..30d9a164e52dcb5b8e91ded3fc455312cc371595 100644 (file)
@@ -195,7 +195,7 @@ int ptlrpc_resend(struct obd_import *imp)
        }
 
        list_for_each_entry_safe(req, next, &imp->imp_sending_list, rq_list) {
-               LASSERTF((long)req > PAGE_CACHE_SIZE && req != LP_POISON,
+               LASSERTF((long)req > PAGE_SIZE && req != LP_POISON,
                         "req %p bad\n", req);
                LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req);
                if (!ptlrpc_no_resend(req))
index 72d5b9bf5b29b9bb76b2905d57c73f1444156114..d3872b8c9a6edcaa6e6f75acc9819e03e028ff4d 100644 (file)
@@ -58,7 +58,7 @@
  * bulk encryption page pools     *
  ****************************************/
 
-#define POINTERS_PER_PAGE      (PAGE_CACHE_SIZE / sizeof(void *))
+#define POINTERS_PER_PAGE      (PAGE_SIZE / sizeof(void *))
 #define PAGES_PER_POOL         (POINTERS_PER_PAGE)
 
 #define IDLE_IDX_MAX    (100)
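
These hunks all touch the same idiom: (x + PAGE_SIZE - 1) >> PAGE_SHIFT rounds a byte count up to whole pages, as in the bufpages calculation above. A runnable userspace sketch of that arithmetic, using sysconf(_SC_PAGESIZE) as a stand-in for the kernel macros (an assumption for illustration):

#include <stdio.h>
#include <unistd.h>

int main(void)
{
        long page_size = sysconf(_SC_PAGESIZE); /* stands in for PAGE_SIZE */
        int page_shift = 0;

        while ((1L << page_shift) < page_size)  /* stands in for PAGE_SHIFT */
                page_shift++;

        long buf_size = 10000;                  /* e.g. svc->srv_buf_size */
        long bufpages = (buf_size + page_size - 1) >> page_shift;

        /* 10000 bytes on a 4 KiB-page system needs 3 pages */
        printf("%ld bytes -> %ld page(s) of %ld\n", buf_size, bufpages, page_size);
        return 0;
}
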
index b793c04028a3034ac5b732c25af258dbded1f5f3..be72a8e5f221eba0548341ecf1fab8a0a58ee3d3 100644 (file)
@@ -172,9 +172,11 @@ static int vpfe_prepare_pipeline(struct vpfe_video_device *video)
 static int vpfe_update_pipe_state(struct vpfe_video_device *video)
 {
        struct vpfe_pipeline *pipe = &video->pipe;
+       int ret;
 
-       if (vpfe_prepare_pipeline(video))
-               return vpfe_prepare_pipeline(video);
+       ret = vpfe_prepare_pipeline(video);
+       if (ret)
+               return ret;
 
        /*
         * Find out if there is any input video
@@ -182,9 +184,10 @@ static int vpfe_update_pipe_state(struct vpfe_video_device *video)
         */
        if (pipe->input_num == 0) {
                pipe->state = VPFE_PIPELINE_STREAM_CONTINUOUS;
-               if (vpfe_update_current_ext_subdev(video)) {
+               ret = vpfe_update_current_ext_subdev(video);
+               if (ret) {
                        pr_err("Invalid external subdev\n");
-                       return vpfe_update_current_ext_subdev(video);
+                       return ret;
                }
        } else {
                pipe->state = VPFE_PIPELINE_STREAM_SINGLESHOT;
@@ -667,6 +670,7 @@ static int vpfe_enum_fmt(struct file *file, void  *priv,
        struct v4l2_subdev *subdev;
        struct v4l2_format format;
        struct media_pad *remote;
+       int ret;
 
        v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_fmt\n");
 
@@ -695,10 +699,11 @@ static int vpfe_enum_fmt(struct file *file, void  *priv,
        sd_fmt.pad = remote->index;
        sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
        /* get output format of remote subdev */
-       if (v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt)) {
+       ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt);
+       if (ret) {
                v4l2_err(&vpfe_dev->v4l2_dev,
                         "invalid remote subdev for video node\n");
-               return v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt);
+               return ret;
        }
        /* convert to pix format */
        mbus.code = sd_fmt.format.code;
@@ -725,6 +730,7 @@ static int vpfe_s_fmt(struct file *file, void *priv,
        struct vpfe_video_device *video = video_drvdata(file);
        struct vpfe_device *vpfe_dev = video->vpfe_dev;
        struct v4l2_format format;
+       int ret;
 
        v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt\n");
        /* If streaming is started, return error */
@@ -733,8 +739,9 @@ static int vpfe_s_fmt(struct file *file, void *priv,
                return -EBUSY;
        }
        /* get adjacent subdev's output pad format */
-       if (__vpfe_video_get_format(video, &format))
-               return __vpfe_video_get_format(video, &format);
+       ret = __vpfe_video_get_format(video, &format);
+       if (ret)
+               return ret;
        *fmt = format;
        video->fmt = *fmt;
        return 0;
@@ -757,11 +764,13 @@ static int vpfe_try_fmt(struct file *file, void *priv,
        struct vpfe_video_device *video = video_drvdata(file);
        struct vpfe_device *vpfe_dev = video->vpfe_dev;
        struct v4l2_format format;
+       int ret;
 
        v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt\n");
        /* get adjacent subdev's output pad format */
-       if (__vpfe_video_get_format(video, &format))
-               return __vpfe_video_get_format(video, &format);
+       ret = __vpfe_video_get_format(video, &format);
+       if (ret)
+               return ret;
 
        *fmt = format;
        return 0;
@@ -838,8 +847,9 @@ static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
 
        v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_input\n");
 
-       if (mutex_lock_interruptible(&video->lock))
-               return mutex_lock_interruptible(&video->lock);
+       ret = mutex_lock_interruptible(&video->lock);
+       if (ret)
+               return ret;
        /*
         * If streaming is started return device busy
         * error
@@ -940,8 +950,9 @@ static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
        v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_std\n");
 
        /* Call decoder driver function to set the standard */
-       if (mutex_lock_interruptible(&video->lock))
-               return mutex_lock_interruptible(&video->lock);
+       ret = mutex_lock_interruptible(&video->lock);
+       if (ret)
+               return ret;
        sdinfo = video->current_ext_subdev;
        /* If streaming is started, return device busy error */
        if (video->started) {
@@ -1327,8 +1338,9 @@ static int vpfe_reqbufs(struct file *file, void *priv,
                return -EINVAL;
        }
 
-       if (mutex_lock_interruptible(&video->lock))
-               return mutex_lock_interruptible(&video->lock);
+       ret = mutex_lock_interruptible(&video->lock);
+       if (ret)
+               return ret;
 
        if (video->io_usrs != 0) {
                v4l2_err(&vpfe_dev->v4l2_dev, "Only one IO user allowed\n");
@@ -1354,10 +1366,11 @@ static int vpfe_reqbufs(struct file *file, void *priv,
        q->buf_struct_size = sizeof(struct vpfe_cap_buffer);
        q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 
-       if (vb2_queue_init(q)) {
+       ret = vb2_queue_init(q);
+       if (ret) {
                v4l2_err(&vpfe_dev->v4l2_dev, "vb2_queue_init() failed\n");
                vb2_dma_contig_cleanup_ctx(vpfe_dev->pdev);
-               return vb2_queue_init(q);
+               return ret;
        }
 
        fh->io_allowed = 1;
@@ -1533,8 +1546,9 @@ static int vpfe_streamoff(struct file *file, void *priv,
                return -EINVAL;
        }
 
-       if (mutex_lock_interruptible(&video->lock))
-               return mutex_lock_interruptible(&video->lock);
+       ret = mutex_lock_interruptible(&video->lock);
+       if (ret)
+               return ret;
 
        vpfe_stop_capture(video);
        ret = vb2_streamoff(&video->buffer_queue, buf_type);
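
Every hunk in this file fixes the same anti-pattern: if (f()) return f(); calls f() twice on failure. The retry repeats side effects and may return a different value; in the mutex_lock_interruptible() cases above, a second call that succeeds would return 0 with the mutex held and never released. A minimal sketch of the bug and the fix, using a hypothetical do_setup() with a visible side effect:

#include <stdio.h>

static int calls;

/* Hypothetical operation: fails on the first invocation, "succeeds"
 * on the second, and counts how often it ran. */
static int do_setup(void)
{
        calls++;
        return calls == 1 ? -1 : 0;
}

/* Buggy shape used throughout the old driver code. */
static int buggy(void)
{
        if (do_setup())
                return do_setup();      /* second call: new side effects, different rc */
        return 0;
}

/* Fixed shape: call once, cache the return value. */
static int fixed(void)
{
        int ret = do_setup();

        if (ret)
                return ret;
        return 0;
}

int main(void)
{
        int rc;

        calls = 0;
        rc = buggy();
        printf("buggy -> rc=%d after %d call(s)\n", rc, calls); /* rc=0, 2 calls: error masked! */

        calls = 0;
        rc = fixed();
        printf("fixed -> rc=%d after %d call(s)\n", rc, calls); /* rc=-1, 1 call */
        return 0;
}
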
diff --git a/drivers/staging/olpc_dcon/Kconfig b/drivers/staging/olpc_dcon/Kconfig
new file mode 100644 (file)
index 0000000..d277f04
--- /dev/null
@@ -0,0 +1,35 @@
+config FB_OLPC_DCON
+       tristate "One Laptop Per Child Display CONtroller support"
+       depends on OLPC && FB
+       depends on I2C
+       depends on (GPIO_CS5535 || GPIO_CS5535=n)
+       select BACKLIGHT_CLASS_DEVICE
+       ---help---
+         In order to support very low power operation, the XO laptop uses a
+         secondary Display CONtroller, or DCON.  This secondary controller
+         is present in the video pipeline between the primary display
+         controller (integrated into the processor or chipset) and the LCD
+         panel.  It allows the main processor/display controller to be
+         completely powered off while still retaining an image on the display.
+         This controller is only available on OLPC platforms.  Unless you have
+         one of these platforms, you will want to say 'N'.
+
+config FB_OLPC_DCON_1
+       bool "OLPC XO-1 DCON support"
+       depends on FB_OLPC_DCON && GPIO_CS5535
+       default y
+       ---help---
+         Enable support for the DCON in XO-1 model laptops.  The kernel
+         communicates with the DCON using model-specific code.  If you
+         have an XO-1 (or if you're unsure what model you have), you should
+         say 'Y'.
+
+config FB_OLPC_DCON_1_5
+       bool "OLPC XO-1.5 DCON support"
+       depends on FB_OLPC_DCON && ACPI
+       default y
+       ---help---
+         Enable support for the DCON in XO-1.5 model laptops.  The kernel
+         communicates with the DCON using model-specific code.  If you
+         have an XO-1.5 (or if you're unsure what model you have), you
+         should say 'Y'.
diff --git a/drivers/staging/olpc_dcon/Makefile b/drivers/staging/olpc_dcon/Makefile
new file mode 100644 (file)
index 0000000..36c7e67
--- /dev/null
@@ -0,0 +1,6 @@
+olpc-dcon-objs += olpc_dcon.o
+olpc-dcon-$(CONFIG_FB_OLPC_DCON_1)     += olpc_dcon_xo_1.o
+olpc-dcon-$(CONFIG_FB_OLPC_DCON_1_5)   += olpc_dcon_xo_1_5.o
+obj-$(CONFIG_FB_OLPC_DCON)     += olpc-dcon.o
+
+
diff --git a/drivers/staging/olpc_dcon/TODO b/drivers/staging/olpc_dcon/TODO
new file mode 100644 (file)
index 0000000..61c2e65
--- /dev/null
@@ -0,0 +1,9 @@
+TODO:
+       - see if vx855 gpio API can be made similar enough to cs5535 so we can
+         share more code
+       - allow simultaneous XO-1 and XO-1.5 support
+
+Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
+copy:
+       Daniel Drake <dsd@laptop.org>
+       Jens Frederich <jfrederich@gmail.com>
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
new file mode 100644 (file)
index 0000000..f45b2ef
--- /dev/null
@@ -0,0 +1,813 @@
+/*
+ * Mainly by David Woodhouse, somewhat modified by Jordan Crouse
+ *
+ * Copyright © 2006-2007  Red Hat, Inc.
+ * Copyright © 2006-2007  Advanced Micro Devices, Inc.
+ * Copyright © 2009       VIA Technology, Inc.
+ * Copyright (c) 2010-2011  Andres Salomon <dilinger@queued.net>
+ *
+ * This program is free software.  You can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/fb.h>
+#include <linux/console.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/backlight.h>
+#include <linux/device.h>
+#include <linux/uaccess.h>
+#include <linux/ctype.h>
+#include <linux/reboot.h>
+#include <linux/olpc-ec.h>
+#include <asm/tsc.h>
+#include <asm/olpc.h>
+
+#include "olpc_dcon.h"
+
+/* Module definitions */
+
+static ushort resumeline = 898;
+module_param(resumeline, ushort, 0444);
+
+static struct dcon_platform_data *pdata;
+
+/* I2C structures */
+
+/* Platform devices */
+static struct platform_device *dcon_device;
+
+static unsigned short normal_i2c[] = { 0x0d, I2C_CLIENT_END };
+
+static s32 dcon_write(struct dcon_priv *dcon, u8 reg, u16 val)
+{
+       return i2c_smbus_write_word_data(dcon->client, reg, val);
+}
+
+static s32 dcon_read(struct dcon_priv *dcon, u8 reg)
+{
+       return i2c_smbus_read_word_data(dcon->client, reg);
+}
+
+/* ===== API functions - these are called by a variety of users ==== */
+
+static int dcon_hw_init(struct dcon_priv *dcon, int is_init)
+{
+       u16 ver;
+       int rc = 0;
+
+       ver = dcon_read(dcon, DCON_REG_ID);
+       if ((ver >> 8) != 0xDC) {
+               pr_err("DCON ID not 0xDCxx: 0x%04x instead.\n", ver);
+               rc = -ENXIO;
+               goto err;
+       }
+
+       if (is_init) {
+               pr_info("Discovered DCON version %x\n", ver & 0xFF);
+               rc = pdata->init(dcon);
+               if (rc != 0) {
+                       pr_err("Unable to init.\n");
+                       goto err;
+               }
+       }
+
+       if (ver < 0xdc02) {
+               dev_err(&dcon->client->dev,
+                               "DCON v1 is unsupported, giving up..\n");
+               rc = -ENODEV;
+               goto err;
+       }
+
+       /* SDRAM setup/hold time */
+       dcon_write(dcon, 0x3a, 0xc040);
+       dcon_write(dcon, DCON_REG_MEM_OPT_A, 0x0000);  /* clear option bits */
+       dcon_write(dcon, DCON_REG_MEM_OPT_A,
+                               MEM_DLL_CLOCK_DELAY | MEM_POWER_DOWN);
+       dcon_write(dcon, DCON_REG_MEM_OPT_B, MEM_SOFT_RESET);
+
+       /* Colour swizzle, AA, no passthrough, backlight */
+       if (is_init) {
+               dcon->disp_mode = MODE_PASSTHRU | MODE_BL_ENABLE |
+                               MODE_CSWIZZLE | MODE_COL_AA;
+       }
+       dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
+
+       /* Set the scanline to interrupt on during resume */
+       dcon_write(dcon, DCON_REG_SCAN_INT, resumeline);
+
+err:
+       return rc;
+}
+
+/*
+ * The smbus doesn't always come back due to what is believed to be
+ * hardware (power rail) bugs.  For older models where this is known to
+ * occur, our solution is to attempt to wait for the bus to stabilize;
+ * if it doesn't happen, cut power to the dcon, repower it, and wait
+ * for the bus to stabilize.  Rinse, repeat until we have a working
+ * smbus.  For newer models, we simply BUG(); we want to know if this
+ * still happens despite the power fixes that have been made!
+ */
+static int dcon_bus_stabilize(struct dcon_priv *dcon, int is_powered_down)
+{
+       unsigned long timeout;
+       u8 pm;
+       int x;
+
+power_up:
+       if (is_powered_down) {
+               pm = 1;
+               x = olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0);
+               if (x) {
+                       pr_warn("unable to force dcon to power up: %d!\n", x);
+                       return x;
+               }
+               usleep_range(10000, 11000);  /* we'll be conservative */
+       }
+
+       pdata->bus_stabilize_wiggle();
+
+       for (x = -1, timeout = 50; timeout && x < 0; timeout--) {
+               usleep_range(1000, 1100);
+               x = dcon_read(dcon, DCON_REG_ID);
+       }
+       if (x < 0) {
+               pr_err("unable to stabilize dcon's smbus, reasserting power and praying.\n");
+               BUG_ON(olpc_board_at_least(olpc_board(0xc2)));
+               pm = 0;
+               olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0);
+               msleep(100);
+               is_powered_down = 1;
+               goto power_up;  /* argh, stupid hardware.. */
+       }
+
+       if (is_powered_down)
+               return dcon_hw_init(dcon, 0);
+       return 0;
+}
+
+static void dcon_set_backlight(struct dcon_priv *dcon, u8 level)
+{
+       dcon->bl_val = level;
+       dcon_write(dcon, DCON_REG_BRIGHT, dcon->bl_val);
+
+       /* Purposely turn off the backlight when we go to level 0 */
+       if (dcon->bl_val == 0) {
+               dcon->disp_mode &= ~MODE_BL_ENABLE;
+               dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
+       } else if (!(dcon->disp_mode & MODE_BL_ENABLE)) {
+               dcon->disp_mode |= MODE_BL_ENABLE;
+               dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
+       }
+}
+
+/* Set the output type to either color or mono */
+static int dcon_set_mono_mode(struct dcon_priv *dcon, bool enable_mono)
+{
+       if (dcon->mono == enable_mono)
+               return 0;
+
+       dcon->mono = enable_mono;
+
+       if (enable_mono) {
+               dcon->disp_mode &= ~(MODE_CSWIZZLE | MODE_COL_AA);
+               dcon->disp_mode |= MODE_MONO_LUMA;
+       } else {
+               dcon->disp_mode &= ~(MODE_MONO_LUMA);
+               dcon->disp_mode |= MODE_CSWIZZLE | MODE_COL_AA;
+       }
+
+       dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
+       return 0;
+}
+
+/* For now, this will be really stupid - we need to address how
+ * DCONLOAD works in a sleep and account for it accordingly
+ */
+
+static void dcon_sleep(struct dcon_priv *dcon, bool sleep)
+{
+       int x;
+
+       /* Turn off the backlight and put the DCON to sleep */
+
+       if (dcon->asleep == sleep)
+               return;
+
+       if (!olpc_board_at_least(olpc_board(0xc2)))
+               return;
+
+       if (sleep) {
+               u8 pm = 0;
+
+               x = olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0);
+               if (x)
+                       pr_warn("unable to force dcon to power down: %d!\n", x);
+               else
+                       dcon->asleep = sleep;
+       } else {
+               /* Only re-enable the backlight if the backlight value is set */
+               if (dcon->bl_val != 0)
+                       dcon->disp_mode |= MODE_BL_ENABLE;
+               x = dcon_bus_stabilize(dcon, 1);
+               if (x)
+                       pr_warn("unable to reinit dcon hardware: %d!\n", x);
+               else
+                       dcon->asleep = sleep;
+
+               /* Restore backlight */
+               dcon_set_backlight(dcon, dcon->bl_val);
+       }
+
+       /* We should turn off some stuff in the framebuffer - but what? */
+}
+
+/* the DCON seems to get confused if we change DCONLOAD too
+ * frequently -- i.e., approximately faster than frame time.
+ * normally we don't change it this fast, so in general we won't
+ * delay here.
+ */
+static void dcon_load_holdoff(struct dcon_priv *dcon)
+{
+       ktime_t delta_t, now;
+
+       while (1) {
+               now = ktime_get();
+               delta_t = ktime_sub(now, dcon->load_time);
+               if (ktime_to_ns(delta_t) > NSEC_PER_MSEC * 20)
+                       break;
+               mdelay(4);
+       }
+}
+
+static bool dcon_blank_fb(struct dcon_priv *dcon, bool blank)
+{
+       int err;
+
+       console_lock();
+       if (!lock_fb_info(dcon->fbinfo)) {
+               console_unlock();
+               dev_err(&dcon->client->dev, "unable to lock framebuffer\n");
+               return false;
+       }
+
+       dcon->ignore_fb_events = true;
+       err = fb_blank(dcon->fbinfo,
+                       blank ? FB_BLANK_POWERDOWN : FB_BLANK_UNBLANK);
+       dcon->ignore_fb_events = false;
+       unlock_fb_info(dcon->fbinfo);
+       console_unlock();
+
+       if (err) {
+               dev_err(&dcon->client->dev, "couldn't %sblank framebuffer\n",
+                               blank ? "" : "un");
+               return false;
+       }
+       return true;
+}
+
+/* Set the source of the display (CPU or DCON) */
+static void dcon_source_switch(struct work_struct *work)
+{
+       struct dcon_priv *dcon = container_of(work, struct dcon_priv,
+                       switch_source);
+       int source = dcon->pending_src;
+
+       if (dcon->curr_src == source)
+               return;
+
+       dcon_load_holdoff(dcon);
+
+       dcon->switched = false;
+
+       switch (source) {
+       case DCON_SOURCE_CPU:
+               pr_info("dcon_source_switch to CPU\n");
+               /* Enable the scanline interrupt bit */
+               if (dcon_write(dcon, DCON_REG_MODE,
+                               dcon->disp_mode | MODE_SCAN_INT))
+                       pr_err("couldn't enable scanline interrupt!\n");
+               else
+                       /* Wait up to one second for the scanline interrupt */
+                       wait_event_timeout(dcon->waitq, dcon->switched, HZ);
+
+               if (!dcon->switched)
+                       pr_err("Timeout entering CPU mode; expect a screen glitch.\n");
+
+               /* Turn off the scanline interrupt */
+               if (dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode))
+                       pr_err("couldn't disable scanline interrupt!\n");
+
+               /*
+                * Ideally we'd like to disable interrupts here so that the
+                * fb unblanking and DCON turn on happen at a known time value;
+                * however, we can't do that right now with fb_blank
+                * messing with semaphores.
+                *
+                * For now, we just hope..
+                */
+               if (!dcon_blank_fb(dcon, false)) {
+                       pr_err("Failed to enter CPU mode\n");
+                       dcon->pending_src = DCON_SOURCE_DCON;
+                       return;
+               }
+
+               /* And turn off the DCON */
+               pdata->set_dconload(1);
+               dcon->load_time = ktime_get();
+
+               pr_info("The CPU has control\n");
+               break;
+       case DCON_SOURCE_DCON:
+       {
+               ktime_t delta_t;
+
+               pr_info("dcon_source_switch to DCON\n");
+
+               /* Clear DCONLOAD - this implies that the DCON is in control */
+               pdata->set_dconload(0);
+               dcon->load_time = ktime_get();
+
+               wait_event_timeout(dcon->waitq, dcon->switched, HZ/2);
+
+               if (!dcon->switched) {
+                       pr_err("Timeout entering DCON mode; expect a screen glitch.\n");
+               } else {
+                       /* sometimes the DCON doesn't follow its own rules,
+                        * and doesn't wait for two vsync pulses before
+                        * ack'ing the frame load with an IRQ.  the result
+                        * is that the display shows the *previously*
+                        * loaded frame.  we can detect this by looking at
+                        * the time between asserting DCONLOAD and the IRQ --
+                        * if it's less than 20msec, then the DCON couldn't
+                        * have seen two VSYNC pulses.  in that case we
+                        * deassert and reassert, and hope for the best.
+                        * see http://dev.laptop.org/ticket/9664
+                        */
+                       delta_t = ktime_sub(dcon->irq_time, dcon->load_time);
+                       if (dcon->switched && ktime_to_ns(delta_t)
+                           < NSEC_PER_MSEC * 20) {
+                               pr_err("missed loading, retrying\n");
+                               pdata->set_dconload(1);
+                               mdelay(41);
+                               pdata->set_dconload(0);
+                               dcon->load_time = ktime_get();
+                               mdelay(41);
+                       }
+               }
+
+               dcon_blank_fb(dcon, true);
+               pr_info("The DCON has control\n");
+               break;
+       }
+       default:
+               BUG();
+       }
+
+       dcon->curr_src = source;
+}
+
+static void dcon_set_source(struct dcon_priv *dcon, int arg)
+{
+       if (dcon->pending_src == arg)
+               return;
+
+       dcon->pending_src = arg;
+
+       if (dcon->curr_src != arg)
+               schedule_work(&dcon->switch_source);
+}
+
+static void dcon_set_source_sync(struct dcon_priv *dcon, int arg)
+{
+       dcon_set_source(dcon, arg);
+       flush_scheduled_work();
+}
+
+static ssize_t dcon_mode_show(struct device *dev,
+       struct device_attribute *attr, char *buf)
+{
+       struct dcon_priv *dcon = dev_get_drvdata(dev);
+
+       return sprintf(buf, "%4.4X\n", dcon->disp_mode);
+}
+
+static ssize_t dcon_sleep_show(struct device *dev,
+       struct device_attribute *attr, char *buf)
+{
+       struct dcon_priv *dcon = dev_get_drvdata(dev);
+
+       return sprintf(buf, "%d\n", dcon->asleep);
+}
+
+static ssize_t dcon_freeze_show(struct device *dev,
+       struct device_attribute *attr, char *buf)
+{
+       struct dcon_priv *dcon = dev_get_drvdata(dev);
+
+       return sprintf(buf, "%d\n", dcon->curr_src == DCON_SOURCE_DCON ? 1 : 0);
+}
+
+static ssize_t dcon_mono_show(struct device *dev,
+       struct device_attribute *attr, char *buf)
+{
+       struct dcon_priv *dcon = dev_get_drvdata(dev);
+
+       return sprintf(buf, "%d\n", dcon->mono);
+}
+
+static ssize_t dcon_resumeline_show(struct device *dev,
+       struct device_attribute *attr, char *buf)
+{
+       return sprintf(buf, "%d\n", resumeline);
+}
+
+static ssize_t dcon_mono_store(struct device *dev,
+       struct device_attribute *attr, const char *buf, size_t count)
+{
+       unsigned long enable_mono;
+       int rc;
+
+       rc = kstrtoul(buf, 10, &enable_mono);
+       if (rc)
+               return rc;
+
+       dcon_set_mono_mode(dev_get_drvdata(dev), enable_mono ? true : false);
+
+       return count;
+}
+
+static ssize_t dcon_freeze_store(struct device *dev,
+       struct device_attribute *attr, const char *buf, size_t count)
+{
+       struct dcon_priv *dcon = dev_get_drvdata(dev);
+       unsigned long output;
+       int ret;
+
+       ret = kstrtoul(buf, 10, &output);
+       if (ret)
+               return ret;
+
+       pr_info("dcon_freeze_store: %lu\n", output);
+
+       switch (output) {
+       case 0:
+               dcon_set_source(dcon, DCON_SOURCE_CPU);
+               break;
+       case 1:
+               dcon_set_source_sync(dcon, DCON_SOURCE_DCON);
+               break;
+       case 2:  /* normally unused */
+               dcon_set_source(dcon, DCON_SOURCE_DCON);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return count;
+}
+
+static ssize_t dcon_resumeline_store(struct device *dev,
+       struct device_attribute *attr, const char *buf, size_t count)
+{
+       unsigned short rl;
+       int rc;
+
+       rc = kstrtou16(buf, 10, &rl);
+       if (rc)
+               return rc;
+
+       resumeline = rl;
+       dcon_write(dev_get_drvdata(dev), DCON_REG_SCAN_INT, resumeline);
+
+       return count;
+}
+
+static ssize_t dcon_sleep_store(struct device *dev,
+       struct device_attribute *attr, const char *buf, size_t count)
+{
+       unsigned long output;
+       int ret;
+
+       ret = kstrtoul(buf, 10, &output);
+       if (ret)
+               return ret;
+
+       dcon_sleep(dev_get_drvdata(dev), output ? true : false);
+       return count;
+}
+
+static struct device_attribute dcon_device_files[] = {
+       __ATTR(mode, 0444, dcon_mode_show, NULL),
+       __ATTR(sleep, 0644, dcon_sleep_show, dcon_sleep_store),
+       __ATTR(freeze, 0644, dcon_freeze_show, dcon_freeze_store),
+       __ATTR(monochrome, 0644, dcon_mono_show, dcon_mono_store),
+       __ATTR(resumeline, 0644, dcon_resumeline_show, dcon_resumeline_store),
+};
+
+static int dcon_bl_update(struct backlight_device *dev)
+{
+       struct dcon_priv *dcon = bl_get_data(dev);
+       u8 level = dev->props.brightness & 0x0F;
+
+       if (dev->props.power != FB_BLANK_UNBLANK)
+               level = 0;
+
+       if (level != dcon->bl_val)
+               dcon_set_backlight(dcon, level);
+
+       /* power down the DCON when the screen is blanked */
+       if (!dcon->ignore_fb_events)
+               dcon_sleep(dcon, !!(dev->props.state & BL_CORE_FBBLANK));
+
+       return 0;
+}
+
+static int dcon_bl_get(struct backlight_device *dev)
+{
+       struct dcon_priv *dcon = bl_get_data(dev);
+
+       return dcon->bl_val;
+}
+
+static const struct backlight_ops dcon_bl_ops = {
+       .update_status = dcon_bl_update,
+       .get_brightness = dcon_bl_get,
+};
+
+static struct backlight_properties dcon_bl_props = {
+       .max_brightness = 15,
+       .type = BACKLIGHT_RAW,
+       .power = FB_BLANK_UNBLANK,
+};
+
+static int dcon_reboot_notify(struct notifier_block *nb,
+                             unsigned long foo, void *bar)
+{
+       struct dcon_priv *dcon = container_of(nb, struct dcon_priv, reboot_nb);
+
+       if (!dcon || !dcon->client)
+               return NOTIFY_DONE;
+
+       /* Turn off the DCON. Entirely. */
+       dcon_write(dcon, DCON_REG_MODE, 0x39);
+       dcon_write(dcon, DCON_REG_MODE, 0x32);
+       return NOTIFY_DONE;
+}
+
+static int unfreeze_on_panic(struct notifier_block *nb,
+                            unsigned long e, void *p)
+{
+       pdata->set_dconload(1);
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block dcon_panic_nb = {
+       .notifier_call = unfreeze_on_panic,
+};
+
+static int dcon_detect(struct i2c_client *client, struct i2c_board_info *info)
+{
+       strlcpy(info->type, "olpc_dcon", I2C_NAME_SIZE);
+
+       return 0;
+}
+
+static int dcon_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+       struct dcon_priv *dcon;
+       int rc, i, j;
+
+       if (!pdata)
+               return -ENXIO;
+
+       dcon = kzalloc(sizeof(*dcon), GFP_KERNEL);
+       if (!dcon)
+               return -ENOMEM;
+
+       dcon->client = client;
+       init_waitqueue_head(&dcon->waitq);
+       INIT_WORK(&dcon->switch_source, dcon_source_switch);
+       dcon->reboot_nb.notifier_call = dcon_reboot_notify;
+       dcon->reboot_nb.priority = -1;
+
+       i2c_set_clientdata(client, dcon);
+
+       if (num_registered_fb < 1) {
+               dev_err(&client->dev, "DCON driver requires a registered fb\n");
+               rc = -EIO;
+               goto einit;
+       }
+       dcon->fbinfo = registered_fb[0];
+
+       rc = dcon_hw_init(dcon, 1);
+       if (rc)
+               goto einit;
+
+       /* Add the DCON device */
+
+       dcon_device = platform_device_alloc("dcon", -1);
+
+       if (!dcon_device) {
+               pr_err("Unable to create the DCON device\n");
+               rc = -ENOMEM;
+               goto eirq;
+       }
+       rc = platform_device_add(dcon_device);
+       platform_set_drvdata(dcon_device, dcon);
+
+       if (rc) {
+               pr_err("Unable to add the DCON device\n");
+               goto edev;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(dcon_device_files); i++) {
+               rc = device_create_file(&dcon_device->dev,
+                                       &dcon_device_files[i]);
+               if (rc) {
+                       dev_err(&dcon_device->dev, "Cannot create sysfs file\n");
+                       goto ecreate;
+               }
+       }
+
+       dcon->bl_val = dcon_read(dcon, DCON_REG_BRIGHT) & 0x0F;
+
+       /* Add the backlight device for the DCON */
+       dcon_bl_props.brightness = dcon->bl_val;
+       dcon->bl_dev = backlight_device_register("dcon-bl", &dcon_device->dev,
+               dcon, &dcon_bl_ops, &dcon_bl_props);
+       if (IS_ERR(dcon->bl_dev)) {
+               dev_err(&client->dev, "cannot register backlight dev (%ld)\n",
+                               PTR_ERR(dcon->bl_dev));
+               dcon->bl_dev = NULL;
+       }
+
+       register_reboot_notifier(&dcon->reboot_nb);
+       atomic_notifier_chain_register(&panic_notifier_list, &dcon_panic_nb);
+
+       return 0;
+
+ ecreate:
+       for (j = 0; j < i; j++)
+               device_remove_file(&dcon_device->dev, &dcon_device_files[j]);
+ edev:
+       platform_device_unregister(dcon_device);
+       dcon_device = NULL;
+ eirq:
+       free_irq(DCON_IRQ, dcon);
+ einit:
+       kfree(dcon);
+       return rc;
+}
+
+static int dcon_remove(struct i2c_client *client)
+{
+       struct dcon_priv *dcon = i2c_get_clientdata(client);
+
+       unregister_reboot_notifier(&dcon->reboot_nb);
+       atomic_notifier_chain_unregister(&panic_notifier_list, &dcon_panic_nb);
+
+       free_irq(DCON_IRQ, dcon);
+
+       backlight_device_unregister(dcon->bl_dev);
+
+       if (dcon_device)
+               platform_device_unregister(dcon_device);
+       cancel_work_sync(&dcon->switch_source);
+
+       kfree(dcon);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM
+static int dcon_suspend(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct dcon_priv *dcon = i2c_get_clientdata(client);
+
+       if (!dcon->asleep) {
+               /* Set up the DCON to have the source */
+               dcon_set_source_sync(dcon, DCON_SOURCE_DCON);
+       }
+
+       return 0;
+}
+
+static int dcon_resume(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct dcon_priv *dcon = i2c_get_clientdata(client);
+
+       if (!dcon->asleep) {
+               dcon_bus_stabilize(dcon, 0);
+               dcon_set_source(dcon, DCON_SOURCE_CPU);
+       }
+
+       return 0;
+}
+
+#else
+
+#define dcon_suspend NULL
+#define dcon_resume NULL
+
+#endif /* CONFIG_PM */
+
+irqreturn_t dcon_interrupt(int irq, void *id)
+{
+       struct dcon_priv *dcon = id;
+       u8 status;
+
+       if (pdata->read_status(&status))
+               return IRQ_NONE;
+
+       switch (status & 3) {
+       case 3:
+               pr_debug("DCONLOAD_MISSED interrupt\n");
+               break;
+
+       case 2: /* switch to DCON mode */
+       case 1: /* switch to CPU mode */
+               dcon->switched = true;
+               dcon->irq_time = ktime_get();
+               wake_up(&dcon->waitq);
+               break;
+
+       case 0:
+               /* workaround resume case:  the DCON (on 1.5) doesn't
+                * ever assert status 0x01 when switching to CPU mode
+                * during resume.  this is because DCONLOAD is de-asserted
+                * _immediately_ upon exiting S3, so the actual release
+                * of the DCON happened long before this point.
+                * see http://dev.laptop.org/ticket/9869
+                */
+               if (dcon->curr_src != dcon->pending_src && !dcon->switched) {
+                       dcon->switched = true;
+                       dcon->irq_time = ktime_get();
+                       wake_up(&dcon->waitq);
+                       pr_debug("switching w/ status 0/0\n");
+               } else {
+                       pr_debug("scanline interrupt w/CPU\n");
+               }
+       }
+
+       return IRQ_HANDLED;
+}
+
+static const struct dev_pm_ops dcon_pm_ops = {
+       .suspend = dcon_suspend,
+       .resume = dcon_resume,
+};
+
+static const struct i2c_device_id dcon_idtable[] = {
+       { "olpc_dcon",  0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, dcon_idtable);
+
+static struct i2c_driver dcon_driver = {
+       .driver = {
+               .name   = "olpc_dcon",
+               .pm = &dcon_pm_ops,
+       },
+       .class = I2C_CLASS_DDC | I2C_CLASS_HWMON,
+       .id_table = dcon_idtable,
+       .probe = dcon_probe,
+       .remove = dcon_remove,
+       .detect = dcon_detect,
+       .address_list = normal_i2c,
+};
+
+static int __init olpc_dcon_init(void)
+{
+#ifdef CONFIG_FB_OLPC_DCON_1_5
+       /* XO-1.5 */
+       if (olpc_board_at_least(olpc_board(0xd0)))
+               pdata = &dcon_pdata_xo_1_5;
+#endif
+#ifdef CONFIG_FB_OLPC_DCON_1
+       if (!pdata)
+               pdata = &dcon_pdata_xo_1;
+#endif
+
+       return i2c_add_driver(&dcon_driver);
+}
+
+static void __exit olpc_dcon_exit(void)
+{
+       i2c_del_driver(&dcon_driver);
+}
+
+module_init(olpc_dcon_init);
+module_exit(olpc_dcon_exit);
+
+MODULE_LICENSE("GPL");
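
dcon_load_holdoff() in the file above spins until at least 20 ms have elapsed since the last DCONLOAD flip, because the DCON misbehaves when the load line toggles faster than frame time. A rough userspace sketch of the same rate-limit pattern using clock_gettime(); the 20 ms threshold and 4 ms poll step are taken from the driver, everything else is a stand-in:

#include <stdio.h>
#include <time.h>

#define HOLDOFF_NS (20L * 1000 * 1000)  /* 20 ms, as in dcon_load_holdoff() */

static struct timespec last_load;

/* Nanoseconds since 'then'; fits in long for these short intervals. */
static long ns_since(const struct timespec *then)
{
        struct timespec now;

        clock_gettime(CLOCK_MONOTONIC, &now);
        return (now.tv_sec - then->tv_sec) * 1000000000L +
               (now.tv_nsec - then->tv_nsec);
}

/* Block until the previous "load" is at least 20 ms old. */
static void load_holdoff(void)
{
        while (ns_since(&last_load) < HOLDOFF_NS) {
                struct timespec d = { 0, 4 * 1000 * 1000 };     /* cf. mdelay(4) */
                nanosleep(&d, NULL);
        }
}

int main(void)
{
        clock_gettime(CLOCK_MONOTONIC, &last_load); /* pretend we just flipped DCONLOAD */
        load_holdoff();                             /* waits roughly 20 ms */
        printf("safe to flip DCONLOAD again\n");
        return 0;
}
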
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.h b/drivers/staging/olpc_dcon/olpc_dcon.h
new file mode 100644 (file)
index 0000000..215e7ec
--- /dev/null
@@ -0,0 +1,111 @@
+#ifndef OLPC_DCON_H_
+#define OLPC_DCON_H_
+
+#include <linux/notifier.h>
+#include <linux/workqueue.h>
+
+/* DCON registers */
+
+#define DCON_REG_ID             0
+#define DCON_REG_MODE           1
+
+#define MODE_PASSTHRU  (1<<0)
+#define MODE_SLEEP     (1<<1)
+#define MODE_SLEEP_AUTO        (1<<2)
+#define MODE_BL_ENABLE (1<<3)
+#define MODE_BLANK     (1<<4)
+#define MODE_CSWIZZLE  (1<<5)
+#define MODE_COL_AA    (1<<6)
+#define MODE_MONO_LUMA (1<<7)
+#define MODE_SCAN_INT  (1<<8)
+#define MODE_CLOCKDIV  (1<<9)
+#define MODE_DEBUG     (1<<14)
+#define MODE_SELFTEST  (1<<15)
+
+#define DCON_REG_HRES          0x2
+#define DCON_REG_HTOTAL                0x3
+#define DCON_REG_HSYNC_WIDTH   0x4
+#define DCON_REG_VRES          0x5
+#define DCON_REG_VTOTAL                0x6
+#define DCON_REG_VSYNC_WIDTH   0x7
+#define DCON_REG_TIMEOUT       0x8
+#define DCON_REG_SCAN_INT      0x9
+#define DCON_REG_BRIGHT                0xa
+#define DCON_REG_MEM_OPT_A     0x41
+#define DCON_REG_MEM_OPT_B     0x42
+
+/* Load Delay Locked Loop (DLL) settings for clock delay */
+#define MEM_DLL_CLOCK_DELAY    (1<<0)
+/* Memory controller power down function */
+#define MEM_POWER_DOWN         (1<<8)
+/* Memory controller software reset */
+#define MEM_SOFT_RESET         (1<<0)
+
+/* Status values */
+
+#define DCONSTAT_SCANINT       0
+#define DCONSTAT_SCANINT_DCON  1
+#define DCONSTAT_DISPLAYLOAD   2
+#define DCONSTAT_MISSED                3
+
+/* Source values */
+
+#define DCON_SOURCE_DCON        0
+#define DCON_SOURCE_CPU         1
+
+/* Interrupt */
+#define DCON_IRQ                6
+
+struct dcon_priv {
+       struct i2c_client *client;
+       struct fb_info *fbinfo;
+       struct backlight_device *bl_dev;
+
+       wait_queue_head_t waitq;
+       struct work_struct switch_source;
+       struct notifier_block reboot_nb;
+
+       /* Shadow register for the DCON_REG_MODE register */
+       u8 disp_mode;
+
+       /* The current backlight value - this saves us some smbus traffic */
+       u8 bl_val;
+
+       /* Current source, initialized at probe time */
+       int curr_src;
+
+       /* Desired source */
+       int pending_src;
+
+       /* Variables used during switches */
+       bool switched;
+       ktime_t irq_time;
+       ktime_t load_time;
+
+       /* Current output type; true == mono, false == color */
+       bool mono;
+       bool asleep;
+       /* This gets set while controlling fb blank state from the driver */
+       bool ignore_fb_events;
+};
+
+struct dcon_platform_data {
+       int (*init)(struct dcon_priv *);
+       void (*bus_stabilize_wiggle)(void);
+       void (*set_dconload)(int);
+       int (*read_status)(u8 *);
+};
+
+#include <linux/interrupt.h>
+
+irqreturn_t dcon_interrupt(int irq, void *id);
+
+#ifdef CONFIG_FB_OLPC_DCON_1
+extern struct dcon_platform_data dcon_pdata_xo_1;
+#endif
+
+#ifdef CONFIG_FB_OLPC_DCON_1_5
+extern struct dcon_platform_data dcon_pdata_xo_1_5;
+#endif
+
+#endif
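
The dcon_platform_data table is what keeps olpc_dcon.c model-agnostic: olpc_dcon_init() selects one ops table and the core only ever calls through the function pointers. A self-contained sketch of that dispatch pattern with hypothetical stand-in ops (not the driver's real signatures):

#include <stdio.h>

/* Slimmed-down analogue of struct dcon_platform_data. */
struct model_ops {
        const char *name;
        void (*set_load)(int val);
};

static void set_load_xo1(int val)
{
        printf("XO-1:   DCONLOAD=%d via CS5535 GPIO\n", val);
}

static void set_load_xo15(int val)
{
        printf("XO-1.5: DCONLOAD=%d via VX855 GPIO1\n", val);
}

static const struct model_ops ops_xo1  = { "XO-1",   set_load_xo1  };
static const struct model_ops ops_xo15 = { "XO-1.5", set_load_xo15 };

static const struct model_ops *pdata;   /* selected once at init, like the driver */

int main(void)
{
        int board_is_xo15 = 1;  /* stand-in for olpc_board_at_least(olpc_board(0xd0)) */

        pdata = board_is_xo15 ? &ops_xo15 : &ops_xo1;

        /* Core code never needs to know which model it runs on. */
        pdata->set_load(1);     /* give the CPU control */
        pdata->set_load(0);     /* hand control back to the DCON */
        return 0;
}
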
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
new file mode 100644 (file)
index 0000000..0c5a10c
--- /dev/null
@@ -0,0 +1,205 @@
+/*
+ * Mainly by David Woodhouse, somewhat modified by Jordan Crouse
+ *
+ * Copyright © 2006-2007  Red Hat, Inc.
+ * Copyright © 2006-2007  Advanced Micro Devices, Inc.
+ * Copyright © 2009       VIA Technology, Inc.
+ * Copyright (c) 2010  Andres Salomon <dilinger@queued.net>
+ *
+ * This program is free software.  You can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cs5535.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <asm/olpc.h>
+
+#include "olpc_dcon.h"
+
+static int dcon_init_xo_1(struct dcon_priv *dcon)
+{
+       unsigned char lob;
+
+       if (gpio_request(OLPC_GPIO_DCON_STAT0, "OLPC-DCON")) {
+               pr_err("failed to request STAT0 GPIO\n");
+               return -EIO;
+       }
+       if (gpio_request(OLPC_GPIO_DCON_STAT1, "OLPC-DCON")) {
+               pr_err("failed to request STAT1 GPIO\n");
+               goto err_gp_stat1;
+       }
+       if (gpio_request(OLPC_GPIO_DCON_IRQ, "OLPC-DCON")) {
+               pr_err("failed to request IRQ GPIO\n");
+               goto err_gp_irq;
+       }
+       if (gpio_request(OLPC_GPIO_DCON_LOAD, "OLPC-DCON")) {
+               pr_err("failed to request LOAD GPIO\n");
+               goto err_gp_load;
+       }
+       if (gpio_request(OLPC_GPIO_DCON_BLANK, "OLPC-DCON")) {
+               pr_err("failed to request BLANK GPIO\n");
+               goto err_gp_blank;
+       }
+
+       /* Turn off the event enable for GPIO7 just to be safe */
+       cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_EVENTS_ENABLE);
+
+       /*
+        * Determine the current state by reading the GPIO bit; earlier
+        * stages of the boot process have established the state.
+        *
+        * Note that we read GPIO_OUTPUT_VAL rather than GPIO_READ_BACK here;
+        * this is because OFW will disable input for the pin and set a value..
+        * READ_BACK will only contain a valid value if input is enabled and
+        * then a value is set.  So, future readings of the pin can use
+        * READ_BACK, but the first one cannot.  Awesome, huh?
+        */
+       dcon->curr_src = cs5535_gpio_isset(OLPC_GPIO_DCON_LOAD, GPIO_OUTPUT_VAL)
+               ? DCON_SOURCE_CPU
+               : DCON_SOURCE_DCON;
+       dcon->pending_src = dcon->curr_src;
+
+       /* Set the directions for the GPIO pins */
+       gpio_direction_input(OLPC_GPIO_DCON_STAT0);
+       gpio_direction_input(OLPC_GPIO_DCON_STAT1);
+       gpio_direction_input(OLPC_GPIO_DCON_IRQ);
+       gpio_direction_input(OLPC_GPIO_DCON_BLANK);
+       gpio_direction_output(OLPC_GPIO_DCON_LOAD,
+                       dcon->curr_src == DCON_SOURCE_CPU);
+
+       /* Set up the interrupt mappings */
+
+       /* Set the IRQ to pair 2 */
+       cs5535_gpio_setup_event(OLPC_GPIO_DCON_IRQ, 2, 0);
+
+       /* Enable group 2 to trigger the DCON interrupt */
+       cs5535_gpio_set_irq(2, DCON_IRQ);
+
+       /* Select edge level for interrupt (in PIC) */
+       lob = inb(0x4d0);
+       lob &= ~(1 << DCON_IRQ);
+       outb(lob, 0x4d0);
+
+       /* Register the interrupt handler */
+       if (request_irq(DCON_IRQ, &dcon_interrupt, 0, "DCON", dcon)) {
+               pr_err("failed to request DCON's irq\n");
+               goto err_req_irq;
+       }
+
+       /* Clear INV_EN for GPIO7 (DCONIRQ) */
+       cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_INVERT);
+
+       /* Enable filter for GPIO12 (DCONBLANK) */
+       cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_INPUT_FILTER);
+
+       /* Disable filter for GPIO7 */
+       cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_FILTER);
+
+       /* Disable event counter for GPIO7 (DCONIRQ) and GPIO12 (DCONBLANK) */
+       cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_EVENT_COUNT);
+       cs5535_gpio_clear(OLPC_GPIO_DCON_BLANK, GPIO_INPUT_EVENT_COUNT);
+
+       /* Add GPIO12 to the Filter Event Pair #7 */
+       cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_FE7_SEL);
+
+       /* Turn off negative Edge Enable for GPIO12 */
+       cs5535_gpio_clear(OLPC_GPIO_DCON_BLANK, GPIO_NEGATIVE_EDGE_EN);
+
+       /* Enable negative Edge Enable for GPIO7 */
+       cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_EN);
+
+       /* Zero the filter amount for Filter Event Pair #7 */
+       cs5535_gpio_set(0, GPIO_FLTR7_AMOUNT);
+
+       /* Clear the negative edge status for GPIO7 and GPIO12 */
+       cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS);
+       cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_NEGATIVE_EDGE_STS);
+
+       /* FIXME:  Clear the positive status as well, just to be sure */
+       cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_POSITIVE_EDGE_STS);
+       cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_POSITIVE_EDGE_STS);
+
+       /* Enable events for GPIO7 (DCONIRQ) and GPIO12 (DCONBLANK) */
+       cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_EVENTS_ENABLE);
+       cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_EVENTS_ENABLE);
+
+       return 0;
+
+err_req_irq:
+       gpio_free(OLPC_GPIO_DCON_BLANK);
+err_gp_blank:
+       gpio_free(OLPC_GPIO_DCON_LOAD);
+err_gp_load:
+       gpio_free(OLPC_GPIO_DCON_IRQ);
+err_gp_irq:
+       gpio_free(OLPC_GPIO_DCON_STAT1);
+err_gp_stat1:
+       gpio_free(OLPC_GPIO_DCON_STAT0);
+       return -EIO;
+}
+
+static void dcon_wiggle_xo_1(void)
+{
+       int x;
+
+       /*
+        * According to HiMax, when powering the DCON up we should hold
+        * SMB_DATA high for 8 SMB_CLK cycles.  This will force the DCON
+        * state machine to reset to a (sane) initial state.  Mitch Bradley
+        * did some testing and discovered that holding for 16 SMB_CLK cycles
+        * worked a lot more reliably, so that's what we do here.
+        *
+        * According to the cs5536 spec, to set GPIO14 to SMB_CLK we must
+        * simultaneously set AUX1 IN/OUT to GPIO14; ditto for SMB_DATA and
+        * GPIO15.
+        */
+       cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
+       cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_VAL);
+       cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_ENABLE);
+       cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_ENABLE);
+       cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX1);
+       cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1);
+       cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX2);
+       cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX2);
+       cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_INPUT_AUX1);
+       cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1);
+
+       for (x = 0; x < 16; x++) {
+               udelay(5);
+               cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
+               udelay(5);
+               cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
+       }
+       udelay(5);
+       cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX1);
+       cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1);
+       cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_INPUT_AUX1);
+       cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1);
+}
+
+static void dcon_set_dconload_1(int val)
+{
+       gpio_set_value(OLPC_GPIO_DCON_LOAD, val);
+}
+
+static int dcon_read_status_xo_1(u8 *status)
+{
+       *status = gpio_get_value(OLPC_GPIO_DCON_STAT0);
+       *status |= gpio_get_value(OLPC_GPIO_DCON_STAT1) << 1;
+
+       /* Clear the negative edge status for GPIO7 */
+       cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS);
+
+       return 0;
+}
+
+struct dcon_platform_data dcon_pdata_xo_1 = {
+       .init = dcon_init_xo_1,
+       .bus_stabilize_wiggle = dcon_wiggle_xo_1,
+       .set_dconload = dcon_set_dconload_1,
+       .read_status = dcon_read_status_xo_1,
+};
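
dcon_wiggle_xo_1() interleaves the reset train with cs5535-specific pin muxing, which can obscure the protocol itself: hold SMB_DATA high while clocking SMB_CLK sixteen times. A stand-alone sketch of just that clock train, with hypothetical set_clk()/set_data() callbacks in place of the GPIO calls:

#include <stdio.h>

/* Hypothetical pin setters; a real backend would toggle GPIOs. */
static void set_clk(int v)  { printf("CLK=%d\n", v); }
static void set_data(int v) { printf("DAT=%d\n", v); }

static void delay_us(int us) { (void)us; /* udelay() stand-in */ }

/* Hold SMB_DATA high while clocking SMB_CLK 16 times: per the driver's
 * comment, this resets the DCON's SMBus state machine. */
static void bus_stabilize_wiggle(void)
{
        int i;

        set_data(1);
        set_clk(1);
        for (i = 0; i < 16; i++) {
                delay_us(5);
                set_clk(0);
                delay_us(5);
                set_clk(1);
        }
        delay_us(5);
}

int main(void)
{
        bus_stabilize_wiggle();
        return 0;
}
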
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
new file mode 100644 (file)
index 0000000..6a4d379
--- /dev/null
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2009,2010       One Laptop per Child
+ *
+ * This program is free software.  You can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <asm/olpc.h>
+
+/* TODO: this eventually belongs in linux/vx855.h */
+#define NR_VX855_GPI    14
+#define NR_VX855_GPO    13
+#define NR_VX855_GPIO   15
+
+#define VX855_GPI(n)    (n)
+#define VX855_GPO(n)    (NR_VX855_GPI + (n))
+#define VX855_GPIO(n)   (NR_VX855_GPI + NR_VX855_GPO + (n))
+
+#include "olpc_dcon.h"
+
+/* Hardware setup on the XO 1.5:
+ *     DCONLOAD connects to VX855_GPIO1 (not SMBCK2)
+ *     DCONBLANK connects to VX855_GPIO8 (not SSPICLK)  unused in driver
+ *     DCONSTAT0 connects to VX855_GPI10 (not SSPISDI)
+ *     DCONSTAT1 connects to VX855_GPI11 (not nSSPISS)
+ *     DCONIRQ connects to VX855_GPIO12
+ *     DCONSMBDATA connects to VX855 graphics CRTSPD
+ *     DCONSMBCLK connects to VX855 graphics CRTSPCLK
+ */
+
+#define VX855_GENL_PURPOSE_OUTPUT 0x44c /* PMIO_Rx4c-4f */
+#define VX855_GPI_STATUS_CHG 0x450  /* PMIO_Rx50 */
+#define VX855_GPI_SCI_SMI 0x452  /* PMIO_Rx52 */
+#define BIT_GPIO12 0x40
+
+#define PREFIX "OLPC DCON:"
+
+static void dcon_clear_irq(void)
+{
+       /* irq status will appear in PMIO_Rx50[6] (RW1C) on gpio12 */
+       outb(BIT_GPIO12, VX855_GPI_STATUS_CHG);
+}
+
+static int dcon_was_irq(void)
+{
+       u_int8_t tmp;
+
+       /* irq status will appear in PMIO_Rx50[6] on gpio12 */
+       tmp = inb(VX855_GPI_STATUS_CHG);
+       return !!(tmp & BIT_GPIO12);
+}
+
+static int dcon_init_xo_1_5(struct dcon_priv *dcon)
+{
+       unsigned int irq;
+
+       dcon_clear_irq();
+
+       /* set   PMIO_Rx52[6] to enable SCI/SMI on gpio12 */
+       outb(inb(VX855_GPI_SCI_SMI)|BIT_GPIO12, VX855_GPI_SCI_SMI);
+
+       /* Determine the current state of DCONLOAD, likely set by firmware */
+       /* GPIO1 */
+       dcon->curr_src = (inl(VX855_GENL_PURPOSE_OUTPUT) & 0x1000) ?
+                       DCON_SOURCE_CPU : DCON_SOURCE_DCON;
+       dcon->pending_src = dcon->curr_src;
+
+       /* we're sharing the IRQ with ACPI */
+       irq = acpi_gbl_FADT.sci_interrupt;
+       if (request_irq(irq, &dcon_interrupt, IRQF_SHARED, "DCON", dcon)) {
+               pr_err("DCON (IRQ%d) allocation failed\n", irq);
+               return 1;
+       }
+
+       return 0;
+}
+
+static void set_i2c_line(int sda, int scl)
+{
+       unsigned char tmp;
+       unsigned int port = 0x26;
+
+       /* FIXME: This directly accesses the CRT GPIO controller !!! */
+       outb(port, 0x3c4);
+       tmp = inb(0x3c5);
+
+       if (scl)
+               tmp |= 0x20;
+       else
+               tmp &= ~0x20;
+
+       if (sda)
+               tmp |= 0x10;
+       else
+               tmp &= ~0x10;
+
+       tmp |= 0x01;
+
+       outb(port, 0x3c4);
+       outb(tmp, 0x3c5);
+}
+
+
+static void dcon_wiggle_xo_1_5(void)
+{
+       int x;
+
+       /*
+        * According to HiMax, when powering the DCON up we should hold
+        * SMB_DATA high for 8 SMB_CLK cycles.  This will force the DCON
+        * state machine to reset to a (sane) initial state.  Mitch Bradley
+        * did some testing and discovered that holding for 16 SMB_CLK cycles
+        * worked a lot more reliably, so that's what we do here.
+        */
+       set_i2c_line(1, 1);
+
+       for (x = 0; x < 16; x++) {
+               udelay(5);
+               set_i2c_line(1, 0);
+               udelay(5);
+               set_i2c_line(1, 1);
+       }
+       udelay(5);
+
+       /* set   PMIO_Rx52[6] to enable SCI/SMI on gpio12 */
+       outb(inb(VX855_GPI_SCI_SMI)|BIT_GPIO12, VX855_GPI_SCI_SMI);
+}
+
+static void dcon_set_dconload_xo_1_5(int val)
+{
+       gpio_set_value(VX855_GPIO(1), val);
+}
+
+static int dcon_read_status_xo_1_5(u8 *status)
+{
+       if (!dcon_was_irq())
+               return -1;
+
+       /* I believe this is the same as "inb(0x44b) & 3" */
+       *status = gpio_get_value(VX855_GPI(10));
+       *status |= gpio_get_value(VX855_GPI(11)) << 1;
+
+       dcon_clear_irq();
+
+       return 0;
+}
+
+struct dcon_platform_data dcon_pdata_xo_1_5 = {
+       .init = dcon_init_xo_1_5,
+       .bus_stabilize_wiggle = dcon_wiggle_xo_1_5,
+       .set_dconload = dcon_set_dconload_xo_1_5,
+       .read_status = dcon_read_status_xo_1_5,
+};
index 3e668d852f03029985b131544d67b2b47fe45376..a925fb0db70603f3ec64b99c92bbdcf365463fc3 100644 (file)
@@ -2,6 +2,7 @@ config INFINIBAND_HFI1
        tristate "Intel OPA Gen1 support"
        depends on X86_64 && INFINIBAND_RDMAVT
        select MMU_NOTIFIER
+       select CRC32
        default m
        ---help---
        This is a low-level driver for Intel OPA Gen1 adapter.
index 05de0dad8762eb4332e36ec100c072f4e6ce4ea3..4c6f1d7d2eaf21d17bffa1cc2f0f624052e16f22 100644 (file)
@@ -3,4 +3,4 @@ July, 2015
 - Remove unneeded file entries in sysfs
 - Remove software processing of IB protocol and place in library for use
   by qib, ipath (if still present), hfi1, and eventually soft-roce
-
+- Replace incorrect uAPI
index 8396dc5fb6c1899bc32b775e19877b00bfd0151f..c1c5bf82addb07aba003790069b7d36574b112ac 100644 (file)
@@ -49,6 +49,8 @@
 #include <linux/vmalloc.h>
 #include <linux/io.h>
 
+#include <rdma/ib.h>
+
 #include "hfi.h"
 #include "pio.h"
 #include "device.h"
@@ -190,6 +192,10 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
        int uctxt_required = 1;
        int must_be_root = 0;
 
+       /* FIXME: This interface cannot continue out of staging */
+       if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
+               return -EACCES;
+
        if (count < sizeof(cmd)) {
                ret = -EINVAL;
                goto bail;
@@ -791,15 +797,16 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
        spin_unlock_irqrestore(&dd->uctxt_lock, flags);
 
        dd->rcd[uctxt->ctxt] = NULL;
+
+       hfi1_user_exp_rcv_free(fdata);
+       hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);
+
        uctxt->rcvwait_to = 0;
        uctxt->piowait_to = 0;
        uctxt->rcvnowait = 0;
        uctxt->pionowait = 0;
        uctxt->event_flags = 0;
 
-       hfi1_user_exp_rcv_free(fdata);
-       hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);
-
        hfi1_stats.sps_ctxts--;
        if (++dd->freectxts == dd->num_user_contexts)
                aspm_enable_all(dd);
@@ -1127,27 +1134,13 @@ bail:
 
 static int user_init(struct file *fp)
 {
-       int ret;
        unsigned int rcvctrl_ops = 0;
        struct hfi1_filedata *fd = fp->private_data;
        struct hfi1_ctxtdata *uctxt = fd->uctxt;
 
        /* make sure that the context has already been setup */
-       if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags)) {
-               ret = -EFAULT;
-               goto done;
-       }
-
-       /*
-        * Subctxts don't need to initialize anything since master
-        * has done it.
-        */
-       if (fd->subctxt) {
-               ret = wait_event_interruptible(uctxt->wait, !test_bit(
-                                              HFI1_CTXT_MASTER_UNINIT,
-                                              &uctxt->event_flags));
-               goto expected;
-       }
+       if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
+               return -EFAULT;
 
        /* initialize poll variables... */
        uctxt->urgent = 0;
@@ -1202,19 +1195,7 @@ static int user_init(struct file *fp)
                wake_up(&uctxt->wait);
        }
 
-expected:
-       /*
-        * Expected receive has to be setup for all processes (including
-        * shared contexts). However, it has to be done after the master
-        * context has been fully configured as it depends on the
-        * eager/expected split of the RcvArray entries.
-        * Setting it up here ensures that the subcontexts will be waiting
-        * (due to the above wait_event_interruptible() until the master
-        * is setup.
-        */
-       ret = hfi1_user_exp_rcv_init(fp);
-done:
-       return ret;
+       return 0;
 }
 
 static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
@@ -1261,7 +1242,7 @@ static int setup_ctxt(struct file *fp)
        int ret = 0;
 
        /*
-        * Context should be set up only once (including allocation and
+        * Context should be set up only once, including allocation and
         * programming of eager buffers. This is done if context sharing
         * is not requested or by the master process.
         */
@@ -1282,8 +1263,27 @@ static int setup_ctxt(struct file *fp)
                        if (ret)
                                goto done;
                }
+       } else {
+               ret = wait_event_interruptible(uctxt->wait, !test_bit(
+                                              HFI1_CTXT_MASTER_UNINIT,
+                                              &uctxt->event_flags));
+               if (ret)
+                       goto done;
        }
+
        ret = hfi1_user_sdma_alloc_queues(uctxt, fp);
+       if (ret)
+               goto done;
+       /*
+        * Expected receive has to be setup for all processes (including
+        * shared contexts). However, it has to be done after the master
+        * context has been fully configured as it depends on the
+        * eager/expected split of the RcvArray entries.
+        * Setting it up here ensures that the subcontexts will be waiting
+        * (due to the above wait_event_interruptible()) until the master
+        * is setup.
+        */
+       ret = hfi1_user_exp_rcv_init(fp);
        if (ret)
                goto done;
 
@@ -1565,29 +1565,8 @@ static loff_t ui_lseek(struct file *filp, loff_t offset, int whence)
 {
        struct hfi1_devdata *dd = filp->private_data;
 
-       switch (whence) {
-       case SEEK_SET:
-               break;
-       case SEEK_CUR:
-               offset += filp->f_pos;
-               break;
-       case SEEK_END:
-               offset = ((dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE) -
-                       offset;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       if (offset < 0)
-               return -EINVAL;
-
-       if (offset >= (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE)
-               return -EINVAL;
-
-       filp->f_pos = offset;
-
-       return filp->f_pos;
+       return fixed_size_llseek(filp, offset, whence,
+               (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE);
 }
 
 /* NOTE: assumes unsigned long is 8 bytes */
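
The ui_lseek() hunk above replaces roughly twenty lines of open-coded whence handling with fixed_size_llseek(), the VFS helper for files of a fixed length. A simplified userspace model of the semantics that helper provides (not the kernel implementation; error returns are collapsed to -1 here rather than -EINVAL):

#include <stdio.h>

enum { MY_SEEK_SET, MY_SEEK_CUR, MY_SEEK_END }; /* mirror SEEK_SET/CUR/END */

/* Simplified model of fixed_size_llseek(): resolve 'whence', then
 * reject anything outside [0, size]. */
static long long seek_fixed(long long pos, long long offset, int whence,
                            long long size)
{
        switch (whence) {
        case MY_SEEK_SET:
                break;
        case MY_SEEK_CUR:
                offset += pos;
                break;
        case MY_SEEK_END:
                offset += size;
                break;
        default:
                return -1;      /* -EINVAL in the kernel */
        }
        if (offset < 0 || offset > size)
                return -1;
        return offset;          /* new file position */
}

int main(void)
{
        long long size = 4096;  /* cf. kreg span + DC8051_DATA_MEM_SIZE */

        printf("SEEK_SET 100   -> %lld\n", seek_fixed(0, 100, MY_SEEK_SET, size));
        printf("SEEK_END -16   -> %lld\n", seek_fixed(0, -16, MY_SEEK_END, size));
        printf("SEEK_CUR +5000 -> %lld\n", seek_fixed(100, 5000, MY_SEEK_CUR, size)); /* out of range: error */
        return 0;
}
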
index c7ad0164ea9a615a856d4f3f9ece91ac34615cad..b3f0682a36c95ffdd15f1b4d5d2f8cfb2e2afac3 100644 (file)
@@ -71,6 +71,7 @@ static inline void mmu_notifier_range_start(struct mmu_notifier *,
                                            struct mm_struct *,
                                            unsigned long, unsigned long);
 static void mmu_notifier_mem_invalidate(struct mmu_notifier *,
+                                       struct mm_struct *,
                                        unsigned long, unsigned long);
 static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
                                           unsigned long, unsigned long);
@@ -137,7 +138,7 @@ void hfi1_mmu_rb_unregister(struct rb_root *root)
                        rbnode = rb_entry(node, struct mmu_rb_node, node);
                        rb_erase(node, root);
                        if (handler->ops->remove)
-                               handler->ops->remove(root, rbnode, false);
+                               handler->ops->remove(root, rbnode, NULL);
                }
        }
 
@@ -176,7 +177,7 @@ unlock:
        return ret;
 }
 
-/* Caller must host handler lock */
+/* Caller must hold handler lock */
 static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
                                           unsigned long addr,
                                           unsigned long len)
@@ -200,15 +201,21 @@ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
        return node;
 }
 
+/* Caller must *not* hold handler lock. */
 static void __mmu_rb_remove(struct mmu_rb_handler *handler,
-                           struct mmu_rb_node *node, bool arg)
+                           struct mmu_rb_node *node, struct mm_struct *mm)
 {
+       unsigned long flags;
+
        /* Validity of handler and node pointers has been checked by caller. */
        hfi1_cdbg(MMU, "Removing node addr 0x%llx, len %u", node->addr,
                  node->len);
+       spin_lock_irqsave(&handler->lock, flags);
        __mmu_int_rb_remove(node, handler->root);
+       spin_unlock_irqrestore(&handler->lock, flags);
+
        if (handler->ops->remove)
-               handler->ops->remove(handler->root, node, arg);
+               handler->ops->remove(handler->root, node, mm);
 }
 
 struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr,
@@ -231,14 +238,11 @@ struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr,
 void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node)
 {
        struct mmu_rb_handler *handler = find_mmu_handler(root);
-       unsigned long flags;
 
        if (!handler || !node)
                return;
 
-       spin_lock_irqsave(&handler->lock, flags);
-       __mmu_rb_remove(handler, node, false);
-       spin_unlock_irqrestore(&handler->lock, flags);
+       __mmu_rb_remove(handler, node, NULL);
 }
 
 static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root)
@@ -260,7 +264,7 @@ unlock:
 static inline void mmu_notifier_page(struct mmu_notifier *mn,
                                     struct mm_struct *mm, unsigned long addr)
 {
-       mmu_notifier_mem_invalidate(mn, addr, addr + PAGE_SIZE);
+       mmu_notifier_mem_invalidate(mn, mm, addr, addr + PAGE_SIZE);
 }
 
 static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
@@ -268,25 +272,31 @@ static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
                                            unsigned long start,
                                            unsigned long end)
 {
-       mmu_notifier_mem_invalidate(mn, start, end);
+       mmu_notifier_mem_invalidate(mn, mm, start, end);
 }
 
 static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn,
+                                       struct mm_struct *mm,
                                        unsigned long start, unsigned long end)
 {
        struct mmu_rb_handler *handler =
                container_of(mn, struct mmu_rb_handler, mn);
        struct rb_root *root = handler->root;
-       struct mmu_rb_node *node;
+       struct mmu_rb_node *node, *ptr = NULL;
        unsigned long flags;
 
        spin_lock_irqsave(&handler->lock, flags);
-       for (node = __mmu_int_rb_iter_first(root, start, end - 1); node;
-            node = __mmu_int_rb_iter_next(node, start, end - 1)) {
+       for (node = __mmu_int_rb_iter_first(root, start, end - 1);
+            node; node = ptr) {
+               /* Guard against node removal. */
+               ptr = __mmu_int_rb_iter_next(node, start, end - 1);
                hfi1_cdbg(MMU, "Invalidating node addr 0x%llx, len %u",
                          node->addr, node->len);
-               if (handler->ops->invalidate(root, node))
-                       __mmu_rb_remove(handler, node, true);
+               if (handler->ops->invalidate(root, node)) {
+                       spin_unlock_irqrestore(&handler->lock, flags);
+                       __mmu_rb_remove(handler, node, mm);
+                       spin_lock_irqsave(&handler->lock, flags);
+               }
        }
        spin_unlock_irqrestore(&handler->lock, flags);
 }
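
Two things change in this loop: the successor is fetched before the body runs, and the handler lock is dropped around __mmu_rb_remove(), since ops->remove() may sleep (it can unpin pages). Distilled into one function, paraphrasing the hunk:

	static void invalidate_range(struct mmu_rb_handler *handler,
				     struct mm_struct *mm,
				     unsigned long start, unsigned long end)
	{
		struct mmu_rb_node *node, *next;
		unsigned long flags;

		spin_lock_irqsave(&handler->lock, flags);
		for (node = __mmu_int_rb_iter_first(handler->root, start, end - 1);
		     node; node = next) {
			/* Fetch the successor now: 'node' may be freed below. */
			next = __mmu_int_rb_iter_next(node, start, end - 1);
			if (handler->ops->invalidate(handler->root, node)) {
				spin_unlock_irqrestore(&handler->lock, flags);
				__mmu_rb_remove(handler, node, mm); /* may sleep */
				spin_lock_irqsave(&handler->lock, flags);
			}
		}
		spin_unlock_irqrestore(&handler->lock, flags);
	}

This still assumes no other path erases the saved successor while the lock is dropped; in this driver the notifier is the only remover on the range.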
index f8523fdb8a18d1f89334b38bfa1a2415b5d373e0..19a306e83c7df54be68a8503537f247142bffbf8 100644 (file)
@@ -59,7 +59,8 @@ struct mmu_rb_node {
 struct mmu_rb_ops {
        bool (*filter)(struct mmu_rb_node *, unsigned long, unsigned long);
        int (*insert)(struct rb_root *, struct mmu_rb_node *);
-       void (*remove)(struct rb_root *, struct mmu_rb_node *, bool);
+       void (*remove)(struct rb_root *, struct mmu_rb_node *,
+                      struct mm_struct *);
        int (*invalidate)(struct rb_root *, struct mmu_rb_node *);
 };
 
index 29a5ad28019b7391b615d726ad4df453dac5c0e7..dc9119e1b458ee8117de5520218b216343185ce4 100644 (file)
@@ -519,10 +519,12 @@ static void iowait_sdma_drained(struct iowait *wait)
         * do the flush work until that QP's
         * sdma work has finished.
         */
+       spin_lock(&qp->s_lock);
        if (qp->s_flags & RVT_S_WAIT_DMA) {
                qp->s_flags &= ~RVT_S_WAIT_DMA;
                hfi1_schedule_send(qp);
        }
+       spin_unlock(&qp->s_lock);
 }
 
 /**
index 0861e095df8d458c3ecabb908c5345ed9a872c6b..8bd56d5c783dede5e59772eb88daf192e0dbb457 100644 (file)
@@ -87,7 +87,8 @@ static u32 find_phys_blocks(struct page **, unsigned, struct tid_pageset *);
 static int set_rcvarray_entry(struct file *, unsigned long, u32,
                              struct tid_group *, struct page **, unsigned);
 static int mmu_rb_insert(struct rb_root *, struct mmu_rb_node *);
-static void mmu_rb_remove(struct rb_root *, struct mmu_rb_node *, bool);
+static void mmu_rb_remove(struct rb_root *, struct mmu_rb_node *,
+                         struct mm_struct *);
 static int mmu_rb_invalidate(struct rb_root *, struct mmu_rb_node *);
 static int program_rcvarray(struct file *, unsigned long, struct tid_group *,
                            struct tid_pageset *, unsigned, u16, struct page **,
@@ -254,6 +255,8 @@ int hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
        struct hfi1_ctxtdata *uctxt = fd->uctxt;
        struct tid_group *grp, *gptr;
 
+       if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
+               return 0;
        /*
         * The notifier would have been removed when the process's mm
         * was freed.
@@ -899,7 +902,7 @@ static int unprogram_rcvarray(struct file *fp, u32 tidinfo,
        if (!node || node->rcventry != (uctxt->expected_base + rcventry))
                return -EBADF;
        if (HFI1_CAP_IS_USET(TID_UNMAP))
-               mmu_rb_remove(&fd->tid_rb_root, &node->mmu, false);
+               mmu_rb_remove(&fd->tid_rb_root, &node->mmu, NULL);
        else
                hfi1_mmu_rb_remove(&fd->tid_rb_root, &node->mmu);
 
@@ -965,7 +968,7 @@ static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
                                        continue;
                                if (HFI1_CAP_IS_USET(TID_UNMAP))
                                        mmu_rb_remove(&fd->tid_rb_root,
-                                                     &node->mmu, false);
+                                                     &node->mmu, NULL);
                                else
                                        hfi1_mmu_rb_remove(&fd->tid_rb_root,
                                                           &node->mmu);
@@ -1032,7 +1035,7 @@ static int mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *node)
 }
 
 static void mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node,
-                         bool notifier)
+                         struct mm_struct *mm)
 {
        struct hfi1_filedata *fdata =
                container_of(root, struct hfi1_filedata, tid_rb_root);
index ab6b6a42000f709020a001a2aa9594d0f2f5b851..d53a659548e0a78b2cc8b8777e2f0b45a64961c1 100644 (file)
@@ -278,7 +278,8 @@ static inline void pq_update(struct hfi1_user_sdma_pkt_q *);
 static void user_sdma_free_request(struct user_sdma_request *, bool);
 static int pin_vector_pages(struct user_sdma_request *,
                            struct user_sdma_iovec *);
-static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned);
+static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned,
+                              unsigned);
 static int check_header_template(struct user_sdma_request *,
                                 struct hfi1_pkt_header *, u32, u32);
 static int set_txreq_header(struct user_sdma_request *,
@@ -299,7 +300,8 @@ static int defer_packet_queue(
 static void activate_packet_queue(struct iowait *, int);
 static bool sdma_rb_filter(struct mmu_rb_node *, unsigned long, unsigned long);
 static int sdma_rb_insert(struct rb_root *, struct mmu_rb_node *);
-static void sdma_rb_remove(struct rb_root *, struct mmu_rb_node *, bool);
+static void sdma_rb_remove(struct rb_root *, struct mmu_rb_node *,
+                          struct mm_struct *);
 static int sdma_rb_invalidate(struct rb_root *, struct mmu_rb_node *);
 
 static struct mmu_rb_ops sdma_rb_ops = {
@@ -1063,8 +1065,10 @@ static int pin_vector_pages(struct user_sdma_request *req,
        rb_node = hfi1_mmu_rb_search(&pq->sdma_rb_root,
                                     (unsigned long)iovec->iov.iov_base,
                                     iovec->iov.iov_len);
-       if (rb_node)
+       if (rb_node && !IS_ERR(rb_node))
                node = container_of(rb_node, struct sdma_mmu_node, rb);
+       else
+               rb_node = NULL;
 
        if (!node) {
                node = kzalloc(sizeof(*node), GFP_KERNEL);
@@ -1107,7 +1111,8 @@ retry:
                        goto bail;
                }
                if (pinned != npages) {
-                       unpin_vector_pages(current->mm, pages, pinned);
+                       unpin_vector_pages(current->mm, pages, node->npages,
+                                          pinned);
                        ret = -EFAULT;
                        goto bail;
                }
@@ -1147,9 +1152,9 @@ bail:
 }
 
 static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
-                              unsigned npages)
+                              unsigned start, unsigned npages)
 {
-       hfi1_release_user_pages(mm, pages, npages, 0);
+       hfi1_release_user_pages(mm, pages + start, npages, 0);
        kfree(pages);
 }
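
With the extra 'start' parameter, the partial-pin error path above can release just the freshly pinned tail: 'pages' holds node->npages previously pinned entries followed by 'pinned' new ones, and only the new ones must be dropped. Simplified sketch (the driver's hfi1_release_user_pages() additionally adjusts mm->pinned_vm):

	static void unpin_tail_on_error(struct page **pages,
					unsigned int already,
					unsigned int pinned)
	{
		unsigned int i;

		for (i = 0; i < pinned; i++)
			put_page(pages[already + i]);	/* new pins only */
	}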
 
@@ -1502,7 +1507,7 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
                                &req->pq->sdma_rb_root,
                                (unsigned long)req->iovs[i].iov.iov_base,
                                req->iovs[i].iov.iov_len);
-                       if (!mnode)
+                       if (!mnode || IS_ERR(mnode))
                                continue;
 
                        node = container_of(mnode, struct sdma_mmu_node, rb);
@@ -1547,7 +1552,7 @@ static int sdma_rb_insert(struct rb_root *root, struct mmu_rb_node *mnode)
 }
 
 static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode,
-                          bool notifier)
+                          struct mm_struct *mm)
 {
        struct sdma_mmu_node *node =
                container_of(mnode, struct sdma_mmu_node, rb);
@@ -1557,14 +1562,20 @@ static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode,
        node->pq->n_locked -= node->npages;
        spin_unlock(&node->pq->evict_lock);
 
-       unpin_vector_pages(notifier ? NULL : current->mm, node->pages,
+       /*
+        * If mm is set, we are being called by the MMU notifier and we
+        * should not pass a mm_struct to unpin_vector_pages(). This is to
+        * prevent a deadlock when hfi1_release_user_pages() attempts to
+        * take the mmap_sem, which the MMU notifier has already taken.
+        */
+       unpin_vector_pages(mm ? NULL : current->mm, node->pages, 0,
                           node->npages);
        /*
         * If called by the MMU notifier, we have to adjust the pinned
         * page count ourselves.
         */
-       if (notifier)
-               current->mm->pinned_vm -= node->npages;
+       if (mm)
+               mm->pinned_vm -= node->npages;
        kfree(node);
 }
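
The mm argument doubles as an "invoked from the MMU notifier" flag here. The notifier callchain already holds mmap_sem, and hfi1_release_user_pages() takes it again to update mm->pinned_vm, so passing a non-NULL mm through would self-deadlock; instead the pages are released without accounting and the counter is fixed up directly. Paraphrasing the two call shapes:

	if (mm) {
		/* MMU notifier: mmap_sem is already held by the caller. */
		unpin_vector_pages(NULL, node->pages, 0, node->npages);
		mm->pinned_vm -= node->npages;	/* manual accounting */
	} else {
		/* Process context: the helper may take mmap_sem itself. */
		unpin_vector_pages(current->mm, node->pages, 0, node->npages);
	}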
 
index 63bb87593af0ca0c42b60782d5808b2cec21bb60..d976e5e18d50ef7a469e437585da074d7f3b3af0 100644 (file)
@@ -155,7 +155,7 @@ static void mon_setup(struct net_device *dev)
        dev->netdev_ops = &mon_netdev_ops;
        dev->destructor = free_netdev;
        ether_setup(dev);
-       dev->tx_queue_len = 0;
+       dev->priv_flags |= IFF_NO_QUEUE;
        dev->type = ARPHRD_IEEE80211;
        /*
         * Use a locally administered address (IEEE 802)
index 9b7cc7dc7cb82b7a35af095a812762b40e848c18..13a5ddc2bea577c68d17ebf930893194341f27a1 100644 (file)
@@ -1792,7 +1792,7 @@ static short _rtl92e_tx(struct net_device *dev, struct sk_buff *skb)
        __skb_queue_tail(&ring->queue, skb);
        pdesc->OWN = 1;
        spin_unlock_irqrestore(&priv->irq_th_lock, flags);
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
 
        rtl92e_writew(dev, TPPoll, 0x01 << tcb_desc->queue_index);
        return 0;
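
Direct dev->trans_start writes like the one above are converted tree-wide because the timestamp moves into per-queue state in 4.7; netif_trans_update() and dev_trans_start() are the accessors. Approximately what the update helper does (see linux/netdevice.h):

	static inline void netif_trans_update_sketch(struct net_device *dev)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

		if (txq->trans_start != jiffies)
			txq->trans_start = jiffies;	/* TX watchdog stamp */
	}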
index cfab715495adc13a341c099a815427bdfa438b02..62154e3f4463d906f2f48afb88cd70ce06991c17 100644 (file)
@@ -1991,7 +1991,7 @@ static short rtllib_sta_ps_sleep(struct rtllib_device *ieee, u64 *time)
                return 2;
 
        if (!time_after(jiffies,
-                       ieee->dev->trans_start + msecs_to_jiffies(timeout)))
+                       dev_trans_start(ieee->dev) + msecs_to_jiffies(timeout)))
                return 0;
        if (!time_after(jiffies,
                        ieee->last_rx_ps_time + msecs_to_jiffies(timeout)))
index ae1274cfb392e3e868620541004abcde7efc172e..d705595766a9d8670cec9b029ce466798398973f 100644 (file)
@@ -249,7 +249,7 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee
                                ieee->seq_ctrl[0]++;
 
                        /* avoid watchdog triggers */
-                       ieee->dev->trans_start = jiffies;
+                       netif_trans_update(ieee->dev);
                        ieee->softmac_data_hard_start_xmit(skb,ieee->dev,ieee->basic_rate);
                        //dev_kfree_skb_any(skb);//edit by thomas
                }
@@ -302,7 +302,7 @@ inline void softmac_ps_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *i
                        ieee->seq_ctrl[0]++;
 
                /* avoid watchdog triggers */
-               ieee->dev->trans_start = jiffies;
+               netif_trans_update(ieee->dev);
                ieee->softmac_data_hard_start_xmit(skb,ieee->dev,ieee->basic_rate);
 
        }else{
@@ -1737,7 +1737,7 @@ static short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h,
                return 2;
 
        if(!time_after(jiffies,
-                      ieee->dev->trans_start + msecs_to_jiffies(timeout)))
+                      dev_trans_start(ieee->dev) + msecs_to_jiffies(timeout)))
                return 0;
 
        if(!time_after(jiffies,
@@ -2205,7 +2205,7 @@ static void ieee80211_resume_tx(struct ieee80211_device *ieee)
                                ieee->dev, ieee->rate);
                                //(i+1)<ieee->tx_pending.txb->nr_frags);
                        ieee->stats.tx_packets++;
-                       ieee->dev->trans_start = jiffies;
+                       netif_trans_update(ieee->dev);
                }
        }
 
index 849a95ef723c6e2d6602b51bbeba836b6f0ab63b..4af0140c6ead5a369797c04ca4d737cf96614097 100644 (file)
@@ -1108,7 +1108,7 @@ static void rtl8192_tx_isr(struct urb *tx_urb)
 
        if (tcb_desc->queue_index != TXCMD_QUEUE) {
                if (tx_urb->status == 0) {
-                       dev->trans_start = jiffies;
+                       netif_trans_update(dev);
                        priv->stats.txoktotal++;
                        priv->ieee80211->LinkDetectInfo.NumTxOkInPeriod++;
                        priv->stats.txbytesunicast +=
@@ -1715,7 +1715,7 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
                                return -1;
                        }
                }
-               dev->trans_start = jiffies;
+               netif_trans_update(dev);
                atomic_inc(&priv->tx_pending[tcb_desc->queue_index]);
                return 0;
        }
index f4fff385aeb2d48d2d74ae0f3c1998cae053586e..7dd1540ebedd1458547561d2210383b1249ef91a 100644 (file)
@@ -2113,10 +2113,10 @@ static int on_action_public23a(struct rtw_adapter *padapter,
 
        if (channel <= RTW_CH_MAX_2G_CHANNEL)
                freq = ieee80211_channel_to_frequency(channel,
-                                                     IEEE80211_BAND_2GHZ);
+                                                     NL80211_BAND_2GHZ);
        else
                freq = ieee80211_channel_to_frequency(channel,
-                                                     IEEE80211_BAND_5GHZ);
+                                                     NL80211_BAND_5GHZ);
 
        if (cfg80211_rx_mgmt(padapter->rtw_wdev, freq, 0, pframe,
                             skb->len, 0))
index 3aa40a32555ed88904837710545c47c5e59217c8..634102e1bda6ad72301da92ad910fb821bc8b77d 100644 (file)
@@ -266,7 +266,7 @@ join_res:
 
 /* Represent channel details, subset of ieee80211_channel */
 struct rtw_ieee80211_channel {
-       /* enum ieee80211_band band; */
+       /* enum nl80211_band band; */
        /* u16 center_freq; */
        u16 hw_value;
        u32 flags;
index 12d18440e82448a354fb98760cf4a2383cc34f7c..0da559d929bcac9ac7d5f74d67b14b2c88546567 100644 (file)
@@ -39,7 +39,7 @@ static const u32 rtw_cipher_suites[] = {
 }
 
 #define CHAN2G(_channel, _freq, _flags) {                      \
-       .band                   = IEEE80211_BAND_2GHZ,          \
+       .band                   = NL80211_BAND_2GHZ,            \
        .center_freq            = (_freq),                      \
        .hw_value               = (_channel),                   \
        .flags                  = (_flags),                     \
@@ -48,7 +48,7 @@ static const u32 rtw_cipher_suites[] = {
 }
 
 #define CHAN5G(_channel, _flags) {                             \
-       .band                   = IEEE80211_BAND_5GHZ,          \
+       .band                   = NL80211_BAND_5GHZ,            \
        .center_freq            = 5000 + (5 * (_channel)),      \
        .hw_value               = (_channel),                   \
        .flags                  = (_flags),                     \
@@ -143,15 +143,15 @@ static void rtw_5g_rates_init(struct ieee80211_rate *rates)
 }
 
 static struct ieee80211_supported_band *
-rtw_spt_band_alloc(enum ieee80211_band band)
+rtw_spt_band_alloc(enum nl80211_band band)
 {
        struct ieee80211_supported_band *spt_band = NULL;
        int n_channels, n_bitrates;
 
-       if (band == IEEE80211_BAND_2GHZ) {
+       if (band == NL80211_BAND_2GHZ) {
                n_channels = RTW_2G_CHANNELS_NUM;
                n_bitrates = RTW_G_RATES_NUM;
-       } else if (band == IEEE80211_BAND_5GHZ) {
+       } else if (band == NL80211_BAND_5GHZ) {
                n_channels = RTW_5G_CHANNELS_NUM;
                n_bitrates = RTW_A_RATES_NUM;
        } else {
@@ -176,10 +176,10 @@ rtw_spt_band_alloc(enum ieee80211_band band)
        spt_band->n_channels = n_channels;
        spt_band->n_bitrates = n_bitrates;
 
-       if (band == IEEE80211_BAND_2GHZ) {
+       if (band == NL80211_BAND_2GHZ) {
                rtw_2g_channels_init(spt_band->channels);
                rtw_2g_rates_init(spt_band->bitrates);
-       } else if (band == IEEE80211_BAND_5GHZ) {
+       } else if (band == NL80211_BAND_5GHZ) {
                rtw_5g_channels_init(spt_band->channels);
                rtw_5g_rates_init(spt_band->bitrates);
        }
@@ -257,10 +257,10 @@ static int rtw_cfg80211_inform_bss(struct rtw_adapter *padapter,
        channel = pnetwork->network.DSConfig;
        if (channel <= RTW_CH_MAX_2G_CHANNEL)
                freq = ieee80211_channel_to_frequency(channel,
-                                                     IEEE80211_BAND_2GHZ);
+                                                     NL80211_BAND_2GHZ);
        else
                freq = ieee80211_channel_to_frequency(channel,
-                                                     IEEE80211_BAND_5GHZ);
+                                                     NL80211_BAND_5GHZ);
 
        notify_channel = ieee80211_get_channel(wiphy, freq);
 
@@ -322,11 +322,11 @@ void rtw_cfg80211_indicate_connect(struct rtw_adapter *padapter)
                if (channel <= RTW_CH_MAX_2G_CHANNEL)
                        freq =
                            ieee80211_channel_to_frequency(channel,
-                                                          IEEE80211_BAND_2GHZ);
+                                                          NL80211_BAND_2GHZ);
                else
                        freq =
                            ieee80211_channel_to_frequency(channel,
-                                                          IEEE80211_BAND_5GHZ);
+                                                          NL80211_BAND_5GHZ);
 
                notify_channel = ieee80211_get_channel(wiphy, freq);
 
@@ -2360,10 +2360,10 @@ void rtw_cfg80211_indicate_sta_assoc(struct rtw_adapter *padapter,
        channel = pmlmeext->cur_channel;
        if (channel <= RTW_CH_MAX_2G_CHANNEL)
                freq = ieee80211_channel_to_frequency(channel,
-                                                     IEEE80211_BAND_2GHZ);
+                                                     NL80211_BAND_2GHZ);
        else
                freq = ieee80211_channel_to_frequency(channel,
-                                                     IEEE80211_BAND_5GHZ);
+                                                     NL80211_BAND_5GHZ);
 
        cfg80211_rx_mgmt(padapter->rtw_wdev, freq, 0, pmgmt_frame, frame_len,
                         0);
@@ -2392,10 +2392,10 @@ void rtw_cfg80211_indicate_sta_disassoc(struct rtw_adapter *padapter,
        channel = pmlmeext->cur_channel;
        if (channel <= RTW_CH_MAX_2G_CHANNEL)
                freq = ieee80211_channel_to_frequency(channel,
-                                                     IEEE80211_BAND_2GHZ);
+                                                     NL80211_BAND_2GHZ);
        else
                freq = ieee80211_channel_to_frequency(channel,
-                                                     IEEE80211_BAND_5GHZ);
+                                                     NL80211_BAND_5GHZ);
 
        mgmt.frame_control =
                cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DEAUTH);
@@ -3109,7 +3109,7 @@ static struct cfg80211_ops rtw_cfg80211_ops = {
 };
 
 static void rtw_cfg80211_init_ht_capab(struct ieee80211_sta_ht_cap *ht_cap,
-                                      enum ieee80211_band band, u8 rf_type)
+                                      enum nl80211_band band, u8 rf_type)
 {
 
 #define MAX_BIT_RATE_40MHZ_MCS15       300     /* Mbps */
@@ -3133,7 +3133,7 @@ static void rtw_cfg80211_init_ht_capab(struct ieee80211_sta_ht_cap *ht_cap,
        ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
 
        /*
-        *hw->wiphy->bands[IEEE80211_BAND_2GHZ]
+        *hw->wiphy->bands[NL80211_BAND_2GHZ]
         *base on ant_num
         *rx_mask: RX mask
         *if rx_ant = 1 rx_mask[0]= 0xff;==>MCS0-MCS7
@@ -3173,19 +3173,19 @@ void rtw_cfg80211_init_wiphy(struct rtw_adapter *padapter)
 
        /* if (padapter->registrypriv.wireless_mode & WIRELESS_11G) */
        {
-               bands = wiphy->bands[IEEE80211_BAND_2GHZ];
+               bands = wiphy->bands[NL80211_BAND_2GHZ];
                if (bands)
                        rtw_cfg80211_init_ht_capab(&bands->ht_cap,
-                                                  IEEE80211_BAND_2GHZ,
+                                                  NL80211_BAND_2GHZ,
                                                   rf_type);
        }
 
        /* if (padapter->registrypriv.wireless_mode & WIRELESS_11A) */
        {
-               bands = wiphy->bands[IEEE80211_BAND_5GHZ];
+               bands = wiphy->bands[NL80211_BAND_5GHZ];
                if (bands)
                        rtw_cfg80211_init_ht_capab(&bands->ht_cap,
-                                                  IEEE80211_BAND_5GHZ,
+                                                  NL80211_BAND_5GHZ,
                                                   rf_type);
        }
 }
@@ -3224,11 +3224,11 @@ static void rtw_cfg80211_preinit_wiphy(struct rtw_adapter *padapter,
        wiphy->n_cipher_suites = ARRAY_SIZE(rtw_cipher_suites);
 
        /* if (padapter->registrypriv.wireless_mode & WIRELESS_11G) */
-       wiphy->bands[IEEE80211_BAND_2GHZ] =
-           rtw_spt_band_alloc(IEEE80211_BAND_2GHZ);
+       wiphy->bands[NL80211_BAND_2GHZ] =
+           rtw_spt_band_alloc(NL80211_BAND_2GHZ);
        /* if (padapter->registrypriv.wireless_mode & WIRELESS_11A) */
-       wiphy->bands[IEEE80211_BAND_5GHZ] =
-           rtw_spt_band_alloc(IEEE80211_BAND_5GHZ);
+       wiphy->bands[NL80211_BAND_5GHZ] =
+           rtw_spt_band_alloc(NL80211_BAND_5GHZ);
 
        wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
        wiphy->flags |= WIPHY_FLAG_OFFCHAN_TX | WIPHY_FLAG_HAVE_AP_SME;
@@ -3313,8 +3313,8 @@ void rtw_wdev_free(struct wireless_dev *wdev)
        if (!wdev)
                return;
 
-       kfree(wdev->wiphy->bands[IEEE80211_BAND_2GHZ]);
-       kfree(wdev->wiphy->bands[IEEE80211_BAND_5GHZ]);
+       kfree(wdev->wiphy->bands[NL80211_BAND_2GHZ]);
+       kfree(wdev->wiphy->bands[NL80211_BAND_5GHZ]);
 
        wiphy_free(wdev->wiphy);
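
All of the IEEE80211_BAND_* / enum ieee80211_band uses in this file become the nl80211 uAPI equivalents; the two enums carry identical values, so the substitution is mechanical. Illustrative only:

	enum nl80211_band band = NL80211_BAND_2GHZ;	/* same value as old IEEE80211_BAND_2GHZ */
	int freq = ieee80211_channel_to_frequency(1, band);	/* channel 1 -> 2412 MHz */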
 
index 9ac1ef9d0d5126a9e59e42eaf37e97806684cacc..b7d43a5622bacfaa4aae74e627b15c0d930e78a3 100644 (file)
@@ -144,7 +144,7 @@ void vnt_init_bands(struct vnt_private *priv)
                        ch[i].flags = IEEE80211_CHAN_NO_HT40;
                }
 
-               priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+               priv->hw->wiphy->bands[NL80211_BAND_5GHZ] =
                                                &vnt_supported_5ghz_band;
        /* fallthrough */
        case RF_RFMD2959:
@@ -159,7 +159,7 @@ void vnt_init_bands(struct vnt_private *priv)
                        ch[i].flags = IEEE80211_CHAN_NO_HT40;
                }
 
-               priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+               priv->hw->wiphy->bands[NL80211_BAND_2GHZ] =
                                                &vnt_supported_2ghz_band;
                break;
        }
index c3eea07ca97ecee940172358878a5d32c52902bf..494164045a0f9a785581940e1470f77458aab9b6 100644 (file)
@@ -812,7 +812,7 @@ static int vnt_int_report_rate(struct vnt_private *priv,
                else if (fb_option & FIFOCTL_AUTO_FB_1)
                        tx_rate = fallback_rate1[tx_rate][retry];
 
-               if (info->band == IEEE80211_BAND_5GHZ)
+               if (info->band == NL80211_BAND_5GHZ)
                        idx = tx_rate - RATE_6M;
                else
                        idx = tx_rate;
@@ -1290,7 +1290,7 @@ static int vnt_config(struct ieee80211_hw *hw, u32 changed)
            (conf->flags & IEEE80211_CONF_OFFCHANNEL)) {
                set_channel(priv, conf->chandef.chan);
 
-               if (conf->chandef.chan->band == IEEE80211_BAND_5GHZ)
+               if (conf->chandef.chan->band == NL80211_BAND_5GHZ)
                        bb_type = BB_TYPE_11A;
                else
                        bb_type = BB_TYPE_11G;
index 1a2dda09b69d70f706fc2420b633e34492935a5c..e4c3165ae027556238fabb2f38405d3fd57b7873 100644 (file)
@@ -1307,7 +1307,7 @@ int vnt_generate_fifo_header(struct vnt_private *priv, u32 dma_idx,
        }
 
        if (current_rate > RATE_11M) {
-               if (info->band == IEEE80211_BAND_5GHZ) {
+               if (info->band == NL80211_BAND_5GHZ) {
                        pkt_type = PK_TYPE_11A;
                } else {
                        if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
index a0fe288c132283d671d9be931bc1454c5c042d21..a4299f405d7f5dddc18a1a8287a76f9655ada80a 100644 (file)
@@ -153,7 +153,7 @@ void vnt_init_bands(struct vnt_private *priv)
                        ch[i].flags = IEEE80211_CHAN_NO_HT40;
                }
 
-               priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+               priv->hw->wiphy->bands[NL80211_BAND_5GHZ] =
                                                &vnt_supported_5ghz_band;
        /* fallthrough */
        case RF_AL2230:
@@ -167,7 +167,7 @@ void vnt_init_bands(struct vnt_private *priv)
                        ch[i].flags = IEEE80211_CHAN_NO_HT40;
                }
 
-               priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+               priv->hw->wiphy->bands[NL80211_BAND_2GHZ] =
                                                &vnt_supported_2ghz_band;
                break;
        }
index 8d05acbc0e238b76b0ff457c22be23eaa05c4aa7..73538fb4e4e2f80760022b1cde45dca9eaf5171b 100644 (file)
@@ -97,7 +97,7 @@ static int vnt_int_report_rate(struct vnt_private *priv, u8 pkt_no, u8 tsr)
                else if (context->fb_option == AUTO_FB_1)
                        tx_rate = fallback_rate1[tx_rate][retry];
 
-               if (info->band == IEEE80211_BAND_5GHZ)
+               if (info->band == NL80211_BAND_5GHZ)
                        idx = tx_rate - RATE_6M;
                else
                        idx = tx_rate;
index f9afab77b79fb5e5718d1b19a2df5e7cb1cd35fa..fc5fe4ec6d05edfc66d2855b81a1d795772a62a0 100644 (file)
@@ -662,7 +662,7 @@ static int vnt_config(struct ieee80211_hw *hw, u32 changed)
                        (conf->flags & IEEE80211_CONF_OFFCHANNEL)) {
                vnt_set_channel(priv, conf->chandef.chan->hw_value);
 
-               if (conf->chandef.chan->band == IEEE80211_BAND_5GHZ)
+               if (conf->chandef.chan->band == NL80211_BAND_5GHZ)
                        bb_type = BB_TYPE_11A;
                else
                        bb_type = BB_TYPE_11G;
index b74e3200131843a762515ce66bc88ced94f3636b..aa59e7f14ab30c74a68c862b6b260d28b19c8949 100644 (file)
@@ -813,7 +813,7 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
        }
 
        if (current_rate > RATE_11M) {
-               if (info->band == IEEE80211_BAND_5GHZ) {
+               if (info->band == NL80211_BAND_5GHZ) {
                        pkt_type = PK_TYPE_11A;
                } else {
                        if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
index 448a5c8c451460756b7cfbd4dbff757fa48e03a1..544917d8b2df9272694584c5fdb9d41de885b1a5 100644 (file)
@@ -102,7 +102,7 @@ static u8 op_ifcs;
 u8 wilc_initialized = 1;
 
 #define CHAN2G(_channel, _freq, _flags) {       \
-               .band             = IEEE80211_BAND_2GHZ, \
+               .band             = NL80211_BAND_2GHZ, \
                .center_freq      = (_freq),             \
                .hw_value         = (_channel),          \
                .flags            = (_flags),            \
@@ -241,7 +241,7 @@ static void refresh_scan(void *user_void, u8 all, bool direct_scan)
                        struct ieee80211_channel *channel;
 
                        if (network_info) {
-                               freq = ieee80211_channel_to_frequency((s32)network_info->ch, IEEE80211_BAND_2GHZ);
+                               freq = ieee80211_channel_to_frequency((s32)network_info->ch, NL80211_BAND_2GHZ);
                                channel = ieee80211_get_channel(wiphy, freq);
 
                                rssi = get_rssi_avg(network_info);
@@ -409,7 +409,7 @@ static void CfgScanResult(enum scan_event scan_event,
                                return;
 
                        if (network_info) {
-                               s32Freq = ieee80211_channel_to_frequency((s32)network_info->ch, IEEE80211_BAND_2GHZ);
+                               s32Freq = ieee80211_channel_to_frequency((s32)network_info->ch, NL80211_BAND_2GHZ);
                                channel = ieee80211_get_channel(wiphy, s32Freq);
 
                                if (!channel)
@@ -1451,7 +1451,7 @@ void WILC_WFI_p2p_rx(struct net_device *dev, u8 *buff, u32 size)
                        return;
                }
        } else {
-               s32Freq = ieee80211_channel_to_frequency(curr_channel, IEEE80211_BAND_2GHZ);
+               s32Freq = ieee80211_channel_to_frequency(curr_channel, NL80211_BAND_2GHZ);
 
                if (ieee80211_is_action(buff[FRAME_TYPE_ID])) {
                        if (priv->bCfgScanning && time_after_eq(jiffies, (unsigned long)pstrWFIDrv->p2p_timeout)) {
@@ -2246,7 +2246,7 @@ static struct wireless_dev *WILC_WFI_CfgAlloc(void)
        WILC_WFI_band_2ghz.ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K;
        WILC_WFI_band_2ghz.ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
 
-       wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &WILC_WFI_band_2ghz;
+       wdev->wiphy->bands[NL80211_BAND_2GHZ] = &WILC_WFI_band_2ghz;
 
        return wdev;
 
index 8bad018eda478d7f741bcc6950f5772e6f87afc9..2438cf7cc695cdbccf6b9e836f9a26ae9f8cf087 100644 (file)
@@ -415,7 +415,7 @@ static int prism2_scan(struct wiphy *wiphy,
                ie_len = ie_buf[1] + 2;
                memcpy(&ie_buf[2], &(msg2.ssid.data.data), msg2.ssid.data.len);
                freq = ieee80211_channel_to_frequency(msg2.dschannel.data,
-                                                     IEEE80211_BAND_2GHZ);
+                                                     NL80211_BAND_2GHZ);
                bss = cfg80211_inform_bss(wiphy,
                        ieee80211_get_channel(wiphy, freq),
                        CFG80211_BSS_FTYPE_UNKNOWN,
@@ -758,9 +758,9 @@ static struct wiphy *wlan_create_wiphy(struct device *dev, wlandevice_t *wlandev
        priv->band.n_channels = ARRAY_SIZE(prism2_channels);
        priv->band.bitrates = priv->rates;
        priv->band.n_bitrates = ARRAY_SIZE(prism2_rates);
-       priv->band.band = IEEE80211_BAND_2GHZ;
+       priv->band.band = NL80211_BAND_2GHZ;
        priv->band.ht_cap.ht_supported = false;
-       wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+       wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
 
        set_wiphy_dev(wiphy, dev);
        wiphy->privid = prism2_wiphy_privid;
index 88255ce2871b5e1ad6ff7215ce38d2b30c8e8f0c..1f9dfba5dbb3a9843e60fe784ae3dd20408725ab 100644 (file)
@@ -393,7 +393,7 @@ static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
                goto failed;
        }
 
-       netdev->trans_start = jiffies;
+       netif_trans_update(netdev);
 
        netdev->stats.tx_packets++;
        /* count only the packet payload */
index a24443ba59ea04e452674403d014e507ea44b4d6..97e5b69e066852b1ae1eefd20161fde13c44d352 100644 (file)
@@ -779,14 +779,6 @@ static int lio_target_init_nodeacl(struct se_node_acl *se_nacl,
        return 0;
 }
 
-static void lio_target_cleanup_nodeacl( struct se_node_acl *se_nacl)
-{
-       struct iscsi_node_acl *acl = container_of(se_nacl,
-                       struct iscsi_node_acl, se_node_acl);
-
-       configfs_remove_default_groups(&acl->se_node_acl.acl_fabric_stat_group);
-}
-
 /* End items for lio_target_acl_cit */
 
 /* Start items for lio_target_tpg_attrib_cit */
@@ -1247,6 +1239,16 @@ static struct se_wwn *lio_target_call_coreaddtiqn(
        if (IS_ERR(tiqn))
                return ERR_CAST(tiqn);
 
+       pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
+       pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated Node:"
+                       " %s\n", name);
+       return &tiqn->tiqn_wwn;
+}
+
+static void lio_target_add_wwn_groups(struct se_wwn *wwn)
+{
+       struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
+
        config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_instance_group,
                        "iscsi_instance", &iscsi_stat_instance_cit);
        configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_instance_group,
@@ -1271,12 +1273,6 @@ static struct se_wwn *lio_target_call_coreaddtiqn(
                        "iscsi_logout_stats", &iscsi_stat_logout_cit);
        configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_logout_stats_group,
                        &tiqn->tiqn_wwn.fabric_stat_group);
-
-
-       pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
-       pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated Node:"
-                       " %s\n", name);
-       return &tiqn->tiqn_wwn;
 }
 
 static void lio_target_call_coredeltiqn(
@@ -1284,8 +1280,6 @@ static void lio_target_call_coredeltiqn(
 {
        struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
 
-       configfs_remove_default_groups(&tiqn->tiqn_wwn.fabric_stat_group);
-
        pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s\n",
                        tiqn->tiqn);
        iscsit_del_tiqn(tiqn);
@@ -1660,12 +1654,12 @@ const struct target_core_fabric_ops iscsi_ops = {
        .aborted_task                   = lio_aborted_task,
        .fabric_make_wwn                = lio_target_call_coreaddtiqn,
        .fabric_drop_wwn                = lio_target_call_coredeltiqn,
+       .add_wwn_groups                 = lio_target_add_wwn_groups,
        .fabric_make_tpg                = lio_target_tiqn_addtpg,
        .fabric_drop_tpg                = lio_target_tiqn_deltpg,
        .fabric_make_np                 = lio_target_call_addnptotpg,
        .fabric_drop_np                 = lio_target_call_delnpfromtpg,
        .fabric_init_nodeacl            = lio_target_init_nodeacl,
-       .fabric_cleanup_nodeacl         = lio_target_cleanup_nodeacl,
 
        .tfc_discovery_attrs            = lio_target_discovery_auth_attrs,
        .tfc_wwn_attrs                  = lio_target_wwn_attrs,
index 428b0d9e3dbab246e579a027344dbc60f6a64098..57720385a7514bbbdd805de63bf6c155859e1c5e 100644 (file)
@@ -1283,9 +1283,8 @@ static int iscsit_do_rx_data(
        iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC,
                      count->iov, count->iov_count, data);
 
-       while (total_rx < data) {
-               rx_loop = sock_recvmsg(conn->sock, &msg,
-                                     (data - total_rx), MSG_WAITALL);
+       while (msg_data_left(&msg)) {
+               rx_loop = sock_recvmsg(conn->sock, &msg, MSG_WAITALL);
                if (rx_loop <= 0) {
                        pr_debug("rx_loop: %d total_rx: %d\n",
                                rx_loop, total_rx);
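
Since the byte count now lives in the msghdr's iov_iter (set up by iov_iter_kvec() just above the hunk), sock_recvmsg() no longer takes a size argument and the loop can simply drain the iterator. The shape of the converted receive loop, with assumed locals:

	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, iov, nvec, total_bytes);
	while (msg_data_left(&msg)) {
		int n = sock_recvmsg(sock, &msg, MSG_WAITALL);

		if (n <= 0)
			return n;	/* error, or 0 on orderly shutdown */
		rx += n;		/* the iterator advanced by n bytes */
	}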
index 1bd5c72b663e29a32d50c88dd0fccf344bb72f30..31a096aa16aba6dc4239cd412f813709709723f8 100644 (file)
@@ -338,10 +338,8 @@ static void target_fabric_nacl_base_release(struct config_item *item)
 {
        struct se_node_acl *se_nacl = container_of(to_config_group(item),
                        struct se_node_acl, acl_group);
-       struct target_fabric_configfs *tf = se_nacl->se_tpg->se_tpg_wwn->wwn_tf;
 
-       if (tf->tf_ops->fabric_cleanup_nodeacl)
-               tf->tf_ops->fabric_cleanup_nodeacl(se_nacl);
+       configfs_remove_default_groups(&se_nacl->acl_fabric_stat_group);
        core_tpg_del_initiator_node_acl(se_nacl);
 }
 
@@ -383,14 +381,6 @@ static struct config_group *target_fabric_make_nodeacl(
        if (IS_ERR(se_nacl))
                return ERR_CAST(se_nacl);
 
-       if (tf->tf_ops->fabric_init_nodeacl) {
-               int ret = tf->tf_ops->fabric_init_nodeacl(se_nacl, name);
-               if (ret) {
-                       core_tpg_del_initiator_node_acl(se_nacl);
-                       return ERR_PTR(ret);
-               }
-       }
-
        config_group_init_type_name(&se_nacl->acl_group, name,
                        &tf->tf_tpg_nacl_base_cit);
 
@@ -414,6 +404,15 @@ static struct config_group *target_fabric_make_nodeacl(
        configfs_add_default_group(&se_nacl->acl_fabric_stat_group,
                        &se_nacl->acl_group);
 
+       if (tf->tf_ops->fabric_init_nodeacl) {
+               int ret = tf->tf_ops->fabric_init_nodeacl(se_nacl, name);
+               if (ret) {
+                       configfs_remove_default_groups(&se_nacl->acl_fabric_stat_group);
+                       core_tpg_del_initiator_node_acl(se_nacl);
+                       return ERR_PTR(ret);
+               }
+       }
+
        return &se_nacl->acl_group;
 }
 
@@ -892,6 +891,7 @@ static void target_fabric_release_wwn(struct config_item *item)
                                struct se_wwn, wwn_group);
        struct target_fabric_configfs *tf = wwn->wwn_tf;
 
+       configfs_remove_default_groups(&wwn->fabric_stat_group);
        tf->tf_ops->fabric_drop_wwn(wwn);
 }
 
@@ -945,6 +945,8 @@ static struct config_group *target_fabric_make_wwn(
                        &tf->tf_wwn_fabric_stats_cit);
        configfs_add_default_group(&wwn->fabric_stat_group, &wwn->wwn_group);
 
+       if (tf->tf_ops->add_wwn_groups)
+               tf->tf_ops->add_wwn_groups(wwn);
        return &wwn->wwn_group;
 }
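
The net effect of these target-core hunks is a pairing rule: default groups are registered only once the parent group exists, a fallible callback runs only after everything it would force an unwind of is in place, and every configfs_add_default_group() has a matching configfs_remove_default_groups() on the release path. A condensed sketch with a hypothetical mini-fabric:

	config_group_init_type_name(&nacl->fabric_stat_group,
				    "fabric_statistics", &stat_cit);
	configfs_add_default_group(&nacl->fabric_stat_group, &nacl->group);

	if (ops->init_nodeacl) {
		int ret = ops->init_nodeacl(nacl, name);

		if (ret) {	/* unwind in reverse order */
			configfs_remove_default_groups(&nacl->fabric_stat_group);
			return ERR_PTR(ret);
		}
	}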
 
index c37eedc35a240d0d3fe4387698f50e7884e52cfc..3c3dc4a3d52cfb4685bdcbd2c62b55d33447804d 100644 (file)
@@ -376,6 +376,8 @@ config MTK_THERMAL
        tristate "Temperature sensor driver for mediatek SoCs"
        depends on ARCH_MEDIATEK || COMPILE_TEST
        depends on HAS_IOMEM
+       depends on NVMEM || NVMEM=n
+       depends on RESET_CONTROLLER
        default y
        help
          Enable this option if you want to have support for thermal management
index 36d07295f8e3ac6724181f9dc99152c78db1eb08..5e820b5415063b17fdd2982c0adfd5885db769b9 100644 (file)
@@ -68,12 +68,12 @@ static inline int _step_to_temp(int step)
         * Every step equals (1 * 200) / 255 celsius, and finally
         * need convert to millicelsius.
         */
-       return (HISI_TEMP_BASE + (step * 200 / 255)) * 1000;
+       return (HISI_TEMP_BASE * 1000 + (step * 200000 / 255));
 }
 
 static inline long _temp_to_step(long temp)
 {
-       return ((temp / 1000 - HISI_TEMP_BASE) * 255 / 200);
+       return ((temp - HISI_TEMP_BASE * 1000) * 255) / 200000;
 }
 
 static long hisi_thermal_get_sensor_temp(struct hisi_thermal_data *data,
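
The old conversion rounded to whole degrees (step * 200 / 255 in integer math) before scaling to millicelsius; keeping everything in millicelsius preserves the sub-degree part. Worked example with step = 100:

	/* old: (BASE + 100 * 200 / 255) * 1000   == BASE * 1000 + 78000
	 * new:  BASE * 1000 + 100 * 200000 / 255 == BASE * 1000 + 78431 */
	static inline int step_to_mcelsius(int base_c, int step)
	{
		return base_c * 1000 + step * 200000 / 255;
	}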
index 3d93b1c07ceef54d533515e9b6b47f4c39ddd8b1..507632b9648e3e193942ca3cc190c2cfab705d66 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/thermal.h>
 #include <linux/reset.h>
 #include <linux/types.h>
-#include <linux/nvmem-consumer.h>
 
 /* AUXADC Registers */
 #define AUXADC_CON0_V          0x000
@@ -619,7 +618,7 @@ static struct platform_driver mtk_thermal_driver = {
 
 module_platform_driver(mtk_thermal_driver);
 
-MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de");
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
 MODULE_AUTHOR("Hanyi Wu <hanyi.wu@mediatek.com>");
 MODULE_DESCRIPTION("Mediatek thermal driver");
 MODULE_LICENSE("GPL v2");
index 49ac23d3e776c8c98b8884fde954fee4d0b3af02..d8ec44b194d64a012c1dff1472c233f83f1119b0 100644 (file)
@@ -803,8 +803,8 @@ static int thermal_of_populate_trip(struct device_node *np,
  * otherwise, it returns a corresponding ERR_PTR(). Caller must
  * check the return value with help of IS_ERR() helper.
  */
-static struct __thermal_zone *
-thermal_of_build_thermal_zone(struct device_node *np)
+static struct __thermal_zone
+__init *thermal_of_build_thermal_zone(struct device_node *np)
 {
        struct device_node *child = NULL, *gchild;
        struct __thermal_zone *tz;
index 1246aa6fcab0caeda03ace6806c4df2d46c1f164..2f1a863a8e15bc834e007fea264d262d180a04cf 100644 (file)
@@ -301,7 +301,7 @@ static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors,
        capped_extra_power = 0;
        extra_power = 0;
        for (i = 0; i < num_actors; i++) {
-               u64 req_range = req_power[i] * power_range;
+               u64 req_range = (u64)req_power[i] * power_range;
 
                granted_power[i] = DIV_ROUND_CLOSEST_ULL(req_range,
                                                         total_req_power);
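
req_power[i] and power_range are both u32, so without the cast the multiplication happens in 32 bits and can wrap before being widened into req_range. For example:

	u32 a = 100000, b = 100000;
	u64 wrong = a * b;	/* 32-bit product wraps to 1410065408 */
	u64 right = (u64)a * b;	/* widened first: 10000000000 */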
index d4b54653ecf8d7f489a6200a8491b46b52f3ba14..5133cd1e10b7ae99d838823af2eef272aa00ab76 100644 (file)
@@ -688,7 +688,7 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr,
 {
        struct thermal_zone_device *tz = to_thermal_zone(dev);
        int trip, ret;
-       unsigned long temperature;
+       int temperature;
 
        if (!tz->ops->set_trip_temp)
                return -EPERM;
@@ -696,7 +696,7 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr,
        if (!sscanf(attr->attr.name, "trip_point_%d_temp", &trip))
                return -EINVAL;
 
-       if (kstrtoul(buf, 10, &temperature))
+       if (kstrtoint(buf, 10, &temperature))
                return -EINVAL;
 
        ret = tz->ops->set_trip_temp(tz, trip, temperature);
@@ -899,9 +899,9 @@ emul_temp_store(struct device *dev, struct device_attribute *attr,
 {
        struct thermal_zone_device *tz = to_thermal_zone(dev);
        int ret = 0;
-       unsigned long temperature;
+       int temperature;
 
-       if (kstrtoul(buf, 10, &temperature))
+       if (kstrtoint(buf, 10, &temperature))
                return -EINVAL;
 
        if (!tz->ops->set_emul_temp) {
@@ -959,7 +959,7 @@ static DEVICE_ATTR(sustainable_power, S_IWUSR | S_IRUGO, sustainable_power_show,
        struct thermal_zone_device *tz = to_thermal_zone(dev);          \
                                                                        \
        if (tz->tzp)                                                    \
-               return sprintf(buf, "%u\n", tz->tzp->name);             \
+               return sprintf(buf, "%d\n", tz->tzp->name);             \
        else                                                            \
                return -EIO;                                            \
        }                                                               \
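
Trip and emulated temperatures are in millicelsius and may legitimately be negative (a reading of -20 C is -20000), but kstrtoul() rejects a leading '-'; switching the locals to int plus kstrtoint() (and %d in the show path) makes the sysfs round trip work for sub-zero values:

	int temperature;

	if (kstrtoint(buf, 10, &temperature))	/* "-20000" now parses */
		return -EINVAL;
	ret = tz->ops->set_trip_temp(tz, trip, temperature);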
index c01620780f5baf503ee6c46eef9714ef9ac2bae4..0c27a00ab42dd92677bef588f9f20970d1ab9735 100644 (file)
@@ -2662,7 +2662,7 @@ static int gsm_mux_net_start_xmit(struct sk_buff *skb,
        STATS(net).tx_bytes += skb->len;
        gsm_dlci_data_kick(dlci);
        /* And tell the kernel when the last transmit started. */
-       net->trans_start = jiffies;
+       netif_trans_update(net);
        muxnet_put(mux_net);
        return NETDEV_TX_OK;
 }
index e16a49b507efbd57734b0b8f522d0e2a5dfe03ce..cf0dc51a2690bac16c108952f93dda309fa89b7e 100644 (file)
@@ -626,7 +626,7 @@ static int pty_unix98_ioctl(struct tty_struct *tty,
  */
 
 static struct tty_struct *ptm_unix98_lookup(struct tty_driver *driver,
-               struct inode *ptm_inode, int idx)
+               struct file *file, int idx)
 {
        /* Master must be open via /dev/ptmx */
        return ERR_PTR(-EIO);
@@ -642,12 +642,12 @@ static struct tty_struct *ptm_unix98_lookup(struct tty_driver *driver,
  */
 
 static struct tty_struct *pts_unix98_lookup(struct tty_driver *driver,
-               struct inode *pts_inode, int idx)
+               struct file *file, int idx)
 {
        struct tty_struct *tty;
 
        mutex_lock(&devpts_mutex);
-       tty = devpts_get_priv(pts_inode);
+       tty = devpts_get_priv(file->f_path.dentry);
        mutex_unlock(&devpts_mutex);
        /* Master must be open before slave */
        if (!tty)
@@ -663,14 +663,14 @@ static int pty_unix98_install(struct tty_driver *driver, struct tty_struct *tty)
 /* this is called once with whichever end is closed last */
 static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
 {
-       struct inode *ptmx_inode;
+       struct pts_fs_info *fsi;
 
        if (tty->driver->subtype == PTY_TYPE_MASTER)
-               ptmx_inode = tty->driver_data;
+               fsi = tty->driver_data;
        else
-               ptmx_inode = tty->link->driver_data;
-       devpts_kill_index(ptmx_inode, tty->index);
-       devpts_del_ref(ptmx_inode);
+               fsi = tty->link->driver_data;
+       devpts_kill_index(fsi, tty->index);
+       devpts_put_ref(fsi);
 }
 
 static const struct tty_operations ptm_unix98_ops = {
@@ -720,8 +720,9 @@ static const struct tty_operations pty_unix98_ops = {
 
 static int ptmx_open(struct inode *inode, struct file *filp)
 {
+       struct pts_fs_info *fsi;
        struct tty_struct *tty;
-       struct inode *slave_inode;
+       struct dentry *dentry;
        int retval;
        int index;
 
@@ -734,54 +735,46 @@ static int ptmx_open(struct inode *inode, struct file *filp)
        if (retval)
                return retval;
 
+       fsi = devpts_get_ref(inode, filp);
+       retval = -ENODEV;
+       if (!fsi)
+               goto out_free_file;
+
        /* find a device that is not in use. */
        mutex_lock(&devpts_mutex);
-       index = devpts_new_index(inode);
-       if (index < 0) {
-               retval = index;
-               mutex_unlock(&devpts_mutex);
-               goto err_file;
-       }
-
+       index = devpts_new_index(fsi);
        mutex_unlock(&devpts_mutex);
 
-       mutex_lock(&tty_mutex);
-       tty = tty_init_dev(ptm_driver, index);
+       retval = index;
+       if (index < 0)
+               goto out_put_ref;
 
-       if (IS_ERR(tty)) {
-               retval = PTR_ERR(tty);
-               goto out;
-       }
 
+       mutex_lock(&tty_mutex);
+       tty = tty_init_dev(ptm_driver, index);
        /* The tty returned here is locked so we can safely
           drop the mutex */
        mutex_unlock(&tty_mutex);
 
-       set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
-       tty->driver_data = inode;
+       retval = PTR_ERR(tty);
+       if (IS_ERR(tty))
+               goto out;
 
        /*
-        * In the case where all references to ptmx inode are dropped and we
-        * still have /dev/tty opened pointing to the master/slave pair (ptmx
-        * is closed/released before /dev/tty), we must make sure that the inode
-        * is still valid when we call the final pty_unix98_shutdown, thus we
-        * hold an additional reference to the ptmx inode. For the same /dev/tty
-        * last close case, we also need to make sure the super_block isn't
-        * destroyed (devpts instance unmounted), before /dev/tty is closed and
-        * on its release devpts_kill_index is called.
+        * From here on out, the tty is "live", and the index and
+        * fsi will be killed/put by the tty_release()
         */
-       devpts_add_ref(inode);
+       set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
+       tty->driver_data = fsi;
 
        tty_add_file(tty, filp);
 
-       slave_inode = devpts_pty_new(inode,
-                       MKDEV(UNIX98_PTY_SLAVE_MAJOR, index), index,
-                       tty->link);
-       if (IS_ERR(slave_inode)) {
-               retval = PTR_ERR(slave_inode);
+       dentry = devpts_pty_new(fsi, index, tty->link);
+       if (IS_ERR(dentry)) {
+               retval = PTR_ERR(dentry);
                goto err_release;
        }
-       tty->link->driver_data = slave_inode;
+       tty->link->driver_data = dentry;
 
        retval = ptm_driver->ops->open(tty, filp);
        if (retval)
@@ -793,12 +786,14 @@ static int ptmx_open(struct inode *inode, struct file *filp)
        return 0;
 err_release:
        tty_unlock(tty);
+       // This will also put-ref the fsi
        tty_release(inode, filp);
        return retval;
 out:
-       mutex_unlock(&tty_mutex);
-       devpts_kill_index(inode, index);
-err_file:
+       devpts_kill_index(fsi, index);
+out_put_ref:
+       devpts_put_ref(fsi);
+out_free_file:
        tty_free_file(filp);
        return retval;
 }
index e213da01a3d71b5fa6317a4990d30944ac2cd2c0..00ad2637b08c082f523e3a28c3e3472b91b82438 100644 (file)
@@ -1403,9 +1403,18 @@ static void __do_stop_tx_rs485(struct uart_8250_port *p)
        /*
         * Empty the RX FIFO, we are not interested in anything
         * received during the half-duplex transmission.
+        * Enable previously disabled RX interrupts.
         */
-       if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX))
+       if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) {
                serial8250_clear_fifos(p);
+
+               serial8250_rpm_get(p);
+
+               p->ier |= UART_IER_RLSI | UART_IER_RDI;
+               serial_port_out(&p->port, UART_IER, p->ier);
+
+               serial8250_rpm_put(p);
+       }
 }
 
 static void serial8250_em485_handle_stop_tx(unsigned long arg)
index 64742a086ae3385f3fb1680b912e48dcfb1770de..4d7cb9c04fce73b93db59fdaed5832310aaaff44 100644 (file)
@@ -324,7 +324,6 @@ config SERIAL_8250_EM
 config SERIAL_8250_RT288X
        bool "Ralink RT288x/RT305x/RT3662/RT3883 serial port support"
        depends on SERIAL_8250
-       depends on MIPS || COMPILE_TEST
        default y if MIPS_ALCHEMY || SOC_RT288X || SOC_RT305X || SOC_RT3883 || SOC_MT7620
        help
          Selecting this option will add support for the alternate register
index c9fdfc8bf47f85b3a8e74596f5bea496ca707691..d08baa668d5de9feeaae9617710f5466360f270a 100644 (file)
@@ -72,7 +72,7 @@ static void uartlite_outbe32(u32 val, void __iomem *addr)
        iowrite32be(val, addr);
 }
 
-static const struct uartlite_reg_ops uartlite_be = {
+static struct uartlite_reg_ops uartlite_be = {
        .in = uartlite_inbe32,
        .out = uartlite_outbe32,
 };
@@ -87,21 +87,21 @@ static void uartlite_outle32(u32 val, void __iomem *addr)
        iowrite32(val, addr);
 }
 
-static const struct uartlite_reg_ops uartlite_le = {
+static struct uartlite_reg_ops uartlite_le = {
        .in = uartlite_inle32,
        .out = uartlite_outle32,
 };
 
 static inline u32 uart_in32(u32 offset, struct uart_port *port)
 {
-       const struct uartlite_reg_ops *reg_ops = port->private_data;
+       struct uartlite_reg_ops *reg_ops = port->private_data;
 
        return reg_ops->in(port->membase + offset);
 }
 
 static inline void uart_out32(u32 val, u32 offset, struct uart_port *port)
 {
-       const struct uartlite_reg_ops *reg_ops = port->private_data;
+       struct uartlite_reg_ops *reg_ops = port->private_data;
 
        reg_ops->out(val, port->membase + offset);
 }
index f5476e270734b24a030a2b20db04628209a25994..c8c7601510948b202ed128ec59943069ac02da55 100644 (file)
@@ -7708,7 +7708,7 @@ static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
        dev_kfree_skb(skb);
 
        /* save start time for transmit timeout detection */
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
 
        /* start hardware transmitter if necessary */
        spin_lock_irqsave(&info->irq_spinlock,flags);
@@ -7764,7 +7764,7 @@ static int hdlcdev_open(struct net_device *dev)
        mgsl_program_hw(info);
 
        /* enable network layer transmit */
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
        netif_start_queue(dev);
 
        /* inform generic HDLC layer of current DCD status */
index c0a2f5a1b1c2d08b0534df5138b12d813f420602..d5b6471bece4c5f382f0079331f5cb7878b9a7c2 100644 (file)
@@ -1493,7 +1493,7 @@ static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
        dev->stats.tx_bytes += skb->len;
 
        /* save start time for transmit timeout detection */
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
 
        spin_lock_irqsave(&info->lock, flags);
        tx_load(info, skb->data, skb->len);
@@ -1552,7 +1552,7 @@ static int hdlcdev_open(struct net_device *dev)
        program_hw(info);
 
        /* enable network layer transmit */
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
        netif_start_queue(dev);
 
        /* inform generic HDLC layer of current DCD status */
index 90da0c712262d81b92891aec8103c95a9df44634..3f8968543af050ca106c5c166e238b879d83d05a 100644 (file)
@@ -1612,7 +1612,7 @@ static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
        dev_kfree_skb(skb);
 
        /* save start time for transmit timeout detection */
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
 
        /* start hardware transmitter if necessary */
        spin_lock_irqsave(&info->lock,flags);
@@ -1668,7 +1668,7 @@ static int hdlcdev_open(struct net_device *dev)
        program_hw(info);
 
        /* enable network layer transmit */
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
        netif_start_queue(dev);
 
        /* inform generic HDLC layer of current DCD status */
index 8d26ed79bb4c593a1dabbd4ab76b45be2338e8fc..24d5491ef0da7c4b1c8354ac0feab068d0bfa059 100644 (file)
@@ -1367,12 +1367,12 @@ static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p)
  *     Locking: tty_mutex must be held. If the tty is found, bump the tty kref.
  */
 static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver,
-               struct inode *inode, int idx)
+               struct file *file, int idx)
 {
        struct tty_struct *tty;
 
        if (driver->ops->lookup)
-               tty = driver->ops->lookup(driver, inode, idx);
+               tty = driver->ops->lookup(driver, file, idx);
        else
                tty = driver->ttys[idx];
 
@@ -2040,7 +2040,7 @@ static struct tty_struct *tty_open_by_driver(dev_t device, struct inode *inode,
        }
 
        /* check whether we're reopening an existing tty */
-       tty = tty_driver_lookup_tty(driver, inode, index);
+       tty = tty_driver_lookup_tty(driver, filp, index);
        if (IS_ERR(tty)) {
                mutex_unlock(&tty_mutex);
                goto out;
@@ -2049,14 +2049,13 @@ static struct tty_struct *tty_open_by_driver(dev_t device, struct inode *inode,
        if (tty) {
                mutex_unlock(&tty_mutex);
                retval = tty_lock_interruptible(tty);
+               tty_kref_put(tty);  /* drop kref from tty_driver_lookup_tty() */
                if (retval) {
                        if (retval == -EINTR)
                                retval = -ERESTARTSYS;
                        tty = ERR_PTR(retval);
                        goto out;
                }
-               /* safe to drop the kref from tty_driver_lookup_tty() */
-               tty_kref_put(tty);
                retval = tty_reopen(tty);
                if (retval < 0) {
                        tty_unlock(tty);
@@ -2158,7 +2157,7 @@ retry_open:
        read_lock(&tasklist_lock);
        spin_lock_irq(&current->sighand->siglock);
        noctty = (filp->f_flags & O_NOCTTY) ||
-                       device == MKDEV(TTY_MAJOR, 0) ||
+                       (IS_ENABLED(CONFIG_VT) && device == MKDEV(TTY_MAJOR, 0)) ||
                        device == MKDEV(TTYAUX_MAJOR, 1) ||
                        (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
                         tty->driver->subtype == PTY_TYPE_MASTER);
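
Wrapping the console-device test in IS_ENABLED(CONFIG_VT) lets the compiler constant-fold the MKDEV(TTY_MAJOR, 0) comparison away on kernels built without virtual terminals, while keeping both arms visible to the parser (unlike #ifdef, the disabled code still has to compile). The general pattern, with a hypothetical CONFIG_FOO and foo_init():

    if (IS_ENABLED(CONFIG_FOO))     /* 1 when CONFIG_FOO=y or =m, else 0 */
            foo_init();             /* dead-code-eliminated when disabled */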
index 83fd30b0577c55f33eace402617ff3a2a4b09126..a6c4a1b895bd4e5acdab56b4a5a5abf512be6244 100644 (file)
@@ -744,11 +744,15 @@ static void acm_tty_flush_chars(struct tty_struct *tty)
        int err;
        unsigned long flags;
 
+       if (!cur) /* nothing to do */
+               return;
+
        acm->putbuffer = NULL;
        err = usb_autopm_get_interface_async(acm->control);
        spin_lock_irqsave(&acm->write_lock, flags);
        if (err < 0) {
                cur->use = 0;
+               acm->putbuffer = cur;
                goto out;
        }
 
index 5eb1a87228b47afd397fecfd4180413e7043a304..31ccdccd7a04fda36003cdc3ba58b82e25773fa9 100644 (file)
@@ -75,8 +75,6 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
         * be the first thing immediately following the endpoint descriptor.
         */
        desc = (struct usb_ss_ep_comp_descriptor *) buffer;
-       buffer += desc->bLength;
-       size -= desc->bLength;
 
        if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP ||
                        size < USB_DT_SS_EP_COMP_SIZE) {
@@ -100,7 +98,8 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
                                        ep->desc.wMaxPacketSize;
                return;
        }
-
+       buffer += desc->bLength;
+       size -= desc->bLength;
        memcpy(&ep->ss_ep_comp, desc, USB_DT_SS_EP_COMP_SIZE);
 
        /* Check the various values */
@@ -146,12 +145,6 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
                ep->ss_ep_comp.bmAttributes = 2;
        }
 
-       /* Parse a possible SuperSpeedPlus isoc ep companion descriptor */
-       if (usb_endpoint_xfer_isoc(&ep->desc) &&
-           USB_SS_SSP_ISOC_COMP(desc->bmAttributes))
-               usb_parse_ssp_isoc_endpoint_companion(ddev, cfgno, inum, asnum,
-                                                       ep, buffer, size);
-
        if (usb_endpoint_xfer_isoc(&ep->desc))
                max_tx = (desc->bMaxBurst + 1) *
                        (USB_SS_MULT(desc->bmAttributes)) *
@@ -171,6 +164,11 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
                                max_tx);
                ep->ss_ep_comp.wBytesPerInterval = cpu_to_le16(max_tx);
        }
+       /* Parse a possible SuperSpeedPlus isoc ep companion descriptor */
+       if (usb_endpoint_xfer_isoc(&ep->desc) &&
+           USB_SS_SSP_ISOC_COMP(desc->bmAttributes))
+               usb_parse_ssp_isoc_endpoint_companion(ddev, cfgno, inum, asnum,
+                                                       ep, buffer, size);
 }
 
 static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
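
The reordering is a validate-before-consume fix: buffer and size now advance past the companion descriptor only after its type and length have checked out, and the SuperSpeedPlus companion parse moves to the end so it starts from the already-advanced buffer. The same discipline applies to any descriptor walk; a minimal sketch assuming kernel types, with handle_descriptor() hypothetical:

    static void parse_descriptors(const u8 *buffer, int size)
    {
            while (size >= 2) {
                    u8 len = buffer[0];     /* bLength */
                    u8 type = buffer[1];    /* bDescriptorType */

                    if (len < 2 || len > size)
                            break;          /* malformed: stop before overrunning */

                    handle_descriptor(type, buffer, len);
                    buffer += len;          /* consume only validated bytes */
                    size -= len;
            }
    }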
index f9d42cf23e55f80104066f6c38b6deda9943f458..7859d738df418f5c3447a8f524d4d8a922299299 100644 (file)
@@ -73,6 +73,15 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd,
                if (companion->bus != pdev->bus ||
                                PCI_SLOT(companion->devfn) != slot)
                        continue;
+
+               /*
+                * The companion device should be a UHCI, OHCI or EHCI
+                * host controller; otherwise skip it.
+                */
+               if (companion->class != CL_UHCI && companion->class != CL_OHCI &&
+                               companion->class != CL_EHCI)
+                       continue;
+
                companion_hcd = pci_get_drvdata(companion);
                if (!companion_hcd || !companion_hcd->self.root_hub)
                        continue;
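
CL_UHCI, CL_OHCI and CL_EHCI are the quirk code's shorthands for the full 24-bit PCI class codes of the three companion-capable controller types; the added check stops for_each_companion() from treating an unrelated function in the same slot as a host controller. For reference, the class codes as assigned by the PCI spec (base class 0x0c serial bus, subclass 0x03 USB, prog-if selecting the flavour):

    #define CL_UHCI 0x0c0300        /* UHCI: prog-if 0x00 */
    #define CL_OHCI 0x0c0310        /* OHCI: prog-if 0x10 */
    #define CL_EHCI 0x0c0320        /* EHCI: prog-if 0x20 */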
index 14718a9ffcfb18d398f8ab5ae2cb3df99d4994db..460c855be0d020bd21d3cc024d0a33d705b0d59d 100644 (file)
@@ -249,18 +249,12 @@ static int usb_port_runtime_suspend(struct device *dev)
 
        return retval;
 }
-
-static int usb_port_prepare(struct device *dev)
-{
-       return 1;
-}
 #endif
 
 static const struct dev_pm_ops usb_port_pm_ops = {
 #ifdef CONFIG_PM
        .runtime_suspend =      usb_port_runtime_suspend,
        .runtime_resume =       usb_port_runtime_resume,
-       .prepare =              usb_port_prepare,
 #endif
 };
 
index dcb85e3cd5a76ab40e963f0e6dd402dfb63fa219..479187c32571d7efa9e49dccb3ea7d386102fd13 100644 (file)
@@ -312,13 +312,7 @@ static int usb_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
 
 static int usb_dev_prepare(struct device *dev)
 {
-       struct usb_device *udev = to_usb_device(dev);
-
-       /* Return 0 if the current wakeup setting is wrong, otherwise 1 */
-       if (udev->do_remote_wakeup != device_may_wakeup(dev))
-               return 0;
-
-       return 1;
+       return 0;               /* Implement eventually? */
 }
 
 static void usb_dev_complete(struct device *dev)
index e9940dd004e46c11472f776d79aebddc6497a906..818f158232bb6eb84e13e16a85808a25b7b1f668 100644 (file)
@@ -2254,6 +2254,7 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
 {
        u32 intmsk;
        u32 val;
+       u32 usbcfg;
 
        /* Kill any ep0 requests as controller will be reinitialized */
        kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET);
@@ -2267,10 +2268,16 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
         * set configuration.
         */
 
+       /* keep other bits untouched (so e.g. forced modes are not lost) */
+       usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+       usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
+               GUSBCFG_HNPCAP);
+
        /* set the PLL on, remove the HNP/SRP and set the PHY */
        val = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
-       dwc2_writel(hsotg->phyif | GUSBCFG_TOUTCAL(7) |
-              (val << GUSBCFG_USBTRDTIM_SHIFT), hsotg->regs + GUSBCFG);
+       usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) |
+               (val << GUSBCFG_USBTRDTIM_SHIFT);
+       dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
 
        dwc2_hsotg_init_fifo(hsotg);
 
@@ -3031,6 +3038,7 @@ static struct usb_ep_ops dwc2_hsotg_ep_ops = {
 static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
 {
        u32 trdtim;
+       u32 usbcfg;
        /* unmask subset of endpoint interrupts */
 
        dwc2_writel(DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK |
@@ -3054,11 +3062,16 @@ static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
 
        dwc2_hsotg_init_fifo(hsotg);
 
+       /* keep other bits untouched (so e.g. forced modes are not lost) */
+       usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+       usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
+               GUSBCFG_HNPCAP);
+
        /* set the PLL on, remove the HNP/SRP and set the PHY */
        trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
-       dwc2_writel(hsotg->phyif | GUSBCFG_TOUTCAL(7) |
-               (trdtim << GUSBCFG_USBTRDTIM_SHIFT),
-               hsotg->regs + GUSBCFG);
+       usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) |
+               (trdtim << GUSBCFG_USBTRDTIM_SHIFT);
+       dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
 
        if (using_dma(hsotg))
                __orr32(hsotg->regs + GAHBCFG, GAHBCFG_DMA_EN);
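
Both dwc2 hunks turn a blind GUSBCFG store into read-modify-write, so bits outside the fields being programmed (forced-mode selection, for instance) survive reinitialization. The general shape of the idiom, with hypothetical register and field names:

    u32 cfg;

    cfg = readl(base + REG_CFG);            /* fetch current value    */
    cfg &= ~(FIELD_A_MASK | FIELD_B_MASK);  /* clear only our fields  */
    cfg |= FIELD_A(a) | FIELD_B(b);         /* program the new values */
    writel(cfg, base + REG_CFG);            /* write the whole word   */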
index 17fd81447c9f72b452c83f8567e056e28bb1dd67..34277ced26bd27e6b66c3ffda544054383d6b0ef 100644 (file)
@@ -67,23 +67,9 @@ void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
 static int dwc3_core_soft_reset(struct dwc3 *dwc)
 {
        u32             reg;
+       int             retries = 1000;
        int             ret;
 
-       /* Before Resetting PHY, put Core in Reset */
-       reg = dwc3_readl(dwc->regs, DWC3_GCTL);
-       reg |= DWC3_GCTL_CORESOFTRESET;
-       dwc3_writel(dwc->regs, DWC3_GCTL, reg);
-
-       /* Assert USB3 PHY reset */
-       reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
-       reg |= DWC3_GUSB3PIPECTL_PHYSOFTRST;
-       dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
-
-       /* Assert USB2 PHY reset */
-       reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
-       reg |= DWC3_GUSB2PHYCFG_PHYSOFTRST;
-       dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
-
        usb_phy_init(dwc->usb2_phy);
        usb_phy_init(dwc->usb3_phy);
        ret = phy_init(dwc->usb2_generic_phy);
@@ -95,26 +81,28 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
                phy_exit(dwc->usb2_generic_phy);
                return ret;
        }
-       mdelay(100);
 
-       /* Clear USB3 PHY reset */
-       reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
-       reg &= ~DWC3_GUSB3PIPECTL_PHYSOFTRST;
-       dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
+       /*
+        * We're resetting only the device side because, if we're in host
+        * mode, the xHCI driver will reset the host block. If dwc3 was
+        * configured for host-only mode, we can return early.
+        */
+       if (dwc->dr_mode == USB_DR_MODE_HOST)
+               return 0;
 
-       /* Clear USB2 PHY reset */
-       reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
-       reg &= ~DWC3_GUSB2PHYCFG_PHYSOFTRST;
-       dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+       reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+       reg |= DWC3_DCTL_CSFTRST;
+       dwc3_writel(dwc->regs, DWC3_DCTL, reg);
 
-       mdelay(100);
+       do {
+               reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+               if (!(reg & DWC3_DCTL_CSFTRST))
+                       return 0;
 
-       /* After PHYs are stable we can take Core out of reset state */
-       reg = dwc3_readl(dwc->regs, DWC3_GCTL);
-       reg &= ~DWC3_GCTL_CORESOFTRESET;
-       dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+               udelay(1);
+       } while (--retries);
 
-       return 0;
+       return -ETIMEDOUT;
 }
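
The rewrite drops two fixed 100 ms delays in favour of DCTL.CSFTRST, a self-clearing bit that the core deasserts once the soft reset completes; a bounded poll turns "hope the delay was long enough" into an explicit -ETIMEDOUT. A generic form of the loop, assuming kernel context (readl()/udelay()):

    static int wait_for_self_clearing_bit(void __iomem *reg, u32 bit)
    {
            int retries = 1000;

            do {
                    if (!(readl(reg) & bit))
                            return 0;       /* hardware finished */
                    udelay(1);
            } while (--retries);

            return -ETIMEDOUT;
    }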
 
 /**
@@ -1162,6 +1150,11 @@ static int dwc3_suspend(struct device *dev)
        phy_exit(dwc->usb2_generic_phy);
        phy_exit(dwc->usb3_generic_phy);
 
+       usb_phy_set_suspend(dwc->usb2_phy, 1);
+       usb_phy_set_suspend(dwc->usb3_phy, 1);
+       WARN_ON(phy_power_off(dwc->usb2_generic_phy) < 0);
+       WARN_ON(phy_power_off(dwc->usb3_generic_phy) < 0);
+
        pinctrl_pm_select_sleep_state(dev);
 
        return 0;
@@ -1175,11 +1168,21 @@ static int dwc3_resume(struct device *dev)
 
        pinctrl_pm_select_default_state(dev);
 
+       usb_phy_set_suspend(dwc->usb2_phy, 0);
+       usb_phy_set_suspend(dwc->usb3_phy, 0);
+       ret = phy_power_on(dwc->usb2_generic_phy);
+       if (ret < 0)
+               return ret;
+
+       ret = phy_power_on(dwc->usb3_generic_phy);
+       if (ret < 0)
+               goto err_usb2phy_power;
+
        usb_phy_init(dwc->usb3_phy);
        usb_phy_init(dwc->usb2_phy);
        ret = phy_init(dwc->usb2_generic_phy);
        if (ret < 0)
-               return ret;
+               goto err_usb3phy_power;
 
        ret = phy_init(dwc->usb3_generic_phy);
        if (ret < 0)
@@ -1212,6 +1215,12 @@ static int dwc3_resume(struct device *dev)
 err_usb2phy_init:
        phy_exit(dwc->usb2_generic_phy);
 
+err_usb3phy_power:
+       phy_power_off(dwc->usb3_generic_phy);
+
+err_usb2phy_power:
+       phy_power_off(dwc->usb2_generic_phy);
+
        return ret;
 }
 
index 9ac37fe1b6a77956d530642db517e57eb3481fe8..cebf9e38b60acac5cf3361703cabef7b8e5563a2 100644 (file)
@@ -645,7 +645,7 @@ int dwc3_debugfs_init(struct dwc3 *dwc)
        file = debugfs_create_regset32("regdump", S_IRUGO, root, dwc->regset);
        if (!file) {
                ret = -ENOMEM;
-               goto err1;
+               goto err2;
        }
 
        if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)) {
@@ -653,7 +653,7 @@ int dwc3_debugfs_init(struct dwc3 *dwc)
                                dwc, &dwc3_mode_fops);
                if (!file) {
                        ret = -ENOMEM;
-                       goto err1;
+                       goto err2;
                }
        }
 
@@ -663,19 +663,22 @@ int dwc3_debugfs_init(struct dwc3 *dwc)
                                dwc, &dwc3_testmode_fops);
                if (!file) {
                        ret = -ENOMEM;
-                       goto err1;
+                       goto err2;
                }
 
                file = debugfs_create_file("link_state", S_IRUGO | S_IWUSR, root,
                                dwc, &dwc3_link_state_fops);
                if (!file) {
                        ret = -ENOMEM;
-                       goto err1;
+                       goto err2;
                }
        }
 
        return 0;
 
+err2:
+       kfree(dwc->regset);
+
 err1:
        debugfs_remove_recursive(root);
 
@@ -686,5 +689,5 @@ err0:
 void dwc3_debugfs_exit(struct dwc3 *dwc)
 {
        debugfs_remove_recursive(dwc->root);
-       dwc->root = NULL;
+       kfree(dwc->regset);
 }
index 2be268d2423d9339679d38dc0a01982912424b96..72664700b8a25c7d9bcee89c16c68fa6b5792565 100644 (file)
@@ -39,8 +39,6 @@
 #define USBSS_IRQ_COREIRQ_EN   BIT(0)
 #define USBSS_IRQ_COREIRQ_CLR  BIT(0)
 
-static u64 kdwc3_dma_mask;
-
 struct dwc3_keystone {
        struct device                   *dev;
        struct clk                      *clk;
@@ -108,9 +106,6 @@ static int kdwc3_probe(struct platform_device *pdev)
        if (IS_ERR(kdwc->usbss))
                return PTR_ERR(kdwc->usbss);
 
-       kdwc3_dma_mask = dma_get_mask(dev);
-       dev->dma_mask = &kdwc3_dma_mask;
-
        kdwc->clk = devm_clk_get(kdwc->dev, "usb");
 
        error = clk_prepare_enable(kdwc->clk);
index 22e9606d8e081c3ece06c3cf52f0e735ab44207c..55da2c7f727f95e678f37462cdba5c532d659936 100644 (file)
@@ -496,7 +496,7 @@ static int dwc3_omap_probe(struct platform_device *pdev)
        ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
                dev_err(dev, "get_sync failed with err %d\n", ret);
-               goto err0;
+               goto err1;
        }
 
        dwc3_omap_map_offset(omap);
@@ -516,28 +516,24 @@ static int dwc3_omap_probe(struct platform_device *pdev)
 
        ret = dwc3_omap_extcon_register(omap);
        if (ret < 0)
-               goto err2;
+               goto err1;
 
        ret = of_platform_populate(node, NULL, NULL, dev);
        if (ret) {
                dev_err(&pdev->dev, "failed to create dwc3 core\n");
-               goto err3;
+               goto err2;
        }
 
        dwc3_omap_enable_irqs(omap);
 
        return 0;
 
-err3:
+err2:
        extcon_unregister_notifier(omap->edev, EXTCON_USB, &omap->vbus_nb);
        extcon_unregister_notifier(omap->edev, EXTCON_USB_HOST, &omap->id_nb);
-err2:
-       dwc3_omap_disable_irqs(omap);
 
 err1:
        pm_runtime_put_sync(dev);
-
-err0:
        pm_runtime_disable(dev);
 
        return ret;
index 009d83048c8c9ab51ae01191e3cb2aa30207d928..adc1e8a624cb036d8fdb7ceee8d87d153a472eb4 100644 (file)
@@ -35,6 +35,7 @@
 #define PCI_DEVICE_ID_INTEL_SPTLP              0x9d30
 #define PCI_DEVICE_ID_INTEL_SPTH               0xa130
 #define PCI_DEVICE_ID_INTEL_BXT                        0x0aaa
+#define PCI_DEVICE_ID_INTEL_BXT_M              0x1aaa
 #define PCI_DEVICE_ID_INTEL_APL                        0x5aaa
 
 static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
@@ -213,6 +214,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT_M), },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
        {  }    /* Terminating Entry */
index 3ac170f9d94d46156a172231c61be29c8067af09..8e4a1b195e9bd4d2646210f970e56244adc1833c 100644 (file)
@@ -568,7 +568,7 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
                dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
 
                if (!usb_endpoint_xfer_isoc(desc))
-                       return 0;
+                       goto out;
 
                /* Link TRB for ISOC. The HWO bit is never reset */
                trb_st_hw = &dep->trb_pool[0];
@@ -582,9 +582,10 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
                trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
        }
 
+out:
        switch (usb_endpoint_type(desc)) {
        case USB_ENDPOINT_XFER_CONTROL:
-               strlcat(dep->name, "-control", sizeof(dep->name));
+               /* don't change name */
                break;
        case USB_ENDPOINT_XFER_ISOC:
                strlcat(dep->name, "-isoc", sizeof(dep->name));
@@ -2487,7 +2488,11 @@ static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
         * implemented.
         */
 
-       dwc->gadget_driver->resume(&dwc->gadget);
+       if (dwc->gadget_driver && dwc->gadget_driver->resume) {
+               spin_unlock(&dwc->lock);
+               dwc->gadget_driver->resume(&dwc->gadget);
+               spin_lock(&dwc->lock);
+       }
 }
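
Calling the gadget driver's resume() with dwc->lock held invites deadlock if the callback calls back into the UDC, so the lock is dropped around the call; the NULL checks guard against no gadget being bound. The lock-drop pattern in isolation (priv, ops and ctx are illustrative; any state the callback can change must be re-validated after re-locking):

    spin_lock(&priv->lock);
    /* ... */
    if (priv->ops && priv->ops->resume) {
            spin_unlock(&priv->lock);
            priv->ops->resume(priv->ctx);   /* may sleep or re-enter */
            spin_lock(&priv->lock);
            /* re-check anything the callback may have changed */
    }
    spin_unlock(&priv->lock);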
 
 static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
@@ -2931,6 +2936,9 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
 
 int dwc3_gadget_suspend(struct dwc3 *dwc)
 {
+       if (!dwc->gadget_driver)
+               return 0;
+
        if (dwc->pullups_connected) {
                dwc3_gadget_disable_irq(dwc);
                dwc3_gadget_run_stop(dwc, true, true);
@@ -2949,6 +2957,9 @@ int dwc3_gadget_resume(struct dwc3 *dwc)
        struct dwc3_ep          *dep;
        int                     ret;
 
+       if (!dwc->gadget_driver)
+               return 0;
+
        /* Start with SuperSpeed Default */
        dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
 
index a5c62093c26c67192dc91ca9920817e88928ae02..524e233d48def51b3e957af120fea769cb54ead0 100644 (file)
@@ -651,12 +651,15 @@ static int bos_desc(struct usb_composite_dev *cdev)
                ssp_cap->bLength = USB_DT_USB_SSP_CAP_SIZE(1);
                ssp_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
                ssp_cap->bDevCapabilityType = USB_SSP_CAP_TYPE;
+               ssp_cap->bReserved = 0;
+               ssp_cap->wReserved = 0;
 
                /* SSAC = 1 (2 attributes) */
                ssp_cap->bmAttributes = cpu_to_le32(1);
 
                /* Min RX/TX Lane Count = 1 */
-               ssp_cap->wFunctionalitySupport = (1 << 8) | (1 << 12);
+               ssp_cap->wFunctionalitySupport =
+                       cpu_to_le16((1 << 8) | (1 << 12));
 
                /*
                 * bmSublinkSpeedAttr[0]:
@@ -666,7 +669,7 @@ static int bos_desc(struct usb_composite_dev *cdev)
                 *   LSM = 10 (10 Gbps)
                 */
                ssp_cap->bmSublinkSpeedAttr[0] =
-                       (3 << 4) | (1 << 14) | (0xa << 16);
+                       cpu_to_le32((3 << 4) | (1 << 14) | (0xa << 16));
                /*
                 * bmSublinkSpeedAttr[1] =
                 *   ST  = Symmetric, TX
@@ -675,7 +678,8 @@ static int bos_desc(struct usb_composite_dev *cdev)
                 *   LSM = 10 (10 Gbps)
                 */
                ssp_cap->bmSublinkSpeedAttr[1] =
-                       (3 << 4) | (1 << 14) | (0xa << 16) | (1 << 7);
+                       cpu_to_le32((3 << 4) | (1 << 14) |
+                                   (0xa << 16) | (1 << 7));
        }
 
        return le16_to_cpu(bos->wTotalLength);
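
USB descriptors are little-endian on the wire, so every multi-byte field must pass through cpu_to_le16()/cpu_to_le32(); the old plain integer stores produced byte-swapped BOS descriptors on big-endian hosts (and were no-ops on little-endian ones, which is why this went unnoticed). The conversion discipline in two lines:

    __le32 wire = cpu_to_le32((3 << 4) | (1 << 14) | (0xa << 16));  /* store */
    u32 host = le32_to_cpu(wire);   /* read back: same value on any CPU */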
index 8cfce105c7eeb2ed516c28507553d038e89f33a6..15b648cbc75c4141bb5c51bd03c2c6e5b5f6c821 100644 (file)
@@ -646,6 +646,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
                                                   work);
        int ret = io_data->req->status ? io_data->req->status :
                                         io_data->req->actual;
+       bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
 
        if (io_data->read && ret > 0) {
                use_mm(io_data->mm);
@@ -657,13 +658,11 @@ static void ffs_user_copy_worker(struct work_struct *work)
 
        io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
 
-       if (io_data->ffs->ffs_eventfd &&
-           !(io_data->kiocb->ki_flags & IOCB_EVENTFD))
+       if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
                eventfd_signal(io_data->ffs->ffs_eventfd, 1);
 
        usb_ep_free_request(io_data->ep, io_data->req);
 
-       io_data->kiocb->private = NULL;
        if (io_data->read)
                kfree(io_data->to_free);
        kfree(io_data->buf);
@@ -1147,8 +1146,8 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
        ffs->sb              = sb;
        data->ffs_data       = NULL;
        sb->s_fs_info        = ffs;
-       sb->s_blocksize      = PAGE_CACHE_SIZE;
-       sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+       sb->s_blocksize      = PAGE_SIZE;
+       sb->s_blocksize_bits = PAGE_SHIFT;
        sb->s_magic          = FUNCTIONFS_MAGIC;
        sb->s_op             = &ffs_sb_operations;
        sb->s_time_gran      = 1;
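
This is part of the 4.6-era tree-wide removal of the page-cache aliases; they were always defined 1:1 against the page macros, so these hunks (and the later page_cache_release() → put_page() and PAGE_CACHE_SIZE changes below) are purely mechanical. The old definitions, as they stood in include/linux/pagemap.h before removal:

    #define PAGE_CACHE_SHIFT        PAGE_SHIFT
    #define PAGE_CACHE_SIZE         PAGE_SIZE
    #define page_cache_release(page)  put_page(page)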
index 84c0ee5ebd1ea6e97abbfd22f69cdfaaa2627d70..58fc199a18ecd735021796ed4c352f7f728d0e22 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/slab.h>
 #include <linux/device.h>
 #include <linux/kfifo.h>
+#include <linux/spinlock.h>
 
 #include <sound/core.h>
 #include <sound/initval.h>
@@ -89,6 +90,7 @@ struct f_midi {
        unsigned int buflen, qlen;
        /* This fifo is used as a buffer ring for pre-allocated IN usb_requests */
        DECLARE_KFIFO_PTR(in_req_fifo, struct usb_request *);
+       spinlock_t transmit_lock;
        unsigned int in_last_port;
 
        struct gmidi_in_port    in_ports_array[/* in_ports */];
@@ -358,7 +360,9 @@ static int f_midi_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
        /* allocate a bunch of read buffers and queue them all at once. */
        for (i = 0; i < midi->qlen && err == 0; i++) {
                struct usb_request *req =
-                       midi_alloc_ep_req(midi->out_ep, midi->buflen);
+                       midi_alloc_ep_req(midi->out_ep,
+                               max_t(unsigned, midi->buflen,
+                                       bulk_out_desc.wMaxPacketSize));
                if (req == NULL)
                        return -ENOMEM;
 
@@ -597,17 +601,24 @@ static void f_midi_transmit(struct f_midi *midi)
 {
        struct usb_ep *ep = midi->in_ep;
        int ret;
+       unsigned long flags;
 
        /* We only care about USB requests if IN endpoint is enabled */
        if (!ep || !ep->enabled)
                goto drop_out;
 
+       spin_lock_irqsave(&midi->transmit_lock, flags);
+
        do {
                ret = f_midi_do_transmit(midi, ep);
-               if (ret < 0)
+               if (ret < 0) {
+                       spin_unlock_irqrestore(&midi->transmit_lock, flags);
                        goto drop_out;
+               }
        } while (ret);
 
+       spin_unlock_irqrestore(&midi->transmit_lock, flags);
+
        return;
 
 drop_out:
@@ -1201,6 +1212,8 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
        if (status)
                goto setup_fail;
 
+       spin_lock_init(&midi->transmit_lock);
+
        ++opts->refcnt;
        mutex_unlock(&opts->lock);
 
index 637809e3bd0d491bf2b30957c8adc87fcc9adabf..a3f7e7c55ebb18bd5045e27628c81075e484af37 100644 (file)
@@ -597,7 +597,7 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
                DBG(dev, "tx queue err %d\n", retval);
                break;
        case 0:
-               net->trans_start = jiffies;
+               netif_trans_update(net);
                atomic_inc(&dev->tx_qlen);
        }
 
index 5cdaf0150a4ed4fc2efe42c19dff6083674ead13..e64479f882a52e787822b865eb447e8c1e484f9a 100644 (file)
@@ -1954,8 +1954,8 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
                return -ENODEV;
 
        /* superblock */
-       sb->s_blocksize = PAGE_CACHE_SIZE;
-       sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+       sb->s_blocksize = PAGE_SIZE;
+       sb->s_blocksize_bits = PAGE_SHIFT;
        sb->s_magic = GADGETFS_MAGIC;
        sb->s_op = &gadget_fs_operations;
        sb->s_time_gran = 1;
index 81d42cce885a4ae3a080c20f08ca20f4db241d16..18569de06b0495762950fcc724f69d05f10047af 100644 (file)
@@ -1045,20 +1045,6 @@ static void reset_all_endpoints(struct usba_udc *udc)
                list_del_init(&req->queue);
                request_complete(ep, req, -ECONNRESET);
        }
-
-       /* NOTE:  normally, the next call to the gadget driver is in
-        * charge of disabling endpoints... usually disconnect().
-        * The exception would be entering a high speed test mode.
-        *
-        * FIXME remove this code ... and retest thoroughly.
-        */
-       list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
-               if (ep->ep.desc) {
-                       spin_unlock(&udc->lock);
-                       usba_ep_disable(&ep->ep);
-                       spin_lock(&udc->lock);
-               }
-       }
 }
 
 static struct usba_ep *get_ep_by_addr(struct usba_udc *udc, u16 wIndex)
index 4151597e9d2881ac107012d3cdc895bbcba7dc4f..e4e70e11d0f6ca2c2cc2b7a423353fb652dabf6a 100644 (file)
@@ -371,12 +371,6 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
        INIT_WORK(&gadget->work, usb_gadget_state_work);
        gadget->dev.parent = parent;
 
-#ifdef CONFIG_HAS_DMA
-       dma_set_coherent_mask(&gadget->dev, parent->coherent_dma_mask);
-       gadget->dev.dma_parms = parent->dma_parms;
-       gadget->dev.dma_mask = parent->dma_mask;
-#endif
-
        if (release)
                gadget->dev.release = release;
        else
index 80c1de239e9acef58b0e92ce2d4add8e437320dd..bad0d1f9a41d4ff9edbcb186eb86238abf878c74 100644 (file)
@@ -1861,6 +1861,12 @@ no_bw:
        kfree(xhci->rh_bw);
        kfree(xhci->ext_caps);
 
+       xhci->usb2_ports = NULL;
+       xhci->usb3_ports = NULL;
+       xhci->port_array = NULL;
+       xhci->rh_bw = NULL;
+       xhci->ext_caps = NULL;
+
        xhci->page_size = 0;
        xhci->page_shift = 0;
        xhci->bus_state[0].bus_suspended = 0;
index f0640b7a1c42e2b82c4b8c6071fa0c43561ab7e7..48672fac7ff3eb22389acfb4889a513e8ac79948 100644 (file)
@@ -48,6 +48,7 @@
 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI                0xa12f
 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI       0x9d2f
 #define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI             0x0aa8
+#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI             0x1aa8
 
 static const char hcd_name[] = "xhci_hcd";
 
@@ -155,7 +156,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
                 pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
                 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
-                pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI)) {
+                pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
+                pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI)) {
                xhci->quirks |= XHCI_PME_STUCK_QUIRK;
        }
        if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
@@ -302,6 +304,7 @@ static void xhci_pci_remove(struct pci_dev *dev)
        struct xhci_hcd *xhci;
 
        xhci = hcd_to_xhci(pci_get_drvdata(dev));
+       xhci->xhc_state |= XHCI_STATE_REMOVING;
        if (xhci->shared_hcd) {
                usb_remove_hcd(xhci->shared_hcd);
                usb_put_hcd(xhci->shared_hcd);
index 5c15e9bc5f7a418e2c56645a2a753e659d20167a..474b5fa149007b8869f56671b6e3fed1050b9edf 100644 (file)
@@ -39,12 +39,25 @@ static const struct xhci_driver_overrides xhci_plat_overrides __initconst = {
 
 static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
 {
+       struct usb_hcd *hcd = xhci_to_hcd(xhci);
+
        /*
         * As of now platform drivers don't provide MSI support so we ensure
         * here that the generic code does not try to make a pci_dev from our
         * dev struct in order to setup MSI
         */
        xhci->quirks |= XHCI_PLAT;
+
+       /*
+        * On R-Car Gen2 and Gen3, the AC64 bit (bit 0) of HCCPARAMS1 is
+        * set to 1 even though these SoCs don't support 64-bit address
+        * memory pointers. Set this quirk so that xhci_gen_setup() clears
+        * the AC64 bit of xhci->hcc_params and falls back to
+        * dma_set_coherent_mask(dev, DMA_BIT_MASK(32)).
+        */
+       if (xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2) ||
+           xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3))
+               xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
 }
 
 /* called during probe() after chip reset completes */
index 5a2e2e3936c4c69e50959e37c04562fbc0439b1f..529c3c40f901df69250cc7d05502bb41177fa329 100644 (file)
@@ -14,7 +14,7 @@
 #include "xhci.h"      /* for hcd_to_xhci() */
 
 enum xhci_plat_type {
-       XHCI_PLAT_TYPE_MARVELL_ARMADA,
+       XHCI_PLAT_TYPE_MARVELL_ARMADA = 1,
        XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2,
        XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3,
 };
index 7cf66212ceae0cfbe1c161877044755057373781..99b4ff42f7a0148afa63c0da65371c4bee522664 100644 (file)
@@ -4004,7 +4004,8 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
        int reserved_trbs = xhci->cmd_ring_reserved_trbs;
        int ret;
 
-       if (xhci->xhc_state) {
+       if ((xhci->xhc_state & XHCI_STATE_DYING) ||
+               (xhci->xhc_state & XHCI_STATE_HALTED)) {
                xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
                return -ESHUTDOWN;
        }
index d51ee0c3cf9f009d3f9098bfa77434a6ed4695b6..9e71c96ad74a8de43f78f611f1b8db5490007995 100644 (file)
@@ -147,7 +147,8 @@ static int xhci_start(struct xhci_hcd *xhci)
                                "waited %u microseconds.\n",
                                XHCI_MAX_HALT_USEC);
        if (!ret)
-               xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
+               /* clear all state flags, including dying, halted and removing */
+               xhci->xhc_state = 0;
 
        return ret;
 }
@@ -1108,8 +1109,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
                /* Resume root hubs only when have pending events. */
                status = readl(&xhci->op_regs->status);
                if (status & STS_EINT) {
-                       usb_hcd_resume_root_hub(hcd);
                        usb_hcd_resume_root_hub(xhci->shared_hcd);
+                       usb_hcd_resume_root_hub(hcd);
                }
        }
 
@@ -1124,10 +1125,10 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 
        /* Re-enable port polling. */
        xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
-       set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
-       usb_hcd_poll_rh_status(hcd);
        set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
        usb_hcd_poll_rh_status(xhci->shared_hcd);
+       set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+       usb_hcd_poll_rh_status(hcd);
 
        return retval;
 }
@@ -2773,7 +2774,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
        if (ret <= 0)
                return ret;
        xhci = hcd_to_xhci(hcd);
-       if (xhci->xhc_state & XHCI_STATE_DYING)
+       if ((xhci->xhc_state & XHCI_STATE_DYING) ||
+               (xhci->xhc_state & XHCI_STATE_REMOVING))
                return -ENODEV;
 
        xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
@@ -3820,7 +3822,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
 
        mutex_lock(&xhci->mutex);
 
-       if (xhci->xhc_state)    /* dying or halted */
+       if (xhci->xhc_state)    /* dying, removing or halted */
                goto out;
 
        if (!udev->slot_id) {
@@ -4948,6 +4950,16 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
                return retval;
        xhci_dbg(xhci, "Reset complete\n");
 
+       /*
+        * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
+        * of HCCPARAMS1 is set to 1 even though the xHC doesn't actually
+        * support 64-bit address memory pointers. This driver therefore
+        * clears the AC64 bit of xhci->hcc_params so that the code below
+        * falls back to dma_set_coherent_mask(dev, DMA_BIT_MASK(32)).
+        */
+       if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
+               xhci->hcc_params &= ~BIT(0);
+
        /* Set dma_mask and coherent_dma_mask to 64-bits,
         * if xHC supports 64-bit addressing */
        if (HCC_64BIT_ADDR(xhci->hcc_params) &&
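
Clearing bit 0 of the cached hcc_params works because HCC_64BIT_ADDR() simply tests that bit; with it cleared, the 64-bit branch is skipped and the driver falls back to a 32-bit DMA mask. The macro as defined in xhci.h, shown for reference:

    /* true: device can use 64-bit address pointers */
    #define HCC_64BIT_ADDR(p)       ((p) & (1 << 0))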
index e293e0974f48257e32653e9ae30501d09c64ea1c..6c629c97f8ad09e6e292fc4513206cb9a5860ec1 100644 (file)
@@ -1605,6 +1605,7 @@ struct xhci_hcd {
  */
 #define XHCI_STATE_DYING       (1 << 0)
 #define XHCI_STATE_HALTED      (1 << 1)
+#define XHCI_STATE_REMOVING    (1 << 2)
        /* Statistics */
        int                     error_bitmask;
        unsigned int            quirks;
@@ -1641,6 +1642,7 @@ struct xhci_hcd {
 #define XHCI_PME_STUCK_QUIRK   (1 << 20)
 #define XHCI_MTK_HOST          (1 << 21)
 #define XHCI_SSIC_PORT_UNUSED  (1 << 22)
+#define XHCI_NO_64BIT_SUPPORT  (1 << 23)
        unsigned int            num_active_eps;
        unsigned int            limit_active_eps;
        /* There are two roothubs to keep track of bus suspend info for */
index 5e5a8fa005f8befc1ca9eb4814b12afbf8e3ccea..bc8889956d172b293e6771b2cd7e1e604005b2f1 100644 (file)
@@ -83,9 +83,9 @@ static int jz4740_musb_init(struct musb *musb)
 {
        usb_phy_generic_register();
        musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
-       if (!musb->xceiv) {
+       if (IS_ERR(musb->xceiv)) {
                pr_err("HS UDC: no transceiver configured\n");
-               return -ENODEV;
+               return PTR_ERR(musb->xceiv);
        }
 
        /* Silicon does not implement ConfigData register.
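
usb_get_phy() reports failure through an ERR_PTR()-encoded pointer, never NULL, so the old !musb->xceiv test could never fire and a missing transceiver slipped through. The ERR_PTR convention in brief:

    struct usb_phy *phy = usb_get_phy(USB_PHY_TYPE_USB2);
    if (IS_ERR(phy))                /* error code travels inside the pointer */
            return PTR_ERR(phy);    /* e.g. -ENODEV */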
index 87bd578799a8a9eb47339177cfc63a5b3bba5a5d..152865b3652295caf52340ca86397d5494f34c84 100644 (file)
@@ -1164,12 +1164,12 @@ static int musb_gadget_disable(struct usb_ep *ep)
                musb_writew(epio, MUSB_RXMAXP, 0);
        }
 
-       musb_ep->desc = NULL;
-       musb_ep->end_point.desc = NULL;
-
        /* abort all pending DMA and requests */
        nuke(musb_ep, -ESHUTDOWN);
 
+       musb_ep->desc = NULL;
+       musb_ep->end_point.desc = NULL;
+
        schedule_work(&musb->irq_work);
 
        spin_unlock_irqrestore(&(musb->lock), flags);
index 58487a4735218518ce50326750ca8718aaa2a516..2f8ad7f1f482cd55116b8cb3a7fd575fc79c360a 100644 (file)
@@ -2735,7 +2735,7 @@ static const struct hc_driver musb_hc_driver = {
        .description            = "musb-hcd",
        .product_desc           = "MUSB HDRC host driver",
        .hcd_priv_size          = sizeof(struct musb *),
-       .flags                  = HCD_USB2 | HCD_MEMORY | HCD_BH,
+       .flags                  = HCD_USB2 | HCD_MEMORY,
 
        /* not using irq handler or reset hooks from usbcore, since
         * those must be shared with peripheral code for OTG configs
index 579587d972177338e97ca7ece02fabbc60fdcf55..3d7af85aecb9564d23946a98e7bb48dea81bf86f 100644 (file)
@@ -65,9 +65,7 @@ struct phy_8x16 {
        void __iomem                    *regs;
        struct clk                      *core_clk;
        struct clk                      *iface_clk;
-       struct regulator                *v3p3;
-       struct regulator                *v1p8;
-       struct regulator                *vdd;
+       struct regulator_bulk_data      regulator[3];
 
        struct reset_control            *phy_reset;
 
@@ -78,51 +76,6 @@ struct phy_8x16 {
        struct notifier_block           reboot_notify;
 };
 
-static int phy_8x16_regulators_enable(struct phy_8x16 *qphy)
-{
-       int ret;
-
-       ret = regulator_set_voltage(qphy->vdd, HSPHY_VDD_MIN, HSPHY_VDD_MAX);
-       if (ret)
-               return ret;
-
-       ret = regulator_enable(qphy->vdd);
-       if (ret)
-               return ret;
-
-       ret = regulator_set_voltage(qphy->v3p3, HSPHY_3P3_MIN, HSPHY_3P3_MAX);
-       if (ret)
-               goto off_vdd;
-
-       ret = regulator_enable(qphy->v3p3);
-       if (ret)
-               goto off_vdd;
-
-       ret = regulator_set_voltage(qphy->v1p8, HSPHY_1P8_MIN, HSPHY_1P8_MAX);
-       if (ret)
-               goto off_3p3;
-
-       ret = regulator_enable(qphy->v1p8);
-       if (ret)
-               goto off_3p3;
-
-       return 0;
-
-off_3p3:
-       regulator_disable(qphy->v3p3);
-off_vdd:
-       regulator_disable(qphy->vdd);
-
-       return ret;
-}
-
-static void phy_8x16_regulators_disable(struct phy_8x16 *qphy)
-{
-       regulator_disable(qphy->v1p8);
-       regulator_disable(qphy->v3p3);
-       regulator_disable(qphy->vdd);
-}
-
 static int phy_8x16_notify_connect(struct usb_phy *phy,
                                   enum usb_device_speed speed)
 {
@@ -261,7 +214,6 @@ static void phy_8x16_shutdown(struct usb_phy *phy)
 
 static int phy_8x16_read_devicetree(struct phy_8x16 *qphy)
 {
-       struct regulator_bulk_data regs[3];
        struct device *dev = qphy->phy.dev;
        int ret;
 
@@ -273,18 +225,15 @@ static int phy_8x16_read_devicetree(struct phy_8x16 *qphy)
        if (IS_ERR(qphy->iface_clk))
                return PTR_ERR(qphy->iface_clk);
 
-       regs[0].supply = "v3p3";
-       regs[1].supply = "v1p8";
-       regs[2].supply = "vddcx";
+       qphy->regulator[0].supply = "v3p3";
+       qphy->regulator[1].supply = "v1p8";
+       qphy->regulator[2].supply = "vddcx";
 
-       ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(regs), regs);
+       ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(qphy->regulator),
+                                     qphy->regulator);
        if (ret)
                return ret;
 
-       qphy->v3p3 = regs[0].consumer;
-       qphy->v1p8 = regs[1].consumer;
-       qphy->vdd  = regs[2].consumer;
-
        qphy->phy_reset = devm_reset_control_get(dev, "phy");
        if (IS_ERR(qphy->phy_reset))
                return PTR_ERR(qphy->phy_reset);
@@ -364,8 +313,9 @@ static int phy_8x16_probe(struct platform_device *pdev)
        if (ret < 0)
                goto off_core;
 
-       ret = phy_8x16_regulators_enable(qphy);
-       if (0 && ret)
+       ret = regulator_bulk_enable(ARRAY_SIZE(qphy->regulator),
+                                   qphy->regulator);
+       if (WARN_ON(ret))
                goto off_clks;
 
        qphy->vbus_notify.notifier_call = phy_8x16_vbus_notify;
@@ -387,7 +337,7 @@ off_extcon:
        extcon_unregister_notifier(qphy->vbus_edev, EXTCON_USB,
                                   &qphy->vbus_notify);
 off_power:
-       phy_8x16_regulators_disable(qphy);
+       regulator_bulk_disable(ARRAY_SIZE(qphy->regulator), qphy->regulator);
 off_clks:
        clk_disable_unprepare(qphy->iface_clk);
 off_core:
@@ -413,7 +363,7 @@ static int phy_8x16_remove(struct platform_device *pdev)
 
        clk_disable_unprepare(qphy->iface_clk);
        clk_disable_unprepare(qphy->core_clk);
-       phy_8x16_regulators_disable(qphy);
+       regulator_bulk_disable(ARRAY_SIZE(qphy->regulator), qphy->regulator);
        return 0;
 }
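
Folding the three named supplies into one regulator_bulk_data array lets a single devm_regulator_bulk_get() fetch them and a regulator_bulk_enable()/regulator_bulk_disable() pair power them, with partial-failure unwinding handled inside the API; the explicit regulator_set_voltage() calls go away because the voltages now come from platform constraints. The pattern in isolation, using the driver's supply names:

    struct regulator_bulk_data regs[] = {
            { .supply = "v3p3" },
            { .supply = "v1p8" },
            { .supply = "vddcx" },
    };
    int ret;

    ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(regs), regs);
    if (ret)
            return ret;

    ret = regulator_bulk_enable(ARRAY_SIZE(regs), regs);
    /* on failure, already-enabled supplies are disabled again by the API */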
 
index b4de70ee16d3cfb56f4f3d5eb2047c5feb729a3a..000f9750149f5503b3da33aaf1f36bd9d550ab9d 100644 (file)
@@ -190,7 +190,8 @@ static int usbhsf_pkt_handler(struct usbhs_pipe *pipe, int type)
                goto __usbhs_pkt_handler_end;
        }
 
-       ret = func(pkt, &is_done);
+       if (likely(func))
+               ret = func(pkt, &is_done);
 
        if (is_done)
                __usbhsf_pkt_del(pkt);
@@ -889,6 +890,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
 
        pkt->trans = len;
 
+       usbhsf_tx_irq_ctrl(pipe, 0);
        INIT_WORK(&pkt->work, xfer_work);
        schedule_work(&pkt->work);
 
index 664b263e4b204cf6ac426d8a1bf179f3b5d68b8f..53d104b56ef17a9d47d29a9d1bcdfac9f8f4e7de 100644 (file)
@@ -158,10 +158,14 @@ static void usbhsg_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
        struct usbhs_pipe *pipe = pkt->pipe;
        struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pipe);
        struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt);
+       unsigned long flags;
 
        ureq->req.actual = pkt->actual;
 
-       usbhsg_queue_pop(uep, ureq, 0);
+       usbhs_lock(priv, flags);
+       if (uep)
+               __usbhsg_queue_pop(uep, ureq, 0);
+       usbhs_unlock(priv, flags);
 }
 
 static void usbhsg_queue_push(struct usbhsg_uep *uep,
index fbfe761c7fba7de2e8e8f4c850b140475c6378b6..7c9f25e9c422a278ae71ea37de6a5c367a6e58cf 100644 (file)
@@ -109,6 +109,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
        { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
        { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
+       { USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */
        { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
        { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
        { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
@@ -118,6 +119,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
        { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
        { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
+       { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
        { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
        { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
        { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
@@ -141,6 +143,8 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */
        { USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */
        { USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
+       { USB_DEVICE(0x12B8, 0xEC60) }, /* Link G4 ECU */
+       { USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */
        { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
        { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
        { USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
@@ -165,6 +169,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
        { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
        { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
+       { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
        { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
        { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
        { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
index b283eb8b86d68f5b53c336ee70bc8a6cd609adce..bbeeb2bd55a83cebf4cfec9b4b8f2876edde1ca0 100644 (file)
@@ -447,6 +447,11 @@ static int cypress_generic_port_probe(struct usb_serial_port *port)
        struct usb_serial *serial = port->serial;
        struct cypress_private *priv;
 
+       if (!port->interrupt_out_urb || !port->interrupt_in_urb) {
+               dev_err(&port->dev, "required endpoint is missing\n");
+               return -ENODEV;
+       }
+
        priv = kzalloc(sizeof(struct cypress_private), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
@@ -606,12 +611,6 @@ static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port)
                cypress_set_termios(tty, port, &priv->tmp_termios);
 
        /* setup the port and start reading from the device */
-       if (!port->interrupt_in_urb) {
-               dev_err(&port->dev, "%s - interrupt_in_urb is empty!\n",
-                       __func__);
-               return -1;
-       }
-
        usb_fill_int_urb(port->interrupt_in_urb, serial->dev,
                usb_rcvintpipe(serial->dev, port->interrupt_in_endpointAddress),
                port->interrupt_in_urb->transfer_buffer,
index 010a42a92688954d7b65a82472d28fad346c80c3..16e8e37b3b36d521749ec48d17b2becfc4ae4f51 100644 (file)
@@ -1251,8 +1251,27 @@ static int digi_port_init(struct usb_serial_port *port, unsigned port_num)
 
 static int digi_startup(struct usb_serial *serial)
 {
+       struct device *dev = &serial->interface->dev;
        struct digi_serial *serial_priv;
        int ret;
+       int i;
+
+       /* check whether the device has the expected number of endpoints */
+       if (serial->num_port_pointers < serial->type->num_ports + 1) {
+               dev_err(dev, "OOB endpoints missing\n");
+               return -ENODEV;
+       }
+
+       for (i = 0; i < serial->type->num_ports + 1 ; i++) {
+               if (!serial->port[i]->read_urb) {
+                       dev_err(dev, "bulk-in endpoint missing\n");
+                       return -ENODEV;
+               }
+               if (!serial->port[i]->write_urb) {
+                       dev_err(dev, "bulk-out endpoint missing\n");
+                       return -ENODEV;
+               }
+       }
 
        serial_priv = kzalloc(sizeof(*serial_priv), GFP_KERNEL);
        if (!serial_priv)
index 427ae43ee898fabcebafdac0d9042cff9fa053d5..3a814e802dee89644195b7e4c985b07ed8d218ad 100644 (file)
@@ -1004,6 +1004,10 @@ static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) },
        { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) },
        { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) },
+       /* ICP DAS I-756xU devices */
+       { USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) },
+       { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
+       { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
        { }                                     /* Terminating entry */
 };
 
index a84df2513994a57377cc41105f3078e36d2351a2..c5d6c1e73e8e0450d46dc7140637146404f262bb 100644 (file)
 #define NOVITUS_VID                    0x1a28
 #define NOVITUS_BONO_E_PID             0x6010
 
+/*
+ * ICPDAS I-756*U devices
+ */
+#define ICPDAS_VID                     0x1b5c
+#define ICPDAS_I7560U_PID              0x0103
+#define ICPDAS_I7561U_PID              0x0104
+#define ICPDAS_I7563U_PID              0x0105
+
 /*
  * RT Systems programming cables for various ham radios
  */
index 4446b8d70ac203cc8b763c38b6b93903cd1a0cb8..885655315de15a4b28db64da22f5d94bea7c8ea9 100644 (file)
@@ -376,14 +376,21 @@ static void mct_u232_msr_to_state(struct usb_serial_port *port,
 
 static int mct_u232_port_probe(struct usb_serial_port *port)
 {
+       struct usb_serial *serial = port->serial;
        struct mct_u232_private *priv;
 
+       /* check first to simplify error handling */
+       if (!serial->port[1] || !serial->port[1]->interrupt_in_urb) {
+               dev_err(&port->dev, "expected endpoint missing\n");
+               return -ENODEV;
+       }
+
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
 
        /* Use second interrupt-in endpoint for reading. */
-       priv->read_urb = port->serial->port[1]->interrupt_in_urb;
+       priv->read_urb = serial->port[1]->interrupt_in_urb;
        priv->read_urb->context = port;
 
        spin_lock_init(&priv->lock);
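
The cypress, digi and mct_u232 changes all apply one pattern: validate the endpoint layout once at probe/attach time and refuse to bind with -ENODEV, rather than dereferencing a NULL URB later when a (possibly malicious) device lacks the endpoints the driver assumes. A skeleton of the check:

    static int example_port_probe(struct usb_serial_port *port)
    {
            /* refuse to bind rather than crash in open() later */
            if (!port->interrupt_in_urb || !port->interrupt_out_urb) {
                    dev_err(&port->dev, "required endpoint is missing\n");
                    return -ENODEV;
            }

            return 0;
    }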
index 348e19834b83e12e7aafafa9d46374822fe43f7f..c6f497f1652659dc59e700181f35d75aab6ceae1 100644 (file)
@@ -1818,6 +1818,8 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
        { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
        { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
+       { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff),                     /* D-Link DWM-221 B1 */
+         .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
        { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
        { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },                /* OLICARD300 - MT6225 */
index dba51362d2e26cfc0dfc9f82cbd1535e8b8a4ec3..90901861bfc044ea891e09cafa91092b88b2d507 100644 (file)
@@ -123,7 +123,7 @@ static int slave_configure(struct scsi_device *sdev)
                unsigned int max_sectors = 64;
 
                if (us->fflags & US_FL_MAX_SECTORS_MIN)
-                       max_sectors = PAGE_CACHE_SIZE >> 9;
+                       max_sectors = PAGE_SIZE >> 9;
                if (queue_max_hw_sectors(sdev->request_queue) > max_sectors)
                        blk_queue_max_hw_sectors(sdev->request_queue,
                                              max_sectors);
index 13e4cc31bc79e61a11539c42b8ea182b3dd6b3af..16bc679dc2fc4b807d0b175ba4914102ce964ec2 100644 (file)
@@ -2,7 +2,7 @@
  * USB Attached SCSI
  * Note that this is not the same as the USB Mass Storage driver
  *
- * Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013 - 2014
+ * Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013 - 2016
  * Copyright Matthew Wilcox for Intel Corp, 2010
  * Copyright Sarah Sharp for Intel Corp, 2010
  *
@@ -781,6 +781,17 @@ static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
        return SUCCESS;
 }
 
+static int uas_target_alloc(struct scsi_target *starget)
+{
+       struct uas_dev_info *devinfo = (struct uas_dev_info *)
+                       dev_to_shost(starget->dev.parent)->hostdata;
+
+       if (devinfo->flags & US_FL_NO_REPORT_LUNS)
+               starget->no_report_luns = 1;
+
+       return 0;
+}
+
 static int uas_slave_alloc(struct scsi_device *sdev)
 {
        struct uas_dev_info *devinfo =
@@ -824,7 +835,6 @@ static int uas_slave_configure(struct scsi_device *sdev)
        if (devinfo->flags & US_FL_BROKEN_FUA)
                sdev->broken_fua = 1;
 
-       scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
        return 0;
 }
 
@@ -832,6 +842,7 @@ static struct scsi_host_template uas_host_template = {
        .module = THIS_MODULE,
        .name = "uas",
        .queuecommand = uas_queuecommand,
+       .target_alloc = uas_target_alloc,
        .slave_alloc = uas_slave_alloc,
        .slave_configure = uas_slave_configure,
        .eh_abort_handler = uas_eh_abort_handler,
@@ -956,6 +967,12 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
        if (result)
                goto set_alt0;
 
+       /*
+        * One tag is reserved for untagged commands, plus one more to
+        * avoid off-by-one errors in some bridge firmware.
+        */
+       shost->can_queue = devinfo->qdepth - 2;
+
        usb_set_intfdata(intf, shost);
        result = scsi_add_host(shost, &intf->dev);
        if (result)
index ccc113e83d88e2b1e69e207989a5d5b49d526ae2..53341a77d89f275b44f9baf44f6caa25d7675802 100644 (file)
@@ -64,6 +64,13 @@ UNUSUAL_DEV(0x0bc2, 0x3312, 0x0000, 0x9999,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_NO_ATA_1X),
 
+/* Reported-by: David Webb <djw@noc.ac.uk> */
+UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
+               "Seagate",
+               "Expansion Desk",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_NO_REPORT_LUNS),
+
 /* Reported-by: Hans de Goede <hdegoede@redhat.com> */
 UNUSUAL_DEV(0x0bc2, 0x3320, 0x0000, 0x9999,
                "Seagate",
index 43576ed31ccdc4bfcfcafba0f95e6006a6bd63b6..9de988a0f856d3d8e220b47e7fe1c8e93cd11c00 100644 (file)
@@ -482,7 +482,7 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
                        US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 |
                        US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE |
                        US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES |
-                       US_FL_MAX_SECTORS_240);
+                       US_FL_MAX_SECTORS_240 | US_FL_NO_REPORT_LUNS);
 
        p = quirks;
        while (*p) {
@@ -532,6 +532,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
                case 'i':
                        f |= US_FL_IGNORE_DEVICE;
                        break;
+               case 'j':
+                       f |= US_FL_NO_REPORT_LUNS;
+                       break;
                case 'l':
                        f |= US_FL_NOT_LOCKABLE;
                        break;
index facaaf003f19931b2f15603568bb565f3de40607..e40da7759a0e6cd3a55bccdf90bb10284376ab11 100644 (file)
@@ -741,6 +741,17 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
        if (!(size > 0))
                return 0;
 
+       if (size > urb->transfer_buffer_length) {
+               /* should not happen, probably malicious packet */
+               if (ud->side == USBIP_STUB) {
+                       usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
+                       return 0;
+               } else {
+                       usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
+                       return -EPIPE;
+               }
+       }
+
        ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size);
        if (ret != size) {
                dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret);
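
The added bound check closes a buffer overflow: the payload length arrives from the network, so a hostile peer could claim more bytes than urb->transfer_buffer holds. On the stub (device) side an error event tears the connection down and 0 is returned; on the vhci side -EPIPE propagates. The rule it encodes, in a hypothetical receive helper (recv_exact() is illustrative):

    static int recv_payload(struct socket *sock, void *buf,
                            size_t capacity, size_t claimed)
    {
            if (claimed > capacity)         /* never trust wire-supplied lengths */
                    return -EPIPE;

            return recv_exact(sock, buf, claimed);
    }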
index fe274b5851c77ff5e59b7c88f205cbf09ec61114..93e66a9148b90de9a3308d3b4b49a359414c7363 100644 (file)
@@ -440,13 +440,14 @@ static int clcdfb_register(struct clcd_fb *fb)
                fb->off_ienb = CLCD_PL111_IENB;
                fb->off_cntl = CLCD_PL111_CNTL;
        } else {
-#ifdef CONFIG_ARCH_VERSATILE
-               fb->off_ienb = CLCD_PL111_IENB;
-               fb->off_cntl = CLCD_PL111_CNTL;
-#else
-               fb->off_ienb = CLCD_PL110_IENB;
-               fb->off_cntl = CLCD_PL110_CNTL;
-#endif
+               if (of_machine_is_compatible("arm,versatile-ab") ||
+                   of_machine_is_compatible("arm,versatile-pb")) {
+                       fb->off_ienb = CLCD_PL111_IENB;
+                       fb->off_cntl = CLCD_PL111_CNTL;
+               } else {
+                       fb->off_ienb = CLCD_PL110_IENB;
+                       fb->off_cntl = CLCD_PL110_CNTL;
+               }
        }
 
        fb->clk = clk_get(&fb->dev->dev, NULL);
index abfd1f6e33275b6d3b192df0c504669ab771c5e7..1954ec913ce5d188e47bc90f634c3a2909beb40a 100644 (file)
@@ -200,20 +200,16 @@ static struct omap_dss_driver sharp_ls_ops = {
 static int sharp_ls_get_gpio(struct device *dev, int gpio, unsigned long flags,
                  char *desc, struct gpio_desc **gpiod)
 {
-       struct gpio_desc *gd;
        int r;
 
-       *gpiod = NULL;
-
        r = devm_gpio_request_one(dev, gpio, flags, desc);
-       if (r)
+       if (r) {
+               *gpiod = NULL;
                return r == -ENOENT ? 0 : r;
+       }
 
-       gd = gpio_to_desc(gpio);
-       if (IS_ERR(gd))
-               return PTR_ERR(gd) == -ENOENT ? 0 : PTR_ERR(gd);
+       *gpiod = gpio_to_desc(gpio);
 
-       *gpiod = gd;
        return 0;
 }
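
The sharp_ls_get_gpio() rewrite treats -ENOENT from devm_gpio_request_one() as "this board simply doesn't wire the signal": it returns success with *gpiod left NULL, while any other error still fails the probe. Callers then test the descriptor for NULL before driving it, roughly like this (illustrative struct and function; gpiod_set_value() is the real kernel API):

/* A NULL descriptor means the signal isn't wired on this board,
 * so it is silently skipped. */
struct sharp_ls_sketch {
        struct gpio_desc *resb_gpio;    /* may be NULL: optional */
};

static void sharp_ls_set_resb(struct sharp_ls_sketch *ddata, int value)
{
        if (ddata->resb_gpio)
                gpiod_set_value(ddata->resb_gpio, value);
}
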
 
index 71a923e53f93d62e0b6f3dd213077ddf15b5e799..3b1ca441107370d39c54bd4bffa24248eaa730c5 100644 (file)
@@ -735,7 +735,7 @@ out:
 
 out_unmap:
        for (i = 0; i < nr_pages; i++)
-               page_cache_release(pages[i]);
+               put_page(pages[i]);
 
        kfree(pages);
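
From here on, many hunks in this merge are the same mechanical conversion: page_cache_release() becomes put_page(), and PAGE_CACHE_SIZE/PAGE_CACHE_SHIFT/PAGE_CACHE_MASK become PAGE_SIZE/PAGE_SHIFT/PAGE_MASK, across 9p, affs, afs, binfmt_elf, block_dev, btrfs and others below. The PAGE_CACHE_* names were originally meant to allow page-cache units larger than a hardware page, but the two were always identical, so the aliases were removed tree-wide in the 4.6 cycle. The removed names were literal 1:1 defines, roughly:

/* What the removed aliases expanded to (from linux/pagemap.h): */
#define PAGE_CACHE_SHIFT        PAGE_SHIFT
#define PAGE_CACHE_SIZE         PAGE_SIZE
#define PAGE_CACHE_MASK         PAGE_MASK
#define page_cache_get(page)    get_page(page)
#define page_cache_release(page)        put_page(page)
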
 
index f6f28cc7eb45765af6db2f4fa2a41de6fcbecf05..e76bd91a29da3b4e09ddf35c806159e7a0ab61c0 100644 (file)
@@ -17,6 +17,7 @@
  *
  */
 
+#include <linux/delay.h>
 #define VIRTIO_PCI_NO_LEGACY
 #include "virtio_pci_common.h"
 
@@ -271,9 +272,13 @@ static void vp_reset(struct virtio_device *vdev)
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        /* 0 status means a reset. */
        vp_iowrite8(0, &vp_dev->common->device_status);
-       /* Flush out the status write, and flush in device writes,
-        * including MSI-X interrupts, if any. */
-       vp_ioread8(&vp_dev->common->device_status);
+       /* After writing 0 to device_status, the driver MUST wait for a read of
+        * device_status to return 0 before reinitializing the device.
+        * This will flush out the status write, and flush in device writes,
+        * including MSI-X interrupts, if any.
+        */
+       while (vp_ioread8(&vp_dev->common->device_status))
+               msleep(1);
        /* Flush pending VQ/configuration callbacks. */
        vp_synchronize_vectors(vdev);
 }
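
The vp_reset() change turns a single flushing read of device_status into a poll loop: as the new comment says, the virtio 1.0 spec requires the driver to keep reading until the device reports 0 before reinitializing, since the reset may not complete instantly. A compact model of the handshake (the function and raw pointer are illustrative; the real driver uses vp_ioread8/vp_iowrite8 and sleeps 1 ms between polls):

#include <stdint.h>

/* Model of the reset handshake: write 0, then keep reading until the
 * device reports 0 back.  A single read (the old code) only flushed the
 * write; it did not guarantee the reset had completed. */
static void reset_handshake(volatile uint8_t *device_status)
{
        *device_status = 0;             /* request reset */
        while (*device_status != 0)
                ;                       /* real driver: msleep(1) per poll */
}
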
index 5c802d47892c5091c971eaef256310f99b4d175d..ca6bfddaacad2beeb1897a0e397bd5300ccff30d 100644 (file)
@@ -1006,7 +1006,7 @@ struct virtqueue *vring_create_virtqueue(
        const char *name)
 {
        struct virtqueue *vq;
-       void *queue;
+       void *queue = NULL;
        dma_addr_t dma_addr;
        size_t queue_size_in_bytes;
        struct vring vring;
index 9781e0dd59d6fc6a1b02da5e51e516e27ccafad1..d46839f51e730ff50e2c756a9f4b1326af82e2ec 100644 (file)
@@ -151,6 +151,8 @@ static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
 static void balloon_process(struct work_struct *work);
 static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
 
+static void release_memory_resource(struct resource *resource);
+
 /* When ballooning out (allocating memory to return to Xen) we don't really
    want the kernel to try too hard since that can trigger the oom killer. */
 #define GFP_BALLOON \
@@ -267,6 +269,20 @@ static struct resource *additional_memory_resource(phys_addr_t size)
                return NULL;
        }
 
+#ifdef CONFIG_SPARSEMEM
+       {
+               unsigned long limit = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);
+               unsigned long pfn = res->start >> PAGE_SHIFT;
+
+               if (pfn > limit) {
+                       pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
+                              pfn, limit);
+                       release_memory_resource(res);
+                       return NULL;
+               }
+       }
+#endif
+
        return res;
 }
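
With CONFIG_SPARSEMEM, the kernel can only address physical memory up to MAX_PHYSMEM_BITS, so the balloon driver now refuses to hotplug a resource whose start PFN lies beyond 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT), releasing the reservation instead of registering unusable RAM. A small worked example of that limit (the constants below are illustrative x86-64 values):

#include <stdio.h>

int main(void)
{
        /* Illustrative x86-64 SPARSEMEM values: 46 physical address bits,
         * 4 KiB pages (PAGE_SHIFT = 12). */
        unsigned long max_physmem_bits = 46;
        unsigned long page_shift = 12;
        unsigned long limit = 1UL << (max_physmem_bits - page_shift);

        /* 2^34 page frames = 64 TiB of addressable physical memory. */
        printf("PFN limit %lu, %lu TiB addressable\n",
               limit, (limit << page_shift) >> 40);
        return 0;
}
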
 
index 488017a0806a237d6bd44f330be055059e5141cb..cb7138c97c692da92b71af1e6adc6539020702b4 100644 (file)
@@ -484,9 +484,19 @@ static void eoi_pirq(struct irq_data *data)
        struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
        int rc = 0;
 
-       irq_move_irq(data);
+       if (!VALID_EVTCHN(evtchn))
+               return;
 
-       if (VALID_EVTCHN(evtchn))
+       if (unlikely(irqd_is_setaffinity_pending(data))) {
+               int masked = test_and_set_mask(evtchn);
+
+               clear_evtchn(evtchn);
+
+               irq_move_masked_irq(data);
+
+               if (!masked)
+                       unmask_evtchn(evtchn);
+       } else
                clear_evtchn(evtchn);
 
        if (pirq_needs_eoi(data->irq)) {
@@ -1357,9 +1367,19 @@ static void ack_dynirq(struct irq_data *data)
 {
        int evtchn = evtchn_from_irq(data->irq);
 
-       irq_move_irq(data);
+       if (!VALID_EVTCHN(evtchn))
+               return;
 
-       if (VALID_EVTCHN(evtchn))
+       if (unlikely(irqd_is_setaffinity_pending(data))) {
+               int masked = test_and_set_mask(evtchn);
+
+               clear_evtchn(evtchn);
+
+               irq_move_masked_irq(data);
+
+               if (!masked)
+                       unmask_evtchn(evtchn);
+       } else
                clear_evtchn(evtchn);
 }
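
Both hunks above replace irq_move_irq() with the same open-coded sequence for a pending affinity change: mask the event channel, ack it, migrate the IRQ while masked, then unmask only if it was not already masked. An event firing mid-migration can then only set the channel's pending bit, and is re-raised on the new CPU at unmask time instead of being lost. Condensed into one illustrative function (the helpers named here are the real ones used in the hunks):

static void ack_and_maybe_move(struct irq_data *data, int evtchn)
{
        if (unlikely(irqd_is_setaffinity_pending(data))) {
                int masked = test_and_set_mask(evtchn);   /* 1. mask   */

                clear_evtchn(evtchn);                     /* 2. ack    */
                irq_move_masked_irq(data);                /* 3. move   */
                if (!masked)
                        unmask_evtchn(evtchn);            /* 4. unmask */
        } else {
                clear_evtchn(evtchn);
        }
}
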
 
index 38272ad245516c272ab571bed1a5c962f4a0e808..f4edd6df3df235c55aef1329cb8e17b5ae8cf9f7 100644 (file)
@@ -316,7 +316,6 @@ static int evtchn_resize_ring(struct per_user_data *u)
 {
        unsigned int new_size;
        evtchn_port_t *new_ring, *old_ring;
-       unsigned int p, c;
 
        /*
         * Ensure the ring is large enough to capture all possible
@@ -346,20 +345,17 @@ static int evtchn_resize_ring(struct per_user_data *u)
        /*
         * Copy the old ring contents to the new ring.
         *
-        * If the ring contents crosses the end of the current ring,
-        * it needs to be copied in two chunks.
+        * To take care of wrapping, a full ring, and the new index
+        * pointing into the second half, simply copy the old contents
+        * twice.
         *
         * +---------+    +------------------+
-        * |34567  12| -> |       1234567    |
-        * +-----p-c-+    +------------------+
+        * |34567  12| -> |34567  1234567  12|
+        * +-----p-c-+    +-------c------p---+
         */
-       p = evtchn_ring_offset(u, u->ring_prod);
-       c = evtchn_ring_offset(u, u->ring_cons);
-       if (p < c) {
-               memcpy(new_ring + c, u->ring + c, (u->ring_size - c) * sizeof(*u->ring));
-               memcpy(new_ring + u->ring_size, u->ring, p * sizeof(*u->ring));
-       } else
-               memcpy(new_ring + c, u->ring + c, (p - c) * sizeof(*u->ring));
+       memcpy(new_ring, old_ring, u->ring_size * sizeof(*u->ring));
+       memcpy(new_ring + u->ring_size, old_ring,
+              u->ring_size * sizeof(*u->ring));
 
        u->ring = new_ring;
        u->ring_size = new_size;
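
The resize now relies on the ring indices being free-running counters reduced modulo the ring size: if the old contents are copied twice into a ring twice as large, every in-flight slot is still found at index & (new_size - 1), whatever the wrap state, with no offset recomputation. A runnable demonstration of the trick:

#include <assert.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        /* Old ring of 8 slots; free-running indices cons=14, prod=17 mean
         * three items are in flight and wrap the end of the ring. */
        unsigned int old_size = 8, new_size = 16;
        unsigned int cons = 14, prod = 17;
        int old_ring[8] = { 0 }, new_ring[16];

        for (unsigned int i = cons; i != prod; i++)
                old_ring[i & (old_size - 1)] = (int)i;

        /* The trick from the hunk: copy the old contents twice. */
        memcpy(new_ring, old_ring, old_size * sizeof(*old_ring));
        memcpy(new_ring + old_size, old_ring, old_size * sizeof(*old_ring));

        /* The unchanged indices still find every in-flight item. */
        for (unsigned int i = cons; i != prod; i++)
                assert(new_ring[i & (new_size - 1)] == (int)i);

        puts("ring doubled; in-flight items preserved");
        return 0;
}
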
index e9e04376c52ce8ef4673d041281c1db1e2c4ac99..ac9225e86bf3584ae126e283c4568637e5b5ffca 100644 (file)
@@ -153,7 +153,7 @@ static void v9fs_invalidate_page(struct page *page, unsigned int offset,
         * If called with zero offset, we should release
         * the private state associated with the page
         */
-       if (offset == 0 && length == PAGE_CACHE_SIZE)
+       if (offset == 0 && length == PAGE_SIZE)
                v9fs_fscache_invalidate_page(page);
 }
 
@@ -166,10 +166,10 @@ static int v9fs_vfs_writepage_locked(struct page *page)
        struct bio_vec bvec;
        int err, len;
 
-       if (page->index == size >> PAGE_CACHE_SHIFT)
-               len = size & ~PAGE_CACHE_MASK;
+       if (page->index == size >> PAGE_SHIFT)
+               len = size & ~PAGE_MASK;
        else
-               len = PAGE_CACHE_SIZE;
+               len = PAGE_SIZE;
 
        bvec.bv_page = page;
        bvec.bv_offset = 0;
@@ -271,7 +271,7 @@ static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
        int retval = 0;
        struct page *page;
        struct v9fs_inode *v9inode;
-       pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+       pgoff_t index = pos >> PAGE_SHIFT;
        struct inode *inode = mapping->host;
 
 
@@ -288,11 +288,11 @@ start:
        if (PageUptodate(page))
                goto out;
 
-       if (len == PAGE_CACHE_SIZE)
+       if (len == PAGE_SIZE)
                goto out;
 
        retval = v9fs_fid_readpage(v9inode->writeback_fid, page);
-       page_cache_release(page);
+       put_page(page);
        if (!retval)
                goto start;
 out:
@@ -313,7 +313,7 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping,
                /*
                 * zero out the rest of the area
                 */
-               unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+               unsigned from = pos & (PAGE_SIZE - 1);
 
                zero_user(page, from + copied, len - copied);
                flush_dcache_page(page);
@@ -331,7 +331,7 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping,
        }
        set_page_dirty(page);
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 
        return copied;
 }
index eadc894faea2ead1f720ad22da63a42889967ebe..b84c291ba1ebf38d0295b0dd919c794c28caa099 100644 (file)
@@ -421,8 +421,8 @@ v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
                struct inode *inode = file_inode(file);
                loff_t i_size;
                unsigned long pg_start, pg_end;
-               pg_start = origin >> PAGE_CACHE_SHIFT;
-               pg_end = (origin + retval - 1) >> PAGE_CACHE_SHIFT;
+               pg_start = origin >> PAGE_SHIFT;
+               pg_end = (origin + retval - 1) >> PAGE_SHIFT;
                if (inode->i_mapping && inode->i_mapping->nrpages)
                        invalidate_inode_pages2_range(inode->i_mapping,
                                                      pg_start, pg_end);
index bf495cedec26a2aad7ff514a25d7609499b7d4dc..de3ed862919691344e5080c855403d4236512cd8 100644 (file)
@@ -87,7 +87,7 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
                sb->s_op = &v9fs_super_ops;
        sb->s_bdi = &v9ses->bdi;
        if (v9ses->cache)
-               sb->s_bdi->ra_pages = (VM_MAX_READAHEAD * 1024)/PAGE_CACHE_SIZE;
+               sb->s_bdi->ra_pages = (VM_MAX_READAHEAD * 1024)/PAGE_SIZE;
 
        sb->s_flags |= MS_ACTIVE | MS_DIRSYNC | MS_NOATIME;
        if (!v9ses->cache)
index 22fc7c802d698766fb5b274e686e89df4e0d76d1..0cde550050e8e8c54c75ca58a943a3a82007eddf 100644 (file)
@@ -510,9 +510,9 @@ affs_do_readpage_ofs(struct page *page, unsigned to)
 
        pr_debug("%s(%lu, %ld, 0, %d)\n", __func__, inode->i_ino,
                 page->index, to);
-       BUG_ON(to > PAGE_CACHE_SIZE);
+       BUG_ON(to > PAGE_SIZE);
        bsize = AFFS_SB(sb)->s_data_blksize;
-       tmp = page->index << PAGE_CACHE_SHIFT;
+       tmp = page->index << PAGE_SHIFT;
        bidx = tmp / bsize;
        boff = tmp % bsize;
 
@@ -613,10 +613,10 @@ affs_readpage_ofs(struct file *file, struct page *page)
        int err;
 
        pr_debug("%s(%lu, %ld)\n", __func__, inode->i_ino, page->index);
-       to = PAGE_CACHE_SIZE;
-       if (((page->index + 1) << PAGE_CACHE_SHIFT) > inode->i_size) {
-               to = inode->i_size & ~PAGE_CACHE_MASK;
-               memset(page_address(page) + to, 0, PAGE_CACHE_SIZE - to);
+       to = PAGE_SIZE;
+       if (((page->index + 1) << PAGE_SHIFT) > inode->i_size) {
+               to = inode->i_size & ~PAGE_MASK;
+               memset(page_address(page) + to, 0, PAGE_SIZE - to);
        }
 
        err = affs_do_readpage_ofs(page, to);
@@ -646,7 +646,7 @@ static int affs_write_begin_ofs(struct file *file, struct address_space *mapping
                        return err;
        }
 
-       index = pos >> PAGE_CACHE_SHIFT;
+       index = pos >> PAGE_SHIFT;
        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page)
                return -ENOMEM;
@@ -656,10 +656,10 @@ static int affs_write_begin_ofs(struct file *file, struct address_space *mapping
                return 0;
 
        /* XXX: inefficient but safe in the face of short writes */
-       err = affs_do_readpage_ofs(page, PAGE_CACHE_SIZE);
+       err = affs_do_readpage_ofs(page, PAGE_SIZE);
        if (err) {
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
        }
        return err;
 }
@@ -677,7 +677,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
        u32 tmp;
        int written;
 
-       from = pos & (PAGE_CACHE_SIZE - 1);
+       from = pos & (PAGE_SIZE - 1);
        to = pos + len;
        /*
         * XXX: not sure if this can handle short copies (len < copied), but
@@ -692,7 +692,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
 
        bh = NULL;
        written = 0;
-       tmp = (page->index << PAGE_CACHE_SHIFT) + from;
+       tmp = (page->index << PAGE_SHIFT) + from;
        bidx = tmp / bsize;
        boff = tmp % bsize;
        if (boff) {
@@ -788,13 +788,13 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
 
 done:
        affs_brelse(bh);
-       tmp = (page->index << PAGE_CACHE_SHIFT) + from;
+       tmp = (page->index << PAGE_SHIFT) + from;
        if (tmp > inode->i_size)
                inode->i_size = AFFS_I(inode)->mmu_private = tmp;
 
 err_first_bh:
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 
        return written;
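
affs_write_begin_ofs() above handles sub-page writes by first reading the whole page in with affs_do_readpage_ofs(page, PAGE_SIZE); as its XXX comment concedes, the read-modify-write is inefficient but safe, because bytes outside the written range are preserved. A toy demonstration of the same idea over a single in-memory block:

#include <stdio.h>
#include <string.h>

#define BLK_SIZE 16

/* Toy backing store: one 16-byte "block". */
static char store[BLK_SIZE] = "AAAAAAAAAAAAAAA";

static void read_block(char *buf)        { memcpy(buf, store, BLK_SIZE); }
static void write_block(const char *buf) { memcpy(store, buf, BLK_SIZE); }

/* Read-modify-write for a sub-block update: fill the block from storage
 * first so bytes outside [off, off+len) survive the write. */
static void write_partial(unsigned int off, const char *buf, size_t len)
{
        char block[BLK_SIZE];

        if (off != 0 || len != BLK_SIZE)
                read_block(block);        /* inefficient but safe */
        memcpy(block + off, buf, len);
        write_block(block);
}

int main(void)
{
        write_partial(4, "bbbb", 4);
        printf("%.*s\n", BLK_SIZE, store);  /* AAAAbbbbAAAAAAA */
        return 0;
}
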
 
index e10e17788f06073b2cd80204bf09343918654111..5fda2bc53cd7a35a38cefae7d72d9c8a142929f7 100644 (file)
@@ -181,7 +181,7 @@ error:
 static inline void afs_dir_put_page(struct page *page)
 {
        kunmap(page);
-       page_cache_release(page);
+       put_page(page);
 }
 
 /*
index 999bc3caec9276b8148448d4ec2eadb0badbf3f0..6344aee4ac4bff8e768fc7c344ec7ccea3670b98 100644 (file)
@@ -164,7 +164,7 @@ int afs_page_filler(void *data, struct page *page)
                _debug("cache said ENOBUFS");
        default:
        go_on:
-               offset = page->index << PAGE_CACHE_SHIFT;
+               offset = page->index << PAGE_SHIFT;
                len = min_t(size_t, i_size_read(inode) - offset, PAGE_SIZE);
 
                /* read the contents of the file from the server into the
@@ -319,7 +319,7 @@ static void afs_invalidatepage(struct page *page, unsigned int offset,
        BUG_ON(!PageLocked(page));
 
        /* we clean up only if the entire page is being invalidated */
-       if (offset == 0 && length == PAGE_CACHE_SIZE) {
+       if (offset == 0 && length == PAGE_SIZE) {
 #ifdef CONFIG_AFS_FSCACHE
                if (PageFsCache(page)) {
                        struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
index ccd0b212e82a703b6ea1792339f7bf4603e5015f..81dd075356b968e3a360492a8a5f11dcc0f450b6 100644 (file)
@@ -93,7 +93,7 @@ int afs_mntpt_check_symlink(struct afs_vnode *vnode, struct key *key)
 
        kunmap(page);
 out_free:
-       page_cache_release(page);
+       put_page(page);
 out:
        _leave(" = %d", ret);
        return ret;
@@ -189,7 +189,7 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
                buf = kmap_atomic(page);
                memcpy(devname, buf, size);
                kunmap_atomic(buf);
-               page_cache_release(page);
+               put_page(page);
                page = NULL;
        }
 
@@ -211,7 +211,7 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
        return mnt;
 
 error:
-       page_cache_release(page);
+       put_page(page);
 error_no_page:
        free_page((unsigned long) options);
 error_no_options:
index b50642870a43b9675a3b1fe97b1cfadcdbb1c54a..63cd9f939f19965b3b2f049b444b9bbba97528f9 100644 (file)
@@ -65,6 +65,12 @@ static void afs_async_workfn(struct work_struct *work)
        call->async_workfn(call);
 }
 
+static int afs_wait_atomic_t(atomic_t *p)
+{
+       schedule();
+       return 0;
+}
+
 /*
  * open an RxRPC socket and bind it to be a server for callback notifications
  * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT
@@ -126,13 +132,16 @@ void afs_close_socket(void)
 {
        _enter("");
 
+       wait_on_atomic_t(&afs_outstanding_calls, afs_wait_atomic_t,
+                        TASK_UNINTERRUPTIBLE);
+       _debug("no outstanding calls");
+
        sock_release(afs_socket);
 
        _debug("dework");
        destroy_workqueue(afs_async_calls);
 
        ASSERTCMP(atomic_read(&afs_outstanding_skbs), ==, 0);
-       ASSERTCMP(atomic_read(&afs_outstanding_calls), ==, 0);
        _leave("");
 }
 
@@ -178,8 +187,6 @@ static void afs_free_call(struct afs_call *call)
 {
        _debug("DONE %p{%s} [%d]",
               call, call->type->name, atomic_read(&afs_outstanding_calls));
-       if (atomic_dec_return(&afs_outstanding_calls) == -1)
-               BUG();
 
        ASSERTCMP(call->rxcall, ==, NULL);
        ASSERT(!work_pending(&call->async_work));
@@ -188,6 +195,9 @@ static void afs_free_call(struct afs_call *call)
 
        kfree(call->request);
        kfree(call);
+
+       if (atomic_dec_and_test(&afs_outstanding_calls))
+               wake_up_atomic_t(&afs_outstanding_calls);
 }
 
 /*
@@ -420,9 +430,11 @@ error_kill_call:
 }
 
 /*
- * handles intercepted messages that were arriving in the socket's Rx queue
- * - called with the socket receive queue lock held to ensure message ordering
- * - called with softirqs disabled
+ * Handles intercepted messages that were arriving in the socket's Rx queue.
+ *
+ * Called from the AF_RXRPC call processor in waitqueue process context.  For
+ * each call, it is guaranteed this will be called in order of packets to be
+ * delivered.
  */
 static void afs_rx_interceptor(struct sock *sk, unsigned long user_call_ID,
                               struct sk_buff *skb)
@@ -513,6 +525,12 @@ static void afs_deliver_to_call(struct afs_call *call)
                        call->state = AFS_CALL_ABORTED;
                        _debug("Rcv ABORT %u -> %d", abort_code, call->error);
                        break;
+               case RXRPC_SKB_MARK_LOCAL_ABORT:
+                       abort_code = rxrpc_kernel_get_abort_code(skb);
+                       call->error = call->type->abort_to_error(abort_code);
+                       call->state = AFS_CALL_ABORTED;
+                       _debug("Loc ABORT %u -> %d", abort_code, call->error);
+                       break;
                case RXRPC_SKB_MARK_NET_ERROR:
                        call->error = -rxrpc_kernel_get_error_number(skb);
                        call->state = AFS_CALL_ERROR;
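
The afs changes above replace a teardown-time assertion that no calls remain with an actual drain: afs_close_socket() sleeps in wait_on_atomic_t() until afs_outstanding_calls reaches zero, and afs_free_call() pairs atomic_dec_and_test() with wake_up_atomic_t() when it drops the last call. Reduced to the bare pattern (wait_on_atomic_t(), wake_up_atomic_t(), atomic_dec_and_test() and schedule() are the real kernel APIs of this era; the surrounding functions are illustrative):

static atomic_t outstanding = ATOMIC_INIT(0);

static int drain_wait(atomic_t *p)
{
        schedule();     /* wait_on_atomic_t() re-checks the counter on wakeup */
        return 0;
}

static void put_call(void)
{
        if (atomic_dec_and_test(&outstanding))
                wake_up_atomic_t(&outstanding);
}

static void teardown(void)
{
        wait_on_atomic_t(&outstanding, drain_wait, TASK_UNINTERRUPTIBLE);
        /* counter is zero: safe to release shared resources */
}
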
index 81afefe7d8a6ec4602f95e7db4c2f8fc0dc12e48..fbdb022b75a27be11b5699203f8c04e32c499cf6 100644 (file)
@@ -315,8 +315,8 @@ static int afs_fill_super(struct super_block *sb,
        _enter("");
 
        /* fill in the superblock */
-       sb->s_blocksize         = PAGE_CACHE_SIZE;
-       sb->s_blocksize_bits    = PAGE_CACHE_SHIFT;
+       sb->s_blocksize         = PAGE_SIZE;
+       sb->s_blocksize_bits    = PAGE_SHIFT;
        sb->s_magic             = AFS_FS_MAGIC;
        sb->s_op                = &afs_super_ops;
        sb->s_bdi               = &as->volume->bdi;
index dfef94f70667cde167dad60d71d1e23622c24cf7..65de439bdc4f5402bc513eae3e66c09ce685965f 100644 (file)
@@ -93,10 +93,10 @@ static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
        _enter(",,%llu", (unsigned long long)pos);
 
        i_size = i_size_read(&vnode->vfs_inode);
-       if (pos + PAGE_CACHE_SIZE > i_size)
+       if (pos + PAGE_SIZE > i_size)
                len = i_size - pos;
        else
-               len = PAGE_CACHE_SIZE;
+               len = PAGE_SIZE;
 
        ret = afs_vnode_fetch_data(vnode, key, pos, len, page);
        if (ret < 0) {
@@ -123,9 +123,9 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
        struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
        struct page *page;
        struct key *key = file->private_data;
-       unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+       unsigned from = pos & (PAGE_SIZE - 1);
        unsigned to = from + len;
-       pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+       pgoff_t index = pos >> PAGE_SHIFT;
        int ret;
 
        _enter("{%x:%u},{%lx},%u,%u",
@@ -151,8 +151,8 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
        *pagep = page;
        /* page won't leak in error case: it eventually gets cleaned off LRU */
 
-       if (!PageUptodate(page) && len != PAGE_CACHE_SIZE) {
-               ret = afs_fill_page(vnode, key, index << PAGE_CACHE_SHIFT, page);
+       if (!PageUptodate(page) && len != PAGE_SIZE) {
+               ret = afs_fill_page(vnode, key, index << PAGE_SHIFT, page);
                if (ret < 0) {
                        kfree(candidate);
                        _leave(" = %d [prep]", ret);
@@ -266,7 +266,7 @@ int afs_write_end(struct file *file, struct address_space *mapping,
        if (PageDirty(page))
                _debug("dirtied");
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 
        return copied;
 }
@@ -480,7 +480,7 @@ static int afs_writepages_region(struct address_space *mapping,
 
                if (page->index > end) {
                        *_next = index;
-                       page_cache_release(page);
+                       put_page(page);
                        _leave(" = 0 [%lx]", *_next);
                        return 0;
                }
@@ -494,7 +494,7 @@ static int afs_writepages_region(struct address_space *mapping,
 
                if (page->mapping != mapping) {
                        unlock_page(page);
-                       page_cache_release(page);
+                       put_page(page);
                        continue;
                }
 
@@ -515,7 +515,7 @@ static int afs_writepages_region(struct address_space *mapping,
 
                ret = afs_write_back_from_locked_page(wb, page);
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
                if (ret < 0) {
                        _leave(" = %d", ret);
                        return ret;
@@ -551,13 +551,13 @@ int afs_writepages(struct address_space *mapping,
                                                    &next);
                mapping->writeback_index = next;
        } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
-               end = (pgoff_t)(LLONG_MAX >> PAGE_CACHE_SHIFT);
+               end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
                ret = afs_writepages_region(mapping, wbc, 0, end, &next);
                if (wbc->nr_to_write > 0)
                        mapping->writeback_index = next;
        } else {
-               start = wbc->range_start >> PAGE_CACHE_SHIFT;
-               end = wbc->range_end >> PAGE_CACHE_SHIFT;
+               start = wbc->range_start >> PAGE_SHIFT;
+               end = wbc->range_end >> PAGE_SHIFT;
                ret = afs_writepages_region(mapping, wbc, start, end, &next);
        }
 
index 7d914c67a9d07f7ebb8a3f8f0bea8200d739ee9f..81381cc0dd177f5f891234a9b065c78a2eec1714 100644 (file)
@@ -2292,7 +2292,7 @@ static int elf_core_dump(struct coredump_params *cprm)
                                void *kaddr = kmap(page);
                                stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
                                kunmap(page);
-                               page_cache_release(page);
+                               put_page(page);
                        } else
                                stop = !dump_skip(cprm, PAGE_SIZE);
                        if (stop)
index b1adb92e69de7a8049dd914f3f8bb83d8d8b9c7f..083ea2bc60abd5f7c1ef1e76ad4b304e884c44fa 100644 (file)
@@ -1533,7 +1533,7 @@ static bool elf_fdpic_dump_segments(struct coredump_params *cprm)
                                void *kaddr = kmap(page);
                                res = dump_emit(cprm, kaddr, PAGE_SIZE);
                                kunmap(page);
-                               page_cache_release(page);
+                               put_page(page);
                        } else {
                                res = dump_skip(cprm, PAGE_SIZE);
                        }
index 3172c4e2f50255e1cb1fb426c1723a6b7b493bb2..20a2c02b77c45253d82014b8c598f4bd4a5219d0 100644 (file)
@@ -331,7 +331,7 @@ static int blkdev_write_end(struct file *file, struct address_space *mapping,
        ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);
 
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 
        return ret;
 }
@@ -1149,7 +1149,7 @@ void bd_set_size(struct block_device *bdev, loff_t size)
        inode_lock(bdev->bd_inode);
        i_size_write(bdev->bd_inode, size);
        inode_unlock(bdev->bd_inode);
-       while (bsize < PAGE_CACHE_SIZE) {
+       while (bsize < PAGE_SIZE) {
                if (size & bsize)
                        break;
                bsize <<= 1;
index e34a71b3e225325dd73dd6ef51a434246119f279..516e19d1d202c6fe6c064b9b08a50d1527457680 100644 (file)
@@ -757,7 +757,7 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
                        BUG_ON(NULL == l);
 
                        ret = btrfsic_read_block(state, &tmp_next_block_ctx);
-                       if (ret < (int)PAGE_CACHE_SIZE) {
+                       if (ret < (int)PAGE_SIZE) {
                                printk(KERN_INFO
                                       "btrfsic: read @logical %llu failed!\n",
                                       tmp_next_block_ctx.start);
@@ -1231,15 +1231,15 @@ static void btrfsic_read_from_block_data(
        size_t offset_in_page;
        char *kaddr;
        char *dst = (char *)dstv;
-       size_t start_offset = block_ctx->start & ((u64)PAGE_CACHE_SIZE - 1);
-       unsigned long i = (start_offset + offset) >> PAGE_CACHE_SHIFT;
+       size_t start_offset = block_ctx->start & ((u64)PAGE_SIZE - 1);
+       unsigned long i = (start_offset + offset) >> PAGE_SHIFT;
 
        WARN_ON(offset + len > block_ctx->len);
-       offset_in_page = (start_offset + offset) & (PAGE_CACHE_SIZE - 1);
+       offset_in_page = (start_offset + offset) & (PAGE_SIZE - 1);
 
        while (len > 0) {
-               cur = min(len, ((size_t)PAGE_CACHE_SIZE - offset_in_page));
-               BUG_ON(i >= DIV_ROUND_UP(block_ctx->len, PAGE_CACHE_SIZE));
+               cur = min(len, ((size_t)PAGE_SIZE - offset_in_page));
+               BUG_ON(i >= DIV_ROUND_UP(block_ctx->len, PAGE_SIZE));
                kaddr = block_ctx->datav[i];
                memcpy(dst, kaddr + offset_in_page, cur);
 
@@ -1605,8 +1605,8 @@ static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx)
 
                BUG_ON(!block_ctx->datav);
                BUG_ON(!block_ctx->pagev);
-               num_pages = (block_ctx->len + (u64)PAGE_CACHE_SIZE - 1) >>
-                           PAGE_CACHE_SHIFT;
+               num_pages = (block_ctx->len + (u64)PAGE_SIZE - 1) >>
+                           PAGE_SHIFT;
                while (num_pages > 0) {
                        num_pages--;
                        if (block_ctx->datav[num_pages]) {
@@ -1637,15 +1637,15 @@ static int btrfsic_read_block(struct btrfsic_state *state,
        BUG_ON(block_ctx->datav);
        BUG_ON(block_ctx->pagev);
        BUG_ON(block_ctx->mem_to_free);
-       if (block_ctx->dev_bytenr & ((u64)PAGE_CACHE_SIZE - 1)) {
+       if (block_ctx->dev_bytenr & ((u64)PAGE_SIZE - 1)) {
                printk(KERN_INFO
                       "btrfsic: read_block() with unaligned bytenr %llu\n",
                       block_ctx->dev_bytenr);
                return -1;
        }
 
-       num_pages = (block_ctx->len + (u64)PAGE_CACHE_SIZE - 1) >>
-                   PAGE_CACHE_SHIFT;
+       num_pages = (block_ctx->len + (u64)PAGE_SIZE - 1) >>
+                   PAGE_SHIFT;
        block_ctx->mem_to_free = kzalloc((sizeof(*block_ctx->datav) +
                                          sizeof(*block_ctx->pagev)) *
                                         num_pages, GFP_NOFS);
@@ -1676,8 +1676,8 @@ static int btrfsic_read_block(struct btrfsic_state *state,
 
                for (j = i; j < num_pages; j++) {
                        ret = bio_add_page(bio, block_ctx->pagev[j],
-                                          PAGE_CACHE_SIZE, 0);
-                       if (PAGE_CACHE_SIZE != ret)
+                                          PAGE_SIZE, 0);
+                       if (PAGE_SIZE != ret)
                                break;
                }
                if (j == i) {
@@ -1693,7 +1693,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
                        return -1;
                }
                bio_put(bio);
-               dev_bytenr += (j - i) * PAGE_CACHE_SIZE;
+               dev_bytenr += (j - i) * PAGE_SIZE;
                i = j;
        }
        for (i = 0; i < num_pages; i++) {
@@ -1769,9 +1769,9 @@ static int btrfsic_test_for_metadata(struct btrfsic_state *state,
        u32 crc = ~(u32)0;
        unsigned int i;
 
-       if (num_pages * PAGE_CACHE_SIZE < state->metablock_size)
+       if (num_pages * PAGE_SIZE < state->metablock_size)
                return 1; /* not metadata */
-       num_pages = state->metablock_size >> PAGE_CACHE_SHIFT;
+       num_pages = state->metablock_size >> PAGE_SHIFT;
        h = (struct btrfs_header *)datav[0];
 
        if (memcmp(h->fsid, state->root->fs_info->fsid, BTRFS_UUID_SIZE))
@@ -1779,8 +1779,8 @@ static int btrfsic_test_for_metadata(struct btrfsic_state *state,
 
        for (i = 0; i < num_pages; i++) {
                u8 *data = i ? datav[i] : (datav[i] + BTRFS_CSUM_SIZE);
-               size_t sublen = i ? PAGE_CACHE_SIZE :
-                                   (PAGE_CACHE_SIZE - BTRFS_CSUM_SIZE);
+               size_t sublen = i ? PAGE_SIZE :
+                                   (PAGE_SIZE - BTRFS_CSUM_SIZE);
 
                crc = btrfs_crc32c(crc, data, sublen);
        }
@@ -1826,14 +1826,14 @@ again:
                if (block->is_superblock) {
                        bytenr = btrfs_super_bytenr((struct btrfs_super_block *)
                                                    mapped_datav[0]);
-                       if (num_pages * PAGE_CACHE_SIZE <
+                       if (num_pages * PAGE_SIZE <
                            BTRFS_SUPER_INFO_SIZE) {
                                printk(KERN_INFO
                                       "btrfsic: cannot work with too short bios!\n");
                                return;
                        }
                        is_metadata = 1;
-                       BUG_ON(BTRFS_SUPER_INFO_SIZE & (PAGE_CACHE_SIZE - 1));
+                       BUG_ON(BTRFS_SUPER_INFO_SIZE & (PAGE_SIZE - 1));
                        processed_len = BTRFS_SUPER_INFO_SIZE;
                        if (state->print_mask &
                            BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE) {
@@ -1844,7 +1844,7 @@ again:
                }
                if (is_metadata) {
                        if (!block->is_superblock) {
-                               if (num_pages * PAGE_CACHE_SIZE <
+                               if (num_pages * PAGE_SIZE <
                                    state->metablock_size) {
                                        printk(KERN_INFO
                                               "btrfsic: cannot work with too short bios!\n");
@@ -1880,7 +1880,7 @@ again:
                        }
                        block->logical_bytenr = bytenr;
                } else {
-                       if (num_pages * PAGE_CACHE_SIZE <
+                       if (num_pages * PAGE_SIZE <
                            state->datablock_size) {
                                printk(KERN_INFO
                                       "btrfsic: cannot work with too short bios!\n");
@@ -2013,7 +2013,7 @@ again:
                        block->logical_bytenr = bytenr;
                        block->is_metadata = 1;
                        if (block->is_superblock) {
-                               BUG_ON(PAGE_CACHE_SIZE !=
+                               BUG_ON(PAGE_SIZE !=
                                       BTRFS_SUPER_INFO_SIZE);
                                ret = btrfsic_process_written_superblock(
                                                state,
@@ -2172,8 +2172,8 @@ again:
 continue_loop:
        BUG_ON(!processed_len);
        dev_bytenr += processed_len;
-       mapped_datav += processed_len >> PAGE_CACHE_SHIFT;
-       num_pages -= processed_len >> PAGE_CACHE_SHIFT;
+       mapped_datav += processed_len >> PAGE_SHIFT;
+       num_pages -= processed_len >> PAGE_SHIFT;
        goto again;
 }
 
@@ -2954,7 +2954,7 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio)
                        goto leave;
                cur_bytenr = dev_bytenr;
                for (i = 0; i < bio->bi_vcnt; i++) {
-                       BUG_ON(bio->bi_io_vec[i].bv_len != PAGE_CACHE_SIZE);
+                       BUG_ON(bio->bi_io_vec[i].bv_len != PAGE_SIZE);
                        mapped_datav[i] = kmap(bio->bi_io_vec[i].bv_page);
                        if (!mapped_datav[i]) {
                                while (i > 0) {
@@ -3037,16 +3037,16 @@ int btrfsic_mount(struct btrfs_root *root,
        struct list_head *dev_head = &fs_devices->devices;
        struct btrfs_device *device;
 
-       if (root->nodesize & ((u64)PAGE_CACHE_SIZE - 1)) {
+       if (root->nodesize & ((u64)PAGE_SIZE - 1)) {
                printk(KERN_INFO
-                      "btrfsic: cannot handle nodesize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
-                      root->nodesize, PAGE_CACHE_SIZE);
+                      "btrfsic: cannot handle nodesize %d not being a multiple of PAGE_SIZE %ld!\n",
+                      root->nodesize, PAGE_SIZE);
                return -1;
        }
-       if (root->sectorsize & ((u64)PAGE_CACHE_SIZE - 1)) {
+       if (root->sectorsize & ((u64)PAGE_SIZE - 1)) {
                printk(KERN_INFO
-                      "btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
-                      root->sectorsize, PAGE_CACHE_SIZE);
+                      "btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_SIZE %ld!\n",
+                      root->sectorsize, PAGE_SIZE);
                return -1;
        }
        state = kzalloc(sizeof(*state), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
index 3346cd8f991032f2457b4c48a8e3440780d53a9a..ff61a41ac90b75a75d93a330a04d2e8b45ec1a99 100644 (file)
@@ -119,7 +119,7 @@ static int check_compressed_csum(struct inode *inode,
                csum = ~(u32)0;
 
                kaddr = kmap_atomic(page);
-               csum = btrfs_csum_data(kaddr, csum, PAGE_CACHE_SIZE);
+               csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
                btrfs_csum_final(csum, (char *)&csum);
                kunmap_atomic(kaddr);
 
@@ -190,7 +190,7 @@ csum_failed:
        for (index = 0; index < cb->nr_pages; index++) {
                page = cb->compressed_pages[index];
                page->mapping = NULL;
-               page_cache_release(page);
+               put_page(page);
        }
 
        /* do io completion on the original bio */
@@ -224,8 +224,8 @@ out:
 static noinline void end_compressed_writeback(struct inode *inode,
                                              const struct compressed_bio *cb)
 {
-       unsigned long index = cb->start >> PAGE_CACHE_SHIFT;
-       unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_CACHE_SHIFT;
+       unsigned long index = cb->start >> PAGE_SHIFT;
+       unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
        struct page *pages[16];
        unsigned long nr_pages = end_index - index + 1;
        int i;
@@ -247,7 +247,7 @@ static noinline void end_compressed_writeback(struct inode *inode,
                        if (cb->errors)
                                SetPageError(pages[i]);
                        end_page_writeback(pages[i]);
-                       page_cache_release(pages[i]);
+                       put_page(pages[i]);
                }
                nr_pages -= ret;
                index += ret;
@@ -304,7 +304,7 @@ static void end_compressed_bio_write(struct bio *bio)
        for (index = 0; index < cb->nr_pages; index++) {
                page = cb->compressed_pages[index];
                page->mapping = NULL;
-               page_cache_release(page);
+               put_page(page);
        }
 
        /* finally free the cb struct */
@@ -341,7 +341,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
        int ret;
        int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
 
-       WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1));
+       WARN_ON(start & ((u64)PAGE_SIZE - 1));
        cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
        if (!cb)
                return -ENOMEM;
@@ -374,14 +374,14 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
                page->mapping = inode->i_mapping;
                if (bio->bi_iter.bi_size)
                        ret = io_tree->ops->merge_bio_hook(WRITE, page, 0,
-                                                          PAGE_CACHE_SIZE,
+                                                          PAGE_SIZE,
                                                           bio, 0);
                else
                        ret = 0;
 
                page->mapping = NULL;
-               if (ret || bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) <
-                   PAGE_CACHE_SIZE) {
+               if (ret || bio_add_page(bio, page, PAGE_SIZE, 0) <
+                   PAGE_SIZE) {
                        bio_get(bio);
 
                        /*
@@ -410,15 +410,15 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
                        BUG_ON(!bio);
                        bio->bi_private = cb;
                        bio->bi_end_io = end_compressed_bio_write;
-                       bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
+                       bio_add_page(bio, page, PAGE_SIZE, 0);
                }
-               if (bytes_left < PAGE_CACHE_SIZE) {
+               if (bytes_left < PAGE_SIZE) {
                        btrfs_info(BTRFS_I(inode)->root->fs_info,
                                        "bytes left %lu compress len %lu nr %lu",
                               bytes_left, cb->compressed_len, cb->nr_pages);
                }
-               bytes_left -= PAGE_CACHE_SIZE;
-               first_byte += PAGE_CACHE_SIZE;
+               bytes_left -= PAGE_SIZE;
+               first_byte += PAGE_SIZE;
                cond_resched();
        }
        bio_get(bio);
@@ -457,17 +457,17 @@ static noinline int add_ra_bio_pages(struct inode *inode,
        int misses = 0;
 
        page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page;
-       last_offset = (page_offset(page) + PAGE_CACHE_SIZE);
+       last_offset = (page_offset(page) + PAGE_SIZE);
        em_tree = &BTRFS_I(inode)->extent_tree;
        tree = &BTRFS_I(inode)->io_tree;
 
        if (isize == 0)
                return 0;
 
-       end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
+       end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
 
        while (last_offset < compressed_end) {
-               pg_index = last_offset >> PAGE_CACHE_SHIFT;
+               pg_index = last_offset >> PAGE_SHIFT;
 
                if (pg_index > end_index)
                        break;
@@ -488,11 +488,11 @@ static noinline int add_ra_bio_pages(struct inode *inode,
                        break;
 
                if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
-                       page_cache_release(page);
+                       put_page(page);
                        goto next;
                }
 
-               end = last_offset + PAGE_CACHE_SIZE - 1;
+               end = last_offset + PAGE_SIZE - 1;
                /*
                 * at this point, we have a locked page in the page cache
                 * for these bytes in the file.  But, we have to make
@@ -502,27 +502,27 @@ static noinline int add_ra_bio_pages(struct inode *inode,
                lock_extent(tree, last_offset, end);
                read_lock(&em_tree->lock);
                em = lookup_extent_mapping(em_tree, last_offset,
-                                          PAGE_CACHE_SIZE);
+                                          PAGE_SIZE);
                read_unlock(&em_tree->lock);
 
                if (!em || last_offset < em->start ||
-                   (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
+                   (last_offset + PAGE_SIZE > extent_map_end(em)) ||
                    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
                        free_extent_map(em);
                        unlock_extent(tree, last_offset, end);
                        unlock_page(page);
-                       page_cache_release(page);
+                       put_page(page);
                        break;
                }
                free_extent_map(em);
 
                if (page->index == end_index) {
                        char *userpage;
-                       size_t zero_offset = isize & (PAGE_CACHE_SIZE - 1);
+                       size_t zero_offset = isize & (PAGE_SIZE - 1);
 
                        if (zero_offset) {
                                int zeros;
-                               zeros = PAGE_CACHE_SIZE - zero_offset;
+                               zeros = PAGE_SIZE - zero_offset;
                                userpage = kmap_atomic(page);
                                memset(userpage + zero_offset, 0, zeros);
                                flush_dcache_page(page);
@@ -531,19 +531,19 @@ static noinline int add_ra_bio_pages(struct inode *inode,
                }
 
                ret = bio_add_page(cb->orig_bio, page,
-                                  PAGE_CACHE_SIZE, 0);
+                                  PAGE_SIZE, 0);
 
-               if (ret == PAGE_CACHE_SIZE) {
+               if (ret == PAGE_SIZE) {
                        nr_pages++;
-                       page_cache_release(page);
+                       put_page(page);
                } else {
                        unlock_extent(tree, last_offset, end);
                        unlock_page(page);
-                       page_cache_release(page);
+                       put_page(page);
                        break;
                }
 next:
-               last_offset += PAGE_CACHE_SIZE;
+               last_offset += PAGE_SIZE;
        }
        return 0;
 }
@@ -567,7 +567,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
        struct extent_map_tree *em_tree;
        struct compressed_bio *cb;
        struct btrfs_root *root = BTRFS_I(inode)->root;
-       unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
+       unsigned long uncompressed_len = bio->bi_vcnt * PAGE_SIZE;
        unsigned long compressed_len;
        unsigned long nr_pages;
        unsigned long pg_index;
@@ -589,7 +589,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
        read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree,
                                   page_offset(bio->bi_io_vec->bv_page),
-                                  PAGE_CACHE_SIZE);
+                                  PAGE_SIZE);
        read_unlock(&em_tree->lock);
        if (!em)
                return -EIO;
@@ -617,7 +617,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
        cb->compress_type = extent_compress_type(bio_flags);
        cb->orig_bio = bio;
 
-       nr_pages = DIV_ROUND_UP(compressed_len, PAGE_CACHE_SIZE);
+       nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
        cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
                                       GFP_NOFS);
        if (!cb->compressed_pages)
@@ -640,7 +640,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
        add_ra_bio_pages(inode, em_start + em_len, cb);
 
        /* include any pages we added in add_ra_bio_pages */
-       uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
+       uncompressed_len = bio->bi_vcnt * PAGE_SIZE;
        cb->len = uncompressed_len;
 
        comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
@@ -653,18 +653,18 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
        for (pg_index = 0; pg_index < nr_pages; pg_index++) {
                page = cb->compressed_pages[pg_index];
                page->mapping = inode->i_mapping;
-               page->index = em_start >> PAGE_CACHE_SHIFT;
+               page->index = em_start >> PAGE_SHIFT;
 
                if (comp_bio->bi_iter.bi_size)
                        ret = tree->ops->merge_bio_hook(READ, page, 0,
-                                                       PAGE_CACHE_SIZE,
+                                                       PAGE_SIZE,
                                                        comp_bio, 0);
                else
                        ret = 0;
 
                page->mapping = NULL;
-               if (ret || bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0) <
-                   PAGE_CACHE_SIZE) {
+               if (ret || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
+                   PAGE_SIZE) {
                        bio_get(comp_bio);
 
                        ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio,
@@ -702,9 +702,9 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                        comp_bio->bi_private = cb;
                        comp_bio->bi_end_io = end_compressed_bio_read;
 
-                       bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0);
+                       bio_add_page(comp_bio, page, PAGE_SIZE, 0);
                }
-               cur_disk_byte += PAGE_CACHE_SIZE;
+               cur_disk_byte += PAGE_SIZE;
        }
        bio_get(comp_bio);
 
@@ -1013,8 +1013,8 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
 
        /* copy bytes from the working buffer into the pages */
        while (working_bytes > 0) {
-               bytes = min(PAGE_CACHE_SIZE - *pg_offset,
-                           PAGE_CACHE_SIZE - buf_offset);
+               bytes = min(PAGE_SIZE - *pg_offset,
+                           PAGE_SIZE - buf_offset);
                bytes = min(bytes, working_bytes);
                kaddr = kmap_atomic(page_out);
                memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
@@ -1027,7 +1027,7 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
                current_buf_start += bytes;
 
                /* check if we need to pick another page */
-               if (*pg_offset == PAGE_CACHE_SIZE) {
+               if (*pg_offset == PAGE_SIZE) {
                        (*pg_index)++;
                        if (*pg_index >= vcnt)
                                return 0;
index 77592931ab4feba16615997f96d8beb9d385e858..ec7928a27aaad4c2241ef5b9dcd2048401861085 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/rbtree.h>
+#include <linux/vmalloc.h>
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
@@ -5361,10 +5362,13 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
                goto out;
        }
 
-       tmp_buf = kmalloc(left_root->nodesize, GFP_KERNEL);
+       tmp_buf = kmalloc(left_root->nodesize, GFP_KERNEL | __GFP_NOWARN);
        if (!tmp_buf) {
-               ret = -ENOMEM;
-               goto out;
+               tmp_buf = vmalloc(left_root->nodesize);
+               if (!tmp_buf) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
        }
 
        left_path->search_commit_root = 1;
@@ -5565,7 +5569,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
 out:
        btrfs_free_path(left_path);
        btrfs_free_path(right_path);
-       kfree(tmp_buf);
+       kvfree(tmp_buf);
        return ret;
 }
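
The buffer for btrfs_compare_trees() is nodesize bytes (commonly 16 KiB, up to 64 KiB), which kmalloc can fail to provide on a fragmented system; the hunk retries with vmalloc and frees with kvfree(), which handles either kind of allocation, while __GFP_NOWARN silences the allocation-failure splat for the expected first-attempt failure. This open-codes the idiom later packaged as kvmalloc():

/* Try the fast physically-contiguous allocator first, quietly, then
 * fall back to vmalloc; kvfree() dispatches on the pointer either way. */
static void *alloc_node_buf(size_t size)
{
        void *buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

        if (!buf)
                buf = vmalloc(size);
        return buf;     /* free with kvfree(buf) */
}
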
 
index a1d6652e0c4779ecf7e8c675fddcd06a560cdc32..26bcb487f95885295ad3e24d2a8063cbf6355351 100644 (file)
@@ -394,6 +394,8 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
        dev_replace->cursor_right = 0;
        dev_replace->is_valid = 1;
        dev_replace->item_needs_writeback = 1;
+       atomic64_set(&dev_replace->num_write_errors, 0);
+       atomic64_set(&dev_replace->num_uncorrectable_read_errors, 0);
        args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
        btrfs_dev_replace_unlock(dev_replace, 1);
 
index d01f89d130e029dd31f9a2bba6d50a9ca1f1b7cd..4e47849d7427247dd179e8d4f320c5fcda687809 100644 (file)
@@ -1062,7 +1062,7 @@ static void btree_invalidatepage(struct page *page, unsigned int offset,
                           (unsigned long long)page_offset(page));
                ClearPagePrivate(page);
                set_page_private(page, 0);
-               page_cache_release(page);
+               put_page(page);
        }
 }
 
@@ -1764,7 +1764,7 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
        if (err)
                return err;
 
-       bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE;
+       bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_SIZE;
        bdi->congested_fn       = btrfs_congested_fn;
        bdi->congested_data     = info;
        bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
@@ -2542,7 +2542,7 @@ int open_ctree(struct super_block *sb,
                err = ret;
                goto fail_bdi;
        }
-       fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE *
+       fs_info->dirty_metadata_batch = PAGE_SIZE *
                                        (1 + ilog2(nr_cpu_ids));
 
        ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
@@ -2787,7 +2787,7 @@ int open_ctree(struct super_block *sb,
         * flag our filesystem as having big metadata blocks if
         * they are bigger than the page size
         */
-       if (btrfs_super_nodesize(disk_super) > PAGE_CACHE_SIZE) {
+       if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
                if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
                        printk(KERN_INFO "BTRFS: flagging fs with big metadata feature\n");
                features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
@@ -2837,7 +2837,7 @@ int open_ctree(struct super_block *sb,
 
        fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
        fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
-                                   SZ_4M / PAGE_CACHE_SIZE);
+                                   SZ_4M / PAGE_SIZE);
 
        tree_root->nodesize = nodesize;
        tree_root->sectorsize = sectorsize;
@@ -4076,9 +4076,9 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
                ret = -EINVAL;
        }
        /* Only PAGE SIZE is supported yet */
-       if (sectorsize != PAGE_CACHE_SIZE) {
+       if (sectorsize != PAGE_SIZE) {
                printk(KERN_ERR "BTRFS: sectorsize %llu not supported yet, only support %lu\n",
-                               sectorsize, PAGE_CACHE_SIZE);
+                               sectorsize, PAGE_SIZE);
                ret = -EINVAL;
        }
        if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
index 53e12977bfd012672daf3fc1d2b9a2b2f4362021..84e060eb0de8c6aca562bcfb537f5236a71e9e66 100644 (file)
@@ -3452,7 +3452,7 @@ again:
                num_pages = 1;
 
        num_pages *= 16;
-       num_pages *= PAGE_CACHE_SIZE;
+       num_pages *= PAGE_SIZE;
 
        ret = btrfs_check_data_free_space(inode, 0, num_pages);
        if (ret)
@@ -4639,7 +4639,7 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
        loops = 0;
        while (delalloc_bytes && loops < 3) {
                max_reclaim = min(delalloc_bytes, to_reclaim);
-               nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
+               nr_pages = max_reclaim >> PAGE_SHIFT;
                btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
                /*
                 * We need to wait for the async pages to actually start before
@@ -9386,15 +9386,23 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
        u64 dev_min = 1;
        u64 dev_nr = 0;
        u64 target;
+       int debug;
        int index;
        int full = 0;
        int ret = 0;
 
+       debug = btrfs_test_opt(root, ENOSPC_DEBUG);
+
        block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
 
        /* odd, couldn't find the block group, leave it alone */
-       if (!block_group)
+       if (!block_group) {
+               if (debug)
+                       btrfs_warn(root->fs_info,
+                                  "can't find block group for bytenr %llu",
+                                  bytenr);
                return -1;
+       }
 
        min_free = btrfs_block_group_used(&block_group->item);
 
@@ -9448,8 +9456,13 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
                 * this is just a balance, so if we were marked as full
                 * we know there is no space for a new chunk
                 */
-               if (full)
+               if (full) {
+                       if (debug)
+                               btrfs_warn(root->fs_info,
+                                       "no space to alloc new chunk for block group %llu",
+                                       block_group->key.objectid);
                        goto out;
+               }
 
                index = get_block_group_index(block_group);
        }
@@ -9496,6 +9509,10 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
                        ret = -1;
                }
        }
+       if (debug && ret == -1)
+               btrfs_warn(root->fs_info,
+                       "no space to allocate a new chunk for block group %llu",
+                       block_group->key.objectid);
        mutex_unlock(&root->fs_info->chunk_mutex);
        btrfs_end_transaction(trans, root);
 out:
index 76a0c8597d98d19f1f36df5cfc2fea57bf34e865..d247fc0eea1948ecfe6bcd128b241bf01b2bf998 100644 (file)
@@ -1363,23 +1363,23 @@ int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
 
 void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
 {
-       unsigned long index = start >> PAGE_CACHE_SHIFT;
-       unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+       unsigned long index = start >> PAGE_SHIFT;
+       unsigned long end_index = end >> PAGE_SHIFT;
        struct page *page;
 
        while (index <= end_index) {
                page = find_get_page(inode->i_mapping, index);
                BUG_ON(!page); /* Pages should be in the extent_io_tree */
                clear_page_dirty_for_io(page);
-               page_cache_release(page);
+               put_page(page);
                index++;
        }
 }
 
 void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
 {
-       unsigned long index = start >> PAGE_CACHE_SHIFT;
-       unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+       unsigned long index = start >> PAGE_SHIFT;
+       unsigned long end_index = end >> PAGE_SHIFT;
        struct page *page;
 
        while (index <= end_index) {
@@ -1387,7 +1387,7 @@ void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
                BUG_ON(!page); /* Pages should be in the extent_io_tree */
                __set_page_dirty_nobuffers(page);
                account_page_redirty(page);
-               page_cache_release(page);
+               put_page(page);
                index++;
        }
 }
@@ -1397,15 +1397,15 @@ void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
  */
 static void set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
 {
-       unsigned long index = start >> PAGE_CACHE_SHIFT;
-       unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+       unsigned long index = start >> PAGE_SHIFT;
+       unsigned long end_index = end >> PAGE_SHIFT;
        struct page *page;
 
        while (index <= end_index) {
                page = find_get_page(tree->mapping, index);
                BUG_ON(!page); /* Pages should be in the extent_io_tree */
                set_page_writeback(page);
-               page_cache_release(page);
+               put_page(page);
                index++;
        }
 }
@@ -1556,8 +1556,8 @@ static noinline void __unlock_for_delalloc(struct inode *inode,
 {
        int ret;
        struct page *pages[16];
-       unsigned long index = start >> PAGE_CACHE_SHIFT;
-       unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+       unsigned long index = start >> PAGE_SHIFT;
+       unsigned long end_index = end >> PAGE_SHIFT;
        unsigned long nr_pages = end_index - index + 1;
        int i;
 
@@ -1571,7 +1571,7 @@ static noinline void __unlock_for_delalloc(struct inode *inode,
                for (i = 0; i < ret; i++) {
                        if (pages[i] != locked_page)
                                unlock_page(pages[i]);
-                       page_cache_release(pages[i]);
+                       put_page(pages[i]);
                }
                nr_pages -= ret;
                index += ret;
@@ -1584,9 +1584,9 @@ static noinline int lock_delalloc_pages(struct inode *inode,
                                        u64 delalloc_start,
                                        u64 delalloc_end)
 {
-       unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
+       unsigned long index = delalloc_start >> PAGE_SHIFT;
        unsigned long start_index = index;
-       unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
+       unsigned long end_index = delalloc_end >> PAGE_SHIFT;
        unsigned long pages_locked = 0;
        struct page *pages[16];
        unsigned long nrpages;
@@ -1619,11 +1619,11 @@ static noinline int lock_delalloc_pages(struct inode *inode,
                                    pages[i]->mapping != inode->i_mapping) {
                                        ret = -EAGAIN;
                                        unlock_page(pages[i]);
-                                       page_cache_release(pages[i]);
+                                       put_page(pages[i]);
                                        goto done;
                                }
                        }
-                       page_cache_release(pages[i]);
+                       put_page(pages[i]);
                        pages_locked++;
                }
                nrpages -= ret;
@@ -1636,7 +1636,7 @@ done:
                __unlock_for_delalloc(inode, locked_page,
                              delalloc_start,
                              ((u64)(start_index + pages_locked - 1)) <<
-                             PAGE_CACHE_SHIFT);
+                             PAGE_SHIFT);
        }
        return ret;
 }
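
__unlock_for_delalloc() and lock_delalloc_pages() above both walk a byte range in batches of up to 16 pages via find_get_pages_contig(), dropping the lookup reference on each page as they go. A condensed restatement of that idiom, minus the locked_page special case (kernel context assumed; the helper name is illustrative, not a btrfs function):

    /* Sketch: unlock a page range in batches of 16, as the helpers above do. */
    static void unlock_range_batched(struct address_space *mapping,
                                     u64 start, u64 end)
    {
            struct page *pages[16];
            unsigned long index = start >> PAGE_SHIFT;
            unsigned long end_index = end >> PAGE_SHIFT;
            unsigned long nr_pages = end_index - index + 1;

            while (nr_pages > 0) {
                    unsigned int ret = find_get_pages_contig(mapping, index,
                                            min_t(unsigned long, nr_pages,
                                                  ARRAY_SIZE(pages)),
                                            pages);
                    unsigned int i;

                    if (!ret)
                            break;
                    for (i = 0; i < ret; i++) {
                            unlock_page(pages[i]);
                            put_page(pages[i]); /* drop the lookup reference */
                    }
                    nr_pages -= ret;
                    index += ret;
            }
    }
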
@@ -1696,7 +1696,7 @@ again:
                free_extent_state(cached_state);
                cached_state = NULL;
                if (!loops) {
-                       max_bytes = PAGE_CACHE_SIZE;
+                       max_bytes = PAGE_SIZE;
                        loops = 1;
                        goto again;
                } else {
@@ -1735,8 +1735,8 @@ void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
        struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
        int ret;
        struct page *pages[16];
-       unsigned long index = start >> PAGE_CACHE_SHIFT;
-       unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+       unsigned long index = start >> PAGE_SHIFT;
+       unsigned long end_index = end >> PAGE_SHIFT;
        unsigned long nr_pages = end_index - index + 1;
        int i;
 
@@ -1757,7 +1757,7 @@ void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
                                SetPagePrivate2(pages[i]);
 
                        if (pages[i] == locked_page) {
-                               page_cache_release(pages[i]);
+                               put_page(pages[i]);
                                continue;
                        }
                        if (page_ops & PAGE_CLEAR_DIRTY)
@@ -1770,7 +1770,7 @@ void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
                                end_page_writeback(pages[i]);
                        if (page_ops & PAGE_UNLOCK)
                                unlock_page(pages[i]);
-                       page_cache_release(pages[i]);
+                       put_page(pages[i]);
                }
                nr_pages -= ret;
                index += ret;
@@ -1961,7 +1961,7 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
 static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
 {
        u64 start = page_offset(page);
-       u64 end = start + PAGE_CACHE_SIZE - 1;
+       u64 end = start + PAGE_SIZE - 1;
        if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
                SetPageUptodate(page);
 }
@@ -2071,11 +2071,11 @@ int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
                struct page *p = eb->pages[i];
 
                ret = repair_io_failure(root->fs_info->btree_inode, start,
-                                       PAGE_CACHE_SIZE, start, p,
+                                       PAGE_SIZE, start, p,
                                        start - page_offset(p), mirror_num);
                if (ret)
                        break;
-               start += PAGE_CACHE_SIZE;
+               start += PAGE_SIZE;
        }
 
        return ret;
@@ -2466,8 +2466,8 @@ static void end_bio_extent_writepage(struct bio *bio)
                 * advance bv_offset and adjust bv_len to compensate.
                 * Print a warning for nonzero offsets, and an error
                 * if they don't add up to a full page.  */
-               if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) {
-                       if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE)
+               if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
+                       if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
                                btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
                                   "partial page write in btrfs with offset %u and length %u",
                                        bvec->bv_offset, bvec->bv_len);
@@ -2541,8 +2541,8 @@ static void end_bio_extent_readpage(struct bio *bio)
                 * advance bv_offset and adjust bv_len to compensate.
                 * Print a warning for nonzero offsets, and an error
                 * if they don't add up to a full page.  */
-               if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) {
-                       if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE)
+               if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
+                       if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
                                btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
                                   "partial page read in btrfs with offset %u and length %u",
                                        bvec->bv_offset, bvec->bv_len);
@@ -2598,13 +2598,13 @@ static void end_bio_extent_readpage(struct bio *bio)
 readpage_ok:
                if (likely(uptodate)) {
                        loff_t i_size = i_size_read(inode);
-                       pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+                       pgoff_t end_index = i_size >> PAGE_SHIFT;
                        unsigned off;
 
                        /* Zero out the end if this page straddles i_size */
-                       off = i_size & (PAGE_CACHE_SIZE-1);
+                       off = i_size & (PAGE_SIZE-1);
                        if (page->index == end_index && off)
-                               zero_user_segment(page, off, PAGE_CACHE_SIZE);
+                               zero_user_segment(page, off, PAGE_SIZE);
                        SetPageUptodate(page);
                } else {
                        ClearPageUptodate(page);
@@ -2768,7 +2768,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
        struct bio *bio;
        int contig = 0;
        int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
-       size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
+       size_t page_size = min_t(size_t, size, PAGE_SIZE);
 
        if (bio_ret && *bio_ret) {
                bio = *bio_ret;
@@ -2821,7 +2821,7 @@ static void attach_extent_buffer_page(struct extent_buffer *eb,
 {
        if (!PagePrivate(page)) {
                SetPagePrivate(page);
-               page_cache_get(page);
+               get_page(page);
                set_page_private(page, (unsigned long)eb);
        } else {
                WARN_ON(page->private != (unsigned long)eb);
@@ -2832,7 +2832,7 @@ void set_page_extent_mapped(struct page *page)
 {
        if (!PagePrivate(page)) {
                SetPagePrivate(page);
-               page_cache_get(page);
+               get_page(page);
                set_page_private(page, EXTENT_PAGE_PRIVATE);
        }
 }
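
attach_extent_buffer_page() and set_page_extent_mapped() above pin the page with get_page() for as long as page->private is in use; the matching put_page() happens when the private state is torn down (visible further down in btrfs_release_extent_buffer_page(), where "One for the page private" marks it). A condensed sketch of the pairing (kernel context assumed):

    /* Sketch: the page->private reference pairing used above. */
    static void attach_private(struct page *page, unsigned long data)
    {
            if (!PagePrivate(page)) {
                    SetPagePrivate(page);
                    get_page(page);    /* reference held by page->private */
                    set_page_private(page, data);
            }
    }

    static void detach_private(struct page *page)
    {
            if (PagePrivate(page)) {
                    ClearPagePrivate(page);
                    set_page_private(page, 0);
                    put_page(page);    /* drop the private reference */
            }
    }
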
@@ -2880,7 +2880,7 @@ static int __do_readpage(struct extent_io_tree *tree,
 {
        struct inode *inode = page->mapping->host;
        u64 start = page_offset(page);
-       u64 page_end = start + PAGE_CACHE_SIZE - 1;
+       u64 page_end = start + PAGE_SIZE - 1;
        u64 end;
        u64 cur = start;
        u64 extent_offset;
@@ -2909,12 +2909,12 @@ static int __do_readpage(struct extent_io_tree *tree,
                }
        }
 
-       if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
+       if (page->index == last_byte >> PAGE_SHIFT) {
                char *userpage;
-               size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
+               size_t zero_offset = last_byte & (PAGE_SIZE - 1);
 
                if (zero_offset) {
-                       iosize = PAGE_CACHE_SIZE - zero_offset;
+                       iosize = PAGE_SIZE - zero_offset;
                        userpage = kmap_atomic(page);
                        memset(userpage + zero_offset, 0, iosize);
                        flush_dcache_page(page);
@@ -2922,14 +2922,14 @@ static int __do_readpage(struct extent_io_tree *tree,
                }
        }
        while (cur <= end) {
-               unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
+               unsigned long pnr = (last_byte >> PAGE_SHIFT) + 1;
                bool force_bio_submit = false;
 
                if (cur >= last_byte) {
                        char *userpage;
                        struct extent_state *cached = NULL;
 
-                       iosize = PAGE_CACHE_SIZE - pg_offset;
+                       iosize = PAGE_SIZE - pg_offset;
                        userpage = kmap_atomic(page);
                        memset(userpage + pg_offset, 0, iosize);
                        flush_dcache_page(page);
@@ -3112,7 +3112,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
        for (index = 0; index < nr_pages; index++) {
                __do_readpage(tree, pages[index], get_extent, em_cached, bio,
                              mirror_num, bio_flags, rw, prev_em_start);
-               page_cache_release(pages[index]);
+               put_page(pages[index]);
        }
 }
 
@@ -3134,10 +3134,10 @@ static void __extent_readpages(struct extent_io_tree *tree,
                page_start = page_offset(pages[index]);
                if (!end) {
                        start = page_start;
-                       end = start + PAGE_CACHE_SIZE - 1;
+                       end = start + PAGE_SIZE - 1;
                        first_index = index;
                } else if (end + 1 == page_start) {
-                       end += PAGE_CACHE_SIZE;
+                       end += PAGE_SIZE;
                } else {
                        __do_contiguous_readpages(tree, &pages[first_index],
                                                  index - first_index, start,
@@ -3145,7 +3145,7 @@ static void __extent_readpages(struct extent_io_tree *tree,
                                                  bio, mirror_num, bio_flags,
                                                  rw, prev_em_start);
                        start = page_start;
-                       end = start + PAGE_CACHE_SIZE - 1;
+                       end = start + PAGE_SIZE - 1;
                        first_index = index;
                }
        }
@@ -3167,13 +3167,13 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
        struct inode *inode = page->mapping->host;
        struct btrfs_ordered_extent *ordered;
        u64 start = page_offset(page);
-       u64 end = start + PAGE_CACHE_SIZE - 1;
+       u64 end = start + PAGE_SIZE - 1;
        int ret;
 
        while (1) {
                lock_extent(tree, start, end);
                ordered = btrfs_lookup_ordered_range(inode, start,
-                                               PAGE_CACHE_SIZE);
+                                               PAGE_SIZE);
                if (!ordered)
                        break;
                unlock_extent(tree, start, end);
@@ -3227,7 +3227,7 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode,
                              unsigned long *nr_written)
 {
        struct extent_io_tree *tree = epd->tree;
-       u64 page_end = delalloc_start + PAGE_CACHE_SIZE - 1;
+       u64 page_end = delalloc_start + PAGE_SIZE - 1;
        u64 nr_delalloc;
        u64 delalloc_to_write = 0;
        u64 delalloc_end = 0;
@@ -3264,13 +3264,11 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode,
                        goto done;
                }
                /*
-                * delalloc_end is already one less than the total
-                * length, so we don't subtract one from
-                * PAGE_CACHE_SIZE
+                * delalloc_end is already one less than the total length, so
+                * we don't subtract one from PAGE_SIZE
                 */
                delalloc_to_write += (delalloc_end - delalloc_start +
-                                     PAGE_CACHE_SIZE) >>
-                                     PAGE_CACHE_SHIFT;
+                                     PAGE_SIZE) >> PAGE_SHIFT;
                delalloc_start = delalloc_end + 1;
        }
        if (wbc->nr_to_write < delalloc_to_write) {
@@ -3319,7 +3317,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
 {
        struct extent_io_tree *tree = epd->tree;
        u64 start = page_offset(page);
-       u64 page_end = start + PAGE_CACHE_SIZE - 1;
+       u64 page_end = start + PAGE_SIZE - 1;
        u64 end;
        u64 cur = start;
        u64 extent_offset;
@@ -3434,7 +3432,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
                if (ret) {
                        SetPageError(page);
                } else {
-                       unsigned long max_nr = (i_size >> PAGE_CACHE_SHIFT) + 1;
+                       unsigned long max_nr = (i_size >> PAGE_SHIFT) + 1;
 
                        set_range_writeback(tree, cur, cur + iosize - 1);
                        if (!PageWriteback(page)) {
@@ -3477,12 +3475,12 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
        struct inode *inode = page->mapping->host;
        struct extent_page_data *epd = data;
        u64 start = page_offset(page);
-       u64 page_end = start + PAGE_CACHE_SIZE - 1;
+       u64 page_end = start + PAGE_SIZE - 1;
        int ret;
        int nr = 0;
        size_t pg_offset = 0;
        loff_t i_size = i_size_read(inode);
-       unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
+       unsigned long end_index = i_size >> PAGE_SHIFT;
        int write_flags;
        unsigned long nr_written = 0;
 
@@ -3497,10 +3495,10 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 
        ClearPageError(page);
 
-       pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
+       pg_offset = i_size & (PAGE_SIZE - 1);
        if (page->index > end_index ||
           (page->index == end_index && !pg_offset)) {
-               page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
+               page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
                unlock_page(page);
                return 0;
        }
@@ -3510,7 +3508,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 
                userpage = kmap_atomic(page);
                memset(userpage + pg_offset, 0,
-                      PAGE_CACHE_SIZE - pg_offset);
+                      PAGE_SIZE - pg_offset);
                kunmap_atomic(userpage);
                flush_dcache_page(page);
        }
@@ -3748,7 +3746,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
                clear_page_dirty_for_io(p);
                set_page_writeback(p);
                ret = submit_extent_page(rw, tree, wbc, p, offset >> 9,
-                                        PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
+                                        PAGE_SIZE, 0, bdev, &epd->bio,
                                         -1, end_bio_extent_buffer_writepage,
                                         0, epd->bio_flags, bio_flags, false);
                epd->bio_flags = bio_flags;
@@ -3760,7 +3758,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
                        ret = -EIO;
                        break;
                }
-               offset += PAGE_CACHE_SIZE;
+               offset += PAGE_SIZE;
                update_nr_written(p, wbc, 1);
                unlock_page(p);
        }
@@ -3804,8 +3802,8 @@ int btree_write_cache_pages(struct address_space *mapping,
                index = mapping->writeback_index; /* Start from prev offset */
                end = -1;
        } else {
-               index = wbc->range_start >> PAGE_CACHE_SHIFT;
-               end = wbc->range_end >> PAGE_CACHE_SHIFT;
+               index = wbc->range_start >> PAGE_SHIFT;
+               end = wbc->range_end >> PAGE_SHIFT;
                scanned = 1;
        }
        if (wbc->sync_mode == WB_SYNC_ALL)
@@ -3948,8 +3946,8 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
                index = mapping->writeback_index; /* Start from prev offset */
                end = -1;
        } else {
-               index = wbc->range_start >> PAGE_CACHE_SHIFT;
-               end = wbc->range_end >> PAGE_CACHE_SHIFT;
+               index = wbc->range_start >> PAGE_SHIFT;
+               end = wbc->range_end >> PAGE_SHIFT;
                scanned = 1;
        }
        if (wbc->sync_mode == WB_SYNC_ALL)
@@ -4083,8 +4081,8 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
        int ret = 0;
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
-       unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
-               PAGE_CACHE_SHIFT;
+       unsigned long nr_pages = (end - start + PAGE_SIZE) >>
+               PAGE_SHIFT;
 
        struct extent_page_data epd = {
                .bio = NULL,
@@ -4102,18 +4100,18 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
        };
 
        while (start <= end) {
-               page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
+               page = find_get_page(mapping, start >> PAGE_SHIFT);
                if (clear_page_dirty_for_io(page))
                        ret = __extent_writepage(page, &wbc_writepages, &epd);
                else {
                        if (tree->ops && tree->ops->writepage_end_io_hook)
                                tree->ops->writepage_end_io_hook(page, start,
-                                                start + PAGE_CACHE_SIZE - 1,
+                                                start + PAGE_SIZE - 1,
                                                 NULL, 1);
                        unlock_page(page);
                }
-               page_cache_release(page);
-               start += PAGE_CACHE_SIZE;
+               put_page(page);
+               start += PAGE_SIZE;
        }
 
        flush_epd_write_bio(&epd);
@@ -4163,7 +4161,7 @@ int extent_readpages(struct extent_io_tree *tree,
                list_del(&page->lru);
                if (add_to_page_cache_lru(page, mapping,
                                        page->index, GFP_NOFS)) {
-                       page_cache_release(page);
+                       put_page(page);
                        continue;
                }
 
@@ -4197,7 +4195,7 @@ int extent_invalidatepage(struct extent_io_tree *tree,
 {
        struct extent_state *cached_state = NULL;
        u64 start = page_offset(page);
-       u64 end = start + PAGE_CACHE_SIZE - 1;
+       u64 end = start + PAGE_SIZE - 1;
        size_t blocksize = page->mapping->host->i_sb->s_blocksize;
 
        start += ALIGN(offset, blocksize);
@@ -4223,7 +4221,7 @@ static int try_release_extent_state(struct extent_map_tree *map,
                                    struct page *page, gfp_t mask)
 {
        u64 start = page_offset(page);
-       u64 end = start + PAGE_CACHE_SIZE - 1;
+       u64 end = start + PAGE_SIZE - 1;
        int ret = 1;
 
        if (test_range_bit(tree, start, end,
@@ -4262,7 +4260,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
 {
        struct extent_map *em;
        u64 start = page_offset(page);
-       u64 end = start + PAGE_CACHE_SIZE - 1;
+       u64 end = start + PAGE_SIZE - 1;
 
        if (gfpflags_allow_blocking(mask) &&
            page->mapping->host->i_size > SZ_16M) {
@@ -4587,14 +4585,14 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
                        ClearPagePrivate(page);
                        set_page_private(page, 0);
                        /* One for the page private */
-                       page_cache_release(page);
+                       put_page(page);
                }
 
                if (mapped)
                        spin_unlock(&page->mapping->private_lock);
 
                /* One for when we alloced the page */
-               page_cache_release(page);
+               put_page(page);
        } while (index != 0);
 }
 
@@ -4779,7 +4777,7 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
 
        rcu_read_lock();
        eb = radix_tree_lookup(&fs_info->buffer_radix,
-                              start >> PAGE_CACHE_SHIFT);
+                              start >> PAGE_SHIFT);
        if (eb && atomic_inc_not_zero(&eb->refs)) {
                rcu_read_unlock();
                /*
@@ -4829,7 +4827,7 @@ again:
                goto free_eb;
        spin_lock(&fs_info->buffer_lock);
        ret = radix_tree_insert(&fs_info->buffer_radix,
-                               start >> PAGE_CACHE_SHIFT, eb);
+                               start >> PAGE_SHIFT, eb);
        spin_unlock(&fs_info->buffer_lock);
        radix_tree_preload_end();
        if (ret == -EEXIST) {
@@ -4862,7 +4860,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
        unsigned long len = fs_info->tree_root->nodesize;
        unsigned long num_pages = num_extent_pages(start, len);
        unsigned long i;
-       unsigned long index = start >> PAGE_CACHE_SHIFT;
+       unsigned long index = start >> PAGE_SHIFT;
        struct extent_buffer *eb;
        struct extent_buffer *exists = NULL;
        struct page *p;
@@ -4896,7 +4894,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
                        if (atomic_inc_not_zero(&exists->refs)) {
                                spin_unlock(&mapping->private_lock);
                                unlock_page(p);
-                               page_cache_release(p);
+                               put_page(p);
                                mark_extent_buffer_accessed(exists, p);
                                goto free_eb;
                        }
@@ -4908,7 +4906,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
                         */
                        ClearPagePrivate(p);
                        WARN_ON(PageDirty(p));
-                       page_cache_release(p);
+                       put_page(p);
                }
                attach_extent_buffer_page(eb, p);
                spin_unlock(&mapping->private_lock);
@@ -4931,7 +4929,7 @@ again:
 
        spin_lock(&fs_info->buffer_lock);
        ret = radix_tree_insert(&fs_info->buffer_radix,
-                               start >> PAGE_CACHE_SHIFT, eb);
+                               start >> PAGE_SHIFT, eb);
        spin_unlock(&fs_info->buffer_lock);
        radix_tree_preload_end();
        if (ret == -EEXIST) {
@@ -4994,7 +4992,7 @@ static int release_extent_buffer(struct extent_buffer *eb)
 
                        spin_lock(&fs_info->buffer_lock);
                        radix_tree_delete(&fs_info->buffer_radix,
-                                         eb->start >> PAGE_CACHE_SHIFT);
+                                         eb->start >> PAGE_SHIFT);
                        spin_unlock(&fs_info->buffer_lock);
                } else {
                        spin_unlock(&eb->refs_lock);
@@ -5168,8 +5166,8 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 
        if (start) {
                WARN_ON(start < eb->start);
-               start_i = (start >> PAGE_CACHE_SHIFT) -
-                       (eb->start >> PAGE_CACHE_SHIFT);
+               start_i = (start >> PAGE_SHIFT) -
+                       (eb->start >> PAGE_SHIFT);
        } else {
                start_i = 0;
        }
@@ -5252,18 +5250,18 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv,
        struct page *page;
        char *kaddr;
        char *dst = (char *)dstv;
-       size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
-       unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+       size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+       unsigned long i = (start_offset + start) >> PAGE_SHIFT;
 
        WARN_ON(start > eb->len);
        WARN_ON(start + len > eb->start + eb->len);
 
-       offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
+       offset = (start_offset + start) & (PAGE_SIZE - 1);
 
        while (len > 0) {
                page = eb->pages[i];
 
-               cur = min(len, (PAGE_CACHE_SIZE - offset));
+               cur = min(len, (PAGE_SIZE - offset));
                kaddr = page_address(page);
                memcpy(dst, kaddr + offset, cur);
 
@@ -5283,19 +5281,19 @@ int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dstv,
        struct page *page;
        char *kaddr;
        char __user *dst = (char __user *)dstv;
-       size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
-       unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+       size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+       unsigned long i = (start_offset + start) >> PAGE_SHIFT;
        int ret = 0;
 
        WARN_ON(start > eb->len);
        WARN_ON(start + len > eb->start + eb->len);
 
-       offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
+       offset = (start_offset + start) & (PAGE_SIZE - 1);
 
        while (len > 0) {
                page = eb->pages[i];
 
-               cur = min(len, (PAGE_CACHE_SIZE - offset));
+               cur = min(len, (PAGE_SIZE - offset));
                kaddr = page_address(page);
                if (copy_to_user(dst, kaddr + offset, cur)) {
                        ret = -EFAULT;
@@ -5316,13 +5314,13 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
                               unsigned long *map_start,
                               unsigned long *map_len)
 {
-       size_t offset = start & (PAGE_CACHE_SIZE - 1);
+       size_t offset = start & (PAGE_SIZE - 1);
        char *kaddr;
        struct page *p;
-       size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
-       unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+       size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+       unsigned long i = (start_offset + start) >> PAGE_SHIFT;
        unsigned long end_i = (start_offset + start + min_len - 1) >>
-               PAGE_CACHE_SHIFT;
+               PAGE_SHIFT;
 
        if (i != end_i)
                return -EINVAL;
@@ -5332,7 +5330,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
                *map_start = 0;
        } else {
                offset = 0;
-               *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
+               *map_start = ((u64)i << PAGE_SHIFT) - start_offset;
        }
 
        if (start + min_len > eb->len) {
@@ -5345,7 +5343,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
        p = eb->pages[i];
        kaddr = page_address(p);
        *map = kaddr + offset;
-       *map_len = PAGE_CACHE_SIZE - offset;
+       *map_len = PAGE_SIZE - offset;
        return 0;
 }
 
@@ -5358,19 +5356,19 @@ int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
        struct page *page;
        char *kaddr;
        char *ptr = (char *)ptrv;
-       size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
-       unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+       size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+       unsigned long i = (start_offset + start) >> PAGE_SHIFT;
        int ret = 0;
 
        WARN_ON(start > eb->len);
        WARN_ON(start + len > eb->start + eb->len);
 
-       offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
+       offset = (start_offset + start) & (PAGE_SIZE - 1);
 
        while (len > 0) {
                page = eb->pages[i];
 
-               cur = min(len, (PAGE_CACHE_SIZE - offset));
+               cur = min(len, (PAGE_SIZE - offset));
 
                kaddr = page_address(page);
                ret = memcmp(ptr, kaddr + offset, cur);
@@ -5393,19 +5391,19 @@ void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
        struct page *page;
        char *kaddr;
        char *src = (char *)srcv;
-       size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
-       unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+       size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+       unsigned long i = (start_offset + start) >> PAGE_SHIFT;
 
        WARN_ON(start > eb->len);
        WARN_ON(start + len > eb->start + eb->len);
 
-       offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
+       offset = (start_offset + start) & (PAGE_SIZE - 1);
 
        while (len > 0) {
                page = eb->pages[i];
                WARN_ON(!PageUptodate(page));
 
-               cur = min(len, PAGE_CACHE_SIZE - offset);
+               cur = min(len, PAGE_SIZE - offset);
                kaddr = page_address(page);
                memcpy(kaddr + offset, src, cur);
 
@@ -5423,19 +5421,19 @@ void memset_extent_buffer(struct extent_buffer *eb, char c,
        size_t offset;
        struct page *page;
        char *kaddr;
-       size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
-       unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+       size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+       unsigned long i = (start_offset + start) >> PAGE_SHIFT;
 
        WARN_ON(start > eb->len);
        WARN_ON(start + len > eb->start + eb->len);
 
-       offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
+       offset = (start_offset + start) & (PAGE_SIZE - 1);
 
        while (len > 0) {
                page = eb->pages[i];
                WARN_ON(!PageUptodate(page));
 
-               cur = min(len, PAGE_CACHE_SIZE - offset);
+               cur = min(len, PAGE_SIZE - offset);
                kaddr = page_address(page);
                memset(kaddr + offset, c, cur);
 
@@ -5454,19 +5452,19 @@ void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
        size_t offset;
        struct page *page;
        char *kaddr;
-       size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
-       unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
+       size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
+       unsigned long i = (start_offset + dst_offset) >> PAGE_SHIFT;
 
        WARN_ON(src->len != dst_len);
 
        offset = (start_offset + dst_offset) &
-               (PAGE_CACHE_SIZE - 1);
+               (PAGE_SIZE - 1);
 
        while (len > 0) {
                page = dst->pages[i];
                WARN_ON(!PageUptodate(page));
 
-               cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
+               cur = min(len, (unsigned long)(PAGE_SIZE - offset));
 
                kaddr = page_address(page);
                read_extent_buffer(src, kaddr + offset, src_offset, cur);
@@ -5508,7 +5506,7 @@ static inline void eb_bitmap_offset(struct extent_buffer *eb,
                                    unsigned long *page_index,
                                    size_t *page_offset)
 {
-       size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
+       size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
        size_t byte_offset = BIT_BYTE(nr);
        size_t offset;
 
@@ -5519,8 +5517,8 @@ static inline void eb_bitmap_offset(struct extent_buffer *eb,
         */
        offset = start_offset + start + byte_offset;
 
-       *page_index = offset >> PAGE_CACHE_SHIFT;
-       *page_offset = offset & (PAGE_CACHE_SIZE - 1);
+       *page_index = offset >> PAGE_SHIFT;
+       *page_offset = offset & (PAGE_SIZE - 1);
 }
 
 /**
@@ -5572,7 +5570,7 @@ void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
                len -= bits_to_set;
                bits_to_set = BITS_PER_BYTE;
                mask_to_set = ~0U;
-               if (++offset >= PAGE_CACHE_SIZE && len > 0) {
+               if (++offset >= PAGE_SIZE && len > 0) {
                        offset = 0;
                        page = eb->pages[++i];
                        WARN_ON(!PageUptodate(page));
@@ -5614,7 +5612,7 @@ void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
                len -= bits_to_clear;
                bits_to_clear = BITS_PER_BYTE;
                mask_to_clear = ~0U;
-               if (++offset >= PAGE_CACHE_SIZE && len > 0) {
+               if (++offset >= PAGE_SIZE && len > 0) {
                        offset = 0;
                        page = eb->pages[++i];
                        WARN_ON(!PageUptodate(page));
@@ -5661,7 +5659,7 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
        size_t cur;
        size_t dst_off_in_page;
        size_t src_off_in_page;
-       size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
+       size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
        unsigned long dst_i;
        unsigned long src_i;
 
@@ -5680,17 +5678,17 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
 
        while (len > 0) {
                dst_off_in_page = (start_offset + dst_offset) &
-                       (PAGE_CACHE_SIZE - 1);
+                       (PAGE_SIZE - 1);
                src_off_in_page = (start_offset + src_offset) &
-                       (PAGE_CACHE_SIZE - 1);
+                       (PAGE_SIZE - 1);
 
-               dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
-               src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
+               dst_i = (start_offset + dst_offset) >> PAGE_SHIFT;
+               src_i = (start_offset + src_offset) >> PAGE_SHIFT;
 
-               cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
+               cur = min(len, (unsigned long)(PAGE_SIZE -
                                               src_off_in_page));
                cur = min_t(unsigned long, cur,
-                       (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
+                       (unsigned long)(PAGE_SIZE - dst_off_in_page));
 
                copy_pages(dst->pages[dst_i], dst->pages[src_i],
                           dst_off_in_page, src_off_in_page, cur);
@@ -5709,7 +5707,7 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
        size_t src_off_in_page;
        unsigned long dst_end = dst_offset + len - 1;
        unsigned long src_end = src_offset + len - 1;
-       size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
+       size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
        unsigned long dst_i;
        unsigned long src_i;
 
@@ -5728,13 +5726,13 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
                return;
        }
        while (len > 0) {
-               dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
-               src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
+               dst_i = (start_offset + dst_end) >> PAGE_SHIFT;
+               src_i = (start_offset + src_end) >> PAGE_SHIFT;
 
                dst_off_in_page = (start_offset + dst_end) &
-                       (PAGE_CACHE_SIZE - 1);
+                       (PAGE_SIZE - 1);
                src_off_in_page = (start_offset + src_end) &
-                       (PAGE_CACHE_SIZE - 1);
+                       (PAGE_SIZE - 1);
 
                cur = min_t(unsigned long, len, src_off_in_page + 1);
                cur = min(cur, dst_off_in_page + 1);
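
A computation that recurs throughout the extent-buffer helpers above is translating a logical offset within a buffer into a (page index, in-page offset) pair, correcting for buffers whose start is not page aligned. Pulled out as a standalone sketch (the function name is illustrative; the shown code inlines this math):

    /* Sketch: map a logical offset inside an extent buffer to its page. */
    static void eb_offset_to_page(u64 eb_start, unsigned long offset,
                                  unsigned long *page_index,
                                  size_t *page_offset)
    {
            size_t start_offset = eb_start & ((u64)PAGE_SIZE - 1);

            *page_index = (start_offset + offset) >> PAGE_SHIFT;
            *page_offset = (start_offset + offset) & (PAGE_SIZE - 1);
    }

For example, with 4 KiB pages, eb_start = 6144 and offset = 3000 give start_offset = 2048, so the byte lives in page 1 at in-page offset 952.
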
index 5dbf92e68fbd1649366444c4dd860aa508e90cac..b5e0ade90e88137472bfeb290f909eee0a264274 100644 (file)
@@ -120,7 +120,7 @@ struct extent_state {
 };
 
 #define INLINE_EXTENT_BUFFER_PAGES 16
-#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_CACHE_SIZE)
+#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_SIZE)
 struct extent_buffer {
        u64 start;
        unsigned long len;
@@ -365,8 +365,8 @@ void wait_on_extent_buffer_writeback(struct extent_buffer *eb);
 
 static inline unsigned long num_extent_pages(u64 start, u64 len)
 {
-       return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
-               (start >> PAGE_CACHE_SHIFT);
+       return ((start + len + PAGE_SIZE - 1) >> PAGE_SHIFT) -
+               (start >> PAGE_SHIFT);
 }
 
 static inline void extent_buffer_get(struct extent_buffer *eb)
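
Worked example for num_extent_pages(): with 4 KiB pages, start = 4096 and len = 16384 give ((4096 + 16384 + 4095) >> 12) - (4096 >> 12) = 5 - 1 = 4, i.e. the extent touches pages 1 through 4 inclusive. An unaligned start works the same way: start = 6144, len = 4096 spans bytes 6144..10239 and yields ((6144 + 4096 + 4095) >> 12) - (6144 >> 12) = 3 - 1 = 2 pages.
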
index b5baf5bdc8e183df9fd697eda5a7d2d16b97baa0..7a7d6e253cfc01968b1d1fbaf02fb6815ef6ad6e 100644 (file)
@@ -32,7 +32,7 @@
                                  size) - 1))
 
 #define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
-                                      PAGE_CACHE_SIZE))
+                                      PAGE_SIZE))
 
 #define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \
                                   sizeof(struct btrfs_ordered_sum)) / \
@@ -203,7 +203,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
                csum = (u8 *)dst;
        }
 
-       if (bio->bi_iter.bi_size > PAGE_CACHE_SIZE * 8)
+       if (bio->bi_iter.bi_size > PAGE_SIZE * 8)
                path->reada = READA_FORWARD;
 
        WARN_ON(bio->bi_vcnt <= 0);
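
The readahead tweak in __btrfs_lookup_bio_sums() above is a size heuristic: only a bio carrying more than 8 pages of data (more than 32 KiB with 4 KiB pages) will touch enough checksum items to make forward readahead of the csum tree worthwhile; smaller bios just pay for the direct lookup.
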
index 15a09cb156cec4c59776c683ecc2f98527e98d15..8d7b5a45c00523f4ca7ee5d58810b9d571ad4148 100644 (file)
@@ -414,11 +414,11 @@ static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
        size_t copied = 0;
        size_t total_copied = 0;
        int pg = 0;
-       int offset = pos & (PAGE_CACHE_SIZE - 1);
+       int offset = pos & (PAGE_SIZE - 1);
 
        while (write_bytes > 0) {
                size_t count = min_t(size_t,
-                                    PAGE_CACHE_SIZE - offset, write_bytes);
+                                    PAGE_SIZE - offset, write_bytes);
                struct page *page = prepared_pages[pg];
                /*
                 * Copy data from userspace to the current page
@@ -448,7 +448,7 @@ static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
                if (unlikely(copied == 0))
                        break;
 
-               if (copied < PAGE_CACHE_SIZE - offset) {
+               if (copied < PAGE_SIZE - offset) {
                        offset += copied;
                } else {
                        pg++;
@@ -473,7 +473,7 @@ static void btrfs_drop_pages(struct page **pages, size_t num_pages)
                 */
                ClearPageChecked(pages[i]);
                unlock_page(pages[i]);
-               page_cache_release(pages[i]);
+               put_page(pages[i]);
        }
 }
 
@@ -1297,7 +1297,7 @@ static int prepare_uptodate_page(struct inode *inode,
 {
        int ret = 0;
 
-       if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
+       if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
            !PageUptodate(page)) {
                ret = btrfs_readpage(NULL, page);
                if (ret)
@@ -1323,7 +1323,7 @@ static noinline int prepare_pages(struct inode *inode, struct page **pages,
                                  size_t write_bytes, bool force_uptodate)
 {
        int i;
-       unsigned long index = pos >> PAGE_CACHE_SHIFT;
+       unsigned long index = pos >> PAGE_SHIFT;
        gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
        int err = 0;
        int faili;
@@ -1345,7 +1345,7 @@ again:
                        err = prepare_uptodate_page(inode, pages[i],
                                                    pos + write_bytes, false);
                if (err) {
-                       page_cache_release(pages[i]);
+                       put_page(pages[i]);
                        if (err == -EAGAIN) {
                                err = 0;
                                goto again;
@@ -1360,7 +1360,7 @@ again:
 fail:
        while (faili >= 0) {
                unlock_page(pages[faili]);
-               page_cache_release(pages[faili]);
+               put_page(pages[faili]);
                faili--;
        }
        return err;
@@ -1408,7 +1408,7 @@ lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
                                             cached_state, GFP_NOFS);
                        for (i = 0; i < num_pages; i++) {
                                unlock_page(pages[i]);
-                               page_cache_release(pages[i]);
+                               put_page(pages[i]);
                        }
                        btrfs_start_ordered_extent(inode, ordered, 1);
                        btrfs_put_ordered_extent(ordered);
@@ -1497,8 +1497,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
        bool force_page_uptodate = false;
        bool need_unlock;
 
-       nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_CACHE_SIZE),
-                       PAGE_CACHE_SIZE / (sizeof(struct page *)));
+       nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
+                       PAGE_SIZE / (sizeof(struct page *)));
        nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
        nrptrs = max(nrptrs, 8);
        pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
@@ -1506,13 +1506,13 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
                return -ENOMEM;
 
        while (iov_iter_count(i) > 0) {
-               size_t offset = pos & (PAGE_CACHE_SIZE - 1);
+               size_t offset = pos & (PAGE_SIZE - 1);
                size_t sector_offset;
                size_t write_bytes = min(iov_iter_count(i),
-                                        nrptrs * (size_t)PAGE_CACHE_SIZE -
+                                        nrptrs * (size_t)PAGE_SIZE -
                                         offset);
                size_t num_pages = DIV_ROUND_UP(write_bytes + offset,
-                                               PAGE_CACHE_SIZE);
+                                               PAGE_SIZE);
                size_t reserve_bytes;
                size_t dirty_pages;
                size_t copied;
@@ -1547,7 +1547,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
                         * write_bytes, so scale down.
                         */
                        num_pages = DIV_ROUND_UP(write_bytes + offset,
-                                                PAGE_CACHE_SIZE);
+                                                PAGE_SIZE);
                        reserve_bytes = round_up(write_bytes + sector_offset,
                                        root->sectorsize);
                        goto reserve_metadata;
@@ -1609,7 +1609,7 @@ again:
                } else {
                        force_page_uptodate = false;
                        dirty_pages = DIV_ROUND_UP(copied + offset,
-                                                  PAGE_CACHE_SIZE);
+                                                  PAGE_SIZE);
                }
 
                /*
@@ -1641,7 +1641,7 @@ again:
                                u64 __pos;
 
                                __pos = round_down(pos, root->sectorsize) +
-                                       (dirty_pages << PAGE_CACHE_SHIFT);
+                                       (dirty_pages << PAGE_SHIFT);
                                btrfs_delalloc_release_space(inode, __pos,
                                                             release_bytes);
                        }
@@ -1682,7 +1682,7 @@ again:
                cond_resched();
 
                balance_dirty_pages_ratelimited(inode->i_mapping);
-               if (dirty_pages < (root->nodesize >> PAGE_CACHE_SHIFT) + 1)
+               if (dirty_pages < (root->nodesize >> PAGE_SHIFT) + 1)
                        btrfs_btree_balance_dirty(root);
 
                pos += copied;
@@ -1738,8 +1738,8 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
                goto out;
        written += written_buffered;
        iocb->ki_pos = pos + written_buffered;
-       invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
-                                endbyte >> PAGE_CACHE_SHIFT);
+       invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
+                                endbyte >> PAGE_SHIFT);
 out:
        return written ? written : err;
 }
@@ -1905,7 +1905,7 @@ static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
  */
 int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 {
-       struct dentry *dentry = file->f_path.dentry;
+       struct dentry *dentry = file_dentry(file);
        struct inode *inode = d_inode(dentry);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
@@ -2682,9 +2682,12 @@ static long btrfs_fallocate(struct file *file, int mode,
                return ret;
 
        inode_lock(inode);
-       ret = inode_newsize_ok(inode, alloc_end);
-       if (ret)
-               goto out;
+
+       if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
+               ret = inode_newsize_ok(inode, offset + len);
+               if (ret)
+                       goto out;
+       }
 
        /*
         * TODO: Move these two operations after we have checked
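
The fallocate hunk above narrows the size check: inode_newsize_ok(), which enforces RLIMIT_FSIZE and s_maxbytes for size-changing operations, now runs only when the call will actually extend i_size. A FALLOC_FL_KEEP_SIZE preallocation leaves the visible size untouched, so file-size limits should not reject it; that is the case the old unconditional check got wrong (rationale inferred from the code, not from a changelog). A userspace illustration:

    #define _GNU_SOURCE
    #include <fcntl.h>

    /* Preallocate 1 MiB at the 8 GiB mark without growing i_size.
     * With the hunk above, only a non-KEEP_SIZE call at this offset is
     * validated against the would-be new size. Error handling elided. */
    static int prealloc_keep_size(int fd)
    {
            return fallocate(fd, FALLOC_FL_KEEP_SIZE, 1ULL << 33, 1 << 20);
    }
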
index 8f835bfa1bdd2a7bc3a7a9e3d1324f75a95e8193..5e6062c26129f5d5f5a9362cc36fa4608062ebd9 100644 (file)
@@ -29,7 +29,7 @@
 #include "inode-map.h"
 #include "volumes.h"
 
-#define BITS_PER_BITMAP                (PAGE_CACHE_SIZE * 8)
+#define BITS_PER_BITMAP                (PAGE_SIZE * 8)
 #define MAX_CACHE_BYTES_PER_GIG        SZ_32K
 
 struct btrfs_trim_range {
@@ -295,7 +295,7 @@ static int readahead_cache(struct inode *inode)
                return -ENOMEM;
 
        file_ra_state_init(ra, inode->i_mapping);
-       last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
+       last_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
 
        page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);
 
@@ -310,14 +310,14 @@ static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
        int num_pages;
        int check_crcs = 0;
 
-       num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE);
+       num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
 
        if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
                check_crcs = 1;
 
        /* Make sure we can fit our crcs into the first page */
        if (write && check_crcs &&
-           (num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE)
+           (num_pages * sizeof(u32)) >= PAGE_SIZE)
                return -ENOSPC;
 
        memset(io_ctl, 0, sizeof(struct btrfs_io_ctl));
@@ -354,9 +354,9 @@ static void io_ctl_map_page(struct btrfs_io_ctl *io_ctl, int clear)
        io_ctl->page = io_ctl->pages[io_ctl->index++];
        io_ctl->cur = page_address(io_ctl->page);
        io_ctl->orig = io_ctl->cur;
-       io_ctl->size = PAGE_CACHE_SIZE;
+       io_ctl->size = PAGE_SIZE;
        if (clear)
-               memset(io_ctl->cur, 0, PAGE_CACHE_SIZE);
+               memset(io_ctl->cur, 0, PAGE_SIZE);
 }
 
 static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
@@ -369,7 +369,7 @@ static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
                if (io_ctl->pages[i]) {
                        ClearPageChecked(io_ctl->pages[i]);
                        unlock_page(io_ctl->pages[i]);
-                       page_cache_release(io_ctl->pages[i]);
+                       put_page(io_ctl->pages[i]);
                }
        }
 }
@@ -475,7 +475,7 @@ static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index)
                offset = sizeof(u32) * io_ctl->num_pages;
 
        crc = btrfs_csum_data(io_ctl->orig + offset, crc,
-                             PAGE_CACHE_SIZE - offset);
+                             PAGE_SIZE - offset);
        btrfs_csum_final(crc, (char *)&crc);
        io_ctl_unmap_page(io_ctl);
        tmp = page_address(io_ctl->pages[0]);
@@ -503,7 +503,7 @@ static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index)
 
        io_ctl_map_page(io_ctl, 0);
        crc = btrfs_csum_data(io_ctl->orig + offset, crc,
-                             PAGE_CACHE_SIZE - offset);
+                             PAGE_SIZE - offset);
        btrfs_csum_final(crc, (char *)&crc);
        if (val != crc) {
                btrfs_err_rl(io_ctl->root->fs_info,
@@ -561,7 +561,7 @@ static int io_ctl_add_bitmap(struct btrfs_io_ctl *io_ctl, void *bitmap)
                io_ctl_map_page(io_ctl, 0);
        }
 
-       memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE);
+       memcpy(io_ctl->cur, bitmap, PAGE_SIZE);
        io_ctl_set_crc(io_ctl, io_ctl->index - 1);
        if (io_ctl->index < io_ctl->num_pages)
                io_ctl_map_page(io_ctl, 0);
@@ -621,7 +621,7 @@ static int io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl,
        if (ret)
                return ret;
 
-       memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE);
+       memcpy(entry->bitmap, io_ctl->cur, PAGE_SIZE);
        io_ctl_unmap_page(io_ctl);
 
        return 0;
@@ -775,7 +775,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
                } else {
                        ASSERT(num_bitmaps);
                        num_bitmaps--;
-                       e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
+                       e->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
                        if (!e->bitmap) {
                                kmem_cache_free(
                                        btrfs_free_space_cachep, e);
@@ -1660,7 +1660,7 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
         * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
         * we add more bitmaps.
         */
-       bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;
+       bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_SIZE;
 
        if (bitmap_bytes >= max_bytes) {
                ctl->extents_thresh = 0;
@@ -2111,7 +2111,7 @@ new_bitmap:
                }
 
                /* allocate the bitmap */
-               info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
+               info->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
                spin_lock(&ctl->tree_lock);
                if (!info->bitmap) {
                        ret = -ENOMEM;
@@ -3580,7 +3580,7 @@ again:
        }
 
        if (!map) {
-               map = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
+               map = kzalloc(PAGE_SIZE, GFP_NOFS);
                if (!map) {
                        kmem_cache_free(btrfs_free_space_cachep, info);
                        return -ENOMEM;
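
Sizing note for the bitmap constants above: BITS_PER_BITMAP is now PAGE_SIZE * 8, so with 4 KiB pages one bitmap entry holds 32768 bits. Each bit covers one ctl->unit; for a block group's free-space ctl that unit is the sector size (an assumption, not shown in these hunks), so at 4 KiB sectors a single page-sized bitmap describes 32768 * 4096 bytes = 128 MiB of the block group.
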
index 1f0ec19b23f615faae7a6ba81b6b203657ae4d23..70107f7c930730c8054d45c153dec71cf9227270 100644 (file)
@@ -283,7 +283,7 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
 }
 
 #define INIT_THRESHOLD ((SZ_32K / 2) / sizeof(struct btrfs_free_space))
-#define INODES_PER_BITMAP (PAGE_CACHE_SIZE * 8)
+#define INODES_PER_BITMAP (PAGE_SIZE * 8)
 
 /*
  * The goal is to keep the memory used by the free_ino tree won't
@@ -317,7 +317,7 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
        }
 
        ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) *
-                               PAGE_CACHE_SIZE / sizeof(*info);
+                               PAGE_SIZE / sizeof(*info);
 }
 
 /*
@@ -481,12 +481,12 @@ again:
 
        spin_lock(&ctl->tree_lock);
        prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents;
-       prealloc = ALIGN(prealloc, PAGE_CACHE_SIZE);
-       prealloc += ctl->total_bitmaps * PAGE_CACHE_SIZE;
+       prealloc = ALIGN(prealloc, PAGE_SIZE);
+       prealloc += ctl->total_bitmaps * PAGE_SIZE;
        spin_unlock(&ctl->tree_lock);
 
        /* Just to make sure we have enough space */
-       prealloc += 8 * PAGE_CACHE_SIZE;
+       prealloc += 8 * PAGE_SIZE;
 
        ret = btrfs_delalloc_reserve_space(inode, 0, prealloc);
        if (ret)
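
The free-inode cache uses the same page-sized bitmaps: INODES_PER_BITMAP = PAGE_SIZE * 8, so with 4 KiB pages each bitmap tracks 32768 inode numbers. The prealloc estimate above then reserves the extent entries rounded up to whole pages, one page per existing bitmap, and eight extra pages of slack before writing the cache out.
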
index 41a5688ffdfe8d57a61a642dce6c6ee80c3872f8..2aaba58b4856be3fc6b179ad3e2d0e94e559dc04 100644 (file)
@@ -194,7 +194,7 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
                while (compressed_size > 0) {
                        cpage = compressed_pages[i];
                        cur_size = min_t(unsigned long, compressed_size,
-                                      PAGE_CACHE_SIZE);
+                                      PAGE_SIZE);
 
                        kaddr = kmap_atomic(cpage);
                        write_extent_buffer(leaf, kaddr, ptr, cur_size);
@@ -208,13 +208,13 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
                                                  compress_type);
        } else {
                page = find_get_page(inode->i_mapping,
-                                    start >> PAGE_CACHE_SHIFT);
+                                    start >> PAGE_SHIFT);
                btrfs_set_file_extent_compression(leaf, ei, 0);
                kaddr = kmap_atomic(page);
-               offset = start & (PAGE_CACHE_SIZE - 1);
+               offset = start & (PAGE_SIZE - 1);
                write_extent_buffer(leaf, kaddr + offset, ptr, size);
                kunmap_atomic(kaddr);
-               page_cache_release(page);
+               put_page(page);
        }
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(path);
@@ -322,7 +322,7 @@ out:
         * And at reserve time, it's always aligned to page size, so
         * just free one page here.
         */
-       btrfs_qgroup_free_data(inode, 0, PAGE_CACHE_SIZE);
+       btrfs_qgroup_free_data(inode, 0, PAGE_SIZE);
        btrfs_free_path(path);
        btrfs_end_transaction(trans, root);
        return ret;
@@ -435,8 +435,8 @@ static noinline void compress_file_range(struct inode *inode,
        actual_end = min_t(u64, isize, end + 1);
 again:
        will_compress = 0;
-       nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
-       nr_pages = min_t(unsigned long, nr_pages, SZ_128K / PAGE_CACHE_SIZE);
+       nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
+       nr_pages = min_t(unsigned long, nr_pages, SZ_128K / PAGE_SIZE);
 
        /*
         * we don't want to send crud past the end of i_size through
@@ -514,7 +514,7 @@ again:
 
                if (!ret) {
                        unsigned long offset = total_compressed &
-                               (PAGE_CACHE_SIZE - 1);
+                               (PAGE_SIZE - 1);
                        struct page *page = pages[nr_pages_ret - 1];
                        char *kaddr;
 
@@ -524,7 +524,7 @@ again:
                        if (offset) {
                                kaddr = kmap_atomic(page);
                                memset(kaddr + offset, 0,
-                                      PAGE_CACHE_SIZE - offset);
+                                      PAGE_SIZE - offset);
                                kunmap_atomic(kaddr);
                        }
                        will_compress = 1;
@@ -580,7 +580,7 @@ cont:
                 * one last check to make sure the compression is really a
                 * win, compare the page count read with the blocks on disk
                 */
-               total_in = ALIGN(total_in, PAGE_CACHE_SIZE);
+               total_in = ALIGN(total_in, PAGE_SIZE);
                if (total_compressed >= total_in) {
                        will_compress = 0;
                } else {
@@ -594,7 +594,7 @@ cont:
                 */
                for (i = 0; i < nr_pages_ret; i++) {
                        WARN_ON(pages[i]->mapping);
-                       page_cache_release(pages[i]);
+                       put_page(pages[i]);
                }
                kfree(pages);
                pages = NULL;
@@ -650,7 +650,7 @@ cleanup_and_bail_uncompressed:
 free_pages_out:
        for (i = 0; i < nr_pages_ret; i++) {
                WARN_ON(pages[i]->mapping);
-               page_cache_release(pages[i]);
+               put_page(pages[i]);
        }
        kfree(pages);
 }
@@ -664,7 +664,7 @@ static void free_async_extent_pages(struct async_extent *async_extent)
 
        for (i = 0; i < async_extent->nr_pages; i++) {
                WARN_ON(async_extent->pages[i]->mapping);
-               page_cache_release(async_extent->pages[i]);
+               put_page(async_extent->pages[i]);
        }
        kfree(async_extent->pages);
        async_extent->nr_pages = 0;
@@ -966,7 +966,7 @@ static noinline int cow_file_range(struct inode *inode,
                                     PAGE_END_WRITEBACK);
 
                        *nr_written = *nr_written +
-                            (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
+                            (end - start + PAGE_SIZE) / PAGE_SIZE;
                        *page_started = 1;
                        goto out;
                } else if (ret < 0) {
@@ -1106,8 +1106,8 @@ static noinline void async_cow_submit(struct btrfs_work *work)
        async_cow = container_of(work, struct async_cow, work);
 
        root = async_cow->root;
-       nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
-               PAGE_CACHE_SHIFT;
+       nr_pages = (async_cow->end - async_cow->start + PAGE_SIZE) >>
+               PAGE_SHIFT;
 
        /*
         * atomic_sub_return implies a barrier for waitqueue_active
@@ -1164,8 +1164,8 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
                                async_cow_start, async_cow_submit,
                                async_cow_free);
 
-               nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
-                       PAGE_CACHE_SHIFT;
+               nr_pages = (cur_end - start + PAGE_SIZE) >>
+                       PAGE_SHIFT;
                atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);
 
                btrfs_queue_work(root->fs_info->delalloc_workers,
@@ -1960,7 +1960,7 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
                              struct extent_state **cached_state)
 {
-       WARN_ON((end & (PAGE_CACHE_SIZE - 1)) == 0);
+       WARN_ON((end & (PAGE_SIZE - 1)) == 0);
        return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
                                   cached_state, GFP_NOFS);
 }
@@ -1993,7 +1993,7 @@ again:
 
        inode = page->mapping->host;
        page_start = page_offset(page);
-       page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
+       page_end = page_offset(page) + PAGE_SIZE - 1;
 
        lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
                         &cached_state);
@@ -2003,7 +2003,7 @@ again:
                goto out;
 
        ordered = btrfs_lookup_ordered_range(inode, page_start,
-                                       PAGE_CACHE_SIZE);
+                                       PAGE_SIZE);
        if (ordered) {
                unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
                                     page_end, &cached_state, GFP_NOFS);
@@ -2014,7 +2014,7 @@ again:
        }
 
        ret = btrfs_delalloc_reserve_space(inode, page_start,
-                                          PAGE_CACHE_SIZE);
+                                          PAGE_SIZE);
        if (ret) {
                mapping_set_error(page->mapping, ret);
                end_extent_writepage(page, ret, page_start, page_end);
@@ -2030,7 +2030,7 @@ out:
                             &cached_state, GFP_NOFS);
 out_page:
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
        kfree(fixup);
 }
 
@@ -2063,7 +2063,7 @@ static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
                return -EAGAIN;
 
        SetPageChecked(page);
-       page_cache_get(page);
+       get_page(page);
        btrfs_init_work(&fixup->work, btrfs_fixup_helper,
                        btrfs_writepage_fixup_worker, NULL, NULL);
        fixup->page = page;
@@ -4247,7 +4247,7 @@ static int truncate_inline_extent(struct inode *inode,
 
        if (btrfs_file_extent_compression(leaf, fi) != BTRFS_COMPRESS_NONE) {
                loff_t offset = new_size;
-               loff_t page_end = ALIGN(offset, PAGE_CACHE_SIZE);
+               loff_t page_end = ALIGN(offset, PAGE_SIZE);
 
                /*
                 * Zero out the remainder of the last page of our inline extent,
@@ -4633,7 +4633,7 @@ int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
        struct extent_state *cached_state = NULL;
        char *kaddr;
        u32 blocksize = root->sectorsize;
-       pgoff_t index = from >> PAGE_CACHE_SHIFT;
+       pgoff_t index = from >> PAGE_SHIFT;
        unsigned offset = from & (blocksize - 1);
        struct page *page;
        gfp_t mask = btrfs_alloc_write_mask(mapping);
@@ -4668,7 +4668,7 @@ again:
                lock_page(page);
                if (page->mapping != mapping) {
                        unlock_page(page);
-                       page_cache_release(page);
+                       put_page(page);
                        goto again;
                }
                if (!PageUptodate(page)) {
@@ -4686,7 +4686,7 @@ again:
                unlock_extent_cached(io_tree, block_start, block_end,
                                     &cached_state, GFP_NOFS);
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
                btrfs_start_ordered_extent(inode, ordered, 1);
                btrfs_put_ordered_extent(ordered);
                goto again;
@@ -4728,7 +4728,7 @@ out_unlock:
                btrfs_delalloc_release_space(inode, block_start,
                                             blocksize);
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 out:
        return ret;
 }
@@ -6717,7 +6717,7 @@ static noinline int uncompress_inline(struct btrfs_path *path,
 
        read_extent_buffer(leaf, tmp, ptr, inline_size);
 
-       max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
+       max_size = min_t(unsigned long, PAGE_SIZE, max_size);
        ret = btrfs_decompress(compress_type, tmp, page,
                               extent_offset, inline_size, max_size);
        kfree(tmp);
@@ -6879,8 +6879,8 @@ next:
 
                size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
                extent_offset = page_offset(page) + pg_offset - extent_start;
-               copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
-                               size - extent_offset);
+               copy_size = min_t(u64, PAGE_SIZE - pg_offset,
+                                 size - extent_offset);
                em->start = extent_start + extent_offset;
                em->len = ALIGN(copy_size, root->sectorsize);
                em->orig_block_len = em->len;
@@ -6899,9 +6899,9 @@ next:
                                map = kmap(page);
                                read_extent_buffer(leaf, map + pg_offset, ptr,
                                                   copy_size);
-                               if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
+                               if (pg_offset + copy_size < PAGE_SIZE) {
                                        memset(map + pg_offset + copy_size, 0,
-                                              PAGE_CACHE_SIZE - pg_offset -
+                                              PAGE_SIZE - pg_offset -
                                               copy_size);
                                }
                                kunmap(page);
@@ -7336,12 +7336,12 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
        int start_idx;
        int end_idx;
 
-       start_idx = start >> PAGE_CACHE_SHIFT;
+       start_idx = start >> PAGE_SHIFT;
 
        /*
         * end is the last byte in the last page.  end == start is legal
         */
-       end_idx = end >> PAGE_CACHE_SHIFT;
+       end_idx = end >> PAGE_SHIFT;
 
        rcu_read_lock();
 
@@ -7382,7 +7382,7 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
                 * include/linux/pagemap.h for details.
                 */
                if (unlikely(page != *pagep)) {
-                       page_cache_release(page);
+                       put_page(page);
                        page = NULL;
                }
        }
@@ -7390,7 +7390,7 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
        if (page) {
                if (page->index <= end_idx)
                        found = true;
-               page_cache_release(page);
+               put_page(page);
        }
 
        rcu_read_unlock();
@@ -8719,7 +8719,7 @@ static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
        if (ret == 1) {
                ClearPagePrivate(page);
                set_page_private(page, 0);
-               page_cache_release(page);
+               put_page(page);
        }
        return ret;
 }
@@ -8739,7 +8739,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
        struct btrfs_ordered_extent *ordered;
        struct extent_state *cached_state = NULL;
        u64 page_start = page_offset(page);
-       u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
+       u64 page_end = page_start + PAGE_SIZE - 1;
        u64 start;
        u64 end;
        int inode_evicting = inode->i_state & I_FREEING;
@@ -8822,7 +8822,7 @@ again:
         * 2) Not written to disk
         *    This means the reserved space should be freed here.
         */
-       btrfs_qgroup_free_data(inode, page_start, PAGE_CACHE_SIZE);
+       btrfs_qgroup_free_data(inode, page_start, PAGE_SIZE);
        if (!inode_evicting) {
                clear_extent_bit(tree, page_start, page_end,
                                 EXTENT_LOCKED | EXTENT_DIRTY |
@@ -8837,7 +8837,7 @@ again:
        if (PagePrivate(page)) {
                ClearPagePrivate(page);
                set_page_private(page, 0);
-               page_cache_release(page);
+               put_page(page);
        }
 }
 
@@ -8874,11 +8874,11 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        u64 page_end;
        u64 end;
 
-       reserved_space = PAGE_CACHE_SIZE;
+       reserved_space = PAGE_SIZE;
 
        sb_start_pagefault(inode->i_sb);
        page_start = page_offset(page);
-       page_end = page_start + PAGE_CACHE_SIZE - 1;
+       page_end = page_start + PAGE_SIZE - 1;
        end = page_end;
 
        /*
@@ -8934,15 +8934,15 @@ again:
                goto again;
        }
 
-       if (page->index == ((size - 1) >> PAGE_CACHE_SHIFT)) {
+       if (page->index == ((size - 1) >> PAGE_SHIFT)) {
                reserved_space = round_up(size - page_start, root->sectorsize);
-               if (reserved_space < PAGE_CACHE_SIZE) {
+               if (reserved_space < PAGE_SIZE) {
                        end = page_start + reserved_space - 1;
                        spin_lock(&BTRFS_I(inode)->lock);
                        BTRFS_I(inode)->outstanding_extents++;
                        spin_unlock(&BTRFS_I(inode)->lock);
                        btrfs_delalloc_release_space(inode, page_start,
-                                               PAGE_CACHE_SIZE - reserved_space);
+                                               PAGE_SIZE - reserved_space);
                }
        }
 
@@ -8969,14 +8969,14 @@ again:
        ret = 0;
 
        /* page is wholly or partially inside EOF */
-       if (page_start + PAGE_CACHE_SIZE > size)
-               zero_start = size & ~PAGE_CACHE_MASK;
+       if (page_start + PAGE_SIZE > size)
+               zero_start = size & ~PAGE_MASK;
        else
-               zero_start = PAGE_CACHE_SIZE;
+               zero_start = PAGE_SIZE;
 
-       if (zero_start != PAGE_CACHE_SIZE) {
+       if (zero_start != PAGE_SIZE) {
                kaddr = kmap(page);
-               memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
+               memset(kaddr + zero_start, 0, PAGE_SIZE - zero_start);
                flush_dcache_page(page);
                kunmap(page);
        }
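
The inode.c hunks above are almost entirely the mechanical PAGE_CACHE_* to PAGE_* rename that runs through the rest of this diff. As a rough reconstruction (from memory, not a verbatim copy of any particular tree), the legacy names were thin aliases in include/linux/pagemap.h, which is why the conversion is a pure rename with no behavioral change:

/* Sketch of the compatibility aliases older kernels carried in
 * include/linux/pagemap.h; converting callers such as btrfs to the
 * base names is what lets the aliases be deleted. */
#define PAGE_CACHE_SHIFT         PAGE_SHIFT
#define PAGE_CACHE_SIZE          PAGE_SIZE
#define PAGE_CACHE_MASK          PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)   PAGE_ALIGN(addr)
#define page_cache_get(page)     get_page(page)
#define page_cache_release(page) put_page(page)

Because each alias expands to its base counterpart, every hunk of this kind can be verified by substitution alone.
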
index 053e677839fef37121a6a958aa02c883a20590da..5a23806ae418af8e3952b4cbf65df06aae384b57 100644 (file)
@@ -898,7 +898,7 @@ static int check_defrag_in_cache(struct inode *inode, u64 offset, u32 thresh)
        u64 end;
 
        read_lock(&em_tree->lock);
-       em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
+       em = lookup_extent_mapping(em_tree, offset, PAGE_SIZE);
        read_unlock(&em_tree->lock);
 
        if (em) {
@@ -988,7 +988,7 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start)
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        struct extent_map *em;
-       u64 len = PAGE_CACHE_SIZE;
+       u64 len = PAGE_SIZE;
 
        /*
         * hopefully we have this extent in the tree already, try without
@@ -1124,15 +1124,15 @@ static int cluster_pages_for_defrag(struct inode *inode,
        struct extent_io_tree *tree;
        gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
 
-       file_end = (isize - 1) >> PAGE_CACHE_SHIFT;
+       file_end = (isize - 1) >> PAGE_SHIFT;
        if (!isize || start_index > file_end)
                return 0;
 
        page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1);
 
        ret = btrfs_delalloc_reserve_space(inode,
-                       start_index << PAGE_CACHE_SHIFT,
-                       page_cnt << PAGE_CACHE_SHIFT);
+                       start_index << PAGE_SHIFT,
+                       page_cnt << PAGE_SHIFT);
        if (ret)
                return ret;
        i_done = 0;
@@ -1148,7 +1148,7 @@ again:
                        break;
 
                page_start = page_offset(page);
-               page_end = page_start + PAGE_CACHE_SIZE - 1;
+               page_end = page_start + PAGE_SIZE - 1;
                while (1) {
                        lock_extent_bits(tree, page_start, page_end,
                                         &cached_state);
@@ -1169,7 +1169,7 @@ again:
                         */
                        if (page->mapping != inode->i_mapping) {
                                unlock_page(page);
-                               page_cache_release(page);
+                               put_page(page);
                                goto again;
                        }
                }
@@ -1179,7 +1179,7 @@ again:
                        lock_page(page);
                        if (!PageUptodate(page)) {
                                unlock_page(page);
-                               page_cache_release(page);
+                               put_page(page);
                                ret = -EIO;
                                break;
                        }
@@ -1187,7 +1187,7 @@ again:
 
                if (page->mapping != inode->i_mapping) {
                        unlock_page(page);
-                       page_cache_release(page);
+                       put_page(page);
                        goto again;
                }
 
@@ -1208,7 +1208,7 @@ again:
                wait_on_page_writeback(pages[i]);
 
        page_start = page_offset(pages[0]);
-       page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE;
+       page_end = page_offset(pages[i_done - 1]) + PAGE_SIZE;
 
        lock_extent_bits(&BTRFS_I(inode)->io_tree,
                         page_start, page_end - 1, &cached_state);
@@ -1222,8 +1222,8 @@ again:
                BTRFS_I(inode)->outstanding_extents++;
                spin_unlock(&BTRFS_I(inode)->lock);
                btrfs_delalloc_release_space(inode,
-                               start_index << PAGE_CACHE_SHIFT,
-                               (page_cnt - i_done) << PAGE_CACHE_SHIFT);
+                               start_index << PAGE_SHIFT,
+                               (page_cnt - i_done) << PAGE_SHIFT);
        }
 
 
@@ -1240,17 +1240,17 @@ again:
                set_page_extent_mapped(pages[i]);
                set_page_dirty(pages[i]);
                unlock_page(pages[i]);
-               page_cache_release(pages[i]);
+               put_page(pages[i]);
        }
        return i_done;
 out:
        for (i = 0; i < i_done; i++) {
                unlock_page(pages[i]);
-               page_cache_release(pages[i]);
+               put_page(pages[i]);
        }
        btrfs_delalloc_release_space(inode,
-                       start_index << PAGE_CACHE_SHIFT,
-                       page_cnt << PAGE_CACHE_SHIFT);
+                       start_index << PAGE_SHIFT,
+                       page_cnt << PAGE_SHIFT);
        return ret;
 
 }
@@ -1273,7 +1273,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
        int defrag_count = 0;
        int compress_type = BTRFS_COMPRESS_ZLIB;
        u32 extent_thresh = range->extent_thresh;
-       unsigned long max_cluster = SZ_256K >> PAGE_CACHE_SHIFT;
+       unsigned long max_cluster = SZ_256K >> PAGE_SHIFT;
        unsigned long cluster = max_cluster;
        u64 new_align = ~((u64)SZ_128K - 1);
        struct page **pages = NULL;
@@ -1317,9 +1317,9 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
        /* find the last page to defrag */
        if (range->start + range->len > range->start) {
                last_index = min_t(u64, isize - 1,
-                        range->start + range->len - 1) >> PAGE_CACHE_SHIFT;
+                        range->start + range->len - 1) >> PAGE_SHIFT;
        } else {
-               last_index = (isize - 1) >> PAGE_CACHE_SHIFT;
+               last_index = (isize - 1) >> PAGE_SHIFT;
        }
 
        if (newer_than) {
@@ -1331,11 +1331,11 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
                         * we always align our defrag to help keep
                         * the extents in the file evenly spaced
                         */
-                       i = (newer_off & new_align) >> PAGE_CACHE_SHIFT;
+                       i = (newer_off & new_align) >> PAGE_SHIFT;
                } else
                        goto out_ra;
        } else {
-               i = range->start >> PAGE_CACHE_SHIFT;
+               i = range->start >> PAGE_SHIFT;
        }
        if (!max_to_defrag)
                max_to_defrag = last_index - i + 1;
@@ -1348,7 +1348,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
                inode->i_mapping->writeback_index = i;
 
        while (i <= last_index && defrag_count < max_to_defrag &&
-              (i < DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE))) {
+              (i < DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE))) {
                /*
                 * make sure we stop running if someone unmounts
                 * the FS
@@ -1362,7 +1362,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
                        break;
                }
 
-               if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
+               if (!should_defrag_range(inode, (u64)i << PAGE_SHIFT,
                                         extent_thresh, &last_len, &skip,
                                         &defrag_end, range->flags &
                                         BTRFS_DEFRAG_RANGE_COMPRESS)) {
@@ -1371,14 +1371,14 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
                         * the should_defrag function tells us how much to skip
                         * bump our counter by the suggested amount
                         */
-                       next = DIV_ROUND_UP(skip, PAGE_CACHE_SIZE);
+                       next = DIV_ROUND_UP(skip, PAGE_SIZE);
                        i = max(i + 1, next);
                        continue;
                }
 
                if (!newer_than) {
-                       cluster = (PAGE_CACHE_ALIGN(defrag_end) >>
-                                  PAGE_CACHE_SHIFT) - i;
+                       cluster = (PAGE_ALIGN(defrag_end) >>
+                                  PAGE_SHIFT) - i;
                        cluster = min(cluster, max_cluster);
                } else {
                        cluster = max_cluster;
@@ -1412,20 +1412,20 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
                                i += ret;
 
                        newer_off = max(newer_off + 1,
-                                       (u64)i << PAGE_CACHE_SHIFT);
+                                       (u64)i << PAGE_SHIFT);
 
                        ret = find_new_extents(root, inode, newer_than,
                                               &newer_off, SZ_64K);
                        if (!ret) {
                                range->start = newer_off;
-                               i = (newer_off & new_align) >> PAGE_CACHE_SHIFT;
+                               i = (newer_off & new_align) >> PAGE_SHIFT;
                        } else {
                                break;
                        }
                } else {
                        if (ret > 0) {
                                i += ret;
-                               last_len += ret << PAGE_CACHE_SHIFT;
+                               last_len += ret << PAGE_SHIFT;
                        } else {
                                i++;
                                last_len = 0;
@@ -1654,7 +1654,7 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
 
                src_inode = file_inode(src.file);
                if (src_inode->i_sb != file_inode(file)->i_sb) {
-                       btrfs_info(BTRFS_I(src_inode)->root->fs_info,
+                       btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
                                   "Snapshot src from another FS");
                        ret = -EXDEV;
                } else if (!inode_owner_or_capable(src_inode)) {
@@ -1722,7 +1722,7 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
        if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
                readonly = true;
        if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
-               if (vol_args->size > PAGE_CACHE_SIZE) {
+               if (vol_args->size > PAGE_SIZE) {
                        ret = -EINVAL;
                        goto free_args;
                }
@@ -2806,12 +2806,12 @@ static struct page *extent_same_get_page(struct inode *inode, pgoff_t index)
                lock_page(page);
                if (!PageUptodate(page)) {
                        unlock_page(page);
-                       page_cache_release(page);
+                       put_page(page);
                        return ERR_PTR(-EIO);
                }
                if (page->mapping != inode->i_mapping) {
                        unlock_page(page);
-                       page_cache_release(page);
+                       put_page(page);
                        return ERR_PTR(-EAGAIN);
                }
        }
@@ -2823,7 +2823,7 @@ static int gather_extent_pages(struct inode *inode, struct page **pages,
                               int num_pages, u64 off)
 {
        int i;
-       pgoff_t index = off >> PAGE_CACHE_SHIFT;
+       pgoff_t index = off >> PAGE_SHIFT;
 
        for (i = 0; i < num_pages; i++) {
 again:
@@ -2932,12 +2932,12 @@ static void btrfs_cmp_data_free(struct cmp_pages *cmp)
                pg = cmp->src_pages[i];
                if (pg) {
                        unlock_page(pg);
-                       page_cache_release(pg);
+                       put_page(pg);
                }
                pg = cmp->dst_pages[i];
                if (pg) {
                        unlock_page(pg);
-                       page_cache_release(pg);
+                       put_page(pg);
                }
        }
        kfree(cmp->src_pages);
@@ -2949,7 +2949,7 @@ static int btrfs_cmp_data_prepare(struct inode *src, u64 loff,
                                  u64 len, struct cmp_pages *cmp)
 {
        int ret;
-       int num_pages = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;
+       int num_pages = PAGE_ALIGN(len) >> PAGE_SHIFT;
        struct page **src_pgarr, **dst_pgarr;
 
        /*
@@ -2987,12 +2987,12 @@ static int btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst,
        int ret = 0;
        int i;
        struct page *src_page, *dst_page;
-       unsigned int cmp_len = PAGE_CACHE_SIZE;
+       unsigned int cmp_len = PAGE_SIZE;
        void *addr, *dst_addr;
 
        i = 0;
        while (len) {
-               if (len < PAGE_CACHE_SIZE)
+               if (len < PAGE_SIZE)
                        cmp_len = len;
 
                BUG_ON(i >= cmp->num_pages);
@@ -3191,7 +3191,7 @@ ssize_t btrfs_dedupe_file_range(struct file *src_file, u64 loff, u64 olen,
        if (olen > BTRFS_MAX_DEDUPE_LEN)
                olen = BTRFS_MAX_DEDUPE_LEN;
 
-       if (WARN_ON_ONCE(bs < PAGE_CACHE_SIZE)) {
+       if (WARN_ON_ONCE(bs < PAGE_SIZE)) {
                /*
                 * Btrfs does not support blocksize < page_size. As a
                 * result, btrfs_cmp_data() won't correctly handle
@@ -3891,8 +3891,8 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
         * data immediately and not the previous data.
         */
        truncate_inode_pages_range(&inode->i_data,
-                               round_down(destoff, PAGE_CACHE_SIZE),
-                               round_up(destoff + len, PAGE_CACHE_SIZE) - 1);
+                               round_down(destoff, PAGE_SIZE),
+                               round_up(destoff + len, PAGE_SIZE) - 1);
 out_unlock:
        if (!same_inode)
                btrfs_double_inode_unlock(src, inode);
@@ -4124,7 +4124,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
        /* we generally have at most 6 or so space infos, one for each raid
         * level.  So, a whole page should be more than enough for everyone
         */
-       if (alloc_size > PAGE_CACHE_SIZE)
+       if (alloc_size > PAGE_SIZE)
                return -ENOMEM;
 
        space_args.total_spaces = 0;
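
Several ioctl.c hunks above repeat the same byte-to-page arithmetic: an aligned length shifted down by PAGE_SHIFT gives a page count, and a byte offset shifted down gives the first page index. A minimal userspace sketch of that arithmetic, with PAGE_SHIFT hard-coded to 12 (4 KiB pages) purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                      /* illustrative 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(uint64_t)(PAGE_SIZE - 1))

int main(void)
{
	uint64_t off = 5000;               /* byte offset into the file */
	uint64_t len = 10000;              /* length of the range */

	/* btrfs_cmp_data_prepare() style: pages covering an aligned length */
	unsigned long num_pages = (unsigned long)(PAGE_ALIGN(len) >> PAGE_SHIFT);

	/* gather_extent_pages() style: first page index of the range */
	uint64_t index = off >> PAGE_SHIFT;

	printf("%lu pages needed, starting at page index %llu\n",
	       num_pages, (unsigned long long)index);   /* 3 pages, index 1 */
	return 0;
}
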
index a2f0513477313fb6edbc4f898df70262eda96aef..1adfbe7be6b806c681068c393ea32f6cde8bdf83 100644 (file)
@@ -55,8 +55,8 @@ static struct list_head *lzo_alloc_workspace(void)
                return ERR_PTR(-ENOMEM);
 
        workspace->mem = vmalloc(LZO1X_MEM_COMPRESS);
-       workspace->buf = vmalloc(lzo1x_worst_compress(PAGE_CACHE_SIZE));
-       workspace->cbuf = vmalloc(lzo1x_worst_compress(PAGE_CACHE_SIZE));
+       workspace->buf = vmalloc(lzo1x_worst_compress(PAGE_SIZE));
+       workspace->cbuf = vmalloc(lzo1x_worst_compress(PAGE_SIZE));
        if (!workspace->mem || !workspace->buf || !workspace->cbuf)
                goto fail;
 
@@ -116,7 +116,7 @@ static int lzo_compress_pages(struct list_head *ws,
        *total_out = 0;
        *total_in = 0;
 
-       in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
+       in_page = find_get_page(mapping, start >> PAGE_SHIFT);
        data_in = kmap(in_page);
 
        /*
@@ -133,10 +133,10 @@ static int lzo_compress_pages(struct list_head *ws,
        tot_out = LZO_LEN;
        pages[0] = out_page;
        nr_pages = 1;
-       pg_bytes_left = PAGE_CACHE_SIZE - LZO_LEN;
+       pg_bytes_left = PAGE_SIZE - LZO_LEN;
 
        /* compress at most one page of data each time */
-       in_len = min(len, PAGE_CACHE_SIZE);
+       in_len = min(len, PAGE_SIZE);
        while (tot_in < len) {
                ret = lzo1x_1_compress(data_in, in_len, workspace->cbuf,
                                       &out_len, workspace->mem);
@@ -201,7 +201,7 @@ static int lzo_compress_pages(struct list_head *ws,
                                cpage_out = kmap(out_page);
                                pages[nr_pages++] = out_page;
 
-                               pg_bytes_left = PAGE_CACHE_SIZE;
+                               pg_bytes_left = PAGE_SIZE;
                                out_offset = 0;
                        }
                }
@@ -221,12 +221,12 @@ static int lzo_compress_pages(struct list_head *ws,
 
                bytes_left = len - tot_in;
                kunmap(in_page);
-               page_cache_release(in_page);
+               put_page(in_page);
 
-               start += PAGE_CACHE_SIZE;
-               in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
+               start += PAGE_SIZE;
+               in_page = find_get_page(mapping, start >> PAGE_SHIFT);
                data_in = kmap(in_page);
-               in_len = min(bytes_left, PAGE_CACHE_SIZE);
+               in_len = min(bytes_left, PAGE_SIZE);
        }
 
        if (tot_out > tot_in)
@@ -248,7 +248,7 @@ out:
 
        if (in_page) {
                kunmap(in_page);
-               page_cache_release(in_page);
+               put_page(in_page);
        }
 
        return ret;
@@ -266,7 +266,7 @@ static int lzo_decompress_biovec(struct list_head *ws,
        char *data_in;
        unsigned long page_in_index = 0;
        unsigned long page_out_index = 0;
-       unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_CACHE_SIZE);
+       unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
        unsigned long buf_start;
        unsigned long buf_offset = 0;
        unsigned long bytes;
@@ -289,7 +289,7 @@ static int lzo_decompress_biovec(struct list_head *ws,
        tot_in = LZO_LEN;
        in_offset = LZO_LEN;
        tot_len = min_t(size_t, srclen, tot_len);
-       in_page_bytes_left = PAGE_CACHE_SIZE - LZO_LEN;
+       in_page_bytes_left = PAGE_SIZE - LZO_LEN;
 
        tot_out = 0;
        pg_offset = 0;
@@ -345,12 +345,12 @@ cont:
 
                                data_in = kmap(pages_in[++page_in_index]);
 
-                               in_page_bytes_left = PAGE_CACHE_SIZE;
+                               in_page_bytes_left = PAGE_SIZE;
                                in_offset = 0;
                        }
                }
 
-               out_len = lzo1x_worst_compress(PAGE_CACHE_SIZE);
+               out_len = lzo1x_worst_compress(PAGE_SIZE);
                ret = lzo1x_decompress_safe(buf, in_len, workspace->buf,
                                            &out_len);
                if (need_unmap)
@@ -399,7 +399,7 @@ static int lzo_decompress(struct list_head *ws, unsigned char *data_in,
        in_len = read_compress_length(data_in);
        data_in += LZO_LEN;
 
-       out_len = PAGE_CACHE_SIZE;
+       out_len = PAGE_SIZE;
        ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
        if (ret != LZO_E_OK) {
                printk(KERN_WARNING "BTRFS: decompress failed!\n");
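
The lzo.c workspaces above are sized with lzo1x_worst_compress() because LZO can expand incompressible input; sizing the scratch buffers for the worst case lets the code compress exactly one page per iteration without per-call overflow checks. A standalone sketch of that bound (the macro body matches my reading of include/linux/lzo.h, with a 4 KiB page assumed for illustration):

#include <stdio.h>

#define PAGE_SIZE 4096UL                   /* illustrative 4 KiB page */
/* worst-case output size for incompressible input */
#define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3)

int main(void)
{
	printf("worst case for one %lu-byte page: %lu bytes\n",
	       PAGE_SIZE, lzo1x_worst_compress(PAGE_SIZE));   /* 4419 */
	return 0;
}
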
index 5279fdae7142fbe3177a556a020ed1af3a7aa8f1..9e119552ed32cb4236eb3933fb9e7e9d104b77af 100644 (file)
@@ -1463,6 +1463,7 @@ struct btrfs_qgroup_extent_record
        u64 bytenr = record->bytenr;
 
        assert_spin_locked(&delayed_refs->lock);
+       trace_btrfs_qgroup_insert_dirty_extent(record);
 
        while (*p) {
                parent_node = *p;
@@ -1594,6 +1595,9 @@ static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
                cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
                cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
 
+               trace_qgroup_update_counters(qg->qgroupid, cur_old_count,
+                                            cur_new_count);
+
                /* Rfer update part */
                if (cur_old_count == 0 && cur_new_count > 0) {
                        qg->rfer += num_bytes;
@@ -1683,6 +1687,9 @@ btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
                goto out_free;
        BUG_ON(!fs_info->quota_root);
 
+       trace_btrfs_qgroup_account_extent(bytenr, num_bytes, nr_old_roots,
+                                         nr_new_roots);
+
        qgroups = ulist_alloc(GFP_NOFS);
        if (!qgroups) {
                ret = -ENOMEM;
@@ -1752,6 +1759,8 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans,
                record = rb_entry(node, struct btrfs_qgroup_extent_record,
                                  node);
 
+               trace_btrfs_qgroup_account_extents(record);
+
                if (!ret) {
                        /*
                         * Use (u64)-1 as time_seq to do special search, which
@@ -1842,8 +1851,10 @@ out:
 }
 
 /*
- * copy the accounting information between qgroups. This is necessary when a
- * snapshot or a subvolume is created
+ * Copy the accounting information between qgroups. This is necessary
+ * when a snapshot or a subvolume is created. Throwing an error will
+ * cause a transaction abort so we take extra care here to only error
+ * when a readonly fs is a reasonable outcome.
  */
 int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
                         struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
@@ -1873,15 +1884,15 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
                       2 * inherit->num_excl_copies;
                for (i = 0; i < nums; ++i) {
                        srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
-                       if (!srcgroup) {
-                               ret = -EINVAL;
-                               goto out;
-                       }
 
-                       if ((srcgroup->qgroupid >> 48) <= (objectid >> 48)) {
-                               ret = -EINVAL;
-                               goto out;
-                       }
+                       /*
+                        * Zero out invalid groups so we can ignore
+                        * them later.
+                        */
+                       if (!srcgroup ||
+                           ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
+                               *i_qgroups = 0ULL;
+
                        ++i_qgroups;
                }
        }
@@ -1916,17 +1927,19 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
         */
        if (inherit) {
                i_qgroups = (u64 *)(inherit + 1);
-               for (i = 0; i < inherit->num_qgroups; ++i) {
+               for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
+                       if (*i_qgroups == 0)
+                               continue;
                        ret = add_qgroup_relation_item(trans, quota_root,
                                                       objectid, *i_qgroups);
-                       if (ret)
+                       if (ret && ret != -EEXIST)
                                goto out;
                        ret = add_qgroup_relation_item(trans, quota_root,
                                                       *i_qgroups, objectid);
-                       if (ret)
+                       if (ret && ret != -EEXIST)
                                goto out;
-                       ++i_qgroups;
                }
+               ret = 0;
        }
 
 
@@ -1987,17 +2000,22 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
 
        i_qgroups = (u64 *)(inherit + 1);
        for (i = 0; i < inherit->num_qgroups; ++i) {
-               ret = add_relation_rb(quota_root->fs_info, objectid,
-                                     *i_qgroups);
-               if (ret)
-                       goto unlock;
+               if (*i_qgroups) {
+                       ret = add_relation_rb(quota_root->fs_info, objectid,
+                                             *i_qgroups);
+                       if (ret)
+                               goto unlock;
+               }
                ++i_qgroups;
        }
 
-       for (i = 0; i <  inherit->num_ref_copies; ++i) {
+       for (i = 0; i <  inherit->num_ref_copies; ++i, i_qgroups += 2) {
                struct btrfs_qgroup *src;
                struct btrfs_qgroup *dst;
 
+               if (!i_qgroups[0] || !i_qgroups[1])
+                       continue;
+
                src = find_qgroup_rb(fs_info, i_qgroups[0]);
                dst = find_qgroup_rb(fs_info, i_qgroups[1]);
 
@@ -2008,12 +2026,14 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
 
                dst->rfer = src->rfer - level_size;
                dst->rfer_cmpr = src->rfer_cmpr - level_size;
-               i_qgroups += 2;
        }
-       for (i = 0; i <  inherit->num_excl_copies; ++i) {
+       for (i = 0; i <  inherit->num_excl_copies; ++i, i_qgroups += 2) {
                struct btrfs_qgroup *src;
                struct btrfs_qgroup *dst;
 
+               if (!i_qgroups[0] || !i_qgroups[1])
+                       continue;
+
                src = find_qgroup_rb(fs_info, i_qgroups[0]);
                dst = find_qgroup_rb(fs_info, i_qgroups[1]);
 
@@ -2024,7 +2044,6 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
 
                dst->excl = src->excl + level_size;
                dst->excl_cmpr = src->excl_cmpr + level_size;
-               i_qgroups += 2;
        }
 
 unlock:
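
The btrfs_qgroup_inherit() hunks above change its error strategy: because an error at this point aborts the transaction and forces the filesystem read-only, invalid source qgroup ids are now zeroed in a first validation pass and silently skipped by every later loop, and -EEXIST from add_qgroup_relation_item() is tolerated. A minimal sketch of the zero-and-skip pattern, with a hypothetical is_valid() standing in for find_qgroup_rb() and the qgroupid level comparison:

#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-in for the real validity checks */
static int is_valid(uint64_t qgroupid)
{
	return qgroupid != 42;
}

int main(void)
{
	uint64_t ids[] = { 7, 42, 9 };
	int i, n = 3;

	/* pass 1: mark invalid entries instead of returning -EINVAL */
	for (i = 0; i < n; i++)
		if (!is_valid(ids[i]))
			ids[i] = 0;

	/* later passes: skip marked entries rather than aborting */
	for (i = 0; i < n; i++) {
		if (!ids[i])
			continue;
		printf("inheriting from qgroup %llu\n",
		       (unsigned long long)ids[i]);
	}
	return 0;
}
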
index 55161369fab14d7d2381c2c3950a576c895be2e8..0b7792e02dd5a65f284f6f213f61822b3b34e6cf 100644 (file)
@@ -270,7 +270,7 @@ static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
                s = kmap(rbio->bio_pages[i]);
                d = kmap(rbio->stripe_pages[i]);
 
-               memcpy(d, s, PAGE_CACHE_SIZE);
+               memcpy(d, s, PAGE_SIZE);
 
                kunmap(rbio->bio_pages[i]);
                kunmap(rbio->stripe_pages[i]);
@@ -962,7 +962,7 @@ static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
  */
 static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
 {
-       return DIV_ROUND_UP(stripe_len, PAGE_CACHE_SIZE) * nr_stripes;
+       return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
 }
 
 /*
@@ -1078,7 +1078,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
        u64 disk_start;
 
        stripe = &rbio->bbio->stripes[stripe_nr];
-       disk_start = stripe->physical + (page_index << PAGE_CACHE_SHIFT);
+       disk_start = stripe->physical + (page_index << PAGE_SHIFT);
 
        /* if the device is missing, just fail this stripe */
        if (!stripe->dev->bdev)
@@ -1096,8 +1096,8 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
                if (last_end == disk_start && stripe->dev->bdev &&
                    !last->bi_error &&
                    last->bi_bdev == stripe->dev->bdev) {
-                       ret = bio_add_page(last, page, PAGE_CACHE_SIZE, 0);
-                       if (ret == PAGE_CACHE_SIZE)
+                       ret = bio_add_page(last, page, PAGE_SIZE, 0);
+                       if (ret == PAGE_SIZE)
                                return 0;
                }
        }
@@ -1111,7 +1111,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
        bio->bi_bdev = stripe->dev->bdev;
        bio->bi_iter.bi_sector = disk_start >> 9;
 
-       bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
+       bio_add_page(bio, page, PAGE_SIZE, 0);
        bio_list_add(bio_list, bio);
        return 0;
 }
@@ -1154,7 +1154,7 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio)
        bio_list_for_each(bio, &rbio->bio_list) {
                start = (u64)bio->bi_iter.bi_sector << 9;
                stripe_offset = start - rbio->bbio->raid_map[0];
-               page_index = stripe_offset >> PAGE_CACHE_SHIFT;
+               page_index = stripe_offset >> PAGE_SHIFT;
 
                for (i = 0; i < bio->bi_vcnt; i++) {
                        p = bio->bi_io_vec[i].bv_page;
@@ -1253,7 +1253,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
                } else {
                        /* raid5 */
                        memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
-                       run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE);
+                       run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
                }
 
 
@@ -1914,7 +1914,7 @@ pstripe:
                        /* Copy parity block into failed block to start with */
                        memcpy(pointers[faila],
                               pointers[rbio->nr_data],
-                              PAGE_CACHE_SIZE);
+                              PAGE_SIZE);
 
                        /* rearrange the pointer array */
                        p = pointers[faila];
@@ -1923,7 +1923,7 @@ pstripe:
                        pointers[rbio->nr_data - 1] = p;
 
                        /* xor in the rest */
-                       run_xor(pointers, rbio->nr_data - 1, PAGE_CACHE_SIZE);
+                       run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
                }
                /* if we're doing this rebuild as part of an rmw, go through
                 * and set all of our private rbio pages in the
@@ -2250,7 +2250,7 @@ void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
        ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
                                rbio->stripe_len * rbio->nr_data);
        stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
-       index = stripe_offset >> PAGE_CACHE_SHIFT;
+       index = stripe_offset >> PAGE_SHIFT;
        rbio->bio_pages[index] = page;
 }
 
@@ -2365,14 +2365,14 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
                } else {
                        /* raid5 */
                        memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
-                       run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE);
+                       run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
                }
 
                /* Check scrubbing parity and repair it */
                p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
                parity = kmap(p);
-               if (memcmp(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE))
-                       memcpy(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE);
+               if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
+                       memcpy(parity, pointers[rbio->scrubp], PAGE_SIZE);
                else
                        /* Parity is right, needn't writeback */
                        bitmap_clear(rbio->dbitmap, pagenr, 1);
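
The final raid56.c hunk shows the scrub path's repair rule: freshly computed parity is compared against the on-disk page, which is rewritten only on mismatch; a matching page is simply cleared from the writeback bitmap. A minimal sketch of that check, assuming the caller supplies both buffers:

#include <string.h>
#include <stddef.h>

/* returns 1 when the page must be written back, 0 when the on-disk
 * parity already matches and the writeback bit can be cleared */
static int parity_needs_writeback(unsigned char *on_disk,
				  const unsigned char *computed, size_t len)
{
	if (memcmp(on_disk, computed, len) == 0)
		return 0;                  /* parity correct: skip write */
	memcpy(on_disk, computed, len);    /* stale: replace and rewrite */
	return 1;
}

Rewriting only on mismatch keeps scrub from generating writes for the common case of healthy parity.
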
index b892914968c1852751463d9d14eb5023fbb99df7..298631eaee78c1ba26a05e4d97979edbc425f3a7 100644 (file)
@@ -226,7 +226,7 @@ int btree_readahead_hook(struct btrfs_fs_info *fs_info,
        /* find extent */
        spin_lock(&fs_info->reada_lock);
        re = radix_tree_lookup(&fs_info->reada_tree,
-                              start >> PAGE_CACHE_SHIFT);
+                              start >> PAGE_SHIFT);
        if (re)
                re->refcnt++;
        spin_unlock(&fs_info->reada_lock);
@@ -257,7 +257,7 @@ static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
        zone = NULL;
        spin_lock(&fs_info->reada_lock);
        ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
-                                    logical >> PAGE_CACHE_SHIFT, 1);
+                                    logical >> PAGE_SHIFT, 1);
        if (ret == 1 && logical >= zone->start && logical <= zone->end) {
                kref_get(&zone->refcnt);
                spin_unlock(&fs_info->reada_lock);
@@ -294,13 +294,13 @@ static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
 
        spin_lock(&fs_info->reada_lock);
        ret = radix_tree_insert(&dev->reada_zones,
-                               (unsigned long)(zone->end >> PAGE_CACHE_SHIFT),
+                               (unsigned long)(zone->end >> PAGE_SHIFT),
                                zone);
 
        if (ret == -EEXIST) {
                kfree(zone);
                ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
-                                            logical >> PAGE_CACHE_SHIFT, 1);
+                                            logical >> PAGE_SHIFT, 1);
                if (ret == 1 && logical >= zone->start && logical <= zone->end)
                        kref_get(&zone->refcnt);
                else
@@ -326,7 +326,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
        u64 length;
        int real_stripes;
        int nzones = 0;
-       unsigned long index = logical >> PAGE_CACHE_SHIFT;
+       unsigned long index = logical >> PAGE_SHIFT;
        int dev_replace_is_ongoing;
        int have_zone = 0;
 
@@ -495,7 +495,7 @@ static void reada_extent_put(struct btrfs_fs_info *fs_info,
                             struct reada_extent *re)
 {
        int i;
-       unsigned long index = re->logical >> PAGE_CACHE_SHIFT;
+       unsigned long index = re->logical >> PAGE_SHIFT;
 
        spin_lock(&fs_info->reada_lock);
        if (--re->refcnt) {
@@ -538,7 +538,7 @@ static void reada_zone_release(struct kref *kref)
        struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);
 
        radix_tree_delete(&zone->device->reada_zones,
-                         zone->end >> PAGE_CACHE_SHIFT);
+                         zone->end >> PAGE_SHIFT);
 
        kfree(zone);
 }
@@ -587,7 +587,7 @@ static int reada_add_block(struct reada_control *rc, u64 logical,
 static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock)
 {
        int i;
-       unsigned long index = zone->end >> PAGE_CACHE_SHIFT;
+       unsigned long index = zone->end >> PAGE_SHIFT;
 
        for (i = 0; i < zone->ndevs; ++i) {
                struct reada_zone *peer;
@@ -622,7 +622,7 @@ static int reada_pick_zone(struct btrfs_device *dev)
                                             (void **)&zone, index, 1);
                if (ret == 0)
                        break;
-               index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
+               index = (zone->end >> PAGE_SHIFT) + 1;
                if (zone->locked) {
                        if (zone->elems > top_locked_elems) {
                                top_locked_elems = zone->elems;
@@ -673,7 +673,7 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
         * plugging to speed things up
         */
        ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
-                                    dev->reada_next >> PAGE_CACHE_SHIFT, 1);
+                                    dev->reada_next >> PAGE_SHIFT, 1);
        if (ret == 0 || re->logical > dev->reada_curr_zone->end) {
                ret = reada_pick_zone(dev);
                if (!ret) {
@@ -682,7 +682,7 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
                }
                re = NULL;
                ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
-                                       dev->reada_next >> PAGE_CACHE_SHIFT, 1);
+                                       dev->reada_next >> PAGE_SHIFT, 1);
        }
        if (ret == 0) {
                spin_unlock(&fs_info->reada_lock);
@@ -838,7 +838,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
                                printk(KERN_CONT " curr off %llu",
                                        device->reada_next - zone->start);
                        printk(KERN_CONT "\n");
-                       index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
+                       index = (zone->end >> PAGE_SHIFT) + 1;
                }
                cnt = 0;
                index = 0;
@@ -864,7 +864,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
                                }
                        }
                        printk(KERN_CONT "\n");
-                       index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
+                       index = (re->logical >> PAGE_SHIFT) + 1;
                        if (++cnt > 15)
                                break;
                }
@@ -880,7 +880,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
                if (ret == 0)
                        break;
                if (!re->scheduled) {
-                       index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
+                       index = (re->logical >> PAGE_SHIFT) + 1;
                        continue;
                }
                printk(KERN_DEBUG
@@ -897,7 +897,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
                        }
                }
                printk(KERN_CONT "\n");
-               index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
+               index = (re->logical >> PAGE_SHIFT) + 1;
        }
        spin_unlock(&fs_info->reada_lock);
 }
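
Throughout reada.c, zones and extents live in radix trees keyed by page index rather than byte address, so every insert and lookup shifts by PAGE_SHIFT, and iteration resumes at the index one past a zone's end. A small sketch of the key arithmetic (PAGE_SHIFT fixed at 12 for illustration):

#include <stdint.h>

#define PAGE_SHIFT 12                      /* illustrative 4 KiB pages */

/* byte address -> radix tree key */
static unsigned long reada_index(uint64_t logical)
{
	return (unsigned long)(logical >> PAGE_SHIFT);
}

/* key to resume iteration after a zone ending at `end`,
 * as in the `+ 1` loops above */
static unsigned long reada_next_index(uint64_t end)
{
	return (unsigned long)((end >> PAGE_SHIFT) + 1);
}
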
index 2bd0011450df2715ca926a6ccd2d99cb7d40f8b4..08ef890deca69fdce7a35d26bcf5ae45d9f67d8b 100644 (file)
@@ -1850,6 +1850,7 @@ again:
                        eb = read_tree_block(dest, old_bytenr, old_ptr_gen);
                        if (IS_ERR(eb)) {
                                ret = PTR_ERR(eb);
+                               break;
                        } else if (!extent_buffer_uptodate(eb)) {
                                ret = -EIO;
                                free_extent_buffer(eb);
@@ -3129,10 +3130,10 @@ static int relocate_file_extent_cluster(struct inode *inode,
        if (ret)
                goto out;
 
-       index = (cluster->start - offset) >> PAGE_CACHE_SHIFT;
-       last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT;
+       index = (cluster->start - offset) >> PAGE_SHIFT;
+       last_index = (cluster->end - offset) >> PAGE_SHIFT;
        while (index <= last_index) {
-               ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE);
+               ret = btrfs_delalloc_reserve_metadata(inode, PAGE_SIZE);
                if (ret)
                        goto out;
 
@@ -3145,7 +3146,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
                                                   mask);
                        if (!page) {
                                btrfs_delalloc_release_metadata(inode,
-                                                       PAGE_CACHE_SIZE);
+                                                       PAGE_SIZE);
                                ret = -ENOMEM;
                                goto out;
                        }
@@ -3162,16 +3163,16 @@ static int relocate_file_extent_cluster(struct inode *inode,
                        lock_page(page);
                        if (!PageUptodate(page)) {
                                unlock_page(page);
-                               page_cache_release(page);
+                               put_page(page);
                                btrfs_delalloc_release_metadata(inode,
-                                                       PAGE_CACHE_SIZE);
+                                                       PAGE_SIZE);
                                ret = -EIO;
                                goto out;
                        }
                }
 
                page_start = page_offset(page);
-               page_end = page_start + PAGE_CACHE_SIZE - 1;
+               page_end = page_start + PAGE_SIZE - 1;
 
                lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);
 
@@ -3191,7 +3192,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
                unlock_extent(&BTRFS_I(inode)->io_tree,
                              page_start, page_end);
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
 
                index++;
                balance_dirty_pages_ratelimited(inode->i_mapping);
index 39dbdcbf4d134b6104328cbbdfbc98289b68bdfe..4678f03e878e6b7050e28314432fa120a76f5d67 100644 (file)
@@ -703,7 +703,7 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
        if (IS_ERR(inode))
                return PTR_ERR(inode);
 
-       index = offset >> PAGE_CACHE_SHIFT;
+       index = offset >> PAGE_SHIFT;
 
        page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
        if (!page) {
@@ -1636,7 +1636,7 @@ static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
        if (spage->io_error) {
                void *mapped_buffer = kmap_atomic(spage->page);
 
-               memset(mapped_buffer, 0, PAGE_CACHE_SIZE);
+               memset(mapped_buffer, 0, PAGE_SIZE);
                flush_dcache_page(spage->page);
                kunmap_atomic(mapped_buffer);
        }
@@ -4294,8 +4294,8 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
                goto out;
        }
 
-       while (len >= PAGE_CACHE_SIZE) {
-               index = offset >> PAGE_CACHE_SHIFT;
+       while (len >= PAGE_SIZE) {
+               index = offset >> PAGE_SHIFT;
 again:
                page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
                if (!page) {
@@ -4326,7 +4326,7 @@ again:
                         */
                        if (page->mapping != inode->i_mapping) {
                                unlock_page(page);
-                               page_cache_release(page);
+                               put_page(page);
                                goto again;
                        }
                        if (!PageUptodate(page)) {
@@ -4348,15 +4348,15 @@ again:
                        ret = err;
 next_page:
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
 
                if (ret)
                        break;
 
-               offset += PAGE_CACHE_SIZE;
-               physical_for_dev_replace += PAGE_CACHE_SIZE;
-               nocow_ctx_logical += PAGE_CACHE_SIZE;
-               len -= PAGE_CACHE_SIZE;
+               offset += PAGE_SIZE;
+               physical_for_dev_replace += PAGE_SIZE;
+               nocow_ctx_logical += PAGE_SIZE;
+               len -= PAGE_SIZE;
        }
        ret = COPY_COMPLETE;
 out:
@@ -4390,8 +4390,8 @@ static int write_page_nocow(struct scrub_ctx *sctx,
        bio->bi_iter.bi_size = 0;
        bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
        bio->bi_bdev = dev->bdev;
-       ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
-       if (ret != PAGE_CACHE_SIZE) {
+       ret = bio_add_page(bio, page, PAGE_SIZE, 0);
+       if (ret != PAGE_SIZE) {
 leave_with_eio:
                bio_put(bio);
                btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
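
copy_nocow_pages_for_inode() above walks three cursors in lock step, one page per iteration, stopping when less than a full page remains. A runnable sketch of that bookkeeping, with made-up starting positions:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL                   /* illustrative 4 KiB page */

int main(void)
{
	/* arbitrary starting positions for illustration */
	uint64_t offset = 0;               /* file offset */
	uint64_t physical = 1ULL << 20;    /* dev-replace target position */
	uint64_t logical = 1ULL << 26;     /* nocow logical cursor */
	uint64_t len = 3 * PAGE_SIZE + 100;

	while (len >= PAGE_SIZE) {         /* stop short of a partial page */
		printf("copy page at off=%llu phys=%llu log=%llu\n",
		       (unsigned long long)offset,
		       (unsigned long long)physical,
		       (unsigned long long)logical);
		offset   += PAGE_SIZE;
		physical += PAGE_SIZE;
		logical  += PAGE_SIZE;
		len      -= PAGE_SIZE;
	}
	return 0;
}
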
index 19b7bf4284ee9486964f1929930bd9e23d43a607..8d358c547c59fd8d010d27cf1180a04d51be5fe5 100644 (file)
@@ -4449,9 +4449,9 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
        struct page *page;
        char *addr;
        struct btrfs_key key;
-       pgoff_t index = offset >> PAGE_CACHE_SHIFT;
+       pgoff_t index = offset >> PAGE_SHIFT;
        pgoff_t last_index;
-       unsigned pg_offset = offset & ~PAGE_CACHE_MASK;
+       unsigned pg_offset = offset & ~PAGE_MASK;
        ssize_t ret = 0;
 
        key.objectid = sctx->cur_ino;
@@ -4471,7 +4471,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
        if (len == 0)
                goto out;
 
-       last_index = (offset + len - 1) >> PAGE_CACHE_SHIFT;
+       last_index = (offset + len - 1) >> PAGE_SHIFT;
 
        /* initial readahead */
        memset(&sctx->ra, 0, sizeof(struct file_ra_state));
@@ -4481,7 +4481,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
 
        while (index <= last_index) {
                unsigned cur_len = min_t(unsigned, len,
-                                        PAGE_CACHE_SIZE - pg_offset);
+                                        PAGE_SIZE - pg_offset);
                page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
                if (!page) {
                        ret = -ENOMEM;
@@ -4493,7 +4493,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
                        lock_page(page);
                        if (!PageUptodate(page)) {
                                unlock_page(page);
-                               page_cache_release(page);
+                               put_page(page);
                                ret = -EIO;
                                break;
                        }
@@ -4503,7 +4503,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
                memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len);
                kunmap(page);
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
                index++;
                pg_offset = 0;
                len -= cur_len;
@@ -4804,7 +4804,7 @@ static int clone_range(struct send_ctx *sctx,
                type = btrfs_file_extent_type(leaf, ei);
                if (type == BTRFS_FILE_EXTENT_INLINE) {
                        ext_len = btrfs_file_extent_inline_len(leaf, slot, ei);
-                       ext_len = PAGE_CACHE_ALIGN(ext_len);
+                       ext_len = PAGE_ALIGN(ext_len);
                } else {
                        ext_len = btrfs_file_extent_num_bytes(leaf, ei);
                }
@@ -4886,7 +4886,7 @@ static int send_write_or_clone(struct send_ctx *sctx,
                 * but there may be items after this page.  Make
                 * sure to send the whole thing
                 */
-               len = PAGE_CACHE_ALIGN(len);
+               len = PAGE_ALIGN(len);
        } else {
                len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
        }
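
Both call sites above round an inline extent's length up to a page boundary before deciding how much to send. PAGE_ALIGN is plain round-up arithmetic; a short illustration, assuming 4 KiB pages:

    /* PAGE_ALIGN(x) expands to ((x + PAGE_SIZE - 1) & PAGE_MASK) */
    PAGE_ALIGN(1);     /* -> 4096                   */
    PAGE_ALIGN(4096);  /* -> 4096 (already aligned) */
    PAGE_ALIGN(4097);  /* -> 8192                   */
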
diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
index b976597b07216c9cc17f87b40719d56c9b5d4dc8..e05619f241be3b2408490ef8c25d030798737755 100644
@@ -66,7 +66,7 @@ u##bits btrfs_get_token_##bits(struct extent_buffer *eb, void *ptr,   \
                                                                        \
        if (token && token->kaddr && token->offset <= offset &&         \
            token->eb == eb &&                                          \
-          (token->offset + PAGE_CACHE_SIZE >= offset + size)) {        \
+          (token->offset + PAGE_SIZE >= offset + size)) {      \
                kaddr = token->kaddr;                                   \
                p = kaddr + part_offset - token->offset;                \
                res = get_unaligned_le##bits(p + off);                  \
@@ -104,7 +104,7 @@ void btrfs_set_token_##bits(struct extent_buffer *eb,                       \
                                                                        \
        if (token && token->kaddr && token->offset <= offset &&         \
            token->eb == eb &&                                          \
-          (token->offset + PAGE_CACHE_SIZE >= offset + size)) {        \
+          (token->offset + PAGE_SIZE >= offset + size)) {      \
                kaddr = token->kaddr;                                   \
                p = kaddr + part_offset - token->offset;                \
                put_unaligned_le##bits(val, p + off);                   \
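
The token test rewritten in both macros above is a one-page cache-hit check: the cached kmap spans PAGE_SIZE bytes from token->offset, so a field of `size` bytes at `offset` may reuse it only if the whole field lies inside that window. A standalone sketch of the predicate (helper name hypothetical):

    /* true iff [offset, offset + size) fits in the page cached at tok_off */
    static inline bool token_covers(unsigned long tok_off,
                                    unsigned long offset, u32 size)
    {
            return tok_off <= offset && tok_off + PAGE_SIZE >= offset + size;
    }
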
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
index 669b58201e36881fb1434d76fd787a45329f042e..70948b13bc811fbe24592a32fec4d45e4459a4ac 100644
@@ -32,8 +32,8 @@ static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
 {
        int ret;
        struct page *pages[16];
-       unsigned long index = start >> PAGE_CACHE_SHIFT;
-       unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+       unsigned long index = start >> PAGE_SHIFT;
+       unsigned long end_index = end >> PAGE_SHIFT;
        unsigned long nr_pages = end_index - index + 1;
        int i;
        int count = 0;
@@ -49,9 +49,9 @@ static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
                                count++;
                        if (flags & PROCESS_UNLOCK && PageLocked(pages[i]))
                                unlock_page(pages[i]);
-                       page_cache_release(pages[i]);
+                       put_page(pages[i]);
                        if (flags & PROCESS_RELEASE)
-                               page_cache_release(pages[i]);
+                               put_page(pages[i]);
                }
                nr_pages -= ret;
                index += ret;
@@ -93,7 +93,7 @@ static int test_find_delalloc(void)
         * everything to make sure our pages don't get evicted and screw up our
         * test.
         */
-       for (index = 0; index < (total_dirty >> PAGE_CACHE_SHIFT); index++) {
+       for (index = 0; index < (total_dirty >> PAGE_SHIFT); index++) {
                page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
                if (!page) {
                        test_msg("Failed to allocate test page\n");
@@ -104,7 +104,7 @@ static int test_find_delalloc(void)
                if (index) {
                        unlock_page(page);
                } else {
-                       page_cache_get(page);
+                       get_page(page);
                        locked_page = page;
                }
        }
@@ -129,7 +129,7 @@ static int test_find_delalloc(void)
        }
        unlock_extent(&tmp, start, end);
        unlock_page(locked_page);
-       page_cache_release(locked_page);
+       put_page(locked_page);
 
        /*
         * Test this scenario
@@ -139,7 +139,7 @@ static int test_find_delalloc(void)
         */
        test_start = SZ_64M;
        locked_page = find_lock_page(inode->i_mapping,
-                                    test_start >> PAGE_CACHE_SHIFT);
+                                    test_start >> PAGE_SHIFT);
        if (!locked_page) {
                test_msg("Couldn't find the locked page\n");
                goto out_bits;
@@ -165,7 +165,7 @@ static int test_find_delalloc(void)
        }
        unlock_extent(&tmp, start, end);
        /* locked_page was unlocked above */
-       page_cache_release(locked_page);
+       put_page(locked_page);
 
        /*
         * Test this scenario
@@ -174,7 +174,7 @@ static int test_find_delalloc(void)
         */
        test_start = max_bytes + 4096;
        locked_page = find_lock_page(inode->i_mapping, test_start >>
-                                    PAGE_CACHE_SHIFT);
+                                    PAGE_SHIFT);
        if (!locked_page) {
                test_msg("Could'nt find the locked page\n");
                goto out_bits;
@@ -225,13 +225,13 @@ static int test_find_delalloc(void)
         * range we want to find.
         */
        page = find_get_page(inode->i_mapping,
-                            (max_bytes + SZ_1M) >> PAGE_CACHE_SHIFT);
+                            (max_bytes + SZ_1M) >> PAGE_SHIFT);
        if (!page) {
                test_msg("Couldn't find our page\n");
                goto out_bits;
        }
        ClearPageDirty(page);
-       page_cache_release(page);
+       put_page(page);
 
        /* We unlocked it in the previous test */
        lock_page(locked_page);
@@ -239,7 +239,7 @@ static int test_find_delalloc(void)
        end = 0;
        /*
         * Currently if we fail to find dirty pages in the delalloc range we
-        * will adjust max_bytes down to PAGE_CACHE_SIZE and then re-search.  If
+        * will adjust max_bytes down to PAGE_SIZE and then re-search.  If
         * this changes at any point in the future we will need to fix this
         * test's expected behavior.
         */
@@ -249,9 +249,9 @@ static int test_find_delalloc(void)
                test_msg("Didn't find our range\n");
                goto out_bits;
        }
-       if (start != test_start && end != test_start + PAGE_CACHE_SIZE - 1) {
+       if (start != test_start && end != test_start + PAGE_SIZE - 1) {
                test_msg("Expected start %Lu end %Lu, got start %Lu end %Lu\n",
-                        test_start, test_start + PAGE_CACHE_SIZE - 1, start,
+                        test_start, test_start + PAGE_SIZE - 1, start,
                         end);
                goto out_bits;
        }
@@ -265,7 +265,7 @@ out_bits:
        clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1, GFP_KERNEL);
 out:
        if (locked_page)
-               page_cache_release(locked_page);
+               put_page(locked_page);
        process_page_range(inode, 0, total_dirty - 1,
                           PROCESS_UNLOCK | PROCESS_RELEASE);
        iput(inode);
@@ -298,9 +298,9 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
                return -EINVAL;
        }
 
-       bitmap_set(bitmap, (PAGE_CACHE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
+       bitmap_set(bitmap, (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
                   sizeof(long) * BITS_PER_BYTE);
-       extent_buffer_bitmap_set(eb, PAGE_CACHE_SIZE - sizeof(long) / 2, 0,
+       extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0,
                                 sizeof(long) * BITS_PER_BYTE);
        if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
                test_msg("Setting straddling pages failed\n");
@@ -309,10 +309,10 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
 
        bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
        bitmap_clear(bitmap,
-                    (PAGE_CACHE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
+                    (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
                     sizeof(long) * BITS_PER_BYTE);
        extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
-       extent_buffer_bitmap_clear(eb, PAGE_CACHE_SIZE - sizeof(long) / 2, 0,
+       extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0,
                                   sizeof(long) * BITS_PER_BYTE);
        if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
                test_msg("Clearing straddling pages failed\n");
@@ -353,7 +353,7 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
 
 static int test_eb_bitmaps(void)
 {
-       unsigned long len = PAGE_CACHE_SIZE * 4;
+       unsigned long len = PAGE_SIZE * 4;
        unsigned long *bitmap;
        struct extent_buffer *eb;
        int ret;
@@ -379,7 +379,7 @@ static int test_eb_bitmaps(void)
 
        /* Do it over again with an extent buffer which isn't page-aligned. */
        free_extent_buffer(eb);
-       eb = __alloc_dummy_extent_buffer(NULL, PAGE_CACHE_SIZE / 2, len);
+       eb = __alloc_dummy_extent_buffer(NULL, PAGE_SIZE / 2, len);
        if (!eb) {
                test_msg("Couldn't allocate test extent buffer\n");
                kfree(bitmap);
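
The straddling cases in __test_eb_bitmaps() above start a run of sizeof(long) * BITS_PER_BYTE bits half a long before the first page boundary, so the run is split across two extent-buffer pages. The offset arithmetic, worked through for 4 KiB pages and 8-byte longs (values illustrative):

    unsigned long byte_start = PAGE_SIZE - sizeof(long) / 2;  /* 4092  */
    unsigned long bit_start  = byte_start * BITS_PER_BYTE;    /* 32736 */
    unsigned long nbits      = sizeof(long) * BITS_PER_BYTE;  /* 64    */
    /* the run covers bytes [4092, 4100): four in page 0, four in page 1 */
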
diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
index c9ad97b1e690900c998ae5d0267622ac6b9f6364..514247515312adc7dbf77f4d5fa154c76a7bcda0 100644
@@ -22,7 +22,7 @@
 #include "../disk-io.h"
 #include "../free-space-cache.h"
 
-#define BITS_PER_BITMAP                (PAGE_CACHE_SIZE * 8)
+#define BITS_PER_BITMAP                (PAGE_SIZE * 8)
 
 /*
  * This test just does basic sanity checking, making sure we can add an extent
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 24d03c751149f56e484e32c3b1120dceb495b299..517d0ccb351e205a4f415c15c4735c3a76647b91 100644
@@ -4415,6 +4415,127 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
        return ret;
 }
 
+/*
+ * When we are logging a new inode X, check if it doesn't have a reference that
+ * matches the reference from some other inode Y created in a past transaction
+ * and that was renamed in the current transaction. If we don't do this, then at
+ * log replay time we can lose inode Y (and all its files if it's a directory):
+ *
+ * mkdir /mnt/x
+ * echo "hello world" > /mnt/x/foobar
+ * sync
+ * mv /mnt/x /mnt/y
+ * mkdir /mnt/x                 # or touch /mnt/x
+ * xfs_io -c fsync /mnt/x
+ * <power fail>
+ * mount fs, trigger log replay
+ *
+ * After the log replay procedure, we would lose the first directory and all its
+ * files (file foobar).
+ * For the case where inode Y is not a directory we simply end up losing it:
+ *
+ * echo "123" > /mnt/foo
+ * sync
+ * mv /mnt/foo /mnt/bar
+ * echo "abc" > /mnt/foo
+ * xfs_io -c fsync /mnt/foo
+ * <power fail>
+ *
+ * We also need this for cases where a snapshot entry is replaced by some other
+ * entry (file or directory) otherwise we end up with an unreplayable log due to
+ * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
+ * if it were a regular entry:
+ *
+ * mkdir /mnt/x
+ * btrfs subvolume snapshot /mnt /mnt/x/snap
+ * btrfs subvolume delete /mnt/x/snap
+ * rmdir /mnt/x
+ * mkdir /mnt/x
+ * fsync /mnt/x or fsync some new file inside it
+ * <power fail>
+ *
+ * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
+ * the same transaction.
+ */
+static int btrfs_check_ref_name_override(struct extent_buffer *eb,
+                                        const int slot,
+                                        const struct btrfs_key *key,
+                                        struct inode *inode)
+{
+       int ret;
+       struct btrfs_path *search_path;
+       char *name = NULL;
+       u32 name_len = 0;
+       u32 item_size = btrfs_item_size_nr(eb, slot);
+       u32 cur_offset = 0;
+       unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
+
+       search_path = btrfs_alloc_path();
+       if (!search_path)
+               return -ENOMEM;
+       search_path->search_commit_root = 1;
+       search_path->skip_locking = 1;
+
+       while (cur_offset < item_size) {
+               u64 parent;
+               u32 this_name_len;
+               u32 this_len;
+               unsigned long name_ptr;
+               struct btrfs_dir_item *di;
+
+               if (key->type == BTRFS_INODE_REF_KEY) {
+                       struct btrfs_inode_ref *iref;
+
+                       iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
+                       parent = key->offset;
+                       this_name_len = btrfs_inode_ref_name_len(eb, iref);
+                       name_ptr = (unsigned long)(iref + 1);
+                       this_len = sizeof(*iref) + this_name_len;
+               } else {
+                       struct btrfs_inode_extref *extref;
+
+                       extref = (struct btrfs_inode_extref *)(ptr +
+                                                              cur_offset);
+                       parent = btrfs_inode_extref_parent(eb, extref);
+                       this_name_len = btrfs_inode_extref_name_len(eb, extref);
+                       name_ptr = (unsigned long)&extref->name;
+                       this_len = sizeof(*extref) + this_name_len;
+               }
+
+               if (this_name_len > name_len) {
+                       char *new_name;
+
+                       new_name = krealloc(name, this_name_len, GFP_NOFS);
+                       if (!new_name) {
+                               ret = -ENOMEM;
+                               goto out;
+                       }
+                       name_len = this_name_len;
+                       name = new_name;
+               }
+
+               read_extent_buffer(eb, name, name_ptr, this_name_len);
+               di = btrfs_lookup_dir_item(NULL, BTRFS_I(inode)->root,
+                                          search_path, parent,
+                                          name, this_name_len, 0);
+               if (di && !IS_ERR(di)) {
+                       ret = 1;
+                       goto out;
+               } else if (IS_ERR(di)) {
+                       ret = PTR_ERR(di);
+                       goto out;
+               }
+               btrfs_release_path(search_path);
+
+               cur_offset += this_len;
+       }
+       ret = 0;
+out:
+       btrfs_free_path(search_path);
+       kfree(name);
+       return ret;
+}
+
 /* log a single inode in the tree log.
  * At least one parent directory for this inode must exist in the tree
  * or be logged already.
@@ -4602,6 +4723,22 @@ again:
                if (min_key.type == BTRFS_INODE_ITEM_KEY)
                        need_log_inode_item = false;
 
+               if ((min_key.type == BTRFS_INODE_REF_KEY ||
+                    min_key.type == BTRFS_INODE_EXTREF_KEY) &&
+                   BTRFS_I(inode)->generation == trans->transid) {
+                       ret = btrfs_check_ref_name_override(path->nodes[0],
+                                                           path->slots[0],
+                                                           &min_key, inode);
+                       if (ret < 0) {
+                               err = ret;
+                               goto out_unlock;
+                       } else if (ret > 0) {
+                               err = 1;
+                               btrfs_set_log_full_commit(root->fs_info, trans);
+                               goto out_unlock;
+                       }
+               }
+
                /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
                if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
                        if (ins_nr == 0)
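
The caller added above consumes btrfs_check_ref_name_override() as a three-way contract; restating the convention the hunk relies on (comment sketch, not new code):

    /* return values as consumed by the logging loop above:
     *   < 0  error: propagate via err and abort logging
     *   > 0  a (parent, name) match exists in the commit root, i.e. one
     *        of the rename cases from the comment block: force a full
     *        transaction commit rather than write an unreplayable log
     *  == 0  no conflict: keep logging this inode
     */
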
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index e2b54d546b7c0572c2188d7dfe26c3f96746b337..bd0f45fb38c462bf932462cfda57bcc2baac63cc 100644
@@ -1025,16 +1025,16 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
        }
 
        /* make sure our super fits in the device */
-       if (bytenr + PAGE_CACHE_SIZE >= i_size_read(bdev->bd_inode))
+       if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
                goto error_bdev_put;
 
        /* make sure our super fits in the page */
-       if (sizeof(*disk_super) > PAGE_CACHE_SIZE)
+       if (sizeof(*disk_super) > PAGE_SIZE)
                goto error_bdev_put;
 
        /* make sure our super doesn't straddle pages on disk */
-       index = bytenr >> PAGE_CACHE_SHIFT;
-       if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_CACHE_SHIFT != index)
+       index = bytenr >> PAGE_SHIFT;
+       if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
                goto error_bdev_put;
 
        /* pull in the page with our super */
@@ -1047,7 +1047,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
        p = kmap(page);
 
        /* align our pointer to the offset of the super block */
-       disk_super = p + (bytenr & ~PAGE_CACHE_MASK);
+       disk_super = p + (bytenr & ~PAGE_MASK);
 
        if (btrfs_super_bytenr(disk_super) != bytenr ||
            btrfs_super_magic(disk_super) != BTRFS_MAGIC)
@@ -1075,7 +1075,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
 
 error_unmap:
        kunmap(page);
-       page_cache_release(page);
+       put_page(page);
 
 error_bdev_put:
        blkdev_put(bdev, flags);
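
The three guards in btrfs_scan_one_device() above ensure the superblock candidate is readable through a single page: it must end before the device does, fit in one page, and not cross a page boundary. The last guard is the usual same-page-index test; a minimal sketch (helper name hypothetical):

    /* true iff [bytenr, bytenr + len) lies entirely within one page */
    static bool fits_in_one_page(u64 bytenr, u64 len)
    {
            return (bytenr >> PAGE_SHIFT) ==
                   ((bytenr + len - 1) >> PAGE_SHIFT);
    }
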
@@ -6527,7 +6527,7 @@ int btrfs_read_sys_array(struct btrfs_root *root)
         * but sb spans only this function. Add an explicit SetPageUptodate call
         * to silence the warning eg. on PowerPC 64.
         */
-       if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
+       if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
                SetPageUptodate(sb->pages[0]);
 
        write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index 82990b8f872b6d455be25196ee88a97eb82bd932..88d274e8ecf227d9eebb1e525a6d328a7dd73d8b 100644
@@ -59,7 +59,7 @@ static struct list_head *zlib_alloc_workspace(void)
        workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
                        zlib_inflate_workspacesize());
        workspace->strm.workspace = vmalloc(workspacesize);
-       workspace->buf = kmalloc(PAGE_CACHE_SIZE, GFP_NOFS);
+       workspace->buf = kmalloc(PAGE_SIZE, GFP_NOFS);
        if (!workspace->strm.workspace || !workspace->buf)
                goto fail;
 
@@ -103,7 +103,7 @@ static int zlib_compress_pages(struct list_head *ws,
        workspace->strm.total_in = 0;
        workspace->strm.total_out = 0;
 
-       in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
+       in_page = find_get_page(mapping, start >> PAGE_SHIFT);
        data_in = kmap(in_page);
 
        out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
@@ -117,8 +117,8 @@ static int zlib_compress_pages(struct list_head *ws,
 
        workspace->strm.next_in = data_in;
        workspace->strm.next_out = cpage_out;
-       workspace->strm.avail_out = PAGE_CACHE_SIZE;
-       workspace->strm.avail_in = min(len, PAGE_CACHE_SIZE);
+       workspace->strm.avail_out = PAGE_SIZE;
+       workspace->strm.avail_in = min(len, PAGE_SIZE);
 
        while (workspace->strm.total_in < len) {
                ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH);
@@ -156,7 +156,7 @@ static int zlib_compress_pages(struct list_head *ws,
                        cpage_out = kmap(out_page);
                        pages[nr_pages] = out_page;
                        nr_pages++;
-                       workspace->strm.avail_out = PAGE_CACHE_SIZE;
+                       workspace->strm.avail_out = PAGE_SIZE;
                        workspace->strm.next_out = cpage_out;
                }
                /* we're all done */
@@ -170,14 +170,14 @@ static int zlib_compress_pages(struct list_head *ws,
 
                        bytes_left = len - workspace->strm.total_in;
                        kunmap(in_page);
-                       page_cache_release(in_page);
+                       put_page(in_page);
 
-                       start += PAGE_CACHE_SIZE;
+                       start += PAGE_SIZE;
                        in_page = find_get_page(mapping,
-                                               start >> PAGE_CACHE_SHIFT);
+                                               start >> PAGE_SHIFT);
                        data_in = kmap(in_page);
                        workspace->strm.avail_in = min(bytes_left,
-                                                          PAGE_CACHE_SIZE);
+                                                          PAGE_SIZE);
                        workspace->strm.next_in = data_in;
                }
        }
@@ -205,7 +205,7 @@ out:
 
        if (in_page) {
                kunmap(in_page);
-               page_cache_release(in_page);
+               put_page(in_page);
        }
        return ret;
 }
@@ -223,18 +223,18 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
        size_t total_out = 0;
        unsigned long page_in_index = 0;
        unsigned long page_out_index = 0;
-       unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_CACHE_SIZE);
+       unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
        unsigned long buf_start;
        unsigned long pg_offset;
 
        data_in = kmap(pages_in[page_in_index]);
        workspace->strm.next_in = data_in;
-       workspace->strm.avail_in = min_t(size_t, srclen, PAGE_CACHE_SIZE);
+       workspace->strm.avail_in = min_t(size_t, srclen, PAGE_SIZE);
        workspace->strm.total_in = 0;
 
        workspace->strm.total_out = 0;
        workspace->strm.next_out = workspace->buf;
-       workspace->strm.avail_out = PAGE_CACHE_SIZE;
+       workspace->strm.avail_out = PAGE_SIZE;
        pg_offset = 0;
 
        /* If it's deflate, and it's got no preset dictionary, then
@@ -274,7 +274,7 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
                }
 
                workspace->strm.next_out = workspace->buf;
-               workspace->strm.avail_out = PAGE_CACHE_SIZE;
+               workspace->strm.avail_out = PAGE_SIZE;
 
                if (workspace->strm.avail_in == 0) {
                        unsigned long tmp;
@@ -288,7 +288,7 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
                        workspace->strm.next_in = data_in;
                        tmp = srclen - workspace->strm.total_in;
                        workspace->strm.avail_in = min(tmp,
-                                                          PAGE_CACHE_SIZE);
+                                                          PAGE_SIZE);
                }
        }
        if (ret != Z_STREAM_END)
@@ -325,7 +325,7 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
        workspace->strm.total_in = 0;
 
        workspace->strm.next_out = workspace->buf;
-       workspace->strm.avail_out = PAGE_CACHE_SIZE;
+       workspace->strm.avail_out = PAGE_SIZE;
        workspace->strm.total_out = 0;
        /* If it's deflate, and it's got no preset dictionary, then
           we can tell zlib to skip the adler32 check. */
@@ -368,8 +368,8 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
                else
                        buf_offset = 0;
 
-               bytes = min(PAGE_CACHE_SIZE - pg_offset,
-                           PAGE_CACHE_SIZE - buf_offset);
+               bytes = min(PAGE_SIZE - pg_offset,
+                           PAGE_SIZE - buf_offset);
                bytes = min(bytes, bytes_left);
 
                kaddr = kmap_atomic(dest_page);
@@ -380,7 +380,7 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
                bytes_left -= bytes;
 next:
                workspace->strm.next_out = workspace->buf;
-               workspace->strm.avail_out = PAGE_CACHE_SIZE;
+               workspace->strm.avail_out = PAGE_SIZE;
        }
 
        if (ret != Z_STREAM_END && bytes_left != 0)
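
zlib_compress_pages() above drives deflate one page of output at a time: whenever avail_out reaches zero it hands the full page off, maps a fresh one, and resets next_out/avail_out, refilling avail_in page by page on the input side. A hedged userspace analogue of the output half, written against stock zlib, with PAGE_SIZE standing in for a kmapped page:

    #include <stdio.h>
    #include <string.h>
    #include <zlib.h>

    #define PAGE_SIZE 4096  /* stands in for the kernel page size */

    /* Compress src[0..len) to stdout one PAGE_SIZE output buffer at a
     * time, mirroring the refill pattern in zlib_compress_pages(). */
    static int compress_paged(unsigned char *src, size_t len)
    {
            unsigned char out[PAGE_SIZE];
            z_stream strm;
            int ret;

            memset(&strm, 0, sizeof(strm));
            if (deflateInit(&strm, Z_DEFAULT_COMPRESSION) != Z_OK)
                    return -1;

            strm.next_in   = src;
            strm.avail_in  = len;
            strm.next_out  = out;
            strm.avail_out = PAGE_SIZE;

            do {
                    ret = deflate(&strm, Z_FINISH);
                    if (strm.avail_out == 0) {
                            /* "page" full: emit it and reset the window */
                            fwrite(out, 1, PAGE_SIZE, stdout);
                            strm.next_out  = out;
                            strm.avail_out = PAGE_SIZE;
                    }
            } while (ret == Z_OK);

            /* flush whatever remains in the final, partial buffer */
            fwrite(out, 1, PAGE_SIZE - strm.avail_out, stdout);
            deflateEnd(&strm);
            return ret == Z_STREAM_END ? 0 : -1;
    }
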
diff --git a/fs/buffer.c b/fs/buffer.c
index 33be29675358321117038674a770e814a690e9fb..af0d9a82a8edff4dd279657a56757cb76588e2f0 100644
@@ -129,7 +129,7 @@ __clear_page_buffers(struct page *page)
 {
        ClearPagePrivate(page);
        set_page_private(page, 0);
-       page_cache_release(page);
+       put_page(page);
 }
 
 static void buffer_io_error(struct buffer_head *bh, char *msg)
@@ -207,7 +207,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
        struct page *page;
        int all_mapped = 1;
 
-       index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
+       index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
        page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
        if (!page)
                goto out;
@@ -245,7 +245,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
        }
 out_unlock:
        spin_unlock(&bd_mapping->private_lock);
-       page_cache_release(page);
+       put_page(page);
 out:
        return ret;
 }
@@ -1040,7 +1040,7 @@ done:
        ret = (block < end_block) ? 1 : -ENXIO;
 failed:
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
        return ret;
 }
 
@@ -1533,7 +1533,7 @@ void block_invalidatepage(struct page *page, unsigned int offset,
        /*
         * Check for overflow
         */
-       BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
+       BUG_ON(stop > PAGE_SIZE || stop < length);
 
        head = page_buffers(page);
        bh = head;
@@ -1716,7 +1716,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
        blocksize = bh->b_size;
        bbits = block_size_bits(blocksize);
 
-       block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
+       block = (sector_t)page->index << (PAGE_SHIFT - bbits);
        last_block = (i_size_read(inode) - 1) >> bbits;
 
        /*
@@ -1894,7 +1894,7 @@ EXPORT_SYMBOL(page_zero_new_buffers);
 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
                get_block_t *get_block)
 {
-       unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+       unsigned from = pos & (PAGE_SIZE - 1);
        unsigned to = from + len;
        struct inode *inode = page->mapping->host;
        unsigned block_start, block_end;
@@ -1904,15 +1904,15 @@ int __block_write_begin(struct page *page, loff_t pos, unsigned len,
        struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
 
        BUG_ON(!PageLocked(page));
-       BUG_ON(from > PAGE_CACHE_SIZE);
-       BUG_ON(to > PAGE_CACHE_SIZE);
+       BUG_ON(from > PAGE_SIZE);
+       BUG_ON(to > PAGE_SIZE);
        BUG_ON(from > to);
 
        head = create_page_buffers(page, inode, 0);
        blocksize = head->b_size;
        bbits = block_size_bits(blocksize);
 
-       block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
+       block = (sector_t)page->index << (PAGE_SHIFT - bbits);
 
        for(bh = head, block_start = 0; bh != head || !block_start;
            block++, block_start=block_end, bh = bh->b_this_page) {
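
__block_write_full_page() and __block_write_begin() above both derive the first filesystem block of a page from its index: with bbits = log2(blocksize), each page holds 1 << (PAGE_SHIFT - bbits) blocks, so the shift generalizes the page arithmetic to sub-page block sizes. A worked example, assuming 4 KiB pages and 1 KiB blocks:

    /* PAGE_SHIFT = 12, bbits = 10: four blocks per page,
     * so page index 5 starts at block 5 << 2 == 20 */
    sector_t block = (sector_t)page_index << (PAGE_SHIFT - bbits);
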
@@ -2020,7 +2020,7 @@ static int __block_commit_write(struct inode *inode, struct page *page,
 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
                unsigned flags, struct page **pagep, get_block_t *get_block)
 {
-       pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+       pgoff_t index = pos >> PAGE_SHIFT;
        struct page *page;
        int status;
 
@@ -2031,7 +2031,7 @@ int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
        status = __block_write_begin(page, pos, len, get_block);
        if (unlikely(status)) {
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
                page = NULL;
        }
 
@@ -2047,7 +2047,7 @@ int block_write_end(struct file *file, struct address_space *mapping,
        struct inode *inode = mapping->host;
        unsigned start;
 
-       start = pos & (PAGE_CACHE_SIZE - 1);
+       start = pos & (PAGE_SIZE - 1);
 
        if (unlikely(copied < len)) {
                /*
@@ -2099,7 +2099,7 @@ int generic_write_end(struct file *file, struct address_space *mapping,
        }
 
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 
        if (old_size < pos)
                pagecache_isize_extended(inode, old_size, pos);
@@ -2136,9 +2136,9 @@ int block_is_partially_uptodate(struct page *page, unsigned long from,
 
        head = page_buffers(page);
        blocksize = head->b_size;
-       to = min_t(unsigned, PAGE_CACHE_SIZE - from, count);
+       to = min_t(unsigned, PAGE_SIZE - from, count);
        to = from + to;
-       if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
+       if (from < blocksize && to > PAGE_SIZE - blocksize)
                return 0;
 
        bh = head;
@@ -2181,7 +2181,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
        blocksize = head->b_size;
        bbits = block_size_bits(blocksize);
 
-       iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
+       iblock = (sector_t)page->index << (PAGE_SHIFT - bbits);
        lblock = (i_size_read(inode)+blocksize-1) >> bbits;
        bh = head;
        nr = 0;
@@ -2295,16 +2295,16 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
        unsigned zerofrom, offset, len;
        int err = 0;
 
-       index = pos >> PAGE_CACHE_SHIFT;
-       offset = pos & ~PAGE_CACHE_MASK;
+       index = pos >> PAGE_SHIFT;
+       offset = pos & ~PAGE_MASK;
 
-       while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
-               zerofrom = curpos & ~PAGE_CACHE_MASK;
+       while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) {
+               zerofrom = curpos & ~PAGE_MASK;
                if (zerofrom & (blocksize-1)) {
                        *bytes |= (blocksize-1);
                        (*bytes)++;
                }
-               len = PAGE_CACHE_SIZE - zerofrom;
+               len = PAGE_SIZE - zerofrom;
 
                err = pagecache_write_begin(file, mapping, curpos, len,
                                                AOP_FLAG_UNINTERRUPTIBLE,
@@ -2329,7 +2329,7 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
 
        /* page covers the boundary, find the boundary offset */
        if (index == curidx) {
-               zerofrom = curpos & ~PAGE_CACHE_MASK;
+               zerofrom = curpos & ~PAGE_MASK;
                /* if we will expand the thing last block will be filled */
                if (offset <= zerofrom) {
                        goto out;
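
cont_expand_zero() above fills the gap between the old file tail (*bytes) and the new write position page by page: zerofrom is the in-page offset where zeroing starts, after *bytes has been rounded up to a block boundary. Worked through for a write extending a 5000-byte file, assuming 4 KiB pages:

    /* curpos = 5000: curidx = 1, zerofrom = 904, len = 3192, so the
     * first pass zeroes bytes [904, 4096) of page 1; later passes
     * zero whole pages until the target page index is reached */
    pgoff_t  curidx   = curpos >> PAGE_SHIFT;
    unsigned zerofrom = curpos & ~PAGE_MASK;
    unsigned len      = PAGE_SIZE - zerofrom;
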
@@ -2375,7 +2375,7 @@ int cont_write_begin(struct file *file, struct address_space *mapping,
        if (err)
                return err;
 
-       zerofrom = *bytes & ~PAGE_CACHE_MASK;
+       zerofrom = *bytes & ~PAGE_MASK;
        if (pos+len > *bytes && zerofrom & (blocksize-1)) {
                *bytes |= (blocksize-1);
                (*bytes)++;
@@ -2430,10 +2430,10 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
        }
 
        /* page is wholly or partially inside EOF */
-       if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
-               end = size & ~PAGE_CACHE_MASK;
+       if (((page->index + 1) << PAGE_SHIFT) > size)
+               end = size & ~PAGE_MASK;
        else
-               end = PAGE_CACHE_SIZE;
+               end = PAGE_SIZE;
 
        ret = __block_write_begin(page, 0, end, get_block);
        if (!ret)
@@ -2508,8 +2508,8 @@ int nobh_write_begin(struct address_space *mapping,
        int ret = 0;
        int is_mapped_to_disk = 1;
 
-       index = pos >> PAGE_CACHE_SHIFT;
-       from = pos & (PAGE_CACHE_SIZE - 1);
+       index = pos >> PAGE_SHIFT;
+       from = pos & (PAGE_SIZE - 1);
        to = from + len;
 
        page = grab_cache_page_write_begin(mapping, index, flags);
@@ -2543,7 +2543,7 @@ int nobh_write_begin(struct address_space *mapping,
                goto out_release;
        }
 
-       block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
+       block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
 
        /*
         * We loop across all blocks in the page, whether or not they are
@@ -2551,7 +2551,7 @@ int nobh_write_begin(struct address_space *mapping,
         * page is fully mapped-to-disk.
         */
        for (block_start = 0, block_in_page = 0, bh = head;
-                 block_start < PAGE_CACHE_SIZE;
+                 block_start < PAGE_SIZE;
                  block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
                int create;
 
@@ -2623,7 +2623,7 @@ failed:
 
 out_release:
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
        *pagep = NULL;
 
        return ret;
@@ -2653,7 +2653,7 @@ int nobh_write_end(struct file *file, struct address_space *mapping,
        }
 
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 
        while (head) {
                bh = head;
@@ -2675,7 +2675,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
 {
        struct inode * const inode = page->mapping->host;
        loff_t i_size = i_size_read(inode);
-       const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+       const pgoff_t end_index = i_size >> PAGE_SHIFT;
        unsigned offset;
        int ret;
 
@@ -2684,7 +2684,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
                goto out;
 
        /* Is the page fully outside i_size? (truncate in progress) */
-       offset = i_size & (PAGE_CACHE_SIZE-1);
+       offset = i_size & (PAGE_SIZE-1);
        if (page->index >= end_index+1 || !offset) {
                /*
                 * The page may have dirty, unmapped buffers.  For example,
@@ -2707,7 +2707,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
         * the  page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
-       zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+       zero_user_segment(page, offset, PAGE_SIZE);
 out:
        ret = mpage_writepage(page, get_block, wbc);
        if (ret == -EAGAIN)
@@ -2720,8 +2720,8 @@ EXPORT_SYMBOL(nobh_writepage);
 int nobh_truncate_page(struct address_space *mapping,
                        loff_t from, get_block_t *get_block)
 {
-       pgoff_t index = from >> PAGE_CACHE_SHIFT;
-       unsigned offset = from & (PAGE_CACHE_SIZE-1);
+       pgoff_t index = from >> PAGE_SHIFT;
+       unsigned offset = from & (PAGE_SIZE-1);
        unsigned blocksize;
        sector_t iblock;
        unsigned length, pos;
@@ -2738,7 +2738,7 @@ int nobh_truncate_page(struct address_space *mapping,
                return 0;
 
        length = blocksize - length;
-       iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+       iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits);
 
        page = grab_cache_page(mapping, index);
        err = -ENOMEM;
@@ -2748,7 +2748,7 @@ int nobh_truncate_page(struct address_space *mapping,
        if (page_has_buffers(page)) {
 has_buffers:
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
                return block_truncate_page(mapping, from, get_block);
        }
 
@@ -2772,7 +2772,7 @@ has_buffers:
        if (!PageUptodate(page)) {
                err = mapping->a_ops->readpage(NULL, page);
                if (err) {
-                       page_cache_release(page);
+                       put_page(page);
                        goto out;
                }
                lock_page(page);
@@ -2789,7 +2789,7 @@ has_buffers:
 
 unlock:
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 out:
        return err;
 }
@@ -2798,8 +2798,8 @@ EXPORT_SYMBOL(nobh_truncate_page);
 int block_truncate_page(struct address_space *mapping,
                        loff_t from, get_block_t *get_block)
 {
-       pgoff_t index = from >> PAGE_CACHE_SHIFT;
-       unsigned offset = from & (PAGE_CACHE_SIZE-1);
+       pgoff_t index = from >> PAGE_SHIFT;
+       unsigned offset = from & (PAGE_SIZE-1);
        unsigned blocksize;
        sector_t iblock;
        unsigned length, pos;
@@ -2816,7 +2816,7 @@ int block_truncate_page(struct address_space *mapping,
                return 0;
 
        length = blocksize - length;
-       iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+       iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits);
        
        page = grab_cache_page(mapping, index);
        err = -ENOMEM;
@@ -2865,7 +2865,7 @@ int block_truncate_page(struct address_space *mapping,
 
 unlock:
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 out:
        return err;
 }
@@ -2879,7 +2879,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
 {
        struct inode * const inode = page->mapping->host;
        loff_t i_size = i_size_read(inode);
-       const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+       const pgoff_t end_index = i_size >> PAGE_SHIFT;
        unsigned offset;
 
        /* Is the page fully inside i_size? */
@@ -2888,14 +2888,14 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
                                               end_buffer_async_write);
 
        /* Is the page fully outside i_size? (truncate in progress) */
-       offset = i_size & (PAGE_CACHE_SIZE-1);
+       offset = i_size & (PAGE_SIZE-1);
        if (page->index >= end_index+1 || !offset) {
                /*
                 * The page may have dirty, unmapped buffers.  For example,
                 * they may have been added in ext3_writepage().  Make them
                 * freeable here, so the page does not leak.
                 */
-               do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
+               do_invalidatepage(page, 0, PAGE_SIZE);
                unlock_page(page);
                return 0; /* don't care */
        }
@@ -2907,7 +2907,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
         * the  page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
-       zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+       zero_user_segment(page, offset, PAGE_SIZE);
        return __block_write_full_page(inode, page, get_block, wbc,
                                                        end_buffer_async_write);
 }
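
nobh_writepage() and block_write_full_page() above share one piece of EOF arithmetic: the byte offset of i_size inside its last page. A short restatement of how the two cases fall out of it, assuming 4 KiB pages:

    pgoff_t end_index = i_size >> PAGE_SHIFT;      /* page containing EOF  */
    unsigned offset   = i_size & (PAGE_SIZE - 1);  /* bytes used inside it */
    /* offset == 0: i_size is page-aligned and the page at end_index
     * starts exactly at EOF, so it is wholly outside the file; otherwise
     * that page is partial and bytes [offset, PAGE_SIZE) are zeroed
     * before writeback, per the mmap() contract quoted above */
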
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index c0f3da3926a0c6d4b8cf95e2c24a2a1ebd9d6655..afbdc418966db8a657ad869d6382201561e8a60d 100644
@@ -194,10 +194,10 @@ static void cachefiles_read_copier(struct fscache_operation *_op)
                        error = -EIO;
                }
 
-               page_cache_release(monitor->back_page);
+               put_page(monitor->back_page);
 
                fscache_end_io(op, monitor->netfs_page, error);
-               page_cache_release(monitor->netfs_page);
+               put_page(monitor->netfs_page);
                fscache_retrieval_complete(op, 1);
                fscache_put_retrieval(op);
                kfree(monitor);
@@ -288,8 +288,8 @@ monitor_backing_page:
        _debug("- monitor add");
 
        /* install the monitor */
-       page_cache_get(monitor->netfs_page);
-       page_cache_get(backpage);
+       get_page(monitor->netfs_page);
+       get_page(backpage);
        monitor->back_page = backpage;
        monitor->monitor.private = backpage;
        add_page_wait_queue(backpage, &monitor->monitor);
@@ -310,7 +310,7 @@ backing_page_already_present:
        _debug("- present");
 
        if (newpage) {
-               page_cache_release(newpage);
+               put_page(newpage);
                newpage = NULL;
        }
 
@@ -342,7 +342,7 @@ success:
 
 out:
        if (backpage)
-               page_cache_release(backpage);
+               put_page(backpage);
        if (monitor) {
                fscache_put_retrieval(monitor->op);
                kfree(monitor);
@@ -363,7 +363,7 @@ io_error:
        goto out;
 
 nomem_page:
-       page_cache_release(newpage);
+       put_page(newpage);
 nomem_monitor:
        fscache_put_retrieval(monitor->op);
        kfree(monitor);
@@ -530,7 +530,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
                                            netpage->index, cachefiles_gfp);
                if (ret < 0) {
                        if (ret == -EEXIST) {
-                               page_cache_release(netpage);
+                               put_page(netpage);
                                fscache_retrieval_complete(op, 1);
                                continue;
                        }
@@ -538,10 +538,10 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
                }
 
                /* install a monitor */
-               page_cache_get(netpage);
+               get_page(netpage);
                monitor->netfs_page = netpage;
 
-               page_cache_get(backpage);
+               get_page(backpage);
                monitor->back_page = backpage;
                monitor->monitor.private = backpage;
                add_page_wait_queue(backpage, &monitor->monitor);
@@ -555,10 +555,10 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
                        unlock_page(backpage);
                }
 
-               page_cache_release(backpage);
+               put_page(backpage);
                backpage = NULL;
 
-               page_cache_release(netpage);
+               put_page(netpage);
                netpage = NULL;
                continue;
 
@@ -603,7 +603,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
                                            netpage->index, cachefiles_gfp);
                if (ret < 0) {
                        if (ret == -EEXIST) {
-                               page_cache_release(netpage);
+                               put_page(netpage);
                                fscache_retrieval_complete(op, 1);
                                continue;
                        }
@@ -612,14 +612,14 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
 
                copy_highpage(netpage, backpage);
 
-               page_cache_release(backpage);
+               put_page(backpage);
                backpage = NULL;
 
                fscache_mark_page_cached(op, netpage);
 
                /* the netpage is unlocked and marked up to date here */
                fscache_end_io(op, netpage, 0);
-               page_cache_release(netpage);
+               put_page(netpage);
                netpage = NULL;
                fscache_retrieval_complete(op, 1);
                continue;
@@ -632,11 +632,11 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
 out:
        /* tidy up */
        if (newpage)
-               page_cache_release(newpage);
+               put_page(newpage);
        if (netpage)
-               page_cache_release(netpage);
+               put_page(netpage);
        if (backpage)
-               page_cache_release(backpage);
+               put_page(backpage);
        if (monitor) {
                fscache_put_retrieval(op);
                kfree(monitor);
@@ -644,7 +644,7 @@ out:
 
        list_for_each_entry_safe(netpage, _n, list, lru) {
                list_del(&netpage->lru);
-               page_cache_release(netpage);
+               put_page(netpage);
                fscache_retrieval_complete(op, 1);
        }
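
The monitor installation in the hunks above takes one extra reference on both the netfs page and the backing page because completion runs later, in cachefiles_read_copier(); every disposal path then drops exactly the pair it owns. A fragment sketching the pairing rule (abridged, not from the patch):

    get_page(monitor->netfs_page);   /* taken at install time */
    get_page(monitor->back_page);
            /* ... I/O completes, copier runs ... */
    put_page(monitor->back_page);    /* dropped by the copier */
    put_page(monitor->netfs_page);
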
 
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index fc5cae2a0db2dc2a4229f275f8345c6544ea13d6..4801571f51cb4e7461d8d1f9f8f3136fb6713fbc 100644
@@ -143,7 +143,7 @@ static void ceph_invalidatepage(struct page *page, unsigned int offset,
        inode = page->mapping->host;
        ci = ceph_inode(inode);
 
-       if (offset != 0 || length != PAGE_CACHE_SIZE) {
+       if (offset != 0 || length != PAGE_SIZE) {
                dout("%p invalidatepage %p idx %lu partial dirty page %u~%u\n",
                     inode, page, page->index, offset, length);
                return;
@@ -197,10 +197,10 @@ static int readpage_nounlock(struct file *filp, struct page *page)
                &ceph_inode_to_client(inode)->client->osdc;
        int err = 0;
        u64 off = page_offset(page);
-       u64 len = PAGE_CACHE_SIZE;
+       u64 len = PAGE_SIZE;
 
        if (off >= i_size_read(inode)) {
-               zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+               zero_user_segment(page, 0, PAGE_SIZE);
                SetPageUptodate(page);
                return 0;
        }
@@ -212,7 +212,7 @@ static int readpage_nounlock(struct file *filp, struct page *page)
                 */
                if (off == 0)
                        return -EINVAL;
-               zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+               zero_user_segment(page, 0, PAGE_SIZE);
                SetPageUptodate(page);
                return 0;
        }
@@ -234,9 +234,9 @@ static int readpage_nounlock(struct file *filp, struct page *page)
                ceph_fscache_readpage_cancel(inode, page);
                goto out;
        }
-       if (err < PAGE_CACHE_SIZE)
+       if (err < PAGE_SIZE)
                /* zero fill remainder of page */
-               zero_user_segment(page, err, PAGE_CACHE_SIZE);
+               zero_user_segment(page, err, PAGE_SIZE);
        else
                flush_dcache_page(page);
 
@@ -278,10 +278,10 @@ static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg)
 
                if (rc < 0 && rc != -ENOENT)
                        goto unlock;
-               if (bytes < (int)PAGE_CACHE_SIZE) {
+               if (bytes < (int)PAGE_SIZE) {
                        /* zero (remainder of) page */
                        int s = bytes < 0 ? 0 : bytes;
-                       zero_user_segment(page, s, PAGE_CACHE_SIZE);
+                       zero_user_segment(page, s, PAGE_SIZE);
                }
                dout("finish_read %p uptodate %p idx %lu\n", inode, page,
                     page->index);
@@ -290,8 +290,8 @@ static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg)
                ceph_readpage_to_fscache(inode, page);
 unlock:
                unlock_page(page);
-               page_cache_release(page);
-               bytes -= PAGE_CACHE_SIZE;
+               put_page(page);
+               bytes -= PAGE_SIZE;
        }
        kfree(osd_data->pages);
 }
@@ -336,7 +336,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
                if (max && nr_pages == max)
                        break;
        }
-       len = nr_pages << PAGE_CACHE_SHIFT;
+       len = nr_pages << PAGE_SHIFT;
        dout("start_read %p nr_pages %d is %lld~%lld\n", inode, nr_pages,
             off, len);
        vino = ceph_vino(inode);
@@ -364,7 +364,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
                if (add_to_page_cache_lru(page, &inode->i_data, page->index,
                                          GFP_KERNEL)) {
                        ceph_fscache_uncache_page(inode, page);
-                       page_cache_release(page);
+                       put_page(page);
                        dout("start_read %p add_to_page_cache failed %p\n",
                             inode, page);
                        nr_pages = i;
@@ -415,8 +415,8 @@ static int ceph_readpages(struct file *file, struct address_space *mapping,
        if (rc == 0)
                goto out;
 
-       if (fsc->mount_options->rsize >= PAGE_CACHE_SIZE)
-               max = (fsc->mount_options->rsize + PAGE_CACHE_SIZE - 1)
+       if (fsc->mount_options->rsize >= PAGE_SIZE)
+               max = (fsc->mount_options->rsize + PAGE_SIZE - 1)
                        >> PAGE_SHIFT;
 
        dout("readpages %p file %p nr_pages %d max %d\n", inode,
@@ -484,7 +484,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
        long writeback_stat;
        u64 truncate_size;
        u32 truncate_seq;
-       int err = 0, len = PAGE_CACHE_SIZE;
+       int err = 0, len = PAGE_SIZE;
 
        dout("writepage %p idx %lu\n", page, page->index);
 
@@ -725,9 +725,9 @@ static int ceph_writepages_start(struct address_space *mapping,
        }
        if (fsc->mount_options->wsize && fsc->mount_options->wsize < wsize)
                wsize = fsc->mount_options->wsize;
-       if (wsize < PAGE_CACHE_SIZE)
-               wsize = PAGE_CACHE_SIZE;
-       max_pages_ever = wsize >> PAGE_CACHE_SHIFT;
+       if (wsize < PAGE_SIZE)
+               wsize = PAGE_SIZE;
+       max_pages_ever = wsize >> PAGE_SHIFT;
 
        pagevec_init(&pvec, 0);
 
@@ -737,8 +737,8 @@ static int ceph_writepages_start(struct address_space *mapping,
                end = -1;
                dout(" cyclic, start at %lu\n", start);
        } else {
-               start = wbc->range_start >> PAGE_CACHE_SHIFT;
-               end = wbc->range_end >> PAGE_CACHE_SHIFT;
+               start = wbc->range_start >> PAGE_SHIFT;
+               end = wbc->range_end >> PAGE_SHIFT;
                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                        range_whole = 1;
                should_loop = 0;
@@ -887,7 +887,7 @@ get_more_pages:
 
                                num_ops = 1 + do_sync;
                                strip_unit_end = page->index +
-                                       ((len - 1) >> PAGE_CACHE_SHIFT);
+                                       ((len - 1) >> PAGE_SHIFT);
 
                                BUG_ON(pages);
                                max_pages = calc_pages_for(0, (u64)len);
@@ -901,7 +901,7 @@ get_more_pages:
 
                                len = 0;
                        } else if (page->index !=
-                                  (offset + len) >> PAGE_CACHE_SHIFT) {
+                                  (offset + len) >> PAGE_SHIFT) {
                                if (num_ops >= (pool ?  CEPH_OSD_SLAB_OPS :
                                                        CEPH_OSD_MAX_OPS)) {
                                        redirty_page_for_writepage(wbc, page);
@@ -929,7 +929,7 @@ get_more_pages:
 
                        pages[locked_pages] = page;
                        locked_pages++;
-                       len += PAGE_CACHE_SIZE;
+                       len += PAGE_SIZE;
                }
 
                /* did we get anything? */
@@ -981,7 +981,7 @@ new_request:
                        BUG_ON(IS_ERR(req));
                }
                BUG_ON(len < page_offset(pages[locked_pages - 1]) +
-                            PAGE_CACHE_SIZE - offset);
+                            PAGE_SIZE - offset);
 
                req->r_callback = writepages_finish;
                req->r_inode = inode;
@@ -1011,7 +1011,7 @@ new_request:
                        }
 
                        set_page_writeback(pages[i]);
-                       len += PAGE_CACHE_SIZE;
+                       len += PAGE_SIZE;
                }
 
                if (snap_size != -1) {
@@ -1020,7 +1020,7 @@ new_request:
                        /* writepages_finish() clears writeback pages
                         * according to the data length, so make sure
                         * data length covers all locked pages */
-                       u64 min_len = len + 1 - PAGE_CACHE_SIZE;
+                       u64 min_len = len + 1 - PAGE_SIZE;
                        len = min(len, (u64)i_size_read(inode) - offset);
                        len = max(len, min_len);
                }
@@ -1135,8 +1135,8 @@ static int ceph_update_writeable_page(struct file *file,
 {
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
-       loff_t page_off = pos & PAGE_CACHE_MASK;
-       int pos_in_page = pos & ~PAGE_CACHE_MASK;
+       loff_t page_off = pos & PAGE_MASK;
+       int pos_in_page = pos & ~PAGE_MASK;
        int end_in_page = pos_in_page + len;
        loff_t i_size;
        int r;
@@ -1191,7 +1191,7 @@ retry_locked:
        }
 
        /* full page? */
-       if (pos_in_page == 0 && len == PAGE_CACHE_SIZE)
+       if (pos_in_page == 0 && len == PAGE_SIZE)
                return 0;
 
        /* past end of file? */
@@ -1199,12 +1199,12 @@ retry_locked:
 
        if (page_off >= i_size ||
            (pos_in_page == 0 && (pos+len) >= i_size &&
-            end_in_page - pos_in_page != PAGE_CACHE_SIZE)) {
+            end_in_page - pos_in_page != PAGE_SIZE)) {
                dout(" zeroing %p 0 - %d and %d - %d\n",
-                    page, pos_in_page, end_in_page, (int)PAGE_CACHE_SIZE);
+                    page, pos_in_page, end_in_page, (int)PAGE_SIZE);
                zero_user_segments(page,
                                   0, pos_in_page,
-                                  end_in_page, PAGE_CACHE_SIZE);
+                                  end_in_page, PAGE_SIZE);
                return 0;
        }
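
ceph_update_writeable_page() above skips a read from the OSDs when the write starts at or beyond EOF: only the bytes outside [pos_in_page, end_in_page) need clearing, and zero_user_segments() clears both flanks under a single page mapping. Worked through for a 100-byte write at pos_in_page 200, assuming 4 KiB pages:

    /* clears bytes [0, 200) and [300, 4096); the write fills [200, 300) */
    zero_user_segments(page, 0, pos_in_page, end_in_page, PAGE_SIZE);
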
 
@@ -1228,7 +1228,7 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
 {
        struct inode *inode = file_inode(file);
        struct page *page;
-       pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+       pgoff_t index = pos >> PAGE_SHIFT;
        int r;
 
        do {
@@ -1242,7 +1242,7 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
 
                r = ceph_update_writeable_page(file, pos, len, page);
                if (r < 0)
-                       page_cache_release(page);
+                       put_page(page);
                else
                        *pagep = page;
        } while (r == -EAGAIN);
@@ -1259,7 +1259,7 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
                          struct page *page, void *fsdata)
 {
        struct inode *inode = file_inode(file);
-       unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+       unsigned from = pos & (PAGE_SIZE - 1);
        int check_cap = 0;
 
        dout("write_end file %p inode %p page %p %d~%d (%d)\n", file,
@@ -1279,7 +1279,7 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
        set_page_dirty(page);
 
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 
        if (check_cap)
                ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);
@@ -1322,11 +1322,11 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_file_info *fi = vma->vm_file->private_data;
        struct page *pinned_page = NULL;
-       loff_t off = vmf->pgoff << PAGE_CACHE_SHIFT;
+       loff_t off = vmf->pgoff << PAGE_SHIFT;
        int want, got, ret;
 
        dout("filemap_fault %p %llx.%llx %llu~%zd trying to get caps\n",
-            inode, ceph_vinop(inode), off, (size_t)PAGE_CACHE_SIZE);
+            inode, ceph_vinop(inode), off, (size_t)PAGE_SIZE);
        if (fi->fmode & CEPH_FILE_MODE_LAZY)
                want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
        else
@@ -1343,7 +1343,7 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                }
        }
        dout("filemap_fault %p %llu~%zd got cap refs on %s\n",
-            inode, off, (size_t)PAGE_CACHE_SIZE, ceph_cap_string(got));
+            inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got));
 
        if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
            ci->i_inline_version == CEPH_INLINE_NONE)
@@ -1352,16 +1352,16 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                ret = -EAGAIN;
 
        dout("filemap_fault %p %llu~%zd dropping cap refs on %s ret %d\n",
-            inode, off, (size_t)PAGE_CACHE_SIZE, ceph_cap_string(got), ret);
+            inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got), ret);
        if (pinned_page)
-               page_cache_release(pinned_page);
+               put_page(pinned_page);
        ceph_put_cap_refs(ci, got);
 
        if (ret != -EAGAIN)
                return ret;
 
        /* read inline data */
-       if (off >= PAGE_CACHE_SIZE) {
+       if (off >= PAGE_SIZE) {
                /* does not support inline data > PAGE_SIZE */
                ret = VM_FAULT_SIGBUS;
        } else {
@@ -1378,12 +1378,12 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                                         CEPH_STAT_CAP_INLINE_DATA, true);
                if (ret1 < 0 || off >= i_size_read(inode)) {
                        unlock_page(page);
-                       page_cache_release(page);
+                       put_page(page);
                        ret = VM_FAULT_SIGBUS;
                        goto out;
                }
-               if (ret1 < PAGE_CACHE_SIZE)
-                       zero_user_segment(page, ret1, PAGE_CACHE_SIZE);
+               if (ret1 < PAGE_SIZE)
+                       zero_user_segment(page, ret1, PAGE_SIZE);
                else
                        flush_dcache_page(page);
                SetPageUptodate(page);
@@ -1392,7 +1392,7 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        }
 out:
        dout("filemap_fault %p %llu~%zd read inline data ret %d\n",
-            inode, off, (size_t)PAGE_CACHE_SIZE, ret);
+            inode, off, (size_t)PAGE_SIZE, ret);
        return ret;
 }
 
@@ -1430,10 +1430,10 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
                }
        }
 
-       if (off + PAGE_CACHE_SIZE <= size)
-               len = PAGE_CACHE_SIZE;
+       if (off + PAGE_SIZE <= size)
+               len = PAGE_SIZE;
        else
-               len = size & ~PAGE_CACHE_MASK;
+               len = size & ~PAGE_MASK;
 
        dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n",
             inode, ceph_vinop(inode), off, len, size);
@@ -1519,7 +1519,7 @@ void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
                        return;
                if (PageUptodate(page)) {
                        unlock_page(page);
-                       page_cache_release(page);
+                       put_page(page);
                        return;
                }
        }
@@ -1534,14 +1534,14 @@ void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
        }
 
        if (page != locked_page) {
-               if (len < PAGE_CACHE_SIZE)
-                       zero_user_segment(page, len, PAGE_CACHE_SIZE);
+               if (len < PAGE_SIZE)
+                       zero_user_segment(page, len, PAGE_SIZE);
                else
                        flush_dcache_page(page);
 
                SetPageUptodate(page);
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
        }
 }
 
@@ -1578,7 +1578,7 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
                                from_pagecache = true;
                                lock_page(page);
                        } else {
-                               page_cache_release(page);
+                               put_page(page);
                                page = NULL;
                        }
                }
@@ -1586,8 +1586,8 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
 
        if (page) {
                len = i_size_read(inode);
-               if (len > PAGE_CACHE_SIZE)
-                       len = PAGE_CACHE_SIZE;
+               if (len > PAGE_SIZE)
+                       len = PAGE_SIZE;
        } else {
                page = __page_cache_alloc(GFP_NOFS);
                if (!page) {
@@ -1670,7 +1670,7 @@ out:
        if (page && page != locked_page) {
                if (from_pagecache) {
                        unlock_page(page);
-                       page_cache_release(page);
+                       put_page(page);
                } else
                        __free_pages(page, 0);
        }
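
All of the fs/ceph/addr.c changes above are one-for-one renames. They are safe precisely because the PAGE_CACHE_* names were never independent quantities; before this cleanup, include/linux/pagemap.h defined them as plain aliases, roughly as below (quoted from memory, so exact whitespace may differ, but the aliases were exactly this trivial):

    #define PAGE_CACHE_SHIFT        PAGE_SHIFT
    #define PAGE_CACHE_SIZE         PAGE_SIZE
    #define PAGE_CACHE_MASK         PAGE_MASK
    #define PAGE_CACHE_ALIGN(addr)  (((addr) + PAGE_CACHE_SIZE - 1) & PAGE_CACHE_MASK)

    #define page_cache_get(page)    get_page(page)
    #define page_cache_release(page) put_page(page)
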
index de17bb232ff8d30906c53f34248ce99ea566562a..cfaeef18cbcabc4baa818c54fb17ef4576f9368d 100644 (file)
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2510,7 +2510,7 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
                                        *pinned_page = page;
                                        break;
                                }
-                               page_cache_release(page);
+                               put_page(page);
                        }
                        /*
                         * drop cap refs first because getattr while
index fadc243dfb284b90a63961f4a48c372f483c1142..4fb2bbc2a2722af6e9ccf84cf9fc80ce7f3edbeb 100644 (file)
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -129,7 +129,7 @@ static int __dcache_readdir(struct file *file,  struct dir_context *ctx,
        struct inode *dir = d_inode(parent);
        struct dentry *dentry, *last = NULL;
        struct ceph_dentry_info *di;
-       unsigned nsize = PAGE_CACHE_SIZE / sizeof(struct dentry *);
+       unsigned nsize = PAGE_SIZE / sizeof(struct dentry *);
        int err = 0;
        loff_t ptr_pos = 0;
        struct ceph_readdir_cache_control cache_ctl = {};
@@ -154,7 +154,7 @@ static int __dcache_readdir(struct file *file,  struct dir_context *ctx,
                }
 
                err = -EAGAIN;
-               pgoff = ptr_pos >> PAGE_CACHE_SHIFT;
+               pgoff = ptr_pos >> PAGE_SHIFT;
                if (!cache_ctl.page || pgoff != page_index(cache_ctl.page)) {
                        ceph_readdir_cache_release(&cache_ctl);
                        cache_ctl.page = find_lock_page(&dir->i_data, pgoff);
index ef38f01c1795a1187f2190a05f7ebd06bd9bc69e..a79f9269831e383ba03bbe5fe01f0e8f8bbec942 100644 (file)
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -466,7 +466,7 @@ more:
                        ret += zlen;
                }
 
-               didpages = (page_align + ret) >> PAGE_CACHE_SHIFT;
+               didpages = (page_align + ret) >> PAGE_SHIFT;
                pos += ret;
                read = pos - off;
                left -= ret;
@@ -806,8 +806,8 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
 
        if (write) {
                ret = invalidate_inode_pages2_range(inode->i_mapping,
-                                       pos >> PAGE_CACHE_SHIFT,
-                                       (pos + count) >> PAGE_CACHE_SHIFT);
+                                       pos >> PAGE_SHIFT,
+                                       (pos + count) >> PAGE_SHIFT);
                if (ret < 0)
                        dout("invalidate_inode_pages2_range returned %d\n", ret);
 
@@ -872,7 +872,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
                         * may block.
                         */
                        truncate_inode_pages_range(inode->i_mapping, pos,
-                                       (pos+len) | (PAGE_CACHE_SIZE - 1));
+                                       (pos+len) | (PAGE_SIZE - 1));
 
                        osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);
                }
@@ -1006,8 +1006,8 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
                return ret;
 
        ret = invalidate_inode_pages2_range(inode->i_mapping,
-                                           pos >> PAGE_CACHE_SHIFT,
-                                           (pos + count) >> PAGE_CACHE_SHIFT);
+                                           pos >> PAGE_SHIFT,
+                                           (pos + count) >> PAGE_SHIFT);
        if (ret < 0)
                dout("invalidate_inode_pages2_range returned %d\n", ret);
 
@@ -1036,7 +1036,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
                 * write from beginning of first page,
                 * regardless of io alignment
                 */
-               num_pages = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+               num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
                pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
                if (IS_ERR(pages)) {
@@ -1159,7 +1159,7 @@ again:
        dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
             inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
        if (pinned_page) {
-               page_cache_release(pinned_page);
+               put_page(pinned_page);
                pinned_page = NULL;
        }
        ceph_put_cap_refs(ci, got);
@@ -1188,10 +1188,10 @@ again:
                if (retry_op == READ_INLINE) {
                        BUG_ON(ret > 0 || read > 0);
                        if (iocb->ki_pos < i_size &&
-                           iocb->ki_pos < PAGE_CACHE_SIZE) {
+                           iocb->ki_pos < PAGE_SIZE) {
                                loff_t end = min_t(loff_t, i_size,
                                                   iocb->ki_pos + len);
-                               end = min_t(loff_t, end, PAGE_CACHE_SIZE);
+                               end = min_t(loff_t, end, PAGE_SIZE);
                                if (statret < end)
                                        zero_user_segment(page, statret, end);
                                ret = copy_page_to_iter(page,
@@ -1463,21 +1463,21 @@ static inline void ceph_zero_partial_page(
        struct inode *inode, loff_t offset, unsigned size)
 {
        struct page *page;
-       pgoff_t index = offset >> PAGE_CACHE_SHIFT;
+       pgoff_t index = offset >> PAGE_SHIFT;
 
        page = find_lock_page(inode->i_mapping, index);
        if (page) {
                wait_on_page_writeback(page);
-               zero_user(page, offset & (PAGE_CACHE_SIZE - 1), size);
+               zero_user(page, offset & (PAGE_SIZE - 1), size);
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
        }
 }
 
 static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
                                      loff_t length)
 {
-       loff_t nearly = round_up(offset, PAGE_CACHE_SIZE);
+       loff_t nearly = round_up(offset, PAGE_SIZE);
        if (offset < nearly) {
                loff_t size = nearly - offset;
                if (length < size)
@@ -1486,8 +1486,8 @@ static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
                offset += size;
                length -= size;
        }
-       if (length >= PAGE_CACHE_SIZE) {
-               loff_t size = round_down(length, PAGE_CACHE_SIZE);
+       if (length >= PAGE_SIZE) {
+               loff_t size = round_down(length, PAGE_SIZE);
                truncate_pagecache_range(inode, offset, offset + size - 1);
                offset += size;
                length -= size;
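
ceph_zero_pagecache_range() splits the byte range into an unaligned head, a run of whole pages, and a tail. A worked example of the arithmetic above, assuming PAGE_SIZE == 4096:

    /* offset = 1000, length = 10000
     * head: nearly = round_up(1000, 4096) = 4096
     *       zero bytes 1000..4095 in-page (size 3096); offset = 4096, length = 6904
     * body: size = round_down(6904, 4096) = 4096
     *       truncate_pagecache_range(inode, 4096, 8191); offset = 8192, length = 2808
     * tail: the remaining 2808 bytes are zeroed in-page by the partial-page
     *       helper, in code just past this hunk
     */
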
index ed58b168904a9c2425badda5ff50a05832249705..edfade0377388880f41216ae30bc8c79626790c0 100644 (file)
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1338,7 +1338,7 @@ void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
 {
        if (ctl->page) {
                kunmap(ctl->page);
-               page_cache_release(ctl->page);
+               put_page(ctl->page);
                ctl->page = NULL;
        }
 }
@@ -1348,7 +1348,7 @@ static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
                              struct ceph_mds_request *req)
 {
        struct ceph_inode_info *ci = ceph_inode(dir);
-       unsigned nsize = PAGE_CACHE_SIZE / sizeof(struct dentry*);
+       unsigned nsize = PAGE_SIZE / sizeof(struct dentry*);
        unsigned idx = ctl->index % nsize;
        pgoff_t pgoff = ctl->index / nsize;
 
@@ -1367,7 +1367,7 @@ static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
                unlock_page(ctl->page);
                ctl->dentries = kmap(ctl->page);
                if (idx == 0)
-                       memset(ctl->dentries, 0, PAGE_CACHE_SIZE);
+                       memset(ctl->dentries, 0, PAGE_SIZE);
        }
 
        if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
index 44852c3ae5311a3367ca081685f8baa7ecd60bed..85b8517f17a09bb051f36f102329258a822ec4f5 100644 (file)
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -386,9 +386,7 @@ void ceph_put_mds_session(struct ceph_mds_session *s)
             atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
        if (atomic_dec_and_test(&s->s_ref)) {
                if (s->s_auth.authorizer)
-                       ceph_auth_destroy_authorizer(
-                               s->s_mdsc->fsc->client->monc.auth,
-                               s->s_auth.authorizer);
+                       ceph_auth_destroy_authorizer(s->s_auth.authorizer);
                kfree(s);
        }
 }
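
The ceph_auth_destroy_authorizer() change is not part of the PAGE_CACHE_SIZE rename: the libceph API lost its ceph_auth_client argument in this cycle because each authorizer now carries its own destructor. A sketch of the reworked interface, inferred from the new call sites (treat the details as an assumption; see include/linux/ceph/auth.h):

    struct ceph_authorizer {
            void (*destroy)(struct ceph_authorizer *);
            /* ... protocol-private state follows ... */
    };

    void ceph_auth_destroy_authorizer(struct ceph_authorizer *a)
    {
            a->destroy(a);  /* each auth protocol supplies its own hook */
    }
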
@@ -1610,7 +1608,7 @@ again:
        while (!list_empty(&tmp_list)) {
                if (!msg) {
                        msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE,
-                                       PAGE_CACHE_SIZE, GFP_NOFS, false);
+                                       PAGE_SIZE, GFP_NOFS, false);
                        if (!msg)
                                goto out_err;
                        head = msg->front.iov_base;
@@ -3900,7 +3898,7 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
        struct ceph_auth_handshake *auth = &s->s_auth;
 
        if (force_new && auth->authorizer) {
-               ceph_auth_destroy_authorizer(ac, auth->authorizer);
+               ceph_auth_destroy_authorizer(auth->authorizer);
                auth->authorizer = NULL;
        }
        if (!auth->authorizer) {
index 37712ccffcc6b67c2f98a866c9c2be40324bc6ae..ee69a537dba53bc7770ac92997263d1a1ec190ad 100644 (file)
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -97,7 +97,7 @@ struct ceph_mds_reply_info_parsed {
 /*
  * cap releases are batched and sent to the MDS en masse.
  */
-#define CEPH_CAPS_PER_RELEASE ((PAGE_CACHE_SIZE -                      \
+#define CEPH_CAPS_PER_RELEASE ((PAGE_SIZE -                    \
                                sizeof(struct ceph_mds_cap_release)) /  \
                               sizeof(struct ceph_mds_cap_item))
 
index c973043deb0ecc2d4e002b323afc4ffb6c9c7782..f12d5e2955c223c35145bdf550d5669b823a14bc 100644 (file)
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -560,7 +560,7 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
 
        /* set up mempools */
        err = -ENOMEM;
-       page_count = fsc->mount_options->wsize >> PAGE_CACHE_SHIFT;
+       page_count = fsc->mount_options->wsize >> PAGE_SHIFT;
        size = sizeof (struct page *) * (page_count ? page_count : 1);
        fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, size);
        if (!fsc->wb_pagevec_pool)
@@ -912,13 +912,13 @@ static int ceph_register_bdi(struct super_block *sb,
        int err;
 
        /* set ra_pages based on rasize mount option? */
-       if (fsc->mount_options->rasize >= PAGE_CACHE_SIZE)
+       if (fsc->mount_options->rasize >= PAGE_SIZE)
                fsc->backing_dev_info.ra_pages =
-                       (fsc->mount_options->rasize + PAGE_CACHE_SIZE - 1)
+                       (fsc->mount_options->rasize + PAGE_SIZE - 1)
                        >> PAGE_SHIFT;
        else
                fsc->backing_dev_info.ra_pages =
-                       VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE;
+                       VM_MAX_READAHEAD * 1024 / PAGE_SIZE;
 
        err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
                           atomic_long_inc_return(&bdi_seq));
index 1d86fc620e5c2a89e83e1d35dcf49364f289f83d..89201564c346227b34fae143cd8936298aab9364 100644 (file)
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -962,7 +962,7 @@ static int cifs_clone_file_range(struct file *src_file, loff_t off,
        cifs_dbg(FYI, "about to flush pages\n");
        /* should we flush first and last page first */
        truncate_inode_pages_range(&target_inode->i_data, destoff,
-                                  PAGE_CACHE_ALIGN(destoff + len)-1);
+                                  PAGE_ALIGN(destoff + len)-1);
 
        if (target_tcon->ses->server->ops->duplicate_extents)
                rc = target_tcon->ses->server->ops->duplicate_extents(xid,
index d21da9f05baec9dcddeb598d9050b75b0f6d1fad..f2cc0b3d1af798aa3fe57ac2b817da011635e573 100644 (file)
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -714,7 +714,7 @@ compare_mid(__u16 mid, const struct smb_hdr *smb)
  *
  * Note that this might make for "interesting" allocation problems during
  * writeback however as we have to allocate an array of pointers for the
- * pages. A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096.
+ * pages. A 16M write means ~32kb page array with PAGE_SIZE == 4096.
  *
  * For reads, there is a similar problem as we need to allocate an array
  * of kvecs to handle the receive, though that should only need to be done
@@ -733,7 +733,7 @@ compare_mid(__u16 mid, const struct smb_hdr *smb)
 
 /*
  * The default wsize is 1M. find_get_pages seems to return a maximum of 256
- * pages in a single call. With PAGE_CACHE_SIZE == 4k, this means we can fill
+ * pages in a single call. With PAGE_SIZE == 4k, this means we can fill
  * a single wsize request with a single call.
  */
 #define CIFS_DEFAULT_IOSIZE (1024 * 1024)
index 76fcb50295a38b63a58a0d3f656d023e02b988a8..a894bf809ff71ffd363407e5d380d619fd364fa9 100644 (file)
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1929,17 +1929,17 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
 
                wsize = server->ops->wp_retry_size(inode);
                if (wsize < rest_len) {
-                       nr_pages = wsize / PAGE_CACHE_SIZE;
+                       nr_pages = wsize / PAGE_SIZE;
                        if (!nr_pages) {
                                rc = -ENOTSUPP;
                                break;
                        }
-                       cur_len = nr_pages * PAGE_CACHE_SIZE;
-                       tailsz = PAGE_CACHE_SIZE;
+                       cur_len = nr_pages * PAGE_SIZE;
+                       tailsz = PAGE_SIZE;
                } else {
-                       nr_pages = DIV_ROUND_UP(rest_len, PAGE_CACHE_SIZE);
+                       nr_pages = DIV_ROUND_UP(rest_len, PAGE_SIZE);
                        cur_len = rest_len;
-                       tailsz = rest_len - (nr_pages - 1) * PAGE_CACHE_SIZE;
+                       tailsz = rest_len - (nr_pages - 1) * PAGE_SIZE;
                }
 
                wdata2 = cifs_writedata_alloc(nr_pages, cifs_writev_complete);
@@ -1957,7 +1957,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
                wdata2->sync_mode = wdata->sync_mode;
                wdata2->nr_pages = nr_pages;
                wdata2->offset = page_offset(wdata2->pages[0]);
-               wdata2->pagesz = PAGE_CACHE_SIZE;
+               wdata2->pagesz = PAGE_SIZE;
                wdata2->tailsz = tailsz;
                wdata2->bytes = cur_len;
 
@@ -1975,7 +1975,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
                        if (rc != 0 && rc != -EAGAIN) {
                                SetPageError(wdata2->pages[j]);
                                end_page_writeback(wdata2->pages[j]);
-                               page_cache_release(wdata2->pages[j]);
+                               put_page(wdata2->pages[j]);
                        }
                }
 
@@ -2018,7 +2018,7 @@ cifs_writev_complete(struct work_struct *work)
                else if (wdata->result < 0)
                        SetPageError(page);
                end_page_writeback(page);
-               page_cache_release(page);
+               put_page(page);
        }
        if (wdata->result != -EAGAIN)
                mapping_set_error(inode->i_mapping, wdata->result);
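
cifs_writev_requeue() splits the leftover write into chunks the server will now accept: if the retry window (wsize) is smaller than what remains, it rounds down to whole pages; otherwise it sends everything and only the final page may be short. Worked numbers for the branch above, assuming PAGE_SIZE == 4096:

    /* rest_len = 10000, wsize = 8192   (wsize < rest_len)
     *   nr_pages = 8192 / 4096 = 2; cur_len = 8192; tailsz = 4096
     * rest_len = 10000, wsize = 65536  (wsize >= rest_len)
     *   nr_pages = DIV_ROUND_UP(10000, 4096) = 3; cur_len = 10000;
     *   tailsz = 10000 - 2 * 4096 = 1808
     */
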
index a763cd3d9e7c80589df30ddaa9515906c6fa6138..2e2e0a6242d6ea268ded63f8b862c8b45e1a0d22 100644 (file)
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -2918,7 +2918,7 @@ static inline void
 cifs_reclassify_socket4(struct socket *sock)
 {
        struct sock *sk = sock->sk;
-       BUG_ON(sock_owned_by_user(sk));
+       BUG_ON(!sock_allow_reclassification(sk));
        sock_lock_init_class_and_name(sk, "slock-AF_INET-CIFS",
                &cifs_slock_key[0], "sk_lock-AF_INET-CIFS", &cifs_key[0]);
 }
@@ -2927,7 +2927,7 @@ static inline void
 cifs_reclassify_socket6(struct socket *sock)
 {
        struct sock *sk = sock->sk;
-       BUG_ON(sock_owned_by_user(sk));
+       BUG_ON(!sock_allow_reclassification(sk));
        sock_lock_init_class_and_name(sk, "slock-AF_INET6-CIFS",
                &cifs_slock_key[1], "sk_lock-AF_INET6-CIFS", &cifs_key[1]);
 }
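
These two cifs_reclassify_socket*() hunks track a net-core change rather than the page cache rename: sock_owned_by_user() grew lockdep assertions this cycle, so callers that merely want to ask "is it safe to switch lockdep classes?" use the new helper instead. Its definition in include/net/sock.h is roughly the following (quoted from memory, so treat it as a sketch):

    static inline bool sock_allow_reclassification(const struct sock *csk)
    {
            struct sock *sk = (struct sock *)csk;

            return !sk->sk_lock.owned && !spin_is_locked(&sk->sk_lock.slock);
    }
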
@@ -3630,7 +3630,7 @@ try_mount_again:
        cifs_sb->rsize = server->ops->negotiate_rsize(tcon, volume_info);
 
        /* tune readahead according to rsize */
-       cifs_sb->bdi.ra_pages = cifs_sb->rsize / PAGE_CACHE_SIZE;
+       cifs_sb->bdi.ra_pages = cifs_sb->rsize / PAGE_SIZE;
 
 remote_path_check:
 #ifdef CONFIG_CIFS_DFS_UPCALL
index ff882aeaccc67c404a289fff472a26d8537e1128..c03d0744648b6412a0f30222fee249784d6b4c6a 100644 (file)
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1833,7 +1833,7 @@ refind_writable:
 static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
 {
        struct address_space *mapping = page->mapping;
-       loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
+       loff_t offset = (loff_t)page->index << PAGE_SHIFT;
        char *write_data;
        int rc = -EFAULT;
        int bytes_written = 0;
@@ -1849,7 +1849,7 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
        write_data = kmap(page);
        write_data += from;
 
-       if ((to > PAGE_CACHE_SIZE) || (from > to)) {
+       if ((to > PAGE_SIZE) || (from > to)) {
                kunmap(page);
                return -EIO;
        }
@@ -1902,7 +1902,7 @@ wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
         * find_get_pages_tag seems to return a max of 256 on each
         * iteration, so we must call it several times in order to
         * fill the array or the wsize is effectively limited to
-        * 256 * PAGE_CACHE_SIZE.
+        * 256 * PAGE_SIZE.
         */
        *found_pages = 0;
        pages = wdata->pages;
@@ -1991,7 +1991,7 @@ wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
 
        /* put any pages we aren't going to use */
        for (i = nr_pages; i < found_pages; i++) {
-               page_cache_release(wdata->pages[i]);
+               put_page(wdata->pages[i]);
                wdata->pages[i] = NULL;
        }
 
@@ -2009,11 +2009,11 @@ wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
        wdata->sync_mode = wbc->sync_mode;
        wdata->nr_pages = nr_pages;
        wdata->offset = page_offset(wdata->pages[0]);
-       wdata->pagesz = PAGE_CACHE_SIZE;
+       wdata->pagesz = PAGE_SIZE;
        wdata->tailsz = min(i_size_read(mapping->host) -
                        page_offset(wdata->pages[nr_pages - 1]),
-                       (loff_t)PAGE_CACHE_SIZE);
-       wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) + wdata->tailsz;
+                       (loff_t)PAGE_SIZE);
+       wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;
 
        if (wdata->cfile != NULL)
                cifsFileInfo_put(wdata->cfile);
@@ -2047,15 +2047,15 @@ static int cifs_writepages(struct address_space *mapping,
         * If wsize is smaller than the page cache size, default to writing
         * one page at a time via cifs_writepage
         */
-       if (cifs_sb->wsize < PAGE_CACHE_SIZE)
+       if (cifs_sb->wsize < PAGE_SIZE)
                return generic_writepages(mapping, wbc);
 
        if (wbc->range_cyclic) {
                index = mapping->writeback_index; /* Start from prev offset */
                end = -1;
        } else {
-               index = wbc->range_start >> PAGE_CACHE_SHIFT;
-               end = wbc->range_end >> PAGE_CACHE_SHIFT;
+               index = wbc->range_start >> PAGE_SHIFT;
+               end = wbc->range_end >> PAGE_SHIFT;
                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                        range_whole = true;
                scanned = true;
@@ -2071,7 +2071,7 @@ retry:
                if (rc)
                        break;
 
-               tofind = min((wsize / PAGE_CACHE_SIZE) - 1, end - index) + 1;
+               tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
 
                wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
                                                  &found_pages);
@@ -2111,7 +2111,7 @@ retry:
                                else
                                        SetPageError(wdata->pages[i]);
                                end_page_writeback(wdata->pages[i]);
-                               page_cache_release(wdata->pages[i]);
+                               put_page(wdata->pages[i]);
                        }
                        if (rc != -EAGAIN)
                                mapping_set_error(mapping, rc);
@@ -2154,7 +2154,7 @@ cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
 
        xid = get_xid();
 /* BB add check for wbc flags */
-       page_cache_get(page);
+       get_page(page);
        if (!PageUptodate(page))
                cifs_dbg(FYI, "ppw - page not up to date\n");
 
@@ -2170,7 +2170,7 @@ cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
         */
        set_page_writeback(page);
 retry_write:
-       rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
+       rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
        if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
                goto retry_write;
        else if (rc == -EAGAIN)
@@ -2180,7 +2180,7 @@ retry_write:
        else
                SetPageUptodate(page);
        end_page_writeback(page);
-       page_cache_release(page);
+       put_page(page);
        free_xid(xid);
        return rc;
 }
@@ -2214,12 +2214,12 @@ static int cifs_write_end(struct file *file, struct address_space *mapping,
                if (copied == len)
                        SetPageUptodate(page);
                ClearPageChecked(page);
-       } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
+       } else if (!PageUptodate(page) && copied == PAGE_SIZE)
                SetPageUptodate(page);
 
        if (!PageUptodate(page)) {
                char *page_data;
-               unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
+               unsigned offset = pos & (PAGE_SIZE - 1);
                unsigned int xid;
 
                xid = get_xid();
@@ -2248,7 +2248,7 @@ static int cifs_write_end(struct file *file, struct address_space *mapping,
        }
 
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 
        return rc;
 }
@@ -3286,9 +3286,9 @@ cifs_readv_complete(struct work_struct *work)
                    (rdata->result == -EAGAIN && got_bytes))
                        cifs_readpage_to_fscache(rdata->mapping->host, page);
 
-               got_bytes -= min_t(unsigned int, PAGE_CACHE_SIZE, got_bytes);
+               got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
 
-               page_cache_release(page);
+               put_page(page);
                rdata->pages[i] = NULL;
        }
        kref_put(&rdata->refcount, cifs_readdata_release);
@@ -3307,21 +3307,21 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
 
        /* determine the eof that the server (probably) has */
        eof = CIFS_I(rdata->mapping->host)->server_eof;
-       eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
+       eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
        cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
 
        rdata->got_bytes = 0;
-       rdata->tailsz = PAGE_CACHE_SIZE;
+       rdata->tailsz = PAGE_SIZE;
        for (i = 0; i < nr_pages; i++) {
                struct page *page = rdata->pages[i];
 
-               if (len >= PAGE_CACHE_SIZE) {
+               if (len >= PAGE_SIZE) {
                        /* enough data to fill the page */
                        iov.iov_base = kmap(page);
-                       iov.iov_len = PAGE_CACHE_SIZE;
+                       iov.iov_len = PAGE_SIZE;
                        cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
                                 i, page->index, iov.iov_base, iov.iov_len);
-                       len -= PAGE_CACHE_SIZE;
+                       len -= PAGE_SIZE;
                } else if (len > 0) {
                        /* enough for partial page, fill and zero the rest */
                        iov.iov_base = kmap(page);
@@ -3329,7 +3329,7 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
                        cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
                                 i, page->index, iov.iov_base, iov.iov_len);
                        memset(iov.iov_base + len,
-                               '\0', PAGE_CACHE_SIZE - len);
+                               '\0', PAGE_SIZE - len);
                        rdata->tailsz = len;
                        len = 0;
                } else if (page->index > eof_index) {
@@ -3341,12 +3341,12 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
                         * to prevent the VFS from repeatedly attempting to
                         * fill them until the writes are flushed.
                         */
-                       zero_user(page, 0, PAGE_CACHE_SIZE);
+                       zero_user(page, 0, PAGE_SIZE);
                        lru_cache_add_file(page);
                        flush_dcache_page(page);
                        SetPageUptodate(page);
                        unlock_page(page);
-                       page_cache_release(page);
+                       put_page(page);
                        rdata->pages[i] = NULL;
                        rdata->nr_pages--;
                        continue;
@@ -3354,7 +3354,7 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
                        /* no need to hold page hostage */
                        lru_cache_add_file(page);
                        unlock_page(page);
-                       page_cache_release(page);
+                       put_page(page);
                        rdata->pages[i] = NULL;
                        rdata->nr_pages--;
                        continue;
@@ -3402,8 +3402,8 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
        }
 
        /* move first page to the tmplist */
-       *offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
-       *bytes = PAGE_CACHE_SIZE;
+       *offset = (loff_t)page->index << PAGE_SHIFT;
+       *bytes = PAGE_SIZE;
        *nr_pages = 1;
        list_move_tail(&page->lru, tmplist);
 
@@ -3415,7 +3415,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
                        break;
 
                /* would this page push the read over the rsize? */
-               if (*bytes + PAGE_CACHE_SIZE > rsize)
+               if (*bytes + PAGE_SIZE > rsize)
                        break;
 
                __SetPageLocked(page);
@@ -3424,7 +3424,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
                        break;
                }
                list_move_tail(&page->lru, tmplist);
-               (*bytes) += PAGE_CACHE_SIZE;
+               (*bytes) += PAGE_SIZE;
                expected_index++;
                (*nr_pages)++;
        }
@@ -3493,7 +3493,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
                 * reach this point however since we set ra_pages to 0 when the
                 * rsize is smaller than a cache page.
                 */
-               if (unlikely(rsize < PAGE_CACHE_SIZE)) {
+               if (unlikely(rsize < PAGE_SIZE)) {
                        add_credits_and_wake_if(server, credits, 0);
                        return 0;
                }
@@ -3512,7 +3512,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
                                list_del(&page->lru);
                                lru_cache_add_file(page);
                                unlock_page(page);
-                               page_cache_release(page);
+                               put_page(page);
                        }
                        rc = -ENOMEM;
                        add_credits_and_wake_if(server, credits, 0);
@@ -3524,7 +3524,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
                rdata->offset = offset;
                rdata->bytes = bytes;
                rdata->pid = pid;
-               rdata->pagesz = PAGE_CACHE_SIZE;
+               rdata->pagesz = PAGE_SIZE;
                rdata->read_into_pages = cifs_readpages_read_into_pages;
                rdata->credits = credits;
 
@@ -3542,7 +3542,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
                                page = rdata->pages[i];
                                lru_cache_add_file(page);
                                unlock_page(page);
-                               page_cache_release(page);
+                               put_page(page);
                        }
                        /* Fallback to the readpage in error/reconnect cases */
                        kref_put(&rdata->refcount, cifs_readdata_release);
@@ -3577,7 +3577,7 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
        read_data = kmap(page);
        /* for reads over a certain size could initiate async read ahead */
 
-       rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
+       rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
 
        if (rc < 0)
                goto io_error;
@@ -3587,8 +3587,8 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
        file_inode(file)->i_atime =
                current_fs_time(file_inode(file)->i_sb);
 
-       if (PAGE_CACHE_SIZE > rc)
-               memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
+       if (PAGE_SIZE > rc)
+               memset(read_data + rc, 0, PAGE_SIZE - rc);
 
        flush_dcache_page(page);
        SetPageUptodate(page);
@@ -3608,7 +3608,7 @@ read_complete:
 
 static int cifs_readpage(struct file *file, struct page *page)
 {
-       loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
+       loff_t offset = (loff_t)page->index << PAGE_SHIFT;
        int rc = -EACCES;
        unsigned int xid;
 
@@ -3679,8 +3679,8 @@ static int cifs_write_begin(struct file *file, struct address_space *mapping,
                        struct page **pagep, void **fsdata)
 {
        int oncethru = 0;
-       pgoff_t index = pos >> PAGE_CACHE_SHIFT;
-       loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
+       pgoff_t index = pos >> PAGE_SHIFT;
+       loff_t offset = pos & (PAGE_SIZE - 1);
        loff_t page_start = pos & PAGE_MASK;
        loff_t i_size;
        struct page *page;
@@ -3703,7 +3703,7 @@ start:
         * the server. If the write is short, we'll end up doing a sync write
         * instead.
         */
-       if (len == PAGE_CACHE_SIZE)
+       if (len == PAGE_SIZE)
                goto out;
 
        /*
@@ -3718,7 +3718,7 @@ start:
                    (offset == 0 && (pos + len) >= i_size)) {
                        zero_user_segments(page, 0, offset,
                                           offset + len,
-                                          PAGE_CACHE_SIZE);
+                                          PAGE_SIZE);
                        /*
                         * PageChecked means that the parts of the page
                         * to which we're not writing are considered up
@@ -3737,7 +3737,7 @@ start:
                 * do a sync write instead since PG_uptodate isn't set.
                 */
                cifs_readpage_worker(file, page, &page_start);
-               page_cache_release(page);
+               put_page(page);
                oncethru = 1;
                goto start;
        } else {
@@ -3764,7 +3764,7 @@ static void cifs_invalidate_page(struct page *page, unsigned int offset,
 {
        struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
 
-       if (offset == 0 && length == PAGE_CACHE_SIZE)
+       if (offset == 0 && length == PAGE_SIZE)
                cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
 }
 
@@ -3772,7 +3772,7 @@ static int cifs_launder_page(struct page *page)
 {
        int rc = 0;
        loff_t range_start = page_offset(page);
-       loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
+       loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = 0,
index aeb26dbfa1bf2dbfb2d111316064d240ef807336..5f9ad5c42180909e4aeeb2ab9edc68e41a478559 100644 (file)
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -59,7 +59,7 @@ static void cifs_set_ops(struct inode *inode)
 
                /* check if server can support readpages */
                if (cifs_sb_master_tcon(cifs_sb)->ses->server->maxBuf <
-                               PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
+                               PAGE_SIZE + MAX_CIFS_HDR_SIZE)
                        inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
                else
                        inode->i_data.a_ops = &cifs_addr_ops;
@@ -2019,8 +2019,8 @@ int cifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
 
 static int cifs_truncate_page(struct address_space *mapping, loff_t from)
 {
-       pgoff_t index = from >> PAGE_CACHE_SHIFT;
-       unsigned offset = from & (PAGE_CACHE_SIZE - 1);
+       pgoff_t index = from >> PAGE_SHIFT;
+       unsigned offset = from & (PAGE_SIZE - 1);
        struct page *page;
        int rc = 0;
 
@@ -2028,9 +2028,9 @@ static int cifs_truncate_page(struct address_space *mapping, loff_t from)
        if (!page)
                return -ENOMEM;
 
-       zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+       zero_user_segment(page, offset, PAGE_SIZE);
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
        return rc;
 }
 
index a8f3b589a2dfe5f030f4073aa46d79b965c77dc5..cfd91320e869f5ba6053f7364707310a18e68318 100644 (file)
--- a/fs/configfs/mount.c
+++ b/fs/configfs/mount.c
@@ -71,8 +71,8 @@ static int configfs_fill_super(struct super_block *sb, void *data, int silent)
        struct inode *inode;
        struct dentry *root;
 
-       sb->s_blocksize = PAGE_CACHE_SIZE;
-       sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+       sb->s_blocksize = PAGE_SIZE;
+       sb->s_blocksize_bits = PAGE_SHIFT;
        sb->s_magic = CONFIGFS_MAGIC;
        sb->s_op = &configfs_ops;
        sb->s_time_gran = 1;
index 445d1c2d7646751da385cddd50fa220952c49a6b..9d4e7ea311f455846b84b8b521d7d25a9ba20ffa 100644 (file)
--- a/Documentation/filesystems/cramfs.txt
+++ b/Documentation/filesystems/cramfs.txt
@@ -86,26 +86,26 @@ Block Size
 
 (Block size in cramfs refers to the size of input data that is
 compressed at a time.  It's intended to be somewhere around
-PAGE_CACHE_SIZE for cramfs_readpage's convenience.)
+PAGE_SIZE for cramfs_readpage's convenience.)
 
 The superblock ought to indicate the block size that the fs was
 written for, since comments in <linux/pagemap.h> indicate that
-PAGE_CACHE_SIZE may grow in future (if I interpret the comment
+PAGE_SIZE may grow in future (if I interpret the comment
 correctly).
 
-Currently, mkcramfs #define's PAGE_CACHE_SIZE as 4096 and uses that
-for blksize, whereas Linux-2.3.39 uses its PAGE_CACHE_SIZE, which in
+Currently, mkcramfs #define's PAGE_SIZE as 4096 and uses that
+for blksize, whereas Linux-2.3.39 uses its PAGE_SIZE, which in
 turn is defined as PAGE_SIZE (which can be as large as 32KB on arm).
 This discrepancy is a bug, though it's not clear which should be
 changed.
 
-One option is to change mkcramfs to take its PAGE_CACHE_SIZE from
+One option is to change mkcramfs to take its PAGE_SIZE from
 <asm/page.h>.  Personally I don't like this option, but it does
 require the least amount of change: just change `#define
-PAGE_CACHE_SIZE (4096)' to `#include <asm/page.h>'.  The disadvantage
+PAGE_SIZE (4096)' to `#include <asm/page.h>'.  The disadvantage
 is that the generated cramfs cannot always be shared between different
 kernels, not even necessarily kernels of the same architecture if
-PAGE_CACHE_SIZE is subject to change between kernel versions
+PAGE_SIZE is subject to change between kernel versions
 (currently possible with arm and ia64).
 
 The remaining options try to make cramfs more sharable.
@@ -126,22 +126,22 @@ size.  The options are:
   1. Always 4096 bytes.
 
   2. Writer chooses blocksize; kernel adapts but rejects blocksize >
-     PAGE_CACHE_SIZE.
+     PAGE_SIZE.
 
   3. Writer chooses blocksize; kernel adapts even to blocksize >
-     PAGE_CACHE_SIZE.
+     PAGE_SIZE.
 
 It's easy enough to change the kernel to use a smaller value than
-PAGE_CACHE_SIZE: just make cramfs_readpage read multiple blocks.
+PAGE_SIZE: just make cramfs_readpage read multiple blocks.
 
-The cost of option 1 is that kernels with a larger PAGE_CACHE_SIZE
+The cost of option 1 is that kernels with a larger PAGE_SIZE
 value don't get as good compression as they can.
 
 The cost of option 2 relative to option 1 is that the code uses
 variables instead of #define'd constants.  The gain is that people
-with kernels having larger PAGE_CACHE_SIZE can make use of that if
+with kernels having larger PAGE_SIZE can make use of that if
 they don't mind their cramfs being inaccessible to kernels with
-smaller PAGE_CACHE_SIZE values.
+smaller PAGE_SIZE values.
 
 Option 3 is easy to implement if we don't mind being CPU-inefficient:
 e.g. get readpage to decompress to a buffer of size MAX_BLKSIZE (which
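
The "read multiple blocks" idea this document discusses really is just a loop in readpage: decompress several sub-page fs blocks into one page cache page. A hypothetical sketch, where cramfs_read_block() and fs_blksize are invented names for illustration, not the real cramfs API:

    static int readpage_small_blocks(struct page *page, unsigned fs_blksize)
    {
            unsigned done;
            char *dst = kmap(page);

            /* one page cache page holds PAGE_SIZE / fs_blksize fs blocks */
            for (done = 0; done < PAGE_SIZE; done += fs_blksize)
                    cramfs_read_block(page->mapping->host,
                                      page->index * (PAGE_SIZE / fs_blksize)
                                              + done / fs_blksize,
                                      dst + done, fs_blksize);

            kunmap(page);
            flush_dcache_page(page);
            SetPageUptodate(page);
            unlock_page(page);
            return 0;
    }
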
index b862bc219cd7c7d269a7ba9e786559d1595315d7..3a32ddf98095faad5f56d2332ab35ef110e90e58 100644 (file)
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -137,7 +137,7 @@ static struct inode *get_cramfs_inode(struct super_block *sb,
  * page cache and dentry tree anyway..
  *
  * This also acts as a way to guarantee contiguous areas of up to
- * BLKS_PER_BUF*PAGE_CACHE_SIZE, so that the caller doesn't need to
+ * BLKS_PER_BUF*PAGE_SIZE, so that the caller doesn't need to
  * worry about end-of-buffer issues even when decompressing a full
  * page cache.
  */
@@ -152,7 +152,7 @@ static struct inode *get_cramfs_inode(struct super_block *sb,
  */
 #define BLKS_PER_BUF_SHIFT     (2)
 #define BLKS_PER_BUF           (1 << BLKS_PER_BUF_SHIFT)
-#define BUFFER_SIZE            (BLKS_PER_BUF*PAGE_CACHE_SIZE)
+#define BUFFER_SIZE            (BLKS_PER_BUF*PAGE_SIZE)
 
 static unsigned char read_buffers[READ_BUFFERS][BUFFER_SIZE];
 static unsigned buffer_blocknr[READ_BUFFERS];
@@ -173,8 +173,8 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i
 
        if (!len)
                return NULL;
-       blocknr = offset >> PAGE_CACHE_SHIFT;
-       offset &= PAGE_CACHE_SIZE - 1;
+       blocknr = offset >> PAGE_SHIFT;
+       offset &= PAGE_SIZE - 1;
 
        /* Check if an existing buffer already has the data.. */
        for (i = 0; i < READ_BUFFERS; i++) {
@@ -184,14 +184,14 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i
                        continue;
                if (blocknr < buffer_blocknr[i])
                        continue;
-               blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_CACHE_SHIFT;
+               blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_SHIFT;
                blk_offset += offset;
                if (blk_offset + len > BUFFER_SIZE)
                        continue;
                return read_buffers[i] + blk_offset;
        }
 
-       devsize = mapping->host->i_size >> PAGE_CACHE_SHIFT;
+       devsize = mapping->host->i_size >> PAGE_SHIFT;
 
        /* Ok, read in BLKS_PER_BUF pages completely first. */
        for (i = 0; i < BLKS_PER_BUF; i++) {
@@ -213,7 +213,7 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i
                        wait_on_page_locked(page);
                        if (!PageUptodate(page)) {
                                /* asynchronous error */
-                               page_cache_release(page);
+                               put_page(page);
                                pages[i] = NULL;
                        }
                }
@@ -229,12 +229,12 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i
                struct page *page = pages[i];
 
                if (page) {
-                       memcpy(data, kmap(page), PAGE_CACHE_SIZE);
+                       memcpy(data, kmap(page), PAGE_SIZE);
                        kunmap(page);
-                       page_cache_release(page);
+                       put_page(page);
                } else
-                       memset(data, 0, PAGE_CACHE_SIZE);
-               data += PAGE_CACHE_SIZE;
+                       memset(data, 0, PAGE_SIZE);
+               data += PAGE_SIZE;
        }
        return read_buffers[buffer] + offset;
 }
@@ -353,7 +353,7 @@ static int cramfs_statfs(struct dentry *dentry, struct kstatfs *buf)
        u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
 
        buf->f_type = CRAMFS_MAGIC;
-       buf->f_bsize = PAGE_CACHE_SIZE;
+       buf->f_bsize = PAGE_SIZE;
        buf->f_blocks = CRAMFS_SB(sb)->blocks;
        buf->f_bfree = 0;
        buf->f_bavail = 0;
@@ -496,7 +496,7 @@ static int cramfs_readpage(struct file *file, struct page *page)
        int bytes_filled;
        void *pgdata;
 
-       maxblock = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+       maxblock = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        bytes_filled = 0;
        pgdata = kmap(page);
 
@@ -516,14 +516,14 @@ static int cramfs_readpage(struct file *file, struct page *page)
 
                if (compr_len == 0)
                        ; /* hole */
-               else if (unlikely(compr_len > (PAGE_CACHE_SIZE << 1))) {
+               else if (unlikely(compr_len > (PAGE_SIZE << 1))) {
                        pr_err("bad compressed blocksize %u\n",
                                compr_len);
                        goto err;
                } else {
                        mutex_lock(&read_mutex);
                        bytes_filled = cramfs_uncompress_block(pgdata,
-                                PAGE_CACHE_SIZE,
+                                PAGE_SIZE,
                                 cramfs_read(sb, start_offset, compr_len),
                                 compr_len);
                        mutex_unlock(&read_mutex);
@@ -532,7 +532,7 @@ static int cramfs_readpage(struct file *file, struct page *page)
                }
        }
 
-       memset(pgdata + bytes_filled, 0, PAGE_CACHE_SIZE - bytes_filled);
+       memset(pgdata + bytes_filled, 0, PAGE_SIZE - bytes_filled);
        flush_dcache_page(page);
        kunmap(page);
        SetPageUptodate(page);
index 06cd1a22240b4c01c46469bab3e431c800f77d13..2fc8c43ce531de02379c93e85c03f95c0d3ca793 100644 (file)
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -26,6 +26,7 @@
 #include <linux/ratelimit.h>
 #include <linux/bio.h>
 #include <linux/dcache.h>
+#include <linux/namei.h>
 #include <linux/fscrypto.h>
 #include <linux/ecryptfs.h>
 
@@ -81,13 +82,14 @@ EXPORT_SYMBOL(fscrypt_release_ctx);
 /**
  * fscrypt_get_ctx() - Gets an encryption context
  * @inode:       The inode for which we are doing the crypto
+ * @gfp_flags:   The gfp flag for memory allocation
  *
  * Allocates and initializes an encryption context.
  *
  * Return: An allocated and initialized encryption context on success; error
  * value or NULL otherwise.
  */
-struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode)
+struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode, gfp_t gfp_flags)
 {
        struct fscrypt_ctx *ctx = NULL;
        struct fscrypt_info *ci = inode->i_crypt_info;
@@ -113,7 +115,7 @@ struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode)
                list_del(&ctx->free_list);
        spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
        if (!ctx) {
-               ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
+               ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
                if (!ctx)
                        return ERR_PTR(-ENOMEM);
                ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
@@ -147,7 +149,8 @@ typedef enum {
 
 static int do_page_crypto(struct inode *inode,
                        fscrypt_direction_t rw, pgoff_t index,
-                       struct page *src_page, struct page *dest_page)
+                       struct page *src_page, struct page *dest_page,
+                       gfp_t gfp_flags)
 {
        u8 xts_tweak[FS_XTS_TWEAK_SIZE];
        struct skcipher_request *req = NULL;
@@ -157,7 +160,7 @@ static int do_page_crypto(struct inode *inode,
        struct crypto_skcipher *tfm = ci->ci_ctfm;
        int res = 0;
 
-       req = skcipher_request_alloc(tfm, GFP_NOFS);
+       req = skcipher_request_alloc(tfm, gfp_flags);
        if (!req) {
                printk_ratelimited(KERN_ERR
                                "%s: crypto_request_alloc() failed\n",
@@ -175,10 +178,10 @@ static int do_page_crypto(struct inode *inode,
                        FS_XTS_TWEAK_SIZE - sizeof(index));
 
        sg_init_table(&dst, 1);
-       sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
+       sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
        sg_init_table(&src, 1);
-       sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
-       skcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
+       sg_set_page(&src, src_page, PAGE_SIZE, 0);
+       skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE,
                                        xts_tweak);
        if (rw == FS_DECRYPT)
                res = crypto_skcipher_decrypt(req);
@@ -199,10 +202,9 @@ static int do_page_crypto(struct inode *inode,
        return 0;
 }
 
-static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx)
+static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx, gfp_t gfp_flags)
 {
-       ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool,
-                                                       GFP_NOWAIT);
+       ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
        if (ctx->w.bounce_page == NULL)
                return ERR_PTR(-ENOMEM);
        ctx->flags |= FS_WRITE_PATH_FL;
@@ -213,6 +215,7 @@ static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx)
  * fscypt_encrypt_page() - Encrypts a page
  * @inode:          The inode for which the encryption should take place
  * @plaintext_page: The page to encrypt. Must be locked.
+ * @gfp_flags:      The gfp flag for memory allocation
  *
  * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
  * encryption context.
@@ -225,7 +228,7 @@ static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx)
  * error value or NULL.
  */
 struct page *fscrypt_encrypt_page(struct inode *inode,
-                               struct page *plaintext_page)
+                               struct page *plaintext_page, gfp_t gfp_flags)
 {
        struct fscrypt_ctx *ctx;
        struct page *ciphertext_page = NULL;
@@ -233,18 +236,19 @@ struct page *fscrypt_encrypt_page(struct inode *inode,
 
        BUG_ON(!PageLocked(plaintext_page));
 
-       ctx = fscrypt_get_ctx(inode);
+       ctx = fscrypt_get_ctx(inode, gfp_flags);
        if (IS_ERR(ctx))
                return (struct page *)ctx;
 
        /* The encryption operation will require a bounce page. */
-       ciphertext_page = alloc_bounce_page(ctx);
+       ciphertext_page = alloc_bounce_page(ctx, gfp_flags);
        if (IS_ERR(ciphertext_page))
                goto errout;
 
        ctx->w.control_page = plaintext_page;
        err = do_page_crypto(inode, FS_ENCRYPT, plaintext_page->index,
-                                       plaintext_page, ciphertext_page);
+                                       plaintext_page, ciphertext_page,
+                                       gfp_flags);
        if (err) {
                ciphertext_page = ERR_PTR(err);
                goto errout;
@@ -275,7 +279,7 @@ int fscrypt_decrypt_page(struct page *page)
        BUG_ON(!PageLocked(page));
 
        return do_page_crypto(page->mapping->host,
-                       FS_DECRYPT, page->index, page, page);
+                       FS_DECRYPT, page->index, page, page, GFP_NOFS);
 }
 EXPORT_SYMBOL(fscrypt_decrypt_page);
 
@@ -287,13 +291,13 @@ int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk,
        struct bio *bio;
        int ret, err = 0;
 
-       BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);
+       BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);
 
-       ctx = fscrypt_get_ctx(inode);
+       ctx = fscrypt_get_ctx(inode, GFP_NOFS);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
 
-       ciphertext_page = alloc_bounce_page(ctx);
+       ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT);
        if (IS_ERR(ciphertext_page)) {
                err = PTR_ERR(ciphertext_page);
                goto errout;
@@ -301,11 +305,12 @@ int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk,
 
        while (len--) {
                err = do_page_crypto(inode, FS_ENCRYPT, lblk,
-                                               ZERO_PAGE(0), ciphertext_page);
+                                       ZERO_PAGE(0), ciphertext_page,
+                                       GFP_NOFS);
                if (err)
                        goto errout;
 
-               bio = bio_alloc(GFP_KERNEL, 1);
+               bio = bio_alloc(GFP_NOWAIT, 1);
                if (!bio) {
                        err = -ENOMEM;
                        goto errout;
@@ -345,13 +350,20 @@ EXPORT_SYMBOL(fscrypt_zeroout_range);
  */
 static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
 {
-       struct inode *dir = d_inode(dentry->d_parent);
-       struct fscrypt_info *ci = dir->i_crypt_info;
+       struct dentry *dir;
+       struct fscrypt_info *ci;
        int dir_has_key, cached_with_key;
 
-       if (!dir->i_sb->s_cop->is_encrypted(dir))
+       if (flags & LOOKUP_RCU)
+               return -ECHILD;
+
+       dir = dget_parent(dentry);
+       if (!d_inode(dir)->i_sb->s_cop->is_encrypted(d_inode(dir))) {
+               dput(dir);
                return 0;
+       }
 
+       ci = d_inode(dir)->i_crypt_info;
        if (ci && ci->ci_keyring_key &&
            (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
                                          (1 << KEY_FLAG_REVOKED) |
@@ -363,6 +375,7 @@ static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
        cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
        spin_unlock(&dentry->d_lock);
        dir_has_key = (ci != NULL);
+       dput(dir);
 
        /*
         * If the dentry was cached without the key, and it is a
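
Most of the fs/crypto hunks above thread a new gfp_t parameter through the encryption path (the final d_revalidate hunk is a separate RCU-walk fix): writeback callers can now say how hard the crypto path may try to allocate, instead of the old hard-coded GFP_NOFS/GFP_NOWAIT. A sketch of a caller-side retry policy (illustrative only; the back-off is an assumption, not lifted from a particular filesystem):

    static int encrypt_for_writeback(struct inode *inode, struct page *page,
                                     struct page **out)
    {
            struct page *ciphertext;

            /* first attempt with the usual writeback-safe mask */
            ciphertext = fscrypt_encrypt_page(inode, page, GFP_NOFS);
            if (IS_ERR(ciphertext) && PTR_ERR(ciphertext) == -ENOMEM) {
                    /* back off briefly, then insist (illustrative policy) */
                    congestion_wait(BLK_RW_ASYNC, HZ / 50);
                    ciphertext = fscrypt_encrypt_page(inode, page,
                                                      GFP_NOFS | __GFP_NOFAIL);
            }
            if (IS_ERR(ciphertext))
                    return PTR_ERR(ciphertext);
            *out = ciphertext;
            return 0;
    }
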
index 90322eb7498c13289a346ce82a64d8ab34c23851..75ba46d82a761cc8aced5e8ed2041a1d2c336e60 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -323,7 +323,7 @@ static int dax_load_hole(struct address_space *mapping, struct page *page,
        size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (vmf->pgoff >= size) {
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
                return VM_FAULT_SIGBUS;
        }
 
@@ -351,7 +351,7 @@ static int copy_user_bh(struct page *to, struct inode *inode,
 }
 
 #define NO_SECTOR -1
-#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_CACHE_SHIFT))
+#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_SHIFT))
 
 static int dax_radix_entry(struct address_space *mapping, pgoff_t index,
                sector_t sector, bool pmd_entry, bool dirty)
@@ -506,8 +506,8 @@ int dax_writeback_mapping_range(struct address_space *mapping,
        if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
                return 0;
 
-       start_index = wbc->range_start >> PAGE_CACHE_SHIFT;
-       end_index = wbc->range_end >> PAGE_CACHE_SHIFT;
+       start_index = wbc->range_start >> PAGE_SHIFT;
+       end_index = wbc->range_end >> PAGE_SHIFT;
        pmd_index = DAX_PMD_INDEX(start_index);
 
        rcu_read_lock();
@@ -642,12 +642,12 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
        page = find_get_page(mapping, vmf->pgoff);
        if (page) {
                if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
-                       page_cache_release(page);
+                       put_page(page);
                        return VM_FAULT_RETRY;
                }
                if (unlikely(page->mapping != mapping)) {
                        unlock_page(page);
-                       page_cache_release(page);
+                       put_page(page);
                        goto repeat;
                }
                size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -711,10 +711,10 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 
        if (page) {
                unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
-                                                       PAGE_CACHE_SIZE, 0);
+                                                       PAGE_SIZE, 0);
                delete_from_page_cache(page);
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
                page = NULL;
        }
 
@@ -747,7 +747,7 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
  unlock_page:
        if (page) {
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
        }
        goto out;
 }
@@ -1094,7 +1094,7 @@ EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
  * you are truncating a file, the helper function dax_truncate_page() may be
  * more convenient.
  *
- * We work in terms of PAGE_CACHE_SIZE here for commonality with
+ * We work in terms of PAGE_SIZE here for commonality with
  * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
  * took care of disposing of the unnecessary blocks.  Even if the filesystem
  * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
@@ -1104,18 +1104,18 @@ int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
                                                        get_block_t get_block)
 {
        struct buffer_head bh;
-       pgoff_t index = from >> PAGE_CACHE_SHIFT;
-       unsigned offset = from & (PAGE_CACHE_SIZE-1);
+       pgoff_t index = from >> PAGE_SHIFT;
+       unsigned offset = from & (PAGE_SIZE-1);
        int err;
 
        /* Block boundary? Nothing to do */
        if (!length)
                return 0;
-       BUG_ON((offset + length) > PAGE_CACHE_SIZE);
+       BUG_ON((offset + length) > PAGE_SIZE);
 
        memset(&bh, 0, sizeof(bh));
        bh.b_bdev = inode->i_sb->s_bdev;
-       bh.b_size = PAGE_CACHE_SIZE;
+       bh.b_size = PAGE_SIZE;
        err = get_block(inode, index, &bh, 0);
        if (err < 0)
                return err;
@@ -1123,7 +1123,7 @@ int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
                struct block_device *bdev = bh.b_bdev;
                struct blk_dax_ctl dax = {
                        .sector = to_sector(&bh, inode),
-                       .size = PAGE_CACHE_SIZE,
+                       .size = PAGE_SIZE,
                };
 
                if (dax_map_atomic(bdev, &dax) < 0)
@@ -1146,7 +1146,7 @@ EXPORT_SYMBOL_GPL(dax_zero_page_range);
  * Similar to block_truncate_page(), this function can be called by a
  * filesystem when it is truncating a DAX file to handle the partial page.
  *
- * We work in terms of PAGE_CACHE_SIZE here for commonality with
+ * We work in terms of PAGE_SIZE here for commonality with
  * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
  * took care of disposing of the unnecessary blocks.  Even if the filesystem
  * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
@@ -1154,7 +1154,7 @@ EXPORT_SYMBOL_GPL(dax_zero_page_range);
  */
 int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
 {
-       unsigned length = PAGE_CACHE_ALIGN(from) - from;
+       unsigned length = PAGE_ALIGN(from) - from;
        return dax_zero_page_range(inode, from, length, get_block);
 }
 EXPORT_SYMBOL_GPL(dax_truncate_page);
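[ Note: all of the PAGE_CACHE_*/page_cache_* substitutions in this merge are
mechanical: the removed names were plain aliases in include/linux/pagemap.h of
the pre-4.6 tree, along these lines:

#define PAGE_CACHE_SHIFT        PAGE_SHIFT
#define PAGE_CACHE_SIZE         PAGE_SIZE
#define PAGE_CACHE_MASK         PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)  (((addr) + PAGE_CACHE_SIZE - 1) & PAGE_CACHE_MASK)

#define page_cache_get(page)            get_page(page)
#define page_cache_release(page)        put_page(page)

So the fs/dax.c hunks above, and every similar hunk below, change spelling
only, not behavior. ]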
index 32ceae3e611297a9e2ff99c6be92c544026c870d..d5ecc6e477daa3ef63d630a42954ea57cb6d1e5b 100644 (file)
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1667,7 +1667,8 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
                                DCACHE_OP_REVALIDATE    |
                                DCACHE_OP_WEAK_REVALIDATE       |
                                DCACHE_OP_DELETE        |
-                               DCACHE_OP_SELECT_INODE));
+                               DCACHE_OP_SELECT_INODE  |
+                               DCACHE_OP_REAL));
        dentry->d_op = op;
        if (!op)
                return;
@@ -1685,6 +1686,8 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
                dentry->d_flags |= DCACHE_OP_PRUNE;
        if (op->d_select_inode)
                dentry->d_flags |= DCACHE_OP_SELECT_INODE;
+       if (op->d_real)
+               dentry->d_flags |= DCACHE_OP_REAL;
 
 }
 EXPORT_SYMBOL(d_set_d_op);
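[ Note: d_set_d_op() mirrors each dentry_operations method with a DCACHE_OP_*
bit so hot paths can test a cached flag instead of chasing d_op pointers. A
consumer-side sketch of that idiom for the new DCACHE_OP_REAL bit (hypothetical
helper; the exact ->d_real() signature is assumed, it is not shown in this hunk):

static inline struct dentry *example_d_real(struct dentry *dentry)
{
        if (unlikely(dentry->d_flags & DCACHE_OP_REAL))    /* bit set by d_set_d_op() */
                return dentry->d_op->d_real(dentry, NULL); /* e.g. overlayfs */
        return dentry;                                     /* no op: already real */
}
]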
index bece948b363df5b8d331252cd9468d1bfebead68..8580831ed237f4e81df7eb607bcd1a89c3766f33 100644 (file)
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -457,7 +457,7 @@ struct dentry *debugfs_create_automount(const char *name,
        if (unlikely(!inode))
                return failed_creating(dentry);
 
-       inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
+       make_empty_dir_inode(inode);
        inode->i_flags |= S_AUTOMOUNT;
        inode->i_private = data;
        dentry->d_fsdata = (void *)f;
index 655f21f991606b5bf2bef600a54b814994dbed98..0b2954d7172d78e78ac98238dfb7b50c5dccb558 100644 (file)
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -128,6 +128,7 @@ static const match_table_t tokens = {
 struct pts_fs_info {
        struct ida allocated_ptys;
        struct pts_mount_opts mount_opts;
+       struct super_block *sb;
        struct dentry *ptmx_dentry;
 };
 
@@ -358,7 +359,7 @@ static const struct super_operations devpts_sops = {
        .show_options   = devpts_show_options,
 };
 
-static void *new_pts_fs_info(void)
+static void *new_pts_fs_info(struct super_block *sb)
 {
        struct pts_fs_info *fsi;
 
@@ -369,6 +370,7 @@ static void *new_pts_fs_info(void)
        ida_init(&fsi->allocated_ptys);
        fsi->mount_opts.mode = DEVPTS_DEFAULT_MODE;
        fsi->mount_opts.ptmxmode = DEVPTS_DEFAULT_PTMX_MODE;
+       fsi->sb = sb;
 
        return fsi;
 }
@@ -384,7 +386,7 @@ devpts_fill_super(struct super_block *s, void *data, int silent)
        s->s_op = &devpts_sops;
        s->s_time_gran = 1;
 
-       s->s_fs_info = new_pts_fs_info();
+       s->s_fs_info = new_pts_fs_info(s);
        if (!s->s_fs_info)
                goto fail;
 
@@ -524,17 +526,14 @@ static struct file_system_type devpts_fs_type = {
  * to the System V naming convention
  */
 
-int devpts_new_index(struct inode *ptmx_inode)
+int devpts_new_index(struct pts_fs_info *fsi)
 {
-       struct super_block *sb = pts_sb_from_inode(ptmx_inode);
-       struct pts_fs_info *fsi;
        int index;
        int ida_ret;
 
-       if (!sb)
+       if (!fsi)
                return -ENODEV;
 
-       fsi = DEVPTS_SB(sb);
 retry:
        if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL))
                return -ENOMEM;
@@ -564,11 +563,8 @@ retry:
        return index;
 }
 
-void devpts_kill_index(struct inode *ptmx_inode, int idx)
+void devpts_kill_index(struct pts_fs_info *fsi, int idx)
 {
-       struct super_block *sb = pts_sb_from_inode(ptmx_inode);
-       struct pts_fs_info *fsi = DEVPTS_SB(sb);
-
        mutex_lock(&allocated_ptys_lock);
        ida_remove(&fsi->allocated_ptys, idx);
        pty_count--;
@@ -578,21 +574,25 @@ void devpts_kill_index(struct inode *ptmx_inode, int idx)
 /*
  * pty code needs to hold extra references in case of last /dev/tty close
  */
-
-void devpts_add_ref(struct inode *ptmx_inode)
+struct pts_fs_info *devpts_get_ref(struct inode *ptmx_inode, struct file *file)
 {
-       struct super_block *sb = pts_sb_from_inode(ptmx_inode);
+       struct super_block *sb;
+       struct pts_fs_info *fsi;
+
+       sb = pts_sb_from_inode(ptmx_inode);
+       if (!sb)
+               return NULL;
+       fsi = DEVPTS_SB(sb);
+       if (!fsi)
+               return NULL;
 
        atomic_inc(&sb->s_active);
-       ihold(ptmx_inode);
+       return fsi;
 }
 
-void devpts_del_ref(struct inode *ptmx_inode)
+void devpts_put_ref(struct pts_fs_info *fsi)
 {
-       struct super_block *sb = pts_sb_from_inode(ptmx_inode);
-
-       iput(ptmx_inode);
-       deactivate_super(sb);
+       deactivate_super(fsi->sb);
 }
 
 /**
@@ -604,22 +604,20 @@ void devpts_del_ref(struct inode *ptmx_inode)
  *
  * The created inode is returned. Remove it from /dev/pts/ by devpts_pty_kill.
  */
-struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
-               void *priv)
+struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv)
 {
        struct dentry *dentry;
-       struct super_block *sb = pts_sb_from_inode(ptmx_inode);
+       struct super_block *sb;
        struct inode *inode;
        struct dentry *root;
-       struct pts_fs_info *fsi;
        struct pts_mount_opts *opts;
        char s[12];
 
-       if (!sb)
+       if (!fsi)
                return ERR_PTR(-ENODEV);
 
+       sb = fsi->sb;
        root = sb->s_root;
-       fsi = DEVPTS_SB(sb);
        opts = &fsi->mount_opts;
 
        inode = new_inode(sb);
@@ -630,25 +628,21 @@ struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
        inode->i_uid = opts->setuid ? opts->uid : current_fsuid();
        inode->i_gid = opts->setgid ? opts->gid : current_fsgid();
        inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
-       init_special_inode(inode, S_IFCHR|opts->mode, device);
-       inode->i_private = priv;
+       init_special_inode(inode, S_IFCHR|opts->mode, MKDEV(UNIX98_PTY_SLAVE_MAJOR, index));
 
        sprintf(s, "%d", index);
 
-       inode_lock(d_inode(root));
-
        dentry = d_alloc_name(root, s);
        if (dentry) {
+               dentry->d_fsdata = priv;
                d_add(dentry, inode);
                fsnotify_create(d_inode(root), dentry);
        } else {
                iput(inode);
-               inode = ERR_PTR(-ENOMEM);
+               dentry = ERR_PTR(-ENOMEM);
        }
 
-       inode_unlock(d_inode(root));
-
-       return inode;
+       return dentry;
 }
 
 /**
@@ -657,24 +651,10 @@ struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
  *
  * Returns whatever was passed as priv in devpts_pty_new for a given inode.
  */
-void *devpts_get_priv(struct inode *pts_inode)
+void *devpts_get_priv(struct dentry *dentry)
 {
-       struct dentry *dentry;
-       void *priv = NULL;
-
-       BUG_ON(pts_inode->i_rdev == MKDEV(TTYAUX_MAJOR, PTMX_MINOR));
-
-       /* Ensure dentry has not been deleted by devpts_pty_kill() */
-       dentry = d_find_alias(pts_inode);
-       if (!dentry)
-               return NULL;
-
-       if (pts_inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC)
-               priv = pts_inode->i_private;
-
-       dput(dentry);
-
-       return priv;
+       WARN_ON_ONCE(dentry->d_sb->s_magic != DEVPTS_SUPER_MAGIC);
+       return dentry->d_fsdata;
 }
 
 /**
@@ -683,24 +663,14 @@ void *devpts_get_priv(struct inode *pts_inode)
  *
  * This is an inverse operation of devpts_pty_new.
  */
-void devpts_pty_kill(struct inode *inode)
+void devpts_pty_kill(struct dentry *dentry)
 {
-       struct super_block *sb = pts_sb_from_inode(inode);
-       struct dentry *root = sb->s_root;
-       struct dentry *dentry;
+       WARN_ON_ONCE(dentry->d_sb->s_magic != DEVPTS_SUPER_MAGIC);
 
-       BUG_ON(inode->i_rdev == MKDEV(TTYAUX_MAJOR, PTMX_MINOR));
-
-       inode_lock(d_inode(root));
-
-       dentry = d_find_alias(inode);
-
-       drop_nlink(inode);
+       dentry->d_fsdata = NULL;
+       drop_nlink(dentry->d_inode);
        d_delete(dentry);
        dput(dentry);   /* d_alloc_name() in devpts_pty_new() */
-       dput(dentry);           /* d_find_alias above */
-
-       inode_unlock(d_inode(root));
 }
 
 static int __init init_devpts_fs(void)
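[ Note: the devpts rework above moves the caller-held reference from the ptmx
inode to the filesystem's pts_fs_info. A sketch of the intended call sequence
under the new signatures (error handling trimmed; ptmx_inode, filp and priv
stand in for the pty driver's context):

static int example_alloc_slave(struct inode *ptmx_inode, struct file *filp,
                               void *priv)
{
        struct pts_fs_info *fsi;
        struct dentry *slave;
        int index;

        fsi = devpts_get_ref(ptmx_inode, filp);   /* pins the devpts super_block */
        if (!fsi)
                return -ENODEV;

        index = devpts_new_index(fsi);            /* reserve a pty number */
        if (index < 0) {
                devpts_put_ref(fsi);
                return index;
        }

        slave = devpts_pty_new(fsi, index, priv); /* creates /dev/pts/<index> */
        if (IS_ERR(slave)) {
                devpts_kill_index(fsi, index);
                devpts_put_ref(fsi);
                return PTR_ERR(slave);
        }
        return index;                             /* later: devpts_pty_kill(slave) */
}
]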
index 476f1ecbd1f0e585171b8dfa2970ada3cb5cc2c7..472037732daf00c093846a93967a4858f0e382c7 100644 (file)
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -172,7 +172,7 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
                 */
                if (dio->page_errors == 0)
                        dio->page_errors = ret;
-               page_cache_get(page);
+               get_page(page);
                dio->pages[0] = page;
                sdio->head = 0;
                sdio->tail = 1;
@@ -424,7 +424,7 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
 static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio)
 {
        while (sdio->head < sdio->tail)
-               page_cache_release(dio->pages[sdio->head++]);
+               put_page(dio->pages[sdio->head++]);
 }
 
 /*
@@ -487,7 +487,7 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio)
                        if (dio->rw == READ && !PageCompound(page) &&
                                        dio->should_dirty)
                                set_page_dirty_lock(page);
-                       page_cache_release(page);
+                       put_page(page);
                }
                err = bio->bi_error;
                bio_put(bio);
@@ -696,7 +696,7 @@ static inline int dio_bio_add_page(struct dio_submit *sdio)
                 */
                if ((sdio->cur_page_len + sdio->cur_page_offset) == PAGE_SIZE)
                        sdio->pages_in_io--;
-               page_cache_get(sdio->cur_page);
+               get_page(sdio->cur_page);
                sdio->final_block_in_bio = sdio->cur_page_block +
                        (sdio->cur_page_len >> sdio->blkbits);
                ret = 0;
@@ -810,13 +810,13 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
         */
        if (sdio->cur_page) {
                ret = dio_send_cur_page(dio, sdio, map_bh);
-               page_cache_release(sdio->cur_page);
+               put_page(sdio->cur_page);
                sdio->cur_page = NULL;
                if (ret)
                        return ret;
        }
 
-       page_cache_get(page);           /* It is in dio */
+       get_page(page);         /* It is in dio */
        sdio->cur_page = page;
        sdio->cur_page_offset = offset;
        sdio->cur_page_len = len;
@@ -830,7 +830,7 @@ out:
        if (sdio->boundary) {
                ret = dio_send_cur_page(dio, sdio, map_bh);
                dio_bio_submit(dio, sdio);
-               page_cache_release(sdio->cur_page);
+               put_page(sdio->cur_page);
                sdio->cur_page = NULL;
        }
        return ret;
@@ -947,7 +947,7 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
 
                                ret = get_more_blocks(dio, sdio, map_bh);
                                if (ret) {
-                                       page_cache_release(page);
+                                       put_page(page);
                                        goto out;
                                }
                                if (!buffer_mapped(map_bh))
@@ -988,7 +988,7 @@ do_holes:
 
                                /* AKPM: eargh, -ENOTBLK is a hack */
                                if (dio->rw & WRITE) {
-                                       page_cache_release(page);
+                                       put_page(page);
                                        return -ENOTBLK;
                                }
 
@@ -1001,7 +1001,7 @@ do_holes:
                                if (sdio->block_in_file >=
                                                i_size_aligned >> blkbits) {
                                        /* We hit eof */
-                                       page_cache_release(page);
+                                       put_page(page);
                                        goto out;
                                }
                                zero_user(page, from, 1 << blkbits);
@@ -1041,7 +1041,7 @@ do_holes:
                                                  sdio->next_block_for_io,
                                                  map_bh);
                        if (ret) {
-                               page_cache_release(page);
+                               put_page(page);
                                goto out;
                        }
                        sdio->next_block_for_io += this_chunk_blocks;
@@ -1057,7 +1057,7 @@ next_block:
                }
 
                /* Drop the ref which was taken in get_user_pages() */
-               page_cache_release(page);
+               put_page(page);
        }
 out:
        return ret;
@@ -1281,7 +1281,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
                ret2 = dio_send_cur_page(dio, &sdio, &map_bh);
                if (retval == 0)
                        retval = ret2;
-               page_cache_release(sdio.cur_page);
+               put_page(sdio.cur_page);
                sdio.cur_page = NULL;
        }
        if (sdio.bio)
index 00640e70ed7ae6175b328575ff109aa27c73ab1d..1ab012a27d9f3b209e6d943741a0f9b9c974201d 100644 (file)
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -640,7 +640,7 @@ static int receive_from_sock(struct connection *con)
                con->rx_page = alloc_page(GFP_ATOMIC);
                if (con->rx_page == NULL)
                        goto out_resched;
-               cbuf_init(&con->cb, PAGE_CACHE_SIZE);
+               cbuf_init(&con->cb, PAGE_SIZE);
        }
 
        /*
@@ -657,7 +657,7 @@ static int receive_from_sock(struct connection *con)
         * buffer and the start of the currently used section (cb.base)
         */
        if (cbuf_data(&con->cb) >= con->cb.base) {
-               iov[0].iov_len = PAGE_CACHE_SIZE - cbuf_data(&con->cb);
+               iov[0].iov_len = PAGE_SIZE - cbuf_data(&con->cb);
                iov[1].iov_len = con->cb.base;
                iov[1].iov_base = page_address(con->rx_page);
                nvec = 2;
@@ -675,7 +675,7 @@ static int receive_from_sock(struct connection *con)
        ret = dlm_process_incoming_buffer(con->nodeid,
                                          page_address(con->rx_page),
                                          con->cb.base, con->cb.len,
-                                         PAGE_CACHE_SIZE);
+                                         PAGE_SIZE);
        if (ret == -EBADMSG) {
                log_print("lowcomms: addr=%p, base=%u, len=%u, read=%d",
                          page_address(con->rx_page), con->cb.base,
@@ -1416,7 +1416,7 @@ void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
        spin_lock(&con->writequeue_lock);
        e = list_entry(con->writequeue.prev, struct writequeue_entry, list);
        if ((&e->list == &con->writequeue) ||
-           (PAGE_CACHE_SIZE - e->end < len)) {
+           (PAGE_SIZE - e->end < len)) {
                e = NULL;
        } else {
                offset = e->end;
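[ Note: receive_from_sock() above treats con->rx_page as a PAGE_SIZE ring
buffer; when the free region wraps past the end of the page it is described
with two iovecs. The same split in isolation (generic sketch; buf, wr and rd
are hypothetical names for the ring base, write offset and read offset):

struct kvec iov[2];
int nvec;

if (wr >= rd) {                         /* free region wraps past page end */
        iov[0].iov_base = buf + wr;
        iov[0].iov_len  = PAGE_SIZE - wr;
        iov[1].iov_base = buf;
        iov[1].iov_len  = rd;
        nvec = 2;
} else {                                /* free region is contiguous */
        iov[0].iov_base = buf + wr;
        iov[0].iov_len  = rd - wr;
        nvec = 1;
}
]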
index 64026e53722a2f20c9aef8e2bda6f73087b87b6b..d09cb4cdd09f18e56e0f4135d810c732cc609365 100644 (file)
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -286,7 +286,7 @@ int virt_to_scatterlist(const void *addr, int size, struct scatterlist *sg,
                pg = virt_to_page(addr);
                offset = offset_in_page(addr);
                sg_set_page(&sg[i], pg, 0, offset);
-               remainder_of_page = PAGE_CACHE_SIZE - offset;
+               remainder_of_page = PAGE_SIZE - offset;
                if (size >= remainder_of_page) {
                        sg[i].length = remainder_of_page;
                        addr += remainder_of_page;
@@ -400,7 +400,7 @@ static loff_t lower_offset_for_page(struct ecryptfs_crypt_stat *crypt_stat,
                                    struct page *page)
 {
        return ecryptfs_lower_header_size(crypt_stat) +
-              ((loff_t)page->index << PAGE_CACHE_SHIFT);
+              ((loff_t)page->index << PAGE_SHIFT);
 }
 
 /**
@@ -428,7 +428,7 @@ static int crypt_extent(struct ecryptfs_crypt_stat *crypt_stat,
        size_t extent_size = crypt_stat->extent_size;
        int rc;
 
-       extent_base = (((loff_t)page_index) * (PAGE_CACHE_SIZE / extent_size));
+       extent_base = (((loff_t)page_index) * (PAGE_SIZE / extent_size));
        rc = ecryptfs_derive_iv(extent_iv, crypt_stat,
                                (extent_base + extent_offset));
        if (rc) {
@@ -498,7 +498,7 @@ int ecryptfs_encrypt_page(struct page *page)
        }
 
        for (extent_offset = 0;
-            extent_offset < (PAGE_CACHE_SIZE / crypt_stat->extent_size);
+            extent_offset < (PAGE_SIZE / crypt_stat->extent_size);
             extent_offset++) {
                rc = crypt_extent(crypt_stat, enc_extent_page, page,
                                  extent_offset, ENCRYPT);
@@ -512,7 +512,7 @@ int ecryptfs_encrypt_page(struct page *page)
        lower_offset = lower_offset_for_page(crypt_stat, page);
        enc_extent_virt = kmap(enc_extent_page);
        rc = ecryptfs_write_lower(ecryptfs_inode, enc_extent_virt, lower_offset,
-                                 PAGE_CACHE_SIZE);
+                                 PAGE_SIZE);
        kunmap(enc_extent_page);
        if (rc < 0) {
                ecryptfs_printk(KERN_ERR,
@@ -560,7 +560,7 @@ int ecryptfs_decrypt_page(struct page *page)
 
        lower_offset = lower_offset_for_page(crypt_stat, page);
        page_virt = kmap(page);
-       rc = ecryptfs_read_lower(page_virt, lower_offset, PAGE_CACHE_SIZE,
+       rc = ecryptfs_read_lower(page_virt, lower_offset, PAGE_SIZE,
                                 ecryptfs_inode);
        kunmap(page);
        if (rc < 0) {
@@ -571,7 +571,7 @@ int ecryptfs_decrypt_page(struct page *page)
        }
 
        for (extent_offset = 0;
-            extent_offset < (PAGE_CACHE_SIZE / crypt_stat->extent_size);
+            extent_offset < (PAGE_SIZE / crypt_stat->extent_size);
             extent_offset++) {
                rc = crypt_extent(crypt_stat, page, page,
                                  extent_offset, DECRYPT);
@@ -659,11 +659,11 @@ void ecryptfs_set_default_sizes(struct ecryptfs_crypt_stat *crypt_stat)
        if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
                crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
        else {
-               if (PAGE_CACHE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)
+               if (PAGE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)
                        crypt_stat->metadata_size =
                                ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
                else
-                       crypt_stat->metadata_size = PAGE_CACHE_SIZE;
+                       crypt_stat->metadata_size = PAGE_SIZE;
        }
 }
 
@@ -1442,7 +1442,7 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry)
                                                ECRYPTFS_VALIDATE_HEADER_SIZE);
        if (rc) {
                /* metadata is not in the file header, so try xattrs */
-               memset(page_virt, 0, PAGE_CACHE_SIZE);
+               memset(page_virt, 0, PAGE_SIZE);
                rc = ecryptfs_read_xattr_region(page_virt, ecryptfs_inode);
                if (rc) {
                        printk(KERN_DEBUG "Valid eCryptfs headers not found in "
@@ -1475,7 +1475,7 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry)
        }
 out:
        if (page_virt) {
-               memset(page_virt, 0, PAGE_CACHE_SIZE);
+               memset(page_virt, 0, PAGE_SIZE);
                kmem_cache_free(ecryptfs_header_cache, page_virt);
        }
        return rc;
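[ Note: lower_offset_for_page() in the hunks above maps an upper-file page
index straight past the metadata header into the lower (encrypted) file. A
worked instance, assuming an 8192-byte header and 4096-byte pages (values are
illustrative only):

/* page index 3 of the upper file begins in the lower file at: */
loff_t lower_off = 8192 + ((loff_t)3 << PAGE_SHIFT);  /* 8192 + 12288 = 20480 */
]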
index 121114e9a464318c1a9b97668d4348a995d9f338..224b49e71aa437ff36aa79bf111c1e536a9a625d 100644 (file)
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -763,10 +763,10 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia,
        } else { /* ia->ia_size < i_size_read(inode) */
                /* We're chopping off all the pages down to the page
                 * in which ia->ia_size is located. Fill in the end of
-                * that page from (ia->ia_size & ~PAGE_CACHE_MASK) to
-                * PAGE_CACHE_SIZE with zeros. */
-               size_t num_zeros = (PAGE_CACHE_SIZE
-                                   - (ia->ia_size & ~PAGE_CACHE_MASK));
+                * that page from (ia->ia_size & ~PAGE_MASK) to
+                * PAGE_SIZE with zeros. */
+               size_t num_zeros = (PAGE_SIZE
+                                   - (ia->ia_size & ~PAGE_MASK));
 
                if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
                        truncate_setsize(inode, ia->ia_size);
index 9893d15381222481e3c142b7934e8bd934cf4215..3cf1546dca8257677572db866f86e3f2190a918e 100644 (file)
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -1798,7 +1798,7 @@ int ecryptfs_parse_packet_set(struct ecryptfs_crypt_stat *crypt_stat,
         * added the our &auth_tok_list */
        next_packet_is_auth_tok_packet = 1;
        while (next_packet_is_auth_tok_packet) {
-               size_t max_packet_size = ((PAGE_CACHE_SIZE - 8) - i);
+               size_t max_packet_size = ((PAGE_SIZE - 8) - i);
 
                switch (src[i]) {
                case ECRYPTFS_TAG_3_PACKET_TYPE:
index 8b0b4a73116d07f354b45237e62662961594fa0d..1698132d0e576d4fea3690f56190242de33645fc 100644 (file)
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -695,12 +695,12 @@ static struct ecryptfs_cache_info {
        {
                .cache = &ecryptfs_header_cache,
                .name = "ecryptfs_headers",
-               .size = PAGE_CACHE_SIZE,
+               .size = PAGE_SIZE,
        },
        {
                .cache = &ecryptfs_xattr_cache,
                .name = "ecryptfs_xattr_cache",
-               .size = PAGE_CACHE_SIZE,
+               .size = PAGE_SIZE,
        },
        {
                .cache = &ecryptfs_key_record_cache,
@@ -818,7 +818,7 @@ static int __init ecryptfs_init(void)
 {
        int rc;
 
-       if (ECRYPTFS_DEFAULT_EXTENT_SIZE > PAGE_CACHE_SIZE) {
+       if (ECRYPTFS_DEFAULT_EXTENT_SIZE > PAGE_SIZE) {
                rc = -EINVAL;
                ecryptfs_printk(KERN_ERR, "The eCryptfs extent size is "
                                "larger than the host's page size, and so "
@@ -826,7 +826,7 @@ static int __init ecryptfs_init(void)
                                "default eCryptfs extent size is [%u] bytes; "
                                "the page size is [%lu] bytes.\n",
                                ECRYPTFS_DEFAULT_EXTENT_SIZE,
-                               (unsigned long)PAGE_CACHE_SIZE);
+                               (unsigned long)PAGE_SIZE);
                goto out;
        }
        rc = ecryptfs_init_kmem_caches();
index 1f5865263b3eff32fed493480ce9ff7b72759af2..e6b1d80952b91d7dda9a3689fe018cf215ec5a11 100644 (file)
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -122,7 +122,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
                                       struct ecryptfs_crypt_stat *crypt_stat)
 {
        loff_t extent_num_in_page = 0;
-       loff_t num_extents_per_page = (PAGE_CACHE_SIZE
+       loff_t num_extents_per_page = (PAGE_SIZE
                                       / crypt_stat->extent_size);
        int rc = 0;
 
@@ -138,7 +138,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
                        char *page_virt;
 
                        page_virt = kmap_atomic(page);
-                       memset(page_virt, 0, PAGE_CACHE_SIZE);
+                       memset(page_virt, 0, PAGE_SIZE);
                        /* TODO: Support more than one header extent */
                        if (view_extent_num == 0) {
                                size_t written;
@@ -164,8 +164,8 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
                                 - crypt_stat->metadata_size);
 
                        rc = ecryptfs_read_lower_page_segment(
-                               page, (lower_offset >> PAGE_CACHE_SHIFT),
-                               (lower_offset & ~PAGE_CACHE_MASK),
+                               page, (lower_offset >> PAGE_SHIFT),
+                               (lower_offset & ~PAGE_MASK),
                                crypt_stat->extent_size, page->mapping->host);
                        if (rc) {
                                printk(KERN_ERR "%s: Error attempting to read "
@@ -198,7 +198,7 @@ static int ecryptfs_readpage(struct file *file, struct page *page)
 
        if (!crypt_stat || !(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
                rc = ecryptfs_read_lower_page_segment(page, page->index, 0,
-                                                     PAGE_CACHE_SIZE,
+                                                     PAGE_SIZE,
                                                      page->mapping->host);
        } else if (crypt_stat->flags & ECRYPTFS_VIEW_AS_ENCRYPTED) {
                if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) {
@@ -215,7 +215,7 @@ static int ecryptfs_readpage(struct file *file, struct page *page)
 
                } else {
                        rc = ecryptfs_read_lower_page_segment(
-                               page, page->index, 0, PAGE_CACHE_SIZE,
+                               page, page->index, 0, PAGE_SIZE,
                                page->mapping->host);
                        if (rc) {
                                printk(KERN_ERR "Error reading page; rc = "
@@ -250,12 +250,12 @@ static int fill_zeros_to_end_of_page(struct page *page, unsigned int to)
        struct inode *inode = page->mapping->host;
        int end_byte_in_page;
 
-       if ((i_size_read(inode) / PAGE_CACHE_SIZE) != page->index)
+       if ((i_size_read(inode) / PAGE_SIZE) != page->index)
                goto out;
-       end_byte_in_page = i_size_read(inode) % PAGE_CACHE_SIZE;
+       end_byte_in_page = i_size_read(inode) % PAGE_SIZE;
        if (to > end_byte_in_page)
                end_byte_in_page = to;
-       zero_user_segment(page, end_byte_in_page, PAGE_CACHE_SIZE);
+       zero_user_segment(page, end_byte_in_page, PAGE_SIZE);
 out:
        return 0;
 }
@@ -279,7 +279,7 @@ static int ecryptfs_write_begin(struct file *file,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
 {
-       pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+       pgoff_t index = pos >> PAGE_SHIFT;
        struct page *page;
        loff_t prev_page_end_size;
        int rc = 0;
@@ -289,14 +289,14 @@ static int ecryptfs_write_begin(struct file *file,
                return -ENOMEM;
        *pagep = page;
 
-       prev_page_end_size = ((loff_t)index << PAGE_CACHE_SHIFT);
+       prev_page_end_size = ((loff_t)index << PAGE_SHIFT);
        if (!PageUptodate(page)) {
                struct ecryptfs_crypt_stat *crypt_stat =
                        &ecryptfs_inode_to_private(mapping->host)->crypt_stat;
 
                if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
                        rc = ecryptfs_read_lower_page_segment(
-                               page, index, 0, PAGE_CACHE_SIZE, mapping->host);
+                               page, index, 0, PAGE_SIZE, mapping->host);
                        if (rc) {
                                printk(KERN_ERR "%s: Error attempting to read "
                                       "lower page segment; rc = [%d]\n",
@@ -322,7 +322,7 @@ static int ecryptfs_write_begin(struct file *file,
                                SetPageUptodate(page);
                        } else {
                                rc = ecryptfs_read_lower_page_segment(
-                                       page, index, 0, PAGE_CACHE_SIZE,
+                                       page, index, 0, PAGE_SIZE,
                                        mapping->host);
                                if (rc) {
                                        printk(KERN_ERR "%s: Error reading "
@@ -336,9 +336,9 @@ static int ecryptfs_write_begin(struct file *file,
                } else {
                        if (prev_page_end_size
                            >= i_size_read(page->mapping->host)) {
-                               zero_user(page, 0, PAGE_CACHE_SIZE);
+                               zero_user(page, 0, PAGE_SIZE);
                                SetPageUptodate(page);
-                       } else if (len < PAGE_CACHE_SIZE) {
+                       } else if (len < PAGE_SIZE) {
                                rc = ecryptfs_decrypt_page(page);
                                if (rc) {
                                        printk(KERN_ERR "%s: Error decrypting "
@@ -371,11 +371,11 @@ static int ecryptfs_write_begin(struct file *file,
         * of page?  Zero it out. */
        if ((i_size_read(mapping->host) == prev_page_end_size)
            && (pos != 0))
-               zero_user(page, 0, PAGE_CACHE_SIZE);
+               zero_user(page, 0, PAGE_SIZE);
 out:
        if (unlikely(rc)) {
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
                *pagep = NULL;
        }
        return rc;
@@ -437,7 +437,7 @@ static int ecryptfs_write_inode_size_to_xattr(struct inode *ecryptfs_inode)
        }
        inode_lock(lower_inode);
        size = lower_inode->i_op->getxattr(lower_dentry, ECRYPTFS_XATTR_NAME,
-                                          xattr_virt, PAGE_CACHE_SIZE);
+                                          xattr_virt, PAGE_SIZE);
        if (size < 0)
                size = 8;
        put_unaligned_be64(i_size_read(ecryptfs_inode), xattr_virt);
@@ -479,8 +479,8 @@ static int ecryptfs_write_end(struct file *file,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
 {
-       pgoff_t index = pos >> PAGE_CACHE_SHIFT;
-       unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+       pgoff_t index = pos >> PAGE_SHIFT;
+       unsigned from = pos & (PAGE_SIZE - 1);
        unsigned to = from + copied;
        struct inode *ecryptfs_inode = mapping->host;
        struct ecryptfs_crypt_stat *crypt_stat =
@@ -500,7 +500,7 @@ static int ecryptfs_write_end(struct file *file,
                goto out;
        }
        if (!PageUptodate(page)) {
-               if (copied < PAGE_CACHE_SIZE) {
+               if (copied < PAGE_SIZE) {
                        rc = 0;
                        goto out;
                }
@@ -533,7 +533,7 @@ static int ecryptfs_write_end(struct file *file,
                rc = copied;
 out:
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
        return rc;
 }
 
index 09fe622274e44df8fcf4bc040505abea95b345cf..158a3a39f82de76a916ba7fb1edf680cf4184d8f 100644 (file)
--- a/fs/ecryptfs/read_write.c
+++ b/fs/ecryptfs/read_write.c
@@ -74,7 +74,7 @@ int ecryptfs_write_lower_page_segment(struct inode *ecryptfs_inode,
        loff_t offset;
        int rc;
 
-       offset = ((((loff_t)page_for_lower->index) << PAGE_CACHE_SHIFT)
+       offset = ((((loff_t)page_for_lower->index) << PAGE_SHIFT)
                  + offset_in_page);
        virt = kmap(page_for_lower);
        rc = ecryptfs_write_lower(ecryptfs_inode, virt, offset, size);
@@ -123,9 +123,9 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
        else
                pos = offset;
        while (pos < (offset + size)) {
-               pgoff_t ecryptfs_page_idx = (pos >> PAGE_CACHE_SHIFT);
-               size_t start_offset_in_page = (pos & ~PAGE_CACHE_MASK);
-               size_t num_bytes = (PAGE_CACHE_SIZE - start_offset_in_page);
+               pgoff_t ecryptfs_page_idx = (pos >> PAGE_SHIFT);
+               size_t start_offset_in_page = (pos & ~PAGE_MASK);
+               size_t num_bytes = (PAGE_SIZE - start_offset_in_page);
                loff_t total_remaining_bytes = ((offset + size) - pos);
 
                if (fatal_signal_pending(current)) {
@@ -165,7 +165,7 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
                         * Fill in zero values to the end of the page */
                        memset(((char *)ecryptfs_page_virt
                                + start_offset_in_page), 0,
-                               PAGE_CACHE_SIZE - start_offset_in_page);
+                               PAGE_SIZE - start_offset_in_page);
                }
 
                /* pos >= offset, we are now writing the data request */
@@ -186,7 +186,7 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
                                                ecryptfs_page,
                                                start_offset_in_page,
                                                data_offset);
-               page_cache_release(ecryptfs_page);
+               put_page(ecryptfs_page);
                if (rc) {
                        printk(KERN_ERR "%s: Error encrypting "
                               "page; rc = [%d]\n", __func__, rc);
@@ -262,7 +262,7 @@ int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs,
        loff_t offset;
        int rc;
 
-       offset = ((((loff_t)page_index) << PAGE_CACHE_SHIFT) + offset_in_page);
+       offset = ((((loff_t)page_index) << PAGE_SHIFT) + offset_in_page);
        virt = kmap(page_for_ecryptfs);
        rc = ecryptfs_read_lower(virt, offset, size, ecryptfs_inode);
        if (rc > 0)
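[ Note: ecryptfs_write() above walks its byte range with the standard
index/offset page decomposition. The chunking idiom in isolation (generic
sketch, not tied to eCryptfs):

loff_t pos = offset, end = offset + size;

while (pos < end) {
        pgoff_t index   = pos >> PAGE_SHIFT;        /* target page */
        size_t  in_page = pos & ~PAGE_MASK;         /* offset within that page */
        size_t  chunk   = min_t(loff_t, PAGE_SIZE - in_page, end - pos);

        /* ... operate on 'chunk' bytes of page 'index' ... */
        pos += chunk;
}
]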
index dd029d13ea6140f7df1ed99fb2634a9c858f801f..553c5d2db4a442757b1224d3d12b77d4bc20caee 100644 (file)
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -197,8 +197,8 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
        efivarfs_sb = sb;
 
        sb->s_maxbytes          = MAX_LFS_FILESIZE;
-       sb->s_blocksize         = PAGE_CACHE_SIZE;
-       sb->s_blocksize_bits    = PAGE_CACHE_SHIFT;
+       sb->s_blocksize         = PAGE_SIZE;
+       sb->s_blocksize_bits    = PAGE_SHIFT;
        sb->s_magic             = EFIVARFS_MAGIC;
        sb->s_op                = &efivarfs_ops;
        sb->s_d_op              = &efivarfs_d_ops;
index e5bb2abf77f9adb1e11508cec2a8fea65934bac4..547b93cbea63b522746c9cc2e6dbf70aa3abdb78 100644 (file)
--- a/fs/exofs/dir.c
+++ b/fs/exofs/dir.c
@@ -41,16 +41,16 @@ static inline unsigned exofs_chunk_size(struct inode *inode)
 static inline void exofs_put_page(struct page *page)
 {
        kunmap(page);
-       page_cache_release(page);
+       put_page(page);
 }
 
 static unsigned exofs_last_byte(struct inode *inode, unsigned long page_nr)
 {
        loff_t last_byte = inode->i_size;
 
-       last_byte -= page_nr << PAGE_CACHE_SHIFT;
-       if (last_byte > PAGE_CACHE_SIZE)
-               last_byte = PAGE_CACHE_SIZE;
+       last_byte -= page_nr << PAGE_SHIFT;
+       if (last_byte > PAGE_SIZE)
+               last_byte = PAGE_SIZE;
        return last_byte;
 }
 
@@ -85,13 +85,13 @@ static void exofs_check_page(struct page *page)
        unsigned chunk_size = exofs_chunk_size(dir);
        char *kaddr = page_address(page);
        unsigned offs, rec_len;
-       unsigned limit = PAGE_CACHE_SIZE;
+       unsigned limit = PAGE_SIZE;
        struct exofs_dir_entry *p;
        char *error;
 
        /* if the page is the last one in the directory */
-       if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
-               limit = dir->i_size & ~PAGE_CACHE_MASK;
+       if ((dir->i_size >> PAGE_SHIFT) == page->index) {
+               limit = dir->i_size & ~PAGE_MASK;
                if (limit & (chunk_size - 1))
                        goto Ebadsize;
                if (!limit)
@@ -138,7 +138,7 @@ bad_entry:
        EXOFS_ERR(
                "ERROR [exofs_check_page]: bad entry in directory(0x%lx): %s - "
                "offset=%lu, inode=0x%llu, rec_len=%d, name_len=%d\n",
-               dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
+               dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
                _LLU(le64_to_cpu(p->inode_no)),
                rec_len, p->name_len);
        goto fail;
@@ -147,7 +147,7 @@ Eend:
        EXOFS_ERR("ERROR [exofs_check_page]: "
                "entry in directory(0x%lx) spans the page boundary"
                "offset=%lu, inode=0x%llx\n",
-               dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs,
+               dir->i_ino, (page->index<<PAGE_SHIFT)+offs,
                _LLU(le64_to_cpu(p->inode_no)));
 fail:
        SetPageChecked(page);
@@ -237,8 +237,8 @@ exofs_readdir(struct file *file, struct dir_context *ctx)
 {
        loff_t pos = ctx->pos;
        struct inode *inode = file_inode(file);
-       unsigned int offset = pos & ~PAGE_CACHE_MASK;
-       unsigned long n = pos >> PAGE_CACHE_SHIFT;
+       unsigned int offset = pos & ~PAGE_MASK;
+       unsigned long n = pos >> PAGE_SHIFT;
        unsigned long npages = dir_pages(inode);
        unsigned chunk_mask = ~(exofs_chunk_size(inode)-1);
        int need_revalidate = (file->f_version != inode->i_version);
@@ -254,7 +254,7 @@ exofs_readdir(struct file *file, struct dir_context *ctx)
                if (IS_ERR(page)) {
                        EXOFS_ERR("ERROR: bad page in directory(0x%lx)\n",
                                  inode->i_ino);
-                       ctx->pos += PAGE_CACHE_SIZE - offset;
+                       ctx->pos += PAGE_SIZE - offset;
                        return PTR_ERR(page);
                }
                kaddr = page_address(page);
@@ -262,7 +262,7 @@ exofs_readdir(struct file *file, struct dir_context *ctx)
                        if (offset) {
                                offset = exofs_validate_entry(kaddr, offset,
                                                                chunk_mask);
-                               ctx->pos = (n<<PAGE_CACHE_SHIFT) + offset;
+                               ctx->pos = (n<<PAGE_SHIFT) + offset;
                        }
                        file->f_version = inode->i_version;
                        need_revalidate = 0;
@@ -449,7 +449,7 @@ int exofs_add_link(struct dentry *dentry, struct inode *inode)
                kaddr = page_address(page);
                dir_end = kaddr + exofs_last_byte(dir, n);
                de = (struct exofs_dir_entry *)kaddr;
-               kaddr += PAGE_CACHE_SIZE - reclen;
+               kaddr += PAGE_SIZE - reclen;
                while ((char *)de <= kaddr) {
                        if ((char *)de == dir_end) {
                                name_len = 0;
@@ -602,7 +602,7 @@ int exofs_make_empty(struct inode *inode, struct inode *parent)
        kunmap_atomic(kaddr);
        err = exofs_commit_chunk(page, 0, chunk_size);
 fail:
-       page_cache_release(page);
+       put_page(page);
        return err;
 }
 
index 9eaf595aeaf8840cf74f0f02141411a9fed65c1e..49e1bd00b4ecc9e0f351d795452250968cf1ca31 100644 (file)
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -317,7 +317,7 @@ static int read_exec(struct page_collect *pcol)
 
        if (!pcol->ios) {
                int ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, true,
-                                            pcol->pg_first << PAGE_CACHE_SHIFT,
+                                            pcol->pg_first << PAGE_SHIFT,
                                             pcol->length, &pcol->ios);
 
                if (ret)
@@ -383,7 +383,7 @@ static int readpage_strip(void *data, struct page *page)
        struct inode *inode = pcol->inode;
        struct exofs_i_info *oi = exofs_i(inode);
        loff_t i_size = i_size_read(inode);
-       pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+       pgoff_t end_index = i_size >> PAGE_SHIFT;
        size_t len;
        int ret;
 
@@ -397,9 +397,9 @@ static int readpage_strip(void *data, struct page *page)
        pcol->that_locked_page = page;
 
        if (page->index < end_index)
-               len = PAGE_CACHE_SIZE;
+               len = PAGE_SIZE;
        else if (page->index == end_index)
-               len = i_size & ~PAGE_CACHE_MASK;
+               len = i_size & ~PAGE_MASK;
        else
                len = 0;
 
@@ -442,8 +442,8 @@ try_again:
                        goto fail;
        }
 
-       if (len != PAGE_CACHE_SIZE)
-               zero_user(page, len, PAGE_CACHE_SIZE - len);
+       if (len != PAGE_SIZE)
+               zero_user(page, len, PAGE_SIZE - len);
 
        EXOFS_DBGMSG2("    readpage_strip(0x%lx, 0x%lx) len=0x%zx\n",
                     inode->i_ino, page->index, len);
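[ Note: readpage_strip() above sizes each page with the usual EOF arithmetic,
then zeroes the tail past the valid length. Worked numbers, assuming
i_size = 10000 and 4096-byte pages:

/* end_index = 10000 >> 12 = 2
 * index 0, 1 -> len = 4096                        (fully inside the file)
 * index 2    -> len = 10000 & ~PAGE_MASK = 1808   (EOF page, 2288 bytes zeroed)
 * index > 2  -> len = 0                           (page entirely beyond EOF)
 */
]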
@@ -609,7 +609,7 @@ static void __r4w_put_page(void *priv, struct page *page)
 
        if ((pcol->that_locked_page != page) && (ZERO_PAGE(0) != page)) {
                EXOFS_DBGMSG2("index=0x%lx\n", page->index);
-               page_cache_release(page);
+               put_page(page);
                return;
        }
        EXOFS_DBGMSG2("that_locked_page index=0x%lx\n",
@@ -633,7 +633,7 @@ static int write_exec(struct page_collect *pcol)
 
        BUG_ON(pcol->ios);
        ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, false,
-                                pcol->pg_first << PAGE_CACHE_SHIFT,
+                                pcol->pg_first << PAGE_SHIFT,
                                 pcol->length, &pcol->ios);
        if (unlikely(ret))
                goto err;
@@ -696,7 +696,7 @@ static int writepage_strip(struct page *page,
        struct inode *inode = pcol->inode;
        struct exofs_i_info *oi = exofs_i(inode);
        loff_t i_size = i_size_read(inode);
-       pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+       pgoff_t end_index = i_size >> PAGE_SHIFT;
        size_t len;
        int ret;
 
@@ -708,9 +708,9 @@ static int writepage_strip(struct page *page,
 
        if (page->index < end_index)
                /* in this case, the page is within the limits of the file */
-               len = PAGE_CACHE_SIZE;
+               len = PAGE_SIZE;
        else {
-               len = i_size & ~PAGE_CACHE_MASK;
+               len = i_size & ~PAGE_MASK;
 
                if (page->index > end_index || !len) {
                        /* in this case, the page is outside the limits
@@ -790,10 +790,10 @@ static int exofs_writepages(struct address_space *mapping,
        long start, end, expected_pages;
        int ret;
 
-       start = wbc->range_start >> PAGE_CACHE_SHIFT;
+       start = wbc->range_start >> PAGE_SHIFT;
        end = (wbc->range_end == LLONG_MAX) ?
                        start + mapping->nrpages :
-                       wbc->range_end >> PAGE_CACHE_SHIFT;
+                       wbc->range_end >> PAGE_SHIFT;
 
        if (start || end)
                expected_pages = end - start + 1;
@@ -881,15 +881,15 @@ int exofs_write_begin(struct file *file, struct address_space *mapping,
        }
 
         /* read modify write */
-       if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
+       if (!PageUptodate(page) && (len != PAGE_SIZE)) {
                loff_t i_size = i_size_read(mapping->host);
-               pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+               pgoff_t end_index = i_size >> PAGE_SHIFT;
                size_t rlen;
 
                if (page->index < end_index)
-                       rlen = PAGE_CACHE_SIZE;
+                       rlen = PAGE_SIZE;
                else if (page->index == end_index)
-                       rlen = i_size & ~PAGE_CACHE_MASK;
+                       rlen = i_size & ~PAGE_MASK;
                else
                        rlen = 0;
 
index c20d77df2679abb185e29a79f5ceab8349204d65..622a686bb08b5e3429a316f4e849f94f42fdb8db 100644 (file)
--- a/fs/exofs/namei.c
+++ b/fs/exofs/namei.c
@@ -292,11 +292,11 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry,
 out_dir:
        if (dir_de) {
                kunmap(dir_page);
-               page_cache_release(dir_page);
+               put_page(dir_page);
        }
 out_old:
        kunmap(old_page);
-       page_cache_release(old_page);
+       put_page(old_page);
 out:
        return err;
 }
index 0c6638b40f2176b602f70ef465968b3a0dddc292..7ff6fcfa685d49158455b391bc8b30c8a1a237ef 100644 (file)
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -37,7 +37,7 @@ static inline unsigned ext2_rec_len_from_disk(__le16 dlen)
 {
        unsigned len = le16_to_cpu(dlen);
 
-#if (PAGE_CACHE_SIZE >= 65536)
+#if (PAGE_SIZE >= 65536)
        if (len == EXT2_MAX_REC_LEN)
                return 1 << 16;
 #endif
@@ -46,7 +46,7 @@ static inline unsigned ext2_rec_len_from_disk(__le16 dlen)
 
 static inline __le16 ext2_rec_len_to_disk(unsigned len)
 {
-#if (PAGE_CACHE_SIZE >= 65536)
+#if (PAGE_SIZE >= 65536)
        if (len == (1 << 16))
                return cpu_to_le16(EXT2_MAX_REC_LEN);
        else
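[ Note: the #if above exists because an ext2 directory chunk can span a whole
page, and on a 64 KiB-page system a rec_len of 65536 does not fit in the
on-disk __le16; it is stored as the EXT2_MAX_REC_LEN (65535) sentinel and
decoded back on read. Round trip, on such a build:

__le16 on_disk = ext2_rec_len_to_disk(1 << 16);   /* stored as 65535 */
unsigned len   = ext2_rec_len_from_disk(on_disk); /* decodes to 65536 again */
]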
@@ -67,7 +67,7 @@ static inline unsigned ext2_chunk_size(struct inode *inode)
 static inline void ext2_put_page(struct page *page)
 {
        kunmap(page);
-       page_cache_release(page);
+       put_page(page);
 }
 
 /*
@@ -79,9 +79,9 @@ ext2_last_byte(struct inode *inode, unsigned long page_nr)
 {
        unsigned last_byte = inode->i_size;
 
-       last_byte -= page_nr << PAGE_CACHE_SHIFT;
-       if (last_byte > PAGE_CACHE_SIZE)
-               last_byte = PAGE_CACHE_SIZE;
+       last_byte -= page_nr << PAGE_SHIFT;
+       if (last_byte > PAGE_SIZE)
+               last_byte = PAGE_SIZE;
        return last_byte;
 }
 
@@ -118,12 +118,12 @@ static void ext2_check_page(struct page *page, int quiet)
        char *kaddr = page_address(page);
        u32 max_inumber = le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count);
        unsigned offs, rec_len;
-       unsigned limit = PAGE_CACHE_SIZE;
+       unsigned limit = PAGE_SIZE;
        ext2_dirent *p;
        char *error;
 
-       if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
-               limit = dir->i_size & ~PAGE_CACHE_MASK;
+       if ((dir->i_size >> PAGE_SHIFT) == page->index) {
+               limit = dir->i_size & ~PAGE_MASK;
                if (limit & (chunk_size - 1))
                        goto Ebadsize;
                if (!limit)
@@ -176,7 +176,7 @@ bad_entry:
        if (!quiet)
                ext2_error(sb, __func__, "bad entry in directory #%lu: : %s - "
                        "offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
-                       dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
+                       dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
                        (unsigned long) le32_to_cpu(p->inode),
                        rec_len, p->name_len);
        goto fail;
@@ -186,7 +186,7 @@ Eend:
                ext2_error(sb, "ext2_check_page",
                        "entry in directory #%lu spans the page boundary"
                        "offset=%lu, inode=%lu",
-                       dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs,
+                       dir->i_ino, (page->index<<PAGE_SHIFT)+offs,
                        (unsigned long) le32_to_cpu(p->inode));
        }
 fail:
@@ -287,8 +287,8 @@ ext2_readdir(struct file *file, struct dir_context *ctx)
        loff_t pos = ctx->pos;
        struct inode *inode = file_inode(file);
        struct super_block *sb = inode->i_sb;
-       unsigned int offset = pos & ~PAGE_CACHE_MASK;
-       unsigned long n = pos >> PAGE_CACHE_SHIFT;
+       unsigned int offset = pos & ~PAGE_MASK;
+       unsigned long n = pos >> PAGE_SHIFT;
        unsigned long npages = dir_pages(inode);
        unsigned chunk_mask = ~(ext2_chunk_size(inode)-1);
        unsigned char *types = NULL;
@@ -309,14 +309,14 @@ ext2_readdir(struct file *file, struct dir_context *ctx)
                        ext2_error(sb, __func__,
                                   "bad page in #%lu",
                                   inode->i_ino);
-                       ctx->pos += PAGE_CACHE_SIZE - offset;
+                       ctx->pos += PAGE_SIZE - offset;
                        return PTR_ERR(page);
                }
                kaddr = page_address(page);
                if (unlikely(need_revalidate)) {
                        if (offset) {
                                offset = ext2_validate_entry(kaddr, offset, chunk_mask);
-                               ctx->pos = (n<<PAGE_CACHE_SHIFT) + offset;
+                               ctx->pos = (n<<PAGE_SHIFT) + offset;
                        }
                        file->f_version = inode->i_version;
                        need_revalidate = 0;
@@ -406,7 +406,7 @@ struct ext2_dir_entry_2 *ext2_find_entry (struct inode * dir,
                if (++n >= npages)
                        n = 0;
                /* next page is past the blocks we've got */
-               if (unlikely(n > (dir->i_blocks >> (PAGE_CACHE_SHIFT - 9)))) {
+               if (unlikely(n > (dir->i_blocks >> (PAGE_SHIFT - 9)))) {
                        ext2_error(dir->i_sb, __func__,
                                "dir %lu size %lld exceeds block count %llu",
                                dir->i_ino, dir->i_size,
@@ -511,7 +511,7 @@ int ext2_add_link (struct dentry *dentry, struct inode *inode)
                kaddr = page_address(page);
                dir_end = kaddr + ext2_last_byte(dir, n);
                de = (ext2_dirent *)kaddr;
-               kaddr += PAGE_CACHE_SIZE - reclen;
+               kaddr += PAGE_SIZE - reclen;
                while ((char *)de <= kaddr) {
                        if ((char *)de == dir_end) {
                                /* We hit i_size */
@@ -655,7 +655,7 @@ int ext2_make_empty(struct inode *inode, struct inode *parent)
        kunmap_atomic(kaddr);
        err = ext2_commit_chunk(page, 0, chunk_size);
 fail:
-       page_cache_release(page);
+       put_page(page);
        return err;
 }
 
index 7a2be8f7f3c37a69135cab528b1244e51c5fc2a2..d34843925b23f17485bb24114439b81164826122 100644 (file)
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -398,7 +398,7 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
                        ext2_set_link(old_inode, dir_de, dir_page, new_dir, 0);
                else {
                        kunmap(dir_page);
-                       page_cache_release(dir_page);
+                       put_page(dir_page);
                }
                inode_dec_link_count(old_dir);
        }
@@ -408,11 +408,11 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
 out_dir:
        if (dir_de) {
                kunmap(dir_page);
-               page_cache_release(dir_page);
+               put_page(dir_page);
        }
 out_old:
        kunmap(old_page);
-       page_cache_release(old_page);
+       put_page(old_page);
 out:
        return err;
 }
index edc053a819144999240e1c04b5a6c0e9a6e76da5..6a6c27373b5467d11dcfc056b3c17dec5cccde3b 100644 (file)
--- a/fs/ext4/crypto.c
+++ b/fs/ext4/crypto.c
@@ -32,6 +32,7 @@
 #include <linux/random.h>
 #include <linux/scatterlist.h>
 #include <linux/spinlock_types.h>
+#include <linux/namei.h>
 
 #include "ext4_extents.h"
 #include "xattr.h"
@@ -91,7 +92,8 @@ void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
  * Return: An allocated and initialized encryption context on success; error
  * value or NULL otherwise.
  */
-struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
+struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode,
+                                           gfp_t gfp_flags)
 {
        struct ext4_crypto_ctx *ctx = NULL;
        int res = 0;
@@ -118,7 +120,7 @@ struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
                list_del(&ctx->free_list);
        spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
        if (!ctx) {
-               ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
+               ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, gfp_flags);
                if (!ctx) {
                        res = -ENOMEM;
                        goto out;
@@ -255,7 +257,8 @@ static int ext4_page_crypto(struct inode *inode,
                            ext4_direction_t rw,
                            pgoff_t index,
                            struct page *src_page,
-                           struct page *dest_page)
+                           struct page *dest_page,
+                           gfp_t gfp_flags)
 
 {
        u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
@@ -266,7 +269,7 @@ static int ext4_page_crypto(struct inode *inode,
        struct crypto_skcipher *tfm = ci->ci_ctfm;
        int res = 0;
 
-       req = skcipher_request_alloc(tfm, GFP_NOFS);
+       req = skcipher_request_alloc(tfm, gfp_flags);
        if (!req) {
                printk_ratelimited(KERN_ERR
                                   "%s: crypto_request_alloc() failed\n",
@@ -283,10 +286,10 @@ static int ext4_page_crypto(struct inode *inode,
               EXT4_XTS_TWEAK_SIZE - sizeof(index));
 
        sg_init_table(&dst, 1);
-       sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
+       sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
        sg_init_table(&src, 1);
-       sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
-       skcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
+       sg_set_page(&src, src_page, PAGE_SIZE, 0);
+       skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE,
                                   xts_tweak);
        if (rw == EXT4_DECRYPT)
                res = crypto_skcipher_decrypt(req);
@@ -307,9 +310,10 @@ static int ext4_page_crypto(struct inode *inode,
        return 0;
 }
 
-static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx)
+static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx,
+                                     gfp_t gfp_flags)
 {
-       ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, GFP_NOWAIT);
+       ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, gfp_flags);
        if (ctx->w.bounce_page == NULL)
                return ERR_PTR(-ENOMEM);
        ctx->flags |= EXT4_WRITE_PATH_FL;
@@ -332,7 +336,8 @@ static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx)
  * error value or NULL.
  */
 struct page *ext4_encrypt(struct inode *inode,
-                         struct page *plaintext_page)
+                         struct page *plaintext_page,
+                         gfp_t gfp_flags)
 {
        struct ext4_crypto_ctx *ctx;
        struct page *ciphertext_page = NULL;
@@ -340,17 +345,17 @@ struct page *ext4_encrypt(struct inode *inode,
 
        BUG_ON(!PageLocked(plaintext_page));
 
-       ctx = ext4_get_crypto_ctx(inode);
+       ctx = ext4_get_crypto_ctx(inode, gfp_flags);
        if (IS_ERR(ctx))
                return (struct page *) ctx;
 
        /* The encryption operation will require a bounce page. */
-       ciphertext_page = alloc_bounce_page(ctx);
+       ciphertext_page = alloc_bounce_page(ctx, gfp_flags);
        if (IS_ERR(ciphertext_page))
                goto errout;
        ctx->w.control_page = plaintext_page;
        err = ext4_page_crypto(inode, EXT4_ENCRYPT, plaintext_page->index,
-                              plaintext_page, ciphertext_page);
+                              plaintext_page, ciphertext_page, gfp_flags);
        if (err) {
                ciphertext_page = ERR_PTR(err);
        errout:
@@ -378,8 +383,8 @@ int ext4_decrypt(struct page *page)
 {
        BUG_ON(!PageLocked(page));
 
-       return ext4_page_crypto(page->mapping->host,
-                               EXT4_DECRYPT, page->index, page, page);
+       return ext4_page_crypto(page->mapping->host, EXT4_DECRYPT,
+                               page->index, page, page, GFP_NOFS);
 }
 
 int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
@@ -396,13 +401,13 @@ int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
                 (unsigned long) inode->i_ino, lblk, len);
 #endif
 
-       BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);
+       BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);
 
-       ctx = ext4_get_crypto_ctx(inode);
+       ctx = ext4_get_crypto_ctx(inode, GFP_NOFS);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
 
-       ciphertext_page = alloc_bounce_page(ctx);
+       ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT);
        if (IS_ERR(ciphertext_page)) {
                err = PTR_ERR(ciphertext_page);
                goto errout;
@@ -410,11 +415,12 @@ int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
 
        while (len--) {
                err = ext4_page_crypto(inode, EXT4_ENCRYPT, lblk,
-                                      ZERO_PAGE(0), ciphertext_page);
+                                      ZERO_PAGE(0), ciphertext_page,
+                                      GFP_NOFS);
                if (err)
                        goto errout;
 
-               bio = bio_alloc(GFP_KERNEL, 1);
+               bio = bio_alloc(GFP_NOWAIT, 1);
                if (!bio) {
                        err = -ENOMEM;
                        goto errout;
@@ -473,13 +479,19 @@ uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
  */
 static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
 {
-       struct inode *dir = d_inode(dentry->d_parent);
-       struct ext4_crypt_info *ci = EXT4_I(dir)->i_crypt_info;
+       struct dentry *dir;
+       struct ext4_crypt_info *ci;
        int dir_has_key, cached_with_key;
 
-       if (!ext4_encrypted_inode(dir))
-               return 0;
+       if (flags & LOOKUP_RCU)
+               return -ECHILD;
 
+       dir = dget_parent(dentry);
+       if (!ext4_encrypted_inode(d_inode(dir))) {
+               dput(dir);
+               return 0;
+       }
+       ci = EXT4_I(d_inode(dir))->i_crypt_info;
        if (ci && ci->ci_keyring_key &&
            (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
                                          (1 << KEY_FLAG_REVOKED) |
@@ -489,6 +501,7 @@ static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
        /* this should eventually be a flag in d_flags */
        cached_with_key = dentry->d_fsdata != NULL;
        dir_has_key = (ci != NULL);
+       dput(dir);
 
        /*
         * If the dentry was cached without the key, and it is a
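
The ->d_revalidate() change above swaps a raw dentry->d_parent dereference for dget_parent(), which is safe against concurrent renames, and punts RCU-walk lookups back to ref-walk with -ECHILD, since taking a reference may sleep. A minimal sketch of that pattern, with an invented function name and the validity logic elided:

#include <linux/errno.h>
#include <linux/dcache.h>
#include <linux/namei.h>

static int example_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct dentry *parent;
	int valid = 1;			/* 1: dentry still valid */

	if (flags & LOOKUP_RCU)
		return -ECHILD;		/* no refs in RCU-walk; retry in ref-walk */

	parent = dget_parent(dentry);	/* stable, refcounted parent */
	/* ... decide validity from d_inode(parent) state ... */
	dput(parent);
	return valid;
}
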
index 50ba27cbed034115c4ad3f608aa9758860d2cd76..561d7308b393a0347dea3b1344f409bf5fed0139 100644 (file)
@@ -155,13 +155,13 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
                err = ext4_map_blocks(NULL, inode, &map, 0);
                if (err > 0) {
                        pgoff_t index = map.m_pblk >>
-                                       (PAGE_CACHE_SHIFT - inode->i_blkbits);
+                                       (PAGE_SHIFT - inode->i_blkbits);
                        if (!ra_has_index(&file->f_ra, index))
                                page_cache_sync_readahead(
                                        sb->s_bdev->bd_inode->i_mapping,
                                        &file->f_ra, file,
                                        index, 1);
-                       file->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
+                       file->f_ra.prev_pos = (loff_t)index << PAGE_SHIFT;
                        bh = ext4_bread(NULL, inode, map.m_lblk, 0);
                        if (IS_ERR(bh)) {
                                err = PTR_ERR(bh);
index c0474351986597cda60b574cacf325490ab2b157..349afebe21ee192a3cd6596438dd7502d7d16c80 100644 (file)
@@ -911,6 +911,29 @@ do {                                                                              \
 
 #include "extents_status.h"
 
+/*
+ * Lock subclasses for i_data_sem in the ext4_inode_info structure.
+ *
+ * These are needed to avoid lockdep false positives when we need to
+ * allocate blocks to the quota inode during ext4_map_blocks(), while
+ * holding i_data_sem for a normal (non-quota) inode.  Since we don't
+ * do quota tracking for the quota inode, this avoids deadlock (as
+ * well as infinite recursion, since it isn't turtles all the way
+ * down...)
+ *
+ *  I_DATA_SEM_NORMAL - Used for most inodes
+ *  I_DATA_SEM_OTHER  - Used by move_extent.c for the second normal inode
+ *                       where the second inode has a larger inode number
+ *                       than the first
+ *  I_DATA_SEM_QUOTA  - Used for quota inodes only
+ */
+enum {
+       I_DATA_SEM_NORMAL = 0,
+       I_DATA_SEM_OTHER,
+       I_DATA_SEM_QUOTA,
+};
+
+
 /*
  * fourth extended file system inode data in memory
  */
@@ -1961,7 +1984,7 @@ ext4_rec_len_from_disk(__le16 dlen, unsigned blocksize)
 {
        unsigned len = le16_to_cpu(dlen);
 
-#if (PAGE_CACHE_SIZE >= 65536)
+#if (PAGE_SIZE >= 65536)
        if (len == EXT4_MAX_REC_LEN || len == 0)
                return blocksize;
        return (len & 65532) | ((len & 3) << 16);
@@ -1974,7 +1997,7 @@ static inline __le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize)
 {
        if ((len > blocksize) || (blocksize > (1 << 18)) || (len & 3))
                BUG();
-#if (PAGE_CACHE_SIZE >= 65536)
+#if (PAGE_SIZE >= 65536)
        if (len < 65536)
                return cpu_to_le16(len);
        if (len == blocksize) {
@@ -2282,11 +2305,13 @@ extern struct kmem_cache *ext4_crypt_info_cachep;
 bool ext4_valid_contents_enc_mode(uint32_t mode);
 uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size);
 extern struct workqueue_struct *ext4_read_workqueue;
-struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode);
+struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode,
+                                           gfp_t gfp_flags);
 void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx);
 void ext4_restore_control_page(struct page *data_page);
 struct page *ext4_encrypt(struct inode *inode,
-                         struct page *plaintext_page);
+                         struct page *plaintext_page,
+                         gfp_t gfp_flags);
 int ext4_decrypt(struct page *page);
 int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
                           ext4_fsblk_t pblk, ext4_lblk_t len);
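
The I_DATA_SEM_* subclasses introduced in this ext4.h hunk let lockdep distinguish legitimate nested i_data_sem acquisitions (the second inode in a move_extent operation, or the quota inode during block allocation) from real deadlocks. A minimal sketch of how such subclasses are consumed, assuming the enum above is in scope and two rwsems already initialized elsewhere; classify_and_nest() is illustrative, not from the patch:

#include <linux/rwsem.h>
#include <linux/lockdep.h>

static void classify_and_nest(struct rw_semaphore *normal,
			      struct rw_semaphore *quota)
{
	/* reclassify the quota inode's lock once, e.g. at quota-on time */
	lockdep_set_subclass(quota, I_DATA_SEM_QUOTA);

	down_write(normal);			/* subclass 0 == I_DATA_SEM_NORMAL */
	down_write_nested(quota, I_DATA_SEM_QUOTA);
	/* ... allocate blocks to the quota inode ... */
	up_write(quota);
	up_write(normal);
}
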
index 6659e216385e0421383cd3539dbcc048830d994e..fa2208bae2e1211d8d761dd4e90e934a64575306 100644 (file)
@@ -329,7 +329,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
        struct super_block *sb = inode->i_sb;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct vfsmount *mnt = filp->f_path.mnt;
-       struct inode *dir = filp->f_path.dentry->d_parent->d_inode;
+       struct dentry *dir;
        struct path path;
        char buf[64], *cp;
        int ret;
@@ -373,14 +373,18 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
                if (ext4_encryption_info(inode) == NULL)
                        return -ENOKEY;
        }
-       if (ext4_encrypted_inode(dir) &&
-           !ext4_is_child_context_consistent_with_parent(dir, inode)) {
+
+       dir = dget_parent(file_dentry(filp));
+       if (ext4_encrypted_inode(d_inode(dir)) &&
+           !ext4_is_child_context_consistent_with_parent(d_inode(dir), inode)) {
                ext4_warning(inode->i_sb,
                             "Inconsistent encryption contexts: %lu/%lu\n",
-                            (unsigned long) dir->i_ino,
+                            (unsigned long) d_inode(dir)->i_ino,
                             (unsigned long) inode->i_ino);
+               dput(dir);
                return -EPERM;
        }
+       dput(dir);
        /*
         * Set up the jbd2_inode if we are opening the inode for
         * writing and the journal is present
@@ -428,8 +432,8 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
        lastoff = startoff;
        endoff = (loff_t)end_blk << blkbits;
 
-       index = startoff >> PAGE_CACHE_SHIFT;
-       end = endoff >> PAGE_CACHE_SHIFT;
+       index = startoff >> PAGE_SHIFT;
+       end = endoff >> PAGE_SHIFT;
 
        pagevec_init(&pvec, 0);
        do {
index 7cbdd3752ba50fcc076ba9ab55c76be65cff3cce..7bc6c855cc18ca992ac3830d3b3010e63597bc5d 100644 (file)
@@ -482,7 +482,7 @@ static int ext4_read_inline_page(struct inode *inode, struct page *page)
        ret = ext4_read_inline_data(inode, kaddr, len, &iloc);
        flush_dcache_page(page);
        kunmap_atomic(kaddr);
-       zero_user_segment(page, len, PAGE_CACHE_SIZE);
+       zero_user_segment(page, len, PAGE_SIZE);
        SetPageUptodate(page);
        brelse(iloc.bh);
 
@@ -507,7 +507,7 @@ int ext4_readpage_inline(struct inode *inode, struct page *page)
        if (!page->index)
                ret = ext4_read_inline_page(inode, page);
        else if (!PageUptodate(page)) {
-               zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+               zero_user_segment(page, 0, PAGE_SIZE);
                SetPageUptodate(page);
        }
 
@@ -595,7 +595,7 @@ retry:
 
        if (ret) {
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
                page = NULL;
                ext4_orphan_add(handle, inode);
                up_write(&EXT4_I(inode)->xattr_sem);
@@ -621,7 +621,7 @@ retry:
 out:
        if (page) {
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
        }
        if (sem_held)
                up_write(&EXT4_I(inode)->xattr_sem);
@@ -690,7 +690,7 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
        if (!ext4_has_inline_data(inode)) {
                ret = 0;
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
                goto out_up_read;
        }
 
@@ -815,7 +815,7 @@ static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping,
        if (ret) {
                up_read(&EXT4_I(inode)->xattr_sem);
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
                ext4_truncate_failed_write(inode);
                return ret;
        }
@@ -829,7 +829,7 @@ out:
        up_read(&EXT4_I(inode)->xattr_sem);
        if (page) {
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
        }
        return ret;
 }
@@ -919,7 +919,7 @@ retry_journal:
 out_release_page:
        up_read(&EXT4_I(inode)->xattr_sem);
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 out_journal:
        ext4_journal_stop(handle);
 out:
@@ -947,7 +947,7 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
                i_size_changed = 1;
        }
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 
        /*
         * Don't mark the inode dirty under page lock. First, it unnecessarily
index dab84a2530ff3e05794d9b3c602e0d246937c8a3..981a1fc30eaa2c291c97958ce088e3ce96ab4353 100644 (file)
@@ -763,39 +763,47 @@ int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
 /* Maximum number of blocks we map for direct IO at once. */
 #define DIO_MAX_BLOCKS 4096
 
-static handle_t *start_dio_trans(struct inode *inode,
-                                struct buffer_head *bh_result)
+/*
+ * Get blocks function for the cases that need to start a transaction -
+ * generally different cases of direct IO and DAX IO. It also handles retries
+ * in case of ENOSPC.
+ */
+static int ext4_get_block_trans(struct inode *inode, sector_t iblock,
+                               struct buffer_head *bh_result, int flags)
 {
        int dio_credits;
+       handle_t *handle;
+       int retries = 0;
+       int ret;
 
        /* Trim mapping request to maximum we can map at once for DIO */
        if (bh_result->b_size >> inode->i_blkbits > DIO_MAX_BLOCKS)
                bh_result->b_size = DIO_MAX_BLOCKS << inode->i_blkbits;
        dio_credits = ext4_chunk_trans_blocks(inode,
                                      bh_result->b_size >> inode->i_blkbits);
-       return ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
+retry:
+       handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
+
+       ret = _ext4_get_block(inode, iblock, bh_result, flags);
+       ext4_journal_stop(handle);
+
+       if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+               goto retry;
+       return ret;
 }
 
 /* Get block function for DIO reads and writes to inodes without extents */
 int ext4_dio_get_block(struct inode *inode, sector_t iblock,
                       struct buffer_head *bh, int create)
 {
-       handle_t *handle;
-       int ret;
-
        /* We don't expect handle for direct IO */
        WARN_ON_ONCE(ext4_journal_current_handle());
 
-       if (create) {
-               handle = start_dio_trans(inode, bh);
-               if (IS_ERR(handle))
-                       return PTR_ERR(handle);
-       }
-       ret = _ext4_get_block(inode, iblock, bh,
-                             create ? EXT4_GET_BLOCKS_CREATE : 0);
-       if (create)
-               ext4_journal_stop(handle);
-       return ret;
+       if (!create)
+               return _ext4_get_block(inode, iblock, bh, 0);
+       return ext4_get_block_trans(inode, iblock, bh, EXT4_GET_BLOCKS_CREATE);
 }
 
 /*
@@ -806,18 +814,13 @@ int ext4_dio_get_block(struct inode *inode, sector_t iblock,
 static int ext4_dio_get_block_unwritten_async(struct inode *inode,
                sector_t iblock, struct buffer_head *bh_result, int create)
 {
-       handle_t *handle;
        int ret;
 
        /* We don't expect handle for direct IO */
        WARN_ON_ONCE(ext4_journal_current_handle());
 
-       handle = start_dio_trans(inode, bh_result);
-       if (IS_ERR(handle))
-               return PTR_ERR(handle);
-       ret = _ext4_get_block(inode, iblock, bh_result,
-                             EXT4_GET_BLOCKS_IO_CREATE_EXT);
-       ext4_journal_stop(handle);
+       ret = ext4_get_block_trans(inode, iblock, bh_result,
+                                  EXT4_GET_BLOCKS_IO_CREATE_EXT);
 
        /*
         * When doing DIO using unwritten extents, we need io_end to convert
@@ -850,18 +853,13 @@ static int ext4_dio_get_block_unwritten_async(struct inode *inode,
 static int ext4_dio_get_block_unwritten_sync(struct inode *inode,
                sector_t iblock, struct buffer_head *bh_result, int create)
 {
-       handle_t *handle;
        int ret;
 
        /* We don't expect handle for direct IO */
        WARN_ON_ONCE(ext4_journal_current_handle());
 
-       handle = start_dio_trans(inode, bh_result);
-       if (IS_ERR(handle))
-               return PTR_ERR(handle);
-       ret = _ext4_get_block(inode, iblock, bh_result,
-                             EXT4_GET_BLOCKS_IO_CREATE_EXT);
-       ext4_journal_stop(handle);
+       ret = ext4_get_block_trans(inode, iblock, bh_result,
+                                  EXT4_GET_BLOCKS_IO_CREATE_EXT);
 
        /*
         * Mark inode as having pending DIO writes to unwritten extents.
@@ -1057,7 +1055,7 @@ int do_journal_get_write_access(handle_t *handle,
 static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
                                  get_block_t *get_block)
 {
-       unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+       unsigned from = pos & (PAGE_SIZE - 1);
        unsigned to = from + len;
        struct inode *inode = page->mapping->host;
        unsigned block_start, block_end;
@@ -1069,15 +1067,15 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
        bool decrypt = false;
 
        BUG_ON(!PageLocked(page));
-       BUG_ON(from > PAGE_CACHE_SIZE);
-       BUG_ON(to > PAGE_CACHE_SIZE);
+       BUG_ON(from > PAGE_SIZE);
+       BUG_ON(to > PAGE_SIZE);
        BUG_ON(from > to);
 
        if (!page_has_buffers(page))
                create_empty_buffers(page, blocksize, 0);
        head = page_buffers(page);
        bbits = ilog2(blocksize);
-       block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
+       block = (sector_t)page->index << (PAGE_SHIFT - bbits);
 
        for (bh = head, block_start = 0; bh != head || !block_start;
            block++, block_start = block_end, bh = bh->b_this_page) {
@@ -1159,8 +1157,8 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
         * we allocate blocks but write fails for some reason
         */
        needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
-       index = pos >> PAGE_CACHE_SHIFT;
-       from = pos & (PAGE_CACHE_SIZE - 1);
+       index = pos >> PAGE_SHIFT;
+       from = pos & (PAGE_SIZE - 1);
        to = from + len;
 
        if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
@@ -1188,7 +1186,7 @@ retry_grab:
 retry_journal:
        handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
        if (IS_ERR(handle)) {
-               page_cache_release(page);
+               put_page(page);
                return PTR_ERR(handle);
        }
 
@@ -1196,7 +1194,7 @@ retry_journal:
        if (page->mapping != mapping) {
                /* The page got truncated from under us */
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
                ext4_journal_stop(handle);
                goto retry_grab;
        }
@@ -1252,7 +1250,7 @@ retry_journal:
                if (ret == -ENOSPC &&
                    ext4_should_retry_alloc(inode->i_sb, &retries))
                        goto retry_journal;
-               page_cache_release(page);
+               put_page(page);
                return ret;
        }
        *pagep = page;
@@ -1295,7 +1293,7 @@ static int ext4_write_end(struct file *file,
                ret = ext4_jbd2_file_inode(handle, inode);
                if (ret) {
                        unlock_page(page);
-                       page_cache_release(page);
+                       put_page(page);
                        goto errout;
                }
        }
@@ -1315,7 +1313,7 @@ static int ext4_write_end(struct file *file,
         */
        i_size_changed = ext4_update_inode_size(inode, pos + copied);
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 
        if (old_size < pos)
                pagecache_isize_extended(inode, old_size, pos);
@@ -1399,7 +1397,7 @@ static int ext4_journalled_write_end(struct file *file,
        int size_changed = 0;
 
        trace_ext4_journalled_write_end(inode, pos, len, copied);
-       from = pos & (PAGE_CACHE_SIZE - 1);
+       from = pos & (PAGE_SIZE - 1);
        to = from + len;
 
        BUG_ON(!ext4_handle_valid(handle));
@@ -1423,7 +1421,7 @@ static int ext4_journalled_write_end(struct file *file,
        ext4_set_inode_state(inode, EXT4_STATE_JDATA);
        EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 
        if (old_size < pos)
                pagecache_isize_extended(inode, old_size, pos);
@@ -1537,7 +1535,7 @@ static void ext4_da_page_release_reservation(struct page *page,
        int num_clusters;
        ext4_fsblk_t lblk;
 
-       BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
+       BUG_ON(stop > PAGE_SIZE || stop < length);
 
        head = page_buffers(page);
        bh = head;
@@ -1553,7 +1551,7 @@ static void ext4_da_page_release_reservation(struct page *page,
                        clear_buffer_delay(bh);
                } else if (contiguous_blks) {
                        lblk = page->index <<
-                              (PAGE_CACHE_SHIFT - inode->i_blkbits);
+                              (PAGE_SHIFT - inode->i_blkbits);
                        lblk += (curr_off >> inode->i_blkbits) -
                                contiguous_blks;
                        ext4_es_remove_extent(inode, lblk, contiguous_blks);
@@ -1563,7 +1561,7 @@ static void ext4_da_page_release_reservation(struct page *page,
        } while ((bh = bh->b_this_page) != head);
 
        if (contiguous_blks) {
-               lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+               lblk = page->index << (PAGE_SHIFT - inode->i_blkbits);
                lblk += (curr_off >> inode->i_blkbits) - contiguous_blks;
                ext4_es_remove_extent(inode, lblk, contiguous_blks);
        }
@@ -1572,7 +1570,7 @@ static void ext4_da_page_release_reservation(struct page *page,
         * need to release the reserved space for that cluster. */
        num_clusters = EXT4_NUM_B2C(sbi, to_release);
        while (num_clusters > 0) {
-               lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) +
+               lblk = (page->index << (PAGE_SHIFT - inode->i_blkbits)) +
                        ((num_clusters - 1) << sbi->s_cluster_bits);
                if (sbi->s_cluster_ratio == 1 ||
                    !ext4_find_delalloc_cluster(inode, lblk))
@@ -1619,8 +1617,8 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
        end   = mpd->next_page - 1;
        if (invalidate) {
                ext4_lblk_t start, last;
-               start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
-               last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+               start = index << (PAGE_SHIFT - inode->i_blkbits);
+               last = end << (PAGE_SHIFT - inode->i_blkbits);
                ext4_es_remove_extent(inode, start, last - start + 1);
        }
 
@@ -1636,7 +1634,7 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
                        BUG_ON(!PageLocked(page));
                        BUG_ON(PageWriteback(page));
                        if (invalidate) {
-                               block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
+                               block_invalidatepage(page, 0, PAGE_SIZE);
                                ClearPageUptodate(page);
                        }
                        unlock_page(page);
@@ -2007,10 +2005,10 @@ static int ext4_writepage(struct page *page,
 
        trace_ext4_writepage(page);
        size = i_size_read(inode);
-       if (page->index == size >> PAGE_CACHE_SHIFT)
-               len = size & ~PAGE_CACHE_MASK;
+       if (page->index == size >> PAGE_SHIFT)
+               len = size & ~PAGE_MASK;
        else
-               len = PAGE_CACHE_SIZE;
+               len = PAGE_SIZE;
 
        page_bufs = page_buffers(page);
        /*
@@ -2034,7 +2032,7 @@ static int ext4_writepage(struct page *page,
                                   ext4_bh_delay_or_unwritten)) {
                redirty_page_for_writepage(wbc, page);
                if ((current->flags & PF_MEMALLOC) ||
-                   (inode->i_sb->s_blocksize == PAGE_CACHE_SIZE)) {
+                   (inode->i_sb->s_blocksize == PAGE_SIZE)) {
                        /*
                         * For memory cleaning there's no point in writing only
                         * some buffers. So just bail out. Warn if we came here
@@ -2076,10 +2074,10 @@ static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
        int err;
 
        BUG_ON(page->index != mpd->first_page);
-       if (page->index == size >> PAGE_CACHE_SHIFT)
-               len = size & ~PAGE_CACHE_MASK;
+       if (page->index == size >> PAGE_SHIFT)
+               len = size & ~PAGE_MASK;
        else
-               len = PAGE_CACHE_SIZE;
+               len = PAGE_SIZE;
        clear_page_dirty_for_io(page);
        err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false);
        if (!err)
@@ -2213,7 +2211,7 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
        int nr_pages, i;
        struct inode *inode = mpd->inode;
        struct buffer_head *head, *bh;
-       int bpp_bits = PAGE_CACHE_SHIFT - inode->i_blkbits;
+       int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
        pgoff_t start, end;
        ext4_lblk_t lblk;
        sector_t pblock;
@@ -2274,7 +2272,7 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
                         * supports blocksize < pagesize as we will try to
                         * convert potentially unmapped parts of inode.
                         */
-                       mpd->io_submit.io_end->size += PAGE_CACHE_SIZE;
+                       mpd->io_submit.io_end->size += PAGE_SIZE;
                        /* Page fully mapped - let IO run! */
                        err = mpage_submit_page(mpd, page);
                        if (err < 0) {
@@ -2426,7 +2424,7 @@ update_disksize:
         * Update on-disk size after IO is submitted.  Races with
         * truncate are avoided by checking i_size under i_data_sem.
         */
-       disksize = ((loff_t)mpd->first_page) << PAGE_CACHE_SHIFT;
+       disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
        if (disksize > EXT4_I(inode)->i_disksize) {
                int err2;
                loff_t i_size;
@@ -2562,7 +2560,7 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
                        mpd->next_page = page->index + 1;
                        /* Add all dirty buffers to mpd */
                        lblk = ((ext4_lblk_t)page->index) <<
-                               (PAGE_CACHE_SHIFT - blkbits);
+                               (PAGE_SHIFT - blkbits);
                        head = page_buffers(page);
                        err = mpage_process_page_bufs(mpd, head, head, lblk);
                        if (err <= 0)
@@ -2647,7 +2645,7 @@ static int ext4_writepages(struct address_space *mapping,
                 * We may need to convert up to one extent per block in
                 * the page and we may dirty the inode.
                 */
-               rsv_blocks = 1 + (PAGE_CACHE_SIZE >> inode->i_blkbits);
+               rsv_blocks = 1 + (PAGE_SIZE >> inode->i_blkbits);
        }
 
        /*
@@ -2678,8 +2676,8 @@ static int ext4_writepages(struct address_space *mapping,
                mpd.first_page = writeback_index;
                mpd.last_page = -1;
        } else {
-               mpd.first_page = wbc->range_start >> PAGE_CACHE_SHIFT;
-               mpd.last_page = wbc->range_end >> PAGE_CACHE_SHIFT;
+               mpd.first_page = wbc->range_start >> PAGE_SHIFT;
+               mpd.last_page = wbc->range_end >> PAGE_SHIFT;
        }
 
        mpd.inode = inode;
@@ -2838,7 +2836,7 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
        struct inode *inode = mapping->host;
        handle_t *handle;
 
-       index = pos >> PAGE_CACHE_SHIFT;
+       index = pos >> PAGE_SHIFT;
 
        if (ext4_nonda_switch(inode->i_sb)) {
                *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
@@ -2881,7 +2879,7 @@ retry_journal:
        handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
                                ext4_da_write_credits(inode, pos, len));
        if (IS_ERR(handle)) {
-               page_cache_release(page);
+               put_page(page);
                return PTR_ERR(handle);
        }
 
@@ -2889,7 +2887,7 @@ retry_journal:
        if (page->mapping != mapping) {
                /* The page got truncated from under us */
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
                ext4_journal_stop(handle);
                goto retry_grab;
        }
@@ -2917,7 +2915,7 @@ retry_journal:
                    ext4_should_retry_alloc(inode->i_sb, &retries))
                        goto retry_journal;
 
-               page_cache_release(page);
+               put_page(page);
                return ret;
        }
 
@@ -2965,7 +2963,7 @@ static int ext4_da_write_end(struct file *file,
                                      len, copied, page, fsdata);
 
        trace_ext4_da_write_end(inode, pos, len, copied);
-       start = pos & (PAGE_CACHE_SIZE - 1);
+       start = pos & (PAGE_SIZE - 1);
        end = start + copied - 1;
 
        /*
@@ -3187,7 +3185,7 @@ static int __ext4_journalled_invalidatepage(struct page *page,
        /*
         * If it's a full truncate we just forget about the pending dirtying
         */
-       if (offset == 0 && length == PAGE_CACHE_SIZE)
+       if (offset == 0 && length == PAGE_SIZE)
                ClearPageChecked(page);
 
        return jbd2_journal_invalidatepage(journal, page, offset, length);
@@ -3556,8 +3554,8 @@ void ext4_set_aops(struct inode *inode)
 static int __ext4_block_zero_page_range(handle_t *handle,
                struct address_space *mapping, loff_t from, loff_t length)
 {
-       ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
-       unsigned offset = from & (PAGE_CACHE_SIZE-1);
+       ext4_fsblk_t index = from >> PAGE_SHIFT;
+       unsigned offset = from & (PAGE_SIZE-1);
        unsigned blocksize, pos;
        ext4_lblk_t iblock;
        struct inode *inode = mapping->host;
@@ -3565,14 +3563,14 @@ static int __ext4_block_zero_page_range(handle_t *handle,
        struct page *page;
        int err = 0;
 
-       page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
+       page = find_or_create_page(mapping, from >> PAGE_SHIFT,
                                   mapping_gfp_constraint(mapping, ~__GFP_FS));
        if (!page)
                return -ENOMEM;
 
        blocksize = inode->i_sb->s_blocksize;
 
-       iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
+       iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
 
        if (!page_has_buffers(page))
                create_empty_buffers(page, blocksize, 0);
@@ -3614,7 +3612,7 @@ static int __ext4_block_zero_page_range(handle_t *handle,
                    ext4_encrypted_inode(inode)) {
                        /* We expect the key to be set. */
                        BUG_ON(!ext4_has_encryption_key(inode));
-                       BUG_ON(blocksize != PAGE_CACHE_SIZE);
+                       BUG_ON(blocksize != PAGE_SIZE);
                        WARN_ON_ONCE(ext4_decrypt(page));
                }
        }
@@ -3638,7 +3636,7 @@ static int __ext4_block_zero_page_range(handle_t *handle,
 
 unlock:
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
        return err;
 }
 
@@ -3653,7 +3651,7 @@ static int ext4_block_zero_page_range(handle_t *handle,
                struct address_space *mapping, loff_t from, loff_t length)
 {
        struct inode *inode = mapping->host;
-       unsigned offset = from & (PAGE_CACHE_SIZE-1);
+       unsigned offset = from & (PAGE_SIZE-1);
        unsigned blocksize = inode->i_sb->s_blocksize;
        unsigned max = blocksize - (offset & (blocksize - 1));
 
@@ -3678,7 +3676,7 @@ static int ext4_block_zero_page_range(handle_t *handle,
 static int ext4_block_truncate_page(handle_t *handle,
                struct address_space *mapping, loff_t from)
 {
-       unsigned offset = from & (PAGE_CACHE_SIZE-1);
+       unsigned offset = from & (PAGE_SIZE-1);
        unsigned length;
        unsigned blocksize;
        struct inode *inode = mapping->host;
@@ -3816,7 +3814,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
         */
        if (offset + length > inode->i_size) {
                length = inode->i_size +
-                  PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) -
+                  PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) -
                   offset;
        }
 
@@ -4891,23 +4889,23 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode)
        tid_t commit_tid = 0;
        int ret;
 
-       offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
+       offset = inode->i_size & (PAGE_SIZE - 1);
        /*
         * All buffers in the last page remain valid? Then there's nothing to
-        * do. We do the check mainly to optimize the common PAGE_CACHE_SIZE ==
+        * do. We do the check mainly to optimize the common PAGE_SIZE ==
         * blocksize case
         */
-       if (offset > PAGE_CACHE_SIZE - (1 << inode->i_blkbits))
+       if (offset > PAGE_SIZE - (1 << inode->i_blkbits))
                return;
        while (1) {
                page = find_lock_page(inode->i_mapping,
-                                     inode->i_size >> PAGE_CACHE_SHIFT);
+                                     inode->i_size >> PAGE_SHIFT);
                if (!page)
                        return;
                ret = __ext4_journalled_invalidatepage(page, offset,
-                                               PAGE_CACHE_SIZE - offset);
+                                               PAGE_SIZE - offset);
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
                if (ret != -EBUSY)
                        return;
                commit_tid = 0;
@@ -5546,10 +5544,10 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
                goto out;
        }
 
-       if (page->index == size >> PAGE_CACHE_SHIFT)
-               len = size & ~PAGE_CACHE_MASK;
+       if (page->index == size >> PAGE_SHIFT)
+               len = size & ~PAGE_MASK;
        else
-               len = PAGE_CACHE_SIZE;
+               len = PAGE_SIZE;
        /*
         * Return if we have all the buffers mapped. This avoids the need to do
         * journal_start/journal_stop which can block and take a long time
@@ -5580,7 +5578,7 @@ retry_alloc:
        ret = block_page_mkwrite(vma, vmf, get_block);
        if (!ret && ext4_should_journal_data(inode)) {
                if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
-                         PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) {
+                         PAGE_SIZE, NULL, do_journal_get_write_access)) {
                        unlock_page(page);
                        ret = VM_FAULT_SIGBUS;
                        ext4_journal_stop(handle);
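
The inode.c refactor above folds three nearly identical DIO get_block helpers into ext4_get_block_trans(), and in doing so gives all of them the ENOSPC retry they previously lacked: stop the handle, let the pending journal commit release blocks, and try again. A condensed sketch of that idiom within ext4; do_mapping() is a hypothetical stand-in for _ext4_get_block():

static int map_blocks_with_retry(struct inode *inode, int dio_credits)
{
	handle_t *handle;
	int retries = 0;
	int ret;

retry:
	handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = do_mapping(inode);	/* hypothetical mapping step */
	ext4_journal_stop(handle);
	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;		/* a journal commit may have freed blocks */
	return ret;
}
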
index 50e05df28f665d56a096f671929e23b715e00f2e..eeeade76012ecf66f59340332d6838451b4e6eda 100644 (file)
@@ -119,7 +119,7 @@ MODULE_PARM_DESC(mballoc_debug, "Debugging level for ext4's mballoc");
  *
  *
  * one block each for bitmap and buddy information.  So for each group we
- * take up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE /
+ * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
  * blocksize) blocks.  So it can have information regarding groups_per_page
  * which is blocks_per_page/2
  *
@@ -807,7 +807,7 @@ static void mb_regenerate_buddy(struct ext4_buddy *e4b)
  *
  * one block each for bitmap and buddy information.
  * So for each group we take up 2 blocks. A page can
- * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize)  blocks.
+ * contain blocks_per_page (PAGE_SIZE / blocksize)  blocks.
  * So it can have information regarding groups_per_page which
  * is blocks_per_page/2
  *
@@ -839,7 +839,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
        sb = inode->i_sb;
        ngroups = ext4_get_groups_count(sb);
        blocksize = 1 << inode->i_blkbits;
-       blocks_per_page = PAGE_CACHE_SIZE / blocksize;
+       blocks_per_page = PAGE_SIZE / blocksize;
 
        groups_per_page = blocks_per_page >> 1;
        if (groups_per_page == 0)
@@ -993,7 +993,7 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
        e4b->bd_buddy_page = NULL;
        e4b->bd_bitmap_page = NULL;
 
-       blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
+       blocks_per_page = PAGE_SIZE / sb->s_blocksize;
        /*
         * the buddy cache inode stores the block bitmap
         * and buddy information in consecutive blocks.
@@ -1028,11 +1028,11 @@ static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
 {
        if (e4b->bd_bitmap_page) {
                unlock_page(e4b->bd_bitmap_page);
-               page_cache_release(e4b->bd_bitmap_page);
+               put_page(e4b->bd_bitmap_page);
        }
        if (e4b->bd_buddy_page) {
                unlock_page(e4b->bd_buddy_page);
-               page_cache_release(e4b->bd_buddy_page);
+               put_page(e4b->bd_buddy_page);
        }
 }
 
@@ -1125,7 +1125,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
        might_sleep();
        mb_debug(1, "load group %u\n", group);
 
-       blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
+       blocks_per_page = PAGE_SIZE / sb->s_blocksize;
        grp = ext4_get_group_info(sb, group);
 
        e4b->bd_blkbits = sb->s_blocksize_bits;
@@ -1167,7 +1167,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
                         * is yet to initialize the same. So
                         * wait for it to initialize.
                         */
-                       page_cache_release(page);
+                       put_page(page);
                page = find_or_create_page(inode->i_mapping, pnum, gfp);
                if (page) {
                        BUG_ON(page->mapping != inode->i_mapping);
@@ -1203,7 +1203,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
        page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
        if (page == NULL || !PageUptodate(page)) {
                if (page)
-                       page_cache_release(page);
+                       put_page(page);
                page = find_or_create_page(inode->i_mapping, pnum, gfp);
                if (page) {
                        BUG_ON(page->mapping != inode->i_mapping);
@@ -1238,11 +1238,11 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
 
 err:
        if (page)
-               page_cache_release(page);
+               put_page(page);
        if (e4b->bd_bitmap_page)
-               page_cache_release(e4b->bd_bitmap_page);
+               put_page(e4b->bd_bitmap_page);
        if (e4b->bd_buddy_page)
-               page_cache_release(e4b->bd_buddy_page);
+               put_page(e4b->bd_buddy_page);
        e4b->bd_buddy = NULL;
        e4b->bd_bitmap = NULL;
        return ret;
@@ -1257,9 +1257,9 @@ static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
 static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
 {
        if (e4b->bd_bitmap_page)
-               page_cache_release(e4b->bd_bitmap_page);
+               put_page(e4b->bd_bitmap_page);
        if (e4b->bd_buddy_page)
-               page_cache_release(e4b->bd_buddy_page);
+               put_page(e4b->bd_buddy_page);
 }
 
 
@@ -2833,8 +2833,8 @@ static void ext4_free_data_callback(struct super_block *sb,
                /* No more items in the per group rb tree
                 * balance refcounts from ext4_mb_free_metadata()
                 */
-               page_cache_release(e4b.bd_buddy_page);
-               page_cache_release(e4b.bd_bitmap_page);
+               put_page(e4b.bd_buddy_page);
+               put_page(e4b.bd_bitmap_page);
        }
        ext4_unlock_group(sb, entry->efd_group);
        kmem_cache_free(ext4_free_data_cachep, entry);
@@ -4385,9 +4385,9 @@ static int ext4_mb_release_context(struct ext4_allocation_context *ac)
                ext4_mb_put_pa(ac, ac->ac_sb, pa);
        }
        if (ac->ac_bitmap_page)
-               page_cache_release(ac->ac_bitmap_page);
+               put_page(ac->ac_bitmap_page);
        if (ac->ac_buddy_page)
-               page_cache_release(ac->ac_buddy_page);
+               put_page(ac->ac_buddy_page);
        if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
                mutex_unlock(&ac->ac_lg->lg_mutex);
        ext4_mb_collect_stats(ac);
@@ -4599,8 +4599,8 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
                 * otherwise we'll refresh it from
                 * on-disk bitmap and lose not-yet-available
                 * blocks */
-               page_cache_get(e4b->bd_buddy_page);
-               page_cache_get(e4b->bd_bitmap_page);
+               get_page(e4b->bd_buddy_page);
+               get_page(e4b->bd_bitmap_page);
        }
        while (*n) {
                parent = *n;
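
The get_page()/put_page() pairs in ext4_mb_free_metadata() and ext4_free_data_callback() above pin the buddy and bitmap pages while freed extents sit on the per-group rb tree, so the in-memory buddy state cannot be reclaimed and later refreshed from a stale on-disk bitmap. A generic sketch of this pin-across-deferred-work pattern; the type and function names are invented for illustration:

#include <linux/mm.h>

struct deferred_free {
	struct page *bd_page;		/* pinned buddy/bitmap page */
	/* ... extent to free after commit ... */
};

static void queue_deferred_free(struct deferred_free *entry, struct page *page)
{
	get_page(page);			/* keep buddy state alive past this call */
	entry->bd_page = page;
}

static void deferred_free_callback(struct deferred_free *entry)
{
	/* ... mark the blocks free in the pinned buddy page ... */
	put_page(entry->bd_page);	/* balance queue_deferred_free() */
}
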
index 4098acc701c3e66d5e8a229ed49828d69997655f..325cef48b39a8d23788dc17ef056ccf79a7b58f6 100644 (file)
@@ -60,10 +60,10 @@ ext4_double_down_write_data_sem(struct inode *first, struct inode *second)
 {
        if (first < second) {
                down_write(&EXT4_I(first)->i_data_sem);
-               down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING);
+               down_write_nested(&EXT4_I(second)->i_data_sem, I_DATA_SEM_OTHER);
        } else {
                down_write(&EXT4_I(second)->i_data_sem);
-               down_write_nested(&EXT4_I(first)->i_data_sem, SINGLE_DEPTH_NESTING);
+               down_write_nested(&EXT4_I(first)->i_data_sem, I_DATA_SEM_OTHER);
 
        }
 }
@@ -156,7 +156,7 @@ mext_page_double_lock(struct inode *inode1, struct inode *inode2,
        page[1] = grab_cache_page_write_begin(mapping[1], index2, fl);
        if (!page[1]) {
                unlock_page(page[0]);
-               page_cache_release(page[0]);
+               put_page(page[0]);
                return -ENOMEM;
        }
        /*
@@ -192,7 +192,7 @@ mext_page_mkuptodate(struct page *page, unsigned from, unsigned to)
                create_empty_buffers(page, blocksize, 0);
 
        head = page_buffers(page);
-       block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+       block = (sector_t)page->index << (PAGE_SHIFT - inode->i_blkbits);
        for (bh = head, block_start = 0; bh != head || !block_start;
             block++, block_start = block_end, bh = bh->b_this_page) {
                block_end = block_start + blocksize;
@@ -268,7 +268,7 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
        int i, err2, jblocks, retries = 0;
        int replaced_count = 0;
        int from = data_offset_in_page << orig_inode->i_blkbits;
-       int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
+       int blocks_per_page = PAGE_SIZE >> orig_inode->i_blkbits;
        struct super_block *sb = orig_inode->i_sb;
        struct buffer_head *bh = NULL;
 
@@ -404,9 +404,9 @@ data_copy:
 
 unlock_pages:
        unlock_page(pagep[0]);
-       page_cache_release(pagep[0]);
+       put_page(pagep[0]);
        unlock_page(pagep[1]);
-       page_cache_release(pagep[1]);
+       put_page(pagep[1]);
 stop_journal:
        ext4_journal_stop(handle);
        if (*err == -ENOSPC &&
@@ -484,6 +484,13 @@ mext_check_arguments(struct inode *orig_inode,
                return -EBUSY;
        }
 
+       if (IS_NOQUOTA(orig_inode) || IS_NOQUOTA(donor_inode)) {
+               ext4_debug("ext4 move extent: The argument files should "
+                       "not be quota files [ino:orig %lu, donor %lu]\n",
+                       orig_inode->i_ino, donor_inode->i_ino);
+               return -EBUSY;
+       }
+
        /* Ext4 move extent supports only extent based file */
        if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
                ext4_debug("ext4 move extent: orig file is not extents "
@@ -554,7 +561,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
        struct inode *orig_inode = file_inode(o_filp);
        struct inode *donor_inode = file_inode(d_filp);
        struct ext4_ext_path *path = NULL;
-       int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
+       int blocks_per_page = PAGE_SIZE >> orig_inode->i_blkbits;
        ext4_lblk_t o_end, o_start = orig_blk;
        ext4_lblk_t d_start = donor_blk;
        int ret;
@@ -648,9 +655,9 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
                if (o_end - o_start < cur_len)
                        cur_len = o_end - o_start;
 
-               orig_page_index = o_start >> (PAGE_CACHE_SHIFT -
+               orig_page_index = o_start >> (PAGE_SHIFT -
                                               orig_inode->i_blkbits);
-               donor_page_index = d_start >> (PAGE_CACHE_SHIFT -
+               donor_page_index = d_start >> (PAGE_SHIFT -
                                               donor_inode->i_blkbits);
                offset_in_page = o_start % blocks_per_page;
                if (cur_len > blocks_per_page - offset_in_page)
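
ext4_double_down_write_data_sem() above combines two classic techniques: impose a global lock order (lower inode first) so two concurrent movers cannot deadlock, and annotate the second acquisition with the new I_DATA_SEM_OTHER subclass so lockdep tolerates two locks of the same class. A minimal sketch over bare rwsems, assuming I_DATA_SEM_OTHER is in scope; the wrapper itself is invented:

#include <linux/kernel.h>	/* swap() */
#include <linux/rwsem.h>

static void double_down_write_ordered(struct rw_semaphore *a,
				      struct rw_semaphore *b)
{
	if (b < a)
		swap(a, b);		/* global order: lower address first */
	down_write(a);
	down_write_nested(b, I_DATA_SEM_OTHER);	/* second lock, same class */
}
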
index d77d15f4b674485de79f7c72d3a36049fd4aa986..e4fc8ea45d7888fe3677f052e1af8dff39d443ab 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
+#include <linux/backing-dev.h>
 
 #include "ext4_jbd2.h"
 #include "xattr.h"
@@ -432,8 +433,8 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
         * the page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
-       if (len < PAGE_CACHE_SIZE)
-               zero_user_segment(page, len, PAGE_CACHE_SIZE);
+       if (len < PAGE_SIZE)
+               zero_user_segment(page, len, PAGE_SIZE);
        /*
         * In the first loop we prepare and mark buffers to submit. We have to
         * mark all buffers in the page before submitting so that
@@ -470,9 +471,20 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 
        if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode) &&
            nr_to_submit) {
-               data_page = ext4_encrypt(inode, page);
+               gfp_t gfp_flags = GFP_NOFS;
+
+       retry_encrypt:
+               data_page = ext4_encrypt(inode, page, gfp_flags);
                if (IS_ERR(data_page)) {
                        ret = PTR_ERR(data_page);
+                       if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) {
+                               if (io->io_bio) {
+                                       ext4_io_submit(io);
+                                       congestion_wait(BLK_RW_ASYNC, HZ/50);
+                               }
+                               gfp_flags |= __GFP_NOFAIL;
+                               goto retry_encrypt;
+                       }
                        data_page = NULL;
                        goto out;
                }
index 5dc5e95063de2a7e42749a94464f00f7c50be4b8..dc54a4b60eba0faf0b4f8afb290925b5b57e355a 100644 (file)
@@ -23,7 +23,7 @@
  *
  * then this code just gives up and calls the buffer_head-based read function.
  * It does handle a page which has holes at the end - that is a common case:
- * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
+ * the end-of-file on blocksize < PAGE_SIZE setups.
  *
  */
 
@@ -140,7 +140,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
 
        struct inode *inode = mapping->host;
        const unsigned blkbits = inode->i_blkbits;
-       const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
+       const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
        const unsigned blocksize = 1 << blkbits;
        sector_t block_in_file;
        sector_t last_block;
@@ -173,7 +173,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
                if (page_has_buffers(page))
                        goto confused;
 
-               block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
+               block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
                last_block = block_in_file + nr_pages * blocks_per_page;
                last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
                if (last_block > last_block_in_file)
@@ -217,7 +217,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
                                set_error_page:
                                        SetPageError(page);
                                        zero_user_segment(page, 0,
-                                                         PAGE_CACHE_SIZE);
+                                                         PAGE_SIZE);
                                        unlock_page(page);
                                        goto next_page;
                                }
@@ -250,7 +250,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
                }
                if (first_hole != blocks_per_page) {
                        zero_user_segment(page, first_hole << blkbits,
-                                         PAGE_CACHE_SIZE);
+                                         PAGE_SIZE);
                        if (first_hole == 0) {
                                SetPageUptodate(page);
                                unlock_page(page);
@@ -279,7 +279,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
 
                        if (ext4_encrypted_inode(inode) &&
                            S_ISREG(inode->i_mode)) {
-                               ctx = ext4_get_crypto_ctx(inode);
+                               ctx = ext4_get_crypto_ctx(inode, GFP_NOFS);
                                if (IS_ERR(ctx))
                                        goto set_error_page;
                        }
@@ -319,7 +319,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
                        unlock_page(page);
        next_page:
                if (pages)
-                       page_cache_release(page);
+                       put_page(page);
        }
        BUG_ON(pages && !list_empty(pages));
        if (bio)
index 5392975158963118f7eeba2dcf8d560ec485e53d..304c712dbe12e8ba7d5c5abfaca30f7ef3f5e5a6 100644 (file)
@@ -1113,6 +1113,7 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
 static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
                             unsigned int flags);
 static int ext4_enable_quotas(struct super_block *sb);
+static int ext4_get_next_id(struct super_block *sb, struct kqid *qid);
 
 static struct dquot **ext4_get_dquots(struct inode *inode)
 {
@@ -1129,7 +1130,7 @@ static const struct dquot_operations ext4_quota_operations = {
        .alloc_dquot    = dquot_alloc,
        .destroy_dquot  = dquot_destroy,
        .get_projid     = ext4_get_projid,
-       .get_next_id    = dquot_get_next_id,
+       .get_next_id    = ext4_get_next_id,
 };
 
 static const struct quotactl_ops ext4_qctl_operations = {
@@ -1323,9 +1324,9 @@ static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
                return -1;
        }
        if (ext4_has_feature_quota(sb)) {
-               ext4_msg(sb, KERN_ERR, "Cannot set journaled quota options "
-                        "when QUOTA feature is enabled");
-               return -1;
+               ext4_msg(sb, KERN_INFO, "Journaled quota options "
+                        "ignored when QUOTA feature is enabled");
+               return 1;
        }
        qname = match_strdup(args);
        if (!qname) {
@@ -1688,10 +1689,10 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
                        return -1;
                }
                if (ext4_has_feature_quota(sb)) {
-                       ext4_msg(sb, KERN_ERR,
-                                "Cannot set journaled quota options "
+                       ext4_msg(sb, KERN_INFO,
+                                "Quota format mount options ignored "
                                 "when QUOTA feature is enabled");
-                       return -1;
+                       return 1;
                }
                sbi->s_jquota_fmt = m->mount_opt;
 #endif
@@ -1756,11 +1757,11 @@ static int parse_options(char *options, struct super_block *sb,
 #ifdef CONFIG_QUOTA
        if (ext4_has_feature_quota(sb) &&
            (test_opt(sb, USRQUOTA) || test_opt(sb, GRPQUOTA))) {
-               ext4_msg(sb, KERN_ERR, "Cannot set quota options when QUOTA "
-                        "feature is enabled");
-               return 0;
-       }
-       if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
+               ext4_msg(sb, KERN_INFO, "Quota feature enabled, usrquota and grpquota "
+                        "mount options ignored.");
+               clear_opt(sb, USRQUOTA);
+               clear_opt(sb, GRPQUOTA);
+       } else if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
                if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
                        clear_opt(sb, USRQUOTA);
 
@@ -1784,7 +1785,7 @@ static int parse_options(char *options, struct super_block *sb,
                int blocksize =
                        BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
 
-               if (blocksize < PAGE_CACHE_SIZE) {
+               if (blocksize < PAGE_SIZE) {
                        ext4_msg(sb, KERN_ERR, "can't mount with "
                                 "dioread_nolock if block size != PAGE_SIZE");
                        return 0;
@@ -3808,7 +3809,7 @@ no_journal:
        }
 
        if ((DUMMY_ENCRYPTION_ENABLED(sbi) || ext4_has_feature_encrypt(sb)) &&
-           (blocksize != PAGE_CACHE_SIZE)) {
+           (blocksize != PAGE_SIZE)) {
                ext4_msg(sb, KERN_ERR,
                         "Unsupported blocksize for fs encryption");
                goto failed_mount_wq;
@@ -5028,6 +5029,20 @@ static int ext4_quota_on_mount(struct super_block *sb, int type)
                                        EXT4_SB(sb)->s_jquota_fmt, type);
 }
 
+static void lockdep_set_quota_inode(struct inode *inode, int subclass)
+{
+       struct ext4_inode_info *ei = EXT4_I(inode);
+
+       /* The first argument of lockdep_set_subclass has to be
+        * *exactly* the same as the argument to init_rwsem() --- in
+        * this case, in init_once() --- or lockdep gets unhappy
+        * because the name of the lock is set using the
+        * stringification of the argument to init_rwsem().
+        */
+       (void) ei;      /* shut up clang warning if !CONFIG_LOCKDEP */
+       lockdep_set_subclass(&ei->i_data_sem, subclass);
+}
+
 /*
  * Standard function to be called on quota_on
  */
@@ -5067,8 +5082,12 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
                if (err)
                        return err;
        }
-
-       return dquot_quota_on(sb, type, format_id, path);
+       lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
+       err = dquot_quota_on(sb, type, format_id, path);
+       if (err)
+               lockdep_set_quota_inode(path->dentry->d_inode,
+                                            I_DATA_SEM_NORMAL);
+       return err;
 }
 
 static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
@@ -5095,8 +5114,11 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
 
        /* Don't account quota for quota files to avoid recursion */
        qf_inode->i_flags |= S_NOQUOTA;
+       lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
        err = dquot_enable(qf_inode, type, format_id, flags);
        iput(qf_inode);
+       if (err)
+               lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
 
        return err;
 }
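
Both quota entry points now tag the quota file's i_data_sem with the I_DATA_SEM_QUOTA lockdep subclass before handing the inode to the quota code, and restore I_DATA_SEM_NORMAL if enabling fails, so lockdep can tell the quota-file lock nesting apart from ordinary inodes without false positives. Lockdep itself is kernel-only; the set-then-roll-back-on-error shape, in a plain C sketch with hypothetical names:

    enum sem_class { SEM_NORMAL, SEM_QUOTA };

    struct myinode { enum sem_class data_sem_class; };

    /* Reclassify before first use; undo the annotation if setup fails. */
    static int enable_quota(struct myinode *qf,
                            int (*do_enable)(struct myinode *))
    {
        int err;

        qf->data_sem_class = SEM_QUOTA;
        err = do_enable(qf);
        if (err)
            qf->data_sem_class = SEM_NORMAL;
        return err;
    }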
@@ -5253,6 +5275,17 @@ out:
        return len;
 }
 
+static int ext4_get_next_id(struct super_block *sb, struct kqid *qid)
+{
+       const struct quota_format_ops   *ops;
+
+       if (!sb_has_quota_loaded(sb, qid->type))
+               return -ESRCH;
+       ops = sb_dqopt(sb)->ops[qid->type];
+       if (!ops || !ops->get_next_id)
+               return -ENOSYS;
+       return dquot_get_next_id(sb, qid);
+}
 #endif
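
The new ext4_get_next_id() guards the generic dquot_get_next_id() with two probes: quota must actually be loaded for the requested id type (otherwise -ESRCH), and the active quota format must implement get_next_id (otherwise -ENOSYS). The probe-then-dispatch shape, sketched with hypothetical stand-ins for sb_has_quota_loaded() and sb_dqopt():

    #include <errno.h>
    #include <stddef.h>

    struct fmt_ops { int (*get_next_id)(int type, unsigned long long *id); };

    static int quota_loaded[2];          /* per-type "enabled" flags */
    static const struct fmt_ops *fmt[2]; /* per-type format operations */

    static int get_next_id(int type, unsigned long long *id)
    {
        if (!quota_loaded[type])
            return -ESRCH;               /* nothing enabled for this type */
        if (!fmt[type] || !fmt[type]->get_next_id)
            return -ENOSYS;              /* format cannot iterate ids */
        return fmt[type]->get_next_id(type, id);
    }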
 
 static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
index 6f7ee30a89ce806ec0e4fbe27d7a0538ccd1f5c7..75ed5c2f0c167b1169a9008fbfdd2e33fd80eb83 100644 (file)
@@ -80,12 +80,12 @@ static const char *ext4_encrypted_get_link(struct dentry *dentry,
        if (res <= plen)
                paddr[res] = '\0';
        if (cpage)
-               page_cache_release(cpage);
+               put_page(cpage);
        set_delayed_call(done, kfree_link, paddr);
        return paddr;
 errout:
        if (cpage)
-               page_cache_release(cpage);
+               put_page(cpage);
        kfree(paddr);
        return ERR_PTR(res);
 }
index 0441e055c8e8b734a86a31b2f2afcac1d43a013b..e79bd32b9b798735f312ba87e3658e0796c44a2a 100644 (file)
@@ -230,6 +230,27 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
        return error;
 }
 
+static int
+__xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
+                        void *end, const char *function, unsigned int line)
+{
+       struct ext4_xattr_entry *entry = IFIRST(header);
+       int error = -EFSCORRUPTED;
+
+       if (((void *) header >= end) ||
+           (header->h_magic != le32_to_cpu(EXT4_XATTR_MAGIC)))
+               goto errout;
+       error = ext4_xattr_check_names(entry, end, entry);
+errout:
+       if (error)
+               __ext4_error_inode(inode, function, line, 0,
+                                  "corrupted in-inode xattr");
+       return error;
+}
+
+#define xattr_check_inode(inode, header, end) \
+       __xattr_check_inode((inode), (header), (end), __func__, __LINE__)
+
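
__xattr_check_inode() centralizes validation of the in-inode xattr area (header within bounds, correct magic, well-formed entry list), and the xattr_check_inode() wrapper macro stamps every corruption report with the caller's __func__ and __LINE__, which is what lets the call sites below share one message. The macro pattern in isolation:

    #include <stdio.h>

    static int __check(int ok, const char *function, unsigned int line)
    {
        if (!ok)
            fprintf(stderr, "%s:%u: corrupted in-inode xattr\n",
                    function, line);
        return ok ? 0 : -1;
    }

    /* Every caller reports its own location without repeating boilerplate. */
    #define check(ok) __check((ok), __func__, __LINE__)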
 static inline int
 ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size)
 {
@@ -341,7 +362,7 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
        header = IHDR(inode, raw_inode);
        entry = IFIRST(header);
        end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
-       error = ext4_xattr_check_names(entry, end, entry);
+       error = xattr_check_inode(inode, header, end);
        if (error)
                goto cleanup;
        error = ext4_xattr_find_entry(&entry, name_index, name,
@@ -477,7 +498,7 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
        raw_inode = ext4_raw_inode(&iloc);
        header = IHDR(inode, raw_inode);
        end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
-       error = ext4_xattr_check_names(IFIRST(header), end, IFIRST(header));
+       error = xattr_check_inode(inode, header, end);
        if (error)
                goto cleanup;
        error = ext4_xattr_list_entries(dentry, IFIRST(header),
@@ -1040,8 +1061,7 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
        is->s.here = is->s.first;
        is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
        if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
-               error = ext4_xattr_check_names(IFIRST(header), is->s.end,
-                                              IFIRST(header));
+               error = xattr_check_inode(inode, header, is->s.end);
                if (error)
                        return error;
                /* Find the named attribute. */
@@ -1356,6 +1376,10 @@ retry:
        last = entry;
        total_ino = sizeof(struct ext4_xattr_ibody_header);
 
+       error = xattr_check_inode(inode, header, end);
+       if (error)
+               goto cleanup;
+
        free = ext4_xattr_free_space(last, &min_offs, base, &total_ino);
        if (free >= new_extra_isize) {
                entry = IFIRST(header);
index e5c762b372390b59fd0ceb011fea75ab19a41fe7..5dafb9cef12e7116c545a07cbac5abc16cb76715 100644 (file)
@@ -223,7 +223,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
        /* Allocate a new bio */
        bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->rw));
 
-       if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
+       if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
                bio_put(bio);
                return -EFAULT;
        }
@@ -265,8 +265,8 @@ alloc_new:
 
        bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
 
-       if (bio_add_page(io->bio, bio_page, PAGE_CACHE_SIZE, 0) <
-                                                       PAGE_CACHE_SIZE) {
+       if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) <
+                                                       PAGE_SIZE) {
                __submit_merged_bio(io);
                goto alloc_new;
        }
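
Here bio_add_page() refusing a full page means the current bio is full: the merged bio is submitted and control jumps back to alloc_new for a fresh one. That append-or-flush loop is a standard batching idiom; a minimal sketch with hypothetical batch primitives (assumes a single item always fits an empty batch):

    #include <stddef.h>

    struct batch { size_t used, cap; };

    static int batch_add(struct batch *b, size_t n)
    {
        if (b->used + n > b->cap)
            return 0;                 /* full: caller must flush */
        b->used += n;
        return 1;
    }

    static void batch_submit(struct batch *b) { b->used = 0; /* flush */ }

    static void add_page(struct batch *b, size_t page_size)
    {
        while (!batch_add(b, page_size))
            batch_submit(b);          /* submit, then retry the add */
    }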
@@ -406,7 +406,7 @@ got_it:
         * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
         */
        if (dn.data_blkaddr == NEW_ADDR) {
-               zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+               zero_user_segment(page, 0, PAGE_SIZE);
                SetPageUptodate(page);
                unlock_page(page);
                return page;
@@ -517,7 +517,7 @@ struct page *get_new_data_page(struct inode *inode,
                goto got_it;
 
        if (dn.data_blkaddr == NEW_ADDR) {
-               zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+               zero_user_segment(page, 0, PAGE_SIZE);
                SetPageUptodate(page);
        } else {
                f2fs_put_page(page, 1);
@@ -530,8 +530,8 @@ struct page *get_new_data_page(struct inode *inode,
        }
 got_it:
        if (new_i_size && i_size_read(inode) <
-                               ((loff_t)(index + 1) << PAGE_CACHE_SHIFT)) {
-               i_size_write(inode, ((loff_t)(index + 1) << PAGE_CACHE_SHIFT));
+                               ((loff_t)(index + 1) << PAGE_SHIFT)) {
+               i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
                /* Only the directory inode sets new_i_size */
                set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
        }
@@ -570,9 +570,9 @@ alloc:
        /* update i_size */
        fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
                                                        dn->ofs_in_node;
-       if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT))
+       if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
                i_size_write(dn->inode,
-                               ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT));
+                               ((loff_t)(fofs + 1) << PAGE_SHIFT));
        return 0;
 }
 
@@ -971,7 +971,7 @@ got_it:
                                goto confused;
                        }
                } else {
-                       zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+                       zero_user_segment(page, 0, PAGE_SIZE);
                        SetPageUptodate(page);
                        unlock_page(page);
                        goto next_page;
@@ -992,7 +992,7 @@ submit_and_realloc:
                        if (f2fs_encrypted_inode(inode) &&
                                        S_ISREG(inode->i_mode)) {
 
-                               ctx = fscrypt_get_ctx(inode);
+                               ctx = fscrypt_get_ctx(inode, GFP_NOFS);
                                if (IS_ERR(ctx))
                                        goto set_error_page;
 
@@ -1021,7 +1021,7 @@ submit_and_realloc:
                goto next_page;
 set_error_page:
                SetPageError(page);
-               zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+               zero_user_segment(page, 0, PAGE_SIZE);
                unlock_page(page);
                goto next_page;
 confused:
@@ -1032,7 +1032,7 @@ confused:
                unlock_page(page);
 next_page:
                if (pages)
-                       page_cache_release(page);
+                       put_page(page);
        }
        BUG_ON(pages && !list_empty(pages));
        if (bio)
@@ -1092,14 +1092,24 @@ int do_write_data_page(struct f2fs_io_info *fio)
        }
 
        if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
+               gfp_t gfp_flags = GFP_NOFS;
 
                /* wait for GCed encrypted page writeback */
                f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
                                                        fio->old_blkaddr);
-
-               fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page);
+retry_encrypt:
+               fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
+                                                               gfp_flags);
                if (IS_ERR(fio->encrypted_page)) {
                        err = PTR_ERR(fio->encrypted_page);
+                       if (err == -ENOMEM) {
+                               /* flush pending ios and wait for a while */
+                               f2fs_flush_merged_bios(F2FS_I_SB(inode));
+                               congestion_wait(BLK_RW_ASYNC, HZ/50);
+                               gfp_flags |= __GFP_NOFAIL;
+                               err = 0;
+                               goto retry_encrypt;
+                       }
                        goto out_writepage;
                }
        }
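
The writeback path now tries the encryption bounce page with GFP_NOFS first; on -ENOMEM it flushes the pending merged bios (releasing memory those bios pin), waits briefly for congestion to ease, and retries with __GFP_NOFAIL so writeback cannot fail on allocation. The retry shape in a userspace sketch (allocator and flush names hypothetical):

    #include <errno.h>
    #include <unistd.h>

    int  alloc_bounce_page(int nofail, void **page);  /* may return -ENOMEM */
    void flush_pending_io(void);

    static int encrypt_for_writeback(void **page)
    {
        int nofail = 0, err;
    retry:
        err = alloc_bounce_page(nofail, page);
        if (err == -ENOMEM) {
            flush_pending_io();     /* free memory held by queued bios */
            usleep(20 * 1000);      /* roughly HZ/50: let reclaim progress */
            nofail = 1;             /* the second attempt must not fail */
            goto retry;
        }
        return err;
    }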
@@ -1136,7 +1146,7 @@ static int f2fs_write_data_page(struct page *page,
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        loff_t i_size = i_size_read(inode);
        const pgoff_t end_index = ((unsigned long long) i_size)
-                                                       >> PAGE_CACHE_SHIFT;
+                                                       >> PAGE_SHIFT;
        unsigned offset = 0;
        bool need_balance_fs = false;
        int err = 0;
@@ -1157,11 +1167,11 @@ static int f2fs_write_data_page(struct page *page,
         * If the offset is out-of-range of file size,
         * this page does not have to be written to disk.
         */
-       offset = i_size & (PAGE_CACHE_SIZE - 1);
+       offset = i_size & (PAGE_SIZE - 1);
        if ((page->index >= end_index + 1) || !offset)
                goto out;
 
-       zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+       zero_user_segment(page, offset, PAGE_SIZE);
 write:
        if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
                goto redirty_out;
@@ -1267,8 +1277,8 @@ next:
                        cycled = 0;
                end = -1;
        } else {
-               index = wbc->range_start >> PAGE_CACHE_SHIFT;
-               end = wbc->range_end >> PAGE_CACHE_SHIFT;
+               index = wbc->range_start >> PAGE_SHIFT;
+               end = wbc->range_end >> PAGE_SHIFT;
                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                        range_whole = 1;
                cycled = 1; /* ignore range_cyclic tests */
@@ -1448,11 +1458,11 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
         * the block addresses when there is no need to fill the page.
         */
        if (!f2fs_has_inline_data(inode) && !f2fs_encrypted_inode(inode) &&
-                                       len == PAGE_CACHE_SIZE)
+                                       len == PAGE_SIZE)
                return 0;
 
        if (f2fs_has_inline_data(inode) ||
-                       (pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
+                       (pos & PAGE_MASK) >= i_size_read(inode)) {
                f2fs_lock_op(sbi);
                locked = true;
        }
@@ -1513,7 +1523,7 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
        struct inode *inode = mapping->host;
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct page *page = NULL;
-       pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
+       pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
        bool need_balance = false;
        block_t blkaddr = NULL_ADDR;
        int err = 0;
@@ -1561,22 +1571,22 @@ repeat:
        if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
                f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
 
-       if (len == PAGE_CACHE_SIZE)
+       if (len == PAGE_SIZE)
                goto out_update;
        if (PageUptodate(page))
                goto out_clear;
 
-       if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
-               unsigned start = pos & (PAGE_CACHE_SIZE - 1);
+       if ((pos & PAGE_MASK) >= i_size_read(inode)) {
+               unsigned start = pos & (PAGE_SIZE - 1);
                unsigned end = start + len;
 
                /* Reading beyond i_size is simple: memset to zero */
-               zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
+               zero_user_segments(page, 0, start, end, PAGE_SIZE);
                goto out_update;
        }
 
        if (blkaddr == NEW_ADDR) {
-               zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+               zero_user_segment(page, 0, PAGE_SIZE);
        } else {
                struct f2fs_io_info fio = {
                        .sbi = sbi,
@@ -1688,7 +1698,7 @@ void f2fs_invalidate_page(struct page *page, unsigned int offset,
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 
        if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
-               (offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE))
+               (offset % PAGE_SIZE || length != PAGE_SIZE))
                return;
 
        if (PageDirty(page)) {
index 4fb6ef88a34f2836dc16a6441c678f2fe9077971..f4a61a5ff79fb3e8cd928b0318798ae775a9af0d 100644 (file)
@@ -164,7 +164,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
 
        /* build curseg */
        si->base_mem += sizeof(struct curseg_info) * NR_CURSEG_TYPE;
-       si->base_mem += PAGE_CACHE_SIZE * NR_CURSEG_TYPE;
+       si->base_mem += PAGE_SIZE * NR_CURSEG_TYPE;
 
        /* build dirty segmap */
        si->base_mem += sizeof(struct dirty_seglist_info);
@@ -201,9 +201,9 @@ get_cache:
 
        si->page_mem = 0;
        npages = NODE_MAPPING(sbi)->nrpages;
-       si->page_mem += (unsigned long long)npages << PAGE_CACHE_SHIFT;
+       si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
        npages = META_MAPPING(sbi)->nrpages;
-       si->page_mem += (unsigned long long)npages << PAGE_CACHE_SHIFT;
+       si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
 }
 
 static int stat_show(struct seq_file *s, void *v)
index 80641ad827459af98d39dce0e2f649a30f489c8b..af819571bce7130a887cbecef1241097fda63f9c 100644 (file)
@@ -17,8 +17,8 @@
 
 static unsigned long dir_blocks(struct inode *inode)
 {
-       return ((unsigned long long) (i_size_read(inode) + PAGE_CACHE_SIZE - 1))
-                                                       >> PAGE_CACHE_SHIFT;
+       return ((unsigned long long) (i_size_read(inode) + PAGE_SIZE - 1))
+                                                       >> PAGE_SHIFT;
 }
 
 static unsigned int dir_buckets(unsigned int level, int dir_level)
index bbe2cd1265d0c41f63e9788a40805ca3c855304a..7a4558d17f364e84d5d7c14925eabbd3110784bb 100644 (file)
@@ -1294,7 +1294,7 @@ static inline void f2fs_put_page(struct page *page, int unlock)
                f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
                unlock_page(page);
        }
-       page_cache_release(page);
+       put_page(page);
 }
 
 static inline void f2fs_put_dnode(struct dnode_of_data *dn)
index b41c3579ea9e859bd692127e30c1b998e4346fb9..90d1157a09f9b9b3dcc778d189a99d68c5512154 100644 (file)
@@ -74,11 +74,11 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
                goto mapped;
 
        /* page is wholly or partially inside EOF */
-       if (((loff_t)(page->index + 1) << PAGE_CACHE_SHIFT) >
+       if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
                                                i_size_read(inode)) {
                unsigned offset;
-               offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
-               zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+               offset = i_size_read(inode) & ~PAGE_MASK;
+               zero_user_segment(page, offset, PAGE_SIZE);
        }
        set_page_dirty(page);
        SetPageUptodate(page);
@@ -346,11 +346,11 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
                goto found;
        }
 
-       pgofs = (pgoff_t)(offset >> PAGE_CACHE_SHIFT);
+       pgofs = (pgoff_t)(offset >> PAGE_SHIFT);
 
        dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);
 
-       for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
+       for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
                set_new_dnode(&dn, inode, NULL, NULL, 0);
                err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
                if (err && err != -ENOENT) {
@@ -370,7 +370,7 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
                /* find data/hole in dnode block */
                for (; dn.ofs_in_node < end_offset;
                                dn.ofs_in_node++, pgofs++,
-                               data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
+                               data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
                        block_t blkaddr;
                        blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
 
@@ -441,7 +441,7 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
 static int f2fs_file_open(struct inode *inode, struct file *filp)
 {
        int ret = generic_file_open(inode, filp);
-       struct inode *dir = filp->f_path.dentry->d_parent->d_inode;
+       struct dentry *dir;
 
        if (!ret && f2fs_encrypted_inode(inode)) {
                ret = fscrypt_get_encryption_info(inode);
@@ -450,9 +450,13 @@ static int f2fs_file_open(struct inode *inode, struct file *filp)
                if (!fscrypt_has_encryption_key(inode))
                        return -ENOKEY;
        }
-       if (f2fs_encrypted_inode(dir) &&
-                       !fscrypt_has_permitted_context(dir, inode))
+       dir = dget_parent(file_dentry(filp));
+       if (f2fs_encrypted_inode(d_inode(dir)) &&
+                       !fscrypt_has_permitted_context(d_inode(dir), inode)) {
+               dput(dir);
                return -EPERM;
+       }
+       dput(dir);
        return ret;
 }
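
f2fs_file_open() previously read filp->f_path.dentry->d_parent directly, which is racy against concurrent rename and, on stacked filesystems such as overlayfs, may not even be the dentry f2fs expects; the fix pins the parent with dget_parent(file_dentry(filp)) and drops the reference with dput() on every exit path. The take-reference/check/release shape, with hypothetical helpers:

    struct dentry;
    struct dentry *take_parent_ref(struct dentry *d);  /* pins the parent */
    void drop_ref(struct dentry *d);
    int  parent_permits(struct dentry *parent);

    static int open_check(struct dentry *d)
    {
        struct dentry *parent = take_parent_ref(d);
        int ok = parent_permits(parent);

        drop_ref(parent);            /* released on every path */
        return ok ? 0 : -1;          /* -EPERM in the real code */
    }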
 
@@ -508,8 +512,8 @@ void truncate_data_blocks(struct dnode_of_data *dn)
 static int truncate_partial_data_page(struct inode *inode, u64 from,
                                                                bool cache_only)
 {
-       unsigned offset = from & (PAGE_CACHE_SIZE - 1);
-       pgoff_t index = from >> PAGE_CACHE_SHIFT;
+       unsigned offset = from & (PAGE_SIZE - 1);
+       pgoff_t index = from >> PAGE_SHIFT;
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
 
@@ -529,7 +533,7 @@ static int truncate_partial_data_page(struct inode *inode, u64 from,
                return 0;
 truncate_out:
        f2fs_wait_on_page_writeback(page, DATA, true);
-       zero_user(page, offset, PAGE_CACHE_SIZE - offset);
+       zero_user(page, offset, PAGE_SIZE - offset);
        if (!cache_only || !f2fs_encrypted_inode(inode) ||
                                        !S_ISREG(inode->i_mode))
                set_page_dirty(page);
@@ -799,11 +803,11 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
        if (ret)
                return ret;
 
-       pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
-       pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;
+       pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
+       pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
 
-       off_start = offset & (PAGE_CACHE_SIZE - 1);
-       off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
+       off_start = offset & (PAGE_SIZE - 1);
+       off_end = (offset + len) & (PAGE_SIZE - 1);
 
        if (pg_start == pg_end) {
                ret = fill_zero(inode, pg_start, off_start,
@@ -813,7 +817,7 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
        } else {
                if (off_start) {
                        ret = fill_zero(inode, pg_start++, off_start,
-                                               PAGE_CACHE_SIZE - off_start);
+                                               PAGE_SIZE - off_start);
                        if (ret)
                                return ret;
                }
@@ -830,8 +834,8 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
 
                        f2fs_balance_fs(sbi, true);
 
-                       blk_start = (loff_t)pg_start << PAGE_CACHE_SHIFT;
-                       blk_end = (loff_t)pg_end << PAGE_CACHE_SHIFT;
+                       blk_start = (loff_t)pg_start << PAGE_SHIFT;
+                       blk_end = (loff_t)pg_end << PAGE_SHIFT;
                        truncate_inode_pages_range(mapping, blk_start,
                                        blk_end - 1);
 
@@ -954,8 +958,8 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
        if (ret)
                return ret;
 
-       pg_start = offset >> PAGE_CACHE_SHIFT;
-       pg_end = (offset + len) >> PAGE_CACHE_SHIFT;
+       pg_start = offset >> PAGE_SHIFT;
+       pg_end = (offset + len) >> PAGE_SHIFT;
 
        /* write out all dirty pages from offset */
        ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
@@ -1006,11 +1010,11 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
 
        truncate_pagecache_range(inode, offset, offset + len - 1);
 
-       pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
-       pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;
+       pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
+       pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
 
-       off_start = offset & (PAGE_CACHE_SIZE - 1);
-       off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
+       off_start = offset & (PAGE_SIZE - 1);
+       off_end = (offset + len) & (PAGE_SIZE - 1);
 
        if (pg_start == pg_end) {
                ret = fill_zero(inode, pg_start, off_start,
@@ -1024,12 +1028,12 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
        } else {
                if (off_start) {
                        ret = fill_zero(inode, pg_start++, off_start,
-                                               PAGE_CACHE_SIZE - off_start);
+                                               PAGE_SIZE - off_start);
                        if (ret)
                                return ret;
 
                        new_size = max_t(loff_t, new_size,
-                                       (loff_t)pg_start << PAGE_CACHE_SHIFT);
+                                       (loff_t)pg_start << PAGE_SHIFT);
                }
 
                for (index = pg_start; index < pg_end; index++) {
@@ -1060,7 +1064,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
                        f2fs_unlock_op(sbi);
 
                        new_size = max_t(loff_t, new_size,
-                               (loff_t)(index + 1) << PAGE_CACHE_SHIFT);
+                               (loff_t)(index + 1) << PAGE_SHIFT);
                }
 
                if (off_end) {
@@ -1117,8 +1121,8 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
 
        truncate_pagecache(inode, offset);
 
-       pg_start = offset >> PAGE_CACHE_SHIFT;
-       pg_end = (offset + len) >> PAGE_CACHE_SHIFT;
+       pg_start = offset >> PAGE_SHIFT;
+       pg_end = (offset + len) >> PAGE_SHIFT;
        delta = pg_end - pg_start;
        nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
 
@@ -1158,11 +1162,11 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
 
        f2fs_balance_fs(sbi, true);
 
-       pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
-       pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;
+       pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
+       pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
 
-       off_start = offset & (PAGE_CACHE_SIZE - 1);
-       off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
+       off_start = offset & (PAGE_SIZE - 1);
+       off_end = (offset + len) & (PAGE_SIZE - 1);
 
        f2fs_lock_op(sbi);
 
@@ -1180,12 +1184,12 @@ noalloc:
                if (pg_start == pg_end)
                        new_size = offset + len;
                else if (index == pg_start && off_start)
-                       new_size = (loff_t)(index + 1) << PAGE_CACHE_SHIFT;
+                       new_size = (loff_t)(index + 1) << PAGE_SHIFT;
                else if (index == pg_end)
-                       new_size = ((loff_t)index << PAGE_CACHE_SHIFT) +
+                       new_size = ((loff_t)index << PAGE_SHIFT) +
                                                                off_end;
                else
-                       new_size += PAGE_CACHE_SIZE;
+                       new_size += PAGE_SIZE;
        }
 
        if (!(mode & FALLOC_FL_KEEP_SIZE) &&
@@ -1652,8 +1656,8 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
        if (need_inplace_update(inode))
                return -EINVAL;
 
-       pg_start = range->start >> PAGE_CACHE_SHIFT;
-       pg_end = (range->start + range->len) >> PAGE_CACHE_SHIFT;
+       pg_start = range->start >> PAGE_SHIFT;
+       pg_end = (range->start + range->len) >> PAGE_SHIFT;
 
        f2fs_balance_fs(sbi, true);
 
@@ -1770,7 +1774,7 @@ clear_out:
 out:
        inode_unlock(inode);
        if (!err)
-               range->len = (u64)total << PAGE_CACHE_SHIFT;
+               range->len = (u64)total << PAGE_SHIFT;
        return err;
 }
 
index 358214e9f70765887b1e1353efef1ec77555eb90..a2fbe6f427d3b9ceba086af42ac7086b718692b4 100644 (file)
@@ -51,7 +51,7 @@ void read_inline_data(struct page *page, struct page *ipage)
 
        f2fs_bug_on(F2FS_P_SB(page), page->index);
 
-       zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);
+       zero_user_segment(page, MAX_INLINE_DATA, PAGE_SIZE);
 
        /* Copy the whole inline data block */
        src_addr = inline_data_addr(ipage);
@@ -93,7 +93,7 @@ int f2fs_read_inline_data(struct inode *inode, struct page *page)
        }
 
        if (page->index)
-               zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+               zero_user_segment(page, 0, PAGE_SIZE);
        else
                read_inline_data(page, ipage);
 
@@ -375,7 +375,7 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
                goto out;
 
        f2fs_wait_on_page_writeback(page, DATA, true);
-       zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);
+       zero_user_segment(page, MAX_INLINE_DATA, PAGE_SIZE);
 
        dentry_blk = kmap_atomic(page);
 
@@ -405,8 +405,8 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
        stat_dec_inline_dir(dir);
        clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY);
 
-       if (i_size_read(dir) < PAGE_CACHE_SIZE) {
-               i_size_write(dir, PAGE_CACHE_SIZE);
+       if (i_size_read(dir) < PAGE_SIZE) {
+               i_size_write(dir, PAGE_SIZE);
                set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
        }
 
index 7876f10521019509bbae2888addea516e8cc1db2..013e57932d615fec6dcc8ce1f2246cd0a0a36c0e 100644 (file)
@@ -1027,12 +1027,6 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
                goto errout;
        }
 
-       /* this is broken symlink case */
-       if (unlikely(cstr.name[0] == 0)) {
-               res = -ENOENT;
-               goto errout;
-       }
-
        if ((cstr.len + sizeof(struct fscrypt_symlink_data) - 1) > max_size) {
                /* Symlink data on the disk is corrupted */
                res = -EIO;
@@ -1046,17 +1040,23 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
        if (res < 0)
                goto errout;
 
+       /* this is broken symlink case */
+       if (unlikely(pstr.name[0] == 0)) {
+               res = -ENOENT;
+               goto errout;
+       }
+
        paddr = pstr.name;
 
        /* Null-terminate the name */
        paddr[res] = '\0';
 
-       page_cache_release(cpage);
+       put_page(cpage);
        set_delayed_call(done, kfree_link, paddr);
        return paddr;
 errout:
        fscrypt_fname_free_buffer(&pstr);
-       page_cache_release(cpage);
+       put_page(cpage);
        return ERR_PTR(res);
 }
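
The empty-name check moves from the raw on-disk ciphertext (cstr) to the decrypted output (pstr): an encrypted byte stream can legitimately start with a zero byte, so only the plaintext tells whether the symlink is really broken. Decode first, then validate the result:

    #include <errno.h>
    #include <stddef.h>

    int decrypt_name(const char *ct, size_t len, char *pt);  /* hypothetical */

    static int get_link(const char *ct, size_t len, char *pt)
    {
        int res = decrypt_name(ct, len, pt);   /* decode first */

        if (res < 0)
            return res;
        if (pt[0] == '\0')                     /* validate the plaintext */
            return -ENOENT;
        return res;
    }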
 
index 118321bd1a7fa9a71694d288615d85b660f03d7e..1a33de9d84b16a68202ee410cafc4f9ba1353e11 100644 (file)
@@ -46,11 +46,11 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
         */
        if (type == FREE_NIDS) {
                mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >>
-                                                       PAGE_CACHE_SHIFT;
+                                                       PAGE_SHIFT;
                res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
        } else if (type == NAT_ENTRIES) {
                mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
-                                                       PAGE_CACHE_SHIFT;
+                                                       PAGE_SHIFT;
                res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
        } else if (type == DIRTY_DENTS) {
                if (sbi->sb->s_bdi->wb.dirty_exceeded)
@@ -62,13 +62,13 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
 
                for (i = 0; i <= UPDATE_INO; i++)
                        mem_size += (sbi->im[i].ino_num *
-                               sizeof(struct ino_entry)) >> PAGE_CACHE_SHIFT;
+                               sizeof(struct ino_entry)) >> PAGE_SHIFT;
                res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
        } else if (type == EXTENT_CACHE) {
                mem_size = (atomic_read(&sbi->total_ext_tree) *
                                sizeof(struct extent_tree) +
                                atomic_read(&sbi->total_ext_node) *
-                               sizeof(struct extent_node)) >> PAGE_CACHE_SHIFT;
+                               sizeof(struct extent_node)) >> PAGE_SHIFT;
                res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
        } else {
                if (!sbi->sb->s_bdi->wb.dirty_exceeded)
@@ -121,7 +121,7 @@ static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
 
        src_addr = page_address(src_page);
        dst_addr = page_address(dst_page);
-       memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
+       memcpy(dst_addr, src_addr, PAGE_SIZE);
        set_page_dirty(dst_page);
        f2fs_put_page(src_page, 1);
 
index 0b30cd2aeebd52b85dbfd6f1b5d2cbd624da7837..011942f94d6494ff5da99a36967b3e8ba8ff6140 100644 (file)
@@ -591,7 +591,7 @@ out:
 
        /* truncate meta pages to be used by the recovery */
        truncate_inode_pages_range(META_MAPPING(sbi),
-                       (loff_t)MAIN_BLKADDR(sbi) << PAGE_CACHE_SHIFT, -1);
+                       (loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);
 
        if (err) {
                truncate_inode_pages_final(NODE_MAPPING(sbi));
index 6f16b39f0b528a530cae99ea3b0027bb16cbe2ae..540669d6978e69f7b1bcf27f479ef961e1eabde0 100644 (file)
@@ -885,12 +885,12 @@ int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
                }
        }
 
-       sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE -
+       sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
                        SUM_FOOTER_SIZE) / SUMMARY_SIZE;
        if (valid_sum_count <= sum_in_page)
                return 1;
        else if ((valid_sum_count - sum_in_page) <=
-               (PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
+               (PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
                return 2;
        return 3;
 }
@@ -909,9 +909,9 @@ void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr)
        void *dst = page_address(page);
 
        if (src)
-               memcpy(dst, src, PAGE_CACHE_SIZE);
+               memcpy(dst, src, PAGE_SIZE);
        else
-               memset(dst, 0, PAGE_CACHE_SIZE);
+               memset(dst, 0, PAGE_SIZE);
        set_page_dirty(page);
        f2fs_put_page(page, 1);
 }
@@ -1596,7 +1596,7 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi)
                        s = (struct f2fs_summary *)(kaddr + offset);
                        seg_i->sum_blk->entries[j] = *s;
                        offset += SUMMARY_SIZE;
-                       if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
+                       if (offset + SUMMARY_SIZE <= PAGE_SIZE -
                                                SUM_FOOTER_SIZE)
                                continue;
 
@@ -1757,7 +1757,7 @@ static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
                        *summary = seg_i->sum_blk->entries[j];
                        written_size += SUMMARY_SIZE;
 
-                       if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
+                       if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
                                                        SUM_FOOTER_SIZE)
                                continue;
 
@@ -1844,7 +1844,7 @@ static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
 
        src_addr = page_address(src_page);
        dst_addr = page_address(dst_page);
-       memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
+       memcpy(dst_addr, src_addr, PAGE_SIZE);
 
        set_page_dirty(dst_page);
        f2fs_put_page(src_page, 1);
@@ -2171,7 +2171,7 @@ static int build_curseg(struct f2fs_sb_info *sbi)
 
        for (i = 0; i < NR_CURSEG_TYPE; i++) {
                mutex_init(&array[i].curseg_mutex);
-               array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+               array[i].sum_blk = kzalloc(PAGE_SIZE, GFP_KERNEL);
                if (!array[i].sum_blk)
                        return -ENOMEM;
                init_rwsem(&array[i].journal_rwsem);
index 15bb81f8dac259843b074cfb2737426b49c139a5..006f87d69921da05e071f41dc023a5d812116779 100644 (file)
@@ -984,9 +984,25 @@ static loff_t max_file_blocks(void)
        return result;
 }
 
+static int __f2fs_commit_super(struct buffer_head *bh,
+                       struct f2fs_super_block *super)
+{
+       lock_buffer(bh);
+       if (super)
+               memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
+       set_buffer_uptodate(bh);
+       set_buffer_dirty(bh);
+       unlock_buffer(bh);
+
+       /* it's rare case, we can do fua all the time */
+       return __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
+}
+
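
The factored-out __f2fs_commit_super() works on a buffer_head: lock it, optionally copy the in-memory superblock into the block at F2FS_SUPER_OFFSET, mark it dirty, and write it synchronously with FLUSH/FUA so the update is durable before returning. A rough userspace analogue of "update, then force to stable storage", with fdatasync standing in for FUA:

    #include <sys/types.h>
    #include <unistd.h>

    /* Write 'sb' at byte offset 'off' and make it durable before returning. */
    static int commit_super(int fd, const void *sb, size_t len, off_t off)
    {
        if (pwrite(fd, sb, len, off) != (ssize_t)len)
            return -1;
        return fdatasync(fd);
    }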
 static inline bool sanity_check_area_boundary(struct super_block *sb,
-                                       struct f2fs_super_block *raw_super)
+                                       struct buffer_head *bh)
 {
+       struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
+                                       (bh->b_data + F2FS_SUPER_OFFSET);
        u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
        u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
        u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
@@ -1000,6 +1016,10 @@ static inline bool sanity_check_area_boundary(struct super_block *sb,
        u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
        u32 segment_count = le32_to_cpu(raw_super->segment_count);
        u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
+       u64 main_end_blkaddr = main_blkaddr +
+                               (segment_count_main << log_blocks_per_seg);
+       u64 seg_end_blkaddr = segment0_blkaddr +
+                               (segment_count << log_blocks_per_seg);
 
        if (segment0_blkaddr != cp_blkaddr) {
                f2fs_msg(sb, KERN_INFO,
@@ -1044,22 +1064,45 @@ static inline bool sanity_check_area_boundary(struct super_block *sb,
                return true;
        }
 
-       if (main_blkaddr + (segment_count_main << log_blocks_per_seg) !=
-               segment0_blkaddr + (segment_count << log_blocks_per_seg)) {
+       if (main_end_blkaddr > seg_end_blkaddr) {
                f2fs_msg(sb, KERN_INFO,
-                       "Wrong MAIN_AREA boundary, start(%u) end(%u) blocks(%u)",
+                       "Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
                        main_blkaddr,
-                       segment0_blkaddr + (segment_count << log_blocks_per_seg),
+                       segment0_blkaddr +
+                               (segment_count << log_blocks_per_seg),
                        segment_count_main << log_blocks_per_seg);
                return true;
+       } else if (main_end_blkaddr < seg_end_blkaddr) {
+               int err = 0;
+               char *res;
+
+               /* fix in-memory information all the time */
+               raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
+                               segment0_blkaddr) >> log_blocks_per_seg);
+
+               if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
+                       res = "internally";
+               } else {
+                       err = __f2fs_commit_super(bh, NULL);
+                       res = err ? "failed" : "done";
+               }
+               f2fs_msg(sb, KERN_INFO,
+                       "Fix alignment : %s, start(%u) end(%u) block(%u)",
+                       res, main_blkaddr,
+                       segment0_blkaddr +
+                               (segment_count << log_blocks_per_seg),
+                       segment_count_main << log_blocks_per_seg);
+               if (err)
+                       return true;
        }
-
        return false;
 }
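
The boundary check now distinguishes the two mismatch directions instead of rejecting both: a main area that runs past the end of the segment space is still fatal, but a segment_count that overshoots the main area (for example after the image was resized by external tools) is repaired by recomputing segment_count from main_end_blkaddr, and the fixed superblock is committed back unless the device is read-only. The arithmetic, in isolation:

    #include <stdint.h>

    /* Shrink *segs when it overshoots the main area; sketch only. */
    static int fix_segment_count(uint32_t seg0_blk, uint32_t main_blk,
                                 uint32_t main_segs, uint32_t *segs,
                                 uint32_t log_blks_per_seg)
    {
        uint64_t main_end = (uint64_t)main_blk +
                            ((uint64_t)main_segs << log_blks_per_seg);
        uint64_t seg_end  = (uint64_t)seg0_blk +
                            ((uint64_t)*segs << log_blks_per_seg);

        if (main_end > seg_end)
            return -1;               /* fatal: main area overruns device */
        if (main_end < seg_end)      /* repairable: trim segment_count */
            *segs = (uint32_t)((main_end - seg0_blk) >> log_blks_per_seg);
        return 0;
    }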
 
 static int sanity_check_raw_super(struct super_block *sb,
-                       struct f2fs_super_block *raw_super)
+                               struct buffer_head *bh)
 {
+       struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
+                                       (bh->b_data + F2FS_SUPER_OFFSET);
        unsigned int blocksize;
 
        if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
@@ -1070,10 +1113,10 @@ static int sanity_check_raw_super(struct super_block *sb,
        }
 
        /* Currently, support only 4KB page cache size */
-       if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) {
+       if (F2FS_BLKSIZE != PAGE_SIZE) {
                f2fs_msg(sb, KERN_INFO,
                        "Invalid page_cache_size (%lu), supports only 4KB\n",
-                       PAGE_CACHE_SIZE);
+                       PAGE_SIZE);
                return 1;
        }
 
@@ -1126,7 +1169,7 @@ static int sanity_check_raw_super(struct super_block *sb,
        }
 
        /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
-       if (sanity_check_area_boundary(sb, raw_super))
+       if (sanity_check_area_boundary(sb, bh))
                return 1;
 
        return 0;
@@ -1202,7 +1245,7 @@ static int read_raw_super_block(struct super_block *sb,
 {
        int block;
        struct buffer_head *bh;
-       struct f2fs_super_block *super, *buf;
+       struct f2fs_super_block *super;
        int err = 0;
 
        super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
@@ -1218,11 +1261,8 @@ static int read_raw_super_block(struct super_block *sb,
                        continue;
                }
 
-               buf = (struct f2fs_super_block *)
-                               (bh->b_data + F2FS_SUPER_OFFSET);
-
                /* sanity checking of raw super */
-               if (sanity_check_raw_super(sb, buf)) {
+               if (sanity_check_raw_super(sb, bh)) {
                        f2fs_msg(sb, KERN_ERR,
                                "Can't find valid F2FS filesystem in %dth superblock",
                                block + 1);
@@ -1232,7 +1272,8 @@ static int read_raw_super_block(struct super_block *sb,
                }
 
                if (!*raw_super) {
-                       memcpy(super, buf, sizeof(*super));
+                       memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
+                                                       sizeof(*super));
                        *valid_super_block = block;
                        *raw_super = super;
                }
@@ -1252,42 +1293,29 @@ static int read_raw_super_block(struct super_block *sb,
        return err;
 }
 
-static int __f2fs_commit_super(struct f2fs_sb_info *sbi, int block)
+int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
 {
-       struct f2fs_super_block *super = F2FS_RAW_SUPER(sbi);
        struct buffer_head *bh;
        int err;
 
-       bh = sb_getblk(sbi->sb, block);
+       /* write back-up superblock first */
+       bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0: 1);
        if (!bh)
                return -EIO;
-
-       lock_buffer(bh);
-       memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
-       set_buffer_uptodate(bh);
-       set_buffer_dirty(bh);
-       unlock_buffer(bh);
-
-       /* it's rare case, we can do fua all the time */
-       err = __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
+       err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
        brelse(bh);
 
-       return err;
-}
-
-int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
-{
-       int err;
-
-       /* write back-up superblock first */
-       err = __f2fs_commit_super(sbi, sbi->valid_super_block ? 0 : 1);
-
        /* if we are in recovery path, skip writing valid superblock */
        if (recover || err)
                return err;
 
        /* write current valid superblock */
-       return __f2fs_commit_super(sbi, sbi->valid_super_block);
+       bh = sb_getblk(sbi->sb, sbi->valid_super_block);
+       if (!bh)
+               return -EIO;
+       err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
+       brelse(bh);
+       return err;
 }
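
The rewritten f2fs_commit_super() keeps the crash-safety ordering explicit: the backup copy (whichever superblock is not currently the valid one) is written and made durable first, and only if that succeeds is the live copy rewritten, so at any instant at least one on-disk superblock is intact; in the recovery path the live write is skipped entirely. Sketch of the two-copy ordering, reusing commit_super() from the sketch above:

    /* Two-copy commit: never risk both superblocks at once. */
    static int commit_both(int fd, const void *sb, size_t len,
                           off_t live_off, off_t backup_off)
    {
        int err = commit_super(fd, sb, len, backup_off);  /* backup first */

        if (err)
            return err;              /* live copy left untouched */
        return commit_super(fd, sb, len, live_off);
    }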
 
 static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
@@ -1442,7 +1470,7 @@ try_onemore:
        seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
        if (__exist_node_summaries(sbi))
                sbi->kbytes_written =
-                       le64_to_cpu(seg_i->sum_blk->journal.info.kbytes_written);
+                       le64_to_cpu(seg_i->journal->info.kbytes_written);
 
        build_gc_manager(sbi);
 
index cb84f0fcc72a468c1366f498554ab4c79ac26830..bfc780c682fb8b0fdd2301398263964fe07f69a7 100644 (file)
@@ -66,11 +66,11 @@ static int
 vxfs_immed_readpage(struct file *fp, struct page *pp)
 {
        struct vxfs_inode_info  *vip = VXFS_INO(pp->mapping->host);
-       u_int64_t       offset = (u_int64_t)pp->index << PAGE_CACHE_SHIFT;
+       u_int64_t       offset = (u_int64_t)pp->index << PAGE_SHIFT;
        caddr_t         kaddr;
 
        kaddr = kmap(pp);
-       memcpy(kaddr, vip->vii_immed.vi_immed + offset, PAGE_CACHE_SIZE);
+       memcpy(kaddr, vip->vii_immed.vi_immed + offset, PAGE_SIZE);
        kunmap(pp);
        
        flush_dcache_page(pp);
index 1cff72df0389a415ef79b14e75d773e87fae3206..a49e0cfbb686d3448a567dcf76434a041b0b326a 100644 (file)
@@ -45,7 +45,7 @@
 /*
  * Number of VxFS blocks per page.
  */
-#define VXFS_BLOCK_PER_PAGE(sbp)  ((PAGE_CACHE_SIZE / (sbp)->s_blocksize))
+#define VXFS_BLOCK_PER_PAGE(sbp)  ((PAGE_SIZE / (sbp)->s_blocksize))
 
 
 static struct dentry * vxfs_lookup(struct inode *, struct dentry *, unsigned int);
@@ -175,7 +175,7 @@ vxfs_inode_by_name(struct inode *dip, struct dentry *dp)
        if (de) {
                ino = de->d_ino;
                kunmap(pp);
-               page_cache_release(pp);
+               put_page(pp);
        }
        
        return (ino);
@@ -255,8 +255,8 @@ vxfs_readdir(struct file *fp, struct dir_context *ctx)
        nblocks = dir_blocks(ip);
        pblocks = VXFS_BLOCK_PER_PAGE(sbp);
 
-       page = pos >> PAGE_CACHE_SHIFT;
-       offset = pos & ~PAGE_CACHE_MASK;
+       page = pos >> PAGE_SHIFT;
+       offset = pos & ~PAGE_MASK;
        block = (u_long)(pos >> sbp->s_blocksize_bits) % pblocks;
 
        for (; page < npages; page++, block = 0) {
@@ -289,7 +289,7 @@ vxfs_readdir(struct file *fp, struct dir_context *ctx)
                                        continue;
 
                                offset = (char *)de - kaddr;
-                               ctx->pos = ((page << PAGE_CACHE_SHIFT) | offset) + 2;
+                               ctx->pos = ((page << PAGE_SHIFT) | offset) + 2;
                                if (!dir_emit(ctx, de->d_name, de->d_namelen,
                                        de->d_ino, DT_UNKNOWN)) {
                                        vxfs_put_page(pp);
@@ -301,6 +301,6 @@ vxfs_readdir(struct file *fp, struct dir_context *ctx)
                vxfs_put_page(pp);
                offset = 0;
        }
-       ctx->pos = ((page << PAGE_CACHE_SHIFT) | offset) + 2;
+       ctx->pos = ((page << PAGE_SHIFT) | offset) + 2;
        return 0;
 }
index 5d318c44f8554bdbf5304a707557e413089b2913..e806694d4145e272d51765d80371d32398527acb 100644 (file)
@@ -50,7 +50,7 @@ inline void
 vxfs_put_page(struct page *pp)
 {
        kunmap(pp);
-       page_cache_release(pp);
+       put_page(pp);
 }
 
 /**
index fee81e8768c955f0eaf3844d374728f5c42a0140..592cea54cea0f2e4010f80c77e74c16527c36927 100644 (file)
@@ -33,7 +33,7 @@
 /*
  * 4MB minimal write chunk size
  */
-#define MIN_WRITEBACK_PAGES    (4096UL >> (PAGE_CACHE_SHIFT - 10))
+#define MIN_WRITEBACK_PAGES    (4096UL >> (PAGE_SHIFT - 10))
 
 struct wb_completion {
        atomic_t                cnt;
index 6b35fc4860a0381ab33ccc6162ca5184555373b6..3078b679fcd1b4a74b1839a7d5cc5692360bf2e1 100644 (file)
@@ -113,7 +113,7 @@ try_again:
 
        wake_up_bit(&cookie->flags, 0);
        if (xpage)
-               page_cache_release(xpage);
+               put_page(xpage);
        __fscache_uncache_page(cookie, page);
        return true;
 
@@ -164,7 +164,7 @@ static void fscache_end_page_write(struct fscache_object *object,
        }
        spin_unlock(&object->lock);
        if (xpage)
-               page_cache_release(xpage);
+               put_page(xpage);
 }
 
 /*
@@ -884,7 +884,7 @@ void fscache_invalidate_writes(struct fscache_cookie *cookie)
                spin_unlock(&cookie->stores_lock);
 
                for (i = n - 1; i >= 0; i--)
-                       page_cache_release(results[i]);
+                       put_page(results[i]);
        }
 
        _leave("");
@@ -982,7 +982,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 
        radix_tree_tag_set(&cookie->stores, page->index,
                           FSCACHE_COOKIE_PENDING_TAG);
-       page_cache_get(page);
+       get_page(page);
 
        /* we only want one writer at a time, but we do need to queue new
         * writers after exclusive ops */
@@ -1026,7 +1026,7 @@ submit_failed:
        radix_tree_delete(&cookie->stores, page->index);
        spin_unlock(&cookie->stores_lock);
        wake_cookie = __fscache_unuse_cookie(cookie);
-       page_cache_release(page);
+       put_page(page);
        ret = -ENOBUFS;
        goto nobufs;
 
index ebb5e37455a07acd86f5fbf1b76d474e99b937fb..cbece1221417bb0ad5c60a143c724e6b3a5b06e8 100644 (file)
@@ -897,7 +897,7 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
                return err;
        }
 
-       page_cache_get(newpage);
+       get_page(newpage);
 
        if (!(buf->flags & PIPE_BUF_FLAG_LRU))
                lru_cache_add_file(newpage);
@@ -912,12 +912,12 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
 
        if (err) {
                unlock_page(newpage);
-               page_cache_release(newpage);
+               put_page(newpage);
                return err;
        }
 
        unlock_page(oldpage);
-       page_cache_release(oldpage);
+       put_page(oldpage);
        cs->len = 0;
 
        return 0;
@@ -951,7 +951,7 @@ static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
        fuse_copy_finish(cs);
 
        buf = cs->pipebufs;
-       page_cache_get(page);
+       get_page(page);
        buf->page = page;
        buf->offset = offset;
        buf->len = count;
@@ -1435,7 +1435,7 @@ out_unlock:
 
 out:
        for (; page_nr < cs.nr_segs; page_nr++)
-               page_cache_release(bufs[page_nr].page);
+               put_page(bufs[page_nr].page);
 
        kfree(bufs);
        return ret;
@@ -1632,8 +1632,8 @@ static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
                goto out_up_killsb;
 
        mapping = inode->i_mapping;
-       index = outarg.offset >> PAGE_CACHE_SHIFT;
-       offset = outarg.offset & ~PAGE_CACHE_MASK;
+       index = outarg.offset >> PAGE_SHIFT;
+       offset = outarg.offset & ~PAGE_MASK;
        file_size = i_size_read(inode);
        end = outarg.offset + outarg.size;
        if (end > file_size) {
@@ -1652,13 +1652,13 @@ static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
                if (!page)
                        goto out_iput;
 
-               this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
+               this_num = min_t(unsigned, num, PAGE_SIZE - offset);
                err = fuse_copy_page(cs, &page, offset, this_num, 0);
                if (!err && offset == 0 &&
-                   (this_num == PAGE_CACHE_SIZE || file_size == end))
+                   (this_num == PAGE_SIZE || file_size == end))
                        SetPageUptodate(page);
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
 
                if (err)
                        goto out_iput;
@@ -1697,7 +1697,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
        size_t total_len = 0;
        int num_pages;
 
-       offset = outarg->offset & ~PAGE_CACHE_MASK;
+       offset = outarg->offset & ~PAGE_MASK;
        file_size = i_size_read(inode);
 
        num = outarg->size;
@@ -1720,7 +1720,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
        req->page_descs[0].offset = offset;
        req->end = fuse_retrieve_end;
 
-       index = outarg->offset >> PAGE_CACHE_SHIFT;
+       index = outarg->offset >> PAGE_SHIFT;
 
        while (num && req->num_pages < num_pages) {
                struct page *page;
@@ -1730,7 +1730,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
                if (!page)
                        break;
 
-               this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
+               this_num = min_t(unsigned, num, PAGE_SIZE - offset);
                req->pages[req->num_pages] = page;
                req->page_descs[req->num_pages].length = this_num;
                req->num_pages++;
index 9dde38f12c07bfec48255b99ee49d02539e52dec..dcad5e2105252fa277a1c66ef83d8a960d2a082b 100644 (file)
@@ -348,7 +348,7 @@ static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
                pgoff_t curr_index;
 
                BUG_ON(req->inode != inode);
-               curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
+               curr_index = req->misc.write.in.offset >> PAGE_SHIFT;
                if (idx_from < curr_index + req->num_pages &&
                    curr_index <= idx_to) {
                        found = true;
@@ -683,11 +683,11 @@ static void fuse_short_read(struct fuse_req *req, struct inode *inode,
                 * present there.
                 */
                int i;
-               int start_idx = num_read >> PAGE_CACHE_SHIFT;
-               size_t off = num_read & (PAGE_CACHE_SIZE - 1);
+               int start_idx = num_read >> PAGE_SHIFT;
+               size_t off = num_read & (PAGE_SIZE - 1);
 
                for (i = start_idx; i < req->num_pages; i++) {
-                       zero_user_segment(req->pages[i], off, PAGE_CACHE_SIZE);
+                       zero_user_segment(req->pages[i], off, PAGE_SIZE);
                        off = 0;
                }
        } else {
@@ -704,7 +704,7 @@ static int fuse_do_readpage(struct file *file, struct page *page)
        struct fuse_req *req;
        size_t num_read;
        loff_t pos = page_offset(page);
-       size_t count = PAGE_CACHE_SIZE;
+       size_t count = PAGE_SIZE;
        u64 attr_ver;
        int err;
 
@@ -789,7 +789,7 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
                else
                        SetPageError(page);
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
        }
        if (req->ff)
                fuse_file_put(req->ff, false);
@@ -800,7 +800,7 @@ static void fuse_send_readpages(struct fuse_req *req, struct file *file)
        struct fuse_file *ff = file->private_data;
        struct fuse_conn *fc = ff->fc;
        loff_t pos = page_offset(req->pages[0]);
-       size_t count = req->num_pages << PAGE_CACHE_SHIFT;
+       size_t count = req->num_pages << PAGE_SHIFT;
 
        req->out.argpages = 1;
        req->out.page_zeroing = 1;
@@ -836,7 +836,7 @@ static int fuse_readpages_fill(void *_data, struct page *page)
 
        if (req->num_pages &&
            (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
-            (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
+            (req->num_pages + 1) * PAGE_SIZE > fc->max_read ||
             req->pages[req->num_pages - 1]->index + 1 != page->index)) {
                int nr_alloc = min_t(unsigned, data->nr_pages,
                                     FUSE_MAX_PAGES_PER_REQ);
@@ -858,7 +858,7 @@ static int fuse_readpages_fill(void *_data, struct page *page)
                return -EIO;
        }
 
-       page_cache_get(page);
+       get_page(page);
        req->pages[req->num_pages] = page;
        req->page_descs[req->num_pages].length = PAGE_SIZE;
        req->num_pages++;
@@ -1003,17 +1003,17 @@ static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
        for (i = 0; i < req->num_pages; i++) {
                struct page *page = req->pages[i];
 
-               if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE)
+               if (!req->out.h.error && !offset && count >= PAGE_SIZE)
                        SetPageUptodate(page);
 
-               if (count > PAGE_CACHE_SIZE - offset)
-                       count -= PAGE_CACHE_SIZE - offset;
+               if (count > PAGE_SIZE - offset)
+                       count -= PAGE_SIZE - offset;
                else
                        count = 0;
                offset = 0;
 
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
        }
 
        return res;
@@ -1024,7 +1024,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
                               struct iov_iter *ii, loff_t pos)
 {
        struct fuse_conn *fc = get_fuse_conn(mapping->host);
-       unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
+       unsigned offset = pos & (PAGE_SIZE - 1);
        size_t count = 0;
        int err;
 
@@ -1034,8 +1034,8 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
        do {
                size_t tmp;
                struct page *page;
-               pgoff_t index = pos >> PAGE_CACHE_SHIFT;
-               size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
+               pgoff_t index = pos >> PAGE_SHIFT;
+               size_t bytes = min_t(size_t, PAGE_SIZE - offset,
                                     iov_iter_count(ii));
 
                bytes = min_t(size_t, bytes, fc->max_write - count);
@@ -1059,7 +1059,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
                iov_iter_advance(ii, tmp);
                if (!tmp) {
                        unlock_page(page);
-                       page_cache_release(page);
+                       put_page(page);
                        bytes = min(bytes, iov_iter_single_seg_count(ii));
                        goto again;
                }
@@ -1072,7 +1072,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
                count += tmp;
                pos += tmp;
                offset += tmp;
-               if (offset == PAGE_CACHE_SIZE)
+               if (offset == PAGE_SIZE)
                        offset = 0;
 
                if (!fc->big_writes)
@@ -1086,8 +1086,8 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
 static inline unsigned fuse_wr_pages(loff_t pos, size_t len)
 {
        return min_t(unsigned,
-                    ((pos + len - 1) >> PAGE_CACHE_SHIFT) -
-                    (pos >> PAGE_CACHE_SHIFT) + 1,
+                    ((pos + len - 1) >> PAGE_SHIFT) -
+                    (pos >> PAGE_SHIFT) + 1,
                     FUSE_MAX_PAGES_PER_REQ);
 }
 
@@ -1205,8 +1205,8 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
                        goto out;
 
                invalidate_mapping_pages(file->f_mapping,
-                                        pos >> PAGE_CACHE_SHIFT,
-                                        endbyte >> PAGE_CACHE_SHIFT);
+                                        pos >> PAGE_SHIFT,
+                                        endbyte >> PAGE_SHIFT);
 
                written += written_buffered;
                iocb->ki_pos = pos + written_buffered;
@@ -1295,7 +1295,7 @@ static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
 
        *nbytesp = nbytes;
 
-       return ret;
+       return ret < 0 ? ret : 0;
 }
 
 static inline int fuse_iter_npages(const struct iov_iter *ii_p)
@@ -1315,8 +1315,8 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
        size_t nmax = write ? fc->max_write : fc->max_read;
        loff_t pos = *ppos;
        size_t count = iov_iter_count(iter);
-       pgoff_t idx_from = pos >> PAGE_CACHE_SHIFT;
-       pgoff_t idx_to = (pos + count - 1) >> PAGE_CACHE_SHIFT;
+       pgoff_t idx_from = pos >> PAGE_SHIFT;
+       pgoff_t idx_to = (pos + count - 1) >> PAGE_SHIFT;
        ssize_t res = 0;
        struct fuse_req *req;
        int err = 0;
@@ -1466,7 +1466,7 @@ __acquires(fc->lock)
 {
        struct fuse_inode *fi = get_fuse_inode(req->inode);
        struct fuse_write_in *inarg = &req->misc.write.in;
-       __u64 data_size = req->num_pages * PAGE_CACHE_SIZE;
+       __u64 data_size = req->num_pages * PAGE_SIZE;
 
        if (!fc->connected)
                goto out_free;
@@ -1727,7 +1727,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req,
        list_del(&new_req->writepages_entry);
        list_for_each_entry(old_req, &fi->writepages, writepages_entry) {
                BUG_ON(old_req->inode != new_req->inode);
-               curr_index = old_req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
+               curr_index = old_req->misc.write.in.offset >> PAGE_SHIFT;
                if (curr_index <= page->index &&
                    page->index < curr_index + old_req->num_pages) {
                        found = true;
@@ -1742,7 +1742,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req,
        new_req->num_pages = 1;
        for (tmp = old_req; tmp != NULL; tmp = tmp->misc.write.next) {
                BUG_ON(tmp->inode != new_req->inode);
-               curr_index = tmp->misc.write.in.offset >> PAGE_CACHE_SHIFT;
+               curr_index = tmp->misc.write.in.offset >> PAGE_SHIFT;
                if (tmp->num_pages == 1 &&
                    curr_index == page->index) {
                        old_req = tmp;
@@ -1799,7 +1799,7 @@ static int fuse_writepages_fill(struct page *page,
 
        if (req && req->num_pages &&
            (is_writeback || req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
-            (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_write ||
+            (req->num_pages + 1) * PAGE_SIZE > fc->max_write ||
             data->orig_pages[req->num_pages - 1]->index + 1 != page->index)) {
                fuse_writepages_send(data);
                data->req = NULL;
@@ -1924,7 +1924,7 @@ static int fuse_write_begin(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, unsigned flags,
                struct page **pagep, void **fsdata)
 {
-       pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+       pgoff_t index = pos >> PAGE_SHIFT;
        struct fuse_conn *fc = get_fuse_conn(file_inode(file));
        struct page *page;
        loff_t fsize;
@@ -1938,15 +1938,15 @@ static int fuse_write_begin(struct file *file, struct address_space *mapping,
 
        fuse_wait_on_page_writeback(mapping->host, page->index);
 
-       if (PageUptodate(page) || len == PAGE_CACHE_SIZE)
+       if (PageUptodate(page) || len == PAGE_SIZE)
                goto success;
        /*
         * Check if the start of this page comes after the end of file, in which
         * case the readpage can be optimized away.
         */
        fsize = i_size_read(mapping->host);
-       if (fsize <= (pos & PAGE_CACHE_MASK)) {
-               size_t off = pos & ~PAGE_CACHE_MASK;
+       if (fsize <= (pos & PAGE_MASK)) {
+               size_t off = pos & ~PAGE_MASK;
                if (off)
                        zero_user_segment(page, 0, off);
                goto success;
@@ -1960,7 +1960,7 @@ success:
 
 cleanup:
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 error:
        return err;
 }
@@ -1973,16 +1973,16 @@ static int fuse_write_end(struct file *file, struct address_space *mapping,
 
        if (!PageUptodate(page)) {
                /* Zero any unwritten bytes at the end of the page */
-               size_t endoff = (pos + copied) & ~PAGE_CACHE_MASK;
+               size_t endoff = (pos + copied) & ~PAGE_MASK;
                if (endoff)
-                       zero_user_segment(page, endoff, PAGE_CACHE_SIZE);
+                       zero_user_segment(page, endoff, PAGE_SIZE);
                SetPageUptodate(page);
        }
 
        fuse_write_update_size(inode, pos + copied);
        set_page_dirty(page);
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 
        return copied;
 }
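
Nearly every hunk in this section is the mechanical PAGE_CACHE_* to PAGE_* rename. The retired names were plain one-to-one aliases in include/linux/pagemap.h, which is why the substitution is behavior-neutral:

    #define PAGE_CACHE_SHIFT        PAGE_SHIFT
    #define PAGE_CACHE_SIZE         PAGE_SIZE
    #define PAGE_CACHE_MASK         PAGE_MASK
    #define PAGE_CACHE_ALIGN(addr)  (((addr) + PAGE_CACHE_SIZE - 1) & PAGE_CACHE_MASK)

    #define page_cache_get(page)            get_page(page)
    #define page_cache_release(page)        put_page(page)
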
index 4d69d5c0bedcdeedbf07ae4e4f63ae6ccfc85ad6..1ce67668a8e17d2d721c456ed02865e01c01f7a2 100644 (file)
@@ -339,11 +339,11 @@ int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid,
 
        fuse_invalidate_attr(inode);
        if (offset >= 0) {
-               pg_start = offset >> PAGE_CACHE_SHIFT;
+               pg_start = offset >> PAGE_SHIFT;
                if (len <= 0)
                        pg_end = -1;
                else
-                       pg_end = (offset + len - 1) >> PAGE_CACHE_SHIFT;
+                       pg_end = (offset + len - 1) >> PAGE_SHIFT;
                invalidate_inode_pages2_range(inode->i_mapping,
                                              pg_start, pg_end);
        }
@@ -864,7 +864,7 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
                process_init_limits(fc, arg);
 
                if (arg->minor >= 6) {
-                       ra_pages = arg->max_readahead / PAGE_CACHE_SIZE;
+                       ra_pages = arg->max_readahead / PAGE_SIZE;
                        if (arg->flags & FUSE_ASYNC_READ)
                                fc->async_read = 1;
                        if (!(arg->flags & FUSE_POSIX_LOCKS))
@@ -901,7 +901,7 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
                        if (arg->time_gran && arg->time_gran <= 1000000000)
                                fc->sb->s_time_gran = arg->time_gran;
                } else {
-                       ra_pages = fc->max_read / PAGE_CACHE_SIZE;
+                       ra_pages = fc->max_read / PAGE_SIZE;
                        fc->no_lock = 1;
                        fc->no_flock = 1;
                }
@@ -922,7 +922,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
 
        arg->major = FUSE_KERNEL_VERSION;
        arg->minor = FUSE_KERNEL_MINOR_VERSION;
-       arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE;
+       arg->max_readahead = fc->bdi.ra_pages * PAGE_SIZE;
        arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
                FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
                FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
@@ -955,7 +955,7 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
        int err;
 
        fc->bdi.name = "fuse";
-       fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
+       fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
        /* fuse does its own writeback accounting */
        fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB | BDI_CAP_STRICTLIMIT;
 
@@ -1053,8 +1053,8 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
                        goto err;
 #endif
        } else {
-               sb->s_blocksize = PAGE_CACHE_SIZE;
-               sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+               sb->s_blocksize = PAGE_SIZE;
+               sb->s_blocksize_bits = PAGE_SHIFT;
        }
        sb->s_magic = FUSE_SUPER_MAGIC;
        sb->s_op = &fuse_super_operations;
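
The fuse hunks above reuse one arithmetic idiom throughout: a byte position splits into a page index (pos >> PAGE_SHIFT) and an in-page offset (pos & (PAGE_SIZE - 1)), and a byte range spans ((pos + len - 1) >> PAGE_SHIFT) - (pos >> PAGE_SHIFT) + 1 pages, exactly as fuse_wr_pages() computes. A minimal user-space sketch, assuming 4 KiB pages; the values are illustrative:

    #include <stdio.h>

    #define PAGE_SHIFT 12                  /* assume 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
            unsigned long pos = 5000, len = 9000;
            unsigned long index  = pos >> PAGE_SHIFT;      /* first page: 1 */
            unsigned long offset = pos & (PAGE_SIZE - 1);  /* in-page offset: 904 */
            unsigned long npages = ((pos + len - 1) >> PAGE_SHIFT) - index + 1;

            printf("index=%lu offset=%lu npages=%lu\n", index, offset, npages);
            return 0;                                      /* prints 1, 904, 3 */
    }
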
index aa016e4b8bec976d57b8a36cdbb3c12c7e8e39ff..1bbbee945f46dc2f1e52da3c83a494cdea722315 100644 (file)
@@ -101,7 +101,7 @@ static int gfs2_writepage_common(struct page *page,
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        loff_t i_size = i_size_read(inode);
-       pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+       pgoff_t end_index = i_size >> PAGE_SHIFT;
        unsigned offset;
 
        if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
@@ -109,9 +109,9 @@ static int gfs2_writepage_common(struct page *page,
        if (current->journal_info)
                goto redirty;
        /* Is the page fully outside i_size? (truncate in progress) */
-       offset = i_size & (PAGE_CACHE_SIZE-1);
+       offset = i_size & (PAGE_SIZE-1);
        if (page->index > end_index || (page->index == end_index && !offset)) {
-               page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
+               page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
                goto out;
        }
        return 1;
@@ -238,7 +238,7 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping,
 {
        struct inode *inode = mapping->host;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
-       unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize);
+       unsigned nrblocks = nr_pages * (PAGE_SIZE/inode->i_sb->s_blocksize);
        int i;
        int ret;
 
@@ -366,8 +366,8 @@ static int gfs2_write_cache_jdata(struct address_space *mapping,
                        cycled = 0;
                end = -1;
        } else {
-               index = wbc->range_start >> PAGE_CACHE_SHIFT;
-               end = wbc->range_end >> PAGE_CACHE_SHIFT;
+               index = wbc->range_start >> PAGE_SHIFT;
+               end = wbc->range_end >> PAGE_SHIFT;
                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                        range_whole = 1;
                cycled = 1; /* ignore range_cyclic tests */
@@ -458,7 +458,7 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
         * so we need to supply one here. It doesn't happen often.
         */
        if (unlikely(page->index)) {
-               zero_user(page, 0, PAGE_CACHE_SIZE);
+               zero_user(page, 0, PAGE_SIZE);
                SetPageUptodate(page);
                return 0;
        }
@@ -471,7 +471,7 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
        if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
                dsize = (dibh->b_size - sizeof(struct gfs2_dinode));
        memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
-       memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize);
+       memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
        kunmap_atomic(kaddr);
        flush_dcache_page(page);
        brelse(dibh);
@@ -560,8 +560,8 @@ int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
                        unsigned size)
 {
        struct address_space *mapping = ip->i_inode.i_mapping;
-       unsigned long index = *pos / PAGE_CACHE_SIZE;
-       unsigned offset = *pos & (PAGE_CACHE_SIZE - 1);
+       unsigned long index = *pos / PAGE_SIZE;
+       unsigned offset = *pos & (PAGE_SIZE - 1);
        unsigned copied = 0;
        unsigned amt;
        struct page *page;
@@ -569,15 +569,15 @@ int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
 
        do {
                amt = size - copied;
-               if (offset + size > PAGE_CACHE_SIZE)
-                       amt = PAGE_CACHE_SIZE - offset;
+               if (offset + size > PAGE_SIZE)
+                       amt = PAGE_SIZE - offset;
                page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
                if (IS_ERR(page))
                        return PTR_ERR(page);
                p = kmap_atomic(page);
                memcpy(buf + copied, p + offset, amt);
                kunmap_atomic(p);
-               page_cache_release(page);
+               put_page(page);
                copied += amt;
                index++;
                offset = 0;
@@ -651,8 +651,8 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
        unsigned requested = 0;
        int alloc_required;
        int error = 0;
-       pgoff_t index = pos >> PAGE_CACHE_SHIFT;
-       unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+       pgoff_t index = pos >> PAGE_SHIFT;
+       unsigned from = pos & (PAGE_SIZE - 1);
        struct page *page;
 
        gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
@@ -697,7 +697,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
                rblocks += gfs2_rg_blocks(ip, requested);
 
        error = gfs2_trans_begin(sdp, rblocks,
-                                PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
+                                PAGE_SIZE/sdp->sd_sb.sb_bsize);
        if (error)
                goto out_trans_fail;
 
@@ -727,7 +727,7 @@ out:
                return 0;
 
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 
        gfs2_trans_end(sdp);
        if (pos + len > ip->i_inode.i_size)
@@ -827,7 +827,7 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
        if (!PageUptodate(page))
                SetPageUptodate(page);
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 
        if (copied) {
                if (inode->i_size < to)
@@ -877,7 +877,7 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        struct buffer_head *dibh;
-       unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
+       unsigned int from = pos & (PAGE_SIZE - 1);
        unsigned int to = from + len;
        int ret;
        struct gfs2_trans *tr = current->journal_info;
@@ -888,7 +888,7 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
        ret = gfs2_meta_inode_buffer(ip, &dibh);
        if (unlikely(ret)) {
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
                goto failed;
        }
 
@@ -992,7 +992,7 @@ static void gfs2_invalidatepage(struct page *page, unsigned int offset,
 {
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
        unsigned int stop = offset + length;
-       int partial_page = (offset || length < PAGE_CACHE_SIZE);
+       int partial_page = (offset || length < PAGE_SIZE);
        struct buffer_head *bh, *head;
        unsigned long pos = 0;
 
@@ -1082,7 +1082,7 @@ static ssize_t gfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
         * the first place, mapping->nrpages will always be zero.
         */
        if (mapping->nrpages) {
-               loff_t lstart = offset & ~(PAGE_CACHE_SIZE - 1);
+               loff_t lstart = offset & ~(PAGE_SIZE - 1);
                loff_t len = iov_iter_count(iter);
                loff_t end = PAGE_ALIGN(offset + len) - 1;
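
gfs2_writepage_common() above, like hostfs_writepage() further down, first checks whether the page sits wholly or partly beyond i_size. A hedged user-space sketch of that test, assuming 4 KiB pages; valid_bytes() is an illustrative helper, not a kernel function:

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* How many bytes of the page at index idx are backed by a file of
     * size i_size: 0 means the page is wholly outside i_size. */
    unsigned long valid_bytes(unsigned long idx, unsigned long long i_size)
    {
            unsigned long long end_index = i_size >> PAGE_SHIFT;
            unsigned long offset = i_size & (PAGE_SIZE - 1);

            if (idx > end_index || (idx == end_index && !offset))
                    return 0;          /* past EOF: invalidate or skip */
            if (idx == end_index)
                    return offset;     /* partial final page */
            return PAGE_SIZE;          /* fully inside the file */
    }
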
 
index 0860f0b5b3f190820aba952dd52b561b983b9448..24ce1cdd434abf6a8539d4d326d4774deb351256 100644 (file)
@@ -75,7 +75,7 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
                        dsize = dibh->b_size - sizeof(struct gfs2_dinode);
 
                memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
-               memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize);
+               memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
                kunmap(page);
 
                SetPageUptodate(page);
@@ -98,7 +98,7 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
 
        if (release) {
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
        }
 
        return 0;
@@ -932,8 +932,8 @@ static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
 {
        struct inode *inode = mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
-       unsigned long index = from >> PAGE_CACHE_SHIFT;
-       unsigned offset = from & (PAGE_CACHE_SIZE-1);
+       unsigned long index = from >> PAGE_SHIFT;
+       unsigned offset = from & (PAGE_SIZE-1);
        unsigned blocksize, iblock, length, pos;
        struct buffer_head *bh;
        struct page *page;
@@ -945,7 +945,7 @@ static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
 
        blocksize = inode->i_sb->s_blocksize;
        length = blocksize - (offset & (blocksize - 1));
-       iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
+       iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
 
        if (!page_has_buffers(page))
                create_empty_buffers(page, blocksize, 0);
@@ -989,7 +989,7 @@ static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
        mark_buffer_dirty(bh);
 unlock:
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
        return err;
 }
 
index c9384f932975efb8075d2fd2013116e8be9f055a..208efc70ad49e992e71730d87497fb96b42e77c2 100644 (file)
@@ -354,8 +354,8 @@ static int gfs2_allocate_page_backing(struct page *page)
 {
        struct inode *inode = page->mapping->host;
        struct buffer_head bh;
-       unsigned long size = PAGE_CACHE_SIZE;
-       u64 lblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+       unsigned long size = PAGE_SIZE;
+       u64 lblock = page->index << (PAGE_SHIFT - inode->i_blkbits);
 
        do {
                bh.b_state = 0;
@@ -386,7 +386,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_alloc_parms ap = { .aflags = 0, };
        unsigned long last_index;
-       u64 pos = page->index << PAGE_CACHE_SHIFT;
+       u64 pos = page->index << PAGE_SHIFT;
        unsigned int data_blocks, ind_blocks, rblocks;
        struct gfs2_holder gh;
        loff_t size;
@@ -401,7 +401,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        if (ret)
                goto out;
 
-       gfs2_size_hint(vma->vm_file, pos, PAGE_CACHE_SIZE);
+       gfs2_size_hint(vma->vm_file, pos, PAGE_SIZE);
 
        gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
        ret = gfs2_glock_nq(&gh);
@@ -411,7 +411,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
        set_bit(GIF_SW_PAGED, &ip->i_flags);
 
-       if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) {
+       if (!gfs2_write_alloc_required(ip, pos, PAGE_SIZE)) {
                lock_page(page);
                if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
                        ret = -EAGAIN;
@@ -424,7 +424,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        if (ret)
                goto out_unlock;
 
-       gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
+       gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
        ap.target = data_blocks + ind_blocks;
        ret = gfs2_quota_lock_check(ip, &ap);
        if (ret)
@@ -447,7 +447,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        lock_page(page);
        ret = -EINVAL;
        size = i_size_read(inode);
-       last_index = (size - 1) >> PAGE_CACHE_SHIFT;
+       last_index = (size - 1) >> PAGE_SHIFT;
        /* Check page index against inode size */
        if (size == 0 || (page->index > last_index))
                goto out_trans_end;
@@ -873,7 +873,7 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
                        rblocks += data_blocks ? data_blocks : 1;
 
                error = gfs2_trans_begin(sdp, rblocks,
-                                        PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
+                                        PAGE_SIZE/sdp->sd_sb.sb_bsize);
                if (error)
                        goto out_trans_fail;
 
index e137d96f1b17bc1e4250bb0153109be757660c86..0448524c11bcfca005a874861235066de2a7242e 100644 (file)
@@ -124,7 +124,7 @@ struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
        if (mapping == NULL)
                mapping = &sdp->sd_aspace;
 
-       shift = PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift;
+       shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
        index = blkno >> shift;             /* convert block to page */
        bufnum = blkno - (index << shift);  /* block buf index within page */
 
@@ -154,7 +154,7 @@ struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
                map_bh(bh, sdp->sd_vfs, blkno);
 
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 
        return bh;
 }
index a398913442591892f6b7e24e14d2b1a8e30b7394..ce7d69a2fdc03915faf2f27b26168b35bd7527a6 100644 (file)
@@ -701,7 +701,7 @@ static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
        unsigned to_write = bytes, pg_off = off;
        int done = 0;
 
-       blk = index << (PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift);
+       blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift);
        boff = off % bsize;
 
        page = find_or_create_page(mapping, index, GFP_NOFS);
@@ -753,13 +753,13 @@ static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
        flush_dcache_page(page);
        kunmap_atomic(kaddr);
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 
        return 0;
 
 unlock_out:
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
        return -EIO;
 }
 
@@ -773,13 +773,13 @@ static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp,
 
        nbytes = sizeof(struct gfs2_quota);
 
-       pg_beg = loc >> PAGE_CACHE_SHIFT;
-       pg_off = loc % PAGE_CACHE_SIZE;
+       pg_beg = loc >> PAGE_SHIFT;
+       pg_off = loc % PAGE_SIZE;
 
        /* If the quota straddles a page boundary, split the write in two */
-       if ((pg_off + nbytes) > PAGE_CACHE_SIZE) {
+       if ((pg_off + nbytes) > PAGE_SIZE) {
                pg_oflow = 1;
-               overflow = (pg_off + nbytes) - PAGE_CACHE_SIZE;
+               overflow = (pg_off + nbytes) - PAGE_SIZE;
        }
 
        ptr = qp;
index 07c0265aa1953aebcd04d1d06e17d2ea53ddfc86..99a0bdac8796b11046f29c5981f32261338d8297 100644 (file)
@@ -918,9 +918,8 @@ static int read_rindex_entry(struct gfs2_inode *ip)
                goto fail;
 
        rgd->rd_gl->gl_object = rgd;
-       rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_CACHE_MASK;
-       rgd->rd_gl->gl_vm.end = PAGE_CACHE_ALIGN((rgd->rd_addr +
-                                                 rgd->rd_length) * bsize) - 1;
+       rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK;
+       rgd->rd_gl->gl_vm.end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
        rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
        rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
        if (rgd->rd_data > sdp->sd_max_rg_data)
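
read_rindex_entry() above rounds the resource group's byte extent down to a page boundary for gl_vm.start and up for gl_vm.end, and PAGE_MASK/PAGE_ALIGN behave exactly as their PAGE_CACHE_* twins did. A small sketch with made-up addresses, assuming 4 KiB pages and expanding PAGE_ALIGN to its usual form:

    #define PAGE_SIZE     4096UL
    #define PAGE_MASK     (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

    /* A region of bytes [13000, 21000), widened to whole pages: */
    unsigned long start = 13000UL & PAGE_MASK;     /* 12288, rounded down */
    unsigned long end   = PAGE_ALIGN(21000UL) - 1; /* 24575, last byte of
                                                      the rounded-up range */
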
index 221719eac5de667c1d6044697605148fca6b87e8..d77d844b668b14cbe16ab3e341f617d7db8e5f38 100644 (file)
@@ -278,14 +278,14 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
 
        mapping = tree->inode->i_mapping;
        off = (loff_t)cnid * tree->node_size;
-       block = off >> PAGE_CACHE_SHIFT;
-       node->page_offset = off & ~PAGE_CACHE_MASK;
+       block = off >> PAGE_SHIFT;
+       node->page_offset = off & ~PAGE_MASK;
        for (i = 0; i < tree->pages_per_bnode; i++) {
                page = read_mapping_page(mapping, block++, NULL);
                if (IS_ERR(page))
                        goto fail;
                if (PageError(page)) {
-                       page_cache_release(page);
+                       put_page(page);
                        goto fail;
                }
                node->page[i] = page;
@@ -401,7 +401,7 @@ void hfs_bnode_free(struct hfs_bnode *node)
 
        for (i = 0; i < node->tree->pages_per_bnode; i++)
                if (node->page[i])
-                       page_cache_release(node->page[i]);
+                       put_page(node->page[i]);
        kfree(node);
 }
 
@@ -429,11 +429,11 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
 
        pagep = node->page;
        memset(kmap(*pagep) + node->page_offset, 0,
-              min((int)PAGE_CACHE_SIZE, (int)tree->node_size));
+              min((int)PAGE_SIZE, (int)tree->node_size));
        set_page_dirty(*pagep);
        kunmap(*pagep);
        for (i = 1; i < tree->pages_per_bnode; i++) {
-               memset(kmap(*++pagep), 0, PAGE_CACHE_SIZE);
+               memset(kmap(*++pagep), 0, PAGE_SIZE);
                set_page_dirty(*pagep);
                kunmap(*pagep);
        }
index 1ab19e660e6909c08ec5e4a68510b41b5c345d26..37cdd955eceb2d787befb3393537f47c63ff5224 100644 (file)
@@ -116,14 +116,14 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
        }
 
        tree->node_size_shift = ffs(size) - 1;
-       tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+       tree->pages_per_bnode = (tree->node_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
        kunmap(page);
-       page_cache_release(page);
+       put_page(page);
        return tree;
 
 fail_page:
-       page_cache_release(page);
+       put_page(page);
 free_inode:
        tree->inode->i_mapping->a_ops = &hfs_aops;
        iput(tree->inode);
@@ -257,9 +257,9 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
        off = off16;
 
        off += node->page_offset;
-       pagep = node->page + (off >> PAGE_CACHE_SHIFT);
+       pagep = node->page + (off >> PAGE_SHIFT);
        data = kmap(*pagep);
-       off &= ~PAGE_CACHE_MASK;
+       off &= ~PAGE_MASK;
        idx = 0;
 
        for (;;) {
@@ -279,7 +279,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
                                        }
                                }
                        }
-                       if (++off >= PAGE_CACHE_SIZE) {
+                       if (++off >= PAGE_SIZE) {
                                kunmap(*pagep);
                                data = kmap(*++pagep);
                                off = 0;
@@ -302,9 +302,9 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
                len = hfs_brec_lenoff(node, 0, &off16);
                off = off16;
                off += node->page_offset;
-               pagep = node->page + (off >> PAGE_CACHE_SHIFT);
+               pagep = node->page + (off >> PAGE_SHIFT);
                data = kmap(*pagep);
-               off &= ~PAGE_CACHE_MASK;
+               off &= ~PAGE_MASK;
        }
 }
 
@@ -348,9 +348,9 @@ void hfs_bmap_free(struct hfs_bnode *node)
                len = hfs_brec_lenoff(node, 0, &off);
        }
        off += node->page_offset + nidx / 8;
-       page = node->page[off >> PAGE_CACHE_SHIFT];
+       page = node->page[off >> PAGE_SHIFT];
        data = kmap(page);
-       off &= ~PAGE_CACHE_MASK;
+       off &= ~PAGE_MASK;
        m = 1 << (~nidx & 7);
        byte = data[off];
        if (!(byte & m)) {
index 6686bf39a5b5a0b4bb83de60527cd873b612fc35..cb1e5faa2fb71d44fe300a65421e5bfbf1199e04 100644 (file)
@@ -91,8 +91,8 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
        if (!tree)
                return 0;
 
-       if (tree->node_size >= PAGE_CACHE_SIZE) {
-               nidx = page->index >> (tree->node_size_shift - PAGE_CACHE_SHIFT);
+       if (tree->node_size >= PAGE_SIZE) {
+               nidx = page->index >> (tree->node_size_shift - PAGE_SHIFT);
                spin_lock(&tree->hash_lock);
                node = hfs_bnode_findhash(tree, nidx);
                if (!node)
@@ -105,8 +105,8 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
                }
                spin_unlock(&tree->hash_lock);
        } else {
-               nidx = page->index << (PAGE_CACHE_SHIFT - tree->node_size_shift);
-               i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift);
+               nidx = page->index << (PAGE_SHIFT - tree->node_size_shift);
+               i = 1 << (PAGE_SHIFT - tree->node_size_shift);
                spin_lock(&tree->hash_lock);
                do {
                        node = hfs_bnode_findhash(tree, nidx++);
index d2954451519ed64bc93028d33341f4ad85a8f990..c0ae274c0a22560ff9850aa04daf2e3f465e8f17 100644 (file)
@@ -13,7 +13,7 @@
 #include "hfsplus_fs.h"
 #include "hfsplus_raw.h"
 
-#define PAGE_CACHE_BITS        (PAGE_CACHE_SIZE * 8)
+#define PAGE_CACHE_BITS        (PAGE_SIZE * 8)
 
 int hfsplus_block_allocate(struct super_block *sb, u32 size,
                u32 offset, u32 *max)
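
PAGE_CACHE_BITS above is the number of allocation-bitmap bits one page holds, so with 4 KiB pages each bitmap page tracks 32768 blocks. A hedged sketch of how a block number would split into a bitmap page and a bit under that layout; these helpers are hypothetical, not part of the hfsplus API:

    #define PAGE_SIZE       4096u
    #define PAGE_CACHE_BITS (PAGE_SIZE * 8)    /* 32768 bits per page */

    static inline unsigned bitmap_page(unsigned blk) { return blk / PAGE_CACHE_BITS; }
    static inline unsigned bitmap_bit(unsigned blk)  { return blk % PAGE_CACHE_BITS; }
    /* e.g. block 40000 lands in bitmap page 1, bit 7232 */
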
index 63924662aaf3efa3b80cb732e409499e5cb2f87e..ce014ceb89efcc0b810f99a8cac4bbf815f21bfc 100644 (file)
@@ -24,16 +24,16 @@ void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
        int l;
 
        off += node->page_offset;
-       pagep = node->page + (off >> PAGE_CACHE_SHIFT);
-       off &= ~PAGE_CACHE_MASK;
+       pagep = node->page + (off >> PAGE_SHIFT);
+       off &= ~PAGE_MASK;
 
-       l = min_t(int, len, PAGE_CACHE_SIZE - off);
+       l = min_t(int, len, PAGE_SIZE - off);
        memcpy(buf, kmap(*pagep) + off, l);
        kunmap(*pagep);
 
        while ((len -= l) != 0) {
                buf += l;
-               l = min_t(int, len, PAGE_CACHE_SIZE);
+               l = min_t(int, len, PAGE_SIZE);
                memcpy(buf, kmap(*++pagep), l);
                kunmap(*pagep);
        }
@@ -77,17 +77,17 @@ void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
        int l;
 
        off += node->page_offset;
-       pagep = node->page + (off >> PAGE_CACHE_SHIFT);
-       off &= ~PAGE_CACHE_MASK;
+       pagep = node->page + (off >> PAGE_SHIFT);
+       off &= ~PAGE_MASK;
 
-       l = min_t(int, len, PAGE_CACHE_SIZE - off);
+       l = min_t(int, len, PAGE_SIZE - off);
        memcpy(kmap(*pagep) + off, buf, l);
        set_page_dirty(*pagep);
        kunmap(*pagep);
 
        while ((len -= l) != 0) {
                buf += l;
-               l = min_t(int, len, PAGE_CACHE_SIZE);
+               l = min_t(int, len, PAGE_SIZE);
                memcpy(kmap(*++pagep), buf, l);
                set_page_dirty(*pagep);
                kunmap(*pagep);
@@ -107,16 +107,16 @@ void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
        int l;
 
        off += node->page_offset;
-       pagep = node->page + (off >> PAGE_CACHE_SHIFT);
-       off &= ~PAGE_CACHE_MASK;
+       pagep = node->page + (off >> PAGE_SHIFT);
+       off &= ~PAGE_MASK;
 
-       l = min_t(int, len, PAGE_CACHE_SIZE - off);
+       l = min_t(int, len, PAGE_SIZE - off);
        memset(kmap(*pagep) + off, 0, l);
        set_page_dirty(*pagep);
        kunmap(*pagep);
 
        while ((len -= l) != 0) {
-               l = min_t(int, len, PAGE_CACHE_SIZE);
+               l = min_t(int, len, PAGE_SIZE);
                memset(kmap(*++pagep), 0, l);
                set_page_dirty(*pagep);
                kunmap(*pagep);
@@ -136,20 +136,20 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
        tree = src_node->tree;
        src += src_node->page_offset;
        dst += dst_node->page_offset;
-       src_page = src_node->page + (src >> PAGE_CACHE_SHIFT);
-       src &= ~PAGE_CACHE_MASK;
-       dst_page = dst_node->page + (dst >> PAGE_CACHE_SHIFT);
-       dst &= ~PAGE_CACHE_MASK;
+       src_page = src_node->page + (src >> PAGE_SHIFT);
+       src &= ~PAGE_MASK;
+       dst_page = dst_node->page + (dst >> PAGE_SHIFT);
+       dst &= ~PAGE_MASK;
 
        if (src == dst) {
-               l = min_t(int, len, PAGE_CACHE_SIZE - src);
+               l = min_t(int, len, PAGE_SIZE - src);
                memcpy(kmap(*dst_page) + src, kmap(*src_page) + src, l);
                kunmap(*src_page);
                set_page_dirty(*dst_page);
                kunmap(*dst_page);
 
                while ((len -= l) != 0) {
-                       l = min_t(int, len, PAGE_CACHE_SIZE);
+                       l = min_t(int, len, PAGE_SIZE);
                        memcpy(kmap(*++dst_page), kmap(*++src_page), l);
                        kunmap(*src_page);
                        set_page_dirty(*dst_page);
@@ -161,12 +161,12 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
                do {
                        src_ptr = kmap(*src_page) + src;
                        dst_ptr = kmap(*dst_page) + dst;
-                       if (PAGE_CACHE_SIZE - src < PAGE_CACHE_SIZE - dst) {
-                               l = PAGE_CACHE_SIZE - src;
+                       if (PAGE_SIZE - src < PAGE_SIZE - dst) {
+                               l = PAGE_SIZE - src;
                                src = 0;
                                dst += l;
                        } else {
-                               l = PAGE_CACHE_SIZE - dst;
+                               l = PAGE_SIZE - dst;
                                src += l;
                                dst = 0;
                        }
@@ -195,11 +195,11 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
        dst += node->page_offset;
        if (dst > src) {
                src += len - 1;
-               src_page = node->page + (src >> PAGE_CACHE_SHIFT);
-               src = (src & ~PAGE_CACHE_MASK) + 1;
+               src_page = node->page + (src >> PAGE_SHIFT);
+               src = (src & ~PAGE_MASK) + 1;
                dst += len - 1;
-               dst_page = node->page + (dst >> PAGE_CACHE_SHIFT);
-               dst = (dst & ~PAGE_CACHE_MASK) + 1;
+               dst_page = node->page + (dst >> PAGE_SHIFT);
+               dst = (dst & ~PAGE_MASK) + 1;
 
                if (src == dst) {
                        while (src < len) {
@@ -208,7 +208,7 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
                                set_page_dirty(*dst_page);
                                kunmap(*dst_page);
                                len -= src;
-                               src = PAGE_CACHE_SIZE;
+                               src = PAGE_SIZE;
                                src_page--;
                                dst_page--;
                        }
@@ -226,32 +226,32 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
                                dst_ptr = kmap(*dst_page) + dst;
                                if (src < dst) {
                                        l = src;
-                                       src = PAGE_CACHE_SIZE;
+                                       src = PAGE_SIZE;
                                        dst -= l;
                                } else {
                                        l = dst;
                                        src -= l;
-                                       dst = PAGE_CACHE_SIZE;
+                                       dst = PAGE_SIZE;
                                }
                                l = min(len, l);
                                memmove(dst_ptr - l, src_ptr - l, l);
                                kunmap(*src_page);
                                set_page_dirty(*dst_page);
                                kunmap(*dst_page);
-                               if (dst == PAGE_CACHE_SIZE)
+                               if (dst == PAGE_SIZE)
                                        dst_page--;
                                else
                                        src_page--;
                        } while ((len -= l));
                }
        } else {
-               src_page = node->page + (src >> PAGE_CACHE_SHIFT);
-               src &= ~PAGE_CACHE_MASK;
-               dst_page = node->page + (dst >> PAGE_CACHE_SHIFT);
-               dst &= ~PAGE_CACHE_MASK;
+               src_page = node->page + (src >> PAGE_SHIFT);
+               src &= ~PAGE_MASK;
+               dst_page = node->page + (dst >> PAGE_SHIFT);
+               dst &= ~PAGE_MASK;
 
                if (src == dst) {
-                       l = min_t(int, len, PAGE_CACHE_SIZE - src);
+                       l = min_t(int, len, PAGE_SIZE - src);
                        memmove(kmap(*dst_page) + src,
                                kmap(*src_page) + src, l);
                        kunmap(*src_page);
@@ -259,7 +259,7 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
                        kunmap(*dst_page);
 
                        while ((len -= l) != 0) {
-                               l = min_t(int, len, PAGE_CACHE_SIZE);
+                               l = min_t(int, len, PAGE_SIZE);
                                memmove(kmap(*++dst_page),
                                        kmap(*++src_page), l);
                                kunmap(*src_page);
@@ -272,13 +272,13 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
                        do {
                                src_ptr = kmap(*src_page) + src;
                                dst_ptr = kmap(*dst_page) + dst;
-                               if (PAGE_CACHE_SIZE - src <
-                                               PAGE_CACHE_SIZE - dst) {
-                                       l = PAGE_CACHE_SIZE - src;
+                               if (PAGE_SIZE - src <
+                                               PAGE_SIZE - dst) {
+                                       l = PAGE_SIZE - src;
                                        src = 0;
                                        dst += l;
                                } else {
-                                       l = PAGE_CACHE_SIZE - dst;
+                                       l = PAGE_SIZE - dst;
                                        src += l;
                                        dst = 0;
                                }
@@ -444,14 +444,14 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
 
        mapping = tree->inode->i_mapping;
        off = (loff_t)cnid << tree->node_size_shift;
-       block = off >> PAGE_CACHE_SHIFT;
-       node->page_offset = off & ~PAGE_CACHE_MASK;
+       block = off >> PAGE_SHIFT;
+       node->page_offset = off & ~PAGE_MASK;
        for (i = 0; i < tree->pages_per_bnode; block++, i++) {
                page = read_mapping_page(mapping, block, NULL);
                if (IS_ERR(page))
                        goto fail;
                if (PageError(page)) {
-                       page_cache_release(page);
+                       put_page(page);
                        goto fail;
                }
                node->page[i] = page;
@@ -569,7 +569,7 @@ void hfs_bnode_free(struct hfs_bnode *node)
 
        for (i = 0; i < node->tree->pages_per_bnode; i++)
                if (node->page[i])
-                       page_cache_release(node->page[i]);
+                       put_page(node->page[i]);
        kfree(node);
 }
 
@@ -597,11 +597,11 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
 
        pagep = node->page;
        memset(kmap(*pagep) + node->page_offset, 0,
-              min_t(int, PAGE_CACHE_SIZE, tree->node_size));
+              min_t(int, PAGE_SIZE, tree->node_size));
        set_page_dirty(*pagep);
        kunmap(*pagep);
        for (i = 1; i < tree->pages_per_bnode; i++) {
-               memset(kmap(*++pagep), 0, PAGE_CACHE_SIZE);
+               memset(kmap(*++pagep), 0, PAGE_SIZE);
                set_page_dirty(*pagep);
                kunmap(*pagep);
        }
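
hfs_bnode_read(), hfs_bnode_write() and hfs_bnode_clear() above all walk a node's backing pages with the same loop: min(len, PAGE_SIZE - off) bytes from the first page, then up to PAGE_SIZE from each following page. A user-space sketch of the pattern, with a flat array of buffers standing in for the kmap()ed pages:

    #include <string.h>

    #define PAGE_SIZE 4096u

    /* Copy len bytes starting at byte offset off out of a node whose
     * backing pages are modeled as an array of PAGE_SIZE buffers. */
    void node_read(char **pages, unsigned off, void *buf, unsigned len)
    {
            char **pagep = pages + off / PAGE_SIZE;
            char *dst = buf;
            unsigned l;

            off %= PAGE_SIZE;
            l = len < PAGE_SIZE - off ? len : PAGE_SIZE - off;
            memcpy(dst, *pagep + off, l);

            while ((len -= l) != 0) {
                    dst += l;
                    l = len < PAGE_SIZE ? len : PAGE_SIZE;
                    memcpy(dst, *++pagep, l);
            }
    }
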
index 3345c7553edc131302b5a2202418f3c47a0e4f03..d9d1a36ba8266b9d97da2ca99c1fd197bb6f8d70 100644 (file)
@@ -236,15 +236,15 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
        tree->node_size_shift = ffs(size) - 1;
 
        tree->pages_per_bnode =
-               (tree->node_size + PAGE_CACHE_SIZE - 1) >>
-               PAGE_CACHE_SHIFT;
+               (tree->node_size + PAGE_SIZE - 1) >>
+               PAGE_SHIFT;
 
        kunmap(page);
-       page_cache_release(page);
+       put_page(page);
        return tree;
 
  fail_page:
-       page_cache_release(page);
+       put_page(page);
  free_inode:
        tree->inode->i_mapping->a_ops = &hfsplus_aops;
        iput(tree->inode);
@@ -380,9 +380,9 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
        off = off16;
 
        off += node->page_offset;
-       pagep = node->page + (off >> PAGE_CACHE_SHIFT);
+       pagep = node->page + (off >> PAGE_SHIFT);
        data = kmap(*pagep);
-       off &= ~PAGE_CACHE_MASK;
+       off &= ~PAGE_MASK;
        idx = 0;
 
        for (;;) {
@@ -403,7 +403,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
                                        }
                                }
                        }
-                       if (++off >= PAGE_CACHE_SIZE) {
+                       if (++off >= PAGE_SIZE) {
                                kunmap(*pagep);
                                data = kmap(*++pagep);
                                off = 0;
@@ -426,9 +426,9 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
                len = hfs_brec_lenoff(node, 0, &off16);
                off = off16;
                off += node->page_offset;
-               pagep = node->page + (off >> PAGE_CACHE_SHIFT);
+               pagep = node->page + (off >> PAGE_SHIFT);
                data = kmap(*pagep);
-               off &= ~PAGE_CACHE_MASK;
+               off &= ~PAGE_MASK;
        }
 }
 
@@ -475,9 +475,9 @@ void hfs_bmap_free(struct hfs_bnode *node)
                len = hfs_brec_lenoff(node, 0, &off);
        }
        off += node->page_offset + nidx / 8;
-       page = node->page[off >> PAGE_CACHE_SHIFT];
+       page = node->page[off >> PAGE_SHIFT];
        data = kmap(page);
-       off &= ~PAGE_CACHE_MASK;
+       off &= ~PAGE_MASK;
        m = 1 << (~nidx & 7);
        byte = data[off];
        if (!(byte & m)) {
index 1a6394cdb54ef59520dbcd7f7577aee5d93d2561..b28f39865c3accd94ab0576d95ddeef867c6a53f 100644 (file)
@@ -87,9 +87,9 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
        }
        if (!tree)
                return 0;
-       if (tree->node_size >= PAGE_CACHE_SIZE) {
+       if (tree->node_size >= PAGE_SIZE) {
                nidx = page->index >>
-                       (tree->node_size_shift - PAGE_CACHE_SHIFT);
+                       (tree->node_size_shift - PAGE_SHIFT);
                spin_lock(&tree->hash_lock);
                node = hfs_bnode_findhash(tree, nidx);
                if (!node)
@@ -103,8 +103,8 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
                spin_unlock(&tree->hash_lock);
        } else {
                nidx = page->index <<
-                       (PAGE_CACHE_SHIFT - tree->node_size_shift);
-               i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift);
+                       (PAGE_SHIFT - tree->node_size_shift);
+               i = 1 << (PAGE_SHIFT - tree->node_size_shift);
                spin_lock(&tree->hash_lock);
                do {
                        node = hfs_bnode_findhash(tree, nidx++);
index 5d54490a136d8accfab797bdd3ed3e8bd6ae0d75..c35911362ff9edaa839976cad64f1b9912430404 100644 (file)
@@ -438,7 +438,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
        err = -EFBIG;
        last_fs_block = sbi->total_blocks - 1;
        last_fs_page = (last_fs_block << sbi->alloc_blksz_shift) >>
-                       PAGE_CACHE_SHIFT;
+                       PAGE_SHIFT;
 
        if ((last_fs_block > (sector_t)(~0ULL) >> (sbi->alloc_blksz_shift - 9)) ||
            (last_fs_page > (pgoff_t)(~0ULL))) {
index ab01530b4930f4bf49aec02659a8fd903ee4a047..70e445ff0cff481f40d9fa0783122c6971853cec 100644 (file)
@@ -220,7 +220,7 @@ check_attr_tree_state_again:
 
        index = 0;
        written = 0;
-       for (; written < node_size; index++, written += PAGE_CACHE_SIZE) {
+       for (; written < node_size; index++, written += PAGE_SIZE) {
                void *kaddr;
 
                page = read_mapping_page(mapping, index, NULL);
@@ -231,11 +231,11 @@ check_attr_tree_state_again:
 
                kaddr = kmap_atomic(page);
                memcpy(kaddr, buf + written,
-                       min_t(size_t, PAGE_CACHE_SIZE, node_size - written));
+                       min_t(size_t, PAGE_SIZE, node_size - written));
                kunmap_atomic(kaddr);
 
                set_page_dirty(page);
-               page_cache_release(page);
+               put_page(page);
        }
 
        hfsplus_mark_inode_dirty(attr_file, HFSPLUS_I_ATTR_DIRTY);
index d1abbee281d19d8f51f1417bdb56f2adebc178f0..7016653f3e417c20d488a1ee255077d0dd670631 100644 (file)
@@ -410,12 +410,12 @@ static int hostfs_writepage(struct page *page, struct writeback_control *wbc)
        struct inode *inode = mapping->host;
        char *buffer;
        loff_t base = page_offset(page);
-       int count = PAGE_CACHE_SIZE;
-       int end_index = inode->i_size >> PAGE_CACHE_SHIFT;
+       int count = PAGE_SIZE;
+       int end_index = inode->i_size >> PAGE_SHIFT;
        int err;
 
        if (page->index >= end_index)
-               count = inode->i_size & (PAGE_CACHE_SIZE-1);
+               count = inode->i_size & (PAGE_SIZE-1);
 
        buffer = kmap(page);
 
@@ -447,7 +447,7 @@ static int hostfs_readpage(struct file *file, struct page *page)
 
        buffer = kmap(page);
        bytes_read = read_file(FILE_HOSTFS_I(file)->fd, &start, buffer,
-                       PAGE_CACHE_SIZE);
+                       PAGE_SIZE);
        if (bytes_read < 0) {
                ClearPageUptodate(page);
                SetPageError(page);
@@ -455,7 +455,7 @@ static int hostfs_readpage(struct file *file, struct page *page)
                goto out;
        }
 
-       memset(buffer + bytes_read, 0, PAGE_CACHE_SIZE - bytes_read);
+       memset(buffer + bytes_read, 0, PAGE_SIZE - bytes_read);
 
        ClearPageError(page);
        SetPageUptodate(page);
@@ -471,7 +471,7 @@ static int hostfs_write_begin(struct file *file, struct address_space *mapping,
                              loff_t pos, unsigned len, unsigned flags,
                              struct page **pagep, void **fsdata)
 {
-       pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+       pgoff_t index = pos >> PAGE_SHIFT;
 
        *pagep = grab_cache_page_write_begin(mapping, index, flags);
        if (!*pagep)
@@ -485,14 +485,14 @@ static int hostfs_write_end(struct file *file, struct address_space *mapping,
 {
        struct inode *inode = mapping->host;
        void *buffer;
-       unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+       unsigned from = pos & (PAGE_SIZE - 1);
        int err;
 
        buffer = kmap(page);
        err = write_file(FILE_HOSTFS_I(file)->fd, &pos, buffer + from, copied);
        kunmap(page);
 
-       if (!PageUptodate(page) && err == PAGE_CACHE_SIZE)
+       if (!PageUptodate(page) && err == PAGE_SIZE)
                SetPageUptodate(page);
 
        /*
@@ -502,7 +502,7 @@ static int hostfs_write_end(struct file *file, struct address_space *mapping,
        if (err > 0 && (pos > inode->i_size))
                inode->i_size = pos;
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 
        return err;
 }
index e1f465a389d5be1b27f8fd98451312bb94b454b8..4ea71eba40a57a3dd1c2e32460021878e12f56d3 100644 (file)
@@ -213,12 +213,12 @@ hugetlbfs_read_actor(struct page *page, unsigned long offset,
        int i, chunksize;
 
        /* Find which 4k chunk and offset within that chunk */
-       i = offset >> PAGE_CACHE_SHIFT;
-       offset = offset & ~PAGE_CACHE_MASK;
+       i = offset >> PAGE_SHIFT;
+       offset = offset & ~PAGE_MASK;
 
        while (size) {
                size_t n;
-               chunksize = PAGE_CACHE_SIZE;
+               chunksize = PAGE_SIZE;
                if (offset)
                        chunksize -= offset;
                if (chunksize > size)
@@ -237,7 +237,7 @@ hugetlbfs_read_actor(struct page *page, unsigned long offset,
 /*
  * Support for read() - Find the page attached to f_mapping and copy out the
 * data. It's *very* similar to do_generic_mapping_read(); we can't use that
- * since it has PAGE_CACHE_SIZE assumptions.
+ * since it has PAGE_SIZE assumptions.
  */
 static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
@@ -285,7 +285,7 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
                         * We have the page, copy it to user space buffer.
                         */
                        copied = hugetlbfs_read_actor(page, offset, to, nr);
-                       page_cache_release(page);
+                       put_page(page);
                }
                offset += copied;
                retval += copied;
index f311bf084015fd1450a859ebb31e0200ccd20ff3..2e4e834d1a9871377137a1735f7f28548b965304 100644 (file)
@@ -26,7 +26,7 @@
 #include "zisofs.h"
 
 /* This should probably be global. */
-static char zisofs_sink_page[PAGE_CACHE_SIZE];
+static char zisofs_sink_page[PAGE_SIZE];
 
 /*
  * This contains the zlib memory allocation and the mutex for the
@@ -70,11 +70,11 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
                for ( i = 0 ; i < pcount ; i++ ) {
                        if (!pages[i])
                                continue;
-                       memset(page_address(pages[i]), 0, PAGE_CACHE_SIZE);
+                       memset(page_address(pages[i]), 0, PAGE_SIZE);
                        flush_dcache_page(pages[i]);
                        SetPageUptodate(pages[i]);
                }
-               return ((loff_t)pcount) << PAGE_CACHE_SHIFT;
+               return ((loff_t)pcount) << PAGE_SHIFT;
        }
 
        /* Because zlib is not thread-safe, do all the I/O at the top. */
@@ -121,11 +121,11 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
                        if (pages[curpage]) {
                                stream.next_out = page_address(pages[curpage])
                                                + poffset;
-                               stream.avail_out = PAGE_CACHE_SIZE - poffset;
+                               stream.avail_out = PAGE_SIZE - poffset;
                                poffset = 0;
                        } else {
                                stream.next_out = (void *)&zisofs_sink_page;
-                               stream.avail_out = PAGE_CACHE_SIZE;
+                               stream.avail_out = PAGE_SIZE;
                        }
                }
                if (!stream.avail_in) {
@@ -220,14 +220,14 @@ static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount,
         * pages with the data we have anyway...
         */
        start_off = page_offset(pages[full_page]);
-       end_off = min_t(loff_t, start_off + PAGE_CACHE_SIZE, inode->i_size);
+       end_off = min_t(loff_t, start_off + PAGE_SIZE, inode->i_size);
 
        cstart_block = start_off >> zisofs_block_shift;
        cend_block = (end_off + (1 << zisofs_block_shift) - 1)
                        >> zisofs_block_shift;
 
-       WARN_ON(start_off - (full_page << PAGE_CACHE_SHIFT) !=
-               ((cstart_block << zisofs_block_shift) & PAGE_CACHE_MASK));
+       WARN_ON(start_off - (full_page << PAGE_SHIFT) !=
+               ((cstart_block << zisofs_block_shift) & PAGE_MASK));
 
        /* Find the pointer to this specific chunk */
        /* Note: we're not using isonum_731() here because the data is known aligned */
@@ -260,10 +260,10 @@ static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount,
                ret = zisofs_uncompress_block(inode, block_start, block_end,
                                              pcount, pages, poffset, &err);
                poffset += ret;
-               pages += poffset >> PAGE_CACHE_SHIFT;
-               pcount -= poffset >> PAGE_CACHE_SHIFT;
-               full_page -= poffset >> PAGE_CACHE_SHIFT;
-               poffset &= ~PAGE_CACHE_MASK;
+               pages += poffset >> PAGE_SHIFT;
+               pcount -= poffset >> PAGE_SHIFT;
+               full_page -= poffset >> PAGE_SHIFT;
+               poffset &= ~PAGE_MASK;
 
                if (err) {
                        brelse(bh);
@@ -282,7 +282,7 @@ static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount,
 
        if (poffset && *pages) {
                memset(page_address(*pages) + poffset, 0,
-                      PAGE_CACHE_SIZE - poffset);
+                      PAGE_SIZE - poffset);
                flush_dcache_page(*pages);
                SetPageUptodate(*pages);
        }
@@ -302,12 +302,12 @@ static int zisofs_readpage(struct file *file, struct page *page)
        int i, pcount, full_page;
        unsigned int zisofs_block_shift = ISOFS_I(inode)->i_format_parm[1];
        unsigned int zisofs_pages_per_cblock =
-               PAGE_CACHE_SHIFT <= zisofs_block_shift ?
-               (1 << (zisofs_block_shift - PAGE_CACHE_SHIFT)) : 0;
+               PAGE_SHIFT <= zisofs_block_shift ?
+               (1 << (zisofs_block_shift - PAGE_SHIFT)) : 0;
        struct page *pages[max_t(unsigned, zisofs_pages_per_cblock, 1)];
        pgoff_t index = page->index, end_index;
 
-       end_index = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+       end_index = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        /*
         * If this page is wholly outside i_size we just return zero;
         * do_generic_file_read() will handle this for us
@@ -318,7 +318,7 @@ static int zisofs_readpage(struct file *file, struct page *page)
                return 0;
        }
 
-       if (PAGE_CACHE_SHIFT <= zisofs_block_shift) {
+       if (PAGE_SHIFT <= zisofs_block_shift) {
                /* We have already been given one page; this is the one
                   we must do. */
                full_page = index & (zisofs_pages_per_cblock - 1);
@@ -351,7 +351,7 @@ static int zisofs_readpage(struct file *file, struct page *page)
                        kunmap(pages[i]);
                        unlock_page(pages[i]);
                        if (i != full_page)
-                               page_cache_release(pages[i]);
+                               put_page(pages[i]);
                }
        }                       
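
zisofs_readpage() above sizes its on-stack page array from the ratio of the compressed-block size to the page size. A worked example, assuming 4 KiB pages and taking a 32 KiB zisofs block as a plausible case rather than a guaranteed default:

    /* zisofs_block_shift = 15 (32 KiB block), PAGE_SHIFT = 12 (4 KiB page):
     * one decompressed block fills 1 << (15 - 12) = 8 page-cache pages. */
    unsigned int pages_per_cblock = 1u << (15 - 12);   /* = 8 */
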
 
index bcd2d41b318a49688f05068bd278145b16544465..131dedc920d8db28b00a19b09dd33ca438a7d81d 100644 (file)
@@ -1021,7 +1021,7 @@ int isofs_get_blocks(struct inode *inode, sector_t iblock,
                 * the page with useless information without generating any
                 * I/O errors.
                 */
-               if (b_off > ((inode->i_size + PAGE_CACHE_SIZE - 1) >> ISOFS_BUFFER_BITS(inode))) {
+               if (b_off > ((inode->i_size + PAGE_SIZE - 1) >> ISOFS_BUFFER_BITS(inode))) {
                        printk(KERN_DEBUG "%s: block >= EOF (%lu, %llu)\n",
                                __func__, b_off,
                                (unsigned long long)inode->i_size);
index 517f2de784cfca75ac91979191b99d41f1a06bd1..2ad98d6e19f43c369d9eb1f65640374e1e133b80 100644 (file)
@@ -81,11 +81,11 @@ static void release_buffer_page(struct buffer_head *bh)
        if (!trylock_page(page))
                goto nope;
 
-       page_cache_get(page);
+       get_page(page);
        __brelse(bh);
        try_to_free_buffers(page);
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
        return;
 
 nope:
index de73a9516a542af75ac8bcf8aa6faa0199af7b3f..435f0b26ac2038e4f8037b5b1f2e4d15dbc9b2d2 100644 (file)
@@ -2221,7 +2221,7 @@ void jbd2_journal_ack_err(journal_t *journal)
 
 int jbd2_journal_blocks_per_page(struct inode *inode)
 {
-       return 1 << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
+       return 1 << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
 }
 
 /*
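
jbd2_journal_blocks_per_page() above is the canonical shift idiom: with 4 KiB pages (PAGE_SHIFT = 12) and 1 KiB journal blocks (s_blocksize_bits = 10) it returns 1 << 2 = 4. A compilable sketch with those assumed constants, not kernel code:

#include <stdio.h>

enum { PAGE_SHIFT = 12, BLOCKSIZE_BITS = 10 };  /* assumed: 4 KiB page, 1 KiB block */

static int blocks_per_page(void)
{
	return 1 << (PAGE_SHIFT - BLOCKSIZE_BITS);
}

int main(void)
{
	printf("%d blocks per page\n", blocks_per_page());  /* prints 4 */
	return 0;
}
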
index 01e4652d88f69c81d6593dd4baabfa63766b9441..67c103867bf8d5d27e84a4eb2ad2c8a09b22d328 100644 (file)
@@ -2263,7 +2263,7 @@ int jbd2_journal_invalidatepage(journal_t *journal,
        struct buffer_head *head, *bh, *next;
        unsigned int stop = offset + length;
        unsigned int curr_off = 0;
-       int partial_page = (offset || length < PAGE_CACHE_SIZE);
+       int partial_page = (offset || length < PAGE_SIZE);
        int may_free = 1;
        int ret = 0;
 
@@ -2272,7 +2272,7 @@ int jbd2_journal_invalidatepage(journal_t *journal,
        if (!page_has_buffers(page))
                return 0;
 
-       BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
+       BUG_ON(stop > PAGE_SIZE || stop < length);
 
        /* We will potentially be playing with lists other than just the
         * data lists (especially for journaled data mode), so be
index 1090eb64b90d66dac39e32677edae8f32ddde02f..9d26b1b9fc014e847f4748e7b29e11b9091e7ef9 100644 (file)
@@ -95,15 +95,15 @@ __jffs2_dbg_fragtree_paranoia_check_nolock(struct jffs2_inode_info *f)
                           rather than mucking around with actually reading the node
                           and checking the compression type, which is the real way
                           to tell a hole node. */
-                       if (frag->ofs & (PAGE_CACHE_SIZE-1) && frag_prev(frag)
-                                       && frag_prev(frag)->size < PAGE_CACHE_SIZE && frag_prev(frag)->node) {
+                       if (frag->ofs & (PAGE_SIZE-1) && frag_prev(frag)
+                                       && frag_prev(frag)->size < PAGE_SIZE && frag_prev(frag)->node) {
                                JFFS2_ERROR("REF_PRISTINE node at 0x%08x had a previous non-hole frag in the same page. Tell dwmw2.\n",
                                        ref_offset(fn->raw));
                                bitched = 1;
                        }
 
-                       if ((frag->ofs+frag->size) & (PAGE_CACHE_SIZE-1) && frag_next(frag)
-                                       && frag_next(frag)->size < PAGE_CACHE_SIZE && frag_next(frag)->node) {
+                       if ((frag->ofs+frag->size) & (PAGE_SIZE-1) && frag_next(frag)
+                                       && frag_next(frag)->size < PAGE_SIZE && frag_next(frag)->node) {
                                JFFS2_ERROR("REF_PRISTINE node at 0x%08x (%08x-%08x) had a following non-hole frag in the same page. Tell dwmw2.\n",
                                       ref_offset(fn->raw), frag->ofs, frag->ofs+frag->size);
                                bitched = 1;
index cad86bac345305e5c3231589c63051cba0a2da6c..0e62dec3effce59d7ce8e5069191e3321643d45b 100644 (file)
@@ -87,14 +87,15 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
        int ret;
 
        jffs2_dbg(2, "%s(): ino #%lu, page at offset 0x%lx\n",
-                 __func__, inode->i_ino, pg->index << PAGE_CACHE_SHIFT);
+                 __func__, inode->i_ino, pg->index << PAGE_SHIFT);
 
        BUG_ON(!PageLocked(pg));
 
        pg_buf = kmap(pg);
        /* FIXME: Can kmap fail? */
 
-       ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE);
+       ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_SHIFT,
+                                    PAGE_SIZE);
 
        if (ret) {
                ClearPageUptodate(pg);
@@ -137,8 +138,8 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
        struct page *pg;
        struct inode *inode = mapping->host;
        struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
-       pgoff_t index = pos >> PAGE_CACHE_SHIFT;
-       uint32_t pageofs = index << PAGE_CACHE_SHIFT;
+       pgoff_t index = pos >> PAGE_SHIFT;
+       uint32_t pageofs = index << PAGE_SHIFT;
        int ret = 0;
 
        pg = grab_cache_page_write_begin(mapping, index, flags);
@@ -230,7 +231,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
 
 out_page:
        unlock_page(pg);
-       page_cache_release(pg);
+       put_page(pg);
        return ret;
 }
 
@@ -245,14 +246,14 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
        struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
        struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
        struct jffs2_raw_inode *ri;
-       unsigned start = pos & (PAGE_CACHE_SIZE - 1);
+       unsigned start = pos & (PAGE_SIZE - 1);
        unsigned end = start + copied;
        unsigned aligned_start = start & ~3;
        int ret = 0;
        uint32_t writtenlen = 0;
 
        jffs2_dbg(1, "%s(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n",
-                 __func__, inode->i_ino, pg->index << PAGE_CACHE_SHIFT,
+                 __func__, inode->i_ino, pg->index << PAGE_SHIFT,
                  start, end, pg->flags);
 
        /* We need to avoid deadlock with page_cache_read() in
@@ -261,7 +262,7 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
           to re-lock it. */
        BUG_ON(!PageUptodate(pg));
 
-       if (end == PAGE_CACHE_SIZE) {
+       if (end == PAGE_SIZE) {
                /* When writing out the end of a page, write out the
                   _whole_ page. This helps to reduce the number of
                   nodes in files which have many short writes, like
@@ -275,7 +276,7 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
                jffs2_dbg(1, "%s(): Allocation of raw inode failed\n",
                          __func__);
                unlock_page(pg);
-               page_cache_release(pg);
+               put_page(pg);
                return -ENOMEM;
        }
 
@@ -292,7 +293,7 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
        kmap(pg);
 
        ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + aligned_start,
-                                     (pg->index << PAGE_CACHE_SHIFT) + aligned_start,
+                                     (pg->index << PAGE_SHIFT) + aligned_start,
                                      end - aligned_start, &writtenlen);
 
        kunmap(pg);
@@ -329,6 +330,6 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
        jffs2_dbg(1, "%s() returning %d\n",
                  __func__, writtenlen > 0 ? writtenlen : ret);
        unlock_page(pg);
-       page_cache_release(pg);
+       put_page(pg);
        return writtenlen > 0 ? writtenlen : ret;
 }
index bead25ae8fe4a563d257995102da2cbe7336eeb3..ae2ebb26b4468fdf4d1c528b65db24d2618c1ead 100644 (file)
@@ -586,8 +586,8 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
                goto out_root;
 
        sb->s_maxbytes = 0xFFFFFFFF;
-       sb->s_blocksize = PAGE_CACHE_SIZE;
-       sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+       sb->s_blocksize = PAGE_SIZE;
+       sb->s_blocksize_bits = PAGE_SHIFT;
        sb->s_magic = JFFS2_SUPER_MAGIC;
        if (!(sb->s_flags & MS_RDONLY))
                jffs2_start_garbage_collect_thread(c);
@@ -685,7 +685,7 @@ unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
        struct inode *inode = OFNI_EDONI_2SFFJ(f);
        struct page *pg;
 
-       pg = read_cache_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
+       pg = read_cache_page(inode->i_mapping, offset >> PAGE_SHIFT,
                             (void *)jffs2_do_readpage_unlock, inode);
        if (IS_ERR(pg))
                return (void *)pg;
@@ -701,7 +701,7 @@ void jffs2_gc_release_page(struct jffs2_sb_info *c,
        struct page *pg = (void *)*priv;
 
        kunmap(pg);
-       page_cache_release(pg);
+       put_page(pg);
 }
 
 static int jffs2_flash_setup(struct jffs2_sb_info *c) {
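
The page_cache_get()/page_cache_release() calls converted throughout this series were already defined in pagemap.h as plain aliases for get_page()/put_page(), so only the spelling changes. The invariant is untouched: every reference taken, whether by a lookup such as the read_cache_page() call above or by an explicit get, must be dropped exactly once on every exit path. A toy model of that pairing rule (struct page refcounting is only mimicked here, nothing kernel-specific):

#include <assert.h>

struct page { int refcount; };                 /* toy stand-in */

static void get_page(struct page *p) { p->refcount++; }

static void put_page(struct page *p)
{
	p->refcount--;
	assert(p->refcount >= 0);
}

int main(void)
{
	struct page pg = { .refcount = 1 };    /* as returned by a cache lookup */

	get_page(&pg);                         /* caller pins the page */
	/* ... kmap(), use, kunmap() ... */
	put_page(&pg);                         /* drop the caller's reference */
	put_page(&pg);                         /* drop the lookup's reference */
	assert(pg.refcount == 0);              /* balanced: page may be freed */
	return 0;
}
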
index 7e553f286775f7d20ea73b8f7982b51264ba1e3c..9ed0f26cf0238f2d4c6de247da6121abe9059ea3 100644 (file)
@@ -552,7 +552,7 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c,  struct jffs2_era
                                goto upnout;
                }
                /* We found a datanode. Do the GC */
-               if((start >> PAGE_CACHE_SHIFT) < ((end-1) >> PAGE_CACHE_SHIFT)) {
+               if((start >> PAGE_SHIFT) < ((end-1) >> PAGE_SHIFT)) {
                        /* It crosses a page boundary. Therefore, it must be a hole. */
                        ret = jffs2_garbage_collect_hole(c, jeb, f, fn, start, end);
                } else {
@@ -1192,8 +1192,8 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
                struct jffs2_node_frag *frag;
                uint32_t min, max;
 
-               min = start & ~(PAGE_CACHE_SIZE-1);
-               max = min + PAGE_CACHE_SIZE;
+               min = start & ~(PAGE_SIZE-1);
+               max = min + PAGE_SIZE;
 
                frag = jffs2_lookup_node_frag(&f->fragtree, start);
 
@@ -1351,7 +1351,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
                cdatalen = min_t(uint32_t, alloclen - sizeof(ri), end - offset);
                datalen = end - offset;
 
-               writebuf = pg_ptr + (offset & (PAGE_CACHE_SIZE -1));
+               writebuf = pg_ptr + (offset & (PAGE_SIZE -1));
 
                comprtype = jffs2_compress(c, f, writebuf, &comprbuf, &datalen, &cdatalen);
 
index 9a5449bc3afb0b3b784bfb04c69b011219f013d2..b86c78d178c60a3af10f6ae4c688d6acce8f8dbf 100644 (file)
@@ -90,7 +90,7 @@ uint32_t jffs2_truncate_fragtree(struct jffs2_sb_info *c, struct rb_root *list,
 
        /* If the last fragment starts at the RAM page boundary, it is
         * REF_PRISTINE irrespective of its size. */
-       if (frag->node && (frag->ofs & (PAGE_CACHE_SIZE - 1)) == 0) {
+       if (frag->node && (frag->ofs & (PAGE_SIZE - 1)) == 0) {
                dbg_fragtree2("marking the last fragment 0x%08x-0x%08x REF_PRISTINE.\n",
                        frag->ofs, frag->ofs + frag->size);
                frag->node->raw->flash_offset = ref_offset(frag->node->raw) | REF_PRISTINE;
@@ -237,7 +237,7 @@ static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *r
                   If so, both 'this' and the new node get marked REF_NORMAL so
                   the GC can take a look.
                */
-               if (lastend && (lastend-1) >> PAGE_CACHE_SHIFT == newfrag->ofs >> PAGE_CACHE_SHIFT) {
+               if (lastend && (lastend-1) >> PAGE_SHIFT == newfrag->ofs >> PAGE_SHIFT) {
                        if (this->node)
                                mark_ref_normal(this->node->raw);
                        mark_ref_normal(newfrag->node->raw);
@@ -382,7 +382,7 @@ int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_in
 
        /* If we now share a page with other nodes, mark either previous
           or next node REF_NORMAL, as appropriate.  */
-       if (newfrag->ofs & (PAGE_CACHE_SIZE-1)) {
+       if (newfrag->ofs & (PAGE_SIZE-1)) {
                struct jffs2_node_frag *prev = frag_prev(newfrag);
 
                mark_ref_normal(fn->raw);
@@ -391,7 +391,7 @@ int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_in
                        mark_ref_normal(prev->node->raw);
        }
 
-       if ((newfrag->ofs+newfrag->size) & (PAGE_CACHE_SIZE-1)) {
+       if ((newfrag->ofs+newfrag->size) & (PAGE_SIZE-1)) {
                struct jffs2_node_frag *next = frag_next(newfrag);
 
                if (next) {
index b634de4c81013eec049194c803226d724a3e02fc..7fb187ab2682ff3433f7f26eebb68dd5e8ad9e21 100644 (file)
@@ -172,8 +172,8 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
           beginning of a page and runs to the end of the file, or if
           it's a hole node, mark it REF_PRISTINE, else REF_NORMAL.
        */
-       if ((je32_to_cpu(ri->dsize) >= PAGE_CACHE_SIZE) ||
-           ( ((je32_to_cpu(ri->offset)&(PAGE_CACHE_SIZE-1))==0) &&
+       if ((je32_to_cpu(ri->dsize) >= PAGE_SIZE) ||
+           ( ((je32_to_cpu(ri->offset)&(PAGE_SIZE-1))==0) &&
              (je32_to_cpu(ri->dsize)+je32_to_cpu(ri->offset) ==  je32_to_cpu(ri->isize)))) {
                flash_ofs |= REF_PRISTINE;
        } else {
@@ -366,7 +366,8 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
                        break;
                }
                mutex_lock(&f->sem);
-               datalen = min_t(uint32_t, writelen, PAGE_CACHE_SIZE - (offset & (PAGE_CACHE_SIZE-1)));
+               datalen = min_t(uint32_t, writelen,
+                               PAGE_SIZE - (offset & (PAGE_SIZE-1)));
                cdatalen = min_t(uint32_t, alloclen - sizeof(*ri), datalen);
 
                comprtype = jffs2_compress(c, f, buf, &comprbuf, &datalen, &cdatalen);
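
The re-wrapped min_t() above clamps each chunk so that a single node never crosses a page boundary: PAGE_SIZE - (offset & (PAGE_SIZE - 1)) is the room left in the current page. The same clamp as a standalone sketch (4 KiB page assumed):

#include <stdio.h>

#define PAGE_SIZE 4096UL   /* assumed */

static unsigned long chunk_len(unsigned long offset, unsigned long writelen)
{
	unsigned long room = PAGE_SIZE - (offset & (PAGE_SIZE - 1));
	return writelen < room ? writelen : room;   /* min_t(uint32_t, ...) */
}

int main(void)
{
	/* 100 bytes of room left in the page: a 300-byte write is split */
	printf("%lu\n", chunk_len(4096 - 100, 300));   /* prints 100 */
	printf("%lu\n", chunk_len(0, 300));            /* prints 300 */
	return 0;
}
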
index a3eb316b1ac381b8d6517f1da804d0d9207d12d4..b60e015cc757505096176114980cdc5461ad9239 100644 (file)
@@ -80,7 +80,7 @@ static inline void lock_metapage(struct metapage *mp)
 static struct kmem_cache *metapage_cache;
 static mempool_t *metapage_mempool;
 
-#define MPS_PER_PAGE (PAGE_CACHE_SIZE >> L2PSIZE)
+#define MPS_PER_PAGE (PAGE_SIZE >> L2PSIZE)
 
 #if MPS_PER_PAGE > 1
 
@@ -316,7 +316,7 @@ static void last_write_complete(struct page *page)
        struct metapage *mp;
        unsigned int offset;
 
-       for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
+       for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
                mp = page_to_mp(page, offset);
                if (mp && test_bit(META_io, &mp->flag)) {
                        if (mp->lsn)
@@ -366,12 +366,12 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
        int bad_blocks = 0;
 
        page_start = (sector_t)page->index <<
-                    (PAGE_CACHE_SHIFT - inode->i_blkbits);
+                    (PAGE_SHIFT - inode->i_blkbits);
        BUG_ON(!PageLocked(page));
        BUG_ON(PageWriteback(page));
        set_page_writeback(page);
 
-       for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
+       for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
                mp = page_to_mp(page, offset);
 
                if (!mp || !test_bit(META_dirty, &mp->flag))
@@ -416,7 +416,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
                        bio = NULL;
                } else
                        inc_io(page);
-               xlen = (PAGE_CACHE_SIZE - offset) >> inode->i_blkbits;
+               xlen = (PAGE_SIZE - offset) >> inode->i_blkbits;
                pblock = metapage_get_blocks(inode, lblock, &xlen);
                if (!pblock) {
                        printk(KERN_ERR "JFS: metapage_get_blocks failed\n");
@@ -485,7 +485,7 @@ static int metapage_readpage(struct file *fp, struct page *page)
        struct inode *inode = page->mapping->host;
        struct bio *bio = NULL;
        int block_offset;
-       int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
+       int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        sector_t page_start;    /* address of page in fs blocks */
        sector_t pblock;
        int xlen;
@@ -494,7 +494,7 @@ static int metapage_readpage(struct file *fp, struct page *page)
 
        BUG_ON(!PageLocked(page));
        page_start = (sector_t)page->index <<
-                    (PAGE_CACHE_SHIFT - inode->i_blkbits);
+                    (PAGE_SHIFT - inode->i_blkbits);
 
        block_offset = 0;
        while (block_offset < blocks_per_page) {
@@ -542,7 +542,7 @@ static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
        int ret = 1;
        int offset;
 
-       for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
+       for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
                mp = page_to_mp(page, offset);
 
                if (!mp)
@@ -568,7 +568,7 @@ static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
 static void metapage_invalidatepage(struct page *page, unsigned int offset,
                                    unsigned int length)
 {
-       BUG_ON(offset || length < PAGE_CACHE_SIZE);
+       BUG_ON(offset || length < PAGE_SIZE);
 
        BUG_ON(PageWriteback(page));
 
@@ -599,10 +599,10 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
                 inode->i_ino, lblock, absolute);
 
        l2bsize = inode->i_blkbits;
-       l2BlocksPerPage = PAGE_CACHE_SHIFT - l2bsize;
+       l2BlocksPerPage = PAGE_SHIFT - l2bsize;
        page_index = lblock >> l2BlocksPerPage;
        page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize;
-       if ((page_offset + size) > PAGE_CACHE_SIZE) {
+       if ((page_offset + size) > PAGE_SIZE) {
                jfs_err("MetaData crosses page boundary!!");
                jfs_err("lblock = %lx, size  = %d", lblock, size);
                dump_stack();
@@ -621,7 +621,7 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
                mapping = inode->i_mapping;
        }
 
-       if (new && (PSIZE == PAGE_CACHE_SIZE)) {
+       if (new && (PSIZE == PAGE_SIZE)) {
                page = grab_cache_page(mapping, page_index);
                if (!page) {
                        jfs_err("grab_cache_page failed!");
@@ -693,7 +693,7 @@ unlock:
 void grab_metapage(struct metapage * mp)
 {
        jfs_info("grab_metapage: mp = 0x%p", mp);
-       page_cache_get(mp->page);
+       get_page(mp->page);
        lock_page(mp->page);
        mp->count++;
        lock_metapage(mp);
@@ -706,12 +706,12 @@ void force_metapage(struct metapage *mp)
        jfs_info("force_metapage: mp = 0x%p", mp);
        set_bit(META_forcewrite, &mp->flag);
        clear_bit(META_sync, &mp->flag);
-       page_cache_get(page);
+       get_page(page);
        lock_page(page);
        set_page_dirty(page);
        write_one_page(page, 1);
        clear_bit(META_forcewrite, &mp->flag);
-       page_cache_release(page);
+       put_page(page);
 }
 
 void hold_metapage(struct metapage *mp)
@@ -726,7 +726,7 @@ void put_metapage(struct metapage *mp)
                unlock_page(mp->page);
                return;
        }
-       page_cache_get(mp->page);
+       get_page(mp->page);
        mp->count++;
        lock_metapage(mp);
        unlock_page(mp->page);
@@ -746,7 +746,7 @@ void release_metapage(struct metapage * mp)
        assert(mp->count);
        if (--mp->count || mp->nohomeok) {
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
                return;
        }
 
@@ -764,13 +764,13 @@ void release_metapage(struct metapage * mp)
        drop_metapage(page, mp);
 
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 }
 
 void __invalidate_metapages(struct inode *ip, s64 addr, int len)
 {
        sector_t lblock;
-       int l2BlocksPerPage = PAGE_CACHE_SHIFT - ip->i_blkbits;
+       int l2BlocksPerPage = PAGE_SHIFT - ip->i_blkbits;
        int BlocksPerPage = 1 << l2BlocksPerPage;
        /* All callers are interested in block device's mapping */
        struct address_space *mapping =
@@ -788,7 +788,7 @@ void __invalidate_metapages(struct inode *ip, s64 addr, int len)
                page = find_lock_page(mapping, lblock >> l2BlocksPerPage);
                if (!page)
                        continue;
-               for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
+               for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
                        mp = page_to_mp(page, offset);
                        if (!mp)
                                continue;
@@ -803,7 +803,7 @@ void __invalidate_metapages(struct inode *ip, s64 addr, int len)
                                remove_from_logsync(mp);
                }
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
        }
 }
 
index 337e9e51ac066f578349c15d818ab3441492d07a..a869fb4a20d66d160ea9e10468d081ec79495114 100644 (file)
@@ -106,7 +106,7 @@ static inline void metapage_nohomeok(struct metapage *mp)
        lock_page(page);
        if (!mp->nohomeok++) {
                mark_metapage_dirty(mp);
-               page_cache_get(page);
+               get_page(page);
                wait_on_page_writeback(page);
        }
        unlock_page(page);
@@ -128,7 +128,7 @@ static inline void metapage_wait_for_io(struct metapage *mp)
 static inline void _metapage_homeok(struct metapage *mp)
 {
        if (!--mp->nohomeok)
-               page_cache_release(mp->page);
+               put_page(mp->page);
 }
 
 static inline void metapage_homeok(struct metapage *mp)
index 4f5d85ba8e237e91d3f314b330e53e26ad2cc22b..78d599198bf5bdced8624016056e1d1c37f23849 100644 (file)
@@ -596,7 +596,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
         * Page cache is indexed by long.
         * I would use MAX_LFS_FILESIZE, but it's only half as big
         */
-       sb->s_maxbytes = min(((u64) PAGE_CACHE_SIZE << 32) - 1,
+       sb->s_maxbytes = min(((u64) PAGE_SIZE << 32) - 1,
                             (u64)sb->s_maxbytes);
 #endif
        sb->s_time_gran = 1;
index b67dbccdaf88328bbfb187b7764c5307d3d94b48..f73541fbe7afadaee17dcb038f7be18c21b4d18f 100644 (file)
@@ -138,8 +138,8 @@ static int kernfs_fill_super(struct super_block *sb, unsigned long magic)
        struct dentry *root;
 
        info->sb = sb;
-       sb->s_blocksize = PAGE_CACHE_SIZE;
-       sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+       sb->s_blocksize = PAGE_SIZE;
+       sb->s_blocksize_bits = PAGE_SHIFT;
        sb->s_magic = magic;
        sb->s_op = &kernfs_sops;
        sb->s_time_gran = 1;
index 0ca80b2af42015c309718b3328315471808cfa4c..f3fa82ce9b700b667ce06421197e4db15ae2f52a 100644 (file)
@@ -25,7 +25,7 @@ int simple_getattr(struct vfsmount *mnt, struct dentry *dentry,
 {
        struct inode *inode = d_inode(dentry);
        generic_fillattr(inode, stat);
-       stat->blocks = inode->i_mapping->nrpages << (PAGE_CACHE_SHIFT - 9);
+       stat->blocks = inode->i_mapping->nrpages << (PAGE_SHIFT - 9);
        return 0;
 }
 EXPORT_SYMBOL(simple_getattr);
@@ -33,7 +33,7 @@ EXPORT_SYMBOL(simple_getattr);
 int simple_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
        buf->f_type = dentry->d_sb->s_magic;
-       buf->f_bsize = PAGE_CACHE_SIZE;
+       buf->f_bsize = PAGE_SIZE;
        buf->f_namelen = NAME_MAX;
        return 0;
 }
@@ -395,7 +395,7 @@ int simple_write_begin(struct file *file, struct address_space *mapping,
        struct page *page;
        pgoff_t index;
 
-       index = pos >> PAGE_CACHE_SHIFT;
+       index = pos >> PAGE_SHIFT;
 
        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page)
@@ -403,10 +403,10 @@ int simple_write_begin(struct file *file, struct address_space *mapping,
 
        *pagep = page;
 
-       if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
-               unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+       if (!PageUptodate(page) && (len != PAGE_SIZE)) {
+               unsigned from = pos & (PAGE_SIZE - 1);
 
-               zero_user_segments(page, 0, from, from + len, PAGE_CACHE_SIZE);
+               zero_user_segments(page, 0, from, from + len, PAGE_SIZE);
        }
        return 0;
 }
@@ -442,7 +442,7 @@ int simple_write_end(struct file *file, struct address_space *mapping,
 
        /* zero the stale part of the page if we did a short copy */
        if (copied < len) {
-               unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+               unsigned from = pos & (PAGE_SIZE - 1);
 
                zero_user(page, from + copied, len - copied);
        }
@@ -458,7 +458,7 @@ int simple_write_end(struct file *file, struct address_space *mapping,
 
        set_page_dirty(page);
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 
        return copied;
 }
@@ -477,8 +477,8 @@ int simple_fill_super(struct super_block *s, unsigned long magic,
        struct dentry *dentry;
        int i;
 
-       s->s_blocksize = PAGE_CACHE_SIZE;
-       s->s_blocksize_bits = PAGE_CACHE_SHIFT;
+       s->s_blocksize = PAGE_SIZE;
+       s->s_blocksize_bits = PAGE_SHIFT;
        s->s_magic = magic;
        s->s_op = &simple_super_operations;
        s->s_time_gran = 1;
@@ -994,12 +994,12 @@ int generic_check_addressable(unsigned blocksize_bits, u64 num_blocks)
 {
        u64 last_fs_block = num_blocks - 1;
        u64 last_fs_page =
-               last_fs_block >> (PAGE_CACHE_SHIFT - blocksize_bits);
+               last_fs_block >> (PAGE_SHIFT - blocksize_bits);
 
        if (unlikely(num_blocks == 0))
                return 0;
 
-       if ((blocksize_bits < 9) || (blocksize_bits > PAGE_CACHE_SHIFT))
+       if ((blocksize_bits < 9) || (blocksize_bits > PAGE_SHIFT))
                return -EINVAL;
 
        if ((last_fs_block > (sector_t)(~0ULL) >> (blocksize_bits - 9)) ||
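
In the simple_write_begin() hunk above, a not-yet-uptodate page about to receive a short write has everything outside [from, from + len) cleared, so stale memory is never exposed around the copied bytes. The zero_user_segments(page, 0, from, from + len, PAGE_SIZE) call is equivalent to the following sketch (4 KiB page assumed, plain memory instead of a highmem-safe mapping):

#include <string.h>

#define PAGE_SIZE 4096U   /* assumed */

/* Zero the two segments [0, from) and [from + len, PAGE_SIZE). */
static void zero_outside(char page[PAGE_SIZE], unsigned from, unsigned len)
{
	memset(page, 0, from);
	memset(page + from + len, 0, PAGE_SIZE - from - len);
}

int main(void)
{
	static char page[PAGE_SIZE];

	zero_outside(page, 100, 50);   /* prepare for a 50-byte write at 100 */
	return 0;
}
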
index a709d80c8ebcc783fe21b34b8f77424edb0b94ea..cc26f8f215f5058b1face75ef5460694bc47fa48 100644 (file)
@@ -64,7 +64,7 @@ static void writeseg_end_io(struct bio *bio)
 
        bio_for_each_segment_all(bvec, bio, i) {
                end_page_writeback(bvec->bv_page);
-               page_cache_release(bvec->bv_page);
+               put_page(bvec->bv_page);
        }
        bio_put(bio);
        if (atomic_dec_and_test(&super->s_pending_writes))
index 9c501449450dc9be6891e5d9c1a035ca31b5687b..b76a62b1978fd699bcf139ef1500720ae7e19cfa 100644 (file)
@@ -46,9 +46,9 @@ static int loffs_mtd_write(struct super_block *sb, loff_t ofs, size_t len,
 
        BUG_ON((ofs >= mtd->size) || (len > mtd->size - ofs));
        BUG_ON(ofs != (ofs >> super->s_writeshift) << super->s_writeshift);
-       BUG_ON(len > PAGE_CACHE_SIZE);
-       page_start = ofs & PAGE_CACHE_MASK;
-       page_end = PAGE_CACHE_ALIGN(ofs + len) - 1;
+       BUG_ON(len > PAGE_SIZE);
+       page_start = ofs & PAGE_MASK;
+       page_end = PAGE_ALIGN(ofs + len) - 1;
        ret = mtd_write(mtd, ofs, len, &retlen, buf);
        if (ret || (retlen != len))
                return -EIO;
@@ -82,7 +82,7 @@ static int logfs_mtd_erase_mapping(struct super_block *sb, loff_t ofs,
                if (!page)
                        continue;
                memset(page_address(page), 0xFF, PAGE_SIZE);
-               page_cache_release(page);
+               put_page(page);
        }
        return 0;
 }
@@ -195,7 +195,7 @@ static int __logfs_mtd_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
                err = loffs_mtd_write(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
                                        page_address(page));
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
                if (err)
                        return err;
        }
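
The MTD write path above brackets the I/O with page_start = ofs & PAGE_MASK and page_end = PAGE_ALIGN(ofs + len) - 1, rounding the affected byte range outward to whole pages; PAGE_ALIGN rounds up to the next page boundary. A worked sketch (4 KiB page assumed):

#include <assert.h>

#define PAGE_SHIFT 12                   /* assumed: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long ofs = 0x1234, len = 0x200;
	unsigned long page_start = ofs & PAGE_MASK;            /* 0x1000 */
	unsigned long page_end   = PAGE_ALIGN(ofs + len) - 1;  /* 0x1fff */

	assert(page_start == 0x1000 && page_end == 0x1fff);
	return 0;
}
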
index 542468e9bfb492f8333221a80bcfbcc4b01aa39a..ddbed2be5366ecfab739ec61fcae09b3859d3c06 100644 (file)
@@ -183,7 +183,7 @@ static struct page *logfs_get_dd_page(struct inode *dir, struct dentry *dentry)
                if (name->len != be16_to_cpu(dd->namelen) ||
                                memcmp(name->name, dd->name, name->len)) {
                        kunmap_atomic(dd);
-                       page_cache_release(page);
+                       put_page(page);
                        continue;
                }
 
@@ -238,7 +238,7 @@ static int logfs_unlink(struct inode *dir, struct dentry *dentry)
                return PTR_ERR(page);
        }
        index = page->index;
-       page_cache_release(page);
+       put_page(page);
 
        mutex_lock(&super->s_dirop_mutex);
        logfs_add_transaction(dir, ta);
@@ -316,7 +316,7 @@ static int logfs_readdir(struct file *file, struct dir_context *ctx)
                                be16_to_cpu(dd->namelen),
                                be64_to_cpu(dd->ino), dd->type);
                kunmap(page);
-               page_cache_release(page);
+               put_page(page);
                if (full)
                        break;
        }
@@ -349,7 +349,7 @@ static struct dentry *logfs_lookup(struct inode *dir, struct dentry *dentry,
        dd = kmap_atomic(page);
        ino = be64_to_cpu(dd->ino);
        kunmap_atomic(dd);
-       page_cache_release(page);
+       put_page(page);
 
        inode = logfs_iget(dir->i_sb, ino);
        if (IS_ERR(inode))
@@ -392,7 +392,7 @@ static int logfs_write_dir(struct inode *dir, struct dentry *dentry,
 
                err = logfs_write_buf(dir, page, WF_LOCK);
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
                if (!err)
                        grow_dir(dir, index);
                return err;
@@ -561,7 +561,7 @@ static int logfs_get_dd(struct inode *dir, struct dentry *dentry,
        map = kmap_atomic(page);
        memcpy(dd, map, sizeof(*dd));
        kunmap_atomic(map);
-       page_cache_release(page);
+       put_page(page);
        return 0;
 }
 
index 61eaeb1b6cac10e664539b2132607180d8fe24d0..f01ddfb1a03b6ecc5d122b0bd94c6e301a1f6fa3 100644 (file)
@@ -15,21 +15,21 @@ static int logfs_write_begin(struct file *file, struct address_space *mapping,
 {
        struct inode *inode = mapping->host;
        struct page *page;
-       pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+       pgoff_t index = pos >> PAGE_SHIFT;
 
        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page)
                return -ENOMEM;
        *pagep = page;
 
-       if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
+       if ((len == PAGE_SIZE) || PageUptodate(page))
                return 0;
-       if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
-               unsigned start = pos & (PAGE_CACHE_SIZE - 1);
+       if ((pos & PAGE_MASK) >= i_size_read(inode)) {
+               unsigned start = pos & (PAGE_SIZE - 1);
                unsigned end = start + len;
 
                /* Reading beyond i_size is simple: memset to zero */
-               zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
+               zero_user_segments(page, 0, start, end, PAGE_SIZE);
                return 0;
        }
        return logfs_readpage_nolock(page);
@@ -41,11 +41,11 @@ static int logfs_write_end(struct file *file, struct address_space *mapping,
 {
        struct inode *inode = mapping->host;
        pgoff_t index = page->index;
-       unsigned start = pos & (PAGE_CACHE_SIZE - 1);
+       unsigned start = pos & (PAGE_SIZE - 1);
        unsigned end = start + copied;
        int ret = 0;
 
-       BUG_ON(PAGE_CACHE_SIZE != inode->i_sb->s_blocksize);
+       BUG_ON(PAGE_SIZE != inode->i_sb->s_blocksize);
        BUG_ON(page->index > I3_BLOCKS);
 
        if (copied < len) {
@@ -61,8 +61,8 @@ static int logfs_write_end(struct file *file, struct address_space *mapping,
        if (copied == 0)
                goto out; /* FIXME: do we need to update inode? */
 
-       if (i_size_read(inode) < (index << PAGE_CACHE_SHIFT) + end) {
-               i_size_write(inode, (index << PAGE_CACHE_SHIFT) + end);
+       if (i_size_read(inode) < (index << PAGE_SHIFT) + end) {
+               i_size_write(inode, (index << PAGE_SHIFT) + end);
                mark_inode_dirty_sync(inode);
        }
 
@@ -75,7 +75,7 @@ static int logfs_write_end(struct file *file, struct address_space *mapping,
        }
 out:
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
        return ret ? ret : copied;
 }
 
@@ -118,7 +118,7 @@ static int logfs_writepage(struct page *page, struct writeback_control *wbc)
 {
        struct inode *inode = page->mapping->host;
        loff_t i_size = i_size_read(inode);
-       pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+       pgoff_t end_index = i_size >> PAGE_SHIFT;
        unsigned offset;
        u64 bix;
        level_t level;
@@ -142,7 +142,7 @@ static int logfs_writepage(struct page *page, struct writeback_control *wbc)
                return __logfs_writepage(page);
 
         /* Is the page fully outside i_size? (truncate in progress) */
-       offset = i_size & (PAGE_CACHE_SIZE-1);
+       offset = i_size & (PAGE_SIZE-1);
        if (bix > end_index || offset == 0) {
                unlock_page(page);
                return 0; /* don't care */
@@ -155,7 +155,7 @@ static int logfs_writepage(struct page *page, struct writeback_control *wbc)
         * the  page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
-       zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+       zero_user_segment(page, offset, PAGE_SIZE);
        return __logfs_writepage(page);
 }
 
index 20973c9e52f807cf462766e6299e4c32177f1e3d..3fb8c6d67303e9ed6a842152dfdc3c648009decb 100644 (file)
@@ -281,7 +281,7 @@ static struct page *logfs_get_read_page(struct inode *inode, u64 bix,
 static void logfs_put_read_page(struct page *page)
 {
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 }
 
 static void logfs_lock_write_page(struct page *page)
@@ -323,7 +323,7 @@ repeat:
                        return NULL;
                err = add_to_page_cache_lru(page, mapping, index, GFP_NOFS);
                if (unlikely(err)) {
-                       page_cache_release(page);
+                       put_page(page);
                        if (err == -EEXIST)
                                goto repeat;
                        return NULL;
@@ -342,7 +342,7 @@ static void logfs_unlock_write_page(struct page *page)
 static void logfs_put_write_page(struct page *page)
 {
        logfs_unlock_write_page(page);
-       page_cache_release(page);
+       put_page(page);
 }
 
 static struct page *logfs_get_page(struct inode *inode, u64 bix, level_t level,
@@ -562,7 +562,7 @@ static void indirect_free_block(struct super_block *sb,
 
        if (PagePrivate(page)) {
                ClearPagePrivate(page);
-               page_cache_release(page);
+               put_page(page);
                set_page_private(page, 0);
        }
        __free_block(sb, block);
@@ -655,7 +655,7 @@ static void alloc_data_block(struct inode *inode, struct page *page)
        block->page = page;
 
        SetPagePrivate(page);
-       page_cache_get(page);
+       get_page(page);
        set_page_private(page, (unsigned long) block);
 
        block->ops = &indirect_block_ops;
@@ -709,7 +709,7 @@ static u64 block_get_pointer(struct page *page, int index)
 
 static int logfs_read_empty(struct page *page)
 {
-       zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+       zero_user_segment(page, 0, PAGE_SIZE);
        return 0;
 }
 
@@ -1660,7 +1660,7 @@ static int truncate_data_block(struct inode *inode, struct page *page,
        if (err)
                return err;
 
-       zero_user_segment(page, size - pageofs, PAGE_CACHE_SIZE);
+       zero_user_segment(page, size - pageofs, PAGE_SIZE);
        return logfs_segment_write(inode, page, shadow);
 }
 
@@ -1919,7 +1919,7 @@ static void move_page_to_inode(struct inode *inode, struct page *page)
        block->page = NULL;
        if (PagePrivate(page)) {
                ClearPagePrivate(page);
-               page_cache_release(page);
+               put_page(page);
                set_page_private(page, 0);
        }
 }
@@ -1940,7 +1940,7 @@ static void move_inode_to_page(struct page *page, struct inode *inode)
 
        if (!PagePrivate(page)) {
                SetPagePrivate(page);
-               page_cache_get(page);
+               get_page(page);
                set_page_private(page, (unsigned long) block);
        }
 
@@ -1971,7 +1971,7 @@ int logfs_read_inode(struct inode *inode)
        logfs_disk_to_inode(di, inode);
        kunmap_atomic(di);
        move_page_to_inode(inode, page);
-       page_cache_release(page);
+       put_page(page);
        return 0;
 }
 
index d270e4b2ab6b0f9ae448ef15985c8ce459c3b2d6..1efd6055f4b0582577bd70cc6c6e32b9e2e8eed4 100644 (file)
@@ -90,9 +90,9 @@ int __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len,
 
                if (!PagePrivate(page)) {
                        SetPagePrivate(page);
-                       page_cache_get(page);
+                       get_page(page);
                }
-               page_cache_release(page);
+               put_page(page);
 
                buf += copylen;
                len -= copylen;
@@ -117,9 +117,9 @@ static void pad_partial_page(struct logfs_area *area)
                memset(page_address(page) + offset, 0xff, len);
                if (!PagePrivate(page)) {
                        SetPagePrivate(page);
-                       page_cache_get(page);
+                       get_page(page);
                }
-               page_cache_release(page);
+               put_page(page);
        }
 }
 
@@ -129,20 +129,20 @@ static void pad_full_pages(struct logfs_area *area)
        struct logfs_super *super = logfs_super(sb);
        u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes);
        u32 len = super->s_segsize - area->a_used_bytes;
-       pgoff_t index = PAGE_CACHE_ALIGN(ofs) >> PAGE_CACHE_SHIFT;
-       pgoff_t no_indizes = len >> PAGE_CACHE_SHIFT;
+       pgoff_t index = PAGE_ALIGN(ofs) >> PAGE_SHIFT;
+       pgoff_t no_indizes = len >> PAGE_SHIFT;
        struct page *page;
 
        while (no_indizes) {
                page = get_mapping_page(sb, index, 0);
                BUG_ON(!page); /* FIXME: reserve a pool */
                SetPageUptodate(page);
-               memset(page_address(page), 0xff, PAGE_CACHE_SIZE);
+               memset(page_address(page), 0xff, PAGE_SIZE);
                if (!PagePrivate(page)) {
                        SetPagePrivate(page);
-                       page_cache_get(page);
+                       get_page(page);
                }
-               page_cache_release(page);
+               put_page(page);
                index++;
                no_indizes--;
        }
@@ -411,7 +411,7 @@ int wbuf_read(struct super_block *sb, u64 ofs, size_t len, void *buf)
                if (IS_ERR(page))
                        return PTR_ERR(page);
                memcpy(buf, page_address(page) + offset, copylen);
-               page_cache_release(page);
+               put_page(page);
 
                buf += copylen;
                len -= copylen;
@@ -499,7 +499,7 @@ static void move_btree_to_page(struct inode *inode, struct page *page,
 
        if (!PagePrivate(page)) {
                SetPagePrivate(page);
-               page_cache_get(page);
+               get_page(page);
                set_page_private(page, (unsigned long) block);
        }
        block->ops = &indirect_block_ops;
@@ -554,7 +554,7 @@ void move_page_to_btree(struct page *page)
 
        if (PagePrivate(page)) {
                ClearPagePrivate(page);
-               page_cache_release(page);
+               put_page(page);
                set_page_private(page, 0);
        }
        block->ops = &btree_block_ops;
@@ -723,9 +723,9 @@ void freeseg(struct super_block *sb, u32 segno)
                        continue;
                if (PagePrivate(page)) {
                        ClearPagePrivate(page);
-                       page_cache_release(page);
+                       put_page(page);
                }
-               page_cache_release(page);
+               put_page(page);
        }
 }
 
index 54360293bcb5cd0680c3042e6f0b9b87e342c649..5751082dba52b1ffa2a85d2d89e677475f57513a 100644 (file)
@@ -48,7 +48,7 @@ void emergency_read_end(struct page *page)
        if (page == emergency_page)
                mutex_unlock(&emergency_mutex);
        else
-               page_cache_release(page);
+               put_page(page);
 }
 
 static void dump_segfile(struct super_block *sb)
@@ -206,7 +206,7 @@ static int write_one_sb(struct super_block *sb,
        logfs_set_segment_erased(sb, segno, ec, 0);
        logfs_write_ds(sb, ds, segno, ec);
        err = super->s_devops->write_sb(sb, page);
-       page_cache_release(page);
+       put_page(page);
        return err;
 }
 
@@ -366,24 +366,24 @@ static struct page *find_super_block(struct super_block *sb)
                return NULL;
        last = super->s_devops->find_last_sb(sb, &super->s_sb_ofs[1]);
        if (!last || IS_ERR(last)) {
-               page_cache_release(first);
+               put_page(first);
                return NULL;
        }
 
        if (!logfs_check_ds(page_address(first))) {
-               page_cache_release(last);
+               put_page(last);
                return first;
        }
 
        /* First one didn't work, try the second superblock */
        if (!logfs_check_ds(page_address(last))) {
-               page_cache_release(first);
+               put_page(first);
                return last;
        }
 
        /* Neither worked, sorry folks */
-       page_cache_release(first);
-       page_cache_release(last);
+       put_page(first);
+       put_page(last);
        return NULL;
 }
 
@@ -425,7 +425,7 @@ static int __logfs_read_sb(struct super_block *sb)
        super->s_data_levels = ds->ds_data_levels;
        super->s_total_levels = super->s_ifile_levels + super->s_iblock_levels
                + super->s_data_levels;
-       page_cache_release(page);
+       put_page(page);
        return 0;
 }
 
index d19ac258105aadb44382650de7b50504b6eaf237..33957c07cd11a3b74f4eb6f4b563af07c17c1d31 100644 (file)
@@ -28,7 +28,7 @@ const struct file_operations minix_dir_operations = {
 static inline void dir_put_page(struct page *page)
 {
        kunmap(page);
-       page_cache_release(page);
+       put_page(page);
 }
 
 /*
@@ -38,10 +38,10 @@ static inline void dir_put_page(struct page *page)
 static unsigned
 minix_last_byte(struct inode *inode, unsigned long page_nr)
 {
-       unsigned last_byte = PAGE_CACHE_SIZE;
+       unsigned last_byte = PAGE_SIZE;
 
-       if (page_nr == (inode->i_size >> PAGE_CACHE_SHIFT))
-               last_byte = inode->i_size & (PAGE_CACHE_SIZE - 1);
+       if (page_nr == (inode->i_size >> PAGE_SHIFT))
+               last_byte = inode->i_size & (PAGE_SIZE - 1);
        return last_byte;
 }
 
@@ -92,8 +92,8 @@ static int minix_readdir(struct file *file, struct dir_context *ctx)
        if (pos >= inode->i_size)
                return 0;
 
-       offset = pos & ~PAGE_CACHE_MASK;
-       n = pos >> PAGE_CACHE_SHIFT;
+       offset = pos & ~PAGE_MASK;
+       n = pos >> PAGE_SHIFT;
 
        for ( ; n < npages; n++, offset = 0) {
                char *p, *kaddr, *limit;
@@ -229,7 +229,7 @@ int minix_add_link(struct dentry *dentry, struct inode *inode)
                lock_page(page);
                kaddr = (char*)page_address(page);
                dir_end = kaddr + minix_last_byte(dir, n);
-               limit = kaddr + PAGE_CACHE_SIZE - sbi->s_dirsize;
+               limit = kaddr + PAGE_SIZE - sbi->s_dirsize;
                for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
                        de = (minix_dirent *)p;
                        de3 = (minix3_dirent *)p;
@@ -327,7 +327,7 @@ int minix_make_empty(struct inode *inode, struct inode *dir)
        }
 
        kaddr = kmap_atomic(page);
-       memset(kaddr, 0, PAGE_CACHE_SIZE);
+       memset(kaddr, 0, PAGE_SIZE);
 
        if (sbi->s_version == MINIX_V3) {
                minix3_dirent *de3 = (minix3_dirent *)kaddr;
@@ -350,7 +350,7 @@ int minix_make_empty(struct inode *inode, struct inode *dir)
 
        err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize);
 fail:
-       page_cache_release(page);
+       put_page(page);
        return err;
 }
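
minix_last_byte() above answers how many bytes of a directory page are valid: every page holds PAGE_SIZE bytes except the one containing EOF, which holds i_size modulo PAGE_SIZE. The same logic as a standalone sketch (4 KiB page assumed):

#include <assert.h>

#define PAGE_SHIFT 12                   /* assumed: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static unsigned last_byte(unsigned long long i_size, unsigned long page_nr)
{
	unsigned last = PAGE_SIZE;

	if (page_nr == (i_size >> PAGE_SHIFT))   /* page containing EOF */
		last = i_size & (PAGE_SIZE - 1);
	return last;
}

int main(void)
{
	assert(last_byte(10000, 0) == 4096);   /* full page */
	assert(last_byte(10000, 2) == 1808);   /* 10000 - 2*4096 */
	return 0;
}
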
 
index a795a11e50c728a9d31511fbc2849e8fb6fbeb8c..2887d1d95ce244e8ad59cd2585676888277958b3 100644 (file)
@@ -243,11 +243,11 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry,
 out_dir:
        if (dir_de) {
                kunmap(dir_page);
-               page_cache_release(dir_page);
+               put_page(dir_page);
        }
 out_old:
        kunmap(old_page);
-       page_cache_release(old_page);
+       put_page(old_page);
 out:
        return err;
 }
index 6bd9fd90964e2670808ee1d586b4080d68f2a1f1..eedc644b78d78338ebb960339f3d1d224b837b9b 100644 (file)
@@ -107,7 +107,7 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
                 * don't make any buffers if there is only one buffer on
                 * the page and the page just needs to be set up to date
                 */
-               if (inode->i_blkbits == PAGE_CACHE_SHIFT && 
+               if (inode->i_blkbits == PAGE_SHIFT &&
                    buffer_uptodate(bh)) {
                        SetPageUptodate(page);    
                        return;
@@ -145,7 +145,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 {
        struct inode *inode = page->mapping->host;
        const unsigned blkbits = inode->i_blkbits;
-       const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
+       const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
        const unsigned blocksize = 1 << blkbits;
        sector_t block_in_file;
        sector_t last_block;
@@ -162,7 +162,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
        if (page_has_buffers(page))
                goto confused;
 
-       block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
+       block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
        last_block = block_in_file + nr_pages * blocks_per_page;
        last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
        if (last_block > last_block_in_file)
@@ -249,7 +249,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
        }
 
        if (first_hole != blocks_per_page) {
-               zero_user_segment(page, first_hole << blkbits, PAGE_CACHE_SIZE);
+               zero_user_segment(page, first_hole << blkbits, PAGE_SIZE);
                if (first_hole == 0) {
                        SetPageUptodate(page);
                        unlock_page(page);
@@ -331,7 +331,7 @@ confused:
  *
  * then this code just gives up and calls the buffer_head-based read function.
  * It does handle a page which has holes at the end - that is a common case:
- * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
+ * the end-of-file on blocksize < PAGE_SIZE setups.
  *
  * BH_Boundary explanation:
  *
@@ -380,7 +380,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
                                        &first_logical_block,
                                        get_block, gfp);
                }
-               page_cache_release(page);
+               put_page(page);
        }
        BUG_ON(!list_empty(pages));
        if (bio)
@@ -472,7 +472,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
        struct inode *inode = page->mapping->host;
        const unsigned blkbits = inode->i_blkbits;
        unsigned long end_index;
-       const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
+       const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
        sector_t last_block;
        sector_t block_in_file;
        sector_t blocks[MAX_BUF_PER_PAGE];
@@ -542,7 +542,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
         * The page has no buffers: map it to disk
         */
        BUG_ON(!PageUptodate(page));
-       block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
+       block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
        last_block = (i_size - 1) >> blkbits;
        map_bh.b_page = page;
        for (page_block = 0; page_block < blocks_per_page; ) {
@@ -574,7 +574,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
        first_unmapped = page_block;
 
 page_is_mapped:
-       end_index = i_size >> PAGE_CACHE_SHIFT;
+       end_index = i_size >> PAGE_SHIFT;
        if (page->index >= end_index) {
                /*
                 * The page straddles i_size.  It must be zeroed out on each
@@ -584,11 +584,11 @@ page_is_mapped:
                 * is zeroed when mapped, and writes to that region are not
                 * written out to the file."
                 */
-               unsigned offset = i_size & (PAGE_CACHE_SIZE - 1);
+               unsigned offset = i_size & (PAGE_SIZE - 1);
 
                if (page->index > end_index || !offset)
                        goto confused;
-               zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+               zero_user_segment(page, offset, PAGE_SIZE);
        }
 
        /*
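
The straddle test in __mpage_writepage() above is worth spelling out: end_index = i_size >> PAGE_SHIFT is the index of the page containing EOF when i_size is not page-aligned, and offset = i_size & (PAGE_SIZE - 1) is how many bytes of that page are valid; only that page, and only when offset is non-zero, needs its tail zeroed before writeout. A sketch of the predicate (4 KiB page assumed):

#include <assert.h>

#define PAGE_SHIFT 12                   /* assumed: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Does page 'index' straddle EOF, i.e. need its tail zeroed on writeout? */
static int straddles_eof(unsigned long index, unsigned long long i_size)
{
	unsigned long end_index = i_size >> PAGE_SHIFT;
	unsigned offset = i_size & (PAGE_SIZE - 1);

	return index == end_index && offset != 0;
}

int main(void)
{
	assert(straddles_eof(2, 10000));    /* EOF at byte 1808 of page 2 */
	assert(!straddles_eof(1, 10000));   /* fully inside i_size */
	assert(!straddles_eof(1, 8192));    /* page-aligned EOF: no tail */
	return 0;
}
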
index b7f8eaeea5d83d8a1cb66e2d697de5341e7361cd..bfdad003ee56f69a8eb55dbd5023b384d4b55fdc 100644 (file)
@@ -510,7 +510,7 @@ static int ncp_readdir(struct file *file, struct dir_context *ctx)
                        kunmap(ctl.page);
                        SetPageUptodate(ctl.page);
                        unlock_page(ctl.page);
-                       page_cache_release(ctl.page);
+                       put_page(ctl.page);
                        ctl.page = NULL;
                }
                ctl.idx  = 0;
@@ -520,7 +520,7 @@ invalid_cache:
        if (ctl.page) {
                kunmap(ctl.page);
                unlock_page(ctl.page);
-               page_cache_release(ctl.page);
+               put_page(ctl.page);
                ctl.page = NULL;
        }
        ctl.cache = cache;
@@ -554,14 +554,14 @@ finished:
                kunmap(ctl.page);
                SetPageUptodate(ctl.page);
                unlock_page(ctl.page);
-               page_cache_release(ctl.page);
+               put_page(ctl.page);
        }
        if (page) {
                cache->head = ctl.head;
                kunmap(page);
                SetPageUptodate(page);
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
        }
 out:
        return result;
@@ -649,7 +649,7 @@ ncp_fill_cache(struct file *file, struct dir_context *ctx,
                        kunmap(ctl.page);
                        SetPageUptodate(ctl.page);
                        unlock_page(ctl.page);
-                       page_cache_release(ctl.page);
+                       put_page(ctl.page);
                }
                ctl.cache = NULL;
                ctl.idx  -= NCP_DIRCACHE_SIZE;
index 5233fbc1747a5bf555da56c04917cc418c4390ed..17cfb743b5bf03925c2dd08e3e6f67e57b04a8a9 100644 (file)
@@ -191,7 +191,7 @@ struct ncp_cache_head {
        int             eof;
 };
 
-#define NCP_DIRCACHE_SIZE      ((int)(PAGE_CACHE_SIZE/sizeof(struct dentry *)))
+#define NCP_DIRCACHE_SIZE      ((int)(PAGE_SIZE/sizeof(struct dentry *)))
 union ncp_dir_cache {
        struct ncp_cache_head   head;
        struct dentry           *dentry[NCP_DIRCACHE_SIZE];
index 02e4d87d2ed3181ee9cda016a970eced2810898d..17a42e4eb8728371f4aec957a545c47d332e7fcd 100644 (file)
@@ -231,7 +231,7 @@ bl_read_pagelist(struct nfs_pgio_header *header)
        size_t bytes_left = header->args.count;
        unsigned int pg_offset = header->args.pgbase, pg_len;
        struct page **pages = header->args.pages;
-       int pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT;
+       int pg_index = header->args.pgbase >> PAGE_SHIFT;
        const bool is_dio = (header->dreq != NULL);
        struct blk_plug plug;
        int i;
@@ -263,13 +263,13 @@ bl_read_pagelist(struct nfs_pgio_header *header)
                }
 
                if (is_dio) {
-                       if (pg_offset + bytes_left > PAGE_CACHE_SIZE)
-                               pg_len = PAGE_CACHE_SIZE - pg_offset;
+                       if (pg_offset + bytes_left > PAGE_SIZE)
+                               pg_len = PAGE_SIZE - pg_offset;
                        else
                                pg_len = bytes_left;
                } else {
                        BUG_ON(pg_offset != 0);
-                       pg_len = PAGE_CACHE_SIZE;
+                       pg_len = PAGE_SIZE;
                }
 
                if (is_hole(&be)) {
@@ -339,9 +339,9 @@ static void bl_write_cleanup(struct work_struct *work)
 
        if (likely(!hdr->pnfs_error)) {
                struct pnfs_block_layout *bl = BLK_LSEG2EXT(hdr->lseg);
-               u64 start = hdr->args.offset & (loff_t)PAGE_CACHE_MASK;
+               u64 start = hdr->args.offset & (loff_t)PAGE_MASK;
                u64 end = (hdr->args.offset + hdr->args.count +
-                       PAGE_CACHE_SIZE - 1) & (loff_t)PAGE_CACHE_MASK;
+                       PAGE_SIZE - 1) & (loff_t)PAGE_MASK;
 
                ext_tree_mark_written(bl, start >> SECTOR_SHIFT,
                                        (end - start) >> SECTOR_SHIFT);
@@ -373,7 +373,7 @@ bl_write_pagelist(struct nfs_pgio_header *header, int sync)
        loff_t offset = header->args.offset;
        size_t count = header->args.count;
        struct page **pages = header->args.pages;
-       int pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT;
+       int pg_index = header->args.pgbase >> PAGE_SHIFT;
        unsigned int pg_len;
        struct blk_plug plug;
        int i;
@@ -392,7 +392,7 @@ bl_write_pagelist(struct nfs_pgio_header *header, int sync)
        blk_start_plug(&plug);
 
        /* we always write out the whole page */
-       offset = offset & (loff_t)PAGE_CACHE_MASK;
+       offset = offset & (loff_t)PAGE_MASK;
        isect = offset >> SECTOR_SHIFT;
 
        for (i = pg_index; i < header->page_array.npages; i++) {
@@ -408,7 +408,7 @@ bl_write_pagelist(struct nfs_pgio_header *header, int sync)
                        extent_length = be.be_length - (isect - be.be_f_offset);
                }
 
-               pg_len = PAGE_CACHE_SIZE;
+               pg_len = PAGE_SIZE;
                bio = do_add_page_to_bio(bio, header->page_array.npages - i,
                                         WRITE, isect, pages[i], &map, &be,
                                         bl_end_io_write, par,
@@ -820,7 +820,7 @@ static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
        pgoff_t end;
 
        /* Optimize common case that writes from 0 to end of file */
-       end = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE);
+       end = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
        if (end != inode->i_mapping->nrpages) {
                rcu_read_lock();
                end = page_cache_next_hole(mapping, idx + 1, ULONG_MAX);
@@ -828,9 +828,9 @@ static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
        }
 
        if (!end)
-               return i_size_read(inode) - (idx << PAGE_CACHE_SHIFT);
+               return i_size_read(inode) - (idx << PAGE_SHIFT);
        else
-               return (end - idx) << PAGE_CACHE_SHIFT;
+               return (end - idx) << PAGE_SHIFT;
 }
 
 static void
index bc21205309e086019d7cf11112c780ce5b57963a..18e6fd0b9506e931a62acd3f91303574433a1113 100644 (file)
@@ -40,8 +40,8 @@
 #include "../pnfs.h"
 #include "../netns.h"
 
-#define PAGE_CACHE_SECTORS (PAGE_CACHE_SIZE >> SECTOR_SHIFT)
-#define PAGE_CACHE_SECTOR_SHIFT (PAGE_CACHE_SHIFT - SECTOR_SHIFT)
+#define PAGE_CACHE_SECTORS (PAGE_SIZE >> SECTOR_SHIFT)
+#define PAGE_CACHE_SECTOR_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
 #define SECTOR_SIZE (1 << SECTOR_SHIFT)
 
 struct pnfs_block_dev;
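
With the aliases gone, the two derived constants above are expressed against PAGE_SIZE directly. Assuming the common 4 KiB page and this layout driver's 512-byte sectors (SECTOR_SHIFT of 9), they evaluate to 8 sectors per page and a shift of 3; a throwaway userspace check:

    #include <stdio.h>

    int main(void)
    {
            unsigned page_shift = 12, sector_shift = 9; /* 4 KiB pages, 512 B sectors */

            printf("PAGE_CACHE_SECTORS      = %u\n", (1u << page_shift) >> sector_shift); /* 8 */
            printf("PAGE_CACHE_SECTOR_SHIFT = %u\n", page_shift - sector_shift);          /* 3 */
            return 0;
    }
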
index d6d5d2a48e838f9b65b5fa71809afc3562c30e27..0c96528db94af35ba362f71a7af4eca26e41f8c9 100644 (file)
@@ -736,7 +736,7 @@ static void nfs_server_set_fsinfo(struct nfs_server *server,
                server->rsize = max_rpc_payload;
        if (server->rsize > NFS_MAX_FILE_IO_SIZE)
                server->rsize = NFS_MAX_FILE_IO_SIZE;
-       server->rpages = (server->rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+       server->rpages = (server->rsize + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
        server->backing_dev_info.name = "nfs";
        server->backing_dev_info.ra_pages = server->rpages * NFS_MAX_READAHEAD;
@@ -745,13 +745,13 @@ static void nfs_server_set_fsinfo(struct nfs_server *server,
                server->wsize = max_rpc_payload;
        if (server->wsize > NFS_MAX_FILE_IO_SIZE)
                server->wsize = NFS_MAX_FILE_IO_SIZE;
-       server->wpages = (server->wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+       server->wpages = (server->wsize + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
        server->wtmult = nfs_block_bits(fsinfo->wtmult, NULL);
 
        server->dtsize = nfs_block_size(fsinfo->dtpref, NULL);
-       if (server->dtsize > PAGE_CACHE_SIZE * NFS_MAX_READDIR_PAGES)
-               server->dtsize = PAGE_CACHE_SIZE * NFS_MAX_READDIR_PAGES;
+       if (server->dtsize > PAGE_SIZE * NFS_MAX_READDIR_PAGES)
+               server->dtsize = PAGE_SIZE * NFS_MAX_READDIR_PAGES;
        if (server->dtsize > server->rsize)
                server->dtsize = server->rsize;
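
The rsize/wsize-to-page conversions above use the usual power-of-two round-up: (len + PAGE_SIZE - 1) >> PAGE_SHIFT is DIV_ROUND_UP(len, PAGE_SIZE). A standalone check, assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
            unsigned long rsize = 32769; /* one byte past eight 4 KiB pages */
            unsigned long rpages = (rsize + PAGE_SIZE - 1) >> PAGE_SHIFT;

            printf("rsize=%lu -> rpages=%lu\n", rsize, rpages); /* prints 9 */
            return 0;
    }
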
 
index 4bfa7d8bcadee27ab9a6779b3d427170f98ac918..33eb81738d03f1833250f8ad29224b4c14a37d4c 100644 (file)
@@ -377,7 +377,7 @@ int nfs_readdir_xdr_filler(struct page **pages, nfs_readdir_descriptor_t *desc,
  again:
        timestamp = jiffies;
        gencount = nfs_inc_attr_generation_counter();
-       error = NFS_PROTO(inode)->readdir(file->f_path.dentry, cred, entry->cookie, pages,
+       error = NFS_PROTO(inode)->readdir(file_dentry(file), cred, entry->cookie, pages,
                                          NFS_SERVER(inode)->dtsize, desc->plus);
        if (error < 0) {
                /* We requested READDIRPLUS, but the server doesn't grok it */
@@ -560,7 +560,7 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
                count++;
 
                if (desc->plus != 0)
-                       nfs_prime_dcache(desc->file->f_path.dentry, entry);
+                       nfs_prime_dcache(file_dentry(desc->file), entry);
 
                status = nfs_readdir_add_to_array(entry, page);
                if (status != 0)
@@ -707,7 +707,7 @@ void cache_page_release(nfs_readdir_descriptor_t *desc)
 {
        if (!desc->page->mapping)
                nfs_readdir_clear_array(desc->page);
-       page_cache_release(desc->page);
+       put_page(desc->page);
        desc->page = NULL;
 }
 
@@ -864,7 +864,7 @@ static bool nfs_dir_mapping_need_revalidate(struct inode *dir)
  */
 static int nfs_readdir(struct file *file, struct dir_context *ctx)
 {
-       struct dentry   *dentry = file->f_path.dentry;
+       struct dentry   *dentry = file_dentry(file);
        struct inode    *inode = d_inode(dentry);
        nfs_readdir_descriptor_t my_desc,
                        *desc = &my_desc;
@@ -1923,7 +1923,7 @@ int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
                 * add_to_page_cache_lru() grabs an extra page refcount.
                 * Drop it here to avoid leaking this page later.
                 */
-               page_cache_release(page);
+               put_page(page);
        } else
                __free_page(page);
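
The f_path.dentry to file_dentry() changes in this file track the VFS helper added for stacked filesystems: when NFS sits under an overlayfs mount, filp->f_path.dentry names the overlay object, while NFS-internal code (readdir, dcache priming, open contexts) needs the dentry of the inode NFS itself owns. file_dentry() is the real helper; the wrapper below is a hypothetical sketch of the calling convention only:

    #include <linux/dcache.h>
    #include <linux/fs.h>

    /* Hypothetical illustration, not kernel code: which dentry NFS wants. */
    static struct dentry *nfs_pick_dentry(struct file *filp)
    {
            /* filp->f_path.dentry may belong to a stacking filesystem;
             * file_dentry() resolves to the dentry of the underlying
             * filesystem that owns file_inode(filp). */
            return file_dentry(filp);
    }
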
 
index 7a0cfd3266e561620577bef3cf449171f10d9f92..c93826e4a8c6be0df99295da39a6655a77336600 100644 (file)
@@ -269,7 +269,7 @@ static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
 {
        unsigned int i;
        for (i = 0; i < npages; i++)
-               page_cache_release(pages[i]);
+               put_page(pages[i]);
 }
 
 void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
@@ -1003,7 +1003,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
                      iov_iter_count(iter));
 
        pos = iocb->ki_pos;
-       end = (pos + iov_iter_count(iter) - 1) >> PAGE_CACHE_SHIFT;
+       end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT;
 
        inode_lock(inode);
 
@@ -1013,7 +1013,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
 
        if (mapping->nrpages) {
                result = invalidate_inode_pages2_range(mapping,
-                                       pos >> PAGE_CACHE_SHIFT, end);
+                                       pos >> PAGE_SHIFT, end);
                if (result)
                        goto out_unlock;
        }
@@ -1042,7 +1042,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
 
        if (mapping->nrpages) {
                invalidate_inode_pages2_range(mapping,
-                                             pos >> PAGE_CACHE_SHIFT, end);
+                                             pos >> PAGE_SHIFT, end);
        }
 
        inode_unlock(inode);
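
The direct-write path invalidates the cached pages covering [pos, pos + count); computing the last index as (pos + count - 1) >> PAGE_SHIFT keeps a boundary-aligned end from bleeding into the following page. A standalone check, assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
            long long pos = 0, count = 8192; /* exactly two pages */
            long long end = (pos + count - 1) >> PAGE_SHIFT;

            /* Without the "- 1" the result would be 2, one page too far. */
            printf("last page index = %lld\n", end); /* prints 1 */
            return 0;
    }
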
index 89bf093d342a583d0d3c19738a2baebf046c68e2..be01095b97ae6a5a66ed1a6db3a3191bbb04a4c5 100644 (file)
@@ -320,7 +320,7 @@ static int nfs_want_read_modify_write(struct file *file, struct page *page,
                        loff_t pos, unsigned len)
 {
        unsigned int pglen = nfs_page_length(page);
-       unsigned int offset = pos & (PAGE_CACHE_SIZE - 1);
+       unsigned int offset = pos & (PAGE_SIZE - 1);
        unsigned int end = offset + len;
 
        if (pnfs_ld_read_whole_page(file->f_mapping->host)) {
@@ -351,7 +351,7 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping,
                        struct page **pagep, void **fsdata)
 {
        int ret;
-       pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+       pgoff_t index = pos >> PAGE_SHIFT;
        struct page *page;
        int once_thru = 0;
 
@@ -380,12 +380,12 @@ start:
        ret = nfs_flush_incompatible(file, page);
        if (ret) {
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
        } else if (!once_thru &&
                   nfs_want_read_modify_write(file, page, pos, len)) {
                once_thru = 1;
                ret = nfs_readpage(file, page);
-               page_cache_release(page);
+               put_page(page);
                if (!ret)
                        goto start;
        }
@@ -396,7 +396,7 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
 {
-       unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
+       unsigned offset = pos & (PAGE_SIZE - 1);
        struct nfs_open_context *ctx = nfs_file_open_context(file);
        int status;
 
@@ -413,20 +413,20 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
 
                if (pglen == 0) {
                        zero_user_segments(page, 0, offset,
-                                       end, PAGE_CACHE_SIZE);
+                                       end, PAGE_SIZE);
                        SetPageUptodate(page);
                } else if (end >= pglen) {
-                       zero_user_segment(page, end, PAGE_CACHE_SIZE);
+                       zero_user_segment(page, end, PAGE_SIZE);
                        if (offset == 0)
                                SetPageUptodate(page);
                } else
-                       zero_user_segment(page, pglen, PAGE_CACHE_SIZE);
+                       zero_user_segment(page, pglen, PAGE_SIZE);
        }
 
        status = nfs_updatepage(file, page, offset, copied);
 
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 
        if (status < 0)
                return status;
@@ -454,7 +454,7 @@ static void nfs_invalidate_page(struct page *page, unsigned int offset,
        dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %u, %u)\n",
                 page, offset, length);
 
-       if (offset != 0 || length < PAGE_CACHE_SIZE)
+       if (offset != 0 || length < PAGE_SIZE)
                return;
        /* Cancel any unstarted writes on this page */
        nfs_wb_page_cancel(page_file_mapping(page)->host, page);
index 33d18c4119057bb874604398337a990e94b5f9c4..738c84a42eb0217eae22b264eea27930f29f9118 100644 (file)
@@ -940,7 +940,7 @@ int nfs_open(struct inode *inode, struct file *filp)
 {
        struct nfs_open_context *ctx;
 
-       ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode);
+       ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
        nfs_file_set_open_context(filp, ctx);
index 565f8135ae1fdeb3bdce2c5797451d652c501b4b..f1d1d2c472e99fbde4bfa5f9c30dfedd05c28b90 100644 (file)
@@ -638,11 +638,11 @@ unsigned int nfs_page_length(struct page *page)
 
        if (i_size > 0) {
                pgoff_t page_index = page_file_index(page);
-               pgoff_t end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
+               pgoff_t end_index = (i_size - 1) >> PAGE_SHIFT;
                if (page_index < end_index)
-                       return PAGE_CACHE_SIZE;
+                       return PAGE_SIZE;
                if (page_index == end_index)
-                       return ((i_size - 1) & ~PAGE_CACHE_MASK) + 1;
+                       return ((i_size - 1) & ~PAGE_MASK) + 1;
        }
        return 0;
 }
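
One subtlety in nfs_page_length() above: in the kernel, PAGE_MASK clears the in-page bits (it is ~(PAGE_SIZE - 1)), so ~PAGE_MASK is the offset-within-page mask, the opposite of what userspace code often assumes. Worked through with an assumed 4 KiB page and an i_size of 10000:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1)) /* kernel convention */

    int main(void)
    {
            unsigned long i_size = 10000;
            unsigned long end_index = (i_size - 1) >> PAGE_SHIFT;      /* 2 */
            unsigned long last_len  = ((i_size - 1) & ~PAGE_MASK) + 1; /* 1808 */

            /* Pages 0 and 1 hold 8192 bytes; page 2 holds the final 1808. */
            printf("end_index=%lu last_len=%lu\n", end_index, last_len);
            return 0;
    }
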
index 22c35abbee9d6c88244b220054d2256c30d050ec..d0390516467c00f7f7506e3832b561338cd5bb79 100644 (file)
@@ -26,7 +26,7 @@ static int
 nfs4_file_open(struct inode *inode, struct file *filp)
 {
        struct nfs_open_context *ctx;
-       struct dentry *dentry = filp->f_path.dentry;
+       struct dentry *dentry = file_dentry(filp);
        struct dentry *parent = NULL;
        struct inode *dir;
        unsigned openflags = filp->f_flags;
@@ -57,7 +57,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
        parent = dget_parent(dentry);
        dir = d_inode(parent);
 
-       ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode);
+       ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode);
        err = PTR_ERR(ctx);
        if (IS_ERR(ctx))
                goto out;
index 4e4441216804ec550f2c135ef0f393f4f7a1426c..88474a4fc669053ab078e71f49d818a9e161a2f1 100644 (file)
@@ -5001,7 +5001,7 @@ static int decode_space_limit(struct xdr_stream *xdr,
                blocksize = be32_to_cpup(p);
                maxsize = (uint64_t)nblocks * (uint64_t)blocksize;
        }
-       maxsize >>= PAGE_CACHE_SHIFT;
+       maxsize >>= PAGE_SHIFT;
        *pagemod_limit = min_t(u64, maxsize, ULONG_MAX);
        return 0;
 out_overflow:
index 9aebffb4050597e1daae74f28375d86335f73e0c..049c1b1f2932be74266a47d97733426d89ec8154 100644 (file)
@@ -486,7 +486,7 @@ static void __r4w_put_page(void *priv, struct page *page)
        dprintk("%s: index=0x%lx\n", __func__,
                (page == ZERO_PAGE(0)) ? -1UL : page->index);
        if (ZERO_PAGE(0) != page)
-               page_cache_release(page);
+               put_page(page);
        return;
 }
 
index 8ce4f61cbaa5ffc5488fbe48fcf22b7f066f23b7..1f6db42310575b15785ee8cf43083b7d134ba872 100644 (file)
@@ -342,7 +342,7 @@ nfs_create_request(struct nfs_open_context *ctx, struct page *page,
         * update_nfs_request below if the region is not locked. */
        req->wb_page    = page;
        req->wb_index   = page_file_index(page);
-       page_cache_get(page);
+       get_page(page);
        req->wb_offset  = offset;
        req->wb_pgbase  = offset;
        req->wb_bytes   = count;
@@ -392,7 +392,7 @@ static void nfs_clear_request(struct nfs_page *req)
        struct nfs_lock_context *l_ctx = req->wb_lock_context;
 
        if (page != NULL) {
-               page_cache_release(page);
+               put_page(page);
                req->wb_page = NULL;
        }
        if (l_ctx != NULL) {
@@ -904,7 +904,7 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
                                return false;
                } else {
                        if (req->wb_pgbase != 0 ||
-                           prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
+                           prev->wb_pgbase + prev->wb_bytes != PAGE_SIZE)
                                return false;
                }
        }
index 2fa483e6dbe2e4a0e4333bf2d76cfd861073232b..89a5ef4df08a3a548af9a8e16a25eff5b50095c2 100644 (file)
@@ -841,7 +841,7 @@ send_layoutget(struct pnfs_layout_hdr *lo,
 
                i_size = i_size_read(ino);
 
-               lgp->args.minlength = PAGE_CACHE_SIZE;
+               lgp->args.minlength = PAGE_SIZE;
                if (lgp->args.minlength > range->length)
                        lgp->args.minlength = range->length;
                if (range->iomode == IOMODE_READ) {
@@ -1618,13 +1618,13 @@ lookup_again:
                spin_unlock(&clp->cl_lock);
        }
 
-       pg_offset = arg.offset & ~PAGE_CACHE_MASK;
+       pg_offset = arg.offset & ~PAGE_MASK;
        if (pg_offset) {
                arg.offset -= pg_offset;
                arg.length += pg_offset;
        }
        if (arg.length != NFS4_MAX_UINT64)
-               arg.length = PAGE_CACHE_ALIGN(arg.length);
+               arg.length = PAGE_ALIGN(arg.length);
 
        lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
        atomic_dec(&lo->plh_outstanding);
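
The layoutget path above rounds the requested range out to page boundaries: the start is pulled back by its in-page offset, the length grown by the same amount, then rounded up with PAGE_ALIGN(), which is roughly (((x) + PAGE_SIZE - 1) & PAGE_MASK). Worked through in userspace with assumed 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SHIFT    12
    #define PAGE_SIZE     (1UL << PAGE_SHIFT)
    #define PAGE_MASK     (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

    int main(void)
    {
            unsigned long offset = 5000, length = 100;
            unsigned long pg_offset = offset & ~PAGE_MASK; /* 904 */

            offset -= pg_offset;          /* 4096 */
            length += pg_offset;          /* 1004 */
            length  = PAGE_ALIGN(length); /* 4096 */

            printf("aligned: offset=%lu length=%lu\n", offset, length);
            return 0;
    }
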
index eb31e23e7defa5a1cdb494aae35b18ccd3a30a0f..6776d7a7839e0e8afcd966d296678fbc0d69ca7d 100644 (file)
@@ -46,7 +46,7 @@ static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
 static
 int nfs_return_empty_page(struct page *page)
 {
-       zero_user(page, 0, PAGE_CACHE_SIZE);
+       zero_user(page, 0, PAGE_SIZE);
        SetPageUptodate(page);
        unlock_page(page);
        return 0;
@@ -118,8 +118,8 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
                unlock_page(page);
                return PTR_ERR(new);
        }
-       if (len < PAGE_CACHE_SIZE)
-               zero_user_segment(page, len, PAGE_CACHE_SIZE);
+       if (len < PAGE_SIZE)
+               zero_user_segment(page, len, PAGE_SIZE);
 
        nfs_pageio_init_read(&pgio, inode, false,
                             &nfs_async_read_completion_ops);
@@ -295,7 +295,7 @@ int nfs_readpage(struct file *file, struct page *page)
        int             error;
 
        dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
-               page, PAGE_CACHE_SIZE, page_file_index(page));
+               page, PAGE_SIZE, page_file_index(page));
        nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
        nfs_add_stats(inode, NFSIOS_READPAGES, 1);
 
@@ -361,8 +361,8 @@ readpage_async_filler(void *data, struct page *page)
        if (IS_ERR(new))
                goto out_error;
 
-       if (len < PAGE_CACHE_SIZE)
-               zero_user_segment(page, len, PAGE_CACHE_SIZE);
+       if (len < PAGE_SIZE)
+               zero_user_segment(page, len, PAGE_SIZE);
        if (!nfs_pageio_add_request(desc->pgio, new)) {
                nfs_list_remove_request(new);
                nfs_readpage_release(new);
@@ -424,8 +424,8 @@ int nfs_readpages(struct file *filp, struct address_space *mapping,
 
        pgm = &pgio.pg_mirrors[0];
        NFS_I(inode)->read_io += pgm->pg_bytes_written;
-       npages = (pgm->pg_bytes_written + PAGE_CACHE_SIZE - 1) >>
-                PAGE_CACHE_SHIFT;
+       npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >>
+                PAGE_SHIFT;
        nfs_add_stats(inode, NFSIOS_READPAGES, npages);
 read_complete:
        put_nfs_open_context(desc.ctx);
index 5754835a288608bd7dc3b58b8f92a2a6ee8a20bc..5f4fd53e5764884391569d010661fe1639b35053 100644 (file)
@@ -150,7 +150,7 @@ static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int c
 
        spin_lock(&inode->i_lock);
        i_size = i_size_read(inode);
-       end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
+       end_index = (i_size - 1) >> PAGE_SHIFT;
        if (i_size > 0 && page_file_index(page) < end_index)
                goto out;
        end = page_file_offset(page) + ((loff_t)offset+count);
@@ -1942,7 +1942,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
 int nfs_wb_single_page(struct inode *inode, struct page *page, bool launder)
 {
        loff_t range_start = page_file_offset(page);
-       loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
+       loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = 0,
index 27f75bcbeb30d78e63321831eb39e7339cf714ce..a9fb3636c14235ca7bb801a8bdb03286c68434df 100644 (file)
@@ -458,7 +458,7 @@ __u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *bmap,
        struct buffer_head *pbh;
        __u64 key;
 
-       key = page_index(bh->b_page) << (PAGE_CACHE_SHIFT -
+       key = page_index(bh->b_page) << (PAGE_SHIFT -
                                         bmap->b_inode->i_blkbits);
        for (pbh = page_buffers(bh->b_page); pbh != bh; pbh = pbh->b_this_page)
                key++;
index a35ae35e69320f711454125f9a600586869bb980..e0c9daf9aa22e42ab3550e80d94301f6992d5baf 100644 (file)
@@ -62,7 +62,7 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
        set_buffer_uptodate(bh);
 
        unlock_page(bh->b_page);
-       page_cache_release(bh->b_page);
+       put_page(bh->b_page);
        return bh;
 }
 
@@ -128,7 +128,7 @@ found:
 
 out_locked:
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
        return err;
 }
 
@@ -146,7 +146,7 @@ void nilfs_btnode_delete(struct buffer_head *bh)
        pgoff_t index = page_index(page);
        int still_dirty;
 
-       page_cache_get(page);
+       get_page(page);
        lock_page(page);
        wait_on_page_writeback(page);
 
@@ -154,7 +154,7 @@ void nilfs_btnode_delete(struct buffer_head *bh)
        still_dirty = PageDirty(page);
        mapping = page->mapping;
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 
        if (!still_dirty && mapping)
                invalidate_inode_pages2_range(mapping, index, index);
@@ -181,7 +181,7 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc,
        obh = ctxt->bh;
        ctxt->newbh = NULL;
 
-       if (inode->i_blkbits == PAGE_CACHE_SHIFT) {
+       if (inode->i_blkbits == PAGE_SHIFT) {
                lock_page(obh->b_page);
                /*
                 * We cannot call radix_tree_preload for the kernels older
index 6b8b92b19cec9c868992fd089a00fc74f669fbdc..e08f064e4bd7b1e46e39319413c2c03e4df1d789 100644 (file)
@@ -58,7 +58,7 @@ static inline unsigned nilfs_chunk_size(struct inode *inode)
 static inline void nilfs_put_page(struct page *page)
 {
        kunmap(page);
-       page_cache_release(page);
+       put_page(page);
 }
 
 /*
@@ -69,9 +69,9 @@ static unsigned nilfs_last_byte(struct inode *inode, unsigned long page_nr)
 {
        unsigned last_byte = inode->i_size;
 
-       last_byte -= page_nr << PAGE_CACHE_SHIFT;
-       if (last_byte > PAGE_CACHE_SIZE)
-               last_byte = PAGE_CACHE_SIZE;
+       last_byte -= page_nr << PAGE_SHIFT;
+       if (last_byte > PAGE_SIZE)
+               last_byte = PAGE_SIZE;
        return last_byte;
 }
 
@@ -109,12 +109,12 @@ static void nilfs_check_page(struct page *page)
        unsigned chunk_size = nilfs_chunk_size(dir);
        char *kaddr = page_address(page);
        unsigned offs, rec_len;
-       unsigned limit = PAGE_CACHE_SIZE;
+       unsigned limit = PAGE_SIZE;
        struct nilfs_dir_entry *p;
        char *error;
 
-       if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
-               limit = dir->i_size & ~PAGE_CACHE_MASK;
+       if ((dir->i_size >> PAGE_SHIFT) == page->index) {
+               limit = dir->i_size & ~PAGE_MASK;
                if (limit & (chunk_size - 1))
                        goto Ebadsize;
                if (!limit)
@@ -161,7 +161,7 @@ Espan:
 bad_entry:
        nilfs_error(sb, "nilfs_check_page", "bad entry in directory #%lu: %s - "
                    "offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
-                   dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
+                   dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
                    (unsigned long) le64_to_cpu(p->inode),
                    rec_len, p->name_len);
        goto fail;
@@ -170,7 +170,7 @@ Eend:
        nilfs_error(sb, "nilfs_check_page",
                    "entry in directory #%lu spans the page boundary"
                    "offset=%lu, inode=%lu",
-                   dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs,
+                   dir->i_ino, (page->index<<PAGE_SHIFT)+offs,
                    (unsigned long) le64_to_cpu(p->inode));
 fail:
        SetPageChecked(page);
@@ -256,8 +256,8 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx)
        loff_t pos = ctx->pos;
        struct inode *inode = file_inode(file);
        struct super_block *sb = inode->i_sb;
-       unsigned int offset = pos & ~PAGE_CACHE_MASK;
-       unsigned long n = pos >> PAGE_CACHE_SHIFT;
+       unsigned int offset = pos & ~PAGE_MASK;
+       unsigned long n = pos >> PAGE_SHIFT;
        unsigned long npages = dir_pages(inode);
 /*     unsigned chunk_mask = ~(nilfs_chunk_size(inode)-1); */
 
@@ -272,7 +272,7 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx)
                if (IS_ERR(page)) {
                        nilfs_error(sb, __func__, "bad page in #%lu",
                                    inode->i_ino);
-                       ctx->pos += PAGE_CACHE_SIZE - offset;
+                       ctx->pos += PAGE_SIZE - offset;
                        return -EIO;
                }
                kaddr = page_address(page);
@@ -361,7 +361,7 @@ nilfs_find_entry(struct inode *dir, const struct qstr *qstr,
                if (++n >= npages)
                        n = 0;
                /* next page is past the blocks we've got */
-               if (unlikely(n > (dir->i_blocks >> (PAGE_CACHE_SHIFT - 9)))) {
+               if (unlikely(n > (dir->i_blocks >> (PAGE_SHIFT - 9)))) {
                        nilfs_error(dir->i_sb, __func__,
                               "dir %lu size %lld exceeds block count %llu",
                               dir->i_ino, dir->i_size,
@@ -401,7 +401,7 @@ ino_t nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr)
        if (de) {
                res = le64_to_cpu(de->inode);
                kunmap(page);
-               page_cache_release(page);
+               put_page(page);
        }
        return res;
 }
@@ -460,7 +460,7 @@ int nilfs_add_link(struct dentry *dentry, struct inode *inode)
                kaddr = page_address(page);
                dir_end = kaddr + nilfs_last_byte(dir, n);
                de = (struct nilfs_dir_entry *)kaddr;
-               kaddr += PAGE_CACHE_SIZE - reclen;
+               kaddr += PAGE_SIZE - reclen;
                while ((char *)de <= kaddr) {
                        if ((char *)de == dir_end) {
                                /* We hit i_size */
@@ -603,7 +603,7 @@ int nilfs_make_empty(struct inode *inode, struct inode *parent)
        kunmap_atomic(kaddr);
        nilfs_commit_chunk(page, mapping, 0, chunk_size);
 fail:
-       page_cache_release(page);
+       put_page(page);
        return err;
 }
 
index 748ca238915a434750f223dc2a6417d80a40e17f..0224b7826ace724ac51ab317816d29ee82d7a4f8 100644 (file)
@@ -115,7 +115,7 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
 
  failed:
        unlock_page(bh->b_page);
-       page_cache_release(bh->b_page);
+       put_page(bh->b_page);
        return err;
 }
 
index 21a1e2e0d92fe22696ebac0bd702238fb95ec02f..534631358b1336ee0d22fdf8db7bb87ff8720090 100644 (file)
@@ -249,7 +249,7 @@ static int nilfs_set_page_dirty(struct page *page)
                if (nr_dirty)
                        nilfs_set_file_dirty(inode, nr_dirty);
        } else if (ret) {
-               unsigned nr_dirty = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+               unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);
 
                nilfs_set_file_dirty(inode, nr_dirty);
        }
@@ -291,7 +291,7 @@ static int nilfs_write_end(struct file *file, struct address_space *mapping,
                           struct page *page, void *fsdata)
 {
        struct inode *inode = mapping->host;
-       unsigned start = pos & (PAGE_CACHE_SIZE - 1);
+       unsigned start = pos & (PAGE_SIZE - 1);
        unsigned nr_dirty;
        int err;
 
index 1125f40233ffd9c849acc0f13d432f520de95e0a..f6982b9153d58f602e89159302098340f2d33f9d 100644 (file)
@@ -110,7 +110,7 @@ static int nilfs_mdt_create_block(struct inode *inode, unsigned long block,
 
  failed_bh:
        unlock_page(bh->b_page);
-       page_cache_release(bh->b_page);
+       put_page(bh->b_page);
        brelse(bh);
 
  failed_unlock:
@@ -170,7 +170,7 @@ nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff,
 
  failed_bh:
        unlock_page(bh->b_page);
-       page_cache_release(bh->b_page);
+       put_page(bh->b_page);
        brelse(bh);
  failed:
        return ret;
@@ -363,7 +363,7 @@ int nilfs_mdt_delete_block(struct inode *inode, unsigned long block)
 int nilfs_mdt_forget_block(struct inode *inode, unsigned long block)
 {
        pgoff_t index = (pgoff_t)block >>
-               (PAGE_CACHE_SHIFT - inode->i_blkbits);
+               (PAGE_SHIFT - inode->i_blkbits);
        struct page *page;
        unsigned long first_block;
        int ret = 0;
@@ -376,7 +376,7 @@ int nilfs_mdt_forget_block(struct inode *inode, unsigned long block)
        wait_on_page_writeback(page);
 
        first_block = (unsigned long)index <<
-               (PAGE_CACHE_SHIFT - inode->i_blkbits);
+               (PAGE_SHIFT - inode->i_blkbits);
        if (page_has_buffers(page)) {
                struct buffer_head *bh;
 
@@ -385,7 +385,7 @@ int nilfs_mdt_forget_block(struct inode *inode, unsigned long block)
        }
        still_dirty = PageDirty(page);
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 
        if (still_dirty ||
            invalidate_inode_pages2_range(inode->i_mapping, index, index) != 0)
@@ -578,7 +578,7 @@ int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh)
        }
 
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
        return 0;
 }
 
@@ -597,7 +597,7 @@ nilfs_mdt_get_frozen_buffer(struct inode *inode, struct buffer_head *bh)
                        bh_frozen = nilfs_page_get_nth_block(page, n);
                }
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
        }
        return bh_frozen;
 }
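
The nilfs metadata-file hunks above convert between block numbers and the page-cache index with (PAGE_SHIFT - i_blkbits): with 1 KiB blocks in 4 KiB pages there are four blocks per page. A standalone check with those assumed sizes:

    #include <stdio.h>

    int main(void)
    {
            unsigned page_shift = 12, blkbits = 10; /* 4 KiB pages, 1 KiB blocks */
            unsigned long block = 13;
            unsigned long index = block >> (page_shift - blkbits); /* page 3  */
            unsigned long first = index << (page_shift - blkbits); /* block 12 */

            printf("block %lu -> page %lu (first block %lu)\n", block, index, first);
            return 0;
    }
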
index 7ccdb961eea90d2ed313ef688feca1451112f5ba..151bc19d47c0e189525b283e7d154fe70e56c3c3 100644 (file)
@@ -431,11 +431,11 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 out_dir:
        if (dir_de) {
                kunmap(dir_page);
-               page_cache_release(dir_page);
+               put_page(dir_page);
        }
 out_old:
        kunmap(old_page);
-       page_cache_release(old_page);
+       put_page(old_page);
 out:
        nilfs_transaction_abort(old_dir->i_sb);
        return err;
index c20df77eff99f16d23ca31dbb131b1d9a45e5571..489391561cdad64899e6470c486d79246fc5aa84 100644 (file)
@@ -50,7 +50,7 @@ __nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
        if (!page_has_buffers(page))
                create_empty_buffers(page, 1 << blkbits, b_state);
 
-       first_block = (unsigned long)index << (PAGE_CACHE_SHIFT - blkbits);
+       first_block = (unsigned long)index << (PAGE_SHIFT - blkbits);
        bh = nilfs_page_get_nth_block(page, block - first_block);
 
        touch_buffer(bh);
@@ -64,7 +64,7 @@ struct buffer_head *nilfs_grab_buffer(struct inode *inode,
                                      unsigned long b_state)
 {
        int blkbits = inode->i_blkbits;
-       pgoff_t index = blkoff >> (PAGE_CACHE_SHIFT - blkbits);
+       pgoff_t index = blkoff >> (PAGE_SHIFT - blkbits);
        struct page *page;
        struct buffer_head *bh;
 
@@ -75,7 +75,7 @@ struct buffer_head *nilfs_grab_buffer(struct inode *inode,
        bh = __nilfs_get_page_block(page, blkoff, index, blkbits, b_state);
        if (unlikely(!bh)) {
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
                return NULL;
        }
        return bh;
@@ -288,7 +288,7 @@ repeat:
                __set_page_dirty_nobuffers(dpage);
 
                unlock_page(dpage);
-               page_cache_release(dpage);
+               put_page(dpage);
                unlock_page(page);
        }
        pagevec_release(&pvec);
@@ -333,7 +333,7 @@ repeat:
                        WARN_ON(PageDirty(dpage));
                        nilfs_copy_page(dpage, page, 0);
                        unlock_page(dpage);
-                       page_cache_release(dpage);
+                       put_page(dpage);
                } else {
                        struct page *page2;
 
@@ -350,7 +350,7 @@ repeat:
                        if (unlikely(err < 0)) {
                                WARN_ON(err == -EEXIST);
                                page->mapping = NULL;
-                               page_cache_release(page); /* for cache */
+                               put_page(page); /* for cache */
                        } else {
                                page->mapping = dmap;
                                dmap->nrpages++;
@@ -523,8 +523,8 @@ unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
        if (inode->i_mapping->nrpages == 0)
                return 0;
 
-       index = start_blk >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
-       nblocks_in_page = 1U << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+       index = start_blk >> (PAGE_SHIFT - inode->i_blkbits);
+       nblocks_in_page = 1U << (PAGE_SHIFT - inode->i_blkbits);
 
        pagevec_init(&pvec, 0);
 
@@ -537,7 +537,7 @@ repeat:
        if (length > 0 && pvec.pages[0]->index > index)
                goto out;
 
-       b = pvec.pages[0]->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+       b = pvec.pages[0]->index << (PAGE_SHIFT - inode->i_blkbits);
        i = 0;
        do {
                page = pvec.pages[i];
index 9b4f205d11736dc4d109828260055c2af34c0e9d..5afa77fadc11952ef1e6bad028f921bd049b867c 100644 (file)
@@ -544,14 +544,14 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
                                blocksize, page, NULL);
 
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
 
                (*nr_salvaged_blocks)++;
                goto next;
 
  failed_page:
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
 
  failed_inode:
                printk(KERN_WARNING
index 3b65adaae7e47b9732669c4db8a563cfeeec4d87..4317f72568e60651709149497c8c027e608cf773 100644 (file)
@@ -2070,7 +2070,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
                        goto failed_to_write;
 
                if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE ||
-                   nilfs->ns_blocksize_bits != PAGE_CACHE_SHIFT) {
+                   nilfs->ns_blocksize_bits != PAGE_SHIFT) {
                        /*
                         * At this point, we avoid double buffering
                         * for blocksize < pagesize because page dirty
index 7521e11db728f880e6067aecac7f97f40eac511d..97768a1379f2e60fcaab520e800cb332ee28265c 100644 (file)
@@ -74,7 +74,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 
                set_buffer_uptodate(bh);
 
-               file_ofs = ((s64)page->index << PAGE_CACHE_SHIFT) +
+               file_ofs = ((s64)page->index << PAGE_SHIFT) +
                                bh_offset(bh);
                read_lock_irqsave(&ni->size_lock, flags);
                init_size = ni->initialized_size;
@@ -142,7 +142,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
                u32 rec_size;
 
                rec_size = ni->itype.index.block_size;
-               recs = PAGE_CACHE_SIZE / rec_size;
+               recs = PAGE_SIZE / rec_size;
                /* Should have been verified before we got here... */
                BUG_ON(!recs);
                local_irq_save(flags);
@@ -229,7 +229,7 @@ static int ntfs_read_block(struct page *page)
         * fully truncated, truncate will throw it away as soon as we unlock
         * it so no need to worry what we do with it.
         */
-       iblock = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
+       iblock = (s64)page->index << (PAGE_SHIFT - blocksize_bits);
        read_lock_irqsave(&ni->size_lock, flags);
        lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;
        init_size = ni->initialized_size;
@@ -412,9 +412,9 @@ retry_readpage:
        vi = page->mapping->host;
        i_size = i_size_read(vi);
        /* Is the page fully outside i_size? (truncate in progress) */
-       if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >>
-                       PAGE_CACHE_SHIFT)) {
-               zero_user(page, 0, PAGE_CACHE_SIZE);
+       if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >>
+                       PAGE_SHIFT)) {
+               zero_user(page, 0, PAGE_SIZE);
                ntfs_debug("Read outside i_size - truncated?");
                goto done;
        }
@@ -463,7 +463,7 @@ retry_readpage:
         * ok to ignore the compressed flag here.
         */
        if (unlikely(page->index > 0)) {
-               zero_user(page, 0, PAGE_CACHE_SIZE);
+               zero_user(page, 0, PAGE_SIZE);
                goto done;
        }
        if (!NInoAttr(ni))
@@ -509,7 +509,7 @@ retry_readpage:
                        le16_to_cpu(ctx->attr->data.resident.value_offset),
                        attr_len);
        /* Zero the remainder of the page. */
-       memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
+       memset(addr + attr_len, 0, PAGE_SIZE - attr_len);
        flush_dcache_page(page);
        kunmap_atomic(addr);
 put_unm_err_out:
@@ -599,7 +599,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
        /* NOTE: Different naming scheme to ntfs_read_block()! */
 
        /* The first block in the page. */
-       block = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
+       block = (s64)page->index << (PAGE_SHIFT - blocksize_bits);
 
        read_lock_irqsave(&ni->size_lock, flags);
        i_size = i_size_read(vi);
@@ -674,7 +674,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
                                // in the inode.
                                // Again, for each page do:
                                //      __set_page_dirty_buffers();
-                               // page_cache_release()
+                               // put_page()
                                // We don't need to wait on the writes.
                                // Update iblock.
                        }
@@ -925,7 +925,7 @@ static int ntfs_write_mst_block(struct page *page,
        ntfs_volume *vol = ni->vol;
        u8 *kaddr;
        unsigned int rec_size = ni->itype.index.block_size;
-       ntfs_inode *locked_nis[PAGE_CACHE_SIZE / rec_size];
+       ntfs_inode *locked_nis[PAGE_SIZE / rec_size];
        struct buffer_head *bh, *head, *tbh, *rec_start_bh;
        struct buffer_head *bhs[MAX_BUF_PER_PAGE];
        runlist_element *rl;
@@ -949,7 +949,7 @@ static int ntfs_write_mst_block(struct page *page,
                        (NInoAttr(ni) && ni->type == AT_INDEX_ALLOCATION)));
        bh_size = vol->sb->s_blocksize;
        bh_size_bits = vol->sb->s_blocksize_bits;
-       max_bhs = PAGE_CACHE_SIZE / bh_size;
+       max_bhs = PAGE_SIZE / bh_size;
        BUG_ON(!max_bhs);
        BUG_ON(max_bhs > MAX_BUF_PER_PAGE);
 
@@ -961,13 +961,13 @@ static int ntfs_write_mst_block(struct page *page,
        BUG_ON(!bh);
 
        rec_size_bits = ni->itype.index.block_size_bits;
-       BUG_ON(!(PAGE_CACHE_SIZE >> rec_size_bits));
+       BUG_ON(!(PAGE_SIZE >> rec_size_bits));
        bhs_per_rec = rec_size >> bh_size_bits;
        BUG_ON(!bhs_per_rec);
 
        /* The first block in the page. */
        rec_block = block = (sector_t)page->index <<
-                       (PAGE_CACHE_SHIFT - bh_size_bits);
+                       (PAGE_SHIFT - bh_size_bits);
 
        /* The first out of bounds block for the data size. */
        dblock = (i_size_read(vi) + bh_size - 1) >> bh_size_bits;
@@ -1133,7 +1133,7 @@ lock_retry_remap:
                        unsigned long mft_no;
 
                        /* Get the mft record number. */
-                       mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs)
+                       mft_no = (((s64)page->index << PAGE_SHIFT) + ofs)
                                        >> rec_size_bits;
                        /* Check whether to write this mft record. */
                        tni = NULL;
@@ -1249,7 +1249,7 @@ do_mirror:
                                continue;
                        ofs = bh_offset(tbh);
                        /* Get the mft record number. */
-                       mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs)
+                       mft_no = (((s64)page->index << PAGE_SHIFT) + ofs)
                                        >> rec_size_bits;
                        if (mft_no < vol->mftmirr_size)
                                ntfs_sync_mft_mirror(vol, mft_no,
@@ -1300,7 +1300,7 @@ done:
                 * Set page error if there is only one ntfs record in the page.
                 * Otherwise we would lose per-record granularity.
                 */
-               if (ni->itype.index.block_size == PAGE_CACHE_SIZE)
+               if (ni->itype.index.block_size == PAGE_SIZE)
                        SetPageError(page);
                NVolSetErrors(vol);
        }
@@ -1308,7 +1308,7 @@ done:
                ntfs_debug("Page still contains one or more dirty ntfs "
                                "records.  Redirtying the page starting at "
                                "record 0x%lx.", page->index <<
-                               (PAGE_CACHE_SHIFT - rec_size_bits));
+                               (PAGE_SHIFT - rec_size_bits));
                redirty_page_for_writepage(wbc, page);
                unlock_page(page);
        } else {
@@ -1365,13 +1365,13 @@ retry_writepage:
        BUG_ON(!PageLocked(page));
        i_size = i_size_read(vi);
        /* Is the page fully outside i_size? (truncate in progress) */
-       if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >>
-                       PAGE_CACHE_SHIFT)) {
+       if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >>
+                       PAGE_SHIFT)) {
                /*
                 * The page may have dirty, unmapped buffers.  Make them
                 * freeable here, so the page does not leak.
                 */
-               block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
+               block_invalidatepage(page, 0, PAGE_SIZE);
                unlock_page(page);
                ntfs_debug("Write outside i_size - truncated?");
                return 0;
@@ -1414,10 +1414,10 @@ retry_writepage:
        /* NInoNonResident() == NInoIndexAllocPresent() */
        if (NInoNonResident(ni)) {
                /* We have to zero every time due to mmap-at-end-of-file. */
-               if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) {
+               if (page->index >= (i_size >> PAGE_SHIFT)) {
                        /* The page straddles i_size. */
-                       unsigned int ofs = i_size & ~PAGE_CACHE_MASK;
-                       zero_user_segment(page, ofs, PAGE_CACHE_SIZE);
+                       unsigned int ofs = i_size & ~PAGE_MASK;
+                       zero_user_segment(page, ofs, PAGE_SIZE);
                }
                /* Handle mst protected attributes. */
                if (NInoMstProtected(ni))
@@ -1500,7 +1500,7 @@ retry_writepage:
                        le16_to_cpu(ctx->attr->data.resident.value_offset),
                        addr, attr_len);
        /* Zero out of bounds area in the page cache page. */
-       memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
+       memset(addr + attr_len, 0, PAGE_SIZE - attr_len);
        kunmap_atomic(addr);
        flush_dcache_page(page);
        flush_dcache_mft_record_page(ctx->ntfs_ino);
index caecc58f529c90e06f55bb5415fbe19b0995dac3..820d6eabf60f0c5159734be5ce94cc974b880189 100644 (file)
@@ -40,7 +40,7 @@
 static inline void ntfs_unmap_page(struct page *page)
 {
        kunmap(page);
-       page_cache_release(page);
+       put_page(page);
 }
 
 /**
@@ -49,7 +49,7 @@ static inline void ntfs_unmap_page(struct page *page)
  * @index:     index into the page cache for @mapping of the page to map
  *
  * Read a page from the page cache of the address space @mapping at position
- * @index, where @index is in units of PAGE_CACHE_SIZE, and not in bytes.
+ * @index, where @index is in units of PAGE_SIZE, and not in bytes.
  *
  * If the page is not in memory it is loaded from disk first using the readpage
  * method defined in the address space operations of @mapping and the page is
index 250ed5b20c8fbed1b9afcf19154a088983986527..44a39a099b54e368ab074b0c26248fdd25735273 100644 (file)
@@ -152,7 +152,7 @@ int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn, ntfs_attr_search_ctx *ctx)
                        if (old_ctx.base_ntfs_ino && old_ctx.ntfs_ino !=
                                        old_ctx.base_ntfs_ino) {
                                put_this_page = old_ctx.ntfs_ino->page;
-                               page_cache_get(put_this_page);
+                               get_page(put_this_page);
                        }
                        /*
                         * Reinitialize the search context so we can lookup the
@@ -275,7 +275,7 @@ retry_map:
                 * the pieces anyway.
                 */
                if (put_this_page)
-                       page_cache_release(put_this_page);
+                       put_page(put_this_page);
        }
        return err;
 }
@@ -1660,7 +1660,7 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
                memcpy(kaddr, (u8*)a +
                                le16_to_cpu(a->data.resident.value_offset),
                                attr_size);
-               memset(kaddr + attr_size, 0, PAGE_CACHE_SIZE - attr_size);
+               memset(kaddr + attr_size, 0, PAGE_SIZE - attr_size);
                kunmap_atomic(kaddr);
                flush_dcache_page(page);
                SetPageUptodate(page);
@@ -1748,7 +1748,7 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
        if (page) {
                set_page_dirty(page);
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
        }
        ntfs_debug("Done.");
        return 0;
@@ -1835,7 +1835,7 @@ rl_err_out:
                ntfs_free(rl);
 page_err_out:
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
        }
        if (err == -EINVAL)
                err = -EIO;
@@ -2513,17 +2513,17 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
        BUG_ON(NInoEncrypted(ni));
        mapping = VFS_I(ni)->i_mapping;
        /* Work out the starting index and page offset. */
-       idx = ofs >> PAGE_CACHE_SHIFT;
-       start_ofs = ofs & ~PAGE_CACHE_MASK;
+       idx = ofs >> PAGE_SHIFT;
+       start_ofs = ofs & ~PAGE_MASK;
        /* Work out the ending index and page offset. */
        end = ofs + cnt;
-       end_ofs = end & ~PAGE_CACHE_MASK;
+       end_ofs = end & ~PAGE_MASK;
        /* If the end is outside the inode size return -ESPIPE. */
        if (unlikely(end > i_size_read(VFS_I(ni)))) {
                ntfs_error(vol->sb, "Request exceeds end of attribute.");
                return -ESPIPE;
        }
-       end >>= PAGE_CACHE_SHIFT;
+       end >>= PAGE_SHIFT;
        /* If there is a first partial page, need to do it the slow way. */
        if (start_ofs) {
                page = read_mapping_page(mapping, idx, NULL);
@@ -2536,7 +2536,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
                 * If the last page is the same as the first page, need to
                 * limit the write to the end offset.
                 */
-               size = PAGE_CACHE_SIZE;
+               size = PAGE_SIZE;
                if (idx == end)
                        size = end_ofs;
                kaddr = kmap_atomic(page);
@@ -2544,7 +2544,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
                flush_dcache_page(page);
                kunmap_atomic(kaddr);
                set_page_dirty(page);
-               page_cache_release(page);
+               put_page(page);
                balance_dirty_pages_ratelimited(mapping);
                cond_resched();
                if (idx == end)
@@ -2561,7 +2561,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
                        return -ENOMEM;
                }
                kaddr = kmap_atomic(page);
-               memset(kaddr, val, PAGE_CACHE_SIZE);
+               memset(kaddr, val, PAGE_SIZE);
                flush_dcache_page(page);
                kunmap_atomic(kaddr);
                /*
@@ -2585,7 +2585,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
                set_page_dirty(page);
                /* Finally unlock and release the page. */
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
                balance_dirty_pages_ratelimited(mapping);
                cond_resched();
        }
@@ -2602,7 +2602,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
                flush_dcache_page(page);
                kunmap_atomic(kaddr);
                set_page_dirty(page);
-               page_cache_release(page);
+               put_page(page);
                balance_dirty_pages_ratelimited(mapping);
                cond_resched();
        }
index 0809cf876098965fcd33d472201491bb7796403c..ec130c588d2b62d95fd927355579dc7015abd039 100644 (file)
@@ -67,8 +67,8 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
         * Calculate the indices for the pages containing the first and last
         * bits, i.e. @start_bit and @start_bit + @cnt - 1, respectively.
         */
-       index = start_bit >> (3 + PAGE_CACHE_SHIFT);
-       end_index = (start_bit + cnt - 1) >> (3 + PAGE_CACHE_SHIFT);
+       index = start_bit >> (3 + PAGE_SHIFT);
+       end_index = (start_bit + cnt - 1) >> (3 + PAGE_SHIFT);
 
        /* Get the page containing the first bit (@start_bit). */
        mapping = vi->i_mapping;
@@ -82,7 +82,7 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
        kaddr = page_address(page);
 
        /* Set @pos to the position of the byte containing @start_bit. */
-       pos = (start_bit >> 3) & ~PAGE_CACHE_MASK;
+       pos = (start_bit >> 3) & ~PAGE_MASK;
 
        /* Calculate the position of @start_bit in the first byte. */
        bit = start_bit & 7;
@@ -108,7 +108,7 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
         * Depending on @value, modify all remaining whole bytes in the page up
         * to @cnt.
         */
-       len = min_t(s64, cnt >> 3, PAGE_CACHE_SIZE - pos);
+       len = min_t(s64, cnt >> 3, PAGE_SIZE - pos);
        memset(kaddr + pos, value ? 0xff : 0, len);
        cnt -= len << 3;
 
@@ -132,7 +132,7 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
                 * Depending on @value, modify all remaining whole bytes in the
                 * page up to @cnt.
                 */
-               len = min_t(s64, cnt >> 3, PAGE_CACHE_SIZE);
+               len = min_t(s64, cnt >> 3, PAGE_SIZE);
                memset(kaddr, value ? 0xff : 0, len);
                cnt -= len << 3;
        }
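
The bitmap arithmetic above maps a bit number to its page-cache page: each page holds PAGE_SIZE << 3 bits, hence the (3 + PAGE_SHIFT) shift, and (start_bit >> 3) & ~PAGE_MASK is the byte offset inside that page. Checked standalone with an assumed 4 KiB page (32768 bits per page):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long long start_bit = 100000;
            unsigned long index = start_bit >> (3 + PAGE_SHIFT);  /* page 3  */
            unsigned long pos   = (start_bit >> 3) & ~PAGE_MASK;  /* byte 212 */

            printf("bit %llu -> page %lu, byte %lu\n", start_bit, index, pos);
            return 0;
    }
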
index f82498c35e78a08a0758767be54f5d28854d7d8d..f2b5e746f49b747c5c968fbf09d1a09dbd116269 100644 (file)
@@ -104,16 +104,12 @@ static void zero_partial_compressed_page(struct page *page,
        unsigned int kp_ofs;
 
        ntfs_debug("Zeroing page region outside initialized size.");
-       if (((s64)page->index << PAGE_CACHE_SHIFT) >= initialized_size) {
-               /*
-                * FIXME: Using clear_page() will become wrong when we get
-                * PAGE_CACHE_SIZE != PAGE_SIZE but for now there is no problem.
-                */
+       if (((s64)page->index << PAGE_SHIFT) >= initialized_size) {
                clear_page(kp);
                return;
        }
-       kp_ofs = initialized_size & ~PAGE_CACHE_MASK;
-       memset(kp + kp_ofs, 0, PAGE_CACHE_SIZE - kp_ofs);
+       kp_ofs = initialized_size & ~PAGE_MASK;
+       memset(kp + kp_ofs, 0, PAGE_SIZE - kp_ofs);
        return;
 }
 
@@ -123,7 +119,7 @@ static void zero_partial_compressed_page(struct page *page,
 static inline void handle_bounds_compressed_page(struct page *page,
                const loff_t i_size, const s64 initialized_size)
 {
-       if ((page->index >= (initialized_size >> PAGE_CACHE_SHIFT)) &&
+       if ((page->index >= (initialized_size >> PAGE_SHIFT)) &&
                        (initialized_size < i_size))
                zero_partial_compressed_page(page, initialized_size);
        return;
@@ -160,7 +156,7 @@ static inline void handle_bounds_compressed_page(struct page *page,
  * @xpage_done indicates whether the target page (@dest_pages[@xpage]) was
  * completed during the decompression of the compression block (@cb_start).
  *
- * Warning: This function *REQUIRES* PAGE_CACHE_SIZE >= 4096 or it will blow up
+ * Warning: This function *REQUIRES* PAGE_SIZE >= 4096 or it will blow up
 * unpredictably! You have been warned!
  *
  * Note to hackers: This function may not sleep until it has finished accessing
@@ -241,7 +237,7 @@ return_error:
                                if (di == xpage)
                                        *xpage_done = 1;
                                else
-                                       page_cache_release(dp);
+                                       put_page(dp);
                                dest_pages[di] = NULL;
                        }
                }
@@ -274,7 +270,7 @@ return_error:
                cb = cb_sb_end;
 
                /* Advance destination position to next sub-block. */
-               *dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_CACHE_MASK;
+               *dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_MASK;
                if (!*dest_ofs && (++*dest_index > dest_max_index))
                        goto return_overflow;
                goto do_next_sb;
@@ -301,7 +297,7 @@ return_error:
 
                /* Advance destination position to next sub-block. */
                *dest_ofs += NTFS_SB_SIZE;
-               if (!(*dest_ofs &= ~PAGE_CACHE_MASK)) {
+               if (!(*dest_ofs &= ~PAGE_MASK)) {
 finalize_page:
                        /*
                         * First stage: add current page index to array of
@@ -335,7 +331,7 @@ do_next_tag:
                        *dest_ofs += nr_bytes;
                }
                /* We have finished the current sub-block. */
-               if (!(*dest_ofs &= ~PAGE_CACHE_MASK))
+               if (!(*dest_ofs &= ~PAGE_MASK))
                        goto finalize_page;
                goto do_next_sb;
        }
@@ -462,7 +458,7 @@ return_overflow:
  * have been written to so that we would lose data if we were to just overwrite
  * them with the out-of-date uncompressed data.
  *
- * FIXME: For PAGE_CACHE_SIZE > cb_size we are not doing the Right Thing(TM) at
+ * FIXME: For PAGE_SIZE > cb_size we are not doing the Right Thing(TM) at
  * the end of the file I think. We need to detect this case and zero the out
  * of bounds remainder of the page in question and mark it as handled. At the
  * moment we would just return -EIO on such a page. This bug will only become
@@ -470,7 +466,7 @@ return_overflow:
  * clusters so is probably not going to be seen by anyone. Still this should
  * be fixed. (AIA)
  *
- * FIXME: Again for PAGE_CACHE_SIZE > cb_size we are screwing up both in
+ * FIXME: Again for PAGE_SIZE > cb_size we are screwing up both in
  * handling sparse and compressed cbs. (AIA)
  *
  * FIXME: At the moment we don't do any zeroing out in the case that
@@ -497,14 +493,14 @@ int ntfs_read_compressed_block(struct page *page)
        u64 cb_size_mask = cb_size - 1UL;
        VCN vcn;
        LCN lcn;
-       /* The first wanted vcn (minimum alignment is PAGE_CACHE_SIZE). */
-       VCN start_vcn = (((s64)index << PAGE_CACHE_SHIFT) & ~cb_size_mask) >>
+       /* The first wanted vcn (minimum alignment is PAGE_SIZE). */
+       VCN start_vcn = (((s64)index << PAGE_SHIFT) & ~cb_size_mask) >>
                        vol->cluster_size_bits;
        /*
         * The first vcn after the last wanted vcn (minimum alignment is again
-        * PAGE_CACHE_SIZE.
+        * PAGE_SIZE.
         */
-       VCN end_vcn = ((((s64)(index + 1UL) << PAGE_CACHE_SHIFT) + cb_size - 1)
+       VCN end_vcn = ((((s64)(index + 1UL) << PAGE_SHIFT) + cb_size - 1)
                        & ~cb_size_mask) >> vol->cluster_size_bits;
        /* Number of compression blocks (cbs) in the wanted vcn range. */
        unsigned int nr_cbs = (end_vcn - start_vcn) << vol->cluster_size_bits
@@ -515,7 +511,7 @@ int ntfs_read_compressed_block(struct page *page)
         * guarantees of start_vcn and end_vcn, no need to round up here.
         */
        unsigned int nr_pages = (end_vcn - start_vcn) <<
-                       vol->cluster_size_bits >> PAGE_CACHE_SHIFT;
+                       vol->cluster_size_bits >> PAGE_SHIFT;
        unsigned int xpage, max_page, cur_page, cur_ofs, i;
        unsigned int cb_clusters, cb_max_ofs;
        int block, max_block, cb_max_page, bhs_size, nr_bhs, err = 0;
@@ -549,7 +545,7 @@ int ntfs_read_compressed_block(struct page *page)
         * We have already been given one page, this is the one we must do.
         * Once again, the alignment guarantees keep it simple.
         */
-       offset = start_vcn << vol->cluster_size_bits >> PAGE_CACHE_SHIFT;
+       offset = start_vcn << vol->cluster_size_bits >> PAGE_SHIFT;
        xpage = index - offset;
        pages[xpage] = page;
        /*
@@ -560,13 +556,13 @@ int ntfs_read_compressed_block(struct page *page)
        i_size = i_size_read(VFS_I(ni));
        initialized_size = ni->initialized_size;
        read_unlock_irqrestore(&ni->size_lock, flags);
-       max_page = ((i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
+       max_page = ((i_size + PAGE_SIZE - 1) >> PAGE_SHIFT) -
                        offset;
        /* Is the page fully outside i_size? (truncate in progress) */
        if (xpage >= max_page) {
                kfree(bhs);
                kfree(pages);
-               zero_user(page, 0, PAGE_CACHE_SIZE);
+               zero_user(page, 0, PAGE_SIZE);
                ntfs_debug("Compressed read outside i_size - truncated?");
                SetPageUptodate(page);
                unlock_page(page);
@@ -591,7 +587,7 @@ int ntfs_read_compressed_block(struct page *page)
                                continue;
                        }
                        unlock_page(page);
-                       page_cache_release(page);
+                       put_page(page);
                        pages[i] = NULL;
                }
        }
@@ -735,9 +731,9 @@ lock_retry_remap:
        ntfs_debug("Successfully read the compression block.");
 
        /* The last page and maximum offset within it for the current cb. */
-       cb_max_page = (cur_page << PAGE_CACHE_SHIFT) + cur_ofs + cb_size;
-       cb_max_ofs = cb_max_page & ~PAGE_CACHE_MASK;
-       cb_max_page >>= PAGE_CACHE_SHIFT;
+       cb_max_page = (cur_page << PAGE_SHIFT) + cur_ofs + cb_size;
+       cb_max_ofs = cb_max_page & ~PAGE_MASK;
+       cb_max_page >>= PAGE_SHIFT;
 
        /* Catch end of file inside a compression block. */
        if (cb_max_page > max_page)
@@ -753,16 +749,11 @@ lock_retry_remap:
                for (; cur_page < cb_max_page; cur_page++) {
                        page = pages[cur_page];
                        if (page) {
-                               /*
-                                * FIXME: Using clear_page() will become wrong
-                                * when we get PAGE_CACHE_SIZE != PAGE_SIZE but
-                                * for now there is no problem.
-                                */
                                if (likely(!cur_ofs))
                                        clear_page(page_address(page));
                                else
                                        memset(page_address(page) + cur_ofs, 0,
-                                                       PAGE_CACHE_SIZE -
+                                                       PAGE_SIZE -
                                                        cur_ofs);
                                flush_dcache_page(page);
                                kunmap(page);
@@ -771,10 +762,10 @@ lock_retry_remap:
                                if (cur_page == xpage)
                                        xpage_done = 1;
                                else
-                                       page_cache_release(page);
+                                       put_page(page);
                                pages[cur_page] = NULL;
                        }
-                       cb_pos += PAGE_CACHE_SIZE - cur_ofs;
+                       cb_pos += PAGE_SIZE - cur_ofs;
                        cur_ofs = 0;
                        if (cb_pos >= cb_end)
                                break;
@@ -807,7 +798,7 @@ lock_retry_remap:
                 * synchronous io for the majority of pages.
                 * Or if we choose not to do the read-ahead/-behind stuff, we
                 * could just return block_read_full_page(pages[xpage]) as long
-                * as PAGE_CACHE_SIZE <= cb_size.
+                * as PAGE_SIZE <= cb_size.
                 */
                if (cb_max_ofs)
                        cb_max_page--;
@@ -816,8 +807,8 @@ lock_retry_remap:
                        page = pages[cur_page];
                        if (page)
                                memcpy(page_address(page) + cur_ofs, cb_pos,
-                                               PAGE_CACHE_SIZE - cur_ofs);
-                       cb_pos += PAGE_CACHE_SIZE - cur_ofs;
+                                               PAGE_SIZE - cur_ofs);
+                       cb_pos += PAGE_SIZE - cur_ofs;
                        cur_ofs = 0;
                        if (cb_pos >= cb_end)
                                break;
@@ -850,10 +841,10 @@ lock_retry_remap:
                                if (cur2_page == xpage)
                                        xpage_done = 1;
                                else
-                                       page_cache_release(page);
+                                       put_page(page);
                                pages[cur2_page] = NULL;
                        }
-                       cb_pos2 += PAGE_CACHE_SIZE - cur_ofs2;
+                       cb_pos2 += PAGE_SIZE - cur_ofs2;
                        cur_ofs2 = 0;
                        if (cb_pos2 >= cb_end)
                                break;
@@ -884,7 +875,7 @@ lock_retry_remap:
                                        kunmap(page);
                                        unlock_page(page);
                                        if (prev_cur_page != xpage)
-                                               page_cache_release(page);
+                                               put_page(page);
                                        pages[prev_cur_page] = NULL;
                                }
                        }
@@ -914,7 +905,7 @@ lock_retry_remap:
                        kunmap(page);
                        unlock_page(page);
                        if (cur_page != xpage)
-                               page_cache_release(page);
+                               put_page(page);
                        pages[cur_page] = NULL;
                }
        }
@@ -961,7 +952,7 @@ err_out:
                        kunmap(page);
                        unlock_page(page);
                        if (i != xpage)
-                               page_cache_release(page);
+                               put_page(page);
                }
        }
        kfree(pages);
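
The conversion throughout this series is mechanical: the page-cache
constants and helpers were defined as 1:1 aliases of the base page
API, so every replacement above is an identity.  A sketch of the old
aliases as they stood in include/linux/pagemap.h before their removal
(paraphrased from memory, not part of this diff):

    #define PAGE_CACHE_SHIFT        PAGE_SHIFT
    #define PAGE_CACHE_SIZE         PAGE_SIZE
    #define PAGE_CACHE_MASK         PAGE_MASK
    #define page_cache_get(page)    get_page(page)
    #define page_cache_release(page) put_page(page)
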
index b2eff5816adc3e915f03665c4b283f116a7b1eab..a1861357900127e19182932c39322c55d17fe5fe 100644 (file)
@@ -315,11 +315,11 @@ found_it:
 descend_into_child_node:
        /*
         * Convert vcn to index into the index allocation attribute in units
-        * of PAGE_CACHE_SIZE and map the page cache page, reading it from
+        * of PAGE_SIZE and map the page cache page, reading it from
         * disk if necessary.
         */
        page = ntfs_map_page(ia_mapping, vcn <<
-                       dir_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT);
+                       dir_ni->itype.index.vcn_size_bits >> PAGE_SHIFT);
        if (IS_ERR(page)) {
                ntfs_error(sb, "Failed to map directory index page, error %ld.",
                                -PTR_ERR(page));
@@ -331,9 +331,9 @@ descend_into_child_node:
 fast_descend_into_child_node:
        /* Get to the index allocation block. */
        ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<
-                       dir_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK));
+                       dir_ni->itype.index.vcn_size_bits) & ~PAGE_MASK));
        /* Bounds checks. */
-       if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) {
+       if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE) {
                ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
                                "inode 0x%lx or driver bug.", dir_ni->mft_no);
                goto unm_err_out;
@@ -366,7 +366,7 @@ fast_descend_into_child_node:
                goto unm_err_out;
        }
        index_end = (u8*)ia + dir_ni->itype.index.block_size;
-       if (index_end > kaddr + PAGE_CACHE_SIZE) {
+       if (index_end > kaddr + PAGE_SIZE) {
                ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
                                "0x%lx crosses page boundary. Impossible! "
                                "Cannot access! This is probably a bug in the "
@@ -559,9 +559,9 @@ found_it2:
                        /* If vcn is in the same page cache page as old_vcn we
                         * recycle the mapped page. */
                        if (old_vcn << vol->cluster_size_bits >>
-                                       PAGE_CACHE_SHIFT == vcn <<
+                                       PAGE_SHIFT == vcn <<
                                        vol->cluster_size_bits >>
-                                       PAGE_CACHE_SHIFT)
+                                       PAGE_SHIFT)
                                goto fast_descend_into_child_node;
                        unlock_page(page);
                        ntfs_unmap_page(page);
@@ -793,11 +793,11 @@ found_it:
 descend_into_child_node:
        /*
         * Convert vcn to index into the index allocation attribute in units
-        * of PAGE_CACHE_SIZE and map the page cache page, reading it from
+        * of PAGE_SIZE and map the page cache page, reading it from
         * disk if necessary.
         */
        page = ntfs_map_page(ia_mapping, vcn <<
-                       dir_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT);
+                       dir_ni->itype.index.vcn_size_bits >> PAGE_SHIFT);
        if (IS_ERR(page)) {
                ntfs_error(sb, "Failed to map directory index page, error %ld.",
                                -PTR_ERR(page));
@@ -809,9 +809,9 @@ descend_into_child_node:
 fast_descend_into_child_node:
        /* Get to the index allocation block. */
        ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<
-                       dir_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK));
+                       dir_ni->itype.index.vcn_size_bits) & ~PAGE_MASK));
        /* Bounds checks. */
-       if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) {
+       if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE) {
                ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
                                "inode 0x%lx or driver bug.", dir_ni->mft_no);
                goto unm_err_out;
@@ -844,7 +844,7 @@ fast_descend_into_child_node:
                goto unm_err_out;
        }
        index_end = (u8*)ia + dir_ni->itype.index.block_size;
-       if (index_end > kaddr + PAGE_CACHE_SIZE) {
+       if (index_end > kaddr + PAGE_SIZE) {
                ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
                                "0x%lx crosses page boundary. Impossible! "
                                "Cannot access! This is probably a bug in the "
@@ -968,9 +968,9 @@ found_it2:
                        /* If vcn is in the same page cache page as old_vcn we
                         * recycle the mapped page. */
                        if (old_vcn << vol->cluster_size_bits >>
-                                       PAGE_CACHE_SHIFT == vcn <<
+                                       PAGE_SHIFT == vcn <<
                                        vol->cluster_size_bits >>
-                                       PAGE_CACHE_SHIFT)
+                                       PAGE_SHIFT)
                                goto fast_descend_into_child_node;
                        unlock_page(page);
                        ntfs_unmap_page(page);
@@ -1246,15 +1246,15 @@ skip_index_root:
                goto iput_err_out;
        }
        /* Get the starting bit position in the current bitmap page. */
-       cur_bmp_pos = bmp_pos & ((PAGE_CACHE_SIZE * 8) - 1);
-       bmp_pos &= ~(u64)((PAGE_CACHE_SIZE * 8) - 1);
+       cur_bmp_pos = bmp_pos & ((PAGE_SIZE * 8) - 1);
+       bmp_pos &= ~(u64)((PAGE_SIZE * 8) - 1);
 get_next_bmp_page:
        ntfs_debug("Reading bitmap with page index 0x%llx, bit ofs 0x%llx",
-                       (unsigned long long)bmp_pos >> (3 + PAGE_CACHE_SHIFT),
+                       (unsigned long long)bmp_pos >> (3 + PAGE_SHIFT),
                        (unsigned long long)bmp_pos &
-                       (unsigned long long)((PAGE_CACHE_SIZE * 8) - 1));
+                       (unsigned long long)((PAGE_SIZE * 8) - 1));
        bmp_page = ntfs_map_page(bmp_mapping,
-                       bmp_pos >> (3 + PAGE_CACHE_SHIFT));
+                       bmp_pos >> (3 + PAGE_SHIFT));
        if (IS_ERR(bmp_page)) {
                ntfs_error(sb, "Reading index bitmap failed.");
                err = PTR_ERR(bmp_page);
@@ -1270,9 +1270,9 @@ find_next_index_buffer:
                 * If we have reached the end of the bitmap page, get the next
                 * page, and put away the old one.
                 */
-               if (unlikely((cur_bmp_pos >> 3) >= PAGE_CACHE_SIZE)) {
+               if (unlikely((cur_bmp_pos >> 3) >= PAGE_SIZE)) {
                        ntfs_unmap_page(bmp_page);
-                       bmp_pos += PAGE_CACHE_SIZE * 8;
+                       bmp_pos += PAGE_SIZE * 8;
                        cur_bmp_pos = 0;
                        goto get_next_bmp_page;
                }
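
The bitmap arithmetic in the hunk above treats bmp_pos as an absolute
bit index: >> 3 converts bits to bytes and >> PAGE_SHIFT converts
bytes to pages, so >> (3 + PAGE_SHIFT) jumps straight from a bit to
the page cache page holding it.  A hedged, self-contained
illustration (the EX_* names are invented for the example and assume
4 KiB pages):

    #include <stdint.h>

    #define EX_PAGE_SHIFT 12                     /* assume 4 KiB pages */
    #define EX_PAGE_SIZE  (1UL << EX_PAGE_SHIFT)
    #define EX_PAGE_MASK  (~(EX_PAGE_SIZE - 1))

    /* Page that holds absolute bit 'bit': bits -> bytes -> pages. */
    static inline unsigned long ex_bit_to_page(uint64_t bit)
    {
            return bit >> (3 + EX_PAGE_SHIFT);
    }

    /* Position of that bit among its page's PAGE_SIZE * 8 bits. */
    static inline unsigned ex_bit_in_page(uint64_t bit)
    {
            return bit & (EX_PAGE_SIZE * 8 - 1);
    }

For example, bit 0x8000 lands in page 0x8000 >> 15 = 1, at in-page
bit offset 0.
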
@@ -1285,8 +1285,8 @@ find_next_index_buffer:
        ntfs_debug("Handling index buffer 0x%llx.",
                        (unsigned long long)bmp_pos + cur_bmp_pos);
        /* If the current index buffer is in the same page we reuse the page. */
-       if ((prev_ia_pos & (s64)PAGE_CACHE_MASK) !=
-                       (ia_pos & (s64)PAGE_CACHE_MASK)) {
+       if ((prev_ia_pos & (s64)PAGE_MASK) !=
+                       (ia_pos & (s64)PAGE_MASK)) {
                prev_ia_pos = ia_pos;
                if (likely(ia_page != NULL)) {
                        unlock_page(ia_page);
@@ -1296,7 +1296,7 @@ find_next_index_buffer:
                 * Map the page cache page containing the current ia_pos,
                 * reading it from disk if necessary.
                 */
-               ia_page = ntfs_map_page(ia_mapping, ia_pos >> PAGE_CACHE_SHIFT);
+               ia_page = ntfs_map_page(ia_mapping, ia_pos >> PAGE_SHIFT);
                if (IS_ERR(ia_page)) {
                        ntfs_error(sb, "Reading index allocation data failed.");
                        err = PTR_ERR(ia_page);
@@ -1307,10 +1307,10 @@ find_next_index_buffer:
                kaddr = (u8*)page_address(ia_page);
        }
        /* Get the current index buffer. */
-       ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
-                       ~(s64)(ndir->itype.index.block_size - 1)));
+       ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_MASK &
+                                         ~(s64)(ndir->itype.index.block_size - 1)));
        /* Bounds checks. */
-       if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
+       if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE)) {
                ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
                                "inode 0x%lx or driver bug.", vdir->i_ino);
                goto err_out;
@@ -1348,7 +1348,7 @@ find_next_index_buffer:
                goto err_out;
        }
        index_end = (u8*)ia + ndir->itype.index.block_size;
-       if (unlikely(index_end > kaddr + PAGE_CACHE_SIZE)) {
+       if (unlikely(index_end > kaddr + PAGE_SIZE)) {
                ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
                                "0x%lx crosses page boundary. Impossible! "
                                "Cannot access! This is probably a bug in the "
index bed4d427dfaee110c7da84c70f32c673fbdc80aa..91117ada8528b0090befaeed2fa1ef7c7d38a787 100644 (file)
@@ -220,8 +220,8 @@ do_non_resident_extend:
                m = NULL;
        }
        mapping = vi->i_mapping;
-       index = old_init_size >> PAGE_CACHE_SHIFT;
-       end_index = (new_init_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+       index = old_init_size >> PAGE_SHIFT;
+       end_index = (new_init_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        do {
                /*
                 * Read the page.  If the page is not present, this will zero
@@ -233,7 +233,7 @@ do_non_resident_extend:
                        goto init_err_out;
                }
                if (unlikely(PageError(page))) {
-                       page_cache_release(page);
+                       put_page(page);
                        err = -EIO;
                        goto init_err_out;
                }
@@ -242,13 +242,13 @@ do_non_resident_extend:
                 * enough to make ntfs_writepage() work.
                 */
                write_lock_irqsave(&ni->size_lock, flags);
-               ni->initialized_size = (s64)(index + 1) << PAGE_CACHE_SHIFT;
+               ni->initialized_size = (s64)(index + 1) << PAGE_SHIFT;
                if (ni->initialized_size > new_init_size)
                        ni->initialized_size = new_init_size;
                write_unlock_irqrestore(&ni->size_lock, flags);
                /* Set the page dirty so it gets written out. */
                set_page_dirty(page);
-               page_cache_release(page);
+               put_page(page);
                /*
                 * Play nice with the vm and the rest of the system.  This is
                 * very much needed as we can potentially be modifying the
@@ -543,7 +543,7 @@ out:
 err_out:
        while (nr > 0) {
                unlock_page(pages[--nr]);
-               page_cache_release(pages[nr]);
+               put_page(pages[nr]);
        }
        goto out;
 }
@@ -573,7 +573,7 @@ static inline int ntfs_submit_bh_for_read(struct buffer_head *bh)
  * only partially being written to.
  *
  * If @nr_pages is greater than one, we are guaranteed that the cluster size is
- * greater than PAGE_CACHE_SIZE, that all pages in @pages are entirely inside
+ * greater than PAGE_SIZE, that all pages in @pages are entirely inside
  * the same cluster and that they are the entirety of that cluster, and that
  * the cluster is sparse, i.e. we need to allocate a cluster to fill the hole.
  *
@@ -653,7 +653,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
        u = 0;
 do_next_page:
        page = pages[u];
-       bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
+       bh_pos = (s64)page->index << PAGE_SHIFT;
        bh = head = page_buffers(page);
        do {
                VCN cdelta;
@@ -810,11 +810,11 @@ map_buffer_cached:
                                        
                                kaddr = kmap_atomic(page);
                                if (bh_pos < pos) {
-                                       pofs = bh_pos & ~PAGE_CACHE_MASK;
+                                       pofs = bh_pos & ~PAGE_MASK;
                                        memset(kaddr + pofs, 0, pos - bh_pos);
                                }
                                if (bh_end > end) {
-                                       pofs = end & ~PAGE_CACHE_MASK;
+                                       pofs = end & ~PAGE_MASK;
                                        memset(kaddr + pofs, 0, bh_end - end);
                                }
                                kunmap_atomic(kaddr);
@@ -942,7 +942,7 @@ rl_not_mapped_enoent:
                 * unmapped.  This can only happen when the cluster size is
                 * less than the page cache size.
                 */
-               if (unlikely(vol->cluster_size < PAGE_CACHE_SIZE)) {
+               if (unlikely(vol->cluster_size < PAGE_SIZE)) {
                        bh_cend = (bh_end + vol->cluster_size - 1) >>
                                        vol->cluster_size_bits;
                        if ((bh_cend <= cpos || bh_cpos >= cend)) {
@@ -1208,7 +1208,7 @@ rl_not_mapped_enoent:
                wait_on_buffer(bh);
                if (likely(buffer_uptodate(bh))) {
                        page = bh->b_page;
-                       bh_pos = ((s64)page->index << PAGE_CACHE_SHIFT) +
+                       bh_pos = ((s64)page->index << PAGE_SHIFT) +
                                        bh_offset(bh);
                        /*
                         * If the buffer overflows the initialized size, need
@@ -1350,7 +1350,7 @@ rl_not_mapped_enoent:
                bh = head = page_buffers(page);
                do {
                        if (u == nr_pages &&
-                                       ((s64)page->index << PAGE_CACHE_SHIFT) +
+                                       ((s64)page->index << PAGE_SHIFT) +
                                        bh_offset(bh) >= end)
                                break;
                        if (!buffer_new(bh))
@@ -1422,7 +1422,7 @@ static inline int ntfs_commit_pages_after_non_resident_write(
                bool partial;
 
                page = pages[u];
-               bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
+               bh_pos = (s64)page->index << PAGE_SHIFT;
                bh = head = page_buffers(page);
                partial = false;
                do {
@@ -1639,7 +1639,7 @@ static int ntfs_commit_pages_after_write(struct page **pages,
                if (end < attr_len)
                        memcpy(kaddr + end, kattr + end, attr_len - end);
                /* Zero the region outside the end of the attribute value. */
-               memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
+               memset(kaddr + attr_len, 0, PAGE_SIZE - attr_len);
                flush_dcache_page(page);
                SetPageUptodate(page);
        }
@@ -1706,7 +1706,7 @@ static size_t ntfs_copy_from_user_iter(struct page **pages, unsigned nr_pages,
        unsigned len, copied;
 
        do {
-               len = PAGE_CACHE_SIZE - ofs;
+               len = PAGE_SIZE - ofs;
                if (len > bytes)
                        len = bytes;
                copied = iov_iter_copy_from_user_atomic(*pages, &data, ofs,
@@ -1724,14 +1724,14 @@ out:
        return total;
 err:
        /* Zero the rest of the target like __copy_from_user(). */
-       len = PAGE_CACHE_SIZE - copied;
+       len = PAGE_SIZE - copied;
        do {
                if (len > bytes)
                        len = bytes;
                zero_user(*pages, copied, len);
                bytes -= len;
                copied = 0;
-               len = PAGE_CACHE_SIZE;
+               len = PAGE_SIZE;
        } while (++pages < last_page);
        goto out;
 }
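
ntfs_copy_from_user_iter() above walks the destination pages with the
usual chunking idiom: the first chunk runs from ofs to the end of its
page, every later chunk starts at in-page offset 0.  A minimal sketch
of the loop shape (invented names, reusing the EX_PAGE_* helpers from
the earlier sketch):

    #include <stddef.h>

    /* Visit each (offset, length) chunk of a byte range spread
     * across consecutive pages. */
    static void ex_for_each_chunk(size_t ofs, size_t bytes)
    {
            while (bytes) {
                    size_t len = EX_PAGE_SIZE - ofs; /* room in this page */

                    if (len > bytes)
                            len = bytes;
                    /* ... copy or zero 'len' bytes at offset 'ofs' ... */
                    bytes -= len;
                    ofs = 0;     /* later pages start at their base */
            }
    }
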
@@ -1787,8 +1787,8 @@ static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i,
         * attributes.
         */
        nr_pages = 1;
-       if (vol->cluster_size > PAGE_CACHE_SIZE && NInoNonResident(ni))
-               nr_pages = vol->cluster_size >> PAGE_CACHE_SHIFT;
+       if (vol->cluster_size > PAGE_SIZE && NInoNonResident(ni))
+               nr_pages = vol->cluster_size >> PAGE_SHIFT;
        last_vcn = -1;
        do {
                VCN vcn;
@@ -1796,9 +1796,9 @@ static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i,
                unsigned ofs, do_pages, u;
                size_t copied;
 
-               start_idx = idx = pos >> PAGE_CACHE_SHIFT;
-               ofs = pos & ~PAGE_CACHE_MASK;
-               bytes = PAGE_CACHE_SIZE - ofs;
+               start_idx = idx = pos >> PAGE_SHIFT;
+               ofs = pos & ~PAGE_MASK;
+               bytes = PAGE_SIZE - ofs;
                do_pages = 1;
                if (nr_pages > 1) {
                        vcn = pos >> vol->cluster_size_bits;
@@ -1832,7 +1832,7 @@ static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i,
                                if (lcn == LCN_HOLE) {
                                        start_idx = (pos & ~(s64)
                                                        vol->cluster_size_mask)
-                                                       >> PAGE_CACHE_SHIFT;
+                                                       >> PAGE_SHIFT;
                                        bytes = vol->cluster_size - (pos &
                                                        vol->cluster_size_mask);
                                        do_pages = nr_pages;
@@ -1871,12 +1871,12 @@ again:
                        if (unlikely(status)) {
                                do {
                                        unlock_page(pages[--do_pages]);
-                                       page_cache_release(pages[do_pages]);
+                                       put_page(pages[do_pages]);
                                } while (do_pages);
                                break;
                        }
                }
-               u = (pos >> PAGE_CACHE_SHIFT) - pages[0]->index;
+               u = (pos >> PAGE_SHIFT) - pages[0]->index;
                copied = ntfs_copy_from_user_iter(pages + u, do_pages - u, ofs,
                                        i, bytes);
                ntfs_flush_dcache_pages(pages + u, do_pages - u);
@@ -1889,7 +1889,7 @@ again:
                }
                do {
                        unlock_page(pages[--do_pages]);
-                       page_cache_release(pages[do_pages]);
+                       put_page(pages[do_pages]);
                } while (do_pages);
                if (unlikely(status < 0))
                        break;
@@ -1921,7 +1921,7 @@ again:
                }
        } while (iov_iter_count(i));
        if (cached_page)
-               page_cache_release(cached_page);
+               put_page(cached_page);
        ntfs_debug("Done.  Returning %s (written 0x%lx, status %li).",
                        written ? "written" : "status", (unsigned long)written,
                        (long)status);
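
The write loop above sizes its page batch from the volume geometry:
normally one page, but a non-resident attribute on a volume whose
clusters are bigger than a page must, when the write lands in a hole
(LCN_HOLE), lock the whole cluster's worth of pages before
allocating.  A hedged restatement of the nr_pages/do_pages logic, not
the driver's code:

    /* Pages one write iteration pins, per the hunk above. */
    static unsigned ex_write_batch(unsigned long cluster_size,
                                   int non_resident, int hits_a_hole)
    {
            if (non_resident && cluster_size > EX_PAGE_SIZE && hits_a_hole)
                    return cluster_size >> EX_PAGE_SHIFT; /* whole cluster */
            return 1;
    }
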
index 096c135691aeda354c553fdb622286ea7f048e06..0d645f3579300f78890d70f983501829e3ceb0a4 100644 (file)
@@ -272,11 +272,11 @@ done:
 descend_into_child_node:
        /*
         * Convert vcn to index into the index allocation attribute in units
-        * of PAGE_CACHE_SIZE and map the page cache page, reading it from
+        * of PAGE_SIZE and map the page cache page, reading it from
         * disk if necessary.
         */
        page = ntfs_map_page(ia_mapping, vcn <<
-                       idx_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT);
+                       idx_ni->itype.index.vcn_size_bits >> PAGE_SHIFT);
        if (IS_ERR(page)) {
                ntfs_error(sb, "Failed to map index page, error %ld.",
                                -PTR_ERR(page));
@@ -288,9 +288,9 @@ descend_into_child_node:
 fast_descend_into_child_node:
        /* Get to the index allocation block. */
        ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<
-                       idx_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK));
+                       idx_ni->itype.index.vcn_size_bits) & ~PAGE_MASK));
        /* Bounds checks. */
-       if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) {
+       if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE) {
                ntfs_error(sb, "Out of bounds check failed.  Corrupt inode "
                                "0x%lx or driver bug.", idx_ni->mft_no);
                goto unm_err_out;
@@ -323,7 +323,7 @@ fast_descend_into_child_node:
                goto unm_err_out;
        }
        index_end = (u8*)ia + idx_ni->itype.index.block_size;
-       if (index_end > kaddr + PAGE_CACHE_SIZE) {
+       if (index_end > kaddr + PAGE_SIZE) {
                ntfs_error(sb, "Index buffer (VCN 0x%llx) of inode 0x%lx "
                                "crosses page boundary.  Impossible!  Cannot "
                                "access!  This is probably a bug in the "
@@ -427,9 +427,9 @@ ia_done:
                 * the mapped page.
                 */
                if (old_vcn << vol->cluster_size_bits >>
-                               PAGE_CACHE_SHIFT == vcn <<
+                               PAGE_SHIFT == vcn <<
                                vol->cluster_size_bits >>
-                               PAGE_CACHE_SHIFT)
+                               PAGE_SHIFT)
                        goto fast_descend_into_child_node;
                unlock_page(page);
                ntfs_unmap_page(page);
index d284f07eda7754e721bfd2782ba410f04ae145e4..f40972d6df9060d154058a91d9423c2f2ad875de 100644 (file)
@@ -868,12 +868,12 @@ skip_attr_list_load:
                                        ni->itype.index.block_size);
                        goto unm_err_out;
                }
-               if (ni->itype.index.block_size > PAGE_CACHE_SIZE) {
+               if (ni->itype.index.block_size > PAGE_SIZE) {
                        ntfs_error(vi->i_sb, "Index block size (%u) > "
-                                       "PAGE_CACHE_SIZE (%ld) is not "
+                                       "PAGE_SIZE (%ld) is not "
                                        "supported.  Sorry.",
                                        ni->itype.index.block_size,
-                                       PAGE_CACHE_SIZE);
+                                       PAGE_SIZE);
                        err = -EOPNOTSUPP;
                        goto unm_err_out;
                }
@@ -1585,10 +1585,10 @@ static int ntfs_read_locked_index_inode(struct inode *base_vi, struct inode *vi)
                                "two.", ni->itype.index.block_size);
                goto unm_err_out;
        }
-       if (ni->itype.index.block_size > PAGE_CACHE_SIZE) {
-               ntfs_error(vi->i_sb, "Index block size (%u) > PAGE_CACHE_SIZE "
+       if (ni->itype.index.block_size > PAGE_SIZE) {
+               ntfs_error(vi->i_sb, "Index block size (%u) > PAGE_SIZE "
                                "(%ld) is not supported.  Sorry.",
-                               ni->itype.index.block_size, PAGE_CACHE_SIZE);
+                               ni->itype.index.block_size, PAGE_SIZE);
                err = -EOPNOTSUPP;
                goto unm_err_out;
        }
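
Both checks above enforce the same invariant: an index block is
mapped and validated through a single page cache page (hence the
"crosses page boundary" errors elsewhere in this diff), so its size
must not exceed PAGE_SIZE.  A sketch of the combined constraint (the
power-of-two test mirrors the adjacent "not a power of two" error;
invented name):

    /* Index block must be a nonzero power of two within one page. */
    static int ex_index_block_size_ok(unsigned block_size)
    {
            return block_size != 0 &&
                   (block_size & (block_size - 1)) == 0 &&
                   block_size <= EX_PAGE_SIZE;
    }
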
index 1711b710b641f40a0c8c5690a94c9681f2b5fa92..27a24a42f71205fc3a800cb61050b33956a156ac 100644 (file)
@@ -283,15 +283,15 @@ runlist_element *ntfs_cluster_alloc(ntfs_volume *vol, const VCN start_vcn,
                        ntfs_unmap_page(page);
                }
                page = ntfs_map_page(mapping, last_read_pos >>
-                               PAGE_CACHE_SHIFT);
+                               PAGE_SHIFT);
                if (IS_ERR(page)) {
                        err = PTR_ERR(page);
                        ntfs_error(vol->sb, "Failed to map page.");
                        goto out;
                }
-               buf_size = last_read_pos & ~PAGE_CACHE_MASK;
+               buf_size = last_read_pos & ~PAGE_MASK;
                buf = page_address(page) + buf_size;
-               buf_size = PAGE_CACHE_SIZE - buf_size;
+               buf_size = PAGE_SIZE - buf_size;
                if (unlikely(last_read_pos + buf_size > i_size))
                        buf_size = i_size - last_read_pos;
                buf_size <<= 3;
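
The cluster allocator above scans $Bitmap one page at a time: the
current byte position selects a page, the rest of that page (clamped
to i_size) is the scan window, and buf_size <<= 3 turns the window's
bytes into bits, one bit per cluster.  Sketch of the window
computation (invented names):

    /* Page holding bitmap byte 'pos' and the usable bytes in it. */
    static void ex_bitmap_window(uint64_t pos, uint64_t i_size,
                                 unsigned long *page_index,
                                 uint64_t *avail)
    {
            *page_index = pos >> EX_PAGE_SHIFT;
            *avail = EX_PAGE_SIZE - (pos & ~EX_PAGE_MASK);
            if (pos + *avail > i_size)
                    *avail = i_size - pos;
            /* callers then scan (*avail << 3) bits, one per cluster */
    }
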
index c71de292c5ade3b5103a68e692aea28f540d0246..9d71213ca81e71818c76775b4ebead72ac0670dc 100644 (file)
@@ -381,7 +381,7 @@ static int ntfs_check_and_load_restart_page(struct inode *vi,
         * completely inside @rp, just copy it from there.  Otherwise map all
         * the required pages and copy the data from them.
         */
-       size = PAGE_CACHE_SIZE - (pos & ~PAGE_CACHE_MASK);
+       size = PAGE_SIZE - (pos & ~PAGE_MASK);
        if (size >= le32_to_cpu(rp->system_page_size)) {
                memcpy(trp, rp, le32_to_cpu(rp->system_page_size));
        } else {
@@ -394,8 +394,8 @@ static int ntfs_check_and_load_restart_page(struct inode *vi,
                /* Copy the remaining data one page at a time. */
                have_read = size;
                to_read = le32_to_cpu(rp->system_page_size) - size;
-               idx = (pos + size) >> PAGE_CACHE_SHIFT;
-               BUG_ON((pos + size) & ~PAGE_CACHE_MASK);
+               idx = (pos + size) >> PAGE_SHIFT;
+               BUG_ON((pos + size) & ~PAGE_MASK);
                do {
                        page = ntfs_map_page(vi->i_mapping, idx);
                        if (IS_ERR(page)) {
@@ -406,7 +406,7 @@ static int ntfs_check_and_load_restart_page(struct inode *vi,
                                        err = -EIO;
                                goto err_out;
                        }
-                       size = min_t(int, to_read, PAGE_CACHE_SIZE);
+                       size = min_t(int, to_read, PAGE_SIZE);
                        memcpy((u8*)trp + have_read, page_address(page), size);
                        ntfs_unmap_page(page);
                        have_read += size;
@@ -509,11 +509,11 @@ bool ntfs_check_logfile(struct inode *log_vi, RESTART_PAGE_HEADER **rp)
         * log page size if the page cache size is between the default log page
         * size and twice that.
         */
-       if (PAGE_CACHE_SIZE >= DefaultLogPageSize && PAGE_CACHE_SIZE <=
+       if (PAGE_SIZE >= DefaultLogPageSize && PAGE_SIZE <=
                        DefaultLogPageSize * 2)
                log_page_size = DefaultLogPageSize;
        else
-               log_page_size = PAGE_CACHE_SIZE;
+               log_page_size = PAGE_SIZE;
        log_page_mask = log_page_size - 1;
        /*
         * Use ntfs_ffs() instead of ffs() to enable the compiler to
@@ -539,7 +539,7 @@ bool ntfs_check_logfile(struct inode *log_vi, RESTART_PAGE_HEADER **rp)
         * to be empty.
         */
        for (pos = 0; pos < size; pos <<= 1) {
-               pgoff_t idx = pos >> PAGE_CACHE_SHIFT;
+               pgoff_t idx = pos >> PAGE_SHIFT;
                if (!page || page->index != idx) {
                        if (page)
                                ntfs_unmap_page(page);
@@ -550,7 +550,7 @@ bool ntfs_check_logfile(struct inode *log_vi, RESTART_PAGE_HEADER **rp)
                                goto err_out;
                        }
                }
-               kaddr = (u8*)page_address(page) + (pos & ~PAGE_CACHE_MASK);
+               kaddr = (u8*)page_address(page) + (pos & ~PAGE_MASK);
                /*
                 * A non-empty block means the logfile is not empty while an
                 * empty block after a non-empty block has been encountered
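
The log page size selection in the hunk above keeps the on-disk
default whenever the system page size sits between the default and
twice the default, and falls back to the system page size otherwise.
A compact restatement (DefaultLogPageSize is taken to be 4096 here;
treat that constant as an assumption, see fs/ntfs/logfile.h):

    /* Pick the $LogFile page size relative to the system page size. */
    static unsigned long ex_log_page_size(unsigned long sys_page_size)
    {
            const unsigned long def = 4096; /* assumed DefaultLogPageSize */

            if (sys_page_size >= def && sys_page_size <= def * 2)
                    return def;
            return sys_page_size;
    }
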
index 3014a36a255b97ddcac0b1852fa0d11c613b7a9d..37b2501caaa43e31eb3a20dd7ea75f2d0429d945 100644 (file)
@@ -61,16 +61,16 @@ static inline MFT_RECORD *map_mft_record_page(ntfs_inode *ni)
         * here if the volume was that big...
         */
        index = (u64)ni->mft_no << vol->mft_record_size_bits >>
-                       PAGE_CACHE_SHIFT;
-       ofs = (ni->mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK;
+                       PAGE_SHIFT;
+       ofs = (ni->mft_no << vol->mft_record_size_bits) & ~PAGE_MASK;
 
        i_size = i_size_read(mft_vi);
        /* The maximum valid index into the page cache for $MFT's data. */
-       end_index = i_size >> PAGE_CACHE_SHIFT;
+       end_index = i_size >> PAGE_SHIFT;
 
        /* If the wanted index is out of bounds the mft record doesn't exist. */
        if (unlikely(index >= end_index)) {
-               if (index > end_index || (i_size & ~PAGE_CACHE_MASK) < ofs +
+               if (index > end_index || (i_size & ~PAGE_MASK) < ofs +
                                vol->mft_record_size) {
                        page = ERR_PTR(-ENOENT);
                        ntfs_error(vol->sb, "Attempt to read mft record 0x%lx, "
@@ -487,7 +487,7 @@ int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no,
        }
        /* Get the page containing the mirror copy of the mft record @m. */
        page = ntfs_map_page(vol->mftmirr_ino->i_mapping, mft_no >>
-                       (PAGE_CACHE_SHIFT - vol->mft_record_size_bits));
+                       (PAGE_SHIFT - vol->mft_record_size_bits));
        if (IS_ERR(page)) {
                ntfs_error(vol->sb, "Failed to map mft mirror page.");
                err = PTR_ERR(page);
@@ -497,7 +497,7 @@ int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no,
        BUG_ON(!PageUptodate(page));
        ClearPageUptodate(page);
        /* Offset of the mft mirror record inside the page. */
-       page_ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK;
+       page_ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_MASK;
        /* The address in the page of the mirror copy of the mft record @m. */
        kmirr = page_address(page) + page_ofs;
        /* Copy the mst protected mft record to the mirror. */
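
ntfs_sync_mft_mirror() above locates the mirror page with the inverse
of the usual shift: when mft records are smaller than a page, several
records share a page, so record N lives in page
N >> (PAGE_SHIFT - mft_record_size_bits).  Illustrated with invented
names (assumes record size <= page size, which the mount-time checks
later in this diff guarantee):

    /* Page index of record 'no' when records pack into pages. */
    static unsigned long ex_record_page(unsigned long no,
                                        unsigned rec_bits)
    {
            return no >> (EX_PAGE_SHIFT - rec_bits);
    }

With 1 KiB records (rec_bits = 10) and 4 KiB pages, records 0-3 share
page 0, records 4-7 share page 1, and so on.
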
@@ -1178,8 +1178,8 @@ static int ntfs_mft_bitmap_find_and_alloc_free_rec_nolock(ntfs_volume *vol,
        for (; pass <= 2;) {
                /* Cap size to pass_end. */
                ofs = data_pos >> 3;
-               page_ofs = ofs & ~PAGE_CACHE_MASK;
-               size = PAGE_CACHE_SIZE - page_ofs;
+               page_ofs = ofs & ~PAGE_MASK;
+               size = PAGE_SIZE - page_ofs;
                ll = ((pass_end + 7) >> 3) - ofs;
                if (size > ll)
                        size = ll;
@@ -1190,7 +1190,7 @@ static int ntfs_mft_bitmap_find_and_alloc_free_rec_nolock(ntfs_volume *vol,
                 */
                if (size) {
                        page = ntfs_map_page(mftbmp_mapping,
-                                       ofs >> PAGE_CACHE_SHIFT);
+                                       ofs >> PAGE_SHIFT);
                        if (IS_ERR(page)) {
                                ntfs_error(vol->sb, "Failed to read mft "
                                                "bitmap, aborting.");
@@ -1328,13 +1328,13 @@ static int ntfs_mft_bitmap_extend_allocation_nolock(ntfs_volume *vol)
         */
        ll = lcn >> 3;
        page = ntfs_map_page(vol->lcnbmp_ino->i_mapping,
-                       ll >> PAGE_CACHE_SHIFT);
+                       ll >> PAGE_SHIFT);
        if (IS_ERR(page)) {
                up_write(&mftbmp_ni->runlist.lock);
                ntfs_error(vol->sb, "Failed to read from lcn bitmap.");
                return PTR_ERR(page);
        }
-       b = (u8*)page_address(page) + (ll & ~PAGE_CACHE_MASK);
+       b = (u8*)page_address(page) + (ll & ~PAGE_MASK);
        tb = 1 << (lcn & 7ull);
        down_write(&vol->lcnbmp_lock);
        if (*b != 0xff && !(*b & tb)) {
@@ -2103,14 +2103,14 @@ static int ntfs_mft_record_format(const ntfs_volume *vol, const s64 mft_no)
         * The index into the page cache and the offset within the page cache
         * page of the wanted mft record.
         */
-       index = mft_no << vol->mft_record_size_bits >> PAGE_CACHE_SHIFT;
-       ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK;
+       index = mft_no << vol->mft_record_size_bits >> PAGE_SHIFT;
+       ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_MASK;
        /* The maximum valid index into the page cache for $MFT's data. */
        i_size = i_size_read(mft_vi);
-       end_index = i_size >> PAGE_CACHE_SHIFT;
+       end_index = i_size >> PAGE_SHIFT;
        if (unlikely(index >= end_index)) {
                if (unlikely(index > end_index || ofs + vol->mft_record_size >=
-                               (i_size & ~PAGE_CACHE_MASK))) {
+                               (i_size & ~PAGE_MASK))) {
                        ntfs_error(vol->sb, "Tried to format non-existing mft "
                                        "record 0x%llx.", (long long)mft_no);
                        return -ENOENT;
@@ -2515,8 +2515,8 @@ mft_rec_already_initialized:
         * We now have allocated and initialized the mft record.  Calculate the
         * index of and the offset within the page cache page the record is in.
         */
-       index = bit << vol->mft_record_size_bits >> PAGE_CACHE_SHIFT;
-       ofs = (bit << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK;
+       index = bit << vol->mft_record_size_bits >> PAGE_SHIFT;
+       ofs = (bit << vol->mft_record_size_bits) & ~PAGE_MASK;
        /* Read, map, and pin the page containing the mft record. */
        page = ntfs_map_page(vol->mft_ino->i_mapping, index);
        if (IS_ERR(page)) {
index c581e26a350d5e15ebe4740a841da92c3ad4cf40..12de47b96ca95d219d03e4be0c3c298b05f56789 100644 (file)
@@ -43,7 +43,7 @@ typedef enum {
        NTFS_MAX_NAME_LEN       = 255,
        NTFS_MAX_ATTR_NAME_LEN  = 255,
        NTFS_MAX_CLUSTER_SIZE   = 64 * 1024,    /* 64kiB */
-       NTFS_MAX_PAGES_PER_CLUSTER = NTFS_MAX_CLUSTER_SIZE / PAGE_CACHE_SIZE,
+       NTFS_MAX_PAGES_PER_CLUSTER = NTFS_MAX_CLUSTER_SIZE / PAGE_SIZE,
 } NTFS_CONSTANTS;
 
 /* Global variables. */
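
As a worked example of the constant above: with the common 4 KiB page
size, NTFS_MAX_PAGES_PER_CLUSTER evaluates to 64 KiB / 4 KiB = 16,
i.e. a maximal NTFS cluster spans at most sixteen page cache pages;
on a 64 KiB-page system it evaluates to 1.
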
index 1b38abdaa3ed3da7522f461ae49a77ccf5ee5982..ecb49870a680cd594e276b10a59ff679f5e681e7 100644 (file)
@@ -823,14 +823,14 @@ static bool parse_ntfs_boot_sector(ntfs_volume *vol, const NTFS_BOOT_SECTOR *b)
        ntfs_debug("vol->mft_record_size_bits = %i (0x%x)",
                        vol->mft_record_size_bits, vol->mft_record_size_bits);
        /*
-        * We cannot support mft record sizes above the PAGE_CACHE_SIZE since
+        * We cannot support mft record sizes above the PAGE_SIZE since
         * we store $MFT/$DATA, the table of mft records in the page cache.
         */
-       if (vol->mft_record_size > PAGE_CACHE_SIZE) {
+       if (vol->mft_record_size > PAGE_SIZE) {
                ntfs_error(vol->sb, "Mft record size (%i) exceeds the "
-                               "PAGE_CACHE_SIZE on your system (%lu).  "
+                               "PAGE_SIZE on your system (%lu).  "
                                "This is not supported.  Sorry.",
-                               vol->mft_record_size, PAGE_CACHE_SIZE);
+                               vol->mft_record_size, PAGE_SIZE);
                return false;
        }
        /* We cannot support mft record sizes below the sector size. */
@@ -1096,7 +1096,7 @@ static bool check_mft_mirror(ntfs_volume *vol)
 
        ntfs_debug("Entering.");
        /* Compare contents of $MFT and $MFTMirr. */
-       mrecs_per_page = PAGE_CACHE_SIZE / vol->mft_record_size;
+       mrecs_per_page = PAGE_SIZE / vol->mft_record_size;
        BUG_ON(!mrecs_per_page);
        BUG_ON(!vol->mftmirr_size);
        mft_page = mirr_page = NULL;
@@ -1615,20 +1615,20 @@ static bool load_and_init_attrdef(ntfs_volume *vol)
        if (!vol->attrdef)
                goto iput_failed;
        index = 0;
-       max_index = i_size >> PAGE_CACHE_SHIFT;
-       size = PAGE_CACHE_SIZE;
+       max_index = i_size >> PAGE_SHIFT;
+       size = PAGE_SIZE;
        while (index < max_index) {
                /* Read the attrdef table and copy it into the linear buffer. */
 read_partial_attrdef_page:
                page = ntfs_map_page(ino->i_mapping, index);
                if (IS_ERR(page))
                        goto free_iput_failed;
-               memcpy((u8*)vol->attrdef + (index++ << PAGE_CACHE_SHIFT),
+               memcpy((u8*)vol->attrdef + (index++ << PAGE_SHIFT),
                                page_address(page), size);
                ntfs_unmap_page(page);
        };
-       if (size == PAGE_CACHE_SIZE) {
-               size = i_size & ~PAGE_CACHE_MASK;
+       if (size == PAGE_SIZE) {
+               size = i_size & ~PAGE_MASK;
                if (size)
                        goto read_partial_attrdef_page;
        }
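
load_and_init_attrdef() above (and load_and_init_upcase() just below)
copy an on-disk table into a linear buffer with the same loop shape:
full pages first, then one partial tail of i_size & ~PAGE_MASK bytes.
A self-contained sketch, with a flat src standing in for the mapped
pages (needs <stdint.h>, <string.h> and the EX_PAGE_* helpers from
the earlier sketch):

    /* Copy an i_size-byte table page by page; the tail is partial. */
    static void ex_read_table(uint8_t *dst, const uint8_t *src,
                              uint64_t i_size)
    {
            uint64_t full = i_size >> EX_PAGE_SHIFT, idx;

            for (idx = 0; idx < full; idx++)
                    memcpy(dst + (idx << EX_PAGE_SHIFT),
                           src + (idx << EX_PAGE_SHIFT), EX_PAGE_SIZE);
            if (i_size & ~EX_PAGE_MASK)            /* partial tail page */
                    memcpy(dst + (full << EX_PAGE_SHIFT),
                           src + (full << EX_PAGE_SHIFT),
                           i_size & ~EX_PAGE_MASK);
    }
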
@@ -1684,20 +1684,20 @@ static bool load_and_init_upcase(ntfs_volume *vol)
        if (!vol->upcase)
                goto iput_upcase_failed;
        index = 0;
-       max_index = i_size >> PAGE_CACHE_SHIFT;
-       size = PAGE_CACHE_SIZE;
+       max_index = i_size >> PAGE_SHIFT;
+       size = PAGE_SIZE;
        while (index < max_index) {
                /* Read the upcase table and copy it into the linear buffer. */
 read_partial_upcase_page:
                page = ntfs_map_page(ino->i_mapping, index);
                if (IS_ERR(page))
                        goto iput_upcase_failed;
-               memcpy((char*)vol->upcase + (index++ << PAGE_CACHE_SHIFT),
+               memcpy((char*)vol->upcase + (index++ << PAGE_SHIFT),
                                page_address(page), size);
                ntfs_unmap_page(page);
        };
-       if (size == PAGE_CACHE_SIZE) {
-               size = i_size & ~PAGE_CACHE_MASK;
+       if (size == PAGE_SIZE) {
+               size = i_size & ~PAGE_MASK;
                if (size)
                        goto read_partial_upcase_page;
        }
@@ -2471,14 +2471,14 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
        down_read(&vol->lcnbmp_lock);
        /*
         * Convert the number of bits into bytes rounded up, then convert into
-        * multiples of PAGE_CACHE_SIZE, rounding up so that if we have one
+        * multiples of PAGE_SIZE, rounding up so that if we have one
         * full and one partial page max_index = 2.
         */
-       max_index = (((vol->nr_clusters + 7) >> 3) + PAGE_CACHE_SIZE - 1) >>
-                       PAGE_CACHE_SHIFT;
-       /* Use multiples of 4 bytes, thus max_size is PAGE_CACHE_SIZE / 4. */
+       max_index = (((vol->nr_clusters + 7) >> 3) + PAGE_SIZE - 1) >>
+                       PAGE_SHIFT;
+       /* Use multiples of 4 bytes, thus max_size is PAGE_SIZE / 4. */
        ntfs_debug("Reading $Bitmap, max_index = 0x%lx, max_size = 0x%lx.",
-                       max_index, PAGE_CACHE_SIZE / 4);
+                       max_index, PAGE_SIZE / 4);
        for (index = 0; index < max_index; index++) {
                unsigned long *kaddr;
 
@@ -2491,7 +2491,7 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
                if (IS_ERR(page)) {
                        ntfs_debug("read_mapping_page() error. Skipping "
                                        "page (index 0x%lx).", index);
-                       nr_free -= PAGE_CACHE_SIZE * 8;
+                       nr_free -= PAGE_SIZE * 8;
                        continue;
                }
                kaddr = kmap_atomic(page);
@@ -2503,9 +2503,9 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
                 * ntfs_readpage().
                 */
                nr_free -= bitmap_weight(kaddr,
-                                       PAGE_CACHE_SIZE * BITS_PER_BYTE);
+                                       PAGE_SIZE * BITS_PER_BYTE);
                kunmap_atomic(kaddr);
-               page_cache_release(page);
+               put_page(page);
        }
        ntfs_debug("Finished reading $Bitmap, last index = 0x%lx.", index - 1);
        /*
@@ -2547,9 +2547,9 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
        pgoff_t index;
 
        ntfs_debug("Entering.");
-       /* Use multiples of 4 bytes, thus max_size is PAGE_CACHE_SIZE / 4. */
+       /* Use multiples of 4 bytes, thus max_size is PAGE_SIZE / 4. */
        ntfs_debug("Reading $MFT/$BITMAP, max_index = 0x%lx, max_size = "
-                       "0x%lx.", max_index, PAGE_CACHE_SIZE / 4);
+                       "0x%lx.", max_index, PAGE_SIZE / 4);
        for (index = 0; index < max_index; index++) {
                unsigned long *kaddr;
 
@@ -2562,7 +2562,7 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
                if (IS_ERR(page)) {
                        ntfs_debug("read_mapping_page() error. Skipping "
                                        "page (index 0x%lx).", index);
-                       nr_free -= PAGE_CACHE_SIZE * 8;
+                       nr_free -= PAGE_SIZE * 8;
                        continue;
                }
                kaddr = kmap_atomic(page);
@@ -2574,9 +2574,9 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
                 * ntfs_readpage().
                 */
                nr_free -= bitmap_weight(kaddr,
-                                       PAGE_CACHE_SIZE * BITS_PER_BYTE);
+                                       PAGE_SIZE * BITS_PER_BYTE);
                kunmap_atomic(kaddr);
-               page_cache_release(page);
+               put_page(page);
        }
        ntfs_debug("Finished reading $MFT/$BITMAP, last index = 0x%lx.",
                        index - 1);
@@ -2618,17 +2618,17 @@ static int ntfs_statfs(struct dentry *dentry, struct kstatfs *sfs)
        /* Type of filesystem. */
        sfs->f_type   = NTFS_SB_MAGIC;
        /* Optimal transfer block size. */
-       sfs->f_bsize  = PAGE_CACHE_SIZE;
+       sfs->f_bsize  = PAGE_SIZE;
        /*
         * Total data blocks in filesystem in units of f_bsize and since
         * inodes are also stored in data blocks ($MFT is a file), this is just
         * the total clusters.
         */
        sfs->f_blocks = vol->nr_clusters << vol->cluster_size_bits >>
-                               PAGE_CACHE_SHIFT;
+                               PAGE_SHIFT;
        /* Free data blocks in filesystem in units of f_bsize. */
        size          = get_nr_free_clusters(vol) << vol->cluster_size_bits >>
-                               PAGE_CACHE_SHIFT;
+                               PAGE_SHIFT;
        if (size < 0LL)
                size = 0LL;
        /* Free blocks avail to non-superuser, same as above on NTFS. */
@@ -2639,11 +2639,11 @@ static int ntfs_statfs(struct dentry *dentry, struct kstatfs *sfs)
        size = i_size_read(vol->mft_ino) >> vol->mft_record_size_bits;
        /*
         * Convert the maximum number of set bits into bytes rounded up, then
-        * convert into multiples of PAGE_CACHE_SIZE, rounding up so that if we
+        * convert into multiples of PAGE_SIZE, rounding up so that if we
         * have one full and one partial page max_index = 2.
         */
        max_index = ((((mft_ni->initialized_size >> vol->mft_record_size_bits)
-                       + 7) >> 3) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+                       + 7) >> 3) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        read_unlock_irqrestore(&mft_ni->size_lock, flags);
        /* Number of inodes in filesystem (at this point in time). */
        sfs->f_files = size;
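
The max_index computation above rounds twice: bits up to whole bytes
((bits + 7) >> 3), then bytes up to whole pages, so one full page of
bits plus a single extra bit yields max_index = 2, exactly as the
comment says.  As a sketch:

    /* Pages needed to hold 'bits' bitmap bits, rounding up twice. */
    static unsigned long ex_bits_to_pages(uint64_t bits)
    {
            uint64_t bytes = (bits + 7) >> 3;

            return (bytes + EX_PAGE_SIZE - 1) >> EX_PAGE_SHIFT;
    }
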
@@ -2765,15 +2765,15 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
        if (!parse_options(vol, (char*)opt))
                goto err_out_now;
 
-       /* We support sector sizes up to the PAGE_CACHE_SIZE. */
-       if (bdev_logical_block_size(sb->s_bdev) > PAGE_CACHE_SIZE) {
+       /* We support sector sizes up to the PAGE_SIZE. */
+       if (bdev_logical_block_size(sb->s_bdev) > PAGE_SIZE) {
                if (!silent)
                        ntfs_error(sb, "Device has unsupported sector size "
                                        "(%i).  The maximum supported sector "
                                        "size on this architecture is %lu "
                                        "bytes.",
                                        bdev_logical_block_size(sb->s_bdev),
-                                       PAGE_CACHE_SIZE);
+                                       PAGE_SIZE);
                goto err_out_now;
        }
        /*
index 70907d638b6078548045c511081367f93c14126e..e361d1a0ca09fe411d6d951e9a4ed417940229fb 100644 (file)
@@ -6671,7 +6671,7 @@ static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start,
 {
        int i;
        struct page *page;
-       unsigned int from, to = PAGE_CACHE_SIZE;
+       unsigned int from, to = PAGE_SIZE;
        struct super_block *sb = inode->i_sb;
 
        BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(sb)));
@@ -6679,21 +6679,21 @@ static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start,
        if (numpages == 0)
                goto out;
 
-       to = PAGE_CACHE_SIZE;
+       to = PAGE_SIZE;
        for(i = 0; i < numpages; i++) {
                page = pages[i];
 
-               from = start & (PAGE_CACHE_SIZE - 1);
-               if ((end >> PAGE_CACHE_SHIFT) == page->index)
-                       to = end & (PAGE_CACHE_SIZE - 1);
+               from = start & (PAGE_SIZE - 1);
+               if ((end >> PAGE_SHIFT) == page->index)
+                       to = end & (PAGE_SIZE - 1);
 
-               BUG_ON(from > PAGE_CACHE_SIZE);
-               BUG_ON(to > PAGE_CACHE_SIZE);
+               BUG_ON(from > PAGE_SIZE);
+               BUG_ON(to > PAGE_SIZE);
 
                ocfs2_map_and_dirty_page(inode, handle, from, to, page, 1,
                                         &phys);
 
-               start = (page->index + 1) << PAGE_CACHE_SHIFT;
+               start = (page->index + 1) << PAGE_SHIFT;
        }
 out:
        if (pages)
@@ -6712,7 +6712,7 @@ int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end,
 
        numpages = 0;
        last_page_bytes = PAGE_ALIGN(end);
-       index = start >> PAGE_CACHE_SHIFT;
+       index = start >> PAGE_SHIFT;
        do {
                pages[numpages] = find_or_create_page(mapping, index, GFP_NOFS);
                if (!pages[numpages]) {
@@ -6723,7 +6723,7 @@ int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end,
 
                numpages++;
                index++;
-       } while (index < (last_page_bytes >> PAGE_CACHE_SHIFT));
+       } while (index < (last_page_bytes >> PAGE_SHIFT));
 
 out:
        if (ret != 0) {
@@ -6950,8 +6950,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
                 * to do that now.
                 */
                if (!ocfs2_sparse_alloc(osb) &&
-                   PAGE_CACHE_SIZE < osb->s_clustersize)
-                       end = PAGE_CACHE_SIZE;
+                   PAGE_SIZE < osb->s_clustersize)
+                       end = PAGE_SIZE;
 
                ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages);
                if (ret) {
@@ -6971,8 +6971,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
                        goto out_unlock;
                }
 
-               page_end = PAGE_CACHE_SIZE;
-               if (PAGE_CACHE_SIZE > osb->s_clustersize)
+               page_end = PAGE_SIZE;
+               if (PAGE_SIZE > osb->s_clustersize)
                        page_end = osb->s_clustersize;
 
                for (i = 0; i < num_pages; i++)
index 1581240a7ca0496d8fe4910bf09641c5661178af..ad1577348a92d05f05e099e0b7823a439b66a858 100644 (file)
@@ -234,7 +234,7 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page,
 
        size = i_size_read(inode);
 
-       if (size > PAGE_CACHE_SIZE ||
+       if (size > PAGE_SIZE ||
            size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) {
                ocfs2_error(inode->i_sb,
                            "Inode %llu has with inline data has bad size: %Lu\n",
@@ -247,7 +247,7 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page,
        if (size)
                memcpy(kaddr, di->id2.i_data.id_data, size);
        /* Clear the remaining part of the page */
-       memset(kaddr + size, 0, PAGE_CACHE_SIZE - size);
+       memset(kaddr + size, 0, PAGE_SIZE - size);
        flush_dcache_page(page);
        kunmap_atomic(kaddr);
 
@@ -282,7 +282,7 @@ static int ocfs2_readpage(struct file *file, struct page *page)
 {
        struct inode *inode = page->mapping->host;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
-       loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT;
+       loff_t start = (loff_t)page->index << PAGE_SHIFT;
        int ret, unlock = 1;
 
        trace_ocfs2_readpage((unsigned long long)oi->ip_blkno,
@@ -385,7 +385,7 @@ static int ocfs2_readpages(struct file *filp, struct address_space *mapping,
         * drop out in that case as it's not worth handling here.
         */
        last = list_entry(pages->prev, struct page, lru);
-       start = (loff_t)last->index << PAGE_CACHE_SHIFT;
+       start = (loff_t)last->index << PAGE_SHIFT;
        if (start >= i_size_read(inode))
                goto out_unlock;
 
@@ -511,12 +511,12 @@ static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb,
                                            unsigned int *start,
                                            unsigned int *end)
 {
-       unsigned int cluster_start = 0, cluster_end = PAGE_CACHE_SIZE;
+       unsigned int cluster_start = 0, cluster_end = PAGE_SIZE;
 
-       if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits)) {
+       if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits)) {
                unsigned int cpp;
 
-               cpp = 1 << (PAGE_CACHE_SHIFT - osb->s_clustersize_bits);
+               cpp = 1 << (PAGE_SHIFT - osb->s_clustersize_bits);
 
                cluster_start = cpos % cpp;
                cluster_start = cluster_start << osb->s_clustersize_bits;
@@ -684,13 +684,13 @@ next_bh:
        return ret;
 }
 
-#if (PAGE_CACHE_SIZE >= OCFS2_MAX_CLUSTERSIZE)
+#if (PAGE_SIZE >= OCFS2_MAX_CLUSTERSIZE)
 #define OCFS2_MAX_CTXT_PAGES   1
 #else
-#define OCFS2_MAX_CTXT_PAGES   (OCFS2_MAX_CLUSTERSIZE / PAGE_CACHE_SIZE)
+#define OCFS2_MAX_CTXT_PAGES   (OCFS2_MAX_CLUSTERSIZE / PAGE_SIZE)
 #endif
 
-#define OCFS2_MAX_CLUSTERS_PER_PAGE    (PAGE_CACHE_SIZE / OCFS2_MIN_CLUSTERSIZE)
+#define OCFS2_MAX_CLUSTERS_PER_PAGE    (PAGE_SIZE / OCFS2_MIN_CLUSTERSIZE)
 
 struct ocfs2_unwritten_extent {
        struct list_head        ue_node;
@@ -785,7 +785,7 @@ void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
                if (pages[i]) {
                        unlock_page(pages[i]);
                        mark_page_accessed(pages[i]);
-                       page_cache_release(pages[i]);
+                       put_page(pages[i]);
                }
        }
 }
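
The OCFS2_MAX_CTXT_PAGES / OCFS2_MAX_CLUSTERS_PER_PAGE pair a few
hunks up sizes the write context for whichever of page and cluster is
larger.  Worked numbers under assumed limits (OCFS2_MAX_CLUSTERSIZE =
1 MiB and OCFS2_MIN_CLUSTERSIZE = 4 KiB; both values are assumptions,
see ocfs2_fs.h):

    4 KiB pages:  OCFS2_MAX_CTXT_PAGES = 1 MiB / 4 KiB = 256,
                  OCFS2_MAX_CLUSTERS_PER_PAGE = 4 KiB / 4 KiB = 1
    64 KiB pages: OCFS2_MAX_CTXT_PAGES = 1 MiB / 64 KiB = 16,
                  OCFS2_MAX_CLUSTERS_PER_PAGE = 64 KiB / 4 KiB = 16
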
@@ -808,7 +808,7 @@ static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc)
                        }
                }
                mark_page_accessed(wc->w_target_page);
-               page_cache_release(wc->w_target_page);
+               put_page(wc->w_target_page);
        }
        ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
 }
@@ -857,7 +857,7 @@ static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
        wc->w_di_bh = di_bh;
        wc->w_type = type;
 
-       if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits))
+       if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits))
                wc->w_large_pages = 1;
        else
                wc->w_large_pages = 0;
@@ -920,7 +920,7 @@ static void ocfs2_write_failure(struct inode *inode,
                                loff_t user_pos, unsigned user_len)
 {
        int i;
-       unsigned from = user_pos & (PAGE_CACHE_SIZE - 1),
+       unsigned from = user_pos & (PAGE_SIZE - 1),
                to = user_pos + user_len;
        struct page *tmppage;
 
@@ -960,7 +960,7 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
                        (page_offset(page) <= user_pos));
 
        if (page == wc->w_target_page) {
-               map_from = user_pos & (PAGE_CACHE_SIZE - 1);
+               map_from = user_pos & (PAGE_SIZE - 1);
                map_to = map_from + user_len;
 
                if (new)
@@ -1034,7 +1034,7 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
        struct inode *inode = mapping->host;
        loff_t last_byte;
 
-       target_index = user_pos >> PAGE_CACHE_SHIFT;
+       target_index = user_pos >> PAGE_SHIFT;
 
        /*
         * Figure out how many pages we'll be manipulating here. For
@@ -1053,14 +1053,14 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
                 */
                last_byte = max(user_pos + user_len, i_size_read(inode));
                BUG_ON(last_byte < 1);
-               end_index = ((last_byte - 1) >> PAGE_CACHE_SHIFT) + 1;
+               end_index = ((last_byte - 1) >> PAGE_SHIFT) + 1;
                if ((start + wc->w_num_pages) > end_index)
                        wc->w_num_pages = end_index - start;
        } else {
                wc->w_num_pages = 1;
                start = target_index;
        }
-       end_index = (user_pos + user_len - 1) >> PAGE_CACHE_SHIFT;
+       end_index = (user_pos + user_len - 1) >> PAGE_SHIFT;
 
        for(i = 0; i < wc->w_num_pages; i++) {
                index = start + i;
@@ -1082,7 +1082,7 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
                                goto out;
                        }
 
-                       page_cache_get(mmap_page);
+                       get_page(mmap_page);
                        wc->w_pages[i] = mmap_page;
                        wc->w_target_locked = true;
                } else if (index >= target_index && index <= end_index &&
@@ -1272,7 +1272,7 @@ static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
 {
        struct ocfs2_write_cluster_desc *desc;
 
-       wc->w_target_from = pos & (PAGE_CACHE_SIZE - 1);
+       wc->w_target_from = pos & (PAGE_SIZE - 1);
        wc->w_target_to = wc->w_target_from + len;
 
        if (alloc == 0)
@@ -1309,7 +1309,7 @@ static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
                                                        &wc->w_target_to);
        } else {
                wc->w_target_from = 0;
-               wc->w_target_to = PAGE_CACHE_SIZE;
+               wc->w_target_to = PAGE_SIZE;
        }
 }
 
@@ -1981,7 +1981,7 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
                           struct page *page, void *fsdata)
 {
        int i, ret;
-       unsigned from, to, start = pos & (PAGE_CACHE_SIZE - 1);
+       unsigned from, to, start = pos & (PAGE_SIZE - 1);
        struct inode *inode = mapping->host;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct ocfs2_write_ctxt *wc = fsdata;
@@ -2027,8 +2027,8 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
                        from = wc->w_target_from;
                        to = wc->w_target_to;
 
-                       BUG_ON(from > PAGE_CACHE_SIZE ||
-                              to > PAGE_CACHE_SIZE ||
+                       BUG_ON(from > PAGE_SIZE ||
+                              to > PAGE_SIZE ||
                               to < from);
                } else {
                        /*
@@ -2037,7 +2037,7 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
                         * to flush their entire range.
                         */
                        from = 0;
-                       to = PAGE_CACHE_SIZE;
+                       to = PAGE_SIZE;
                }
 
                if (page_has_buffers(tmppage)) {
index bd15929b5f9254e62348ff5718a860e37a202e82..1934abb6b68081a91309a1ee503b4653b3cf23f6 100644 (file)
@@ -417,13 +417,13 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
        bio->bi_private = wc;
        bio->bi_end_io = o2hb_bio_end_io;
 
-       vec_start = (cs << bits) % PAGE_CACHE_SIZE;
+       vec_start = (cs << bits) % PAGE_SIZE;
        while(cs < max_slots) {
                current_page = cs / spp;
                page = reg->hr_slot_data[current_page];
 
-               vec_len = min(PAGE_CACHE_SIZE - vec_start,
-                             (max_slots-cs) * (PAGE_CACHE_SIZE/spp) );
+               vec_len = min(PAGE_SIZE - vec_start,
+                             (max_slots-cs) * (PAGE_SIZE/spp) );
 
                mlog(ML_HB_BIO, "page %d, vec_len = %u, vec_start = %u\n",
                     current_page, vec_len, vec_start);
@@ -431,7 +431,7 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
                len = bio_add_page(bio, page, vec_len, vec_start);
                if (len != vec_len) break;
 
-               cs += vec_len / (PAGE_CACHE_SIZE/spp);
+               cs += vec_len / (PAGE_SIZE/spp);
                vec_start = 0;
        }
 
@@ -1576,7 +1576,7 @@ static ssize_t o2hb_region_dev_show(struct config_item *item, char *page)
 
 static void o2hb_init_region_params(struct o2hb_region *reg)
 {
-       reg->hr_slots_per_page = PAGE_CACHE_SIZE >> reg->hr_block_bits;
+       reg->hr_slots_per_page = PAGE_SIZE >> reg->hr_block_bits;
        reg->hr_timeout_ms = O2HB_REGION_TIMEOUT_MS;
 
        mlog(ML_HEARTBEAT, "hr_start_block = %llu, hr_blocks = %u\n",
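
With hr_slots_per_page = PAGE_SIZE >> hr_block_bits, the bio-building loop in the o2hb_setup_one_bio() hunk above advances one page worth of heartbeat slots at a time. A worked run of the same arithmetic with assumed 4 KiB pages and 512-byte heartbeat blocks:

/* heartbeat slot/vector math with hypothetical geometry */
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long bits = 9;			/* 512-byte heartbeat blocks */
	unsigned long spp = PAGE_SIZE >> bits;	/* slots per page: 8 */
	unsigned long cs = 5, max_slots = 20;	/* arbitrary cursor and limit */

	unsigned long vec_start = (cs << bits) % PAGE_SIZE;	/* 2560 */
	unsigned long vec_len = PAGE_SIZE - vec_start;		/* 1536 */
	if (vec_len > (max_slots - cs) * (PAGE_SIZE / spp))
		vec_len = (max_slots - cs) * (PAGE_SIZE / spp);

	/* 1536 bytes cover slots 5..7; the cursor lands on slot 8 */
	printf("vec_start=%lu vec_len=%lu next_cs=%lu\n",
	       vec_start, vec_len, cs + vec_len / (PAGE_SIZE / spp));
	return 0;
}
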
index 9aed6e2022014afb71988de025e583b38aea3892..13719d3f35f8817c64774327675aebc5824fdf30 100644 (file)
@@ -2455,6 +2455,8 @@ int dlm_deref_lockres_done_handler(struct o2net_msg *msg, u32 len, void *data,
 
        spin_unlock(&dlm->spinlock);
 
+       ret = 0;
+
 done:
        dlm_put(dlm);
        return ret;
index 03768bb3aab154fd408679f0c3fef500efec794d..47b3b2d4e775ea09a3612b0e0bad980184b7d7f1 100644 (file)
@@ -571,8 +571,8 @@ static int dlmfs_fill_super(struct super_block * sb,
                            int silent)
 {
        sb->s_maxbytes = MAX_LFS_FILESIZE;
-       sb->s_blocksize = PAGE_CACHE_SIZE;
-       sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+       sb->s_blocksize = PAGE_SIZE;
+       sb->s_blocksize_bits = PAGE_SHIFT;
        sb->s_magic = DLMFS_MAGIC;
        sb->s_op = &dlmfs_ops;
        sb->s_root = d_make_root(dlmfs_get_root_inode(sb));
index c18ab45f8d210ca2032864069b3701b948aa0cbd..5308841756be24f6e682e361a35f1f7db00f9568 100644 (file)
@@ -770,14 +770,14 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
 {
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
-       unsigned long index = abs_from >> PAGE_CACHE_SHIFT;
+       unsigned long index = abs_from >> PAGE_SHIFT;
        handle_t *handle;
        int ret = 0;
        unsigned zero_from, zero_to, block_start, block_end;
        struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
 
        BUG_ON(abs_from >= abs_to);
-       BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT));
+       BUG_ON(abs_to > (((u64)index + 1) << PAGE_SHIFT));
        BUG_ON(abs_from & (inode->i_blkbits - 1));
 
        handle = ocfs2_zero_start_ordered_transaction(inode, di_bh);
@@ -794,10 +794,10 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
        }
 
        /* Get the offsets within the page that we want to zero */
-       zero_from = abs_from & (PAGE_CACHE_SIZE - 1);
-       zero_to = abs_to & (PAGE_CACHE_SIZE - 1);
+       zero_from = abs_from & (PAGE_SIZE - 1);
+       zero_to = abs_to & (PAGE_SIZE - 1);
        if (!zero_to)
-               zero_to = PAGE_CACHE_SIZE;
+               zero_to = PAGE_SIZE;
 
        trace_ocfs2_write_zero_page(
                        (unsigned long long)OCFS2_I(inode)->ip_blkno,
@@ -851,7 +851,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
 
 out_unlock:
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 out_commit_trans:
        if (handle)
                ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
@@ -959,7 +959,7 @@ static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
        BUG_ON(range_start >= range_end);
 
        while (zero_pos < range_end) {
-               next_pos = (zero_pos & PAGE_CACHE_MASK) + PAGE_CACHE_SIZE;
+               next_pos = (zero_pos & PAGE_MASK) + PAGE_SIZE;
                if (next_pos > range_end)
                        next_pos = range_end;
                rc = ocfs2_write_zero_page(inode, zero_pos, next_pos, di_bh);
index 9ea081f4e6e4613a18a51c21f95de5a3f14183fb..71545ad4628ce8bc37b377abcf1983d169d38f48 100644 (file)
@@ -65,13 +65,13 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
        struct inode *inode = file_inode(file);
        struct address_space *mapping = inode->i_mapping;
        loff_t pos = page_offset(page);
-       unsigned int len = PAGE_CACHE_SIZE;
+       unsigned int len = PAGE_SIZE;
        pgoff_t last_index;
        struct page *locked_page = NULL;
        void *fsdata;
        loff_t size = i_size_read(inode);
 
-       last_index = (size - 1) >> PAGE_CACHE_SHIFT;
+       last_index = (size - 1) >> PAGE_SHIFT;
 
        /*
         * There are cases that lead to the page no longer belonging to the
@@ -102,7 +102,7 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
         * because the "write" would invalidate their data.
         */
        if (page->index == last_index)
-               len = ((size - 1) & ~PAGE_CACHE_MASK) + 1;
+               len = ((size - 1) & ~PAGE_MASK) + 1;
 
        ret = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_MMAP,
                                       &locked_page, &fsdata, di_bh, page);
index 6cf6538a065160ec815d3c92549029dc14698265..e63af7ddfe688f85516fd9f64d3600152b118e64 100644 (file)
@@ -822,10 +822,10 @@ static inline unsigned int ocfs2_page_index_to_clusters(struct super_block *sb,
        u32 clusters = pg_index;
        unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits;
 
-       if (unlikely(PAGE_CACHE_SHIFT > cbits))
-               clusters = pg_index << (PAGE_CACHE_SHIFT - cbits);
-       else if (PAGE_CACHE_SHIFT < cbits)
-               clusters = pg_index >> (cbits - PAGE_CACHE_SHIFT);
+       if (unlikely(PAGE_SHIFT > cbits))
+               clusters = pg_index << (PAGE_SHIFT - cbits);
+       else if (PAGE_SHIFT < cbits)
+               clusters = pg_index >> (cbits - PAGE_SHIFT);
 
        return clusters;
 }
@@ -839,10 +839,10 @@ static inline pgoff_t ocfs2_align_clusters_to_page_index(struct super_block *sb,
        unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits;
         pgoff_t index = clusters;
 
-       if (PAGE_CACHE_SHIFT > cbits) {
-               index = (pgoff_t)clusters >> (PAGE_CACHE_SHIFT - cbits);
-       } else if (PAGE_CACHE_SHIFT < cbits) {
-               index = (pgoff_t)clusters << (cbits - PAGE_CACHE_SHIFT);
+       if (PAGE_SHIFT > cbits) {
+               index = (pgoff_t)clusters >> (PAGE_SHIFT - cbits);
+       } else if (PAGE_SHIFT < cbits) {
+               index = (pgoff_t)clusters << (cbits - PAGE_SHIFT);
        }
 
        return index;
@@ -853,8 +853,8 @@ static inline unsigned int ocfs2_pages_per_cluster(struct super_block *sb)
        unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits;
        unsigned int pages_per_cluster = 1;
 
-       if (PAGE_CACHE_SHIFT < cbits)
-               pages_per_cluster = 1 << (cbits - PAGE_CACHE_SHIFT);
+       if (PAGE_SHIFT < cbits)
+               pages_per_cluster = 1 << (cbits - PAGE_SHIFT);
 
        return pages_per_cluster;
 }
index 3892f3c079ca83dae1a8b92bf6263cf4b3851be5..ab6a6cdcf91c856d5ff1fcc04692a6abb9f141f4 100644 (file)
@@ -867,6 +867,10 @@ static int ocfs2_get_next_id(struct super_block *sb, struct kqid *qid)
        int status = 0;
 
        trace_ocfs2_get_next_id(from_kqid(&init_user_ns, *qid), type);
+       if (!sb_has_quota_loaded(sb, type)) {
+               status = -ESRCH;
+               goto out;
+       }
        status = ocfs2_lock_global_qf(info, 0);
        if (status < 0)
                goto out;
@@ -878,8 +882,11 @@ static int ocfs2_get_next_id(struct super_block *sb, struct kqid *qid)
 out_global:
        ocfs2_unlock_global_qf(info, 0);
 out:
-       /* Avoid logging ENOENT since it just means there isn't next ID */
-       if (status && status != -ENOENT)
+       /*
+        * Avoid logging ENOENT, which just means there is no next ID,
+        * and ESRCH, which means quota isn't enabled for the filesystem.
+        */
+       if (status && status != -ENOENT && status != -ESRCH)
                mlog_errno(status);
        return status;
 }
index 3eff031aaf264df58b74c4165749b0c2385c6312..744d5d90c363a182812e2c7de7ecf4971452ce3c 100644 (file)
@@ -2937,16 +2937,16 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
                end = i_size_read(inode);
 
        while (offset < end) {
-               page_index = offset >> PAGE_CACHE_SHIFT;
-               map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
+               page_index = offset >> PAGE_SHIFT;
+               map_end = ((loff_t)page_index + 1) << PAGE_SHIFT;
                if (map_end > end)
                        map_end = end;
 
                /* from, to is the offset within the page. */
-               from = offset & (PAGE_CACHE_SIZE - 1);
-               to = PAGE_CACHE_SIZE;
-               if (map_end & (PAGE_CACHE_SIZE - 1))
-                       to = map_end & (PAGE_CACHE_SIZE - 1);
+               from = offset & (PAGE_SIZE - 1);
+               to = PAGE_SIZE;
+               if (map_end & (PAGE_SIZE - 1))
+                       to = map_end & (PAGE_SIZE - 1);
 
                page = find_or_create_page(mapping, page_index, GFP_NOFS);
                if (!page) {
@@ -2956,10 +2956,10 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
                }
 
                /*
-                * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page
+                * In case PAGE_SIZE <= CLUSTER_SIZE, this page
                 * can't be dirtied before we CoW it out.
                 */
-               if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize)
+               if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize)
                        BUG_ON(PageDirty(page));
 
                if (!PageUptodate(page)) {
@@ -2987,7 +2987,7 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
                mark_page_accessed(page);
 unlock:
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
                page = NULL;
                offset = map_end;
                if (ret)
@@ -3165,8 +3165,8 @@ int ocfs2_cow_sync_writeback(struct super_block *sb,
        }
 
        while (offset < end) {
-               page_index = offset >> PAGE_CACHE_SHIFT;
-               map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
+               page_index = offset >> PAGE_SHIFT;
+               map_end = ((loff_t)page_index + 1) << PAGE_SHIFT;
                if (map_end > end)
                        map_end = end;
 
@@ -3182,7 +3182,7 @@ int ocfs2_cow_sync_writeback(struct super_block *sb,
                        mark_page_accessed(page);
 
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
                page = NULL;
                offset = map_end;
                if (ret)
index 7db631e1c8b0a6fac59a4759545f38e6a46eb9ab..d7cae3327de5caab7c23b64c9e68460010b4317f 100644 (file)
@@ -605,8 +605,8 @@ static unsigned long long ocfs2_max_file_offset(unsigned int bbits,
        /*
         * We might be limited by page cache size.
         */
-       if (bytes > PAGE_CACHE_SIZE) {
-               bytes = PAGE_CACHE_SIZE;
+       if (bytes > PAGE_SIZE) {
+               bytes = PAGE_SIZE;
                trim = 1;
                /*
                 * Shift by 31 here so that we don't get larger than
index ba7dec40771e6d902e43551efcba9f528034faf1..324f0af40d7bd3c4c014c5e8de7a7b2b56e129b7 100644 (file)
@@ -153,7 +153,6 @@ static int orangefs_readdir(struct file *file, struct dir_context *ctx)
        struct dentry *dentry = file->f_path.dentry;
        struct orangefs_kernel_op_s *new_op = NULL;
        struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(dentry->d_inode);
-       int buffer_full = 0;
        struct orangefs_readdir_response_s readdir_response;
        void *dents_buf;
        int i = 0;
@@ -350,8 +349,7 @@ get_new_buffer_index:
        /*
         * Did we hit the end of the directory?
         */
-       if (readdir_response.token == ORANGEFS_READDIR_END &&
-           !buffer_full) {
+       if (readdir_response.token == ORANGEFS_READDIR_END) {
                gossip_debug(GOSSIP_DIR_DEBUG,
                "End of dir detected; setting ctx->pos to ORANGEFS_READDIR_END.\n");
                ctx->pos = ORANGEFS_READDIR_END;
index 2382e267b49e355a066511bcdda73f435ed2b292..85640e955cde93dfe40b70768aa78d6cb660ac41 100644 (file)
@@ -18,8 +18,8 @@ static int read_one_page(struct page *page)
        int max_block;
        ssize_t bytes_read = 0;
        struct inode *inode = page->mapping->host;
-       const __u32 blocksize = PAGE_CACHE_SIZE;        /* inode->i_blksize */
-       const __u32 blockbits = PAGE_CACHE_SHIFT;       /* inode->i_blkbits */
+       const __u32 blocksize = PAGE_SIZE;      /* inode->i_blksize */
+       const __u32 blockbits = PAGE_SHIFT;     /* inode->i_blkbits */
        struct iov_iter to;
        struct bio_vec bv = {.bv_page = page, .bv_len = PAGE_SIZE};
 
@@ -86,7 +86,7 @@ static int orangefs_readpages(struct file *file,
                                "failure adding page to cache, read_one_page returned: %d\n",
                                ret);
              } else {
-                       page_cache_release(page);
+                       put_page(page);
              }
        }
        BUG_ON(!list_empty(pages));
@@ -204,22 +204,8 @@ static int orangefs_setattr_size(struct inode *inode, struct iattr *iattr)
        if (ret != 0)
                return ret;
 
-       /*
-        * Only change the c/mtime if we are changing the size or we are
-        * explicitly asked to change it.  This handles the semantic difference
-        * between truncate() and ftruncate() as implemented in the VFS.
-        *
-        * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
-        * special case where we need to update the times despite not having
-        * these flags set.  For all other operations the VFS set these flags
-        * explicitly if it wants a timestamp update.
-        */
-       if (orig_size != i_size_read(inode) &&
-           !(iattr->ia_valid & (ATTR_CTIME | ATTR_MTIME))) {
-               iattr->ia_ctime = iattr->ia_mtime =
-                       current_fs_time(inode->i_sb);
+       if (orig_size != i_size_read(inode))
                iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME;
-       }
 
        return ret;
 }
@@ -328,7 +314,7 @@ static int orangefs_init_iops(struct inode *inode)
        case S_IFREG:
                inode->i_op = &orangefs_file_inode_operations;
                inode->i_fop = &orangefs_file_operations;
-               inode->i_blkbits = PAGE_CACHE_SHIFT;
+               inode->i_blkbits = PAGE_SHIFT;
                break;
        case S_IFLNK:
                inode->i_op = &orangefs_symlink_inode_operations;
@@ -456,7 +442,7 @@ struct inode *orangefs_new_inode(struct super_block *sb, struct inode *dir,
        inode->i_uid = current_fsuid();
        inode->i_gid = current_fsgid();
        inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-       inode->i_size = PAGE_CACHE_SIZE;
+       inode->i_size = PAGE_SIZE;
        inode->i_rdev = dev;
 
        error = insert_inode_locked4(inode, hash, orangefs_test_inode, ref);
index 1f8acc9f9a888c3ae3ae944e480301fd219653b2..75375e90a63f32e4bac0ef3e996c960c673b250e 100644 (file)
@@ -170,7 +170,7 @@ orangefs_bufmap_unmap(struct orangefs_bufmap *bufmap)
        int i;
 
        for (i = 0; i < bufmap->page_count; i++)
-               page_cache_release(bufmap->page_array[i]);
+               put_page(bufmap->page_array[i]);
 }
 
 static void
@@ -299,7 +299,7 @@ orangefs_bufmap_map(struct orangefs_bufmap *bufmap,
 
                for (i = 0; i < ret; i++) {
                        SetPageError(bufmap->page_array[i]);
-                       page_cache_release(bufmap->page_array[i]);
+                       put_page(bufmap->page_array[i]);
                }
                return -ENOMEM;
        }
index 19670b8b4053b49a6c40da82464ff75000bb84da..1714a737d5563ba3fd6dbbd468837b9e9f66de7e 100644 (file)
@@ -126,8 +126,7 @@ out:
 
 void orangefs_debugfs_cleanup(void)
 {
-       if (debug_dir)
-               debugfs_remove_recursive(debug_dir);
+       debugfs_remove_recursive(debug_dir);
 }
 
 /* open ORANGEFS_KMOD_DEBUG_HELP_FILE */
index 40f5163b56aa02142b4be4ed64eabd5a96ccfc18..2d129b5886eeb7a61d067502900dca75cf525d08 100644 (file)
@@ -303,7 +303,7 @@ int orangefs_inode_getattr(struct inode *inode, int new, int size)
                }
                break;
        case S_IFDIR:
-               inode->i_size = PAGE_CACHE_SIZE;
+               inode->i_size = PAGE_SIZE;
                orangefs_inode->blksize = (1 << inode->i_blkbits);
                spin_lock(&inode->i_lock);
                inode_set_bytes(inode, inode->i_size);
@@ -315,9 +315,13 @@ int orangefs_inode_getattr(struct inode *inode, int new, int size)
                        inode->i_size = (loff_t)strlen(new_op->
                            downcall.resp.getattr.link_target);
                        orangefs_inode->blksize = (1 << inode->i_blkbits);
-                       strlcpy(orangefs_inode->link_target,
+                       ret = strscpy(orangefs_inode->link_target,
                            new_op->downcall.resp.getattr.link_target,
                            ORANGEFS_NAME_MAX);
+                       if (ret == -E2BIG) {
+                               ret = -EIO;
+                               goto out;
+                       }
                        inode->i_link = orangefs_inode->link_target;
                }
                break;
index 50578a28bd9ea64976cfeb59182b0ae21b038384..1efc6f8a5224cb0cd1ebb84b793cb68042e70442 100644 (file)
@@ -1,3 +1,4 @@
+#include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/spinlock_types.h>
 #include <linux/slab.h>
@@ -74,8 +75,8 @@ static inline void ORANGEFS_khandle_to(const struct orangefs_khandle *kh,
                                   void *p, int size)
 {
 
-       memset(p, 0, size);
        memcpy(p, kh->u, 16);
+       memset(p + 16, 0, size - 16);
 
 }
 
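
The reordering above writes each byte once: copy the 16-byte handle, then zero only the tail, instead of zeroing the whole buffer and overwriting the head. A quick userspace check that the two orderings produce identical bytes (buffer size hypothetical):

#include <assert.h>
#include <string.h>

#define HANDLE_LEN 16	/* size of the khandle payload */

static void pack_old(void *p, const void *kh, size_t size)
{
	memset(p, 0, size);		/* zero everything ... */
	memcpy(p, kh, HANDLE_LEN);	/* ... then overwrite the head */
}

static void pack_new(void *p, const void *kh, size_t size)
{
	memcpy(p, kh, HANDLE_LEN);	/* copy the head ... */
	memset((char *)p + HANDLE_LEN, 0, size - HANDLE_LEN); /* ... zero the tail */
}

int main(void)
{
	unsigned char kh[HANDLE_LEN], a[64], b[64];

	memset(kh, 0xab, sizeof(kh));	/* arbitrary handle bytes */
	pack_old(a, kh, sizeof(a));
	pack_new(b, kh, sizeof(b));
	assert(memcmp(a, b, sizeof(a)) == 0);	/* identical results */
	return 0;
}
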
@@ -427,26 +428,28 @@ struct ORANGEFS_dev_map_desc {
 /* gossip.h *****************************************************************/
 
 #ifdef GOSSIP_DISABLE_DEBUG
-#define gossip_debug(mask, format, f...) do {} while (0)
+#define gossip_debug(mask, fmt, ...)                                   \
+do {                                                                   \
+       if (0)                                                          \
+               printk(KERN_DEBUG fmt, ##__VA_ARGS__);                  \
+} while (0)
 #else
 extern __u64 gossip_debug_mask;
 extern struct client_debug_mask client_debug_mask;
 
 /* try to avoid function call overhead by checking masks in macro */
-#define gossip_debug(mask, format, f...)                       \
-do {                                                           \
-       if (gossip_debug_mask & mask)                           \
-               printk(format, ##f);                            \
+#define gossip_debug(mask, fmt, ...)                                   \
+do {                                                                   \
+       if (gossip_debug_mask & (mask))                                 \
+               printk(KERN_DEBUG fmt, ##__VA_ARGS__);                  \
 } while (0)
 #endif /* GOSSIP_DISABLE_DEBUG */
 
 /* do file and line number printouts w/ the GNU preprocessor */
-#define gossip_ldebug(mask, format, f...)                              \
-               gossip_debug(mask, "%s: " format, __func__, ##f)
-
-#define gossip_err printk
-#define gossip_lerr(format, f...)                                      \
-               gossip_err("%s line %d: " format,                       \
-                          __FILE__,                                    \
-                          __LINE__,                                    \
-                          ##f)
+#define gossip_ldebug(mask, fmt, ...)                                  \
+       gossip_debug(mask, "%s: " fmt, __func__, ##__VA_ARGS__)
+
+#define gossip_err pr_err
+#define gossip_lerr(fmt, ...)                                          \
+       gossip_err("%s line %d: " fmt,                                  \
+                  __FILE__, __LINE__, ##__VA_ARGS__)
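
The disabled gossip_debug() now expands to an "if (0) printk(...)" body instead of an empty statement, so format strings and arguments stay type-checked even in GOSSIP_DISABLE_DEBUG builds. A minimal userspace analogue of the trick, with printf() standing in for printk():

#include <stdio.h>

#define debug_off(fmt, ...)			\
do {						\
	if (0)					\
		printf(fmt, ##__VA_ARGS__);	\
} while (0)

int main(void)
{
	/* No code is emitted, but passing an int for %s would still warn. */
	debug_off("value = %d\n", 42);
	return 0;
}
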
index ef5da7538cd5177f78c31542121ad36f2d24115e..63a6280d8c3a415df407ce96bd4833a99538fde0 100644 (file)
@@ -73,10 +73,6 @@ ssize_t orangefs_inode_getxattr(struct inode *inode, const char *prefix,
                     "%s: prefix %s name %s, buffer_size %zd\n",
                     __func__, prefix, name, size);
 
-       if (name == NULL || (size > 0 && buffer == NULL)) {
-               gossip_err("orangefs_inode_getxattr: bogus NULL pointers\n");
-               return -EINVAL;
-       }
        if ((strlen(name) + strlen(prefix)) >= ORANGEFS_MAX_XATTR_NAMELEN) {
                gossip_err("Invalid key length (%d)\n",
                           (int)(strlen(name) + strlen(prefix)));
@@ -146,8 +142,8 @@ ssize_t orangefs_inode_getxattr(struct inode *inode, const char *prefix,
                goto out_release_op;
        }
 
-       memset(buffer, 0, size);
        memcpy(buffer, new_op->downcall.resp.getxattr.val, length);
+       memset(buffer + length, 0, size - length);
        gossip_debug(GOSSIP_XATTR_DEBUG,
             "orangefs_inode_getxattr: inode %pU "
             "key %s key_sz %d, val_len %d\n",
@@ -239,8 +235,7 @@ int orangefs_inode_setxattr(struct inode *inode, const char *prefix,
                     "%s: prefix %s, name %s, buffer_size %zd\n",
                     __func__, prefix, name, size);
 
-       if (size < 0 ||
-           size >= ORANGEFS_MAX_XATTR_VALUELEN ||
+       if (size >= ORANGEFS_MAX_XATTR_VALUELEN ||
            flags < 0) {
                gossip_err("orangefs_inode_setxattr: bogus values of size(%d), flags(%d)\n",
                           (int)size,
@@ -248,12 +243,6 @@ int orangefs_inode_setxattr(struct inode *inode, const char *prefix,
                return -EINVAL;
        }
 
-       if (name == NULL ||
-           (size > 0 && value == NULL)) {
-               gossip_err("orangefs_inode_setxattr: bogus NULL pointers!\n");
-               return -EINVAL;
-       }
-
        internal_flag = convert_to_internal_xattr_flags(flags);
 
        if (prefix) {
@@ -353,10 +342,6 @@ ssize_t orangefs_listxattr(struct dentry *dentry, char *buffer, size_t size)
                gossip_err("%s: bogus NULL pointers\n", __func__);
                return -EINVAL;
        }
-       if (size < 0) {
-               gossip_err("Invalid size (%d)\n", (int)size);
-               return -EINVAL;
-       }
 
        down_read(&orangefs_inode->xattr_sem);
        new_op = op_alloc(ORANGEFS_VFS_OP_LISTXATTR);
index ef64984c9bbcec1f765ed0601dbfb9082bc44f42..5d972e6cd3fe97fcae5c782b59c72c0007668566 100644 (file)
@@ -295,6 +295,37 @@ static void ovl_dentry_release(struct dentry *dentry)
        }
 }
 
+static struct dentry *ovl_d_real(struct dentry *dentry, struct inode *inode)
+{
+       struct dentry *real;
+
+       if (d_is_dir(dentry)) {
+               if (!inode || inode == d_inode(dentry))
+                       return dentry;
+               goto bug;
+       }
+
+       real = ovl_dentry_upper(dentry);
+       if (real && (!inode || inode == d_inode(real)))
+               return real;
+
+       real = ovl_dentry_lower(dentry);
+       if (!real)
+               goto bug;
+
+       if (!inode || inode == d_inode(real))
+               return real;
+
+       /* Handle recursion */
+       if (real->d_flags & DCACHE_OP_REAL)
+               return real->d_op->d_real(real, inode);
+
+bug:
+       WARN(1, "ovl_d_real(%pd4, %s:%lu): real dentry not found\n", dentry,
+            inode ? inode->i_sb->s_id : "NULL", inode ? inode->i_ino : 0);
+       return dentry;
+}
+
 static int ovl_dentry_revalidate(struct dentry *dentry, unsigned int flags)
 {
        struct ovl_entry *oe = dentry->d_fsdata;
@@ -339,11 +370,13 @@ static int ovl_dentry_weak_revalidate(struct dentry *dentry, unsigned int flags)
 static const struct dentry_operations ovl_dentry_operations = {
        .d_release = ovl_dentry_release,
        .d_select_inode = ovl_d_select_inode,
+       .d_real = ovl_d_real,
 };
 
 static const struct dentry_operations ovl_reval_dentry_operations = {
        .d_release = ovl_dentry_release,
        .d_select_inode = ovl_d_select_inode,
+       .d_real = ovl_d_real,
        .d_revalidate = ovl_dentry_revalidate,
        .d_weak_revalidate = ovl_dentry_weak_revalidate,
 };
index ab8dad3ccb6a8bac13a2eab25eeb2b68791d60f5..0d3f5165cb0b8cb8863b9df5253ca688294c7440 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -134,7 +134,7 @@ static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
        if (page_count(page) == 1 && !pipe->tmp_page)
                pipe->tmp_page = page;
        else
-               page_cache_release(page);
+               put_page(page);
 }
 
 /**
@@ -180,7 +180,7 @@ EXPORT_SYMBOL(generic_pipe_buf_steal);
  */
 void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
 {
-       page_cache_get(buf->page);
+       get_page(buf->page);
 }
 EXPORT_SYMBOL(generic_pipe_buf_get);
 
@@ -211,7 +211,7 @@ EXPORT_SYMBOL(generic_pipe_buf_confirm);
 void generic_pipe_buf_release(struct pipe_inode_info *pipe,
                              struct pipe_buffer *buf)
 {
-       page_cache_release(buf->page);
+       put_page(buf->page);
 }
 EXPORT_SYMBOL(generic_pipe_buf_release);
 
index c524fdddc7fb1f601d6d06ddb46d0affcca89c08..99899705b1055411723c3e5919ecbc1f50139e14 100644 (file)
@@ -198,7 +198,7 @@ static struct mount *next_group(struct mount *m, struct mount *origin)
 
 /* all accesses are serialized by namespace_sem */
 static struct user_namespace *user_ns;
-static struct mount *last_dest, *last_source, *dest_master;
+static struct mount *last_dest, *first_source, *last_source, *dest_master;
 static struct mountpoint *mp;
 static struct hlist_head *list;
 
@@ -221,20 +221,22 @@ static int propagate_one(struct mount *m)
                type = CL_MAKE_SHARED;
        } else {
                struct mount *n, *p;
+               bool done;
                for (n = m; ; n = p) {
                        p = n->mnt_master;
-                       if (p == dest_master || IS_MNT_MARKED(p)) {
-                               while (last_dest->mnt_master != p) {
-                                       last_source = last_source->mnt_master;
-                                       last_dest = last_source->mnt_parent;
-                               }
-                               if (!peers(n, last_dest)) {
-                                       last_source = last_source->mnt_master;
-                                       last_dest = last_source->mnt_parent;
-                               }
+                       if (p == dest_master || IS_MNT_MARKED(p))
                                break;
-                       }
                }
+               do {
+                       struct mount *parent = last_source->mnt_parent;
+                       if (last_source == first_source)
+                               break;
+                       done = parent->mnt_master == p;
+                       if (done && peers(n, parent))
+                               break;
+                       last_source = last_source->mnt_master;
+               } while (!done);
+
                type = CL_SLAVE;
                /* beginning of peer group among the slaves? */
                if (IS_MNT_SHARED(m))
@@ -286,6 +288,7 @@ int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
         */
        user_ns = current->nsproxy->mnt_ns->user_ns;
        last_dest = dest_mnt;
+       first_source = source_mnt;
        last_source = source_mnt;
        mp = dest_mp;
        list = tree_list;
index b1755b23893e5e34513582d51d7a9cf9ed6b7227..92e37e224cd22d66d0b9675f1790e110d5e7b415 100644 (file)
@@ -955,7 +955,8 @@ static ssize_t environ_read(struct file *file, char __user *buf,
        struct mm_struct *mm = file->private_data;
        unsigned long env_start, env_end;
 
-       if (!mm)
+       /* Ensure the process has spawned far enough to have an environment. */
+       if (!mm || !mm->env_end)
                return 0;
 
        page = (char *)__get_free_page(GFP_TEMPORARY);
index 9df4316420422db9f7926b1f160f9bd554048535..541583510cfb996c7461ba98bf2a34741445342b 100644 (file)
@@ -553,7 +553,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
                if (radix_tree_exceptional_entry(page))
                        mss->swap += PAGE_SIZE;
                else
-                       page_cache_release(page);
+                       put_page(page);
 
                return;
        }
@@ -1518,6 +1518,32 @@ static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
        return page;
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
+                                             struct vm_area_struct *vma,
+                                             unsigned long addr)
+{
+       struct page *page;
+       int nid;
+
+       if (!pmd_present(pmd))
+               return NULL;
+
+       page = vm_normal_page_pmd(vma, addr, pmd);
+       if (!page)
+               return NULL;
+
+       if (PageReserved(page))
+               return NULL;
+
+       nid = page_to_nid(page);
+       if (!node_isset(nid, node_states[N_MEMORY]))
+               return NULL;
+
+       return page;
+}
+#endif
+
 static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
                unsigned long end, struct mm_walk *walk)
 {
@@ -1527,14 +1553,14 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
        pte_t *orig_pte;
        pte_t *pte;
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        ptl = pmd_trans_huge_lock(pmd, vma);
        if (ptl) {
-               pte_t huge_pte = *(pte_t *)pmd;
                struct page *page;
 
-               page = can_gather_numa_stats(huge_pte, vma, addr);
+               page = can_gather_numa_stats_pmd(*pmd, vma, addr);
                if (page)
-                       gather_stats(page, md, pte_dirty(huge_pte),
+                       gather_stats(page, md, pmd_dirty(*pmd),
                                     HPAGE_PMD_SIZE/PAGE_SIZE);
                spin_unlock(ptl);
                return 0;
@@ -1542,6 +1568,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
 
        if (pmd_trans_unstable(pmd))
                return 0;
+#endif
        orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
        do {
                struct page *page = can_gather_numa_stats(*pte, vma, addr);
index 55bb57e6a30d3f3bae7b6db04b1f827e2885eff4..8afe10cf7df80f042ec1c3a34b3c3d412f5a3fce 100644 (file)
@@ -279,12 +279,12 @@ static int mmap_vmcore_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        if (!page)
                return VM_FAULT_OOM;
        if (!PageUptodate(page)) {
-               offset = (loff_t) index << PAGE_CACHE_SHIFT;
+               offset = (loff_t) index << PAGE_SHIFT;
                buf = __va((page_to_pfn(page) << PAGE_SHIFT));
                rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
                if (rc < 0) {
                        unlock_page(page);
-                       page_cache_release(page);
+                       put_page(page);
                        return (rc == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
                }
                SetPageUptodate(page);
index dc645b66cd79aea96ea723f8b127a16f7d6f70cd..45d6110744cb77255a2705f19176240e9be65e6f 100644 (file)
@@ -420,8 +420,8 @@ static int pstore_fill_super(struct super_block *sb, void *data, int silent)
        pstore_sb = sb;
 
        sb->s_maxbytes          = MAX_LFS_FILESIZE;
-       sb->s_blocksize         = PAGE_CACHE_SIZE;
-       sb->s_blocksize_bits    = PAGE_CACHE_SHIFT;
+       sb->s_blocksize         = PAGE_SIZE;
+       sb->s_blocksize_bits    = PAGE_SHIFT;
        sb->s_magic             = PSTOREFS_MAGIC;
        sb->s_op                = &pstore_ops;
        sb->s_time_gran         = 1;
index e1f37278cf97bfc0e7973c2f860062cc02c70cbe..144ceda4948e92ab58e2ba7d524c9bf73afd75f2 100644 (file)
@@ -35,9 +35,9 @@ static struct page *qnx6_get_page(struct inode *dir, unsigned long n)
 static unsigned last_entry(struct inode *inode, unsigned long page_nr)
 {
        unsigned long last_byte = inode->i_size;
-       last_byte -= page_nr << PAGE_CACHE_SHIFT;
-       if (last_byte > PAGE_CACHE_SIZE)
-               last_byte = PAGE_CACHE_SIZE;
+       last_byte -= page_nr << PAGE_SHIFT;
+       if (last_byte > PAGE_SIZE)
+               last_byte = PAGE_SIZE;
        return last_byte / QNX6_DIR_ENTRY_SIZE;
 }
 
@@ -47,9 +47,9 @@ static struct qnx6_long_filename *qnx6_longname(struct super_block *sb,
 {
        struct qnx6_sb_info *sbi = QNX6_SB(sb);
        u32 s = fs32_to_cpu(sbi, de->de_long_inode); /* in block units */
-       u32 n = s >> (PAGE_CACHE_SHIFT - sb->s_blocksize_bits); /* in pages */
+       u32 n = s >> (PAGE_SHIFT - sb->s_blocksize_bits); /* in pages */
        /* within page */
-       u32 offs = (s << sb->s_blocksize_bits) & ~PAGE_CACHE_MASK;
+       u32 offs = (s << sb->s_blocksize_bits) & ~PAGE_MASK;
        struct address_space *mapping = sbi->longfile->i_mapping;
        struct page *page = read_mapping_page(mapping, n, NULL);
        if (IS_ERR(page))
@@ -115,8 +115,8 @@ static int qnx6_readdir(struct file *file, struct dir_context *ctx)
        struct qnx6_sb_info *sbi = QNX6_SB(s);
        loff_t pos = ctx->pos & ~(QNX6_DIR_ENTRY_SIZE - 1);
        unsigned long npages = dir_pages(inode);
-       unsigned long n = pos >> PAGE_CACHE_SHIFT;
-       unsigned start = (pos & ~PAGE_CACHE_MASK) / QNX6_DIR_ENTRY_SIZE;
+       unsigned long n = pos >> PAGE_SHIFT;
+       unsigned start = (pos & ~PAGE_MASK) / QNX6_DIR_ENTRY_SIZE;
        bool done = false;
 
        ctx->pos = pos;
@@ -131,7 +131,7 @@ static int qnx6_readdir(struct file *file, struct dir_context *ctx)
 
                if (IS_ERR(page)) {
                        pr_err("%s(): read failed\n", __func__);
-                       ctx->pos = (n + 1) << PAGE_CACHE_SHIFT;
+                       ctx->pos = (n + 1) << PAGE_SHIFT;
                        return PTR_ERR(page);
                }
                de = ((struct qnx6_dir_entry *)page_address(page)) + start;
index 47bb1de07155e27a3ca553c16efa3a138c64b16a..1192422a1c5628e5782961252e2a1bdab58237c7 100644 (file)
@@ -542,8 +542,8 @@ struct inode *qnx6_iget(struct super_block *sb, unsigned ino)
                iget_failed(inode);
                return ERR_PTR(-EIO);
        }
-       n = (ino - 1) >> (PAGE_CACHE_SHIFT - QNX6_INODE_SIZE_BITS);
-       offs = (ino - 1) & (~PAGE_CACHE_MASK >> QNX6_INODE_SIZE_BITS);
+       n = (ino - 1) >> (PAGE_SHIFT - QNX6_INODE_SIZE_BITS);
+       offs = (ino - 1) & (~PAGE_MASK >> QNX6_INODE_SIZE_BITS);
        mapping = sbi->inodes->i_mapping;
        page = read_mapping_page(mapping, n, NULL);
        if (IS_ERR(page)) {
index d3fb2b698800298184e72db2489f491d91948ba9..f23b5c4a66ad753b8299f5e463bcd1965cfd4de6 100644 (file)
@@ -128,7 +128,7 @@ extern struct qnx6_super_block *qnx6_mmi_fill_super(struct super_block *s,
 static inline void qnx6_put_page(struct page *page)
 {
        kunmap(page);
-       page_cache_release(page);
+       put_page(page);
 }
 
 extern unsigned qnx6_find_entry(int len, struct inode *dir, const char *name,
index ba827daea5a0b2a7883327a18a83a43315c9bd2a..ff21980d0119a58aa41c825258e435aa799042c7 100644 (file)
@@ -2047,11 +2047,20 @@ int dquot_get_next_id(struct super_block *sb, struct kqid *qid)
        struct quota_info *dqopt = sb_dqopt(sb);
        int err;
 
-       if (!dqopt->ops[qid->type]->get_next_id)
-               return -ENOSYS;
+       mutex_lock(&dqopt->dqonoff_mutex);
+       if (!sb_has_quota_active(sb, qid->type)) {
+               err = -ESRCH;
+               goto out;
+       }
+       if (!dqopt->ops[qid->type]->get_next_id) {
+               err = -ENOSYS;
+               goto out;
+       }
        mutex_lock(&dqopt->dqio_mutex);
        err = dqopt->ops[qid->type]->get_next_id(sb, qid);
        mutex_unlock(&dqopt->dqio_mutex);
+out:
+       mutex_unlock(&dqopt->dqonoff_mutex);
 
        return err;
 }
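
The -ESRCH now returned when quota is not active lines up with how Q_GETNEXTQUOTA callers already probe for "no further ID". A rough userspace sketch of that iteration; the device path is illustrative, and it assumes a libc that exposes Q_GETNEXTQUOTA and struct if_nextdqblk (otherwise include <linux/quota.h>):

#include <stdio.h>
#include <errno.h>
#include <sys/quota.h>

int main(void)
{
	struct if_nextdqblk nq;
	unsigned int id = 0;

	while (quotactl(QCMD(Q_GETNEXTQUOTA, USRQUOTA), "/dev/sda1", id,
			(void *)&nq) == 0) {
		printf("id %u: %llu bytes\n", nq.dqb_id,
		       (unsigned long long)nq.dqb_curspace);
		id = nq.dqb_id + 1;	/* resume after the ID just returned */
	}
	if (errno != ESRCH)	/* ESRCH: no next ID, or quota not enabled */
		perror("quotactl");
	return 0;
}
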
index d07a2f91d858091468f6e7fe3b974989959c64db..8b252673d4540b519e4a46096869ff039246e614 100644 (file)
@@ -47,7 +47,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
        void *msg_head;
        int ret;
        int msg_size = 4 * nla_total_size(sizeof(u32)) +
-                      2 * nla_total_size(sizeof(u64));
+                      2 * nla_total_size_64bit(sizeof(u64));
 
        /* We have to allocate using GFP_NOFS as we are called from a
         * filesystem performing write and thus further recursion into
@@ -68,8 +68,9 @@ void quota_send_warning(struct kqid qid, dev_t dev,
        ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, qid.type);
        if (ret)
                goto attr_err_out;
-       ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID,
-                         from_kqid_munged(&init_user_ns, qid));
+       ret = nla_put_u64_64bit(skb, QUOTA_NL_A_EXCESS_ID,
+                               from_kqid_munged(&init_user_ns, qid),
+                               QUOTA_NL_A_PAD);
        if (ret)
                goto attr_err_out;
        ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
@@ -81,8 +82,9 @@ void quota_send_warning(struct kqid qid, dev_t dev,
        ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, MINOR(dev));
        if (ret)
                goto attr_err_out;
-       ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID,
-                         from_kuid_munged(&init_user_ns, current_uid()));
+       ret = nla_put_u64_64bit(skb, QUOTA_NL_A_CAUSED_ID,
+                               from_kuid_munged(&init_user_ns, current_uid()),
+                               QUOTA_NL_A_PAD);
        if (ret)
                goto attr_err_out;
        genlmsg_end(skb, msg_head);
index 38981b03752433de71197c5c724999a33dc2d53d..1ab6e6c2e60e72f14c3749cf4a3bff657b5b3244 100644 (file)
@@ -223,8 +223,8 @@ int ramfs_fill_super(struct super_block *sb, void *data, int silent)
                return err;
 
        sb->s_maxbytes          = MAX_LFS_FILESIZE;
-       sb->s_blocksize         = PAGE_CACHE_SIZE;
-       sb->s_blocksize_bits    = PAGE_CACHE_SHIFT;
+       sb->s_blocksize         = PAGE_SIZE;
+       sb->s_blocksize_bits    = PAGE_SHIFT;
        sb->s_magic             = RAMFS_MAGIC;
        sb->s_op                = &ramfs_ops;
        sb->s_time_gran         = 1;
index 9424a4ba93a9504b12c75c08d0f4a4cd8f49fe20..389773711de4cf0aafc2a4002c484b8bbd7eb984 100644 (file)
@@ -180,11 +180,11 @@ int reiserfs_commit_page(struct inode *inode, struct page *page,
        int partial = 0;
        unsigned blocksize;
        struct buffer_head *bh, *head;
-       unsigned long i_size_index = inode->i_size >> PAGE_CACHE_SHIFT;
+       unsigned long i_size_index = inode->i_size >> PAGE_SHIFT;
        int new;
        int logit = reiserfs_file_data_log(inode);
        struct super_block *s = inode->i_sb;
-       int bh_per_page = PAGE_CACHE_SIZE / s->s_blocksize;
+       int bh_per_page = PAGE_SIZE / s->s_blocksize;
        struct reiserfs_transaction_handle th;
        int ret = 0;
 
index ae9e5b308cf9f5775300e7ee109e4e2f72720b12..d5c2e9c865deed5a50e48cdca85901c8c042e627 100644 (file)
@@ -386,7 +386,7 @@ static int _get_block_create_0(struct inode *inode, sector_t block,
                goto finished;
        }
        /* read file tail into part of page */
-       offset = (cpu_key_k_offset(&key) - 1) & (PAGE_CACHE_SIZE - 1);
+       offset = (cpu_key_k_offset(&key) - 1) & (PAGE_SIZE - 1);
        copy_item_head(&tmp_ih, ih);
 
        /*
@@ -587,10 +587,10 @@ static int convert_tail_for_hole(struct inode *inode,
                return -EIO;
 
        /* always try to read until the end of the block */
-       tail_start = tail_offset & (PAGE_CACHE_SIZE - 1);
+       tail_start = tail_offset & (PAGE_SIZE - 1);
        tail_end = (tail_start | (bh_result->b_size - 1)) + 1;
 
-       index = tail_offset >> PAGE_CACHE_SHIFT;
+       index = tail_offset >> PAGE_SHIFT;
        /*
         * hole_page can be zero in case of direct_io, we are sure
         * that we cannot get here if we write with O_DIRECT into tail page
@@ -629,7 +629,7 @@ static int convert_tail_for_hole(struct inode *inode,
 unlock:
        if (tail_page != hole_page) {
                unlock_page(tail_page);
-               page_cache_release(tail_page);
+               put_page(tail_page);
        }
 out:
        return retval;
@@ -2189,11 +2189,11 @@ static int grab_tail_page(struct inode *inode,
         * we want the page with the last byte in the file,
         * not the page that will hold the next byte for appending
         */
-       unsigned long index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT;
+       unsigned long index = (inode->i_size - 1) >> PAGE_SHIFT;
        unsigned long pos = 0;
        unsigned long start = 0;
        unsigned long blocksize = inode->i_sb->s_blocksize;
-       unsigned long offset = (inode->i_size) & (PAGE_CACHE_SIZE - 1);
+       unsigned long offset = (inode->i_size) & (PAGE_SIZE - 1);
        struct buffer_head *bh;
        struct buffer_head *head;
        struct page *page;
@@ -2251,7 +2251,7 @@ out:
 
 unlock:
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
        return error;
 }
 
@@ -2265,7 +2265,7 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
 {
        struct reiserfs_transaction_handle th;
        /* we want the offset for the first byte after the end of the file */
-       unsigned long offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
+       unsigned long offset = inode->i_size & (PAGE_SIZE - 1);
        unsigned blocksize = inode->i_sb->s_blocksize;
        unsigned length;
        struct page *page = NULL;
@@ -2345,7 +2345,7 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
                        }
                }
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
        }
 
        reiserfs_write_unlock(inode->i_sb);
@@ -2354,7 +2354,7 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
 out:
        if (page) {
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
        }
 
        reiserfs_write_unlock(inode->i_sb);
@@ -2426,7 +2426,7 @@ research:
        } else if (is_direct_le_ih(ih)) {
                char *p;
                p = page_address(bh_result->b_page);
-               p += (byte_offset - 1) & (PAGE_CACHE_SIZE - 1);
+               p += (byte_offset - 1) & (PAGE_SIZE - 1);
                copy_size = ih_item_len(ih) - pos_in_item;
 
                fs_gen = get_generation(inode->i_sb);
@@ -2525,7 +2525,7 @@ static int reiserfs_write_full_page(struct page *page,
                                    struct writeback_control *wbc)
 {
        struct inode *inode = page->mapping->host;
-       unsigned long end_index = inode->i_size >> PAGE_CACHE_SHIFT;
+       unsigned long end_index = inode->i_size >> PAGE_SHIFT;
        int error = 0;
        unsigned long block;
        sector_t last_block;
@@ -2535,7 +2535,7 @@ static int reiserfs_write_full_page(struct page *page,
        int checked = PageChecked(page);
        struct reiserfs_transaction_handle th;
        struct super_block *s = inode->i_sb;
-       int bh_per_page = PAGE_CACHE_SIZE / s->s_blocksize;
+       int bh_per_page = PAGE_SIZE / s->s_blocksize;
        th.t_trans_id = 0;
 
        /* no logging allowed when nonblocking or from PF_MEMALLOC */
@@ -2564,16 +2564,16 @@ static int reiserfs_write_full_page(struct page *page,
        if (page->index >= end_index) {
                unsigned last_offset;
 
-               last_offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
+               last_offset = inode->i_size & (PAGE_SIZE - 1);
                /* no file contents in this page */
                if (page->index >= end_index + 1 || !last_offset) {
                        unlock_page(page);
                        return 0;
                }
-               zero_user_segment(page, last_offset, PAGE_CACHE_SIZE);
+               zero_user_segment(page, last_offset, PAGE_SIZE);
        }
        bh = head;
-       block = page->index << (PAGE_CACHE_SHIFT - s->s_blocksize_bits);
+       block = page->index << (PAGE_SHIFT - s->s_blocksize_bits);
        last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
        /* first map all the buffers, logging any direct items we find */
        do {
@@ -2774,7 +2774,7 @@ static int reiserfs_write_begin(struct file *file,
                *fsdata = (void *)(unsigned long)flags;
        }
 
-       index = pos >> PAGE_CACHE_SHIFT;
+       index = pos >> PAGE_SHIFT;
        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page)
                return -ENOMEM;
@@ -2822,7 +2822,7 @@ static int reiserfs_write_begin(struct file *file,
        }
        if (ret) {
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
                /* Truncate allocated blocks */
                reiserfs_truncate_failed_write(inode);
        }
@@ -2909,7 +2909,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
        else
                th = NULL;
 
-       start = pos & (PAGE_CACHE_SIZE - 1);
+       start = pos & (PAGE_SIZE - 1);
        if (unlikely(copied < len)) {
                if (!PageUptodate(page))
                        copied = 0;
@@ -2974,7 +2974,7 @@ out:
        if (locked)
                reiserfs_write_unlock(inode->i_sb);
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 
        if (pos + len > inode->i_size)
                reiserfs_truncate_failed_write(inode);
@@ -2996,7 +2996,7 @@ int reiserfs_commit_write(struct file *f, struct page *page,
                          unsigned from, unsigned to)
 {
        struct inode *inode = page->mapping->host;
-       loff_t pos = ((loff_t) page->index << PAGE_CACHE_SHIFT) + to;
+       loff_t pos = ((loff_t) page->index << PAGE_SHIFT) + to;
        int ret = 0;
        int update_sd = 0;
        struct reiserfs_transaction_handle *th = NULL;
@@ -3181,7 +3181,7 @@ static void reiserfs_invalidatepage(struct page *page, unsigned int offset,
        struct inode *inode = page->mapping->host;
        unsigned int curr_off = 0;
        unsigned int stop = offset + length;
-       int partial_page = (offset || length < PAGE_CACHE_SIZE);
+       int partial_page = (offset || length < PAGE_SIZE);
        int ret = 1;
 
        BUG_ON(!PageLocked(page));
index 036a1fc0a8c35655a2bdb6cfb5b79dfccca04d54..57045f423893f5d813dd92c5b3c621a3e0efde08 100644 (file)
@@ -203,7 +203,7 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
         * __reiserfs_write_begin on that page.  This will force a
         * reiserfs_get_block to unpack the tail for us.
         */
-       index = inode->i_size >> PAGE_CACHE_SHIFT;
+       index = inode->i_size >> PAGE_SHIFT;
        mapping = inode->i_mapping;
        page = grab_cache_page(mapping, index);
        retval = -ENOMEM;
@@ -221,7 +221,7 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
 
 out_unlock:
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 
 out:
        inode_unlock(inode);
index 44c2bdced1c87fda2f0592d9939ba5d626dc4460..2ace90e981f07a51c69c38cc16b415e221ca35d0 100644 (file)
@@ -599,18 +599,18 @@ static int journal_list_still_alive(struct super_block *s,
  * This does a check to see if the buffer belongs to one of these
  * lost pages before doing the final put_bh.  If page->mapping was
  * null, it tries to free buffers on the page, which should make the
- * final page_cache_release drop the page from the lru.
+ * final put_page drop the page from the lru.
  */
 static void release_buffer_page(struct buffer_head *bh)
 {
        struct page *page = bh->b_page;
        if (!page->mapping && trylock_page(page)) {
-               page_cache_get(page);
+               get_page(page);
                put_bh(bh);
                if (!page->mapping)
                        try_to_free_buffers(page);
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
        } else {
                put_bh(bh);
        }
index 24cbe013240fa6559910942173bf7e89768ec49d..5feacd689241e25f346756ee2528b4b42b845d42 100644 (file)
@@ -1342,7 +1342,7 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
                 */
 
                data = kmap_atomic(un_bh->b_page);
-               off = ((le_ih_k_offset(&s_ih) - 1) & (PAGE_CACHE_SIZE - 1));
+               off = ((le_ih_k_offset(&s_ih) - 1) & (PAGE_SIZE - 1));
                memcpy(data + off,
                       ih_item_body(PATH_PLAST_BUFFER(path), &s_ih),
                       ret_value);
@@ -1511,7 +1511,7 @@ static void unmap_buffers(struct page *page, loff_t pos)
 
        if (page) {
                if (page_has_buffers(page)) {
-                       tail_index = pos & (PAGE_CACHE_SIZE - 1);
+                       tail_index = pos & (PAGE_SIZE - 1);
                        cur_index = 0;
                        head = page_buffers(page);
                        bh = head;
index f41e19b4bb42f852745caff6888288d5fa04bc2e..2d5489b0a2693a007dee760995e10cd8f1e7daef 100644 (file)
@@ -151,7 +151,7 @@ int direct2indirect(struct reiserfs_transaction_handle *th, struct inode *inode,
         */
        if (up_to_date_bh) {
                unsigned pgoff =
-                   (tail_offset + total_tail - 1) & (PAGE_CACHE_SIZE - 1);
+                   (tail_offset + total_tail - 1) & (PAGE_SIZE - 1);
                char *kaddr = kmap_atomic(up_to_date_bh->b_page);
                memset(kaddr + pgoff, 0, blk_size - total_tail);
                kunmap_atomic(kaddr);
@@ -271,7 +271,7 @@ int indirect2direct(struct reiserfs_transaction_handle *th,
         * the page was locked and this part of the page was up to date when
         * indirect2direct was called, so we know the bytes are still valid
         */
-       tail = tail + (pos & (PAGE_CACHE_SIZE - 1));
+       tail = tail + (pos & (PAGE_SIZE - 1));
 
        PATH_LAST_POSITION(path)++;
 
index 57e0b23105327b298d17db42d44c2ebf8bd31715..28f5f8b113700e63e9529bba67008ffcb469a724 100644 (file)
@@ -415,7 +415,7 @@ out:
 static inline void reiserfs_put_page(struct page *page)
 {
        kunmap(page);
-       page_cache_release(page);
+       put_page(page);
 }
 
 static struct page *reiserfs_get_page(struct inode *dir, size_t n)
@@ -427,7 +427,7 @@ static struct page *reiserfs_get_page(struct inode *dir, size_t n)
         * and an unlink/rmdir has just occurred - GFP_NOFS avoids this
         */
        mapping_set_gfp_mask(mapping, GFP_NOFS);
-       page = read_mapping_page(mapping, n >> PAGE_CACHE_SHIFT, NULL);
+       page = read_mapping_page(mapping, n >> PAGE_SHIFT, NULL);
        if (!IS_ERR(page)) {
                kmap(page);
                if (PageError(page))
@@ -526,10 +526,10 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
        while (buffer_pos < buffer_size || buffer_pos == 0) {
                size_t chunk;
                size_t skip = 0;
-               size_t page_offset = (file_pos & (PAGE_CACHE_SIZE - 1));
+               size_t page_offset = (file_pos & (PAGE_SIZE - 1));
 
-               if (buffer_size - buffer_pos > PAGE_CACHE_SIZE)
-                       chunk = PAGE_CACHE_SIZE;
+               if (buffer_size - buffer_pos > PAGE_SIZE)
+                       chunk = PAGE_SIZE;
                else
                        chunk = buffer_size - buffer_pos;
 
@@ -546,8 +546,8 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
                        struct reiserfs_xattr_header *rxh;
 
                        skip = file_pos = sizeof(struct reiserfs_xattr_header);
-                       if (chunk + skip > PAGE_CACHE_SIZE)
-                               chunk = PAGE_CACHE_SIZE - skip;
+                       if (chunk + skip > PAGE_SIZE)
+                               chunk = PAGE_SIZE - skip;
                        rxh = (struct reiserfs_xattr_header *)data;
                        rxh->h_magic = cpu_to_le32(REISERFS_XATTR_MAGIC);
                        rxh->h_hash = cpu_to_le32(xahash);
@@ -675,8 +675,8 @@ reiserfs_xattr_get(struct inode *inode, const char *name, void *buffer,
                char *data;
                size_t skip = 0;
 
-               if (isize - file_pos > PAGE_CACHE_SIZE)
-                       chunk = PAGE_CACHE_SIZE;
+               if (isize - file_pos > PAGE_SIZE)
+                       chunk = PAGE_SIZE;
                else
                        chunk = isize - file_pos;
 
index e85664b7c7d963522fd7efc938a3d96a27edc651..19f532e7d35e9a501256ab64f76a645bf7d6b2e6 100644 (file)
@@ -72,9 +72,10 @@ int seq_open(struct file *file, const struct seq_operations *op)
 
        mutex_init(&p->lock);
        p->op = op;
-#ifdef CONFIG_USER_NS
-       p->user_ns = file->f_cred->user_ns;
-#endif
+
+       // No refcounting: the lifetime of 'p' is constrained
+       // to the lifetime of the file.
+       p->file = file;
 
        /*
         * Wrappers around seq_open(e.g. swaps_open) need to be
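
The seq_file hunk above drops the CONFIG_USER_NS-only user_ns cache in favour of a backpointer to the struct file, whose credentials always carry the opener's namespace. With that pointer in place the namespace can be derived instead of stored; a sketch of the accompanying accessor change in the header (not shown in this view) would look roughly like:

    static inline struct user_namespace *seq_user_ns(struct seq_file *seq)
    {
    #ifdef CONFIG_USER_NS
            return seq->file->f_cred->user_ns;
    #else
            extern struct user_namespace init_user_ns;
            return &init_user_ns;
    #endif
    }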
index 9947b5c696649a20cf4064883efc89ae26a81b78..b018eb485019b1ed1bed2ccdc6c7b9914c575963 100644 (file)
@@ -88,7 +88,7 @@ out_unlock:
 static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe,
                                        struct pipe_buffer *buf)
 {
-       page_cache_release(buf->page);
+       put_page(buf->page);
        buf->flags &= ~PIPE_BUF_FLAG_LRU;
 }
 
@@ -268,7 +268,7 @@ EXPORT_SYMBOL_GPL(splice_to_pipe);
 
 void spd_release_page(struct splice_pipe_desc *spd, unsigned int i)
 {
-       page_cache_release(spd->pages[i]);
+       put_page(spd->pages[i]);
 }
 
 /*
@@ -328,9 +328,9 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
        if (splice_grow_spd(pipe, &spd))
                return -ENOMEM;
 
-       index = *ppos >> PAGE_CACHE_SHIFT;
-       loff = *ppos & ~PAGE_CACHE_MASK;
-       req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+       index = *ppos >> PAGE_SHIFT;
+       loff = *ppos & ~PAGE_MASK;
+       req_pages = (len + loff + PAGE_SIZE - 1) >> PAGE_SHIFT;
        nr_pages = min(req_pages, spd.nr_pages_max);
 
        /*
@@ -365,7 +365,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
                        error = add_to_page_cache_lru(page, mapping, index,
                                   mapping_gfp_constraint(mapping, GFP_KERNEL));
                        if (unlikely(error)) {
-                               page_cache_release(page);
+                               put_page(page);
                                if (error == -EEXIST)
                                        continue;
                                break;
@@ -385,7 +385,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
         * Now loop over the map and see if we need to start IO on any
         * pages, fill in the partial map, etc.
         */
-       index = *ppos >> PAGE_CACHE_SHIFT;
+       index = *ppos >> PAGE_SHIFT;
        nr_pages = spd.nr_pages;
        spd.nr_pages = 0;
        for (page_nr = 0; page_nr < nr_pages; page_nr++) {
@@ -397,7 +397,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
                /*
                 * this_len is the max we'll use from this page
                 */
-               this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
+               this_len = min_t(unsigned long, len, PAGE_SIZE - loff);
                page = spd.pages[page_nr];
 
                if (PageReadahead(page))
@@ -426,7 +426,7 @@ retry_lookup:
                                        error = -ENOMEM;
                                        break;
                                }
-                               page_cache_release(spd.pages[page_nr]);
+                               put_page(spd.pages[page_nr]);
                                spd.pages[page_nr] = page;
                        }
                        /*
@@ -456,7 +456,7 @@ fill_it:
                 * i_size must be checked after PageUptodate.
                 */
                isize = i_size_read(mapping->host);
-               end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
+               end_index = (isize - 1) >> PAGE_SHIFT;
                if (unlikely(!isize || index > end_index))
                        break;
 
@@ -470,7 +470,7 @@ fill_it:
                        /*
                         * max good bytes in this page
                         */
-                       plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
+                       plen = ((isize - 1) & ~PAGE_MASK) + 1;
                        if (plen <= loff)
                                break;
 
@@ -494,8 +494,8 @@ fill_it:
         * we got, 'nr_pages' is how many pages are in the map.
         */
        while (page_nr < nr_pages)
-               page_cache_release(spd.pages[page_nr++]);
-       in->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
+               put_page(spd.pages[page_nr++]);
+       in->f_ra.prev_pos = (loff_t)index << PAGE_SHIFT;
 
        if (spd.nr_pages)
                error = splice_to_pipe(pipe, &spd);
@@ -636,8 +636,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
                        goto shrink_ret;
        }
 
-       offset = *ppos & ~PAGE_CACHE_MASK;
-       nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+       offset = *ppos & ~PAGE_MASK;
+       nr_pages = (len + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
        for (i = 0; i < nr_pages && i < spd.nr_pages_max && len; i++) {
                struct page *page;
@@ -647,7 +647,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
                if (!page)
                        goto err;
 
-               this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
+               this_len = min_t(size_t, len, PAGE_SIZE - offset);
                vec[i].iov_base = (void __user *) page_address(page);
                vec[i].iov_len = this_len;
                spd.pages[i] = page;
index 0cea9b9236d07c81d0cc46c0b22aeba334cc645d..2c2618410d51b92113fe9a00173078abace498f5 100644 (file)
@@ -181,11 +181,11 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
                        in = min(bytes, msblk->devblksize - offset);
                        bytes -= in;
                        while (in) {
-                               if (pg_offset == PAGE_CACHE_SIZE) {
+                               if (pg_offset == PAGE_SIZE) {
                                        data = squashfs_next_page(output);
                                        pg_offset = 0;
                                }
-                               avail = min_t(int, in, PAGE_CACHE_SIZE -
+                               avail = min_t(int, in, PAGE_SIZE -
                                                pg_offset);
                                memcpy(data + pg_offset, bh[k]->b_data + offset,
                                                avail);
index 1cb70a0b216844a136bf3b47ee6534d22496bb1c..23813c078cc9527f547c345ba01ce31dafd570ab 100644 (file)
@@ -30,7 +30,7 @@
  * access the metadata and fragment caches.
  *
  * To avoid out of memory and fragmentation issues with vmalloc the cache
- * uses sequences of kmalloced PAGE_CACHE_SIZE buffers.
+ * uses sequences of kmalloced PAGE_SIZE buffers.
  *
  * It should be noted that the cache is not used for file datablocks, these
  * are decompressed and cached in the page-cache in the normal way.  The
@@ -231,7 +231,7 @@ void squashfs_cache_delete(struct squashfs_cache *cache)
 /*
  * Initialise cache allocating the specified number of entries, each of
  * size block_size.  To avoid vmalloc fragmentation issues each entry
- * is allocated as a sequence of kmalloced PAGE_CACHE_SIZE buffers.
+ * is allocated as a sequence of kmalloced PAGE_SIZE buffers.
  */
 struct squashfs_cache *squashfs_cache_init(char *name, int entries,
        int block_size)
@@ -255,7 +255,7 @@ struct squashfs_cache *squashfs_cache_init(char *name, int entries,
        cache->unused = entries;
        cache->entries = entries;
        cache->block_size = block_size;
-       cache->pages = block_size >> PAGE_CACHE_SHIFT;
+       cache->pages = block_size >> PAGE_SHIFT;
        cache->pages = cache->pages ? cache->pages : 1;
        cache->name = name;
        cache->num_waiters = 0;
@@ -275,7 +275,7 @@ struct squashfs_cache *squashfs_cache_init(char *name, int entries,
                }
 
                for (j = 0; j < cache->pages; j++) {
-                       entry->data[j] = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+                       entry->data[j] = kmalloc(PAGE_SIZE, GFP_KERNEL);
                        if (entry->data[j] == NULL) {
                                ERROR("Failed to allocate %s buffer\n", name);
                                goto cleanup;
@@ -314,10 +314,10 @@ int squashfs_copy_data(void *buffer, struct squashfs_cache_entry *entry,
                return min(length, entry->length - offset);
 
        while (offset < entry->length) {
-               void *buff = entry->data[offset / PAGE_CACHE_SIZE]
-                               + (offset % PAGE_CACHE_SIZE);
+               void *buff = entry->data[offset / PAGE_SIZE]
+                               + (offset % PAGE_SIZE);
                int bytes = min_t(int, entry->length - offset,
-                               PAGE_CACHE_SIZE - (offset % PAGE_CACHE_SIZE));
+                               PAGE_SIZE - (offset % PAGE_SIZE));
 
                if (bytes >= remaining) {
                        memcpy(buffer, buff, remaining);
@@ -415,7 +415,7 @@ struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *sb,
  */
 void *squashfs_read_table(struct super_block *sb, u64 block, int length)
 {
-       int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+       int pages = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
        int i, res;
        void *table, *buffer, **data;
        struct squashfs_page_actor *actor;
@@ -436,7 +436,7 @@ void *squashfs_read_table(struct super_block *sb, u64 block, int length)
                goto failed2;
        }
 
-       for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE)
+       for (i = 0; i < pages; i++, buffer += PAGE_SIZE)
                data[i] = buffer;
 
        res = squashfs_read_data(sb, block, length |
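
squashfs_copy_data() above shows why the cache comment speaks of "sequences of kmalloced PAGE_SIZE buffers": byte `offset` of a cache entry lives in buffer offset / PAGE_SIZE at offset % PAGE_SIZE, so each copy step stops at a buffer boundary. A kernel-style sketch of that addressing, with copy_from_paged() as a hypothetical name:

    static void copy_from_paged(void *dst, void **data, int offset, int length)
    {
            while (length > 0) {
                    char *src = (char *)data[offset / PAGE_SIZE]
                                    + (offset % PAGE_SIZE);
                    int bytes = min_t(int, length,
                                      PAGE_SIZE - (offset % PAGE_SIZE));

                    memcpy(dst, src, bytes);
                    dst = (char *)dst + bytes;
                    offset += bytes;
                    length -= bytes;
            }
    }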
index e9034bf6e5ae27bb3e2379e0816e2b11d5ed41b1..d2bc13636f7927a7485bb153c4d0883a7b2ea1e2 100644 (file)
@@ -102,7 +102,7 @@ static void *get_comp_opts(struct super_block *sb, unsigned short flags)
         * Read decompressor specific options from file system if present
         */
        if (SQUASHFS_COMP_OPTS(flags)) {
-               buffer = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+               buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
                if (buffer == NULL) {
                        comp_opts = ERR_PTR(-ENOMEM);
                        goto out;
index e5c9689062ba81fff5db08f50c2d39d53c4508d9..13d80947bf9e6adac348878e3494b38cdd206099 100644 (file)
@@ -175,7 +175,7 @@ static long long read_indexes(struct super_block *sb, int n,
 {
        int err, i;
        long long block = 0;
-       __le32 *blist = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+       __le32 *blist = kmalloc(PAGE_SIZE, GFP_KERNEL);
 
        if (blist == NULL) {
                ERROR("read_indexes: Failed to allocate block_list\n");
@@ -183,7 +183,7 @@ static long long read_indexes(struct super_block *sb, int n,
        }
 
        while (n) {
-               int blocks = min_t(int, n, PAGE_CACHE_SIZE >> 2);
+               int blocks = min_t(int, n, PAGE_SIZE >> 2);
 
                err = squashfs_read_metadata(sb, blist, start_block,
                                offset, blocks << 2);
@@ -377,19 +377,19 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
        struct inode *inode = page->mapping->host;
        struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
        void *pageaddr;
-       int i, mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1;
+       int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
        int start_index = page->index & ~mask, end_index = start_index | mask;
 
        /*
         * Loop copying datablock into pages.  As the datablock likely covers
-        * many PAGE_CACHE_SIZE pages (default block size is 128 KiB) explicitly
+        * many PAGE_SIZE pages (default block size is 128 KiB) explicitly
         * grab the pages from the page cache, except for the page that we've
         * been called to fill.
         */
        for (i = start_index; i <= end_index && bytes > 0; i++,
-                       bytes -= PAGE_CACHE_SIZE, offset += PAGE_CACHE_SIZE) {
+                       bytes -= PAGE_SIZE, offset += PAGE_SIZE) {
                struct page *push_page;
-               int avail = buffer ? min_t(int, bytes, PAGE_CACHE_SIZE) : 0;
+               int avail = buffer ? min_t(int, bytes, PAGE_SIZE) : 0;
 
                TRACE("bytes %d, i %d, available_bytes %d\n", bytes, i, avail);
 
@@ -404,14 +404,14 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
 
                pageaddr = kmap_atomic(push_page);
                squashfs_copy_data(pageaddr, buffer, offset, avail);
-               memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail);
+               memset(pageaddr + avail, 0, PAGE_SIZE - avail);
                kunmap_atomic(pageaddr);
                flush_dcache_page(push_page);
                SetPageUptodate(push_page);
 skip_page:
                unlock_page(push_page);
                if (i != page->index)
-                       page_cache_release(push_page);
+                       put_page(push_page);
        }
 }
 
@@ -454,7 +454,7 @@ static int squashfs_readpage(struct file *file, struct page *page)
 {
        struct inode *inode = page->mapping->host;
        struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
-       int index = page->index >> (msblk->block_log - PAGE_CACHE_SHIFT);
+       int index = page->index >> (msblk->block_log - PAGE_SHIFT);
        int file_end = i_size_read(inode) >> msblk->block_log;
        int res;
        void *pageaddr;
@@ -462,8 +462,8 @@ static int squashfs_readpage(struct file *file, struct page *page)
        TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
                                page->index, squashfs_i(inode)->start);
 
-       if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
-                                       PAGE_CACHE_SHIFT))
+       if (page->index >= ((i_size_read(inode) + PAGE_SIZE - 1) >>
+                                       PAGE_SHIFT))
                goto out;
 
        if (index < file_end || squashfs_i(inode)->fragment_block ==
@@ -487,7 +487,7 @@ error_out:
        SetPageError(page);
 out:
        pageaddr = kmap_atomic(page);
-       memset(pageaddr, 0, PAGE_CACHE_SIZE);
+       memset(pageaddr, 0, PAGE_SIZE);
        kunmap_atomic(pageaddr);
        flush_dcache_page(page);
        if (!PageError(page))
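
The mask arithmetic in squashfs_copy_cache() and squashfs_readpage() converts between datablocks and the pages they span. Worked through with the default 128 KiB block size (block_log = 17) and 4 KiB pages (PAGE_SHIFT = 12):

    mask        = (1 << (17 - 12)) - 1 = 31    /* 32 pages per block */
    page->index = 70
    start_index = 70 & ~31 = 64
    end_index   = 64 | 31  = 95

so a readpage for page 70 decompresses the whole datablock once and populates pages 64..95 in a single pass.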
index 43e7a7eddac03cf7ca659cd891ca219bf702aade..cb485d8e0e91b1b2ff1cb9b0330339c51c15b8a4 100644 (file)
@@ -30,8 +30,8 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
        struct inode *inode = target_page->mapping->host;
        struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
 
-       int file_end = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
-       int mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1;
+       int file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
+       int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
        int start_index = target_page->index & ~mask;
        int end_index = start_index | mask;
        int i, n, pages, missing_pages, bytes, res = -ENOMEM;
@@ -68,7 +68,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
 
                if (PageUptodate(page[i])) {
                        unlock_page(page[i]);
-                       page_cache_release(page[i]);
+                       put_page(page[i]);
                        page[i] = NULL;
                        missing_pages++;
                }
@@ -96,10 +96,10 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
                goto mark_errored;
 
        /* Last page may have trailing bytes not filled */
-       bytes = res % PAGE_CACHE_SIZE;
+       bytes = res % PAGE_SIZE;
        if (bytes) {
                pageaddr = kmap_atomic(page[pages - 1]);
-               memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
+               memset(pageaddr + bytes, 0, PAGE_SIZE - bytes);
                kunmap_atomic(pageaddr);
        }
 
@@ -109,7 +109,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
                SetPageUptodate(page[i]);
                unlock_page(page[i]);
                if (page[i] != target_page)
-                       page_cache_release(page[i]);
+                       put_page(page[i]);
        }
 
        kfree(actor);
@@ -127,7 +127,7 @@ mark_errored:
                flush_dcache_page(page[i]);
                SetPageError(page[i]);
                unlock_page(page[i]);
-               page_cache_release(page[i]);
+               put_page(page[i]);
        }
 
 out:
@@ -153,21 +153,21 @@ static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
        }
 
        for (n = 0; n < pages && bytes > 0; n++,
-                       bytes -= PAGE_CACHE_SIZE, offset += PAGE_CACHE_SIZE) {
-               int avail = min_t(int, bytes, PAGE_CACHE_SIZE);
+                       bytes -= PAGE_SIZE, offset += PAGE_SIZE) {
+               int avail = min_t(int, bytes, PAGE_SIZE);
 
                if (page[n] == NULL)
                        continue;
 
                pageaddr = kmap_atomic(page[n]);
                squashfs_copy_data(pageaddr, buffer, offset, avail);
-               memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail);
+               memset(pageaddr + avail, 0, PAGE_SIZE - avail);
                kunmap_atomic(pageaddr);
                flush_dcache_page(page[n]);
                SetPageUptodate(page[n]);
                unlock_page(page[n]);
                if (page[n] != target_page)
-                       page_cache_release(page[n]);
+                       put_page(page[n]);
        }
 
 out:
index c31e2bc9c08151e6895d32c8bd8220e6042d09ef..ff4468bd18b02586394e8442abe2631b1fb548aa 100644 (file)
@@ -117,13 +117,13 @@ static int lz4_uncompress(struct squashfs_sb_info *msblk, void *strm,
        data = squashfs_first_page(output);
        buff = stream->output;
        while (data) {
-               if (bytes <= PAGE_CACHE_SIZE) {
+               if (bytes <= PAGE_SIZE) {
                        memcpy(data, buff, bytes);
                        break;
                }
-               memcpy(data, buff, PAGE_CACHE_SIZE);
-               buff += PAGE_CACHE_SIZE;
-               bytes -= PAGE_CACHE_SIZE;
+               memcpy(data, buff, PAGE_SIZE);
+               buff += PAGE_SIZE;
+               bytes -= PAGE_SIZE;
                data = squashfs_next_page(output);
        }
        squashfs_finish_page(output);
index 244b9fbfff7b299195585320328cfc7540e6887e..934c17e965908eccff7e23729f8b9f27a0425628 100644 (file)
@@ -102,13 +102,13 @@ static int lzo_uncompress(struct squashfs_sb_info *msblk, void *strm,
        data = squashfs_first_page(output);
        buff = stream->output;
        while (data) {
-               if (bytes <= PAGE_CACHE_SIZE) {
+               if (bytes <= PAGE_SIZE) {
                        memcpy(data, buff, bytes);
                        break;
                } else {
-                       memcpy(data, buff, PAGE_CACHE_SIZE);
-                       buff += PAGE_CACHE_SIZE;
-                       bytes -= PAGE_CACHE_SIZE;
+                       memcpy(data, buff, PAGE_SIZE);
+                       buff += PAGE_SIZE;
+                       bytes -= PAGE_SIZE;
                        data = squashfs_next_page(output);
                }
        }
index 5a1c11f5644153b6a644d45fb1669ab0b3ef1f13..9b7b1b6a78926b605843119ab04ed5cc9524181a 100644 (file)
@@ -48,7 +48,7 @@ struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
        if (actor == NULL)
                return NULL;
 
-       actor->length = length ? : pages * PAGE_CACHE_SIZE;
+       actor->length = length ? : pages * PAGE_SIZE;
        actor->buffer = buffer;
        actor->pages = pages;
        actor->next_page = 0;
@@ -88,7 +88,7 @@ struct squashfs_page_actor *squashfs_page_actor_init_special(struct page **page,
        if (actor == NULL)
                return NULL;
 
-       actor->length = length ? : pages * PAGE_CACHE_SIZE;
+       actor->length = length ? : pages * PAGE_SIZE;
        actor->page = page;
        actor->pages = pages;
        actor->next_page = 0;
index 26dd82008b82c6b91bac678fdec131c1ecbc5167..98537eab27e270d8b04f04b7d0db2ee519e21d46 100644 (file)
@@ -24,7 +24,7 @@ static inline struct squashfs_page_actor *squashfs_page_actor_init(void **page,
        if (actor == NULL)
                return NULL;
 
-       actor->length = length ? : pages * PAGE_CACHE_SIZE;
+       actor->length = length ? : pages * PAGE_SIZE;
        actor->page = page;
        actor->pages = pages;
        actor->next_page = 0;
index 5e79bfa4f260784dcba925e4d6611585542d88c6..cf01e15a7b16dff288e2479014d20e0d787096d5 100644 (file)
@@ -152,7 +152,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
         * Check the system page size is not larger than the filesystem
         * block size (by default 128K).  This is currently not supported.
         */
-       if (PAGE_CACHE_SIZE > msblk->block_size) {
+       if (PAGE_SIZE > msblk->block_size) {
                ERROR("Page size > filesystem block size (%d).  This is "
                        "currently not supported!\n", msblk->block_size);
                goto failed_mount;
index dbcc2f54bad46f9af564e37f4e9b153e439d0205..d688ef42a6a1ff74ce8467cc94b0ff69c9095ce0 100644 (file)
@@ -48,10 +48,10 @@ static int squashfs_symlink_readpage(struct file *file, struct page *page)
        struct inode *inode = page->mapping->host;
        struct super_block *sb = inode->i_sb;
        struct squashfs_sb_info *msblk = sb->s_fs_info;
-       int index = page->index << PAGE_CACHE_SHIFT;
+       int index = page->index << PAGE_SHIFT;
        u64 block = squashfs_i(inode)->start;
        int offset = squashfs_i(inode)->offset;
-       int length = min_t(int, i_size_read(inode) - index, PAGE_CACHE_SIZE);
+       int length = min_t(int, i_size_read(inode) - index, PAGE_SIZE);
        int bytes, copied;
        void *pageaddr;
        struct squashfs_cache_entry *entry;
@@ -94,7 +94,7 @@ static int squashfs_symlink_readpage(struct file *file, struct page *page)
                copied = squashfs_copy_data(pageaddr + bytes, entry, offset,
                                                                length - bytes);
                if (copied == length - bytes)
-                       memset(pageaddr + length, 0, PAGE_CACHE_SIZE - length);
+                       memset(pageaddr + length, 0, PAGE_SIZE - length);
                else
                        block = entry->next_index;
                kunmap_atomic(pageaddr);
index c609624e4b8a8cf88152310337c2533be7bec5cb..6bfaef73d06527f82dcc70ffe33f465a767c4bc2 100644 (file)
@@ -141,7 +141,7 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
        stream->buf.in_pos = 0;
        stream->buf.in_size = 0;
        stream->buf.out_pos = 0;
-       stream->buf.out_size = PAGE_CACHE_SIZE;
+       stream->buf.out_size = PAGE_SIZE;
        stream->buf.out = squashfs_first_page(output);
 
        do {
@@ -158,7 +158,7 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
                        stream->buf.out = squashfs_next_page(output);
                        if (stream->buf.out != NULL) {
                                stream->buf.out_pos = 0;
-                               total += PAGE_CACHE_SIZE;
+                               total += PAGE_SIZE;
                        }
                }
 
index 8727caba6882209ad62102c0148a09b64421626a..2ec24d128bce0856ec89b46c3ec0476b5de78c87 100644 (file)
@@ -69,7 +69,7 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
        int zlib_err, zlib_init = 0, k = 0;
        z_stream *stream = strm;
 
-       stream->avail_out = PAGE_CACHE_SIZE;
+       stream->avail_out = PAGE_SIZE;
        stream->next_out = squashfs_first_page(output);
        stream->avail_in = 0;
 
@@ -85,7 +85,7 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
                if (stream->avail_out == 0) {
                        stream->next_out = squashfs_next_page(output);
                        if (stream->next_out != NULL)
-                               stream->avail_out = PAGE_CACHE_SIZE;
+                               stream->avail_out = PAGE_SIZE;
                }
 
                if (!zlib_init) {
index dd5d1711c7ac3298119316d5e77dab252f86c496..2a54c1f2203595823df1bd97680b9a1fb163e400 100644 (file)
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -302,7 +302,7 @@ SYSCALL_DEFINE4(sync_file_range, int, fd, loff_t, offset, loff_t, nbytes,
                goto out;
 
        if (sizeof(pgoff_t) == 4) {
-               if (offset >= (0x100000000ULL << PAGE_CACHE_SHIFT)) {
+               if (offset >= (0x100000000ULL << PAGE_SHIFT)) {
                        /*
                         * The range starts outside a 32 bit machine's
                         * pagecache addressing capabilities.  Let it "succeed"
@@ -310,7 +310,7 @@ SYSCALL_DEFINE4(sync_file_range, int, fd, loff_t, offset, loff_t, nbytes,
                        ret = 0;
                        goto out;
                }
-               if (endbyte >= (0x100000000ULL << PAGE_CACHE_SHIFT)) {
+               if (endbyte >= (0x100000000ULL << PAGE_SHIFT)) {
                        /*
                         * Out to EOF
                         */
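
The 0x100000000ULL constant in sync_file_range() is just 2^32 pages expressed as a byte offset. Assuming 4 KiB pages, a 32-bit pgoff_t can index at most

    0x100000000ULL << PAGE_SHIFT = 2^32 * 2^12 = 2^44 bytes = 16 TiB,

so a range starting at or beyond that limit cannot be present in a 32-bit machine's page cache at all, and the syscall "succeeds" without doing anything.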
index 63c1bcb224ee886e5261b5b3a9867a581f872f01..c0f0a3e643eb7cb5dbc5d3469f08625af2ae43ec 100644 (file)
@@ -30,7 +30,7 @@ const struct file_operations sysv_dir_operations = {
 static inline void dir_put_page(struct page *page)
 {
        kunmap(page);
-       page_cache_release(page);
+       put_page(page);
 }
 
 static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len)
@@ -73,8 +73,8 @@ static int sysv_readdir(struct file *file, struct dir_context *ctx)
        if (pos >= inode->i_size)
                return 0;
 
-       offset = pos & ~PAGE_CACHE_MASK;
-       n = pos >> PAGE_CACHE_SHIFT;
+       offset = pos & ~PAGE_MASK;
+       n = pos >> PAGE_SHIFT;
 
        for ( ; n < npages; n++, offset = 0) {
                char *kaddr, *limit;
@@ -85,7 +85,7 @@ static int sysv_readdir(struct file *file, struct dir_context *ctx)
                        continue;
                kaddr = (char *)page_address(page);
                de = (struct sysv_dir_entry *)(kaddr+offset);
-               limit = kaddr + PAGE_CACHE_SIZE - SYSV_DIRSIZE;
+               limit = kaddr + PAGE_SIZE - SYSV_DIRSIZE;
                for ( ;(char*)de <= limit; de++, ctx->pos += sizeof(*de)) {
                        char *name = de->name;
 
@@ -146,7 +146,7 @@ struct sysv_dir_entry *sysv_find_entry(struct dentry *dentry, struct page **res_
                if (!IS_ERR(page)) {
                        kaddr = (char*)page_address(page);
                        de = (struct sysv_dir_entry *) kaddr;
-                       kaddr += PAGE_CACHE_SIZE - SYSV_DIRSIZE;
+                       kaddr += PAGE_SIZE - SYSV_DIRSIZE;
                        for ( ; (char *) de <= kaddr ; de++) {
                                if (!de->inode)
                                        continue;
@@ -190,7 +190,7 @@ int sysv_add_link(struct dentry *dentry, struct inode *inode)
                        goto out;
                kaddr = (char*)page_address(page);
                de = (struct sysv_dir_entry *)kaddr;
-               kaddr += PAGE_CACHE_SIZE - SYSV_DIRSIZE;
+               kaddr += PAGE_SIZE - SYSV_DIRSIZE;
                while ((char *)de <= kaddr) {
                        if (!de->inode)
                                goto got_it;
@@ -261,7 +261,7 @@ int sysv_make_empty(struct inode *inode, struct inode *dir)
        kmap(page);
 
        base = (char*)page_address(page);
-       memset(base, 0, PAGE_CACHE_SIZE);
+       memset(base, 0, PAGE_SIZE);
 
        de = (struct sysv_dir_entry *) base;
        de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
@@ -273,7 +273,7 @@ int sysv_make_empty(struct inode *inode, struct inode *dir)
        kunmap(page);
        err = dir_commit_chunk(page, 0, 2 * SYSV_DIRSIZE);
 fail:
-       page_cache_release(page);
+       put_page(page);
        return err;
 }
 
@@ -296,7 +296,7 @@ int sysv_empty_dir(struct inode * inode)
 
                kaddr = (char *)page_address(page);
                de = (struct sysv_dir_entry *)kaddr;
-               kaddr += PAGE_CACHE_SIZE-SYSV_DIRSIZE;
+               kaddr += PAGE_SIZE-SYSV_DIRSIZE;
 
                for ( ;(char *)de <= kaddr; de++) {
                        if (!de->inode)
index 11e83ed0b4bf495c39a2d6f0128f8fce3793256f..90b60c03b588488cd16b96f2f324f026ee7ee1bf 100644 (file)
@@ -264,11 +264,11 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry,
 out_dir:
        if (dir_de) {
                kunmap(dir_page);
-               page_cache_release(dir_page);
+               put_page(dir_page);
        }
 out_old:
        kunmap(old_page);
-       page_cache_release(old_page);
+       put_page(old_page);
 out:
        return err;
 }
index 065c88f8e4b8c2d689e8fe9ce13122b5465e8f63..446753d8ac34954794550f4051feafa5ae23ce0d 100644 (file)
@@ -121,7 +121,7 @@ static int do_readpage(struct page *page)
        if (block >= beyond) {
                /* Reading beyond inode */
                SetPageChecked(page);
-               memset(addr, 0, PAGE_CACHE_SIZE);
+               memset(addr, 0, PAGE_SIZE);
                goto out;
        }
 
@@ -223,7 +223,7 @@ static int write_begin_slow(struct address_space *mapping,
 {
        struct inode *inode = mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;
-       pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+       pgoff_t index = pos >> PAGE_SHIFT;
        struct ubifs_budget_req req = { .new_page = 1 };
        int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
        struct page *page;
@@ -254,13 +254,13 @@ static int write_begin_slow(struct address_space *mapping,
        }
 
        if (!PageUptodate(page)) {
-               if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE)
+               if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE)
                        SetPageChecked(page);
                else {
                        err = do_readpage(page);
                        if (err) {
                                unlock_page(page);
-                               page_cache_release(page);
+                               put_page(page);
                                ubifs_release_budget(c, &req);
                                return err;
                        }
@@ -428,7 +428,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
        struct inode *inode = mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        struct ubifs_inode *ui = ubifs_inode(inode);
-       pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+       pgoff_t index = pos >> PAGE_SHIFT;
        int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
        int skipped_read = 0;
        struct page *page;
@@ -446,7 +446,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
 
        if (!PageUptodate(page)) {
                /* The page is not loaded from the flash */
-               if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) {
+               if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE) {
                        /*
                         * We change whole page so no need to load it. But we
                         * do not know whether this page exists on the media or
@@ -462,7 +462,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
                        err = do_readpage(page);
                        if (err) {
                                unlock_page(page);
-                               page_cache_release(page);
+                               put_page(page);
                                return err;
                        }
                }
@@ -494,7 +494,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
                        mutex_unlock(&ui->ui_mutex);
                }
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
 
                return write_begin_slow(mapping, pos, len, pagep, flags);
        }
@@ -549,12 +549,12 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping,
        dbg_gen("ino %lu, pos %llu, pg %lu, len %u, copied %d, i_size %lld",
                inode->i_ino, pos, page->index, len, copied, inode->i_size);
 
-       if (unlikely(copied < len && len == PAGE_CACHE_SIZE)) {
+       if (unlikely(copied < len && len == PAGE_SIZE)) {
                /*
                 * VFS copied less data to the page than it intended and
                 * declared in its '->write_begin()' call via the @len
                 * argument. If the page was not up-to-date, and @len was
-                * @PAGE_CACHE_SIZE, the 'ubifs_write_begin()' function did
+                * @PAGE_SIZE, the 'ubifs_write_begin()' function did
                 * not load it from the media (for optimization reasons). This
                 * means that part of the page contains garbage. So read the
                 * page now.
@@ -593,7 +593,7 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping,
 
 out:
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
        return copied;
 }
 
@@ -621,10 +621,10 @@ static int populate_page(struct ubifs_info *c, struct page *page,
 
        addr = zaddr = kmap(page);
 
-       end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
+       end_index = (i_size - 1) >> PAGE_SHIFT;
        if (!i_size || page->index > end_index) {
                hole = 1;
-               memset(addr, 0, PAGE_CACHE_SIZE);
+               memset(addr, 0, PAGE_SIZE);
                goto out_hole;
        }
 
@@ -673,7 +673,7 @@ static int populate_page(struct ubifs_info *c, struct page *page,
        }
 
        if (end_index == page->index) {
-               int len = i_size & (PAGE_CACHE_SIZE - 1);
+               int len = i_size & (PAGE_SIZE - 1);
 
                if (len && len < read)
                        memset(zaddr + len, 0, read - len);
@@ -773,7 +773,7 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
        isize = i_size_read(inode);
        if (isize == 0)
                goto out_free;
-       end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);
+       end_index = ((isize - 1) >> PAGE_SHIFT);
 
        for (page_idx = 1; page_idx < page_cnt; page_idx++) {
                pgoff_t page_offset = offset + page_idx;
@@ -788,7 +788,7 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
                if (!PageUptodate(page))
                        err = populate_page(c, page, bu, &n);
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
                if (err)
                        break;
        }
@@ -905,7 +905,7 @@ static int do_writepage(struct page *page, int len)
 #ifdef UBIFS_DEBUG
        struct ubifs_inode *ui = ubifs_inode(inode);
        spin_lock(&ui->ui_lock);
-       ubifs_assert(page->index <= ui->synced_i_size >> PAGE_CACHE_SHIFT);
+       ubifs_assert(page->index <= ui->synced_i_size >> PAGE_SHIFT);
        spin_unlock(&ui->ui_lock);
 #endif
 
@@ -1001,8 +1001,8 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
        struct inode *inode = page->mapping->host;
        struct ubifs_inode *ui = ubifs_inode(inode);
        loff_t i_size =  i_size_read(inode), synced_i_size;
-       pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
-       int err, len = i_size & (PAGE_CACHE_SIZE - 1);
+       pgoff_t end_index = i_size >> PAGE_SHIFT;
+       int err, len = i_size & (PAGE_SIZE - 1);
        void *kaddr;
 
        dbg_gen("ino %lu, pg %lu, pg flags %#lx",
@@ -1021,7 +1021,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
 
        /* Is the page fully inside @i_size? */
        if (page->index < end_index) {
-               if (page->index >= synced_i_size >> PAGE_CACHE_SHIFT) {
+               if (page->index >= synced_i_size >> PAGE_SHIFT) {
                        err = inode->i_sb->s_op->write_inode(inode, NULL);
                        if (err)
                                goto out_unlock;
@@ -1034,7 +1034,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
                         * with this.
                         */
                }
-               return do_writepage(page, PAGE_CACHE_SIZE);
+               return do_writepage(page, PAGE_SIZE);
        }
 
        /*
@@ -1045,7 +1045,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
         * writes to that region are not written out to the file."
         */
        kaddr = kmap_atomic(page);
-       memset(kaddr + len, 0, PAGE_CACHE_SIZE - len);
+       memset(kaddr + len, 0, PAGE_SIZE - len);
        flush_dcache_page(page);
        kunmap_atomic(kaddr);
 
@@ -1138,7 +1138,7 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode,
        truncate_setsize(inode, new_size);
 
        if (offset) {
-               pgoff_t index = new_size >> PAGE_CACHE_SHIFT;
+               pgoff_t index = new_size >> PAGE_SHIFT;
                struct page *page;
 
                page = find_lock_page(inode->i_mapping, index);
@@ -1157,9 +1157,9 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode,
                                clear_page_dirty_for_io(page);
                                if (UBIFS_BLOCKS_PER_PAGE_SHIFT)
                                        offset = new_size &
-                                                (PAGE_CACHE_SIZE - 1);
+                                                (PAGE_SIZE - 1);
                                err = do_writepage(page, offset);
-                               page_cache_release(page);
+                               put_page(page);
                                if (err)
                                        goto out_budg;
                                /*
@@ -1173,7 +1173,7 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode,
                                 * having to read it.
                                 */
                                unlock_page(page);
-                               page_cache_release(page);
+                               put_page(page);
                        }
                }
        }
@@ -1285,7 +1285,7 @@ static void ubifs_invalidatepage(struct page *page, unsigned int offset,
        struct ubifs_info *c = inode->i_sb->s_fs_info;
 
        ubifs_assert(PagePrivate(page));
-       if (offset || length < PAGE_CACHE_SIZE)
+       if (offset || length < PAGE_SIZE)
                /* Partial page remains dirty */
                return;
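
ubifs_writepage() classifies pages against i_size using the same two derived values throughout: end_index = i_size >> PAGE_SHIFT and len = i_size & (PAGE_SIZE - 1). A worked example with 4 KiB pages and i_size = 10000:

    end_index = 10000 >> 12        = 2
    len       = 10000 & (4096 - 1) = 1808

Pages 0 and 1 sit fully inside i_size and are written whole; page 2 holds 1808 valid bytes, and the rest of that page is zeroed before write-out, matching the mmap rule quoted in the comment above.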
 
index a233ba913be4f0ebe91a56dc5dae9c6c8d01862c..e98c24ee25a10e690c59962bcfecb9f07ccbae56 100644 (file)
@@ -2237,12 +2237,12 @@ static int __init ubifs_init(void)
        BUILD_BUG_ON(UBIFS_COMPR_TYPES_CNT > 4);
 
        /*
-        * We require that PAGE_CACHE_SIZE is greater-than-or-equal-to
+        * We require that PAGE_SIZE is greater-than-or-equal-to
         * UBIFS_BLOCK_SIZE. It is assumed that both are powers of 2.
         */
-       if (PAGE_CACHE_SIZE < UBIFS_BLOCK_SIZE) {
+       if (PAGE_SIZE < UBIFS_BLOCK_SIZE) {
                pr_err("UBIFS error (pid %d): VFS page cache size is %u bytes, but UBIFS requires at least 4096 bytes",
-                      current->pid, (unsigned int)PAGE_CACHE_SIZE);
+                      current->pid, (unsigned int)PAGE_SIZE);
                return -EINVAL;
        }
 
index c2a57e193a81c23138d5f775afc7e69a1ee066e3..4cd7e569cd00d3dcc07ef6c3de38f9c5987d149f 100644 (file)
@@ -46,8 +46,8 @@
 #define UBIFS_SUPER_MAGIC 0x24051905
 
 /* Number of UBIFS blocks per VFS page */
-#define UBIFS_BLOCKS_PER_PAGE (PAGE_CACHE_SIZE / UBIFS_BLOCK_SIZE)
-#define UBIFS_BLOCKS_PER_PAGE_SHIFT (PAGE_CACHE_SHIFT - UBIFS_BLOCK_SHIFT)
+#define UBIFS_BLOCKS_PER_PAGE (PAGE_SIZE / UBIFS_BLOCK_SIZE)
+#define UBIFS_BLOCKS_PER_PAGE_SHIFT (PAGE_SHIFT - UBIFS_BLOCK_SHIFT)
 
 /* "File system end of life" sequence number watermark */
 #define SQNUM_WARN_WATERMARK 0xFFFFFFFF00000000ULL
index 1af98963d860f0e4ed2959d256ff53737d070fcb..877ba1c9b461e38c554a3aa9c51852e28b0b9ca9 100644 (file)
@@ -46,7 +46,7 @@ static void __udf_adinicb_readpage(struct page *page)
 
        kaddr = kmap(page);
        memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, inode->i_size);
-       memset(kaddr + inode->i_size, 0, PAGE_CACHE_SIZE - inode->i_size);
+       memset(kaddr + inode->i_size, 0, PAGE_SIZE - inode->i_size);
        flush_dcache_page(page);
        SetPageUptodate(page);
        kunmap(page);
@@ -87,14 +87,14 @@ static int udf_adinicb_write_begin(struct file *file,
 {
        struct page *page;
 
-       if (WARN_ON_ONCE(pos >= PAGE_CACHE_SIZE))
+       if (WARN_ON_ONCE(pos >= PAGE_SIZE))
                return -EIO;
        page = grab_cache_page_write_begin(mapping, 0, flags);
        if (!page)
                return -ENOMEM;
        *pagep = page;
 
-       if (!PageUptodate(page) && len != PAGE_CACHE_SIZE)
+       if (!PageUptodate(page) && len != PAGE_SIZE)
                __udf_adinicb_readpage(page);
        return 0;
 }
index 166d3ed32c39a54b48c4ec93e2297de3d9efdcc2..2dc461eeb4155bac9bf6a0cf9d32d02bcaffa68f 100644 (file)
@@ -287,7 +287,7 @@ int udf_expand_file_adinicb(struct inode *inode)
        if (!PageUptodate(page)) {
                kaddr = kmap(page);
                memset(kaddr + iinfo->i_lenAlloc, 0x00,
-                      PAGE_CACHE_SIZE - iinfo->i_lenAlloc);
+                      PAGE_SIZE - iinfo->i_lenAlloc);
                memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr,
                        iinfo->i_lenAlloc);
                flush_dcache_page(page);
@@ -319,7 +319,7 @@ int udf_expand_file_adinicb(struct inode *inode)
                inode->i_data.a_ops = &udf_adinicb_aops;
                up_write(&iinfo->i_data_sem);
        }
-       page_cache_release(page);
+       put_page(page);
        mark_inode_dirty(inode);
 
        return err;
index fa92fe839fda2f989e3d52c368cc7520b80679e5..36661acaf33b4f61d27f8f48d467ced432521b3c 100644 (file)
@@ -919,14 +919,14 @@ static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
 #endif
        }
 
-       ret = udf_CS0toUTF8(outstr, 31, pvoldesc->volIdent, 32);
+       ret = udf_dstrCS0toUTF8(outstr, 31, pvoldesc->volIdent, 32);
        if (ret < 0)
                goto out_bh;
 
        strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret);
        udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident);
 
-       ret = udf_CS0toUTF8(outstr, 127, pvoldesc->volSetIdent, 128);
+       ret = udf_dstrCS0toUTF8(outstr, 127, pvoldesc->volSetIdent, 128);
        if (ret < 0)
                goto out_bh;
 
index 972b70625614f837310e39d41f6d8e836d205144..263829ef1873644a16ac3340555291a7fc240a4a 100644 (file)
@@ -212,7 +212,7 @@ extern int udf_get_filename(struct super_block *, const uint8_t *, int,
                            uint8_t *, int);
 extern int udf_put_filename(struct super_block *, const uint8_t *, int,
                            uint8_t *, int);
-extern int udf_CS0toUTF8(uint8_t *, int, const uint8_t *, int);
+extern int udf_dstrCS0toUTF8(uint8_t *, int, const uint8_t *, int);
 
 /* ialloc.c */
 extern void udf_free_inode(struct inode *);
index 3ff42f4437f3eb3374fea6b1cd0e839b71b65577..695389a4fc239f245cfacfa5a1e2fde9eae5b4a7 100644 (file)
@@ -335,9 +335,21 @@ try_again:
        return u_len;
 }
 
-int udf_CS0toUTF8(uint8_t *utf_o, int o_len, const uint8_t *ocu_i, int i_len)
+int udf_dstrCS0toUTF8(uint8_t *utf_o, int o_len,
+                     const uint8_t *ocu_i, int i_len)
 {
-       return udf_name_from_CS0(utf_o, o_len, ocu_i, i_len,
+       int s_len = 0;
+
+       if (i_len > 0) {
+               s_len = ocu_i[i_len - 1];
+               if (s_len >= i_len) {
+                       pr_err("incorrect dstring lengths (%d/%d)\n",
+                              s_len, i_len);
+                       return -EINVAL;
+               }
+       }
+
+       return udf_name_from_CS0(utf_o, o_len, ocu_i, s_len,
                                 udf_uni2char_utf8, 0);
 }
 
index dc5fae601c24b40e432907dd93c1da6361453431..0447b949c7f5c65555603dced6de8cfac84eba4a 100644 (file)
@@ -237,7 +237,7 @@ static void ufs_change_blocknr(struct inode *inode, sector_t beg,
                               sector_t newb, struct page *locked_page)
 {
        const unsigned blks_per_page =
-               1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+               1 << (PAGE_SHIFT - inode->i_blkbits);
        const unsigned mask = blks_per_page - 1;
        struct address_space * const mapping = inode->i_mapping;
        pgoff_t index, cur_index, last_index;
@@ -255,9 +255,9 @@ static void ufs_change_blocknr(struct inode *inode, sector_t beg,
 
        cur_index = locked_page->index;
        end = count + beg;
-       last_index = end >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
+       last_index = end >> (PAGE_SHIFT - inode->i_blkbits);
        for (i = beg; i < end; i = (i | mask) + 1) {
-               index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
+               index = i >> (PAGE_SHIFT - inode->i_blkbits);
 
                if (likely(cur_index != index)) {
                        page = ufs_get_locked_page(mapping, index);
index 74f2e80288bfad7824961373891fad9bea30a158..0b1457292734c8f02c1747b27624bc1851b1aa53 100644 (file)
@@ -62,7 +62,7 @@ static int ufs_commit_chunk(struct page *page, loff_t pos, unsigned len)
 static inline void ufs_put_page(struct page *page)
 {
        kunmap(page);
-       page_cache_release(page);
+       put_page(page);
 }
 
 ino_t ufs_inode_by_name(struct inode *dir, const struct qstr *qstr)
@@ -111,13 +111,13 @@ static void ufs_check_page(struct page *page)
        struct super_block *sb = dir->i_sb;
        char *kaddr = page_address(page);
        unsigned offs, rec_len;
-       unsigned limit = PAGE_CACHE_SIZE;
+       unsigned limit = PAGE_SIZE;
        const unsigned chunk_mask = UFS_SB(sb)->s_uspi->s_dirblksize - 1;
        struct ufs_dir_entry *p;
        char *error;
 
-       if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
-               limit = dir->i_size & ~PAGE_CACHE_MASK;
+       if ((dir->i_size >> PAGE_SHIFT) == page->index) {
+               limit = dir->i_size & ~PAGE_MASK;
                if (limit & chunk_mask)
                        goto Ebadsize;
                if (!limit)
@@ -170,7 +170,7 @@ Einumber:
 bad_entry:
        ufs_error (sb, "ufs_check_page", "bad entry in directory #%lu: %s - "
                   "offset=%lu, rec_len=%d, name_len=%d",
-                  dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
+                  dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
                   rec_len, ufs_get_de_namlen(sb, p));
        goto fail;
 Eend:
@@ -178,7 +178,7 @@ Eend:
        ufs_error(sb, __func__,
                   "entry in directory #%lu spans the page boundary"
                   "offset=%lu",
-                  dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs);
+                  dir->i_ino, (page->index<<PAGE_SHIFT)+offs);
 fail:
        SetPageChecked(page);
        SetPageError(page);
@@ -211,9 +211,9 @@ ufs_last_byte(struct inode *inode, unsigned long page_nr)
 {
        unsigned last_byte = inode->i_size;
 
-       last_byte -= page_nr << PAGE_CACHE_SHIFT;
-       if (last_byte > PAGE_CACHE_SIZE)
-               last_byte = PAGE_CACHE_SIZE;
+       last_byte -= page_nr << PAGE_SHIFT;
+       if (last_byte > PAGE_SIZE)
+               last_byte = PAGE_SIZE;
        return last_byte;
 }
 
@@ -341,7 +341,7 @@ int ufs_add_link(struct dentry *dentry, struct inode *inode)
                kaddr = page_address(page);
                dir_end = kaddr + ufs_last_byte(dir, n);
                de = (struct ufs_dir_entry *)kaddr;
-               kaddr += PAGE_CACHE_SIZE - reclen;
+               kaddr += PAGE_SIZE - reclen;
                while ((char *)de <= kaddr) {
                        if ((char *)de == dir_end) {
                                /* We hit i_size */
@@ -432,8 +432,8 @@ ufs_readdir(struct file *file, struct dir_context *ctx)
        loff_t pos = ctx->pos;
        struct inode *inode = file_inode(file);
        struct super_block *sb = inode->i_sb;
-       unsigned int offset = pos & ~PAGE_CACHE_MASK;
-       unsigned long n = pos >> PAGE_CACHE_SHIFT;
+       unsigned int offset = pos & ~PAGE_MASK;
+       unsigned long n = pos >> PAGE_SHIFT;
        unsigned long npages = dir_pages(inode);
        unsigned chunk_mask = ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
        int need_revalidate = file->f_version != inode->i_version;
@@ -454,14 +454,14 @@ ufs_readdir(struct file *file, struct dir_context *ctx)
                        ufs_error(sb, __func__,
                                  "bad page in #%lu",
                                  inode->i_ino);
-                       ctx->pos += PAGE_CACHE_SIZE - offset;
+                       ctx->pos += PAGE_SIZE - offset;
                        return -EIO;
                }
                kaddr = page_address(page);
                if (unlikely(need_revalidate)) {
                        if (offset) {
                                offset = ufs_validate_entry(sb, kaddr, offset, chunk_mask);
-                               ctx->pos = (n<<PAGE_CACHE_SHIFT) + offset;
+                               ctx->pos = (n<<PAGE_SHIFT) + offset;
                        }
                        file->f_version = inode->i_version;
                        need_revalidate = 0;
@@ -574,7 +574,7 @@ int ufs_make_empty(struct inode * inode, struct inode *dir)
 
        kmap(page);
        base = (char*)page_address(page);
-       memset(base, 0, PAGE_CACHE_SIZE);
+       memset(base, 0, PAGE_SIZE);
 
        de = (struct ufs_dir_entry *) base;
 
@@ -594,7 +594,7 @@ int ufs_make_empty(struct inode * inode, struct inode *dir)
 
        err = ufs_commit_chunk(page, 0, chunk_size);
 fail:
-       page_cache_release(page);
+       put_page(page);
        return err;
 }
 
index d897e169ab9c4f240e1f4023955f37bf0fe8f266..9f49431e798d6778f371eb482eed0c0390c250c4 100644 (file)
@@ -1051,13 +1051,13 @@ static int ufs_alloc_lastblock(struct inode *inode, loff_t size)
        lastfrag--;
 
        lastpage = ufs_get_locked_page(mapping, lastfrag >>
-                                      (PAGE_CACHE_SHIFT - inode->i_blkbits));
+                                      (PAGE_SHIFT - inode->i_blkbits));
        if (IS_ERR(lastpage)) {
                err = -EIO;
                goto out;
        }
 
-       end = lastfrag & ((1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1);
+       end = lastfrag & ((1 << (PAGE_SHIFT - inode->i_blkbits)) - 1);
        bh = page_buffers(lastpage);
        for (i = 0; i < end; ++i)
                bh = bh->b_this_page;
index acf4a3b61b81fcc58342819eabbc768e6ba33bae..a1559f762805390fbc9854859040d397aa42c208 100644 (file)
@@ -305,7 +305,7 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
                        ufs_set_link(old_inode, dir_de, dir_page, new_dir, 0);
                else {
                        kunmap(dir_page);
-                       page_cache_release(dir_page);
+                       put_page(dir_page);
                }
                inode_dec_link_count(old_dir);
        }
@@ -315,11 +315,11 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
 out_dir:
        if (dir_de) {
                kunmap(dir_page);
-               page_cache_release(dir_page);
+               put_page(dir_page);
        }
 out_old:
        kunmap(old_page);
-       page_cache_release(old_page);
+       put_page(old_page);
 out:
        return err;
 }
index b6c2f94e041edc5840cc7a081b0ea6b8f4abfbd9..a409e3e7827ab09ee26547cbe3a299ecc5731ece 100644 (file)
@@ -261,14 +261,14 @@ struct page *ufs_get_locked_page(struct address_space *mapping,
                if (unlikely(page->mapping == NULL)) {
                        /* Truncate got there first */
                        unlock_page(page);
-                       page_cache_release(page);
+                       put_page(page);
                        page = NULL;
                        goto out;
                }
 
                if (!PageUptodate(page) || PageError(page)) {
                        unlock_page(page);
-                       page_cache_release(page);
+                       put_page(page);
 
                        printk(KERN_ERR "ufs_change_blocknr: "
                               "can not read page: ino %lu, index: %lu\n",
index 95417592824094c0655a0bf5839c3da442d5db46..b7fbf53dbc81a044e2bd10428bf2601b094cb750 100644 (file)
@@ -283,7 +283,7 @@ extern struct page *ufs_get_locked_page(struct address_space *mapping,
 static inline void ufs_put_locked_page(struct page *page)
 {
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 }
 
 
index 041b6948aeccd928f6f88d95ed9985052d915390..ce41d7fe753c5dcbb7edfe85771e1a121c4e580b 100644 (file)
@@ -3742,11 +3742,11 @@ xfs_bmap_btalloc(
                args.prod = align;
                if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
                        args.mod = (xfs_extlen_t)(args.prod - args.mod);
-       } else if (mp->m_sb.sb_blocksize >= PAGE_CACHE_SIZE) {
+       } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
                args.prod = 1;
                args.mod = 0;
        } else {
-               args.prod = PAGE_CACHE_SIZE >> mp->m_sb.sb_blocklog;
+               args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
                if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod))))
                        args.mod = (xfs_extlen_t)(args.prod - args.mod);
        }
index d445a64b979e963dccbef0a721989c112351a97e..e49b2406d15d203bee6c096c36bf934b730c7fd3 100644 (file)
@@ -704,7 +704,7 @@ next_buffer:
 
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
 out_invalidate:
-       xfs_vm_invalidatepage(page, 0, PAGE_CACHE_SIZE);
+       xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
        return;
 }
 
@@ -925,9 +925,9 @@ xfs_do_writepage(
         * ---------------------------------^------------------|
         */
        offset = i_size_read(inode);
-       end_index = offset >> PAGE_CACHE_SHIFT;
+       end_index = offset >> PAGE_SHIFT;
        if (page->index < end_index)
-               end_offset = (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT;
+               end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
        else {
                /*
                 * Check whether the page to write out is beyond or straddles
@@ -940,7 +940,7 @@ xfs_do_writepage(
                 * |                                |      Straddles     |
                 * ---------------------------------^-----------|--------|
                 */
-               unsigned offset_into_page = offset & (PAGE_CACHE_SIZE - 1);
+               unsigned offset_into_page = offset & (PAGE_SIZE - 1);
 
                /*
                 * Skip the page if it is fully outside i_size, e.g. due to a
@@ -971,7 +971,7 @@ xfs_do_writepage(
                 * memory is zeroed when mapped, and writes to that region are
                 * not written out to the file."
                 */
-               zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE);
+               zero_user_segment(page, offset_into_page, PAGE_SIZE);
 
                /* Adjust the end_offset to the end of file */
                end_offset = offset;
@@ -1475,7 +1475,7 @@ xfs_vm_write_failed(
        loff_t                  block_offset;
        loff_t                  block_start;
        loff_t                  block_end;
-       loff_t                  from = pos & (PAGE_CACHE_SIZE - 1);
+       loff_t                  from = pos & (PAGE_SIZE - 1);
        loff_t                  to = from + len;
        struct buffer_head      *bh, *head;
        struct xfs_mount        *mp = XFS_I(inode)->i_mount;
@@ -1491,7 +1491,7 @@ xfs_vm_write_failed(
         * start of the page by using shifts rather than masks the mismatch
         * problem.
         */
-       block_offset = (pos >> PAGE_CACHE_SHIFT) << PAGE_CACHE_SHIFT;
+       block_offset = (pos >> PAGE_SHIFT) << PAGE_SHIFT;
 
        ASSERT(block_offset + from == pos);
 
@@ -1558,12 +1558,12 @@ xfs_vm_write_begin(
        struct page             **pagep,
        void                    **fsdata)
 {
-       pgoff_t                 index = pos >> PAGE_CACHE_SHIFT;
+       pgoff_t                 index = pos >> PAGE_SHIFT;
        struct page             *page;
        int                     status;
        struct xfs_mount        *mp = XFS_I(mapping->host)->i_mount;
 
-       ASSERT(len <= PAGE_CACHE_SIZE);
+       ASSERT(len <= PAGE_SIZE);
 
        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page)
@@ -1592,7 +1592,7 @@ xfs_vm_write_begin(
                        truncate_pagecache_range(inode, start, pos + len);
                }
 
-               page_cache_release(page);
+               put_page(page);
                page = NULL;
        }
 
@@ -1620,7 +1620,7 @@ xfs_vm_write_end(
 {
        int                     ret;
 
-       ASSERT(len <= PAGE_CACHE_SIZE);
+       ASSERT(len <= PAGE_SIZE);
 
        ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
        if (unlikely(ret < len)) {
index a32c1dcae2ff37b3ee1542fd3fbc5dd74a1e0a86..3b6309865c65336793a7ac84009188b44b394a32 100644 (file)
@@ -1237,7 +1237,7 @@ xfs_free_file_space(
        /* wait for the completion of any pending DIOs */
        inode_dio_wait(VFS_I(ip));
 
-       rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
+       rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
        ioffset = round_down(offset, rounding);
        iendoffset = round_up(offset + len, rounding) - 1;
        error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, ioffset,
@@ -1466,7 +1466,7 @@ xfs_shift_file_space(
        if (error)
                return error;
        error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
-                                       offset >> PAGE_CACHE_SHIFT, -1);
+                                       offset >> PAGE_SHIFT, -1);
        if (error)
                return error;
 
index ac0fd32de31e4e5455e43da208cdef4861710a21..569938a4a357341915f3c0553a16fa5acd782e0f 100644 (file)
@@ -106,8 +106,8 @@ xfs_iozero(
                unsigned offset, bytes;
                void *fsdata;
 
-               offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
-               bytes = PAGE_CACHE_SIZE - offset;
+               offset = (pos & (PAGE_SIZE -1)); /* Within page */
+               bytes = PAGE_SIZE - offset;
                if (bytes > count)
                        bytes = count;
 
@@ -799,8 +799,8 @@ xfs_file_dio_aio_write(
        /* see generic_file_direct_write() for why this is necessary */
        if (mapping->nrpages) {
                invalidate_inode_pages2_range(mapping,
-                                             pos >> PAGE_CACHE_SHIFT,
-                                             end >> PAGE_CACHE_SHIFT);
+                                             pos >> PAGE_SHIFT,
+                                             end >> PAGE_SHIFT);
        }
 
        if (ret > 0) {
@@ -1207,9 +1207,9 @@ xfs_find_get_desired_pgoff(
 
        pagevec_init(&pvec, 0);
 
-       index = startoff >> PAGE_CACHE_SHIFT;
+       index = startoff >> PAGE_SHIFT;
        endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
-       end = endoff >> PAGE_CACHE_SHIFT;
+       end = endoff >> PAGE_SHIFT;
        do {
                int             want;
                unsigned        nr_pages;
index ec0e239a0fa9070f30541b14f305b8d28c153339..a8192dc797dcc45315a5bfdff067992e10cd85ba 100644 (file)
@@ -135,7 +135,7 @@ typedef __u32                       xfs_nlink_t;
  * Size of block device i/o is parameterized here.
  * Currently the system supports page-sized i/o.
  */
-#define        BLKDEV_IOSHIFT          PAGE_CACHE_SHIFT
+#define        BLKDEV_IOSHIFT          PAGE_SHIFT
 #define        BLKDEV_IOSIZE           (1<<BLKDEV_IOSHIFT)
 /* number of BB's per block device block */
 #define        BLKDEV_BB               BTOBB(BLKDEV_IOSIZE)
index 536a0ee9cd5af1fbbc89960a597eb7158b676771..cfd4210dd01500203c5c90e2fe064442fe0998ac 100644 (file)
@@ -171,7 +171,7 @@ xfs_sb_validate_fsb_count(
        ASSERT(sbp->sb_blocklog >= BBSHIFT);
 
        /* Limited by ULONG_MAX of page cache index */
-       if (nblocks >> (PAGE_CACHE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
+       if (nblocks >> (PAGE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
                return -EFBIG;
        return 0;
 }
index bac6b3435591b6725ea2b4a6115fd7bf89db8c15..eafe257b357addf83152f0c0ca53128e320abc94 100644 (file)
@@ -231,12 +231,12 @@ static inline unsigned long
 xfs_preferred_iosize(xfs_mount_t *mp)
 {
        if (mp->m_flags & XFS_MOUNT_COMPAT_IOSIZE)
-               return PAGE_CACHE_SIZE;
+               return PAGE_SIZE;
        return (mp->m_swidth ?
                (mp->m_swidth << mp->m_sb.sb_blocklog) :
                ((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) ?
                        (1 << (int)MAX(mp->m_readio_log, mp->m_writeio_log)) :
-                       PAGE_CACHE_SIZE));
+                       PAGE_SIZE));
 }
 
 #define XFS_LAST_UNMOUNT_WAS_CLEAN(mp) \
index ade236e90bb3612d429a8b6b0909b3937302096c..51ddaf2c2b8c96648d70f364d45d6bb95eca3947 100644 (file)
@@ -293,8 +293,8 @@ xfs_fs_commit_blocks(
                 * Make sure reads through the pagecache see the new data.
                 */
                error = invalidate_inode_pages2_range(inode->i_mapping,
-                                       start >> PAGE_CACHE_SHIFT,
-                                       (end - 1) >> PAGE_CACHE_SHIFT);
+                                       start >> PAGE_SHIFT,
+                                       (end - 1) >> PAGE_SHIFT);
                WARN_ON_ONCE(error);
 
                error = xfs_iomap_write_unwritten(ip, start, length);
index d760934109b5d628891ea3e91fa1a9e3f68ec36b..187e14b696c200bac8d78745fce25c449b6f35f6 100644 (file)
@@ -556,10 +556,10 @@ xfs_max_file_offset(
        /* Figure out maximum filesize, on Linux this can depend on
         * the filesystem blocksize (on 32 bit platforms).
         * __block_write_begin does this in an [unsigned] long...
-        *      page->index << (PAGE_CACHE_SHIFT - bbits)
+        *      page->index << (PAGE_SHIFT - bbits)
         * So, for page sized blocks (4K on 32 bit platforms),
         * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
-        *      (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
+        *      (((u64)PAGE_SIZE << (BITS_PER_LONG-1))-1)
         * but for smaller blocksizes it is less (bbits = log2 bsize).
         * Note1: get_block_t takes a long (implicit cast from above)
         * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
@@ -570,10 +570,10 @@ xfs_max_file_offset(
 #if BITS_PER_LONG == 32
 # if defined(CONFIG_LBDAF)
        ASSERT(sizeof(sector_t) == 8);
-       pagefactor = PAGE_CACHE_SIZE;
+       pagefactor = PAGE_SIZE;
        bitshift = BITS_PER_LONG;
 # else
-       pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
+       pagefactor = PAGE_SIZE >> (PAGE_SHIFT - blockshift);
 # endif
 #endif
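
The comment above keeps its formula under the new names, and the arithmetic checks out: with 4 KiB pages on a 32-bit long, (PAGE_SIZE << (BITS_PER_LONG-1)) - 1 = 2^43 - 1, the "around 8Tb" the comment mentions. A standalone check (the 4 KiB page size is an assumption):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        int64_t page_size = 4096;                 /* assumption: 4 KiB pages */
        int64_t max = (page_size << 31) - 1;      /* << (BITS_PER_LONG - 1)  */

        assert(max == (((int64_t)1 << 43) - 1));  /* 8 TiB - 1 */
        return 0;
}
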
 
index 14362a84c78e3ca1d5712d284d6dce22ed656ba1..3a932501d69078951ee4e70c1acd4ae429ca2a85 100644 (file)
@@ -394,13 +394,13 @@ struct acpi_data_node {
 
 static inline bool is_acpi_node(struct fwnode_handle *fwnode)
 {
-       return fwnode && (fwnode->type == FWNODE_ACPI
+       return !IS_ERR_OR_NULL(fwnode) && (fwnode->type == FWNODE_ACPI
                || fwnode->type == FWNODE_ACPI_DATA);
 }
 
 static inline bool is_acpi_device_node(struct fwnode_handle *fwnode)
 {
-       return fwnode && fwnode->type == FWNODE_ACPI;
+       return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_ACPI;
 }
 
 static inline struct acpi_device *to_acpi_device_node(struct fwnode_handle *fwnode)
index e56272c919b5a688e1739cdefaa08c121e26f5a8..bf2d34c9d804334cd0c634bf4d8ed921c08cfd7a 100644 (file)
@@ -108,11 +108,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
        u32 val;
 
        preempt_disable();
-       if (unlikely(get_user(val, uaddr) != 0))
+       if (unlikely(get_user(val, uaddr) != 0)) {
+               preempt_enable();
                return -EFAULT;
+       }
 
-       if (val == oldval && unlikely(put_user(newval, uaddr) != 0))
+       if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
+               preempt_enable();
                return -EFAULT;
+       }
 
        *uval = val;
        preempt_enable();
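
The futex fix above is a classic balanced-exit bug: preempt_disable() was left held on the two early error returns. A compilable userspace model of the corrected shape, with read_user()/write_user() as hypothetical stand-ins for get_user()/put_user() and preemption modeled by a counter:

#include <errno.h>

static int preempt_count;
static void preempt_disable(void) { preempt_count++; }
static void preempt_enable(void)  { preempt_count--; }

static int read_user(int *dst, const int *src) { *dst = *src; return 0; }
static int write_user(int *dst, int val)       { *dst = val;  return 0; }

static int cmpxchg_inatomic(int *uval, int *uaddr, int oldval, int newval)
{
        int val;

        preempt_disable();
        if (read_user(&val, uaddr) != 0) {
                preempt_enable();       /* balance before the error return */
                return -EFAULT;
        }

        if (val == oldval && write_user(uaddr, newval) != 0) {
                preempt_enable();       /* and on this error path too */
                return -EFAULT;
        }

        *uval = val;
        preempt_enable();
        return 0;
}

int main(void)
{
        int shared = 5, old = 0;

        cmpxchg_inatomic(&old, &shared, 5, 7);
        return (shared == 7 && old == 5 && preempt_count == 0) ? 0 : 1;
}
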
index 461a0558bca4d8d16e81b88e0286e3fa6251ab79..cebecff536a3a6aec1b06c69e32d8bb4ec2f4f29 100644 (file)
@@ -39,6 +39,8 @@ static inline bool drm_arch_can_wc_memory(void)
 {
 #if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
        return false;
+#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3)
+       return false;
 #else
        return true;
 #endif
index afae2316bd434e0067f5007f4467c3b0e3f54bb9..055a08ddac02a4a5b1149470f7bd74a03b173096 100644 (file)
@@ -92,7 +92,7 @@ struct ttm_placement {
  */
 struct ttm_bus_placement {
        void            *addr;
-       unsigned long   base;
+       phys_addr_t     base;
        unsigned long   size;
        unsigned long   offset;
        bool            is_iomem;
index 33eb274cd0e6b61be2b000e3e30c82f2ea6c7f1e..e66153d60bd50e8d502d58d44fac4099bb00c689 100644 (file)
@@ -31,6 +31,10 @@ struct ath9k_platform_data {
        u32 gpio_mask;
        u32 gpio_val;
 
+       u32 bt_active_pin;
+       u32 bt_priority_pin;
+       u32 wlan_active_pin;
+
        bool endian_check;
        bool is_clk_25mhz;
        bool tx_gain_buffalo;
index df4f369254c0305174f6be6a6caca6b296c692c5..506c3531832eee836ab30997122070790067cadd 100644 (file)
@@ -559,25 +559,25 @@ static inline int atomic_dec_if_positive(atomic_t *v)
 #endif
 
 /**
- * fetch_or - perform *ptr |= mask and return old value of *ptr
- * @ptr: pointer to value
- * @mask: mask to OR on the value
- *
- * cmpxchg based fetch_or, macro so it works for different integer types
+ * atomic_fetch_or - perform *p |= mask and return old value of *p
+ * @p: pointer to atomic_t
+ * @mask: mask to OR on the atomic_t
  */
-#ifndef fetch_or
-#define fetch_or(ptr, mask)                                            \
-({     typeof(*(ptr)) __old, __val = *(ptr);                           \
-       for (;;) {                                                      \
-               __old = cmpxchg((ptr), __val, __val | (mask));          \
-               if (__old == __val)                                     \
-                       break;                                          \
-               __val = __old;                                          \
-       }                                                               \
-       __old;                                                          \
-})
-#endif
+#ifndef atomic_fetch_or
+static inline int atomic_fetch_or(atomic_t *p, int mask)
+{
+       int old, val = atomic_read(p);
+
+       for (;;) {
+               old = atomic_cmpxchg(p, val, val | mask);
+               if (old == val)
+                       break;
+               val = old;
+       }
 
+       return old;
+}
+#endif
 
 #ifdef CONFIG_GENERIC_ATOMIC64
 #include <asm-generic/atomic64.h>
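
The replacement above turns the open-coded fetch_or macro into a typed atomic_t helper with the same cmpxchg-retry structure. A userspace sketch of that loop, using the GCC/Clang builtin __sync_val_compare_and_swap as a stand-in for atomic_cmpxchg():

static int fetch_or_sketch(int *p, int mask)
{
        int old, val = *p;

        for (;;) {
                old = __sync_val_compare_and_swap(p, val, val | mask);
                if (old == val)
                        break;          /* our compare-and-swap won  */
                val = old;              /* lost a race: retry        */
        }
        return old;                     /* value seen before the OR  */
}

int main(void)
{
        int flags = 0x1;
        int old = fetch_or_sketch(&flags, 0x4);

        return (old == 0x1 && flags == 0x5) ? 0 : 1;
}
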
index 1b4d69f68c33cc73ad99a1136b2408c71e763fb8..3f103076d0bfdc2d72a462ca6c3deb1ef84a3227 100644 (file)
@@ -135,7 +135,7 @@ struct bdi_writeback {
 
 struct backing_dev_info {
        struct list_head bdi_list;
-       unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */
+       unsigned long ra_pages; /* max readahead in PAGE_SIZE units */
        unsigned int capabilities; /* Device capabilities */
        congested_fn *congested_fn; /* Function pointer if device is md/dm */
        void *congested_data;   /* Pointer to aux data for congested func */
index 88bc64f00bb53cb01fe60ca87dfafd7c9c0f0f4a..6b7481f62218895945ba6dcd1b7df2e49268d552 100644 (file)
@@ -41,7 +41,7 @@
 #endif
 
 #define BIO_MAX_PAGES          256
-#define BIO_MAX_SIZE           (BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
+#define BIO_MAX_SIZE           (BIO_MAX_PAGES << PAGE_SHIFT)
 #define BIO_MAX_SECTORS                (BIO_MAX_SIZE >> 9)
 
 /*
index 7e5d7e018bea0877ee617d9f9b93ec037b19209c..669e419d62347e2965bdaffbf8e960e1e34af4f9 100644 (file)
@@ -1372,7 +1372,7 @@ unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
 
 static inline void put_dev_sector(Sector p)
 {
-       page_cache_release(p.v);
+       put_page(p.v);
 }
 
 static inline bool __bvec_gap_to_prev(struct request_queue *q,
index b2365a6eba3dc86d5d0888a0a98d1077bd943ab0..8ee27b8afe81c5d45b66f7b629c976ed8138d26a 100644 (file)
@@ -66,6 +66,11 @@ enum bpf_arg_type {
         * functions that access data on eBPF program stack
         */
        ARG_PTR_TO_STACK,       /* any pointer to eBPF program stack */
+       ARG_PTR_TO_RAW_STACK,   /* any pointer to eBPF program stack; the area
+                                * does not need to be initialized, but the helper
+                                * must fill all bytes or clear them in the error
+                                * case.
+                                */
+
        ARG_CONST_STACK_SIZE,   /* number of bytes accessed from stack */
        ARG_CONST_STACK_SIZE_OR_ZERO, /* number of bytes accessed from stack or 0 */
 
@@ -164,7 +169,9 @@ u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 void bpf_fd_array_map_clear(struct bpf_map *map);
 bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
+
 const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
+const struct bpf_func_proto *bpf_get_event_output_proto(void);
 
 #ifdef CONFIG_BPF_SYSCALL
 DECLARE_PER_CPU(int, bpf_prog_active);
@@ -173,12 +180,13 @@ void bpf_register_prog_type(struct bpf_prog_type_list *tl);
 void bpf_register_map_type(struct bpf_map_type_list *tl);
 
 struct bpf_prog *bpf_prog_get(u32 ufd);
+struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog);
 void bpf_prog_put(struct bpf_prog *prog);
 void bpf_prog_put_rcu(struct bpf_prog *prog);
 
 struct bpf_map *bpf_map_get_with_uref(u32 ufd);
 struct bpf_map *__bpf_map_get(struct fd f);
-void bpf_map_inc(struct bpf_map *map, bool uref);
+struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref);
 void bpf_map_put_with_uref(struct bpf_map *map);
 void bpf_map_put(struct bpf_map *map);
 int bpf_map_precharge_memlock(u32 pages);
index c67f052cc5e55823a2f1032ba45accb2d3608079..d48daa3f6f20f7b7d3da0c5396168a277a239d14 100644 (file)
@@ -43,7 +43,7 @@ enum bh_state_bits {
                         */
 };
 
-#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)
+#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)
 
 struct page;
 struct buffer_head;
@@ -263,7 +263,7 @@ void buffer_init(void);
 static inline void attach_page_buffers(struct page *page,
                struct buffer_head *head)
 {
-       page_cache_get(page);
+       get_page(page);
        SetPagePrivate(page);
        set_page_private(page, (unsigned long)head);
 }
index 735f9f8c4e43e5350f36bc4b39e9f8634778fd40..5261751f6bd4dec116516691cbc40a6763a6d12b 100644 (file)
@@ -40,8 +40,11 @@ struct can_priv {
        struct can_clock clock;
 
        enum can_state state;
-       u32 ctrlmode;
-       u32 ctrlmode_supported;
+
+       /* CAN controller features - see include/uapi/linux/can/netlink.h */
+       u32 ctrlmode;           /* current options setting */
+       u32 ctrlmode_supported; /* options that can be modified by netlink */
+       u32 ctrlmode_static;    /* static enabled options for driver/hardware */
 
        int restart_ms;
        struct timer_list restart_timer;
@@ -108,6 +111,21 @@ static inline bool can_is_canfd_skb(const struct sk_buff *skb)
        return skb->len == CANFD_MTU;
 }
 
+/* helper to define static CAN controller features at device creation time */
+static inline void can_set_static_ctrlmode(struct net_device *dev,
+                                          u32 static_mode)
+{
+       struct can_priv *priv = netdev_priv(dev);
+
+       /* alloc_candev() succeeded => netdev_priv() is valid at this point */
+       priv->ctrlmode = static_mode;
+       priv->ctrlmode_static = static_mode;
+
+       /* override MTU which was set by default in can_setup()? */
+       if (static_mode & CAN_CTRLMODE_FD)
+               dev->mtu = CANFD_MTU;
+}
+
 /* get data length from can_dlc with sanitized can_dlc */
 u8 can_dlc2len(u8 can_dlc);
 
index 260d78b587c48e99f552f328a27a65ddd9745dd2..1563265d209740aadc845d28c1389091c5249728 100644 (file)
  */
 
 struct ceph_auth_client;
-struct ceph_authorizer;
 struct ceph_msg;
 
+struct ceph_authorizer {
+       void (*destroy)(struct ceph_authorizer *);
+};
+
 struct ceph_auth_handshake {
        struct ceph_authorizer *authorizer;
        void *authorizer_buf;
@@ -62,8 +65,6 @@ struct ceph_auth_client_ops {
                                 struct ceph_auth_handshake *auth);
        int (*verify_authorizer_reply)(struct ceph_auth_client *ac,
                                       struct ceph_authorizer *a, size_t len);
-       void (*destroy_authorizer)(struct ceph_auth_client *ac,
-                                  struct ceph_authorizer *a);
        void (*invalidate_authorizer)(struct ceph_auth_client *ac,
                                      int peer_type);
 
@@ -112,8 +113,7 @@ extern int ceph_auth_is_authenticated(struct ceph_auth_client *ac);
 extern int ceph_auth_create_authorizer(struct ceph_auth_client *ac,
                                       int peer_type,
                                       struct ceph_auth_handshake *auth);
-extern void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac,
-                                        struct ceph_authorizer *a);
+void ceph_auth_destroy_authorizer(struct ceph_authorizer *a);
 extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac,
                                       int peer_type,
                                       struct ceph_auth_handshake *a);
index e7975e4681e1a9110ee26506eaf4402ffcbad082..db92a8d4926eed9f9bf155564c274b1552477e4b 100644 (file)
@@ -176,8 +176,8 @@ extern void ceph_put_snap_context(struct ceph_snap_context *sc);
  */
 static inline int calc_pages_for(u64 off, u64 len)
 {
-       return ((off+len+PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT) -
-               (off >> PAGE_CACHE_SHIFT);
+       return ((off+len+PAGE_SIZE-1) >> PAGE_SHIFT) -
+               (off >> PAGE_SHIFT);
 }
 
 extern struct kmem_cache *ceph_inode_cachep;
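
calc_pages_for() counts the pages a byte range [off, off+len) touches: round the end up to a page boundary, round the start down, and subtract. A standalone check with 4 KiB pages (the page size is an assumption, matching the shift of 12):

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int calc_pages_for(uint64_t off, uint64_t len)
{
        return ((off + len + PAGE_SIZE - 1) >> PAGE_SHIFT) -
                (off >> PAGE_SHIFT);
}

int main(void)
{
        assert(calc_pages_for(0, 1) == 1);          /* within one page  */
        assert(calc_pages_for(4000, 200) == 2);     /* straddles a page */
        assert(calc_pages_for(4096, 4096) == 1);    /* exactly one page */
        return 0;
}
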
index 4343df80671019a01e7a0b7aeec89f191670e6f0..cbf460927c424b26c76665edca16337eb62ef9a9 100644 (file)
@@ -16,7 +16,6 @@ struct ceph_msg;
 struct ceph_snap_context;
 struct ceph_osd_request;
 struct ceph_osd_client;
-struct ceph_authorizer;
 
 /*
  * completion callback for async writepages
index 3e39ae5bc7999d110b5822f7f6fd3fd355371a5f..5b17de62c962cd73d625427c2230d66e08cbcb4b 100644 (file)
@@ -444,6 +444,7 @@ struct cgroup_subsys {
        int (*can_attach)(struct cgroup_taskset *tset);
        void (*cancel_attach)(struct cgroup_taskset *tset);
        void (*attach)(struct cgroup_taskset *tset);
+       void (*post_attach)(void);
        int (*can_fork)(struct task_struct *task);
        void (*cancel_fork)(struct task_struct *task);
        void (*fork)(struct task_struct *task);
index 22ab246feed34c104038d3f94e1401ea9a587f8f..3d5202eda22f262e5c134bfb1f55b7e248dfcd79 100644 (file)
 #define unreachable() __builtin_unreachable()
 
 /* Mark a function definition as prohibited from being cloned. */
-#define __noclone      __attribute__((__noclone__))
+#define __noclone      __attribute__((__noclone__, __optimize__("no-tracer")))
 
 #endif /* GCC_VERSION >= 40500 */
 
 #define __HAVE_BUILTIN_BSWAP32__
 #define __HAVE_BUILTIN_BSWAP64__
 #endif
-#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
+#if GCC_VERSION >= 40800
 #define __HAVE_BUILTIN_BSWAP16__
 #endif
 #endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
index 485fe5519448ba451a247d944ca2df0f8811e4f7..d9d6a9d77489a72ef30519c67b08e046a0e81b46 100644 (file)
@@ -188,7 +188,7 @@ static struct configfs_bin_attribute _pfx##attr_##_name = { \
 }
 
 #define CONFIGFS_BIN_ATTR_RO(_pfx, _name, _priv, _maxsz)       \
-static struct configfs_attribute _pfx##attr_##_name = {                \
+static struct configfs_bin_attribute _pfx##attr_##_name = {    \
        .cb_attr = {                                            \
                .ca_name        = __stringify(_name),           \
                .ca_mode        = S_IRUGO,                      \
@@ -200,7 +200,7 @@ static struct configfs_attribute _pfx##attr_##_name = {             \
 }
 
 #define CONFIGFS_BIN_ATTR_WO(_pfx, _name, _priv, _maxsz)       \
-static struct configfs_attribute _pfx##attr_##_name = {                \
+static struct configfs_bin_attribute _pfx##attr_##_name = {    \
        .cb_attr = {                                            \
                .ca_name        = __stringify(_name),           \
                .ca_mode        = S_IWUSR,                      \
index fea160ee5803fd121d0493f622e240b4c35da480..85a868ccb4931d374a1ee9fb4e4036bb84399561 100644 (file)
@@ -137,8 +137,6 @@ static inline void set_mems_allowed(nodemask_t nodemask)
        task_unlock(current);
 }
 
-extern void cpuset_post_attach_flush(void);
-
 #else /* !CONFIG_CPUSETS */
 
 static inline bool cpusets_enabled(void) { return false; }
@@ -245,10 +243,6 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
        return false;
 }
 
-static inline void cpuset_post_attach_flush(void)
-{
-}
-
 #endif /* !CONFIG_CPUSETS */
 
 #endif /* _LINUX_CPUSET_H */
index 7cb043d8f4e8079cb1156e22b43e3a287a61294f..4bb4de8d95ea5869000db22baa662451c6acd1a0 100644 (file)
@@ -161,6 +161,7 @@ struct dentry_operations {
        struct vfsmount *(*d_automount)(struct path *);
        int (*d_manage)(struct dentry *, bool);
        struct inode *(*d_select_inode)(struct dentry *, unsigned);
+       struct dentry *(*d_real)(struct dentry *, struct inode *);
 } ____cacheline_aligned;
 
 /*
@@ -229,6 +230,7 @@ struct dentry_operations {
 #define DCACHE_OP_SELECT_INODE         0x02000000 /* Unioned entry: dcache op selects inode */
 
 #define DCACHE_ENCRYPTED_WITH_KEY      0x04000000 /* dir is encrypted with a valid key */
+#define DCACHE_OP_REAL                 0x08000000
 
 extern seqlock_t rename_lock;
 
@@ -555,4 +557,12 @@ static inline struct dentry *d_backing_dentry(struct dentry *upper)
        return upper;
 }
 
+static inline struct dentry *d_real(struct dentry *dentry)
+{
+       if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
+               return dentry->d_op->d_real(dentry, NULL);
+       else
+               return dentry;
+}
+
 #endif /* __LINUX_DCACHE_H */
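
d_real() follows the dcache pattern of gating an optional operation on a cached flag bit (DCACHE_OP_REAL), so filesystems without the op pay only a predictable branch instead of an indirect call. A small standalone sketch of that dispatch shape; the names here are illustrative, not the kernel's:

#include <stddef.h>

#define HAS_REAL_OP 0x1u

struct node {
        unsigned flags;
        struct node *(*real)(struct node *);
};

static struct node *node_real(struct node *n)
{
        if (n->flags & HAS_REAL_OP)     /* overlay case: ask the op */
                return n->real(n);
        return n;                       /* common case: identity    */
}

int main(void)
{
        struct node plain = { 0, NULL };

        return node_real(&plain) == &plain ? 0 : 1;
}
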
index e0ee0b3000b2da107c975137165fc989777d8a58..5871f292b596fb58349c1f4c6e0202aede55b7f8 100644 (file)
 
 #include <linux/errno.h>
 
+struct pts_fs_info;
+
 #ifdef CONFIG_UNIX98_PTYS
 
-int devpts_new_index(struct inode *ptmx_inode);
-void devpts_kill_index(struct inode *ptmx_inode, int idx);
-void devpts_add_ref(struct inode *ptmx_inode);
-void devpts_del_ref(struct inode *ptmx_inode);
+/* Look up a pts fs info and get a ref to it */
+struct pts_fs_info *devpts_get_ref(struct inode *, struct file *);
+void devpts_put_ref(struct pts_fs_info *);
+
+int devpts_new_index(struct pts_fs_info *);
+void devpts_kill_index(struct pts_fs_info *, int);
+
 /* mknod in devpts */
-struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
-               void *priv);
+struct dentry *devpts_pty_new(struct pts_fs_info *, int, void *);
 /* get private structure */
-void *devpts_get_priv(struct inode *pts_inode);
+void *devpts_get_priv(struct dentry *);
 /* unlink */
-void devpts_pty_kill(struct inode *inode);
-
-#else
-
-/* Dummy stubs in the no-pty case */
-static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; }
-static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { }
-static inline void devpts_add_ref(struct inode *ptmx_inode) { }
-static inline void devpts_del_ref(struct inode *ptmx_inode) { }
-static inline struct inode *devpts_pty_new(struct inode *ptmx_inode,
-               dev_t device, int index, void *priv)
-{
-       return ERR_PTR(-EINVAL);
-}
-static inline void *devpts_get_priv(struct inode *pts_inode)
-{
-       return NULL;
-}
-static inline void devpts_pty_kill(struct inode *inode) { }
+void devpts_pty_kill(struct dentry *);
 
 #endif
 
index e2b7bf27c03ef3ea8ec089a02fc735b78b5c0072..9ded8c6d8176b909cf68da0e125eef4441b7c9a9 100644 (file)
@@ -150,6 +150,13 @@ extern int
 __ethtool_get_link_ksettings(struct net_device *dev,
                             struct ethtool_link_ksettings *link_ksettings);
 
+void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst,
+                                            u32 legacy_u32);
+
+/* return false if src had higher bits set. lower bits always updated. */
+bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
+                                    const unsigned long *src);
+
 /**
  * struct ethtool_ops - optional netdev operations
  * @get_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings
index 9eb215a155e0f685fe14fcb9bc6d5365b6c0908b..b90e9bdbd1dd6b8bba26c52b2f5096b1fc3d79a8 100644 (file)
@@ -262,7 +262,7 @@ struct f2fs_node {
 /*
  * For NAT entries
  */
-#define NAT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_nat_entry))
+#define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
 
 struct f2fs_nat_entry {
        __u8 version;           /* latest version of cached nat entry */
@@ -282,7 +282,7 @@ struct f2fs_nat_block {
  * Not allow to change this.
  */
 #define SIT_VBLOCK_MAP_SIZE 64
-#define SIT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_sit_entry))
+#define SIT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_sit_entry))
 
 /*
  * Note that f2fs_sit_entry->vblocks has the following bit-field information.
index 43aa1f8855c7ff59ab562b722cfd8dc19c4eee7e..ec1411c891056daa6f372f5fabc622c9cdb22a52 100644 (file)
@@ -352,6 +352,22 @@ struct sk_filter {
 
 #define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN
 
+struct bpf_skb_data_end {
+       struct qdisc_skb_cb qdisc_cb;
+       void *data_end;
+};
+
+/* compute the linear packet data range [data, data_end) which
+ * will be accessed by cls_bpf and act_bpf programs
+ */
+static inline void bpf_compute_data_end(struct sk_buff *skb)
+{
+       struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
+
+       BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb));
+       cb->data_end = skb->data + skb_headlen(skb);
+}
+
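
bpf_compute_data_end() stashes the end of the linear packet data so programs can do direct packet access with bounds tests the verifier can track. A userspace sketch of the consumer-side idiom that pointer enables; struct ctx and the parser below are illustrative:

struct ctx {
        unsigned char *data;
        unsigned char *data_end;
};

static int parse_first_two_bytes(struct ctx *c)
{
        unsigned char *p = c->data;

        if (p + 2 > c->data_end)        /* mandatory bounds check        */
                return -1;
        return (p[0] << 8) | p[1];      /* safe: within [data, data_end) */
}

int main(void)
{
        unsigned char buf[4] = { 0x12, 0x34, 0, 0 };
        struct ctx c = { buf, buf + sizeof(buf) };

        return parse_first_two_bytes(&c) == 0x1234 ? 0 : 1;
}
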
 static inline u8 *bpf_skb_cb(struct sk_buff *skb)
 {
        /* eBPF programs may read/write skb->cb[] area to transfer meta
index 14a97194b34ba16ba7906ae8c27b1767f266f317..70e61b58baaf662d15f4ca67f355b5dcf8d52438 100644 (file)
@@ -929,7 +929,7 @@ static inline struct file *get_file(struct file *f)
 /* Page cache limit. The filesystems should put that into their s_maxbytes 
    limits, otherwise bad things can happen in VM. */ 
 #if BITS_PER_LONG==32
-#define MAX_LFS_FILESIZE       (((loff_t)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) 
+#define MAX_LFS_FILESIZE       (((loff_t)PAGE_SIZE << (BITS_PER_LONG-1))-1)
 #elif BITS_PER_LONG==64
 #define MAX_LFS_FILESIZE       ((loff_t)0x7fffffffffffffffLL)
 #endif
@@ -1241,6 +1241,16 @@ static inline struct inode *file_inode(const struct file *f)
        return f->f_inode;
 }
 
+static inline struct dentry *file_dentry(const struct file *file)
+{
+       struct dentry *dentry = file->f_path.dentry;
+
+       if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
+               return dentry->d_op->d_real(dentry, file_inode(file));
+       else
+               return dentry;
+}
+
 static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
 {
        return locks_lock_inode_wait(file_inode(filp), fl);
@@ -2067,7 +2077,7 @@ extern int generic_update_time(struct inode *, struct timespec *, int);
 /* /sys/fs */
 extern struct kobject *fs_kobj;
 
-#define MAX_RW_COUNT (INT_MAX & PAGE_CACHE_MASK)
+#define MAX_RW_COUNT (INT_MAX & PAGE_MASK)
 
 #ifdef CONFIG_MANDATORY_FILE_LOCKING
 extern int locks_mandatory_locked(struct file *);
index cd91f75de49b50885669f33c99553c724d543d7a..6027f6bbb0611b20d454ff2ad3ae7330e01deb36 100644 (file)
@@ -263,9 +263,9 @@ static inline void fscrypt_set_d_op(struct dentry *dentry)
 extern struct kmem_cache *fscrypt_info_cachep;
 int fscrypt_initialize(void);
 
-extern struct fscrypt_ctx *fscrypt_get_ctx(struct inode *);
+extern struct fscrypt_ctx *fscrypt_get_ctx(struct inode *, gfp_t);
 extern void fscrypt_release_ctx(struct fscrypt_ctx *);
-extern struct page *fscrypt_encrypt_page(struct inode *, struct page *);
+extern struct page *fscrypt_encrypt_page(struct inode *, struct page *, gfp_t);
 extern int fscrypt_decrypt_page(struct page *);
 extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *);
 extern void fscrypt_pullback_bio_page(struct page **, bool);
@@ -299,7 +299,8 @@ extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *,
 #endif
 
 /* crypto.c */
-static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(struct inode *i)
+static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(struct inode *i,
+                                                       gfp_t f)
 {
        return ERR_PTR(-EOPNOTSUPP);
 }
@@ -310,7 +311,7 @@ static inline void fscrypt_notsupp_release_ctx(struct fscrypt_ctx *c)
 }
 
 static inline struct page *fscrypt_notsupp_encrypt_page(struct inode *i,
-                                               struct page *p)
+                                               struct page *p, gfp_t f)
 {
        return ERR_PTR(-EOPNOTSUPP);
 }
index eecd19b3700112b22268c7570b42e381aa3685f6..6270a56e5edc4eac7f707f121ff2a8e0e536386a 100644 (file)
@@ -62,6 +62,11 @@ extern void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void);
 
 /* MAGIC helpers                                                       {{{2 */
 
+static inline int nla_put_u64_0pad(struct sk_buff *skb, int attrtype, u64 value)
+{
+       return nla_put_64bit(skb, attrtype, sizeof(u64), &value, 0);
+}
+
 /* possible field types */
 #define __flg_field(attr_nr, attr_flag, name) \
        __field(attr_nr, attr_flag, name, NLA_U8, char, \
@@ -80,7 +85,7 @@ extern void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void);
                        nla_get_u32, nla_put_u32, true)
 #define __u64_field(attr_nr, attr_flag, name)  \
        __field(attr_nr, attr_flag, name, NLA_U64, __u64, \
-                       nla_get_u64, nla_put_u64, false)
+                       nla_get_u64, nla_put_u64_0pad, false)
 #define __str_field(attr_nr, attr_flag, name, maxlen) \
        __array(attr_nr, attr_flag, name, NLA_NUL_STRING, char, maxlen, \
                        nla_strlcpy, nla_put, false)
index 1afde47e1528c36ad8613c2e56fb23d1f614e476..79c52fa81cac9ea2dd3a1bea6e1093cd81fdb79c 100644 (file)
 #error Wordsize not 32 or 64
 #endif
 
+/*
+ * The above primes are actively bad for hashing, since they are
+ * too sparse. The 32-bit one is mostly ok, the 64-bit one causes
+ * real problems. Besides, the "prime" part is pointless for the
+ * multiplicative hash.
+ *
+ * Although a random odd number will do, it turns out that the golden
+ * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice
+ * properties.
+ *
+ * These are the negative, (1 - phi) = (phi^2) = (3 - sqrt(5))/2.
+ * (See Knuth vol 3, section 6.4, exercise 9.)
+ */
+#define GOLDEN_RATIO_32 0x61C88647
+#define GOLDEN_RATIO_64 0x61C8864680B583EBull
+
 static __always_inline u64 hash_64(u64 val, unsigned int bits)
 {
        u64 hash = val;
 
-#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
-       hash = hash * GOLDEN_RATIO_PRIME_64;
+#if BITS_PER_LONG == 64
+       hash = hash * GOLDEN_RATIO_64;
 #else
        /*  Sigh, gcc can't optimise this alone like it does for 32 bits. */
        u64 n = hash;
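
The new constants make hash_32()/hash_64() plain multiplicative hashes: multiply by an odd constant near 2^N/phi and keep the top bits, where the mixing accumulates. A standalone sketch, valid for bits in 1..31 (or 1..63):

#include <assert.h>
#include <stdint.h>

#define GOLDEN_RATIO_32 0x61C88647u
#define GOLDEN_RATIO_64 0x61C8864680B583EBull

static uint32_t hash32(uint32_t val, unsigned bits)
{
        return (val * GOLDEN_RATIO_32) >> (32 - bits);  /* top bits */
}

static uint64_t hash64(uint64_t val, unsigned bits)
{
        return (val * GOLDEN_RATIO_64) >> (64 - bits);  /* top bits */
}

int main(void)
{
        assert(hash32(0xdeadbeefu, 16) < (1u << 16));
        assert(hash64(0xdeadbeefcafef00dull, 20) < (1ull << 20));
        return 0;
}
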
index 7008623e24b19bfef175d32773fa5c9855d36e76..d7b9e5346fba0390890a4e477c8952485319e887 100644 (file)
@@ -152,6 +152,7 @@ static inline bool is_huge_zero_pmd(pmd_t pmd)
 }
 
 struct page *get_huge_zero_page(void);
+void put_huge_zero_page(void);
 
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
@@ -208,6 +209,10 @@ static inline bool is_huge_zero_page(struct page *page)
        return false;
 }
 
+static inline void put_huge_zero_page(void)
+{
+       BUILD_BUG();
+}
 
 static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
                unsigned long addr, pmd_t *pmd, int flags)
index d3e415674dac4dfa34145c3f1d3a49685c70413c..acedbb68a5a38d0c51d9d22474a75528496a3e1a 100644 (file)
@@ -47,6 +47,7 @@
 #define IEEE802154_ADDR_SHORT_UNSPEC   0xfffe
 
 #define IEEE802154_EXTENDED_ADDR_LEN   8
+#define IEEE802154_SHORT_ADDR_LEN      2
 
 #define IEEE802154_LIFS_PERIOD         40
 #define IEEE802154_SIFS_PERIOD         12
@@ -218,6 +219,7 @@ enum {
 /* frame control handling */
 #define IEEE802154_FCTL_FTYPE          0x0003
 #define IEEE802154_FCTL_ACKREQ         0x0020
+#define IEEE802154_FCTL_SECEN          0x0004
 #define IEEE802154_FCTL_INTRA_PAN      0x0040
 
 #define IEEE802154_FTYPE_DATA          0x0001
@@ -232,6 +234,15 @@ static inline int ieee802154_is_data(__le16 fc)
                cpu_to_le16(IEEE802154_FTYPE_DATA);
 }
 
+/**
+ * ieee802154_is_secen - check if Security bit is set
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline bool ieee802154_is_secen(__le16 fc)
+{
+       return fc & cpu_to_le16(IEEE802154_FCTL_SECEN);
+}
+
 /**
  * ieee802154_is_ackreq - check if acknowledgment request bit is set
  * @fc: frame control bytes in little-endian byteorder
@@ -260,17 +271,17 @@ static inline bool ieee802154_is_intra_pan(__le16 fc)
  *
  * @len: psdu len with (MHR + payload + MFR)
  */
-static inline bool ieee802154_is_valid_psdu_len(const u8 len)
+static inline bool ieee802154_is_valid_psdu_len(u8 len)
 {
        return (len == IEEE802154_ACK_PSDU_LEN ||
                (len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU));
 }
 
 /**
- * ieee802154_is_valid_psdu_len - check if extended addr is valid
+ * ieee802154_is_valid_extended_unicast_addr - check if extended addr is valid
  * @addr: extended addr to check
  */
-static inline bool ieee802154_is_valid_extended_unicast_addr(const __le64 addr)
+static inline bool ieee802154_is_valid_extended_unicast_addr(__le64 addr)
 {
        /* Bail out if the address is all zero, or if the group
         * address bit is set.
@@ -279,6 +290,34 @@ static inline bool ieee802154_is_valid_extended_unicast_addr(const __le64 addr)
                !(addr & cpu_to_le64(0x0100000000000000ULL)));
 }
 
+/**
+ * ieee802154_is_broadcast_short_addr - check if short addr is broadcast
+ * @addr: short addr to check
+ */
+static inline bool ieee802154_is_broadcast_short_addr(__le16 addr)
+{
+       return (addr == cpu_to_le16(IEEE802154_ADDR_SHORT_BROADCAST));
+}
+
+/**
+ * ieee802154_is_unspec_short_addr - check if short addr is unspecified
+ * @addr: short addr to check
+ */
+static inline bool ieee802154_is_unspec_short_addr(__le16 addr)
+{
+       return (addr == cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC));
+}
+
+/**
+ * ieee802154_is_valid_src_short_addr - check if source short address is valid
+ * @addr: short addr to check
+ */
+static inline bool ieee802154_is_valid_src_short_addr(__le16 addr)
+{
+       return !(ieee802154_is_broadcast_short_addr(addr) ||
+                ieee802154_is_unspec_short_addr(addr));
+}
+
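
A valid source short address is anything other than the broadcast (0xffff) and unspecified (0xfffe) values the two helpers above test for. A quick host-endian truth-table check; the kernel compares little-endian __le16 values, plain constants are used here for brevity:

#include <assert.h>
#include <stdint.h>

#define ADDR_BROADCAST 0xffff
#define ADDR_UNSPEC    0xfffe

static int is_valid_src_short_addr(uint16_t addr)
{
        return addr != ADDR_BROADCAST && addr != ADDR_UNSPEC;
}

int main(void)
{
        assert(!is_valid_src_short_addr(ADDR_BROADCAST));
        assert(!is_valid_src_short_addr(ADDR_UNSPEC));
        assert(is_valid_src_short_addr(0x1234));
        return 0;
}
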
 /**
  * ieee802154_random_extended_addr - generates a random extended address
  * @addr: extended addr pointer to place the random address
index d5569734f6724d6194497fe9842e9aac56ef0b09..548fd535fd02399634bace0607471d07b973d8dc 100644 (file)
@@ -28,6 +28,11 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
        return (struct ethhdr *)skb_mac_header(skb);
 }
 
+static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb)
+{
+       return (struct ethhdr *)skb_inner_mac_header(skb);
+}
+
 int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
 
 extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len);
index a5c539fa5d2bc03ba233f4d11de1b64d839990ab..ef7a6ecd85846c04fa5cc1aed2c6aa9373fedd91 100644 (file)
@@ -195,9 +195,7 @@ struct iommu_ops {
        /* Get the number of windows per domain */
        u32 (*domain_get_windows)(struct iommu_domain *domain);
 
-#ifdef CONFIG_OF_IOMMU
        int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
-#endif
 
        unsigned long pgsize_bitmap;
        void *priv;
index 7edc14fb66b6870298b7927e0727931d72342c1b..5c91b0b055d4974d4316e5c8c01580ea7abe734e 100644 (file)
@@ -63,7 +63,8 @@ struct ipv6_devconf {
        } stable_secret;
        __s32           use_oif_addrs_only;
        __s32           keep_addr_on_down;
-       void            *sysctl;
+
+       struct ctl_table_header *sysctl_header;
 };
 
 struct ipv6_params {
@@ -117,14 +118,29 @@ struct inet6_skb_parm {
 #define IP6SKB_ROUTERALERT     8
 #define IP6SKB_FRAGMENTED      16
 #define IP6SKB_HOPBYHOP        32
+#define IP6SKB_L3SLAVE         64
 };
 
+#if defined(CONFIG_NET_L3_MASTER_DEV)
+static inline bool skb_l3mdev_slave(__u16 flags)
+{
+       return flags & IP6SKB_L3SLAVE;
+}
+#else
+static inline bool skb_l3mdev_slave(__u16 flags)
+{
+       return false;
+}
+#endif
+
 #define IP6CB(skb)     ((struct inet6_skb_parm*)((skb)->cb))
 #define IP6CBMTU(skb)  ((struct ip6_mtuinfo *)((skb)->cb))
 
 static inline int inet6_iif(const struct sk_buff *skb)
 {
-       return IP6CB(skb)->iif;
+       bool l3_slave = skb_l3mdev_slave(IP6CB(skb)->flags);
+
+       return l3_slave ? skb->skb_iif : IP6CB(skb)->iif;
 }
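
skb_l3mdev_slave() follows the usual config-gated stub pattern: with CONFIG_NET_L3_MASTER_DEV disabled it is constant false, so the new branch in inet6_iif() folds away at compile time. A standalone model of that shape, with FEATURE_L3MDEV standing in for the Kconfig option and 0x40 mirroring IP6SKB_L3SLAVE:

#include <stdio.h>

#ifdef FEATURE_L3MDEV
static int skb_l3mdev_slave(unsigned flags) { return flags & 0x40; }
#else
static int skb_l3mdev_slave(unsigned flags) { (void)flags; return 0; }
#endif

static int pick_iif(unsigned flags, int skb_iif, int cb_iif)
{
        return skb_l3mdev_slave(flags) ? skb_iif : cb_iif;
}

int main(void)
{
        /* prints 7 unless built with -DFEATURE_L3MDEV, then 3 */
        printf("%d\n", pick_iif(0x40, 3, 7));
        return 0;
}
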
 
 struct tcp6_request_sock {
index d026b190c53066d25753ce98f0d7c66d864a6c0a..d10ef06971b57d8783934333612cb196421716ab 100644 (file)
@@ -196,9 +196,11 @@ struct lock_list {
  * We record lock dependency chains, so that we can cache them:
  */
 struct lock_chain {
-       u8                              irq_context;
-       u8                              depth;
-       u16                             base;
+       /* see BUILD_BUG_ON()s in lookup_chain_cache() */
+       unsigned int                    irq_context :  2,
+                                       depth       :  6,
+                                       base        : 24;
+       /* 4 byte hole */
        struct hlist_node               entry;
        u64                             chain_key;
 };
index 5bfd99d1a40a7fe91833780909534cd3aa2f990a..bf9d1d75069353b84d9f2ff53c49bd7c6d67bdfa 100644 (file)
 
 struct mii_bus;
 
+/* Multiple levels of nesting are possible. However, typically this is
+ * limited to a nested DSA-like layer, a MUX layer, and the normal
+ * user. Instead of trying to handle the general case, just define
+ * these cases.
+ */
+enum mdio_mutex_lock_class {
+       MDIO_MUTEX_NORMAL,
+       MDIO_MUTEX_MUX,
+       MDIO_MUTEX_NESTED,
+};
+
 struct mdio_device {
        struct device dev;
 
index 8541a913f6a36effd78ef24bb0bc3cd69ce20541..80dec87a94f8431024d88a6303f1f9189b2114e6 100644 (file)
@@ -828,6 +828,11 @@ struct mlx4_vf_dev {
        u8                      n_ports;
 };
 
+enum mlx4_pci_status {
+       MLX4_PCI_STATUS_DISABLED,
+       MLX4_PCI_STATUS_ENABLED,
+};
+
 struct mlx4_dev_persistent {
        struct pci_dev         *pdev;
        struct mlx4_dev        *dev;
@@ -841,6 +846,8 @@ struct mlx4_dev_persistent {
        u8              state;
        struct mutex    interface_state_mutex; /* protect SW state */
        u8      interface_state;
+       struct mutex            pci_status_mutex; /* sync pci state */
+       enum mlx4_pci_status    pci_status;
 };
 
 struct mlx4_dev {
@@ -1051,7 +1058,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
 void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
 static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
 {
-       if (BITS_PER_LONG == 64 || buf->nbufs == 1)
+       if (buf->nbufs == 1)
                return buf->direct.buf + offset;
        else
                return buf->page_list[offset >> PAGE_SHIFT].buf +
@@ -1091,7 +1098,7 @@ int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order,
 void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);
 
 int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
-                      int size, int max_direct);
+                      int size);
 void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
                       int size);
 
index 8156e3c9239ce6a5c6883040b1b6d1b619ad1169..035abdf62cfe953e5b15822aa847868032989574 100644 (file)
@@ -59,6 +59,7 @@
 #define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
 #define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
 #define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
+#define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64)
 #define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
 #define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
 #define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
@@ -392,6 +393,17 @@ enum {
        MLX5_CAP_OFF_CMDIF_CSUM         = 46,
 };
 
+enum {
+       /*
+        * Max wqe size for rdma read is 512 bytes, so this
+        * limits our max_sge_rd as the wqe needs to fit:
+        * - ctrl segment (16 bytes)
+        * - rdma segment (16 bytes)
+        * - scatter elements (16 bytes each)
+        */
+       MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16
+};
+
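
The arithmetic in the new enum checks out: a 512-byte RDMA-read WQE minus one 16-byte ctrl segment and one 16-byte rdma segment leaves room for 30 scatter elements of 16 bytes each.

#include <assert.h>

int main(void)
{
        assert((512 - 16 - 16) / 16 == 30);     /* MLX5_MAX_SGE_RD */
        return 0;
}
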
 struct mlx5_inbox_hdr {
        __be16          opcode;
        u8              rsvd[4];
@@ -644,7 +656,9 @@ struct mlx5_err_cqe {
 };
 
 struct mlx5_cqe64 {
-       u8              rsvd0[4];
+       u8              outer_l3_tunneled;
+       u8              rsvd0;
+       __be16          wqe_id;
        u8              lro_tcppsh_abort_dupack;
        u8              lro_min_ttl;
        __be16          lro_tcp_win;
@@ -657,7 +671,7 @@ struct mlx5_cqe64 {
        __be16          slid;
        __be32          flags_rqpn;
        u8              hds_ip_ext;
-       u8              l4_hdr_type_etc;
+       u8              l4_l3_hdr_type;
        __be16          vlan_info;
        __be32          srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
        __be32          imm_inval_pkey;
@@ -671,6 +685,40 @@ struct mlx5_cqe64 {
        u8              op_own;
 };
 
+struct mlx5_mini_cqe8 {
+       union {
+               __be32 rx_hash_result;
+               struct {
+                       __be16 checksum;
+                       __be16 rsvd;
+               };
+               struct {
+                       __be16 wqe_counter;
+                       u8  s_wqe_opcode;
+                       u8  reserved;
+               } s_wqe_info;
+       };
+       __be32 byte_cnt;
+};
+
+enum {
+       MLX5_NO_INLINE_DATA,
+       MLX5_INLINE_DATA32_SEG,
+       MLX5_INLINE_DATA64_SEG,
+       MLX5_COMPRESSED,
+};
+
+enum {
+       MLX5_CQE_FORMAT_CSUM = 0x1,
+};
+
+#define MLX5_MINI_CQE_ARRAY_SIZE 8
+
+static inline int mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
+{
+       return (cqe->op_own >> 2) & 0x3;
+}
+
 static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
 {
        return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
@@ -678,12 +726,22 @@ static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
 
 static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
 {
-       return (cqe->l4_hdr_type_etc >> 4) & 0x7;
+       return (cqe->l4_l3_hdr_type >> 4) & 0x7;
+}
+
+static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe)
+{
+       return (cqe->l4_l3_hdr_type >> 2) & 0x3;
+}
+
+static inline u8 cqe_is_tunneled(struct mlx5_cqe64 *cqe)
+{
+       return cqe->outer_l3_tunneled & 0x1;
 }
 
 static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe)
 {
-       return !!(cqe->l4_hdr_type_etc & 0x1);
+       return !!(cqe->l4_l3_hdr_type & 0x1);
 }
 
 static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
@@ -696,6 +754,42 @@ static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
        return (u64)lo | ((u64)hi << 32);
 }
 
+struct mpwrq_cqe_bc {
+       __be16  filler_consumed_strides;
+       __be16  byte_cnt;
+};
+
+static inline u16 mpwrq_get_cqe_byte_cnt(struct mlx5_cqe64 *cqe)
+{
+       struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
+
+       return be16_to_cpu(bc->byte_cnt);
+}
+
+static inline u16 mpwrq_get_cqe_bc_consumed_strides(struct mpwrq_cqe_bc *bc)
+{
+       return 0x7fff & be16_to_cpu(bc->filler_consumed_strides);
+}
+
+static inline u16 mpwrq_get_cqe_consumed_strides(struct mlx5_cqe64 *cqe)
+{
+       struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
+
+       return mpwrq_get_cqe_bc_consumed_strides(bc);
+}
+
+static inline bool mpwrq_is_filler_cqe(struct mlx5_cqe64 *cqe)
+{
+       struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
+
+       return 0x8000 & be16_to_cpu(bc->filler_consumed_strides);
+}
+
+static inline u16 mpwrq_get_cqe_stride_index(struct mlx5_cqe64 *cqe)
+{
+       return be16_to_cpu(cqe->wqe_counter);
+}
+
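
The multi-packet WQE helpers above unpack one big-endian 16-bit field: bit 15 flags a filler CQE and bits 14..0 carry the consumed-strides count. A standalone decode of that packing, with be16() as a stand-in for be16_to_cpu():

#include <stdint.h>

static uint16_t be16(const uint8_t b[2])
{
        return (uint16_t)(b[0] << 8) | b[1];    /* big-endian load */
}

static int is_filler(const uint8_t fcs[2])
{
        return (be16(fcs) & 0x8000) != 0;       /* bit 15           */
}

static uint16_t consumed_strides(const uint8_t fcs[2])
{
        return be16(fcs) & 0x7fff;              /* bits 14..0       */
}

int main(void)
{
        uint8_t fcs[2] = { 0x80, 0x05 };        /* filler, 5 strides */

        return (is_filler(fcs) && consumed_strides(fcs) == 5) ? 0 : 1;
}
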
 enum {
        CQE_L4_HDR_TYPE_NONE                    = 0x0,
        CQE_L4_HDR_TYPE_TCP_NO_ACK              = 0x1,
@@ -1289,6 +1383,18 @@ enum mlx5_cap_type {
 #define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
        MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)
 
+#define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \
+       MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap)
+
+#define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \
+       MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap)
+
+#define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \
+       MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap)
+
+#define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
+       MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)
+
 #define MLX5_CAP_ESW(mdev, cap) \
        MLX5_GET(e_switch_cap, \
                 mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap)
@@ -1331,6 +1437,7 @@ enum {
        MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
        MLX5_PER_PRIORITY_COUNTERS_GROUP      = 0x10,
        MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
+       MLX5_PHYSICAL_LAYER_COUNTERS_GROUP    = 0x12,
        MLX5_INFINIBAND_PORT_COUNTERS_GROUP   = 0x20,
 };
 
index dcd5ac8d3b1403875bce11aeddaf106acc0cd218..9613143f0561e26297f093134259b4dca6646f75 100644 (file)
 #include <linux/mlx5/device.h>
 #include <linux/mlx5/doorbell.h>
 
+enum {
+       MLX5_RQ_BITMASK_VSD = 1 << 1,
+};
+
 enum {
        MLX5_BOARD_ID_LEN = 64,
        MLX5_MAX_NAME_LEN = 16,
@@ -112,9 +116,12 @@ enum {
        MLX5_REG_PMPE            = 0x5010,
        MLX5_REG_PELC            = 0x500e,
        MLX5_REG_PVLC            = 0x500f,
-       MLX5_REG_PMLP            = 0, /* TBD */
+       MLX5_REG_PCMR            = 0x5041,
+       MLX5_REG_PMLP            = 0x5002,
        MLX5_REG_NODE_DESC       = 0x6001,
        MLX5_REG_HOST_ENDIANNESS = 0x7004,
+       MLX5_REG_MCIA            = 0x9014,
+       MLX5_REG_MLCR            = 0x902b,
 };
 
 enum {
@@ -511,6 +518,8 @@ struct mlx5_priv {
        unsigned long           pci_dev_data;
        struct mlx5_flow_root_namespace *root_ns;
        struct mlx5_flow_root_namespace *fdb_root_ns;
+       struct mlx5_flow_root_namespace *esw_egress_root_ns;
+       struct mlx5_flow_root_namespace *esw_ingress_root_ns;
 };
 
 enum mlx5_device_state {
@@ -519,8 +528,9 @@ enum mlx5_device_state {
 };
 
 enum mlx5_interface_state {
-       MLX5_INTERFACE_STATE_DOWN,
-       MLX5_INTERFACE_STATE_UP,
+       MLX5_INTERFACE_STATE_DOWN = BIT(0),
+       MLX5_INTERFACE_STATE_UP = BIT(1),
+       MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2),
 };
 
 enum mlx5_pci_status {
@@ -544,7 +554,7 @@ struct mlx5_core_dev {
        enum mlx5_device_state  state;
        /* sync interface state */
        struct mutex            intf_state_mutex;
-       enum mlx5_interface_state interface_state;
+       unsigned long           intf_state;
        void                    (*event) (struct mlx5_core_dev *dev,
                                          enum mlx5_dev_event event,
                                          unsigned long param);
@@ -552,6 +562,9 @@ struct mlx5_core_dev {
        struct mlx5_profile     *profile;
        atomic_t                num_qps;
        u32                     issi;
+#ifdef CONFIG_RFS_ACCEL
+       struct cpu_rmap         *rmap;
+#endif
 };
 
 struct mlx5_db {
index 8dec5508d93d355e3101935106bcfb582e795193..6467569ad76edb90403feca18587158fb7b1a0c1 100644 (file)
@@ -58,6 +58,8 @@ enum mlx5_flow_namespace_type {
        MLX5_FLOW_NAMESPACE_LEFTOVERS,
        MLX5_FLOW_NAMESPACE_ANCHOR,
        MLX5_FLOW_NAMESPACE_FDB,
+       MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+       MLX5_FLOW_NAMESPACE_ESW_INGRESS,
 };
 
 struct mlx5_flow_table;
@@ -82,12 +84,19 @@ struct mlx5_flow_table *
 mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
                                    int prio,
                                    int num_flow_table_entries,
-                                   int max_num_groups);
+                                   int max_num_groups,
+                                   u32 level);
 
 struct mlx5_flow_table *
 mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
                       int prio,
-                      int num_flow_table_entries);
+                      int num_flow_table_entries,
+                      u32 level);
+struct mlx5_flow_table *
+mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
+                            int prio,
+                            int num_flow_table_entries,
+                            u32 level, u16 vport);
 int mlx5_destroy_flow_table(struct mlx5_flow_table *ft);
 
 /* inbox should be set with the following values:
@@ -113,4 +122,7 @@ mlx5_add_flow_rule(struct mlx5_flow_table *ft,
                   struct mlx5_flow_destination *dest);
 void mlx5_del_flow_rule(struct mlx5_flow_rule *fr);
 
+int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
+                                struct mlx5_flow_destination *dest);
+
 #endif
index c15b8a8649377ea401a556c0f709b3bfe112f30f..4ce4ea422a105ca2acbd5d0fba0499bd64f29b2b 100644 (file)
@@ -513,7 +513,9 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
        u8         max_lso_cap[0x5];
        u8         reserved_at_10[0x4];
        u8         rss_ind_tbl_cap[0x4];
-       u8         reserved_at_18[0x3];
+       u8         reg_umr_sq[0x1];
+       u8         scatter_fcs[0x1];
+       u8         reserved_at_1a[0x1];
        u8         tunnel_lso_const_out_ip_id[0x1];
        u8         reserved_at_1c[0x2];
        u8         tunnel_statless_gre[0x1];
@@ -648,7 +650,7 @@ struct mlx5_ifc_vector_calc_cap_bits {
 enum {
        MLX5_WQ_TYPE_LINKED_LIST  = 0x0,
        MLX5_WQ_TYPE_CYCLIC       = 0x1,
-       MLX5_WQ_TYPE_STRQ         = 0x2,
+       MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ = 0x2,
 };
 
 enum {
@@ -750,21 +752,25 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         ets[0x1];
        u8         nic_flow_table[0x1];
        u8         eswitch_flow_table[0x1];
-       u8         early_vf_enable;
-       u8         reserved_at_1a8[0x2];
+       u8         early_vf_enable[0x1];
+       u8         reserved_at_1a9[0x2];
        u8         local_ca_ack_delay[0x5];
-       u8         reserved_at_1af[0x6];
+       u8         reserved_at_1af[0x2];
+       u8         ports_check[0x1];
+       u8         reserved_at_1b2[0x1];
+       u8         disable_link_up[0x1];
+       u8         beacon_led[0x1];
        u8         port_type[0x2];
        u8         num_ports[0x8];
 
-       u8         reserved_at_1bf[0x3];
+       u8         reserved_at_1c0[0x3];
        u8         log_max_msg[0x5];
-       u8         reserved_at_1c7[0x4];
+       u8         reserved_at_1c8[0x4];
        u8         max_tc[0x4];
-       u8         reserved_at_1cf[0x6];
+       u8         reserved_at_1d0[0x6];
        u8         rol_s[0x1];
        u8         rol_g[0x1];
-       u8         reserved_at_1d7[0x1];
+       u8         reserved_at_1d8[0x1];
        u8         wol_s[0x1];
        u8         wol_g[0x1];
        u8         wol_a[0x1];
@@ -774,47 +780,48 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         wol_p[0x1];
 
        u8         stat_rate_support[0x10];
-       u8         reserved_at_1ef[0xc];
+       u8         reserved_at_1f0[0xc];
        u8         cqe_version[0x4];
 
        u8         compact_address_vector[0x1];
-       u8         reserved_at_200[0x3];
+       u8         striding_rq[0x1];
+       u8         reserved_at_201[0x2];
        u8         ipoib_basic_offloads[0x1];
-       u8         reserved_at_204[0xa];
+       u8         reserved_at_205[0xa];
        u8         drain_sigerr[0x1];
        u8         cmdif_checksum[0x2];
        u8         sigerr_cqe[0x1];
-       u8         reserved_at_212[0x1];
+       u8         reserved_at_213[0x1];
        u8         wq_signature[0x1];
        u8         sctr_data_cqe[0x1];
-       u8         reserved_at_215[0x1];
+       u8         reserved_at_216[0x1];
        u8         sho[0x1];
        u8         tph[0x1];
        u8         rf[0x1];
        u8         dct[0x1];
-       u8         reserved_at_21a[0x1];
+       u8         reserved_at_21b[0x1];
        u8         eth_net_offloads[0x1];
        u8         roce[0x1];
        u8         atomic[0x1];
-       u8         reserved_at_21e[0x1];
+       u8         reserved_at_21f[0x1];
 
        u8         cq_oi[0x1];
        u8         cq_resize[0x1];
        u8         cq_moderation[0x1];
-       u8         reserved_at_222[0x3];
+       u8         reserved_at_223[0x3];
        u8         cq_eq_remap[0x1];
        u8         pg[0x1];
        u8         block_lb_mc[0x1];
-       u8         reserved_at_228[0x1];
+       u8         reserved_at_229[0x1];
        u8         scqe_break_moderation[0x1];
-       u8         reserved_at_22a[0x1];
+       u8         cq_period_start_from_cqe[0x1];
        u8         cd[0x1];
-       u8         reserved_at_22c[0x1];
+       u8         reserved_at_22d[0x1];
        u8         apm[0x1];
        u8         vector_calc[0x1];
-       u8         reserved_at_22f[0x1];
+       u8         umr_ptr_rlky[0x1];
        u8         imaicl[0x1];
-       u8         reserved_at_231[0x4];
+       u8         reserved_at_232[0x4];
        u8         qkv[0x1];
        u8         pkv[0x1];
        u8         set_deth_sqpn[0x1];
@@ -824,98 +831,101 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         uc[0x1];
        u8         rc[0x1];
 
-       u8         reserved_at_23f[0xa];
+       u8         reserved_at_240[0xa];
        u8         uar_sz[0x6];
-       u8         reserved_at_24f[0x8];
+       u8         reserved_at_250[0x8];
        u8         log_pg_sz[0x8];
 
        u8         bf[0x1];
-       u8         reserved_at_260[0x1];
+       u8         reserved_at_261[0x1];
        u8         pad_tx_eth_packet[0x1];
-       u8         reserved_at_262[0x8];
+       u8         reserved_at_263[0x8];
        u8         log_bf_reg_size[0x5];
-       u8         reserved_at_26f[0x10];
+       u8         reserved_at_270[0x10];
 
-       u8         reserved_at_27f[0x10];
+       u8         reserved_at_280[0x10];
        u8         max_wqe_sz_sq[0x10];
 
-       u8         reserved_at_29f[0x10];
+       u8         reserved_at_2a0[0x10];
        u8         max_wqe_sz_rq[0x10];
 
-       u8         reserved_at_2bf[0x10];
+       u8         reserved_at_2c0[0x10];
        u8         max_wqe_sz_sq_dc[0x10];
 
-       u8         reserved_at_2df[0x7];
+       u8         reserved_at_2e0[0x7];
        u8         max_qp_mcg[0x19];
 
-       u8         reserved_at_2ff[0x18];
+       u8         reserved_at_300[0x18];
        u8         log_max_mcg[0x8];
 
-       u8         reserved_at_31f[0x3];
+       u8         reserved_at_320[0x3];
        u8         log_max_transport_domain[0x5];
-       u8         reserved_at_327[0x3];
+       u8         reserved_at_328[0x3];
        u8         log_max_pd[0x5];
-       u8         reserved_at_32f[0xb];
+       u8         reserved_at_330[0xb];
        u8         log_max_xrcd[0x5];
 
-       u8         reserved_at_33f[0x20];
+       u8         reserved_at_340[0x20];
 
-       u8         reserved_at_35f[0x3];
+       u8         reserved_at_360[0x3];
        u8         log_max_rq[0x5];
-       u8         reserved_at_367[0x3];
+       u8         reserved_at_368[0x3];
        u8         log_max_sq[0x5];
-       u8         reserved_at_36f[0x3];
+       u8         reserved_at_370[0x3];
        u8         log_max_tir[0x5];
-       u8         reserved_at_377[0x3];
+       u8         reserved_at_378[0x3];
        u8         log_max_tis[0x5];
 
        u8         basic_cyclic_rcv_wqe[0x1];
-       u8         reserved_at_380[0x2];
+       u8         reserved_at_381[0x2];
        u8         log_max_rmp[0x5];
-       u8         reserved_at_387[0x3];
+       u8         reserved_at_388[0x3];
        u8         log_max_rqt[0x5];
-       u8         reserved_at_38f[0x3];
+       u8         reserved_at_390[0x3];
        u8         log_max_rqt_size[0x5];
-       u8         reserved_at_397[0x3];
+       u8         reserved_at_398[0x3];
        u8         log_max_tis_per_sq[0x5];
 
-       u8         reserved_at_39f[0x3];
+       u8         reserved_at_3a0[0x3];
        u8         log_max_stride_sz_rq[0x5];
-       u8         reserved_at_3a7[0x3];
+       u8         reserved_at_3a8[0x3];
        u8         log_min_stride_sz_rq[0x5];
-       u8         reserved_at_3af[0x3];
+       u8         reserved_at_3b0[0x3];
        u8         log_max_stride_sz_sq[0x5];
-       u8         reserved_at_3b7[0x3];
+       u8         reserved_at_3b8[0x3];
        u8         log_min_stride_sz_sq[0x5];
 
-       u8         reserved_at_3bf[0x1b];
+       u8         reserved_at_3c0[0x1b];
        u8         log_max_wq_sz[0x5];
 
        u8         nic_vport_change_event[0x1];
-       u8         reserved_at_3e0[0xa];
+       u8         reserved_at_3e1[0xa];
        u8         log_max_vlan_list[0x5];
-       u8         reserved_at_3ef[0x3];
+       u8         reserved_at_3f0[0x3];
        u8         log_max_current_mc_list[0x5];
-       u8         reserved_at_3f7[0x3];
+       u8         reserved_at_3f8[0x3];
        u8         log_max_current_uc_list[0x5];
 
-       u8         reserved_at_3ff[0x80];
+       u8         reserved_at_400[0x80];
 
-       u8         reserved_at_47f[0x3];
+       u8         reserved_at_480[0x3];
        u8         log_max_l2_table[0x5];
-       u8         reserved_at_487[0x8];
+       u8         reserved_at_488[0x8];
        u8         log_uar_page_sz[0x10];
 
-       u8         reserved_at_49f[0x20];
+       u8         reserved_at_4a0[0x20];
        u8         device_frequency_mhz[0x20];
        u8         device_frequency_khz[0x20];
-       u8         reserved_at_4ff[0x5f];
-       u8         cqe_zip[0x1];
 
-       u8         cqe_zip_timeout[0x10];
-       u8         cqe_zip_max_num[0x10];
+       u8         reserved_at_500[0x80];
 
-       u8         reserved_at_57f[0x220];
+       u8         reserved_at_580[0x3f];
+       u8         cqe_compression[0x1];
+
+       u8         cqe_compression_timeout[0x10];
+       u8         cqe_compression_max_num[0x10];
+
+       u8         reserved_at_5e0[0x220];
 };
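
The renames above follow the mlx5_ifc convention that a field named reserved_at_<off>[<len>] occupies <len> bits starting at bit offset <off> (both hex) within the structure; the old names were off by one bit. New capability bits such as striding_rq and cqe_compression are carved out of formerly reserved space and read through the generic accessors. A minimal sketch, assuming a probed mlx5_core_dev and the MLX5_CAP_GEN() accessor from include/linux/mlx5/device.h:

#include <linux/mlx5/device.h>
#include <linux/mlx5/driver.h>

static bool supports_striding_rq(struct mlx5_core_dev *mdev)
{
        /* reads the striding_rq bit of mlx5_ifc_cmd_hca_cap_bits */
        return MLX5_CAP_GEN(mdev, striding_rq);
}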
 
 enum mlx5_flow_destination_type {
@@ -997,7 +1007,13 @@ struct mlx5_ifc_wq_bits {
        u8         reserved_at_118[0x3];
        u8         log_wq_sz[0x5];
 
-       u8         reserved_at_120[0x4e0];
+       u8         reserved_at_120[0x15];
+       u8         log_wqe_num_of_strides[0x3];
+       u8         two_byte_shift_en[0x1];
+       u8         reserved_at_139[0x4];
+       u8         log_wqe_stride_size[0x3];
+
+       u8         reserved_at_140[0x4c0];
 
        struct mlx5_ifc_cmd_pas_bits pas[0];
 };
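
The new wq fields describe striding (multi-packet) receive WQEs. A hedged sketch of filling them when building an RQ context; the values are placeholders, not the driver's actual configuration:

        void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

        MLX5_SET(wq, wq, log_wqe_num_of_strides, 6);    /* 2^6 strides per WQE */
        MLX5_SET(wq, wq, log_wqe_stride_size, 6);       /* 2^6-byte strides */
        MLX5_SET(wq, wq, two_byte_shift_en, 0);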
@@ -2196,7 +2212,8 @@ struct mlx5_ifc_sqc_bits {
        u8         flush_in_error_en[0x1];
        u8         reserved_at_4[0x4];
        u8         state[0x4];
-       u8         reserved_at_c[0x14];
+       u8         reg_umr[0x1];
+       u8         reserved_at_d[0x13];
 
        u8         reserved_at_20[0x8];
        u8         user_index[0x18];
@@ -2244,7 +2261,8 @@ enum {
 
 struct mlx5_ifc_rqc_bits {
        u8         rlky[0x1];
-       u8         reserved_at_1[0x2];
+       u8         reserved_at_1[0x1];
+       u8         scatter_fcs[0x1];
        u8         vsd[0x1];
        u8         mem_rq_type[0x4];
        u8         state[0x4];
@@ -2601,6 +2619,11 @@ enum {
        MLX5_CQC_ST_FIRED                                 = 0xa,
 };
 
+enum {
+       MLX5_CQ_PERIOD_MODE_START_FROM_EQE = 0x0,
+       MLX5_CQ_PERIOD_MODE_START_FROM_CQE = 0x1,
+};
+
 struct mlx5_ifc_cqc_bits {
        u8         status[0x4];
        u8         reserved_at_4[0x4];
@@ -2609,8 +2632,8 @@ struct mlx5_ifc_cqc_bits {
        u8         reserved_at_c[0x1];
        u8         scqe_break_moderation_en[0x1];
        u8         oi[0x1];
-       u8         reserved_at_f[0x2];
-       u8         cqe_zip_en[0x1];
+       u8         cq_period_mode[0x2];
+       u8         cqe_comp_en[0x1];
        u8         mini_cqe_res_format[0x2];
        u8         st[0x4];
        u8         reserved_at_18[0x8];
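
cqe_zip is renamed to cqe_compression throughout, and two formerly reserved bits now select where a moderation period restarts. A sketch of using both when initializing a CQ context, guarded by the new capability bits (cqc and mdev assumed in scope):

        if (MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
                MLX5_SET(cqc, cqc, cq_period_mode,
                         MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
        if (MLX5_CAP_GEN(mdev, cqe_compression))
                MLX5_SET(cqc, cqc, cqe_comp_en, 1);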
@@ -2984,7 +3007,11 @@ struct mlx5_ifc_set_fte_in_bits {
        u8         reserved_at_20[0x10];
        u8         op_mod[0x10];
 
-       u8         reserved_at_40[0x40];
+       u8         other_vport[0x1];
+       u8         reserved_at_41[0xf];
+       u8         vport_number[0x10];
+
+       u8         reserved_at_60[0x20];
 
        u8         table_type[0x8];
        u8         reserved_at_88[0x18];
@@ -5178,7 +5205,11 @@ struct mlx5_ifc_destroy_flow_table_in_bits {
        u8         reserved_at_20[0x10];
        u8         op_mod[0x10];
 
-       u8         reserved_at_40[0x40];
+       u8         other_vport[0x1];
+       u8         reserved_at_41[0xf];
+       u8         vport_number[0x10];
+
+       u8         reserved_at_60[0x20];
 
        u8         table_type[0x8];
        u8         reserved_at_88[0x18];
@@ -5205,7 +5236,11 @@ struct mlx5_ifc_destroy_flow_group_in_bits {
        u8         reserved_at_20[0x10];
        u8         op_mod[0x10];
 
-       u8         reserved_at_40[0x40];
+       u8         other_vport[0x1];
+       u8         reserved_at_41[0xf];
+       u8         vport_number[0x10];
+
+       u8         reserved_at_60[0x20];
 
        u8         table_type[0x8];
        u8         reserved_at_88[0x18];
@@ -5346,7 +5381,11 @@ struct mlx5_ifc_delete_fte_in_bits {
        u8         reserved_at_20[0x10];
        u8         op_mod[0x10];
 
-       u8         reserved_at_40[0x40];
+       u8         other_vport[0x1];
+       u8         reserved_at_41[0xf];
+       u8         vport_number[0x10];
+
+       u8         reserved_at_60[0x20];
 
        u8         table_type[0x8];
        u8         reserved_at_88[0x18];
@@ -5792,7 +5831,11 @@ struct mlx5_ifc_create_flow_table_in_bits {
        u8         reserved_at_20[0x10];
        u8         op_mod[0x10];
 
-       u8         reserved_at_40[0x40];
+       u8         other_vport[0x1];
+       u8         reserved_at_41[0xf];
+       u8         vport_number[0x10];
+
+       u8         reserved_at_60[0x20];
 
        u8         table_type[0x8];
        u8         reserved_at_88[0x18];
@@ -5836,7 +5879,11 @@ struct mlx5_ifc_create_flow_group_in_bits {
        u8         reserved_at_20[0x10];
        u8         op_mod[0x10];
 
-       u8         reserved_at_40[0x40];
+       u8         other_vport[0x1];
+       u8         reserved_at_41[0xf];
+       u8         vport_number[0x10];
+
+       u8         reserved_at_60[0x20];
 
        u8         table_type[0x8];
        u8         reserved_at_88[0x18];
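
Every flow-steering command above gains the same other_vport/vport_number pair, letting a function issue the command on behalf of a different virtual port (e.g. an eswitch manager configuring a VF). A hedged sketch for one of them, where in is the create_flow_table mailbox and vport the target:

        MLX5_SET(create_flow_table_in, in, other_vport, 1);
        MLX5_SET(create_flow_table_in, in, vport_number, vport);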
@@ -6369,6 +6416,17 @@ struct mlx5_ifc_ptys_reg_bits {
        u8         reserved_at_1a0[0x60];
 };
 
+struct mlx5_ifc_mlcr_reg_bits {
+       u8         reserved_at_0[0x8];
+       u8         local_port[0x8];
+       u8         reserved_at_10[0x20];
+
+       u8         beacon_duration[0x10];
+       u8         reserved_at_40[0x10];
+
+       u8         beacon_remain[0x10];
+};
+
 struct mlx5_ifc_ptas_reg_bits {
        u8         reserved_at_0[0x20];
 
@@ -6778,6 +6836,16 @@ struct mlx5_ifc_pamp_reg_bits {
        u8         index_data[18][0x10];
 };
 
+struct mlx5_ifc_pcmr_reg_bits {
+       u8         reserved_at_0[0x8];
+       u8         local_port[0x8];
+       u8         reserved_at_10[0x2e];
+       u8         fcs_cap[0x1];
+       u8         reserved_at_3f[0x1f];
+       u8         fcs_chk[0x1];
+       u8         reserved_at_5f[0x1];
+};
+
 struct mlx5_ifc_lane_2_module_mapping_bits {
        u8         reserved_at_0[0x6];
        u8         rx_lane[0x2];
@@ -7114,6 +7182,7 @@ union mlx5_ifc_ports_control_registers_document_bits {
        struct mlx5_ifc_pspa_reg_bits pspa_reg;
        struct mlx5_ifc_ptas_reg_bits ptas_reg;
        struct mlx5_ifc_ptys_reg_bits ptys_reg;
+       struct mlx5_ifc_mlcr_reg_bits mlcr_reg;
        struct mlx5_ifc_pude_reg_bits pude_reg;
        struct mlx5_ifc_pvlc_reg_bits pvlc_reg;
        struct mlx5_ifc_slrg_reg_bits slrg_reg;
@@ -7147,7 +7216,11 @@ struct mlx5_ifc_set_flow_table_root_in_bits {
        u8         reserved_at_20[0x10];
        u8         op_mod[0x10];
 
-       u8         reserved_at_40[0x40];
+       u8         other_vport[0x1];
+       u8         reserved_at_41[0xf];
+       u8         vport_number[0x10];
+
+       u8         reserved_at_60[0x20];
 
        u8         table_type[0x8];
        u8         reserved_at_88[0x18];
@@ -7178,7 +7251,9 @@ struct mlx5_ifc_modify_flow_table_in_bits {
        u8         reserved_at_20[0x10];
        u8         op_mod[0x10];
 
-       u8         reserved_at_40[0x20];
+       u8         other_vport[0x1];
+       u8         reserved_at_41[0xf];
+       u8         vport_number[0x10];
 
        u8         reserved_at_60[0x10];
        u8         modify_field_select[0x10];
@@ -7244,4 +7319,34 @@ struct mlx5_ifc_qtct_reg_bits {
        u8         tclass[0x3];
 };
 
+struct mlx5_ifc_mcia_reg_bits {
+       u8         l[0x1];
+       u8         reserved_at_1[0x7];
+       u8         module[0x8];
+       u8         reserved_at_10[0x8];
+       u8         status[0x8];
+
+       u8         i2c_device_address[0x8];
+       u8         page_number[0x8];
+       u8         device_address[0x10];
+
+       u8         reserved_at_40[0x10];
+       u8         size[0x10];
+
+       u8         reserved_at_60[0x20];
+
+       u8         dword_0[0x20];
+       u8         dword_1[0x20];
+       u8         dword_2[0x20];
+       u8         dword_3[0x20];
+       u8         dword_4[0x20];
+       u8         dword_5[0x20];
+       u8         dword_6[0x20];
+       u8         dword_7[0x20];
+       u8         dword_8[0x20];
+       u8         dword_9[0x20];
+       u8         dword_10[0x20];
+       u8         dword_11[0x20];
+};
+
 #endif /* MLX5_IFC_H */
index a1d145abd4eb55975f5f51e75febb3a70646fa87..9851862c0ec5e40f13af1706d8b5b5db3335e18b 100644 (file)
 
 #include <linux/mlx5/driver.h>
 
+enum mlx5_beacon_duration {
+       MLX5_BEACON_DURATION_OFF = 0x0,
+       MLX5_BEACON_DURATION_INF = 0xffff,
+};
+
+enum mlx5_module_id {
+       MLX5_MODULE_ID_SFP              = 0x3,
+       MLX5_MODULE_ID_QSFP             = 0xC,
+       MLX5_MODULE_ID_QSFP_PLUS        = 0xD,
+       MLX5_MODULE_ID_QSFP28           = 0x11,
+};
+
+#define MLX5_EEPROM_MAX_BYTES                  32
+#define MLX5_EEPROM_IDENTIFIER_BYTE_MASK       0x000000ff
+#define MLX5_I2C_ADDR_LOW              0x50
+#define MLX5_I2C_ADDR_HIGH             0x51
+#define MLX5_EEPROM_PAGE_LENGTH                256
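
A hedged sketch of reading the module identifier byte through the new helper; it assumes, as the ethtool consumers do, that mlx5_query_module_eeprom() returns the number of bytes read on success and that reads are capped at MLX5_EEPROM_MAX_BYTES per call:

        u8 id;
        int ret = mlx5_query_module_eeprom(dev, 0 /* offset */, 1, &id);

        if (ret > 0 &&
            (id & MLX5_EEPROM_IDENTIFIER_BYTE_MASK) == MLX5_MODULE_ID_QSFP28)
                pr_info("QSFP28 module detected\n");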
+
 int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
 int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
                         int ptys_size, int proto_mask, u8 local_port);
@@ -53,10 +71,11 @@ int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
                               enum mlx5_port_status status);
 int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
                                 enum mlx5_port_status *status);
+int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration);
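
mlx5_set_port_beacon() drives the MLCR register added above; a natural consumer is an ethtool ->set_phys_id() handler (sketch, handler wiring assumed):

        /* ETHTOOL_ID_ACTIVE: blink until told otherwise */
        mlx5_set_port_beacon(mdev, MLX5_BEACON_DURATION_INF);
        /* ETHTOOL_ID_INACTIVE: stop blinking */
        mlx5_set_port_beacon(mdev, MLX5_BEACON_DURATION_OFF);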
 
-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
-void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
-void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port);
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
                              u8 port);
 
 int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
@@ -84,4 +103,10 @@ int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
 int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode);
 int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode);
 
+int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable);
+void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
+                        bool *enabled);
+int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
+                            u16 offset, u16 size, u8 *data);
+
 #endif /* __MLX5_PORT_H__ */
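
Taken together with the PCMR register bits and the new rqc scatter_fcs flag above, the intended flow for delivering the FCS to the host looks roughly like this (a sketch with assumed semantics, not the driver's code):

        bool fcs_supported, fcs_enabled;

        mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
        if (fcs_supported) {
                mlx5_set_port_fcs(mdev, 0);             /* stop checking/stripping at the port */
                MLX5_SET(rqc, rqc, scatter_fcs, 1);     /* scatter the FCS into the buffer */
        }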
index cf031a3f16c583047d6c59389e7d57ce198356d1..64221027bf1f0d8bdaa2a1506e18da338e0f0c1e 100644 (file)
@@ -668,6 +668,12 @@ int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
                                struct mlx5_core_qp *sq);
 void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
                                  struct mlx5_core_qp *sq);
+int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id);
+int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id);
+int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
+                             int reset, void *out, int out_size);
+int mlx5_core_query_out_of_buffer(struct mlx5_core_dev *dev, u16 counter_id,
+                                 u32 *out_of_buffer);
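
A hedged sketch of the queue-counter lifecycle these prototypes imply; attaching the counter to an RQ is assumed to happen via the rqc counter_set_id field:

        u16 counter_id;
        u32 out_of_buffer;

        if (!mlx5_core_alloc_q_counter(dev, &counter_id)) {
                /* ... MLX5_SET(rqc, rqc, counter_set_id, counter_id) on RQ creation ... */
                mlx5_core_query_out_of_buffer(dev, counter_id, &out_of_buffer);
                mlx5_core_dealloc_q_counter(dev, counter_id);
        }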
 
 static inline const char *mlx5_qp_type_str(int type)
 {
index bd93e63236036b7086206714ec3e61cb052358c6..301da4a5e6bfa340363799310a21251c685935f0 100644 (file)
@@ -45,6 +45,8 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
                                     u16 vport, u8 *addr);
 int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
                                      u16 vport, u8 *addr);
+int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
+int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
                                           u64 *system_image_guid);
 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
index ed6407d1b7b5628ec8019e7afa667ab5e9a415d4..864d7221de846e44e600245eaac773ab7116bcca 100644 (file)
@@ -623,7 +623,7 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
  *
  * A page may belong to an inode's memory mapping. In this case, page->mapping
  * is the pointer to the inode, and page->index is the file offset of the page,
- * in units of PAGE_CACHE_SIZE.
+ * in units of PAGE_SIZE.
  *
  * If pagecache pages are not associated with an inode, they are said to be
  * anonymous pages. These may become associated with the swapcache, and in that
@@ -1031,6 +1031,8 @@ static inline bool page_mapped(struct page *page)
        page = compound_head(page);
        if (atomic_read(compound_mapcount_ptr(page)) >= 0)
                return true;
+       if (PageHuge(page))
+               return false;
        for (i = 0; i < hpage_nr_pages(page); i++) {
                if (atomic_read(&page[i]._mapcount) >= 0)
                        return true;
@@ -1138,6 +1140,8 @@ struct zap_details {
 
 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
                pte_t pte);
+struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
+                               pmd_t pmd);
 
 int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
                unsigned long size);
@@ -1250,78 +1254,20 @@ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
                            unsigned long start, unsigned long nr_pages,
                            int write, int force, struct page **pages,
                            struct vm_area_struct **vmas);
-long get_user_pages6(unsigned long start, unsigned long nr_pages,
+long get_user_pages(unsigned long start, unsigned long nr_pages,
                            int write, int force, struct page **pages,
                            struct vm_area_struct **vmas);
-long get_user_pages_locked6(unsigned long start, unsigned long nr_pages,
+long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
                    int write, int force, struct page **pages, int *locked);
 long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
                               unsigned long start, unsigned long nr_pages,
                               int write, int force, struct page **pages,
                               unsigned int gup_flags);
-long get_user_pages_unlocked5(unsigned long start, unsigned long nr_pages,
+long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
                    int write, int force, struct page **pages);
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                        struct page **pages);
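
With the renames above (and the deprecation shims removed below), callers operating on their own address space simply drop the tsk/mm pair; only cross-process users move to get_user_pages_remote(). Migration sketch:

        /* before: get_user_pages(current, current->mm, start, nr_pages, 1, 0, pages, NULL); */
        down_read(&current->mm->mmap_sem);
        ret = get_user_pages(start, nr_pages, 1 /* write */, 0 /* force */, pages, NULL);
        up_read(&current->mm->mmap_sem);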
 
-/* suppress warnings from use in EXPORT_SYMBOL() */
-#ifndef __DISABLE_GUP_DEPRECATED
-#define __gup_deprecated __deprecated
-#else
-#define __gup_deprecated
-#endif
-/*
- * These macros provide backward-compatibility with the old
- * get_user_pages() variants which took tsk/mm.  These
- * functions/macros provide both compile-time __deprecated so we
- * can catch old-style use and not break the build.  The actual
- * functions also have WARN_ON()s to let us know at runtime if
- * the get_user_pages() should have been the "remote" variant.
- *
- * These are hideous, but temporary.
- *
- * If you run into one of these __deprecated warnings, look
- * at how you are calling get_user_pages().  If you are calling
- * it with current/current->mm as the first two arguments,
- * simply remove those arguments.  The behavior will be the same
- * as it is now.  If you are calling it on another task, use
- * get_user_pages_remote() instead.
- *
- * Any questions?  Ask Dave Hansen <dave@sr71.net>
- */
-long
-__gup_deprecated
-get_user_pages8(struct task_struct *tsk, struct mm_struct *mm,
-               unsigned long start, unsigned long nr_pages,
-               int write, int force, struct page **pages,
-               struct vm_area_struct **vmas);
-#define GUP_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, get_user_pages, ...) \
-       get_user_pages
-#define get_user_pages(...) GUP_MACRO(__VA_ARGS__,     \
-               get_user_pages8, x,                     \
-               get_user_pages6, x, x, x, x, x)(__VA_ARGS__)
-
-__gup_deprecated
-long get_user_pages_locked8(struct task_struct *tsk, struct mm_struct *mm,
-               unsigned long start, unsigned long nr_pages,
-               int write, int force, struct page **pages,
-               int *locked);
-#define GUPL_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, get_user_pages_locked, ...) \
-       get_user_pages_locked
-#define get_user_pages_locked(...) GUPL_MACRO(__VA_ARGS__,     \
-               get_user_pages_locked8, x,                      \
-               get_user_pages_locked6, x, x, x, x)(__VA_ARGS__)
-
-__gup_deprecated
-long get_user_pages_unlocked7(struct task_struct *tsk, struct mm_struct *mm,
-               unsigned long start, unsigned long nr_pages,
-               int write, int force, struct page **pages);
-#define GUPU_MACRO(_1, _2, _3, _4, _5, _6, _7, get_user_pages_unlocked, ...)   \
-       get_user_pages_unlocked
-#define get_user_pages_unlocked(...) GUPU_MACRO(__VA_ARGS__,   \
-               get_user_pages_unlocked7, x,                    \
-               get_user_pages_unlocked5, x, x, x, x)(__VA_ARGS__)
-
 /* Container for pinned pfns / pages */
 struct frame_vector {
        unsigned int nr_allocated;      /* Number of frames we have space for */
index 944b2b37313b49bffd581963a5f13380a7c6e520..c2d75b4fa86c05012a509dd188e2a394d2cb2170 100644 (file)
@@ -341,7 +341,7 @@ struct vm_area_struct {
 
        /* Information about our backing store: */
        unsigned long vm_pgoff;         /* Offset (within vm_file) in PAGE_SIZE
-                                          units, *not* PAGE_CACHE_SIZE */
+                                          units */
        struct file * vm_file;          /* File we map to (can be NULL). */
        void * vm_private_data;         /* was vm_pte (shared mem) */
 
index 49175e4ced11288c535eb41cf967683858d6bc3b..9aa49a05fe389214c46b6ea48577c813d525a874 100644 (file)
@@ -218,8 +218,7 @@ int sock_create_lite(int family, int type, int proto, struct socket **res);
 struct socket *sock_alloc(void);
 void sock_release(struct socket *sock);
 int sock_sendmsg(struct socket *sock, struct msghdr *msg);
-int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
-                int flags);
+int sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags);
 struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname);
 struct socket *sockfd_lookup(int fd, int *err);
 struct socket *sock_from_file(struct file *file, int *err);
@@ -246,7 +245,15 @@ do {                                                               \
        net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
 #define net_info_ratelimited(fmt, ...)                         \
        net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
-#if defined(DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define net_dbg_ratelimited(fmt, ...)                                  \
+do {                                                                   \
+       DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);                 \
+       if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) &&        \
+           net_ratelimit())                                            \
+               __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__);    \
+} while (0)
+#elif defined(DEBUG)
 #define net_dbg_ratelimited(fmt, ...)                          \
        net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
 #else
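
With the new CONFIG_DYNAMIC_DEBUG branch, a net_dbg_ratelimited() call site costs only a test and a predicted branch until it is enabled at runtime, and remains rate-limited afterwards. Hedged usage sketch (dev and skb assumed):

        net_dbg_ratelimited("%s: dropping packet, len %u\n",
                            dev->name, skb->len);

enabled at runtime with, e.g.:

        echo 'file my_driver.c +p' > /sys/kernel/debug/dynamic_debug/control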
index a734bf43d1902b7e7299d98de54da1d9d303cbd7..bc87362667497fd845a2fcc5ad0eddbf031d1eaf 100644 (file)
@@ -39,6 +39,7 @@ enum {
        NETIF_F_UFO_BIT,                /* ... UDPv4 fragmentation */
        NETIF_F_GSO_ROBUST_BIT,         /* ... ->SKB_GSO_DODGY */
        NETIF_F_TSO_ECN_BIT,            /* ... TCP ECN support */
+       NETIF_F_TSO_MANGLEID_BIT,       /* ... IPV4 ID mangling allowed */
        NETIF_F_TSO6_BIT,               /* ... TCPv6 segmentation */
        NETIF_F_FSO_BIT,                /* ... FCoE segmentation */
        NETIF_F_GSO_GRE_BIT,            /* ... GRE with TSO */
@@ -47,6 +48,10 @@ enum {
        NETIF_F_GSO_SIT_BIT,            /* ... SIT tunnel with TSO */
        NETIF_F_GSO_UDP_TUNNEL_BIT,     /* ... UDP TUNNEL with TSO */
        NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT,/* ... UDP TUNNEL with TSO & CSUM */
+       NETIF_F_GSO_PARTIAL_BIT,        /* ... Only segment inner-most L4
+                                        *     in hardware and all other
+                                        *     headers in software.
+                                        */
        NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */
        /**/NETIF_F_GSO_LAST =          /* last bit, see GSO_MASK */
                NETIF_F_GSO_TUNNEL_REMCSUM_BIT,
@@ -120,6 +125,8 @@ enum {
 #define NETIF_F_GSO_SIT                __NETIF_F(GSO_SIT)
 #define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL)
 #define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM)
+#define NETIF_F_TSO_MANGLEID   __NETIF_F(TSO_MANGLEID)
+#define NETIF_F_GSO_PARTIAL     __NETIF_F(GSO_PARTIAL)
 #define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM)
 #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
 #define NETIF_F_HW_VLAN_STAG_RX        __NETIF_F(HW_VLAN_STAG_RX)
@@ -145,10 +152,6 @@ enum {
 #define NETIF_F_GSO_MASK       (__NETIF_F_BIT(NETIF_F_GSO_LAST + 1) - \
                __NETIF_F_BIT(NETIF_F_GSO_SHIFT))
 
-/* List of features with software fallbacks. */
-#define NETIF_F_GSO_SOFTWARE   (NETIF_F_TSO | NETIF_F_TSO_ECN | \
-                                NETIF_F_TSO6 | NETIF_F_UFO)
-
 /* List of IP checksum features. Note that NETIF_F_ HW_CSUM should not be
  * set in features when NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM are set--
  * this would be contradictory
@@ -156,11 +159,15 @@ enum {
 #define NETIF_F_CSUM_MASK      (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \
                                 NETIF_F_HW_CSUM)
 
-#define NETIF_F_ALL_TSO        (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+#define NETIF_F_ALL_TSO        (NETIF_F_TSO | NETIF_F_TSO6 | \
+                                NETIF_F_TSO_ECN | NETIF_F_TSO_MANGLEID)
 
 #define NETIF_F_ALL_FCOE       (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \
                                 NETIF_F_FSO)
 
+/* List of features with software fallbacks. */
+#define NETIF_F_GSO_SOFTWARE   (NETIF_F_ALL_TSO | NETIF_F_UFO)
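
A hedged sketch of how a driver might opt into the new partial offload: it advertises NETIF_F_GSO_PARTIAL plus the tunnel GSO types it can checksum but not fully segment (feature names are from this patch; the exact set is device-specific):

        dev->hw_features |= NETIF_F_GSO_PARTIAL;
        dev->gso_partial_features = NETIF_F_GSO_GRE_CSUM |
                                    NETIF_F_GSO_UDP_TUNNEL_CSUM;
        dev->features |= dev->hw_features;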
+
 /*
  * If one device supports one of these features, then enable them
  * for all in netdev_increment_features.
index cb4e508b3f3803875c750ae47e605364f4f50ab6..c2f5112f08f703111c56d9bc48e58ef000ff3a4b 100644 (file)
@@ -106,7 +106,6 @@ enum netdev_tx {
        __NETDEV_TX_MIN  = INT_MIN,     /* make sure enum is signed */
        NETDEV_TX_OK     = 0x00,        /* driver took care of packet */
        NETDEV_TX_BUSY   = 0x10,        /* driver tx path was busy */
-       NETDEV_TX_LOCKED = 0x20,        /* driver tx lock was already taken */
 };
 typedef enum netdev_tx netdev_tx_t;
 
@@ -570,28 +569,27 @@ struct netdev_queue {
 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
        int                     numa_node;
 #endif
+       unsigned long           tx_maxrate;
+       /*
+        * Number of TX timeouts for this queue
+        * (/sys/class/net/DEV/Q/trans_timeout)
+        */
+       unsigned long           trans_timeout;
 /*
  * write-mostly part
  */
        spinlock_t              _xmit_lock ____cacheline_aligned_in_smp;
        int                     xmit_lock_owner;
        /*
-        * please use this field instead of dev->trans_start
+        * Time (in jiffies) of last Tx
         */
        unsigned long           trans_start;
 
-       /*
-        * Number of TX timeouts for this queue
-        * (/sys/class/net/DEV/Q/trans_timeout)
-        */
-       unsigned long           trans_timeout;
-
        unsigned long           state;
 
 #ifdef CONFIG_BQL
        struct dql              dql;
 #endif
-       unsigned long           tx_maxrate;
 } ____cacheline_aligned_in_smp;
 
 static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
@@ -831,7 +829,6 @@ struct tc_to_netdev {
  *     the queue before that can happen; it's for obsolete devices and weird
  *     corner cases, but the stack really does a non-trivial amount
  *     of useless work if you return NETDEV_TX_BUSY.
- *        (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
  *     Required; cannot be NULL.
  *
  * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
@@ -1548,7 +1545,6 @@ enum netdev_priv_flags {
  *
  *     @offload_fwd_mark:      Offload device fwding mark
  *
- *     @trans_start:           Time (in jiffies) of last Tx
  *     @watchdog_timeo:        Represents the timeout that is used by
  *                             the watchdog (see dev_watchdog())
  *     @watchdog_timer:        List of timers
@@ -1586,8 +1582,6 @@ enum netdev_priv_flags {
  *     @gso_max_size:  Maximum size of generic segmentation offload
  *     @gso_max_segs:  Maximum number of segments that can be passed to the
  *                     NIC for GSO
- *     @gso_min_segs:  Minimum number of segments that can be passed to the
- *                     NIC for GSO
  *
  *     @dcbnl_ops:     Data Center Bridging netlink ops
  *     @num_tc:        Number of traffic classes in the net device
@@ -1656,6 +1650,7 @@ struct net_device {
        netdev_features_t       vlan_features;
        netdev_features_t       hw_enc_features;
        netdev_features_t       mpls_features;
+       netdev_features_t       gso_partial_features;
 
        int                     ifindex;
        int                     group;
@@ -1798,13 +1793,6 @@ struct net_device {
 #endif
 
        /* These may be needed for future network-power-down code. */
-
-       /*
-        * trans_start here is expensive for high speed devices on SMP,
-        * please use netdev_queue->trans_start instead.
-        */
-       unsigned long           trans_start;
-
        struct timer_list       watchdog_timer;
 
        int __percpu            *pcpu_refcnt;
@@ -1858,7 +1846,7 @@ struct net_device {
        unsigned int            gso_max_size;
 #define GSO_MAX_SEGS           65535
        u16                     gso_max_segs;
-       u16                     gso_min_segs;
+
 #ifdef CONFIG_DCB
        const struct dcbnl_rtnl_ops *dcbnl_ops;
 #endif
@@ -2120,7 +2108,13 @@ struct napi_gro_cb {
        /* Used in foo-over-udp, set in udp[46]_gro_receive */
        u8      is_ipv6:1;
 
-       /* 7 bit hole */
+       /* Used in GRE, set in fou/gue_gro_receive */
+       u8      is_fou:1;
+
+       /* Used to determine if flush_id can be ignored */
+       u8      is_atomic:1;
+
+       /* 5 bit hole */
 
        /* used to support CHECKSUM_COMPLETE for tunneling protocols */
        __wsum  csum;
@@ -2239,6 +2233,8 @@ struct netdev_lag_lower_state_info {
 #define NETDEV_BONDING_INFO    0x0019
 #define NETDEV_PRECHANGEUPPER  0x001A
 #define NETDEV_CHANGELOWERSTATE        0x001B
+#define NETDEV_OFFLOAD_PUSH_VXLAN      0x001C
+#define NETDEV_OFFLOAD_PUSH_GENEVE     0x001D
 
 int register_netdevice_notifier(struct notifier_block *nb);
 int unregister_netdevice_notifier(struct notifier_block *nb);
@@ -2730,7 +2726,6 @@ struct softnet_data {
        /* stats */
        unsigned int            processed;
        unsigned int            time_squeeze;
-       unsigned int            cpu_collision;
        unsigned int            received_rps;
 #ifdef CONFIG_RPS
        struct softnet_data     *rps_ipi_list;
@@ -2743,11 +2738,15 @@ struct softnet_data {
        struct sk_buff          *completion_queue;
 
 #ifdef CONFIG_RPS
-       /* Elements below can be accessed between CPUs for RPS */
+       /* input_queue_head should be written by cpu owning this struct,
+        * and only read by other cpus. Worth using a cache line.
+        */
+       unsigned int            input_queue_head ____cacheline_aligned_in_smp;
+
+       /* Elements below can be accessed between CPUs for RPS/RFS */
        struct call_single_data csd ____cacheline_aligned_in_smp;
        struct softnet_data     *rps_ipi_next;
        unsigned int            cpu;
-       unsigned int            input_queue_head;
        unsigned int            input_queue_tail;
 #endif
        unsigned int            dropped;
@@ -2784,7 +2783,7 @@ static inline void netif_tx_schedule_all(struct net_device *dev)
                netif_schedule_queue(netdev_get_tx_queue(dev, i));
 }
 
-static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
+static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
 {
        clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 }
@@ -2834,7 +2833,7 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev)
        }
 }
 
-static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
+static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
 {
        set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 }
@@ -3256,7 +3255,10 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                                    struct netdev_queue *txq, int *ret);
 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
-bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb);
+bool is_skb_forwardable(const struct net_device *dev,
+                       const struct sk_buff *skb);
+
+void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
 
 extern int             netdev_budget;
 
@@ -3473,6 +3475,15 @@ static inline void txq_trans_update(struct netdev_queue *txq)
                txq->trans_start = jiffies;
 }
 
+/* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
+static inline void netif_trans_update(struct net_device *dev)
+{
+       struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+
+       if (txq->trans_start != jiffies)
+               txq->trans_start = jiffies;
+}
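
Sketch of the intended conversion in a legacy single-queue driver, with a hypothetical my_xmit():

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
        /* ... hand skb to hardware ... */
        netif_trans_update(dev);        /* was: dev->trans_start = jiffies; */
        return NETDEV_TX_OK;
}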
+
 /**
  *     netif_tx_lock - grab network device transmit lock
  *     @dev: network device
@@ -3984,13 +3995,14 @@ netdev_features_t netif_skb_features(struct sk_buff *skb);
 
 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
 {
-       netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;
+       netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;
 
        /* check flags correspondence */
        BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
        BUILD_BUG_ON(SKB_GSO_UDP     != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
        BUILD_BUG_ON(SKB_GSO_DODGY   != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
        BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
+       BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
        BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
        BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
        BUILD_BUG_ON(SKB_GSO_GRE     != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
@@ -3999,6 +4011,7 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type)
        BUILD_BUG_ON(SKB_GSO_SIT     != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT));
        BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
        BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
+       BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
        BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
 
        return (features & feature) == feature;
index f48b8a664b0f8c17951f2710f8af2ff71ad8376d..83b9a2e0d8d46a64dfe0faeda4b67ea6ce972677 100644 (file)
@@ -351,7 +351,8 @@ ip_set_put_skbinfo(struct sk_buff *skb, struct ip_set_skbinfo *skbinfo)
        return ((skbinfo->skbmark || skbinfo->skbmarkmask) &&
                nla_put_net64(skb, IPSET_ATTR_SKBMARK,
                              cpu_to_be64((u64)skbinfo->skbmark << 32 |
-                                         skbinfo->skbmarkmask))) ||
+                                         skbinfo->skbmarkmask),
+                             IPSET_ATTR_PAD)) ||
               (skbinfo->skbprio &&
                nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
                              cpu_to_be32(skbinfo->skbprio))) ||
@@ -374,9 +375,11 @@ static inline bool
 ip_set_put_counter(struct sk_buff *skb, struct ip_set_counter *counter)
 {
        return nla_put_net64(skb, IPSET_ATTR_BYTES,
-                            cpu_to_be64(ip_set_get_bytes(counter))) ||
+                            cpu_to_be64(ip_set_get_bytes(counter)),
+                            IPSET_ATTR_PAD) ||
               nla_put_net64(skb, IPSET_ATTR_PACKETS,
-                            cpu_to_be64(ip_set_get_packets(counter)));
+                            cpu_to_be64(ip_set_get_packets(counter)),
+                            IPSET_ATTR_PAD);
 }
 
 static inline void
index 80a305b85323a3452c30445a1137a29cce5f1600..dc4f58a3cdcc7a635b2c6da55fc0f727b7b7827b 100644 (file)
@@ -242,11 +242,18 @@ void xt_unregister_match(struct xt_match *target);
 int xt_register_matches(struct xt_match *match, unsigned int n);
 void xt_unregister_matches(struct xt_match *match, unsigned int n);
 
+int xt_check_entry_offsets(const void *base, const char *elems,
+                          unsigned int target_offset,
+                          unsigned int next_offset);
+
 int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
                   bool inv_proto);
 int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
                    bool inv_proto);
 
+void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
+                                struct xt_counters_info *info, bool compat);
+
 struct xt_table *xt_register_table(struct net *net,
                                   const struct xt_table *table,
                                   struct xt_table_info *bootstrap,
@@ -373,16 +380,16 @@ static inline unsigned long ifname_compare_aligned(const char *_a,
  * allows us to return 0 for single core systems without forcing
  * callers to deal with SMP vs. NONSMP issues.
  */
-static inline u64 xt_percpu_counter_alloc(void)
+static inline unsigned long xt_percpu_counter_alloc(void)
 {
        if (nr_cpu_ids > 1) {
                void __percpu *res = __alloc_percpu(sizeof(struct xt_counters),
                                                    sizeof(struct xt_counters));
 
                if (res == NULL)
-                       return (u64) -ENOMEM;
+                       return -ENOMEM;
 
-               return (u64) (__force unsigned long) res;
+               return (__force unsigned long) res;
        }
 
        return 0;
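
Because the return value is now an unsigned long cookie, callers can test it with IS_ERR_VALUE() instead of casting through u64; a sketch, with e a hypothetical table entry:

        e->counters.pcnt = xt_percpu_counter_alloc();
        if (IS_ERR_VALUE(e->counters.pcnt))
                return -ENOMEM;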
@@ -480,7 +487,7 @@ void xt_compat_init_offsets(u_int8_t af, unsigned int number);
 int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
 
 int xt_compat_match_offset(const struct xt_match *match);
-int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
+void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
                              unsigned int *size);
 int xt_compat_match_to_user(const struct xt_entry_match *m,
                            void __user **dstptr, unsigned int *size);
@@ -490,6 +497,9 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
                                unsigned int *size);
 int xt_compat_target_to_user(const struct xt_entry_target *t,
                             void __user **dstptr, unsigned int *size);
+int xt_compat_check_entry_offsets(const void *base, const char *elems,
+                                 unsigned int target_offset,
+                                 unsigned int next_offset);
 
 #endif /* CONFIG_COMPAT */
 #endif /* _X_TABLES_H */
index f2f650f136ee6fe181dfa27b71d7b944fc1f9357..957049f72290d4b6c328845535c664aea4afc7c0 100644 (file)
@@ -41,8 +41,8 @@ struct nfs_page {
        struct page             *wb_page;       /* page to read in/write out */
        struct nfs_open_context *wb_context;    /* File state context info */
        struct nfs_lock_context *wb_lock_context;       /* lock context info */
-       pgoff_t                 wb_index;       /* Offset >> PAGE_CACHE_SHIFT */
-       unsigned int            wb_offset,      /* Offset & ~PAGE_CACHE_MASK */
+       pgoff_t                 wb_index;       /* Offset >> PAGE_SHIFT */
+       unsigned int            wb_offset,      /* Offset & ~PAGE_MASK */
                                wb_pgbase,      /* Start of page data */
                                wb_bytes;       /* Length of request */
        struct kref             wb_kref;        /* reference count */
@@ -184,7 +184,7 @@ nfs_list_entry(struct list_head *head)
 static inline
 loff_t req_offset(struct nfs_page *req)
 {
-       return (((loff_t)req->wb_index) << PAGE_CACHE_SHIFT) + req->wb_offset;
+       return (((loff_t)req->wb_index) << PAGE_SHIFT) + req->wb_offset;
 }
 
 #endif /* _LINUX_NFS_PAGE_H */
index 9abb763e4b863e81aedcd34b868a597b3aaf3567..e9fcf90b270d29f72dbfff44a3fc52b25bc1fc59 100644 (file)
@@ -331,7 +331,7 @@ static inline unsigned nilfs_rec_len_from_disk(__le16 dlen)
 {
        unsigned len = le16_to_cpu(dlen);
 
-#if !defined(__KERNEL__) || (PAGE_CACHE_SIZE >= 65536)
+#if !defined(__KERNEL__) || (PAGE_SIZE >= 65536)
        if (len == NILFS_MAX_REC_LEN)
                return 1 << 16;
 #endif
@@ -340,7 +340,7 @@ static inline unsigned nilfs_rec_len_from_disk(__le16 dlen)
 
 static inline __le16 nilfs_rec_len_to_disk(unsigned len)
 {
-#if !defined(__KERNEL__) || (PAGE_CACHE_SIZE >= 65536)
+#if !defined(__KERNEL__) || (PAGE_SIZE >= 65536)
        if (len == (1 << 16))
                return cpu_to_le16(NILFS_MAX_REC_LEN);
        else if (len > (1 << 16))
index 167342c2ce6b05d29d05dcc529023eac5cadb9ec..0f6f6607f5927f555e555bb9d771e14d41724d36 100644 (file)
@@ -92,6 +92,8 @@ enum {
        IEEE802154_ATTR_LLSEC_DEV_OVERRIDE,
        IEEE802154_ATTR_LLSEC_DEV_KEY_MODE,
 
+       IEEE802154_ATTR_PAD,
+
        __IEEE802154_ATTR_MAX,
 };
 
index 7fcb681baadf172824017d067721c65189638565..31758036787c3a490bb4aa31dda27b8a12be48da 100644 (file)
@@ -133,7 +133,7 @@ void of_core_init(void);
 
 static inline bool is_of_node(struct fwnode_handle *fwnode)
 {
-       return fwnode && fwnode->type == FWNODE_OF;
+       return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_OF;
 }
 
 static inline struct device_node *to_of_node(struct fwnode_handle *fwnode)
index f4ed4f1b0c77ac84f65a4586502863fd78448063..6b052aa7b5b79de6712ee36fe3a424f6f93bd548 100644 (file)
@@ -516,6 +516,27 @@ static inline int PageTransCompound(struct page *page)
        return PageCompound(page);
 }
 
+/*
+ * PageTransCompoundMap is the same as PageTransCompound, but it also
+ * guarantees the primary MMU has the entire compound page mapped
+ * through pmd_trans_huge, which in turn guarantees the secondary MMUs
+ * can also map the entire compound page. This allows the secondary
+ * MMUs to call get_user_pages() only once for each compound page and
+ * to immediately map the entire compound page with a single secondary
+ * MMU fault. If there will be a pmd split later, the secondary MMUs
+ * will get an update through the MMU notifier invalidation through
+ * split_huge_pmd().
+ *
+ * Unlike PageTransCompound, this is safe to call only while
+ * split_huge_pmd() cannot run from under us, e.g. when protected by the
+ * MMU notifier; otherwise the page->_mapcount < 0 check may yield
+ * false positives.
+ */
+static inline int PageTransCompoundMap(struct page *page)
+{
+       return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0;
+}
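
A hedged sketch of the intended consumer, a secondary MMU (e.g. KVM) deciding whether it may map the whole compound page with one huge entry; the helper name is hypothetical:

        if (PageTransCompoundMap(page))
                /* the primary MMU maps the full THP via pmd_trans_huge,
                 * so a huge secondary mapping is safe too */
                map_compound_page_huge(page);   /* hypothetical */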
+
 /*
  * PageTransTail returns true for both transparent huge pages
  * and hugetlbfs pages, so it should only be called when it's known
@@ -559,6 +580,7 @@ static inline int TestClearPageDoubleMap(struct page *page)
 #else
 TESTPAGEFLAG_FALSE(TransHuge)
 TESTPAGEFLAG_FALSE(TransCompound)
+TESTPAGEFLAG_FALSE(TransCompoundMap)
 TESTPAGEFLAG_FALSE(TransTail)
 TESTPAGEFLAG_FALSE(DoubleMap)
        TESTSETFLAG_FALSE(DoubleMap)
index 1ebd65c914220efa86814b7353353381bcb10d08..7e1ab155c67c78dd6e41defebd238bce0d59af7f 100644 (file)
@@ -86,21 +86,6 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
                                (__force unsigned long)mask;
 }
 
-/*
- * The page cache can be done in larger chunks than
- * one page, because it allows for more efficient
- * throughput (it can then be mapped into user
- * space in smaller chunks for same flexibility).
- *
- * Or rather, it _will_ be done in larger chunks.
- */
-#define PAGE_CACHE_SHIFT       PAGE_SHIFT
-#define PAGE_CACHE_SIZE                PAGE_SIZE
-#define PAGE_CACHE_MASK                PAGE_MASK
-#define PAGE_CACHE_ALIGN(addr) (((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
-
-#define page_cache_get(page)           get_page(page)
-#define page_cache_release(page)       put_page(page)
 void release_pages(struct page **pages, int nr, bool cold);
 
 /*
@@ -390,13 +375,13 @@ static inline pgoff_t page_to_pgoff(struct page *page)
                return page->index << compound_order(page);
 
        if (likely(!PageTransTail(page)))
-               return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+               return page->index;
 
        /*
         *  We don't initialize ->index for tail pages: calculate based on
         *  head page
         */
-       pgoff = compound_head(page)->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+       pgoff = compound_head(page)->index;
        pgoff += page - compound_head(page);
        return pgoff;
 }
@@ -406,12 +391,12 @@ static inline pgoff_t page_to_pgoff(struct page *page)
  */
 static inline loff_t page_offset(struct page *page)
 {
-       return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
+       return ((loff_t)page->index) << PAGE_SHIFT;
 }
 
 static inline loff_t page_file_offset(struct page *page)
 {
-       return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT;
+       return ((loff_t)page_file_index(page)) << PAGE_SHIFT;
 }
 
 extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
@@ -425,7 +410,7 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
                return linear_hugepage_index(vma, address);
        pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
        pgoff += vma->vm_pgoff;
-       return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+       return pgoff;
 }
 
 extern void __lock_page(struct page *page);
@@ -535,8 +520,7 @@ extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
 /*
  * Fault a userspace page into pagetables.  Return non-zero on a fault.
  *
- * This assumes that two userspace pages are always sufficient.  That's
- * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
+ * This assumes that two userspace pages are always sufficient.
  */
 static inline int fault_in_pages_writeable(char __user *uaddr, int size)
 {
@@ -671,8 +655,8 @@ static inline int add_to_page_cache(struct page *page,
 
 static inline unsigned long dir_pages(struct inode *inode)
 {
-       return (unsigned long)(inode->i_size + PAGE_CACHE_SIZE - 1) >>
-                              PAGE_CACHE_SHIFT;
+       return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
+                              PAGE_SHIFT;
 }
 
 #endif /* _LINUX_PAGEMAP_H */
index 004b8133417dc9dd54315b1ca5f15a9cc28e9a2a..932ec74909c6fc79530f262c06eb2ff6ad0e9b21 100644 (file)
@@ -1111,6 +1111,7 @@ void pci_unlock_rescan_remove(void);
 /* Vital product data routines */
 ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
 ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
+int pci_set_vpd_size(struct pci_dev *dev, size_t len);
 
 /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
 resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
index 2abd7918f64ff198ac6d14f85457f7ffccd6f0b7..2d24b283aa2d989b83caec4323c2126c4256c664 100644 (file)
@@ -805,6 +805,10 @@ void phy_start_machine(struct phy_device *phydev);
 void phy_stop_machine(struct phy_device *phydev);
 int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
 int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
+int phy_ethtool_ksettings_get(struct phy_device *phydev,
+                             struct ethtool_link_ksettings *cmd);
+int phy_ethtool_ksettings_set(struct phy_device *phydev,
+                             const struct ethtool_link_ksettings *cmd);
 int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd);
 int phy_start_interrupts(struct phy_device *phydev);
 void phy_print_status(struct phy_device *phydev);
@@ -825,6 +829,10 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data);
 int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol);
 void phy_ethtool_get_wol(struct phy_device *phydev,
                         struct ethtool_wolinfo *wol);
+int phy_ethtool_get_link_ksettings(struct net_device *ndev,
+                                  struct ethtool_link_ksettings *cmd);
+int phy_ethtool_set_link_ksettings(struct net_device *ndev,
+                                  const struct ethtool_link_ksettings *cmd);
 
 int __init mdio_bus_init(void);
 void mdio_bus_exit(void);
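
For MAC drivers whose link management lives entirely in phylib, the new helpers can be plugged straight into ethtool_ops (sketch; assumes the driver sets ndev->phydev):

static const struct ethtool_ops my_ethtool_ops = {
        .get_link_ksettings     = phy_ethtool_get_link_ksettings,
        .set_link_ksettings     = phy_ethtool_set_link_ksettings,
};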
index ac6d872ce067e19fb99bb57e7b0505e20732b7cd..57d146fe44dd84e926199d81a8ca0a74a2d012fc 100644 (file)
@@ -72,6 +72,18 @@ static inline void arch_invalidate_pmem(void __pmem *addr, size_t size)
 }
 #endif
 
+static inline bool arch_has_pmem_api(void)
+{
+       return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API);
+}
+
+static inline int default_memcpy_from_pmem(void *dst, void __pmem const *src,
+               size_t size)
+{
+       memcpy(dst, (void __force *) src, size);
+       return 0;
+}
+
 /*
  * memcpy_from_pmem - read from persistent memory with error handling
  * @dst: destination buffer
@@ -83,12 +95,10 @@ static inline void arch_invalidate_pmem(void __pmem *addr, size_t size)
 static inline int memcpy_from_pmem(void *dst, void __pmem const *src,
                size_t size)
 {
-       return arch_memcpy_from_pmem(dst, src, size);
-}
-
-static inline bool arch_has_pmem_api(void)
-{
-       return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API);
+       if (arch_has_pmem_api())
+               return arch_memcpy_from_pmem(dst, src, size);
+       else
+               return default_memcpy_from_pmem(dst, src, size);
 }
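
Callers no longer need to gate on arch_has_pmem_api() themselves; a read path reduces to the following sketch (buffer names assumed):

        if (memcpy_from_pmem(buf, pmem_addr, len))
                return -EIO;    /* media error reported by the arch copy */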
 
 /**
index e1d69834a11fb38c45fe8e6fe0b3d0ad53af7ee4..3a4c806be156a8cd9163b01f45f2e1bb599cd4a5 100644 (file)
@@ -27,6 +27,7 @@ struct qed_dev_eth_info {
 struct qed_update_vport_rss_params {
        u16     rss_ind_table[128];
        u32     rss_key[10];
+       u8      rss_caps;
 };
 
 struct qed_update_vport_params {
@@ -111,6 +112,13 @@ struct qed_queue_start_common_params {
        u16 sb_idx;
 };
 
+struct qed_tunn_params {
+       u16 vxlan_port;
+       u8 update_vxlan_port;
+       u16 geneve_port;
+       u8 update_geneve_port;
+};
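
Hedged sketch of a protocol driver pushing a new VXLAN UDP port to the device through the new callback (ops and cdev names assumed):

        struct qed_tunn_params tunn = {
                .update_vxlan_port = 1,
                .vxlan_port = 4789,     /* IANA-assigned VXLAN port */
        };

        ops->tunn_config(cdev, &tunn);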
+
 struct qed_eth_cb_ops {
        struct qed_common_cb_ops common;
 };
@@ -165,9 +173,12 @@ struct qed_eth_ops {
 
        void (*get_vport_stats)(struct qed_dev *cdev,
                                struct qed_eth_stats *stats);
+
+       int (*tunn_config)(struct qed_dev *cdev,
+                          struct qed_tunn_params *params);
 };
 
-const struct qed_eth_ops *qed_get_eth_ops(u32 version);
+const struct qed_eth_ops *qed_get_eth_ops(void);
 void qed_put_eth_ops(void);
 
 #endif
index 1f7599c77cd4185de5579d6c5801655706a982f3..d72c832a9397a6e89e37874bab89ee1c6c052ac5 100644 (file)
@@ -110,6 +110,7 @@ struct qed_link_params {
 #define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS      BIT(1)
 #define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED    BIT(2)
 #define QED_LINK_OVERRIDE_PAUSE_CONFIG          BIT(3)
+#define QED_LINK_OVERRIDE_LOOPBACK_MODE         BIT(4)
        u32     override_flags;
        bool    autoneg;
        u32     adv_speeds;
@@ -118,6 +119,12 @@ struct qed_link_params {
 #define QED_LINK_PAUSE_RX_ENABLE                BIT(1)
 #define QED_LINK_PAUSE_TX_ENABLE                BIT(2)
        u32     pause_config;
+#define QED_LINK_LOOPBACK_NONE                  BIT(0)
+#define QED_LINK_LOOPBACK_INT_PHY               BIT(1)
+#define QED_LINK_LOOPBACK_EXT_PHY               BIT(2)
+#define QED_LINK_LOOPBACK_EXT                   BIT(3)
+#define QED_LINK_LOOPBACK_MAC                   BIT(4)
+       u32     loopback_mode;
 };
 
 struct qed_link_output {
@@ -158,7 +165,47 @@ struct qed_common_cb_ops {
                               struct qed_link_output   *link);
 };
 
+struct qed_selftest_ops {
+/**
+ * @brief selftest_interrupt - Perform interrupt test
+ *
+ * @param cdev
+ *
+ * @return 0 on success, error otherwise.
+ */
+       int (*selftest_interrupt)(struct qed_dev *cdev);
+
+/**
+ * @brief selftest_memory - Perform memory test
+ *
+ * @param cdev
+ *
+ * @return 0 on success, error otherwise.
+ */
+       int (*selftest_memory)(struct qed_dev *cdev);
+
+/**
+ * @brief selftest_register - Perform register test
+ *
+ * @param cdev
+ *
+ * @return 0 on success, error otherwise.
+ */
+       int (*selftest_register)(struct qed_dev *cdev);
+
+/**
+ * @brief selftest_clock - Perform clock test
+ *
+ * @param cdev
+ *
+ * @return 0 on success, error otherwise.
+ */
+       int (*selftest_clock)(struct qed_dev *cdev);
+};
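
Hedged sketch of the consumer side, e.g. an ethtool self-test handler in the protocol driver (ops, cdev and etest names assumed):

        if (ops->selftest->selftest_interrupt(cdev) ||
            ops->selftest->selftest_memory(cdev) ||
            ops->selftest->selftest_register(cdev) ||
            ops->selftest->selftest_clock(cdev))
                etest->flags |= ETH_TEST_FL_FAILED;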
+
 struct qed_common_ops {
+       struct qed_selftest_ops *selftest;
+
        struct qed_dev* (*probe)(struct pci_dev *dev,
                                 enum qed_protocol protocol,
                                 u32 dp_module, u8 dp_level);
@@ -211,6 +258,16 @@ struct qed_common_ops {
 
        void            (*simd_handler_clean)(struct qed_dev *cdev,
                                              int index);
+
+/**
+ * @brief can_link_change - can the instance change the link or not
+ *
+ * @param cdev
+ *
+ * @return true if link-change is allowed, false otherwise.
+ */
+       bool (*can_link_change)(struct qed_dev *cdev);
+
 /**
  * @brief set_link - set links according to params
  *
@@ -271,15 +328,6 @@ struct qed_common_ops {
                       enum qed_led_mode mode);
 };
 
-/**
- * @brief qed_get_protocol_version
- *
- * @param protocol
- *
- * @return version supported by qed for given protocol driver
- */
-u32 qed_get_protocol_version(enum qed_protocol protocol);
-
 #define MASK_FIELD(_name, _value) \
        ((_value) &= (_name ## _MASK))
 
@@ -393,16 +441,16 @@ struct qed_eth_stats {
 
        /* port */
        u64     rx_64_byte_packets;
-       u64     rx_127_byte_packets;
-       u64     rx_255_byte_packets;
-       u64     rx_511_byte_packets;
-       u64     rx_1023_byte_packets;
-       u64     rx_1518_byte_packets;
-       u64     rx_1522_byte_packets;
-       u64     rx_2047_byte_packets;
-       u64     rx_4095_byte_packets;
-       u64     rx_9216_byte_packets;
-       u64     rx_16383_byte_packets;
+       u64     rx_65_to_127_byte_packets;
+       u64     rx_128_to_255_byte_packets;
+       u64     rx_256_to_511_byte_packets;
+       u64     rx_512_to_1023_byte_packets;
+       u64     rx_1024_to_1518_byte_packets;
+       u64     rx_1519_to_1522_byte_packets;
+       u64     rx_1519_to_2047_byte_packets;
+       u64     rx_2048_to_4095_byte_packets;
+       u64     rx_4096_to_9216_byte_packets;
+       u64     rx_9217_to_16383_byte_packets;
        u64     rx_crc_errors;
        u64     rx_mac_crtl_frames;
        u64     rx_pause_frames;
@@ -524,4 +572,15 @@ static inline void internal_ram_wr(void __iomem *addr,
        __internal_ram_wr(NULL, addr, size, data);
 }
 
+enum qed_rss_caps {
+       QED_RSS_IPV4            = 0x1,
+       QED_RSS_IPV6            = 0x2,
+       QED_RSS_IPV4_TCP        = 0x4,
+       QED_RSS_IPV6_TCP        = 0x8,
+       QED_RSS_IPV4_UDP        = 0x10,
+       QED_RSS_IPV6_UDP        = 0x20,
+};
+
+#define QED_RSS_IND_TABLE_SIZE 128
+#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */
 #endif
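
Hedged sketch of requesting 4-tuple hashing for TCP with the new caps field (indirection table and key setup elided):

        struct qed_update_vport_rss_params rss = {};

        rss.rss_caps = QED_RSS_IPV4 | QED_RSS_IPV6 |
                       QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;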
index 17d4f849c65e3dcf5dfbbd8100abf1236073ad55..8beb98dcf14f066fc4e9075ed0b94cc29998c9bc 100644 (file)
@@ -487,6 +487,42 @@ static inline void hlist_add_head_rcu(struct hlist_node *n,
                first->pprev = &n->next;
 }
 
+/**
+ * hlist_add_tail_rcu
+ * @n: the element to add to the hash list.
+ * @h: the list to add to.
+ *
+ * Description:
+ * Adds the specified element to the end of the specified hlist,
+ * while permitting racing traversals.  NOTE: tail insertion requires
+ * list traversal.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_add_head_rcu()
+ * or hlist_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_for_each_entry_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs.  Regardless of the type of CPU, the
+ * list-traversal primitive must be guarded by rcu_read_lock().
+ */
+static inline void hlist_add_tail_rcu(struct hlist_node *n,
+                                     struct hlist_head *h)
+{
+       struct hlist_node *i, *last = NULL;
+
+       for (i = hlist_first_rcu(h); i; i = hlist_next_rcu(i))
+               last = i;
+
+       if (last) {
+               n->next = last->next;
+               n->pprev = &last->next;
+               rcu_assign_pointer(hlist_next_rcu(last), n);
+       } else {
+               hlist_add_head_rcu(n, h);
+       }
+}
+
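
As the comment says, hlist_add_tail_rcu() walks the whole chain to find the tail, so insertion is O(n); readers may run concurrently under rcu_read_lock() while writers serialize among themselves. A minimal usage sketch, assuming a hypothetical item type and a writer-side spinlock:

    struct item {
            int key;
            struct hlist_node node;
    };

    static HLIST_HEAD(items);
    static DEFINE_SPINLOCK(items_lock);     /* serializes writers only */

    static void item_insert_tail(struct item *it)
    {
            spin_lock(&items_lock);
            hlist_add_tail_rcu(&it->node, &items);
            spin_unlock(&items_lock);
    }

    static bool item_present(int key)
    {
            struct item *it;
            bool found = false;

            rcu_read_lock();
            hlist_for_each_entry_rcu(it, &items, node) {
                    if (it->key == key) {
                            found = true;
                            break;
                    }
            }
            rcu_read_unlock();
            return found;
    }
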
 /**
  * hlist_add_before_rcu
  * @n: the new element to add to the hash list.
index 1c33dd7da4a7d860004264e00bd406645df1ed96..4ae95f7e8597b0b43575d04aaf524cf252761e6e 100644 (file)
@@ -98,6 +98,45 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
        if (!is_a_nulls(first))
                first->pprev = &n->next;
 }
+
+/**
+ * hlist_nulls_add_tail_rcu
+ * @n: the element to add to the hash list.
+ * @h: the list to add to.
+ *
+ * Description:
+ * Adds the specified element to the end of the specified hlist_nulls,
+ * while permitting racing traversals.  NOTE: tail insertion requires
+ * list traversal.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
+ * or hlist_nulls_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs.  Regardless of the type of CPU, the
+ * list-traversal primitive must be guarded by rcu_read_lock().
+ */
+static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
+                                       struct hlist_nulls_head *h)
+{
+       struct hlist_nulls_node *i, *last = NULL;
+
+       for (i = hlist_nulls_first_rcu(h); !is_a_nulls(i);
+            i = hlist_nulls_next_rcu(i))
+               last = i;
+
+       if (last) {
+               n->next = last->next;
+               n->pprev = &last->next;
+               rcu_assign_pointer(hlist_nulls_next_rcu(last), n);
+       } else {
+               hlist_nulls_add_head_rcu(n, h);
+       }
+}
+
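
The nulls variant matters because nulls lists terminate in an encoded marker rather than NULL, letting a lock-free reader detect that an object it was walking through got freed and recycled onto a different chain, and restart. A sketch of that restart pattern, assuming buckets were initialized with INIT_HLIST_NULLS_HEAD(&table[slot], slot) and a hypothetical nitem type (this mirrors how the TCP/UDP established-hash lookups use these lists):

    struct nitem {
            int key;
            struct hlist_nulls_node node;
    };

    /* Caller holds rcu_read_lock().  "slot" doubles as the nulls value the
     * chain was initialized with, so a foreign terminator means the walk
     * strayed onto another chain and must be restarted.
     */
    static struct nitem *nitem_lookup(struct hlist_nulls_head *table,
                                      unsigned int slot, int key)
    {
            struct hlist_nulls_node *node;
            struct nitem *it;

    begin:
            hlist_nulls_for_each_entry_rcu(it, node, &table[slot], node)
                    if (it->key == key)
                            return it;

            if (get_nulls_value(node) != slot)
                    goto begin;
            return NULL;
    }
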
 /**
  * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
  * @tpos:      the type * to use as a loop cursor.
diff --git a/include/linux/rio_mport_cdev.h b/include/linux/rio_mport_cdev.h
deleted file mode 100644 (file)
index b65d19d..0000000
+++ /dev/null
@@ -1,271 +0,0 @@
-/*
- * Copyright (c) 2015-2016, Integrated Device Technology Inc.
- * Copyright (c) 2015, Prodrive Technologies
- * Copyright (c) 2015, Texas Instruments Incorporated
- * Copyright (c) 2015, RapidIO Trade Association
- * All rights reserved.
- *
- * This software is available to you under a choice of one of two licenses.
- * You may choose to be licensed under the terms of the GNU General Public
- * License(GPL) Version 2, or the BSD-3 Clause license below:
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * 3. Neither the name of the copyright holder nor the names of its contributors
- * may be used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RIO_MPORT_CDEV_H_
-#define _RIO_MPORT_CDEV_H_
-
-#ifndef __user
-#define __user
-#endif
-
-struct rio_mport_maint_io {
-       uint32_t rioid;         /* destID of remote device */
-       uint32_t hopcount;      /* hopcount to remote device */
-       uint32_t offset;        /* offset in register space */
-       size_t length;          /* length in bytes */
-       void __user *buffer;    /* data buffer */
-};
-
-/*
- * Definitions for RapidIO data transfers:
- * - memory mapped (MAPPED)
- * - packet generation from memory (TRANSFER)
- */
-#define RIO_TRANSFER_MODE_MAPPED       (1 << 0)
-#define RIO_TRANSFER_MODE_TRANSFER     (1 << 1)
-#define RIO_CAP_DBL_SEND               (1 << 2)
-#define RIO_CAP_DBL_RECV               (1 << 3)
-#define RIO_CAP_PW_SEND                        (1 << 4)
-#define RIO_CAP_PW_RECV                        (1 << 5)
-#define RIO_CAP_MAP_OUTB               (1 << 6)
-#define RIO_CAP_MAP_INB                        (1 << 7)
-
-struct rio_mport_properties {
-       uint16_t hdid;
-       uint8_t id;                     /* Physical port ID */
-       uint8_t  index;
-       uint32_t flags;
-       uint32_t sys_size;              /* Default addressing size */
-       uint8_t  port_ok;
-       uint8_t  link_speed;
-       uint8_t  link_width;
-       uint32_t dma_max_sge;
-       uint32_t dma_max_size;
-       uint32_t dma_align;
-       uint32_t transfer_mode;         /* Default transfer mode */
-       uint32_t cap_sys_size;          /* Capable system sizes */
-       uint32_t cap_addr_size;         /* Capable addressing sizes */
-       uint32_t cap_transfer_mode;     /* Capable transfer modes */
-       uint32_t cap_mport;             /* Mport capabilities */
-};
-
-/*
- * Definitions for RapidIO events;
- * - incoming port-writes
- * - incoming doorbells
- */
-#define RIO_DOORBELL   (1 << 0)
-#define RIO_PORTWRITE  (1 << 1)
-
-struct rio_doorbell {
-       uint32_t rioid;
-       uint16_t payload;
-};
-
-struct rio_doorbell_filter {
-       uint32_t rioid;                 /* 0xffffffff to match all ids */
-       uint16_t low;
-       uint16_t high;
-};
-
-
-struct rio_portwrite {
-       uint32_t payload[16];
-};
-
-struct rio_pw_filter {
-       uint32_t mask;
-       uint32_t low;
-       uint32_t high;
-};
-
-/* RapidIO base address for inbound requests set to value defined below
- * indicates that no specific RIO-to-local address translation is requested
- * and driver should use direct (one-to-one) address mapping.
-*/
-#define RIO_MAP_ANY_ADDR       (uint64_t)(~((uint64_t) 0))
-
-struct rio_mmap {
-       uint32_t rioid;
-       uint64_t rio_addr;
-       uint64_t length;
-       uint64_t handle;
-       void *address;
-};
-
-struct rio_dma_mem {
-       uint64_t length;                /* length of DMA memory */
-       uint64_t dma_handle;            /* handle associated with this memory */
-       void *buffer;                   /* pointer to this memory */
-};
-
-
-struct rio_event {
-       unsigned int header;    /* event type RIO_DOORBELL or RIO_PORTWRITE */
-       union {
-               struct rio_doorbell doorbell;   /* header for RIO_DOORBELL */
-               struct rio_portwrite portwrite; /* header for RIO_PORTWRITE */
-       } u;
-};
-
-enum rio_transfer_sync {
-       RIO_TRANSFER_SYNC,      /* synchronous transfer */
-       RIO_TRANSFER_ASYNC,     /* asynchronous transfer */
-       RIO_TRANSFER_FAF,       /* fire-and-forget transfer */
-};
-
-enum rio_transfer_dir {
-       RIO_TRANSFER_DIR_READ,  /* Read operation */
-       RIO_TRANSFER_DIR_WRITE, /* Write operation */
-};
-
-/*
- * RapidIO data exchange transactions are lists of individual transfers. Each
- * transfer exchanges data between two RapidIO devices by remote direct memory
- * access and has its own completion code.
- *
- * The RapidIO specification defines four types of data exchange requests:
- * NREAD, NWRITE, SWRITE and NWRITE_R. The RapidIO DMA channel interface allows
- * to specify the required type of write operation or combination of them when
- * only the last data packet requires response.
- *
- * NREAD:    read up to 256 bytes from remote device memory into local memory
- * NWRITE:   write up to 256 bytes from local memory to remote device memory
- *           without confirmation
- * SWRITE:   as NWRITE, but all addresses and payloads must be 64-bit aligned
- * NWRITE_R: as NWRITE, but expect acknowledgment from remote device.
- *
- * The default exchange is chosen from NREAD and any of the WRITE modes as the
- * driver sees fit. For write requests the user can explicitly choose between
- * any of the write modes for each transaction.
- */
-enum rio_exchange {
-       RIO_EXCHANGE_DEFAULT,   /* Default method */
-       RIO_EXCHANGE_NWRITE,    /* All packets using NWRITE */
-       RIO_EXCHANGE_SWRITE,    /* All packets using SWRITE */
-       RIO_EXCHANGE_NWRITE_R,  /* Last packet NWRITE_R, others NWRITE */
-       RIO_EXCHANGE_SWRITE_R,  /* Last packet NWRITE_R, others SWRITE */
-       RIO_EXCHANGE_NWRITE_R_ALL, /* All packets using NWRITE_R */
-};
-
-struct rio_transfer_io {
-       uint32_t rioid;                 /* Target destID */
-       uint64_t rio_addr;              /* Address in target's RIO mem space */
-       enum rio_exchange method;       /* Data exchange method */
-       void __user *loc_addr;
-       uint64_t handle;
-       uint64_t offset;                /* Offset in buffer */
-       uint64_t length;                /* Length in bytes */
-       uint32_t completion_code;       /* Completion code for this transfer */
-};
-
-struct rio_transaction {
-       uint32_t transfer_mode;         /* Data transfer mode */
-       enum rio_transfer_sync sync;    /* Synchronization method */
-       enum rio_transfer_dir dir;      /* Transfer direction */
-       size_t count;                   /* Number of transfers */
-       struct rio_transfer_io __user *block;   /* Array of <count> transfers */
-};
-
-struct rio_async_tx_wait {
-       uint32_t token;         /* DMA transaction ID token */
-       uint32_t timeout;       /* Wait timeout in msec, if 0 use default TO */
-};
-
-#define RIO_MAX_DEVNAME_SZ     20
-
-struct rio_rdev_info {
-       uint32_t destid;
-       uint8_t hopcount;
-       uint32_t comptag;
-       char name[RIO_MAX_DEVNAME_SZ + 1];
-};
-
-/* Driver IOCTL codes */
-#define RIO_MPORT_DRV_MAGIC           'm'
-
-#define RIO_MPORT_MAINT_HDID_SET       \
-       _IOW(RIO_MPORT_DRV_MAGIC, 1, uint16_t)
-#define RIO_MPORT_MAINT_COMPTAG_SET    \
-       _IOW(RIO_MPORT_DRV_MAGIC, 2, uint32_t)
-#define RIO_MPORT_MAINT_PORT_IDX_GET   \
-       _IOR(RIO_MPORT_DRV_MAGIC, 3, uint32_t)
-#define RIO_MPORT_GET_PROPERTIES \
-       _IOR(RIO_MPORT_DRV_MAGIC, 4, struct rio_mport_properties)
-#define RIO_MPORT_MAINT_READ_LOCAL \
-       _IOR(RIO_MPORT_DRV_MAGIC, 5, struct rio_mport_maint_io)
-#define RIO_MPORT_MAINT_WRITE_LOCAL \
-       _IOW(RIO_MPORT_DRV_MAGIC, 6, struct rio_mport_maint_io)
-#define RIO_MPORT_MAINT_READ_REMOTE \
-       _IOR(RIO_MPORT_DRV_MAGIC, 7, struct rio_mport_maint_io)
-#define RIO_MPORT_MAINT_WRITE_REMOTE \
-       _IOW(RIO_MPORT_DRV_MAGIC, 8, struct rio_mport_maint_io)
-#define RIO_ENABLE_DOORBELL_RANGE      \
-       _IOW(RIO_MPORT_DRV_MAGIC, 9, struct rio_doorbell_filter)
-#define RIO_DISABLE_DOORBELL_RANGE     \
-       _IOW(RIO_MPORT_DRV_MAGIC, 10, struct rio_doorbell_filter)
-#define RIO_ENABLE_PORTWRITE_RANGE     \
-       _IOW(RIO_MPORT_DRV_MAGIC, 11, struct rio_pw_filter)
-#define RIO_DISABLE_PORTWRITE_RANGE    \
-       _IOW(RIO_MPORT_DRV_MAGIC, 12, struct rio_pw_filter)
-#define RIO_SET_EVENT_MASK             \
-       _IOW(RIO_MPORT_DRV_MAGIC, 13, unsigned int)
-#define RIO_GET_EVENT_MASK             \
-       _IOR(RIO_MPORT_DRV_MAGIC, 14, unsigned int)
-#define RIO_MAP_OUTBOUND \
-       _IOWR(RIO_MPORT_DRV_MAGIC, 15, struct rio_mmap)
-#define RIO_UNMAP_OUTBOUND \
-       _IOW(RIO_MPORT_DRV_MAGIC, 16, struct rio_mmap)
-#define RIO_MAP_INBOUND \
-       _IOWR(RIO_MPORT_DRV_MAGIC, 17, struct rio_mmap)
-#define RIO_UNMAP_INBOUND \
-       _IOW(RIO_MPORT_DRV_MAGIC, 18, uint64_t)
-#define RIO_ALLOC_DMA \
-       _IOWR(RIO_MPORT_DRV_MAGIC, 19, struct rio_dma_mem)
-#define RIO_FREE_DMA \
-       _IOW(RIO_MPORT_DRV_MAGIC, 20, uint64_t)
-#define RIO_TRANSFER \
-       _IOWR(RIO_MPORT_DRV_MAGIC, 21, struct rio_transaction)
-#define RIO_WAIT_FOR_ASYNC \
-       _IOW(RIO_MPORT_DRV_MAGIC, 22, struct rio_async_tx_wait)
-#define RIO_DEV_ADD \
-       _IOW(RIO_MPORT_DRV_MAGIC, 23, struct rio_rdev_info)
-#define RIO_DEV_DEL \
-       _IOW(RIO_MPORT_DRV_MAGIC, 24, struct rio_rdev_info)
-
-#endif /* _RIO_MPORT_CDEV_H_ */
index 60bba7e032dc33c57156751c6a5270e1f72a0feb..52c4847b05e2882a72d04c3c75fc4d55c2b4a6b9 100644 (file)
@@ -720,7 +720,7 @@ struct signal_struct {
        struct task_cputime cputime_expires;
 
 #ifdef CONFIG_NO_HZ_FULL
-       unsigned long tick_dep_mask;
+       atomic_t tick_dep_mask;
 #endif
 
        struct list_head cpu_timers[3];
@@ -1549,7 +1549,7 @@ struct task_struct {
 #endif
 
 #ifdef CONFIG_NO_HZ_FULL
-       unsigned long tick_dep_mask;
+       atomic_t tick_dep_mask;
 #endif
        unsigned long nvcsw, nivcsw; /* context switch counts */
        u64 start_time;         /* monotonic time in nsec */
index a9414fd49dc6a27795878c38ef0dca52502d0e9a..dacb5e71199435a1062ea4c74151649f1d64f1e8 100644 (file)
@@ -705,4 +705,71 @@ typedef struct sctp_auth_chunk {
        sctp_authhdr_t auth_hdr;
 } __packed sctp_auth_chunk_t;
 
+struct sctp_info {
+       __u32   sctpi_tag;
+       __u32   sctpi_state;
+       __u32   sctpi_rwnd;
+       __u16   sctpi_unackdata;
+       __u16   sctpi_penddata;
+       __u16   sctpi_instrms;
+       __u16   sctpi_outstrms;
+       __u32   sctpi_fragmentation_point;
+       __u32   sctpi_inqueue;
+       __u32   sctpi_outqueue;
+       __u32   sctpi_overall_error;
+       __u32   sctpi_max_burst;
+       __u32   sctpi_maxseg;
+       __u32   sctpi_peer_rwnd;
+       __u32   sctpi_peer_tag;
+       __u8    sctpi_peer_capable;
+       __u8    sctpi_peer_sack;
+       __u16   __reserved1;
+
+       /* assoc status info */
+       __u64   sctpi_isacks;
+       __u64   sctpi_osacks;
+       __u64   sctpi_opackets;
+       __u64   sctpi_ipackets;
+       __u64   sctpi_rtxchunks;
+       __u64   sctpi_outofseqtsns;
+       __u64   sctpi_idupchunks;
+       __u64   sctpi_gapcnt;
+       __u64   sctpi_ouodchunks;
+       __u64   sctpi_iuodchunks;
+       __u64   sctpi_oodchunks;
+       __u64   sctpi_iodchunks;
+       __u64   sctpi_octrlchunks;
+       __u64   sctpi_ictrlchunks;
+
+       /* primary transport info */
+       struct sockaddr_storage sctpi_p_address;
+       __s32   sctpi_p_state;
+       __u32   sctpi_p_cwnd;
+       __u32   sctpi_p_srtt;
+       __u32   sctpi_p_rto;
+       __u32   sctpi_p_hbinterval;
+       __u32   sctpi_p_pathmaxrxt;
+       __u32   sctpi_p_sackdelay;
+       __u32   sctpi_p_sackfreq;
+       __u32   sctpi_p_ssthresh;
+       __u32   sctpi_p_partial_bytes_acked;
+       __u32   sctpi_p_flight_size;
+       __u16   sctpi_p_error;
+       __u16   __reserved2;
+
+       /* sctp sock info */
+       __u32   sctpi_s_autoclose;
+       __u32   sctpi_s_adaptation_ind;
+       __u32   sctpi_s_pd_point;
+       __u8    sctpi_s_nodelay;
+       __u8    sctpi_s_disable_fragments;
+       __u8    sctpi_s_v4mapped;
+       __u8    sctpi_s_frag_interleave;
+};
+
+struct sctp_infox {
+       struct sctp_info *sctpinfo;
+       struct sctp_association *asoc;
+};
+
 #endif /* __LINUX_SCTP_H__ */
index dde00defbaa52bd2ae380c76e7df305389aa5b8f..f3d45dd42695e1f813d67103a118dc03d27cdcbc 100644 (file)
@@ -7,13 +7,10 @@
 #include <linux/mutex.h>
 #include <linux/cpumask.h>
 #include <linux/nodemask.h>
+#include <linux/fs.h>
+#include <linux/cred.h>
 
 struct seq_operations;
-struct file;
-struct path;
-struct inode;
-struct dentry;
-struct user_namespace;
 
 struct seq_file {
        char *buf;
@@ -27,9 +24,7 @@ struct seq_file {
        struct mutex lock;
        const struct seq_operations *op;
        int poll_event;
-#ifdef CONFIG_USER_NS
-       struct user_namespace *user_ns;
-#endif
+       const struct file *file;
        void *private;
 };
 
@@ -147,7 +142,7 @@ int seq_release_private(struct inode *, struct file *);
 static inline struct user_namespace *seq_user_ns(struct seq_file *seq)
 {
 #ifdef CONFIG_USER_NS
-       return seq->user_ns;
+       return seq->file->f_cred->user_ns;
 #else
        extern struct user_namespace init_user_ns;
        return &init_user_ns;
index 007381270ff8d7d92cb05dd31e432983edd4281a..c413c588a24f854be9e4df78d8a6872b6b1ff9f3 100644 (file)
@@ -382,14 +382,10 @@ enum {
 
        /* generate software time stamp when entering packet scheduling */
        SKBTX_SCHED_TSTAMP = 1 << 6,
-
-       /* generate software timestamp on peer data acknowledgment */
-       SKBTX_ACK_TSTAMP = 1 << 7,
 };
 
 #define SKBTX_ANY_SW_TSTAMP    (SKBTX_SW_TSTAMP    | \
-                                SKBTX_SCHED_TSTAMP | \
-                                SKBTX_ACK_TSTAMP)
+                                SKBTX_SCHED_TSTAMP)
 #define SKBTX_ANY_TSTAMP       (SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)
 
 /*
@@ -465,23 +461,27 @@ enum {
        /* This indicates the tcp segment has CWR set. */
        SKB_GSO_TCP_ECN = 1 << 3,
 
-       SKB_GSO_TCPV6 = 1 << 4,
+       SKB_GSO_TCP_FIXEDID = 1 << 4,
+
+       SKB_GSO_TCPV6 = 1 << 5,
 
-       SKB_GSO_FCOE = 1 << 5,
+       SKB_GSO_FCOE = 1 << 6,
 
-       SKB_GSO_GRE = 1 << 6,
+       SKB_GSO_GRE = 1 << 7,
 
-       SKB_GSO_GRE_CSUM = 1 << 7,
+       SKB_GSO_GRE_CSUM = 1 << 8,
 
-       SKB_GSO_IPIP = 1 << 8,
+       SKB_GSO_IPIP = 1 << 9,
 
-       SKB_GSO_SIT = 1 << 9,
+       SKB_GSO_SIT = 1 << 10,
 
-       SKB_GSO_UDP_TUNNEL = 1 << 10,
+       SKB_GSO_UDP_TUNNEL = 1 << 11,
 
-       SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,
+       SKB_GSO_UDP_TUNNEL_CSUM = 1 << 12,
 
-       SKB_GSO_TUNNEL_REMCSUM = 1 << 12,
+       SKB_GSO_PARTIAL = 1 << 13,
+
+       SKB_GSO_TUNNEL_REMCSUM = 1 << 14,
 };
 
 #if BITS_PER_LONG > 32
@@ -1325,6 +1325,16 @@ static inline int skb_header_cloned(const struct sk_buff *skb)
        return dataref != 1;
 }
 
+static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
+{
+       might_sleep_if(gfpflags_allow_blocking(pri));
+
+       if (skb_header_cloned(skb))
+               return pskb_expand_head(skb, 0, 0, pri);
+
+       return 0;
+}
+
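
skb_header_unclone() bundles the usual "make the header private before editing it" step: a no-op when the header data is not shared, otherwise a pskb_expand_head() reallocation that may sleep depending on the gfp flags. A hedged calling sketch (the TCP field edited here is arbitrary):

    static int example_clear_cwr(struct sk_buff *skb)
    {
            int err = skb_header_unclone(skb, GFP_ATOMIC);

            if (err)
                    return err;
            /* header data is now private to us */
            tcp_hdr(skb)->cwr = 0;
            return 0;
    }
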
 /**
  *     skb_header_release - release reference to header
  *     @skb: buffer to operate on
@@ -2982,6 +2992,8 @@ struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
 int skb_ensure_writable(struct sk_buff *skb, int write_len);
 int skb_vlan_pop(struct sk_buff *skb);
 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
+struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
+                            gfp_t gfp);
 
 static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
 {
@@ -3589,7 +3601,10 @@ static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
  * Keeps track of level of encapsulation of network headers.
  */
 struct skb_gso_cb {
-       int     mac_offset;
+       union {
+               int     mac_offset;
+               int     data_offset;
+       };
        int     encap_level;
        __wsum  csum;
        __u16   csum_start;
index d0cb6d189a0a02bd28459c7b143229563f23c4e4..46a984f5e3a3ae9fd38cab4a7b5fc142420bc385 100644 (file)
@@ -45,13 +45,39 @@ struct qcom_smd_driver {
        int (*callback)(struct qcom_smd_device *, const void *, size_t);
 };
 
+#if IS_ENABLED(CONFIG_QCOM_SMD)
+
 int qcom_smd_driver_register(struct qcom_smd_driver *drv);
 void qcom_smd_driver_unregister(struct qcom_smd_driver *drv);
 
+int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len);
+
+#else
+
+static inline int qcom_smd_driver_register(struct qcom_smd_driver *drv)
+{
+       return -ENXIO;
+}
+
+static inline void qcom_smd_driver_unregister(struct qcom_smd_driver *drv)
+{
+       /* This shouldn't be possible */
+       WARN_ON(1);
+}
+
+static inline int qcom_smd_send(struct qcom_smd_channel *channel,
+                               const void *data, int len)
+{
+       /* This shouldn't be possible */
+       WARN_ON(1);
+       return -ENXIO;
+}
+
+#endif
+
 #define module_qcom_smd_driver(__smd_driver) \
        module_driver(__smd_driver, qcom_smd_driver_register, \
                      qcom_smd_driver_unregister)
 
-int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len);
 
 #endif
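
With the IS_ENABLED(CONFIG_QCOM_SMD) stubs above, client drivers link whether or not SMD support is built in; once called, the stubs WARN and fail with -ENXIO. A registration sketch for a hypothetical client (only .callback is visible in this hunk; the .driver.name and device members are assumed from the usual bus-driver shape):

    static int example_smd_callback(struct qcom_smd_device *sdev,
                                    const void *data, size_t len)
    {
            /* &sdev->dev is assumed from the usual bus-device layout */
            dev_info(&sdev->dev, "received %zu bytes\n", len);
            return 0;
    }

    static struct qcom_smd_driver example_smd_driver = {
            .callback = example_smd_callback,
            .driver = {
                    .name = "example-smd-client",   /* assumed member */
            },
    };
    module_qcom_smd_driver(example_smd_driver);
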
index 73bf6c6a833b37b907bc6f8852356d8976b9662d..b5cc5a6d7011a06e0310d5bf0136dd42569bda03 100644 (file)
@@ -201,8 +201,9 @@ struct ucred {
 #define AF_NFC         39      /* NFC sockets                  */
 #define AF_VSOCK       40      /* vSockets                     */
 #define AF_KCM         41      /* Kernel Connection Multiplexor*/
+#define AF_QIPCRTR     42      /* Qualcomm IPC Router          */
 
-#define AF_MAX         42      /* For now.. */
+#define AF_MAX         43      /* For now.. */
 
 /* Protocol families, same as address families. */
 #define PF_UNSPEC      AF_UNSPEC
@@ -249,6 +250,7 @@ struct ucred {
 #define PF_NFC         AF_NFC
 #define PF_VSOCK       AF_VSOCK
 #define PF_KCM         AF_KCM
+#define PF_QIPCRTR     AF_QIPCRTR
 #define PF_MAX         AF_MAX
 
 /* Maximum queue length specifiable by listen.  */
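
The AF_QIPCRTR/PF_QIPCRTR additions only reserve family number 42 and bump AF_MAX; the protocol itself is registered elsewhere. From userspace, probing for the family would look like the following sketch (SOCK_DGRAM is an assumption; socket() fails with EAFNOSUPPORT until a handler for the family is registered):

    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    #ifndef AF_QIPCRTR
    #define AF_QIPCRTR 42   /* value reserved above */
    #endif

    int main(void)
    {
            int fd = socket(AF_QIPCRTR, SOCK_DGRAM, 0);

            if (fd < 0) {
                    perror("socket(AF_QIPCRTR)");   /* EAFNOSUPPORT if absent */
                    return 1;
            }
            close(fd);
            return 0;
    }
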
index cc0fc712bb8238e27446dc92899906e55cff4de7..7ca44fb5b675d1c078a2a2f121056606282dad58 100644 (file)
@@ -129,7 +129,7 @@ static inline void svc_get(struct svc_serv *serv)
  *
  * These happen to all be powers of 2, which is not strictly
  * necessary but helps enforce the real limitation, which is
- * that they should be multiples of PAGE_CACHE_SIZE.
+ * that they should be multiples of PAGE_SIZE.
  *
  * For UDP transports, a block plus NFS,RPC, and UDP headers
  * has to fit into the IP datagram limit of 64K.  The largest
index d18b65c53dbb8b421f5a1499a2791ddc4f594319..0a4cd4703f403f0d65f867aaa6dcc3e128fcde05 100644 (file)
@@ -433,9 +433,9 @@ struct backing_dev_info;
 #define si_swapinfo(val) \
        do { (val)->freeswap = (val)->totalswap = 0; } while (0)
 /* only sparc can not include linux/pagemap.h in this file
- * so leave page_cache_release and release_pages undeclared... */
+ * so leave put_page and release_pages undeclared... */
 #define free_page_and_swap_cache(page) \
-       page_cache_release(page)
+       put_page(page)
 #define free_pages_and_swap_cache(pages, nr) \
        release_pages((pages), (nr), false);
 
@@ -533,6 +533,10 @@ static inline swp_entry_t get_swap_page(void)
 #ifdef CONFIG_MEMCG
 static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
 {
+       /* Cgroup2 doesn't have per-cgroup swappiness */
+       if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
+               return vm_swappiness;
+
        /* root ? */
        if (mem_cgroup_disabled() || !memcg->css.parent)
                return vm_swappiness;
index a55d0523f75d63bfbe43355a56dd0f08e835cdc9..1b8a5a7876ce67abbc0d6cac4004f816c31bf221 100644 (file)
@@ -352,8 +352,8 @@ struct thermal_zone_of_device_ops {
 
 struct thermal_trip {
        struct device_node *np;
-       unsigned long int temperature;
-       unsigned long int hysteresis;
+       int temperature;
+       int hysteresis;
        enum thermal_trip_type type;
 };
 
index fe6441203b59f4f3a1459e31cdb334c6b925816e..222f6aa0418f8877746fba79090ab790ab23e03a 100644 (file)
@@ -609,6 +609,11 @@ extern void ftrace_profile_free_filter(struct perf_event *event);
 void perf_trace_buf_update(void *record, u16 type);
 void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);
 
+void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
+                              struct trace_event_call *call, u64 count,
+                              struct pt_regs *regs, struct hlist_head *head,
+                              struct task_struct *task);
+
 static inline void
 perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
                       u64 count, struct pt_regs *regs, void *head,
index 161052477f77009e59744339ac5377e4a9b03356..b742b5e47cc209ac60e441cc42fcf8f431239fab 100644 (file)
@@ -7,7 +7,7 @@
  * defined; unless noted otherwise, they are optional, and can be
  * filled in with a null pointer.
  *
- * struct tty_struct * (*lookup)(struct tty_driver *self, int idx)
+ * struct tty_struct * (*lookup)(struct tty_driver *self, struct file *, int idx)
  *
  *     Return the tty device corresponding to idx, NULL if there is not
  *     one currently in use and an ERR_PTR value on error. Called under
@@ -250,7 +250,7 @@ struct serial_icounter_struct;
 
 struct tty_operations {
        struct tty_struct * (*lookup)(struct tty_driver *driver,
-                       struct inode *inode, int idx);
+                       struct file *filp, int idx);
        int  (*install)(struct tty_driver *driver, struct tty_struct *tty);
        void (*remove)(struct tty_driver *driver, struct tty_struct *tty);
        int  (*open)(struct tty_struct * tty, struct file * filp);
index df89c9bcba7db8dbde3bbf2b99f9af6ed562b112..d3a2bb712af3b9613b98ef9c3219f8dcd31568a5 100644 (file)
@@ -89,6 +89,20 @@ static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
 #endif
 }
 
+static inline void u64_stats_update_begin_raw(struct u64_stats_sync *syncp)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+       raw_write_seqcount_begin(&syncp->seq);
+#endif
+}
+
+static inline void u64_stats_update_end_raw(struct u64_stats_sync *syncp)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+       raw_write_seqcount_end(&syncp->seq);
+#endif
+}
+
 static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
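
The new _raw variants mirror u64_stats_update_begin()/end() but use the raw seqcount writers, for callers that already guarantee writer exclusion and interrupt context on their own. A usage sketch, assuming a hypothetical counter pair:

    struct example_stats {
            u64                   packets;
            u64                   bytes;
            struct u64_stats_sync syncp;
    };

    /* Writer: exclusion against other writers (per-CPU data or an outer
     * lock) is assumed, which is what makes the raw variant safe here.
     */
    static void example_stats_add(struct example_stats *s, unsigned int len)
    {
            u64_stats_update_begin_raw(&s->syncp);
            s->packets++;
            s->bytes += len;
            u64_stats_update_end_raw(&s->syncp);
    }
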
index 7f5f78bd15ad448414fa8c06fb61fa25fb152b5f..245f57dbbb614e818d2f067f1f5c431d7e9f193b 100644 (file)
@@ -79,6 +79,8 @@
                /* Cannot handle MI_REPORT_SUPPORTED_OPERATION_CODES */ \
        US_FLAG(MAX_SECTORS_240,        0x08000000)             \
                /* Sets max_sectors to 240 */                   \
+       US_FLAG(NO_REPORT_LUNS, 0x10000000)                     \
+               /* Cannot handle REPORT_LUNS */                 \
 
 #define US_FLAG(name, value)   US_FL_##name = value ,
 enum { US_DO_ALL_FLAGS };
index 8a0f55b6c2ba80e25c67caf89e2050fa58ae322f..88e3ab496e8f84911491773eb026e1770ea20844 100644 (file)
@@ -375,6 +375,9 @@ struct vb2_ops {
 /**
  * struct vb2_ops - driver-specific callbacks
  *
+ * @verify_planes_array: Verify that a given user space structure contains
+ *                     enough planes for the buffer. This is called
+ *                     for each dequeued buffer.
  * @fill_user_buffer:  given a vb2_buffer fill in the userspace structure.
  *                     For V4L2 this is a struct v4l2_buffer.
  * @fill_vb2_buffer:   given a userspace structure, fill in the vb2_buffer.
@@ -384,6 +387,7 @@ struct vb2_ops {
  *                     the vb2_buffer struct.
  */
 struct vb2_buf_ops {
+       int (*verify_planes_array)(struct vb2_buffer *vb, const void *pb);
        void (*fill_user_buffer)(struct vb2_buffer *vb, void *pb);
        int (*fill_vb2_buffer)(struct vb2_buffer *vb, const void *pb,
                                struct vb2_plane *planes);
@@ -400,6 +404,9 @@ struct vb2_buf_ops {
  * @fileio_read_once:          report EOF after reading the first buffer
  * @fileio_write_immediately:  queue buffer after each write() call
  * @allow_zero_bytesused:      allow bytesused == 0 to be passed to the driver
+ * @quirk_poll_must_check_waiting_for_buffers: Return POLLERR from poll() when
+ *              QBUF has not been called. This is a vb1 idiom that vb2 has
+ *              also adopted.
  * @lock:      pointer to a mutex that protects the vb2_queue struct. The
  *             driver can set this to a mutex to let the v4l2 core serialize
  *             the queuing ioctls. If the driver wants to handle locking
@@ -463,6 +470,7 @@ struct vb2_queue {
        unsigned                        fileio_read_once:1;
        unsigned                        fileio_write_immediately:1;
        unsigned                        allow_zero_bytesused:1;
+       unsigned                   quirk_poll_must_check_waiting_for_buffers:1;
 
        struct mutex                    *lock;
        void                            *owner;
index da3a77d25fcbe1d32fcf9fff0659e48612a0e0c1..da84cf920b78b8a07be8c0a4bb95d2f89fefe350 100644 (file)
@@ -58,6 +58,9 @@
 #include <net/ipv6.h>
 #include <net/net_namespace.h>
 
+/* special link-layer handling */
+#include <net/mac802154.h>
+
 #define EUI64_ADDR_LEN         8
 
 #define LOWPAN_NHC_MAX_ID_LEN  1
@@ -93,7 +96,7 @@ static inline bool lowpan_is_iphc(u8 dispatch)
 }
 
 #define LOWPAN_PRIV_SIZE(llpriv_size)  \
-       (sizeof(struct lowpan_priv) + llpriv_size)
+       (sizeof(struct lowpan_dev) + llpriv_size)
 
 enum lowpan_lltypes {
        LOWPAN_LLTYPE_BTLE,
@@ -129,7 +132,7 @@ lowpan_iphc_ctx_is_compression(const struct lowpan_iphc_ctx *ctx)
        return test_bit(LOWPAN_IPHC_CTX_FLAG_COMPRESSION, &ctx->flags);
 }
 
-struct lowpan_priv {
+struct lowpan_dev {
        enum lowpan_lltypes lltype;
        struct dentry *iface_debugfs;
        struct lowpan_iphc_ctx_table ctx;
@@ -139,11 +142,23 @@ struct lowpan_priv {
 };
 
 static inline
-struct lowpan_priv *lowpan_priv(const struct net_device *dev)
+struct lowpan_dev *lowpan_dev(const struct net_device *dev)
 {
        return netdev_priv(dev);
 }
 
+/* private device info */
+struct lowpan_802154_dev {
+       struct net_device       *wdev; /* wpan device ptr */
+       u16                     fragment_tag;
+};
+
+static inline struct lowpan_802154_dev *
+lowpan_802154_dev(const struct net_device *dev)
+{
+       return (struct lowpan_802154_dev *)lowpan_dev(dev)->priv;
+}
+
 struct lowpan_802154_cb {
        u16 d_tag;
        unsigned int d_size;
@@ -157,6 +172,22 @@ struct lowpan_802154_cb *lowpan_802154_cb(const struct sk_buff *skb)
        return (struct lowpan_802154_cb *)skb->cb;
 }
 
+static inline void lowpan_iphc_uncompress_eui64_lladdr(struct in6_addr *ipaddr,
+                                                      const void *lladdr)
+{
+       /* fe:80::XXXX:XXXX:XXXX:XXXX
+        *        \_________________/
+        *              hwaddr
+        */
+       ipaddr->s6_addr[0] = 0xFE;
+       ipaddr->s6_addr[1] = 0x80;
+       memcpy(&ipaddr->s6_addr[8], lladdr, EUI64_ADDR_LEN);
+       /* second bit-flip (Universal/Local)
+        * is done according to RFC 2464
+        */
+       ipaddr->s6_addr[8] ^= 0x02;
+}
+
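
As a worked example of lowpan_iphc_uncompress_eui64_lladdr() (the neighbour address is made up):

    static void example_fill_link_local(struct in6_addr *addr)
    {
            /* made-up neighbour EUI-64 */
            static const u8 lladdr[EUI64_ADDR_LEN] = {
                    0x02, 0x11, 0x22, 0xff, 0xfe, 0x33, 0x44, 0x55
            };

            lowpan_iphc_uncompress_eui64_lladdr(addr, lladdr);
            /* *addr is now fe80::11:22ff:fe33:4455: the 0x02
             * (Universal/Local) bit of byte 8 was flipped per RFC 2464
             */
    }
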
 #ifdef DEBUG
 /* print data in line */
 static inline void raw_dump_inline(const char *caller, char *msg,
index 2a19fe111c78836629bd914086b17a48f96c472c..03e322b30218244b9bde7fca5438cacc20923307 100644 (file)
@@ -135,6 +135,7 @@ void tcf_hashinfo_destroy(const struct tc_action_ops *ops,
 static inline void tc_action_net_exit(struct tc_action_net *tn)
 {
        tcf_hashinfo_destroy(tn->ops, tn->hinfo);
+       kfree(tn->hinfo);
 }
 
 int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
index e797d45a5ae62697361e1ca827a7638542cc4f41..ac1bc3c49fbdf9832fdb4f895657c0336bb61926 100644 (file)
@@ -12,6 +12,7 @@
 #ifndef _NET_RXRPC_H
 #define _NET_RXRPC_H
 
+#include <linux/skbuff.h>
 #include <linux/rxrpc.h>
 
 struct rxrpc_call;
@@ -19,11 +20,12 @@ struct rxrpc_call;
 /*
  * the mark applied to socket buffers that may be intercepted
  */
-enum {
+enum rxrpc_skb_mark {
        RXRPC_SKB_MARK_DATA,            /* data message */
        RXRPC_SKB_MARK_FINAL_ACK,       /* final ACK received message */
        RXRPC_SKB_MARK_BUSY,            /* server busy message */
        RXRPC_SKB_MARK_REMOTE_ABORT,    /* remote abort message */
+       RXRPC_SKB_MARK_LOCAL_ABORT,     /* local abort message */
        RXRPC_SKB_MARK_NET_ERROR,       /* network error message */
        RXRPC_SKB_MARK_LOCAL_ERROR,     /* local error message */
        RXRPC_SKB_MARK_NEW_CALL,        /* new incoming call message */
index 5d38d980b89ddd512de9547f0e7aff10b58446f2..eefcf3e96421e3f8f26abe65a55914aa31b6fcae 100644 (file)
@@ -61,6 +61,8 @@
 #define HCI_RS232      4
 #define HCI_PCI                5
 #define HCI_SDIO       6
+#define HCI_SPI                7
+#define HCI_I2C                8
 
 /* HCI controller types */
 #define HCI_BREDR      0x00
index b39277eb251f755abbb77dfaaeb7414f8c84d4e7..183916e168f167ab3f580094c5fbf243f3d803ad 100644 (file)
@@ -67,26 +67,6 @@ struct wiphy;
  * wireless hardware capability structures
  */
 
-/**
- * enum ieee80211_band - supported frequency bands
- *
- * The bands are assigned this way because the supported
- * bitrates differ in these bands.
- *
- * @IEEE80211_BAND_2GHZ: 2.4GHz ISM band
- * @IEEE80211_BAND_5GHZ: around 5GHz band (4.9-5.7)
- * @IEEE80211_BAND_60GHZ: around 60 GHz band (58.32 - 64.80 GHz)
- * @IEEE80211_NUM_BANDS: number of defined bands
- */
-enum ieee80211_band {
-       IEEE80211_BAND_2GHZ = NL80211_BAND_2GHZ,
-       IEEE80211_BAND_5GHZ = NL80211_BAND_5GHZ,
-       IEEE80211_BAND_60GHZ = NL80211_BAND_60GHZ,
-
-       /* keep last */
-       IEEE80211_NUM_BANDS
-};
-
 /**
  * enum ieee80211_channel_flags - channel flags
  *
@@ -167,7 +147,7 @@ enum ieee80211_channel_flags {
  * @dfs_cac_ms: DFS CAC time in milliseconds, this is valid for DFS channels.
  */
 struct ieee80211_channel {
-       enum ieee80211_band band;
+       enum nl80211_band band;
        u16 center_freq;
        u16 hw_value;
        u32 flags;
@@ -324,7 +304,7 @@ struct ieee80211_sta_vht_cap {
 struct ieee80211_supported_band {
        struct ieee80211_channel *channels;
        struct ieee80211_rate *bitrates;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        int n_channels;
        int n_bitrates;
        struct ieee80211_sta_ht_cap ht_cap;
@@ -1370,7 +1350,7 @@ struct mesh_setup {
        bool user_mpm;
        u8 dtim_period;
        u16 beacon_interval;
-       int mcast_rate[IEEE80211_NUM_BANDS];
+       int mcast_rate[NUM_NL80211_BANDS];
        u32 basic_rates;
 };
 
@@ -1468,7 +1448,7 @@ struct cfg80211_scan_request {
        size_t ie_len;
        u32 flags;
 
-       u32 rates[IEEE80211_NUM_BANDS];
+       u32 rates[NUM_NL80211_BANDS];
 
        struct wireless_dev *wdev;
 
@@ -1750,7 +1730,12 @@ enum cfg80211_assoc_req_flags {
  * @ie_len: Length of ie buffer in octets
  * @use_mfp: Use management frame protection (IEEE 802.11w) in this association
  * @crypto: crypto settings
- * @prev_bssid: previous BSSID, if not %NULL use reassociate frame
+ * @prev_bssid: previous BSSID, if not %NULL use reassociate frame. This is used
+ *     to indicate a request to reassociate within the ESS instead of a request
+ *     to do the initial association with the ESS. When included, this is set to
+ *     the BSSID of the current association, i.e., to the value that is
+ *     included in the Current AP address field of the Reassociation Request
+ *     frame.
  * @flags:  See &enum cfg80211_assoc_req_flags
  * @ht_capa:  HT Capabilities over-rides.  Values set in ht_capa_mask
  *     will be used in ht_capa.  Un-supported values will be ignored.
@@ -1855,7 +1840,7 @@ struct cfg80211_ibss_params {
        bool privacy;
        bool control_port;
        bool userspace_handles_dfs;
-       int mcast_rate[IEEE80211_NUM_BANDS];
+       int mcast_rate[NUM_NL80211_BANDS];
        struct ieee80211_ht_cap ht_capa;
        struct ieee80211_ht_cap ht_capa_mask;
 };
@@ -1867,7 +1852,7 @@ struct cfg80211_ibss_params {
  * @delta: value of RSSI level adjustment.
  */
 struct cfg80211_bss_select_adjust {
-       enum ieee80211_band band;
+       enum nl80211_band band;
        s8 delta;
 };
 
@@ -1882,7 +1867,7 @@ struct cfg80211_bss_select_adjust {
 struct cfg80211_bss_selection {
        enum nl80211_bss_select_attr behaviour;
        union {
-               enum ieee80211_band band_pref;
+               enum nl80211_band band_pref;
                struct cfg80211_bss_select_adjust adjust;
        } param;
 };
@@ -1925,7 +1910,12 @@ struct cfg80211_bss_selection {
  * @pbss: if set, connect to a PCP instead of AP. Valid for DMG
  *     networks.
  * @bss_select: criteria to be used for BSS selection.
- * @prev_bssid: previous BSSID, if not %NULL use reassociate frame
+ * @prev_bssid: previous BSSID, if not %NULL use reassociate frame. This is used
+ *     to indicate a request to reassociate within the ESS instead of a request
+ *     to do the initial association with the ESS. When included, this is set to
+ *     the BSSID of the current association, i.e., to the value that is
+ *     included in the Current AP address field of the Reassociation Request
+ *     frame.
  */
 struct cfg80211_connect_params {
        struct ieee80211_channel *channel;
@@ -1980,7 +1970,7 @@ struct cfg80211_bitrate_mask {
                u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN];
                u16 vht_mcs[NL80211_VHT_NSS_MAX];
                enum nl80211_txrate_gi gi;
-       } control[IEEE80211_NUM_BANDS];
+       } control[NUM_NL80211_BANDS];
 };
 /**
  * struct cfg80211_pmksa - PMK Security Association
@@ -2377,7 +2367,17 @@ struct cfg80211_qos_map {
  * @connect: Connect to the ESS with the specified parameters. When connected,
  *     call cfg80211_connect_result() with status code %WLAN_STATUS_SUCCESS.
  *     If the connection fails for some reason, call cfg80211_connect_result()
- *     with the status from the AP.
+ *     with the status from the AP. The driver is allowed to roam to other
+ *     BSSes within the ESS when the other BSS matches the connect parameters.
+ *     When such roaming is initiated by the driver, the driver is expected to
+ *     verify that the target matches the configured security parameters and
+ *     to use a Reassociation Request frame instead of an Association Request frame.
+ *     The connect function can also be used to request the driver to perform
+ *     a specific roam when connected to an ESS. In that case, the prev_bssid
+ *     parameter is set to the BSSID of the currently associated BSS as an
+ *     indication of requesting reassociation. In both the driver-initiated and
+ *     new connect() call initiated roaming cases, the result of roaming is
+ *     indicated with a call to cfg80211_roamed() or cfg80211_roamed_bss().
  *     (invoked with the wireless_dev mutex held)
  * @disconnect: Disconnect from the BSS/ESS.
  *     (invoked with the wireless_dev mutex held)
@@ -2657,7 +2657,7 @@ struct cfg80211_ops {
        int     (*leave_ibss)(struct wiphy *wiphy, struct net_device *dev);
 
        int     (*set_mcast_rate)(struct wiphy *wiphy, struct net_device *dev,
-                                 int rate[IEEE80211_NUM_BANDS]);
+                                 int rate[NUM_NL80211_BANDS]);
 
        int     (*set_wiphy_params)(struct wiphy *wiphy, u32 changed);
 
@@ -3303,7 +3303,7 @@ struct wiphy {
         * help determine whether you own this wiphy or not. */
        const void *privid;
 
-       struct ieee80211_supported_band *bands[IEEE80211_NUM_BANDS];
+       struct ieee80211_supported_band *bands[NUM_NL80211_BANDS];
 
        /* Lets us get back the wiphy on the callback */
        void (*reg_notifier)(struct wiphy *wiphy,
@@ -3638,7 +3638,7 @@ static inline void *wdev_priv(struct wireless_dev *wdev)
  * @band: band, necessary due to channel number overlap
  * Return: The corresponding frequency (in MHz), or 0 if the conversion failed.
  */
-int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band);
+int ieee80211_channel_to_frequency(int chan, enum nl80211_band band);
 
 /**
  * ieee80211_frequency_to_channel - convert frequency to channel number
@@ -5069,7 +5069,7 @@ void cfg80211_ch_switch_started_notify(struct net_device *dev,
  * Returns %true if the conversion was successful, %false otherwise.
  */
 bool ieee80211_operating_class_to_band(u8 operating_class,
-                                      enum ieee80211_band *band);
+                                      enum nl80211_band *band);
 
 /**
  * ieee80211_chandef_to_operating_class - convert chandef to operation class
index c0a92e2c286d6cc0942591dfc02d9b485ae6810e..74c9693d4941dd0f4a08526968a9783727249fb5 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/hardirq.h>
 #include <linux/rcupdate.h>
 #include <net/sock.h>
+#include <net/inet_sock.h>
 
 #ifdef CONFIG_CGROUP_NET_CLASSID
 struct cgroup_cls_state {
@@ -63,11 +64,13 @@ static inline u32 task_get_classid(const struct sk_buff *skb)
         * softirqs always disables bh.
         */
        if (in_serving_softirq()) {
+               struct sock *sk = skb_to_full_sk(skb);
+
                /* If there is an sock_cgroup_classid we'll use that. */
-               if (!skb->sk)
+               if (!sk || !sk_fullsock(sk))
                        return 0;
 
-               classid = sock_cgroup_classid(&skb->sk->sk_cgrp_data);
+               classid = sock_cgroup_classid(&sk->sk_cgrp_data);
        }
 
        return classid;
index d168aca115ccf0ff4c4ca98107327c533314c618..a6e428f801350809322aaff08d92904e059c3b5a 100644 (file)
@@ -87,27 +87,6 @@ static inline codel_time_t codel_get_time(void)
         ((s32)((a) - (b)) >= 0))
 #define codel_time_before_eq(a, b)     codel_time_after_eq(b, a)
 
-/* Qdiscs using codel plugin must use codel_skb_cb in their own cb[] */
-struct codel_skb_cb {
-       codel_time_t enqueue_time;
-};
-
-static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb)
-{
-       qdisc_cb_private_validate(skb, sizeof(struct codel_skb_cb));
-       return (struct codel_skb_cb *)qdisc_skb_cb(skb)->data;
-}
-
-static codel_time_t codel_get_enqueue_time(const struct sk_buff *skb)
-{
-       return get_codel_cb(skb)->enqueue_time;
-}
-
-static void codel_set_enqueue_time(struct sk_buff *skb)
-{
-       get_codel_cb(skb)->enqueue_time = codel_get_time();
-}
-
 static inline u32 codel_time_to_us(codel_time_t val)
 {
        u64 valns = ((u64)val << CODEL_SHIFT);
@@ -176,198 +155,10 @@ struct codel_stats {
 
 #define CODEL_DISABLED_THRESHOLD INT_MAX
 
-static void codel_params_init(struct codel_params *params,
-                             const struct Qdisc *sch)
-{
-       params->interval = MS2TIME(100);
-       params->target = MS2TIME(5);
-       params->mtu = psched_mtu(qdisc_dev(sch));
-       params->ce_threshold = CODEL_DISABLED_THRESHOLD;
-       params->ecn = false;
-}
-
-static void codel_vars_init(struct codel_vars *vars)
-{
-       memset(vars, 0, sizeof(*vars));
-}
-
-static void codel_stats_init(struct codel_stats *stats)
-{
-       stats->maxpacket = 0;
-}
-
-/*
- * http://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots
- * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
- *
- * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32
- */
-static void codel_Newton_step(struct codel_vars *vars)
-{
-       u32 invsqrt = ((u32)vars->rec_inv_sqrt) << REC_INV_SQRT_SHIFT;
-       u32 invsqrt2 = ((u64)invsqrt * invsqrt) >> 32;
-       u64 val = (3LL << 32) - ((u64)vars->count * invsqrt2);
-
-       val >>= 2; /* avoid overflow in following multiply */
-       val = (val * invsqrt) >> (32 - 2 + 1);
-
-       vars->rec_inv_sqrt = val >> REC_INV_SQRT_SHIFT;
-}
-
-/*
- * CoDel control_law is t + interval/sqrt(count)
- * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid
- * both sqrt() and divide operation.
- */
-static codel_time_t codel_control_law(codel_time_t t,
-                                     codel_time_t interval,
-                                     u32 rec_inv_sqrt)
-{
-       return t + reciprocal_scale(interval, rec_inv_sqrt << REC_INV_SQRT_SHIFT);
-}
-
-static bool codel_should_drop(const struct sk_buff *skb,
-                             struct Qdisc *sch,
-                             struct codel_vars *vars,
-                             struct codel_params *params,
-                             struct codel_stats *stats,
-                             codel_time_t now)
-{
-       bool ok_to_drop;
-
-       if (!skb) {
-               vars->first_above_time = 0;
-               return false;
-       }
-
-       vars->ldelay = now - codel_get_enqueue_time(skb);
-       sch->qstats.backlog -= qdisc_pkt_len(skb);
-
-       if (unlikely(qdisc_pkt_len(skb) > stats->maxpacket))
-               stats->maxpacket = qdisc_pkt_len(skb);
-
-       if (codel_time_before(vars->ldelay, params->target) ||
-           sch->qstats.backlog <= params->mtu) {
-               /* went below - stay below for at least interval */
-               vars->first_above_time = 0;
-               return false;
-       }
-       ok_to_drop = false;
-       if (vars->first_above_time == 0) {
-               /* just went above from below. If we stay above
-                * for at least interval we'll say it's ok to drop
-                */
-               vars->first_above_time = now + params->interval;
-       } else if (codel_time_after(now, vars->first_above_time)) {
-               ok_to_drop = true;
-       }
-       return ok_to_drop;
-}
-
+typedef u32 (*codel_skb_len_t)(const struct sk_buff *skb);
+typedef codel_time_t (*codel_skb_time_t)(const struct sk_buff *skb);
+typedef void (*codel_skb_drop_t)(struct sk_buff *skb, void *ctx);
 typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *vars,
-                                               struct Qdisc *sch);
+                                               void *ctx);
 
-static struct sk_buff *codel_dequeue(struct Qdisc *sch,
-                                    struct codel_params *params,
-                                    struct codel_vars *vars,
-                                    struct codel_stats *stats,
-                                    codel_skb_dequeue_t dequeue_func)
-{
-       struct sk_buff *skb = dequeue_func(vars, sch);
-       codel_time_t now;
-       bool drop;
-
-       if (!skb) {
-               vars->dropping = false;
-               return skb;
-       }
-       now = codel_get_time();
-       drop = codel_should_drop(skb, sch, vars, params, stats, now);
-       if (vars->dropping) {
-               if (!drop) {
-                       /* sojourn time below target - leave dropping state */
-                       vars->dropping = false;
-               } else if (codel_time_after_eq(now, vars->drop_next)) {
-                       /* It's time for the next drop. Drop the current
-                        * packet and dequeue the next. The dequeue might
-                        * take us out of dropping state.
-                        * If not, schedule the next drop.
-                        * A large backlog might result in drop rates so high
-                        * that the next drop should happen now,
-                        * hence the while loop.
-                        */
-                       while (vars->dropping &&
-                              codel_time_after_eq(now, vars->drop_next)) {
-                               vars->count++; /* dont care of possible wrap
-                                               * since there is no more divide
-                                               */
-                               codel_Newton_step(vars);
-                               if (params->ecn && INET_ECN_set_ce(skb)) {
-                                       stats->ecn_mark++;
-                                       vars->drop_next =
-                                               codel_control_law(vars->drop_next,
-                                                                 params->interval,
-                                                                 vars->rec_inv_sqrt);
-                                       goto end;
-                               }
-                               stats->drop_len += qdisc_pkt_len(skb);
-                               qdisc_drop(skb, sch);
-                               stats->drop_count++;
-                               skb = dequeue_func(vars, sch);
-                               if (!codel_should_drop(skb, sch,
-                                                      vars, params, stats, now)) {
-                                       /* leave dropping state */
-                                       vars->dropping = false;
-                               } else {
-                                       /* and schedule the next drop */
-                                       vars->drop_next =
-                                               codel_control_law(vars->drop_next,
-                                                                 params->interval,
-                                                                 vars->rec_inv_sqrt);
-                               }
-                       }
-               }
-       } else if (drop) {
-               u32 delta;
-
-               if (params->ecn && INET_ECN_set_ce(skb)) {
-                       stats->ecn_mark++;
-               } else {
-                       stats->drop_len += qdisc_pkt_len(skb);
-                       qdisc_drop(skb, sch);
-                       stats->drop_count++;
-
-                       skb = dequeue_func(vars, sch);
-                       drop = codel_should_drop(skb, sch, vars, params,
-                                                stats, now);
-               }
-               vars->dropping = true;
-               /* if min went above target close to when we last went below it
-                * assume that the drop rate that controlled the queue on the
-                * last cycle is a good starting point to control it now.
-                */
-               delta = vars->count - vars->lastcount;
-               if (delta > 1 &&
-                   codel_time_before(now - vars->drop_next,
-                                     16 * params->interval)) {
-                       vars->count = delta;
-                       /* we dont care if rec_inv_sqrt approximation
-                        * is not very precise :
-                        * Next Newton steps will correct it quadratically.
-                        */
-                       codel_Newton_step(vars);
-               } else {
-                       vars->count = 1;
-                       vars->rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT;
-               }
-               vars->lastcount = vars->count;
-               vars->drop_next = codel_control_law(now, params->interval,
-                                                   vars->rec_inv_sqrt);
-       }
-end:
-       if (skb && codel_time_after(vars->ldelay, params->ce_threshold) &&
-           INET_ECN_set_ce(skb))
-               stats->ce_mark++;
-       return skb;
-}
 #endif
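
The effect of this hunk is to decouple codel.h from struct Qdisc: packet length, enqueue-time lookup, drop, and dequeue all become caller-supplied callbacks, and the algorithm bodies move into the new codel_impl.h below, so non-qdisc users (such as wireless drivers) can run CoDel on their own queues. A sketch of a minimal callback set, assuming a hypothetical my_queue FIFO whose enqueue path stamps codel_get_time() into skb->cb[]:

    struct my_queue {
            struct sk_buff_head skbs;
            u32                 backlog; /* bytes, maintained by the caller */
            struct codel_params params;
            struct codel_vars   vars;
            struct codel_stats  stats;
    };

    static u32 my_skb_len(const struct sk_buff *skb)
    {
            return skb->len;
    }

    static codel_time_t my_skb_time(const struct sk_buff *skb)
    {
            /* enqueue path is assumed to stamp codel_get_time() here */
            return *(const codel_time_t *)skb->cb;
    }

    static void my_drop(struct sk_buff *skb, void *ctx)
    {
            kfree_skb(skb);
    }

    static struct sk_buff *my_dequeue(struct codel_vars *vars, void *ctx)
    {
            struct my_queue *q = ctx;

            return __skb_dequeue(&q->skbs);
    }

    static struct sk_buff *my_queue_dequeue(struct my_queue *q)
    {
            return codel_dequeue(q, &q->backlog, &q->params, &q->vars,
                                 &q->stats, my_skb_len, my_skb_time,
                                 my_drop, my_dequeue);
    }
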
diff --git a/include/net/codel_impl.h b/include/net/codel_impl.h
new file mode 100644 (file)
index 0000000..d289b91
--- /dev/null
@@ -0,0 +1,255 @@
+#ifndef __NET_SCHED_CODEL_IMPL_H
+#define __NET_SCHED_CODEL_IMPL_H
+
+/*
+ * Codel - The Controlled-Delay Active Queue Management algorithm
+ *
+ *  Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
+ *  Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
+ *  Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
+ *  Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The names of the authors may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ */
+
+/* Controlling Queue Delay (CoDel) algorithm
+ * =========================================
+ * Source : Kathleen Nichols and Van Jacobson
+ * http://queue.acm.org/detail.cfm?id=2209336
+ *
+ * Implemented on linux by Dave Taht and Eric Dumazet
+ */
+
+static void codel_params_init(struct codel_params *params)
+{
+       params->interval = MS2TIME(100);
+       params->target = MS2TIME(5);
+       params->ce_threshold = CODEL_DISABLED_THRESHOLD;
+       params->ecn = false;
+}
+
+static void codel_vars_init(struct codel_vars *vars)
+{
+       memset(vars, 0, sizeof(*vars));
+}
+
+static void codel_stats_init(struct codel_stats *stats)
+{
+       stats->maxpacket = 0;
+}
+
+/*
+ * http://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots
+ * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
+ *
+ * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32
+ */
+static void codel_Newton_step(struct codel_vars *vars)
+{
+       u32 invsqrt = ((u32)vars->rec_inv_sqrt) << REC_INV_SQRT_SHIFT;
+       u32 invsqrt2 = ((u64)invsqrt * invsqrt) >> 32;
+       u64 val = (3LL << 32) - ((u64)vars->count * invsqrt2);
+
+       val >>= 2; /* avoid overflow in following multiply */
+       val = (val * invsqrt) >> (32 - 2 + 1);
+
+       vars->rec_inv_sqrt = val >> REC_INV_SQRT_SHIFT;
+}
+
+/*
+ * CoDel control_law is t + interval/sqrt(count)
+ * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid
+ * both sqrt() and divide operation.
+ */
+static codel_time_t codel_control_law(codel_time_t t,
+                                     codel_time_t interval,
+                                     u32 rec_inv_sqrt)
+{
+       return t + reciprocal_scale(interval, rec_inv_sqrt << REC_INV_SQRT_SHIFT);
+}
+
+static bool codel_should_drop(const struct sk_buff *skb,
+                             void *ctx,
+                             struct codel_vars *vars,
+                             struct codel_params *params,
+                             struct codel_stats *stats,
+                             codel_skb_len_t skb_len_func,
+                             codel_skb_time_t skb_time_func,
+                             u32 *backlog,
+                             codel_time_t now)
+{
+       bool ok_to_drop;
+       u32 skb_len;
+
+       if (!skb) {
+               vars->first_above_time = 0;
+               return false;
+       }
+
+       skb_len = skb_len_func(skb);
+       vars->ldelay = now - skb_time_func(skb);
+
+       if (unlikely(skb_len > stats->maxpacket))
+               stats->maxpacket = skb_len;
+
+       if (codel_time_before(vars->ldelay, params->target) ||
+           *backlog <= params->mtu) {
+               /* went below - stay below for at least interval */
+               vars->first_above_time = 0;
+               return false;
+       }
+       ok_to_drop = false;
+       if (vars->first_above_time == 0) {
+               /* just went above from below. If we stay above
+                * for at least interval we'll say it's ok to drop
+                */
+               vars->first_above_time = now + params->interval;
+       } else if (codel_time_after(now, vars->first_above_time)) {
+               ok_to_drop = true;
+       }
+       return ok_to_drop;
+}
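
Concretely, with the defaults from codel_params_init() above (target 5 ms, interval 100 ms): a packet dequeued at t = 0 after sitting 7 ms in the queue sets first_above_time to t + 100 ms, and only if every dequeue through t = 100 ms still sees a sojourn time above 5 ms, with more than one MTU of backlog, does codel_should_drop() start returning true. A short burst that drains quickly therefore never triggers drops.
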
+
+static struct sk_buff *codel_dequeue(void *ctx,
+                                    u32 *backlog,
+                                    struct codel_params *params,
+                                    struct codel_vars *vars,
+                                    struct codel_stats *stats,
+                                    codel_skb_len_t skb_len_func,
+                                    codel_skb_time_t skb_time_func,
+                                    codel_skb_drop_t drop_func,
+                                    codel_skb_dequeue_t dequeue_func)
+{
+       struct sk_buff *skb = dequeue_func(vars, ctx);
+       codel_time_t now;
+       bool drop;
+
+       if (!skb) {
+               vars->dropping = false;
+               return skb;
+       }
+       now = codel_get_time();
+       drop = codel_should_drop(skb, ctx, vars, params, stats,
+                                skb_len_func, skb_time_func, backlog, now);
+       if (vars->dropping) {
+               if (!drop) {
+                       /* sojourn time below target - leave dropping state */
+                       vars->dropping = false;
+               } else if (codel_time_after_eq(now, vars->drop_next)) {
+                       /* It's time for the next drop. Drop the current
+                        * packet and dequeue the next. The dequeue might
+                        * take us out of dropping state.
+                        * If not, schedule the next drop.
+                        * A large backlog might result in drop rates so high
+                        * that the next drop should happen now,
+                        * hence the while loop.
+                        */
+                       while (vars->dropping &&
+                              codel_time_after_eq(now, vars->drop_next)) {
+                               vars->count++; /* don't care about a possible
+                                               * wrap since there is no
+                                               * divide any more
+                                               */
+                               codel_Newton_step(vars);
+                               if (params->ecn && INET_ECN_set_ce(skb)) {
+                                       stats->ecn_mark++;
+                                       vars->drop_next =
+                                               codel_control_law(vars->drop_next,
+                                                                 params->interval,
+                                                                 vars->rec_inv_sqrt);
+                                       goto end;
+                               }
+                               stats->drop_len += skb_len_func(skb);
+                               drop_func(skb, ctx);
+                               stats->drop_count++;
+                               skb = dequeue_func(vars, ctx);
+                               if (!codel_should_drop(skb, ctx,
+                                                      vars, params, stats,
+                                                      skb_len_func,
+                                                      skb_time_func,
+                                                      backlog, now)) {
+                                       /* leave dropping state */
+                                       vars->dropping = false;
+                               } else {
+                                       /* and schedule the next drop */
+                                       vars->drop_next =
+                                               codel_control_law(vars->drop_next,
+                                                                 params->interval,
+                                                                 vars->rec_inv_sqrt);
+                               }
+                       }
+               }
+       } else if (drop) {
+               u32 delta;
+
+               if (params->ecn && INET_ECN_set_ce(skb)) {
+                       stats->ecn_mark++;
+               } else {
+                       stats->drop_len += skb_len_func(skb);
+                       drop_func(skb, ctx);
+                       stats->drop_count++;
+
+                       skb = dequeue_func(vars, ctx);
+                       drop = codel_should_drop(skb, ctx, vars, params,
+                                                stats, skb_len_func,
+                                                skb_time_func, backlog, now);
+               }
+               vars->dropping = true;
+               /* if min went above target close to when we last went below it
+                * assume that the drop rate that controlled the queue on the
+                * last cycle is a good starting point to control it now.
+                */
+               delta = vars->count - vars->lastcount;
+               if (delta > 1 &&
+                   codel_time_before(now - vars->drop_next,
+                                     16 * params->interval)) {
+                       vars->count = delta;
+                       /* we don't care if the rec_inv_sqrt approximation
+                        * is not very precise:
+                        * the next Newton steps will correct it quadratically.
+                        */
+                        */
+                       codel_Newton_step(vars);
+               } else {
+                       vars->count = 1;
+                       vars->rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT;
+               }
+               vars->lastcount = vars->count;
+               vars->drop_next = codel_control_law(now, params->interval,
+                                                   vars->rec_inv_sqrt);
+       }
+end:
+       if (skb && codel_time_after(vars->ldelay, params->ce_threshold) &&
+           INET_ECN_set_ce(skb))
+               stats->ce_mark++;
+       return skb;
+}
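
codel_dequeue() is deliberately queue-agnostic: everything qdisc-specific arrives through the opaque ctx pointer and the four callbacks. As a rough sketch of the glue a user supplies (names such as my_skb_len and the priv layout are invented for illustration; the in-tree users are sch_codel.c and sch_fq_codel.c):

static u32 my_skb_len(const struct sk_buff *skb)
{
	return skb->len;
}

static codel_time_t my_skb_time(const struct sk_buff *skb)
{
	/* reads the stamp set at enqueue, see codel_qdisc.h below */
	return codel_get_enqueue_time(skb);
}

static void my_drop(struct sk_buff *skb, void *ctx)
{
	struct Qdisc *sch = ctx;

	qdisc_drop(skb, sch);
}

static struct sk_buff *my_dequeue(struct codel_vars *vars, void *ctx)
{
	struct Qdisc *sch = ctx;

	return __skb_dequeue(&sch->q);
}

The qdisc's own ->dequeue() then becomes little more than:

	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars,
			    &q->stats, my_skb_len, my_skb_time, my_drop,
			    my_dequeue);

where q is the qdisc's private area holding the codel_params/vars/stats triple.
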
+
+#endif
diff --git a/include/net/codel_qdisc.h b/include/net/codel_qdisc.h
new file mode 100644 (file)
index 0000000..8144d9c
--- /dev/null
@@ -0,0 +1,73 @@
+#ifndef __NET_SCHED_CODEL_QDISC_H
+#define __NET_SCHED_CODEL_QDISC_H
+
+/*
+ * Codel - The Controlled-Delay Active Queue Management algorithm
+ *
+ *  Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
+ *  Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
+ *  Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
+ *  Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The names of the authors may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ */
+
+/* Controlling Queue Delay (CoDel) algorithm
+ * =========================================
+ * Source: Kathleen Nichols and Van Jacobson
+ * http://queue.acm.org/detail.cfm?id=2209336
+ *
+ * Implemented on Linux by Dave Taht and Eric Dumazet
+ */
+
+/* Qdiscs using the codel plugin must use codel_skb_cb in their own cb[] */
+struct codel_skb_cb {
+       codel_time_t enqueue_time;
+};
+
+static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb)
+{
+       qdisc_cb_private_validate(skb, sizeof(struct codel_skb_cb));
+       return (struct codel_skb_cb *)qdisc_skb_cb(skb)->data;
+}
+
+static codel_time_t codel_get_enqueue_time(const struct sk_buff *skb)
+{
+       return get_codel_cb(skb)->enqueue_time;
+}
+
+static void codel_set_enqueue_time(struct sk_buff *skb)
+{
+       get_codel_cb(skb)->enqueue_time = codel_get_time();
+}
+
+#endif
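
On the enqueue side the only duty is to stamp the packet; a minimal sketch (my_enqueue is a hypothetical qdisc enqueue function):

static int my_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	codel_set_enqueue_time(skb);	/* arrival time for the sojourn clock */
	return qdisc_enqueue_tail(skb, sch);
}

The skb_time_func callback handed to codel_dequeue() then reads the stamp back with codel_get_enqueue_time().
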
index c37d257891d62705ebbe9a18e3709bb31152e8ef..1d45b61cb320b979bea8acd08cf35783302bfa72 100644 (file)
@@ -24,6 +24,7 @@ struct devlink_ops;
 struct devlink {
        struct list_head list;
        struct list_head port_list;
+       struct list_head sb_list;
        const struct devlink_ops *ops;
        struct device *dev;
        possible_net_t _net;
@@ -42,6 +43,12 @@ struct devlink_port {
        u32 split_group;
 };
 
+struct devlink_sb_pool_info {
+       enum devlink_sb_pool_type pool_type;
+       u32 size;
+       enum devlink_sb_threshold_type threshold_type;
+};
+
 struct devlink_ops {
        size_t priv_size;
        int (*port_type_set)(struct devlink_port *devlink_port,
@@ -49,6 +56,40 @@ struct devlink_ops {
        int (*port_split)(struct devlink *devlink, unsigned int port_index,
                          unsigned int count);
        int (*port_unsplit)(struct devlink *devlink, unsigned int port_index);
+       int (*sb_pool_get)(struct devlink *devlink, unsigned int sb_index,
+                          u16 pool_index,
+                          struct devlink_sb_pool_info *pool_info);
+       int (*sb_pool_set)(struct devlink *devlink, unsigned int sb_index,
+                          u16 pool_index, u32 size,
+                          enum devlink_sb_threshold_type threshold_type);
+       int (*sb_port_pool_get)(struct devlink_port *devlink_port,
+                               unsigned int sb_index, u16 pool_index,
+                               u32 *p_threshold);
+       int (*sb_port_pool_set)(struct devlink_port *devlink_port,
+                               unsigned int sb_index, u16 pool_index,
+                               u32 threshold);
+       int (*sb_tc_pool_bind_get)(struct devlink_port *devlink_port,
+                                  unsigned int sb_index,
+                                  u16 tc_index,
+                                  enum devlink_sb_pool_type pool_type,
+                                  u16 *p_pool_index, u32 *p_threshold);
+       int (*sb_tc_pool_bind_set)(struct devlink_port *devlink_port,
+                                  unsigned int sb_index,
+                                  u16 tc_index,
+                                  enum devlink_sb_pool_type pool_type,
+                                  u16 pool_index, u32 threshold);
+       int (*sb_occ_snapshot)(struct devlink *devlink,
+                              unsigned int sb_index);
+       int (*sb_occ_max_clear)(struct devlink *devlink,
+                               unsigned int sb_index);
+       int (*sb_occ_port_pool_get)(struct devlink_port *devlink_port,
+                                   unsigned int sb_index, u16 pool_index,
+                                   u32 *p_cur, u32 *p_max);
+       int (*sb_occ_tc_port_bind_get)(struct devlink_port *devlink_port,
+                                      unsigned int sb_index,
+                                      u16 tc_index,
+                                      enum devlink_sb_pool_type pool_type,
+                                      u32 *p_cur, u32 *p_max);
 };
 
 static inline void *devlink_priv(struct devlink *devlink)
@@ -82,6 +123,11 @@ void devlink_port_type_ib_set(struct devlink_port *devlink_port,
 void devlink_port_type_clear(struct devlink_port *devlink_port);
 void devlink_port_split_set(struct devlink_port *devlink_port,
                            u32 split_group);
+int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
+                       u32 size, u16 ingress_pools_count,
+                       u16 egress_pools_count, u16 ingress_tc_count,
+                       u16 egress_tc_count);
+void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index);
 
 #else
 
@@ -135,6 +181,21 @@ static inline void devlink_port_split_set(struct devlink_port *devlink_port,
 {
 }
 
+static inline int devlink_sb_register(struct devlink *devlink,
+                                     unsigned int sb_index, u32 size,
+                                     u16 ingress_pools_count,
+                                     u16 egress_pools_count,
+                                     u16 ingress_tc_count,
+                                     u16 egress_tc_count)
+{
+       return 0;
+}
+
+static inline void devlink_sb_unregister(struct devlink *devlink,
+                                        unsigned int sb_index)
+{
+}
+
 #endif
 
 #endif /* _NET_DEVLINK_H_ */
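
A driver that exposes a shared buffer registers it once at init time through the helper declared above; the sizes here are purely illustrative (one 16 MB buffer with four ingress and four egress pools and eight TCs in each direction):

	err = devlink_sb_register(devlink, 0, 16 * 1024 * 1024, 4, 4, 8, 8);
	if (err)
		return err;
	...
	devlink_sb_unregister(devlink, 0);

The sb_* ops in devlink_ops then give userspace access to pool sizing, per-port thresholds, TC-to-pool bindings and, via sb_occ_snapshot()/sb_occ_max_clear(), occupancy snapshots.
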
index 18d1be3ad62d702f36f3b1238bbfa8622f788762..17c3d37b677930f1b6acac7af0afe3c68fa6d24e 100644 (file)
@@ -16,7 +16,6 @@
 #include <linux/timer.h>
 #include <linux/workqueue.h>
 #include <linux/of.h>
-#include <linux/of_gpio.h>
 #include <linux/phy.h>
 #include <linux/phy_fixed.h>
 #include <linux/ethtool.h>
@@ -65,13 +64,6 @@ struct dsa_chip_data {
         * NULL if there is only one switch chip.
         */
        s8              *rtable;
-
-       /*
-        * A switch may have a GPIO line tied to its reset pin. Parse
-        * this from the device tree, and use it before performing
-        * switch soft reset.
-        */
-       struct gpio_desc *reset;
 };
 
 struct dsa_platform_data {
@@ -110,6 +102,11 @@ struct dsa_switch_tree {
                                       struct net_device *orig_dev);
        enum dsa_tag_protocol   tag_protocol;
 
+       /*
+        * Original copy of the master netdev ethtool_ops
+        */
+       struct ethtool_ops      master_ethtool_ops;
+
        /*
         * The switch and port to which the CPU is attached.
         */
@@ -123,6 +120,8 @@ struct dsa_switch_tree {
 };
 
 struct dsa_switch {
+       struct device *dev;
+
        /*
         * Parent switch tree, and switch index.
         */
@@ -130,25 +129,21 @@ struct dsa_switch {
        int                     index;
 
        /*
-        * Tagging protocol understood by this switch
+        * Give the switch driver somewhere to hang its private data
+        * structure.
         */
-       enum dsa_tag_protocol   tag_protocol;
+       void *priv;
 
        /*
         * Configuration data for this switch.
         */
-       struct dsa_chip_data    *pd;
+       struct dsa_chip_data    *cd;
 
        /*
         * The used switch driver.
         */
        struct dsa_switch_driver        *drv;
 
-       /*
-        * Reference to host device to use.
-        */
-       struct device           *master_dev;
-
 #ifdef CONFIG_NET_DSA_HWMON
        /*
         * Hardware monitoring information
@@ -161,7 +156,7 @@ struct dsa_switch {
         * Slave mii_bus and devices for the individual ports.
         */
        u32                     dsa_port_mask;
-       u32                     phys_port_mask;
+       u32                     enabled_port_mask;
        u32                     phys_mii_mask;
        struct mii_bus          *slave_mii_bus;
        struct net_device       *ports[DSA_MAX_PORTS];
@@ -179,7 +174,7 @@ static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p)
 
 static inline bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
 {
-       return ds->phys_port_mask & (1 << p) && ds->ports[p];
+       return ds->enabled_port_mask & (1 << p) && ds->ports[p];
 }
 
 static inline u8 dsa_upstream_port(struct dsa_switch *ds)
@@ -195,7 +190,7 @@ static inline u8 dsa_upstream_port(struct dsa_switch *ds)
        if (dst->cpu_switch == ds->index)
                return dst->cpu_port;
        else
-               return ds->pd->rtable[dst->cpu_switch];
+               return ds->cd->rtable[dst->cpu_switch];
 }
 
 struct switchdev_trans;
@@ -207,12 +202,13 @@ struct dsa_switch_driver {
        struct list_head        list;
 
        enum dsa_tag_protocol   tag_protocol;
-       int                     priv_size;
 
        /*
         * Probing and setup.
         */
-       char    *(*probe)(struct device *host_dev, int sw_addr);
+       const char      *(*probe)(struct device *dsa_dev,
+                                 struct device *host_dev, int sw_addr,
+                                 void **priv);
        int     (*setup)(struct dsa_switch *ds);
        int     (*set_addr)(struct dsa_switch *ds, u8 *addr);
        u32     (*get_phy_flags)(struct dsa_switch *ds, int port);
@@ -341,7 +337,7 @@ struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev);
 
 static inline void *ds_to_priv(struct dsa_switch *ds)
 {
-       return (void *)(ds + 1);
+       return ds->priv;
 }
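
Since ds_to_priv() now just dereferences ds->priv, a driver allocates its own state in probe() and returns it through the new priv argument; a sketch with invented names (my_priv, my_drv_probe):

static const char *my_drv_probe(struct device *dsa_dev,
				struct device *host_dev, int sw_addr,
				void **priv)
{
	struct my_priv *ps;

	ps = devm_kzalloc(dsa_dev, sizeof(*ps), GFP_KERNEL);
	if (!ps)
		return NULL;		/* no switch found / no memory */

	*priv = ps;
	return "my-switch";		/* human-readable model name */
}
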
 
 static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst)
index 5c98443c1c9ef7eb035616d5815c347be211567b..6835d224d47b502fa3e396a02a71fed6130d6650 100644 (file)
@@ -85,12 +85,11 @@ struct dst_entry {
 #endif
 
 #ifdef CONFIG_64BIT
-       struct lwtunnel_state   *lwtstate;
        /*
         * Align __refcnt to a 64 bytes alignment
         * (L1_CACHE_SIZE would be too much)
         */
-       long                    __pad_to_align_refcnt[1];
+       long                    __pad_to_align_refcnt[2];
 #endif
        /*
         * __refcnt wants to be on a different cache line from
@@ -99,9 +98,7 @@ struct dst_entry {
        atomic_t                __refcnt;       /* client references    */
        int                     __use;
        unsigned long           lastuse;
-#ifndef CONFIG_64BIT
        struct lwtunnel_state   *lwtstate;
-#endif
        union {
                struct dst_entry        *next;
                struct rtable __rcu     *rt_next;
diff --git a/include/net/fq.h b/include/net/fq.h
new file mode 100644 (file)
index 0000000..268b490
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2016 Qualcomm Atheros, Inc
+ *
+ * GPL v2
+ *
+ * Based on net/sched/sch_fq_codel.c
+ */
+#ifndef __NET_SCHED_FQ_H
+#define __NET_SCHED_FQ_H
+
+struct fq_tin;
+
+/**
+ * struct fq_flow - per traffic flow queue
+ *
+ * @tin: owner of this flow. Used to manage collisions, i.e. when a packet
+ *     hashes to an index that points to a flow already owned by a tin other
+ *     than the one the packet is destined to. In such a case the implementer
+ *     must provide a fallback flow
+ * @flowchain: can be linked to fq_tin's new_flows or old_flows. Used for
+ *     DRR++ (deficit round robin) queuing, similar to the scheme found in
+ *     net/sched/sch_fq_codel.c
+ * @backlogchain: can be linked to other fq_flows and to the fq. Used to keep
+ *     track of fat flows and to allow efficient head-dropping when the
+ *     packet limit is reached
+ * @queue: sk_buff queue to hold packets
+ * @backlog: number of bytes pending in the queue. The number of packets can be
+ *     found in @queue.qlen
+ * @deficit: used for DRR++
+ */
+struct fq_flow {
+       struct fq_tin *tin;
+       struct list_head flowchain;
+       struct list_head backlogchain;
+       struct sk_buff_head queue;
+       u32 backlog;
+       int deficit;
+};
+
+/**
+ * struct fq_tin - a logical container of fq_flows
+ *
+ * Used to group fq_flows into a logical aggregate. The DRR++ scheme is used
+ * to pull interleaved packets out of the associated flows.
+ *
+ * @new_flows: linked list of fq_flow
+ * @old_flows: linked list of fq_flow
+ */
+struct fq_tin {
+       struct list_head new_flows;
+       struct list_head old_flows;
+       u32 backlog_bytes;
+       u32 backlog_packets;
+       u32 overlimit;
+       u32 collisions;
+       u32 flows;
+       u32 tx_bytes;
+       u32 tx_packets;
+};
+
+/**
+ * struct fq - main container for fair queuing purposes
+ *
+ * @backlogs: list of fq_flows kept sorted by backlog. Used to pick the
+ *     fattest flow for efficient head-dropping when @backlog reaches @limit
+ * @limit: max number of packets that can be queued across all flows
+ * @backlog: number of packets queued across all flows
+ */
+struct fq {
+       struct fq_flow *flows;
+       struct list_head backlogs;
+       spinlock_t lock;
+       u32 flows_cnt;
+       u32 perturbation;
+       u32 limit;
+       u32 quantum;
+       u32 backlog;
+       u32 overlimit;
+       u32 collisions;
+};
+
+typedef struct sk_buff *fq_tin_dequeue_t(struct fq *,
+                                        struct fq_tin *,
+                                        struct fq_flow *flow);
+
+typedef void fq_skb_free_t(struct fq *,
+                          struct fq_tin *,
+                          struct fq_flow *,
+                          struct sk_buff *);
+
+typedef struct fq_flow *fq_flow_get_default_t(struct fq *,
+                                             struct fq_tin *,
+                                             int idx,
+                                             struct sk_buff *);
+
+#endif
diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h
new file mode 100644 (file)
index 0000000..163f3ed
--- /dev/null
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2016 Qualcomm Atheros, Inc
+ *
+ * GPL v2
+ *
+ * Based on net/sched/sch_fq_codel.c
+ */
+#ifndef __NET_SCHED_FQ_IMPL_H
+#define __NET_SCHED_FQ_IMPL_H
+
+#include <net/fq.h>
+
+/* functions that are embedded into the includer */
+
+static struct sk_buff *fq_flow_dequeue(struct fq *fq,
+                                      struct fq_flow *flow)
+{
+       struct fq_tin *tin = flow->tin;
+       struct fq_flow *i;
+       struct sk_buff *skb;
+
+       lockdep_assert_held(&fq->lock);
+
+       skb = __skb_dequeue(&flow->queue);
+       if (!skb)
+               return NULL;
+
+       tin->backlog_bytes -= skb->len;
+       tin->backlog_packets--;
+       flow->backlog -= skb->len;
+       fq->backlog--;
+
+       if (flow->backlog == 0) {
+               list_del_init(&flow->backlogchain);
+       } else {
+               i = flow;
+
+               list_for_each_entry_continue(i, &fq->backlogs, backlogchain)
+                       if (i->backlog < flow->backlog)
+                               break;
+
+               list_move_tail(&flow->backlogchain,
+                              &i->backlogchain);
+       }
+
+       return skb;
+}
+
+static struct sk_buff *fq_tin_dequeue(struct fq *fq,
+                                     struct fq_tin *tin,
+                                     fq_tin_dequeue_t dequeue_func)
+{
+       struct fq_flow *flow;
+       struct list_head *head;
+       struct sk_buff *skb;
+
+       lockdep_assert_held(&fq->lock);
+
+begin:
+       head = &tin->new_flows;
+       if (list_empty(head)) {
+               head = &tin->old_flows;
+               if (list_empty(head))
+                       return NULL;
+       }
+
+       flow = list_first_entry(head, struct fq_flow, flowchain);
+
+       if (flow->deficit <= 0) {
+               flow->deficit += fq->quantum;
+               list_move_tail(&flow->flowchain,
+                              &tin->old_flows);
+               goto begin;
+       }
+
+       skb = dequeue_func(fq, tin, flow);
+       if (!skb) {
+               /* force a pass through old_flows to prevent starvation */
+               if ((head == &tin->new_flows) &&
+                   !list_empty(&tin->old_flows)) {
+                       list_move_tail(&flow->flowchain, &tin->old_flows);
+               } else {
+                       list_del_init(&flow->flowchain);
+                       flow->tin = NULL;
+               }
+               goto begin;
+       }
+
+       flow->deficit -= skb->len;
+       tin->tx_bytes += skb->len;
+       tin->tx_packets++;
+
+       return skb;
+}
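
To see the deficit accounting at work with the default quantum of 300 bytes set in fq_init() below: a newly active flow starts with a deficit of 300, so dequeuing one 1500-byte packet leaves it at -1200; the flow is then rotated to the tail of old_flows and topped up by 300 on each visit, needing five top-ups (5 * 300 = 1500 bytes of credit) before its deficit is positive and it may send again. Long term, every backlogged flow thus receives an equal byte share regardless of its packet sizes.
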
+
+static struct fq_flow *fq_flow_classify(struct fq *fq,
+                                       struct fq_tin *tin,
+                                       struct sk_buff *skb,
+                                       fq_flow_get_default_t get_default_func)
+{
+       struct fq_flow *flow;
+       u32 hash;
+       u32 idx;
+
+       lockdep_assert_held(&fq->lock);
+
+       hash = skb_get_hash_perturb(skb, fq->perturbation);
+       idx = reciprocal_scale(hash, fq->flows_cnt);
+       flow = &fq->flows[idx];
+
+       if (flow->tin && flow->tin != tin) {
+               flow = get_default_func(fq, tin, idx, skb);
+               tin->collisions++;
+               fq->collisions++;
+       }
+
+       if (!flow->tin)
+               tin->flows++;
+
+       return flow;
+}
+
+static void fq_recalc_backlog(struct fq *fq,
+                             struct fq_tin *tin,
+                             struct fq_flow *flow)
+{
+       struct fq_flow *i;
+
+       if (list_empty(&flow->backlogchain))
+               list_add_tail(&flow->backlogchain, &fq->backlogs);
+
+       i = flow;
+       list_for_each_entry_continue_reverse(i, &fq->backlogs,
+                                            backlogchain)
+               if (i->backlog > flow->backlog)
+                       break;
+
+       list_move(&flow->backlogchain, &i->backlogchain);
+}
+
+static void fq_tin_enqueue(struct fq *fq,
+                          struct fq_tin *tin,
+                          struct sk_buff *skb,
+                          fq_skb_free_t free_func,
+                          fq_flow_get_default_t get_default_func)
+{
+       struct fq_flow *flow;
+
+       lockdep_assert_held(&fq->lock);
+
+       flow = fq_flow_classify(fq, tin, skb, get_default_func);
+
+       flow->tin = tin;
+       flow->backlog += skb->len;
+       tin->backlog_bytes += skb->len;
+       tin->backlog_packets++;
+       fq->backlog++;
+
+       fq_recalc_backlog(fq, tin, flow);
+
+       if (list_empty(&flow->flowchain)) {
+               flow->deficit = fq->quantum;
+               list_add_tail(&flow->flowchain,
+                             &tin->new_flows);
+       }
+
+       __skb_queue_tail(&flow->queue, skb);
+
+       if (fq->backlog > fq->limit) {
+               flow = list_first_entry_or_null(&fq->backlogs,
+                                               struct fq_flow,
+                                               backlogchain);
+               if (!flow)
+                       return;
+
+               skb = fq_flow_dequeue(fq, flow);
+               if (!skb)
+                       return;
+
+               free_func(fq, flow->tin, flow, skb);
+
+               flow->tin->overlimit++;
+               fq->overlimit++;
+       }
+}
+
+static void fq_flow_reset(struct fq *fq,
+                         struct fq_flow *flow,
+                         fq_skb_free_t free_func)
+{
+       struct sk_buff *skb;
+
+       while ((skb = fq_flow_dequeue(fq, flow)))
+               free_func(fq, flow->tin, flow, skb);
+
+       if (!list_empty(&flow->flowchain))
+               list_del_init(&flow->flowchain);
+
+       if (!list_empty(&flow->backlogchain))
+               list_del_init(&flow->backlogchain);
+
+       flow->tin = NULL;
+
+       WARN_ON_ONCE(flow->backlog);
+}
+
+static void fq_tin_reset(struct fq *fq,
+                        struct fq_tin *tin,
+                        fq_skb_free_t free_func)
+{
+       struct list_head *head;
+       struct fq_flow *flow;
+
+       for (;;) {
+               head = &tin->new_flows;
+               if (list_empty(head)) {
+                       head = &tin->old_flows;
+                       if (list_empty(head))
+                               break;
+               }
+
+               flow = list_first_entry(head, struct fq_flow, flowchain);
+               fq_flow_reset(fq, flow, free_func);
+       }
+
+       WARN_ON_ONCE(tin->backlog_bytes);
+       WARN_ON_ONCE(tin->backlog_packets);
+}
+
+static void fq_flow_init(struct fq_flow *flow)
+{
+       INIT_LIST_HEAD(&flow->flowchain);
+       INIT_LIST_HEAD(&flow->backlogchain);
+       __skb_queue_head_init(&flow->queue);
+}
+
+static void fq_tin_init(struct fq_tin *tin)
+{
+       INIT_LIST_HEAD(&tin->new_flows);
+       INIT_LIST_HEAD(&tin->old_flows);
+}
+
+static int fq_init(struct fq *fq, int flows_cnt)
+{
+       int i;
+
+       memset(fq, 0, sizeof(fq[0]));
+       INIT_LIST_HEAD(&fq->backlogs);
+       spin_lock_init(&fq->lock);
+       fq->flows_cnt = max_t(u32, flows_cnt, 1);
+       fq->perturbation = prandom_u32();
+       fq->quantum = 300;
+       fq->limit = 8192;
+
+       fq->flows = kcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);
+       if (!fq->flows)
+               return -ENOMEM;
+
+       for (i = 0; i < fq->flows_cnt; i++)
+               fq_flow_init(&fq->flows[i]);
+
+       return 0;
+}
+
+static void fq_reset(struct fq *fq,
+                    fq_skb_free_t free_func)
+{
+       int i;
+
+       for (i = 0; i < fq->flows_cnt; i++)
+               fq_flow_reset(fq, &fq->flows[i], free_func);
+
+       kfree(fq->flows);
+       fq->flows = NULL;
+}
+
+#endif
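
Tying the pieces together, the includer embeds a struct fq, provides the three callbacks and serializes all calls under fq->lock, as the lockdep assertions require. A sketch under invented names:

static struct fq_flow my_fallback_flow;	/* fq_flow_init() it at setup */

static struct sk_buff *my_dequeue_cb(struct fq *fq, struct fq_tin *tin,
				     struct fq_flow *flow)
{
	return fq_flow_dequeue(fq, flow);
}

static void my_free_cb(struct fq *fq, struct fq_tin *tin,
		       struct fq_flow *flow, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

static struct fq_flow *my_default_cb(struct fq *fq, struct fq_tin *tin,
				     int idx, struct sk_buff *skb)
{
	return &my_fallback_flow;	/* per-tin fallback on collision */
}

	/* usage: */
	spin_lock_bh(&fq->lock);
	fq_tin_enqueue(fq, tin, skb, my_free_cb, my_default_cb);
	out = fq_tin_dequeue(fq, tin, my_dequeue_cb);
	spin_unlock_bh(&fq->lock);
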
index cbafa3768d48d1e1cae3e068e86f9f0a01813c9e..610cd397890e9cd24f15966b2b20ed39e6e19859 100644 (file)
@@ -19,17 +19,19 @@ struct gnet_dump {
        /* Backward compatibility */
        int               compat_tc_stats;
        int               compat_xstats;
+       int               padattr;
        void *            xstats;
        int               xstats_len;
        struct tc_stats   tc_stats;
 };
 
 int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
-                         struct gnet_dump *d);
+                         struct gnet_dump *d, int padattr);
 
 int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
                                 int tc_stats_type, int xstats_type,
-                                spinlock_t *lock, struct gnet_dump *d);
+                                spinlock_t *lock, struct gnet_dump *d,
+                                int padattr);
 
 int gnet_stats_copy_basic(struct gnet_dump *d,
                          struct gnet_stats_basic_cpu __percpu *cpu,
index e6c23dc765f7ecae795e9c6acb72b5ac017e04f7..cb544a530146c2d9c38486a97cb1393769f2b891 100644 (file)
@@ -62,13 +62,11 @@ struct genevehdr {
        struct geneve_opt options[];
 };
 
-#if IS_ENABLED(CONFIG_GENEVE)
-void geneve_get_rx_port(struct net_device *netdev);
-#else
 static inline void geneve_get_rx_port(struct net_device *netdev)
 {
+       ASSERT_RTNL();
+       call_netdevice_notifiers(NETDEV_OFFLOAD_PUSH_GENEVE, netdev);
 }
-#endif
 
 #ifdef CONFIG_INET
 struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
index 97eafdc47eea118f6122b4a51a790d93a3830f86..a14093c70eabbb13d925b06ef4d72d4bd696f21c 100644 (file)
@@ -25,4 +25,108 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version);
 
 struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
                                       u8 name_assign_type);
+int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
+                    bool *csum_err);
+
+static inline int gre_calc_hlen(__be16 o_flags)
+{
+       int addend = 4;
+
+       if (o_flags & TUNNEL_CSUM)
+               addend += 4;
+       if (o_flags & TUNNEL_KEY)
+               addend += 4;
+       if (o_flags & TUNNEL_SEQ)
+               addend += 4;
+       return addend;
+}
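
For example, gre_calc_hlen(TUNNEL_CSUM | TUNNEL_KEY) returns 4 (base header) + 4 (checksum word) + 4 (key) = 12 bytes; with all three of CSUM, KEY and SEQ set the GRE header is 16 bytes.
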
+
+static inline __be16 gre_flags_to_tnl_flags(__be16 flags)
+{
+       __be16 tflags = 0;
+
+       if (flags & GRE_CSUM)
+               tflags |= TUNNEL_CSUM;
+       if (flags & GRE_ROUTING)
+               tflags |= TUNNEL_ROUTING;
+       if (flags & GRE_KEY)
+               tflags |= TUNNEL_KEY;
+       if (flags & GRE_SEQ)
+               tflags |= TUNNEL_SEQ;
+       if (flags & GRE_STRICT)
+               tflags |= TUNNEL_STRICT;
+       if (flags & GRE_REC)
+               tflags |= TUNNEL_REC;
+       if (flags & GRE_VERSION)
+               tflags |= TUNNEL_VERSION;
+
+       return tflags;
+}
+
+static inline __be16 gre_tnl_flags_to_gre_flags(__be16 tflags)
+{
+       __be16 flags = 0;
+
+       if (tflags & TUNNEL_CSUM)
+               flags |= GRE_CSUM;
+       if (tflags & TUNNEL_ROUTING)
+               flags |= GRE_ROUTING;
+       if (tflags & TUNNEL_KEY)
+               flags |= GRE_KEY;
+       if (tflags & TUNNEL_SEQ)
+               flags |= GRE_SEQ;
+       if (tflags & TUNNEL_STRICT)
+               flags |= GRE_STRICT;
+       if (tflags & TUNNEL_REC)
+               flags |= GRE_REC;
+       if (tflags & TUNNEL_VERSION)
+               flags |= GRE_VERSION;
+
+       return flags;
+}
+
+static inline __sum16 gre_checksum(struct sk_buff *skb)
+{
+       __wsum csum;
+
+       if (skb->ip_summed == CHECKSUM_PARTIAL)
+               csum = lco_csum(skb);
+       else
+               csum = skb_checksum(skb, 0, skb->len, 0);
+       return csum_fold(csum);
+}
+
+static inline void gre_build_header(struct sk_buff *skb, int hdr_len,
+                                   __be16 flags, __be16 proto,
+                                   __be32 key, __be32 seq)
+{
+       struct gre_base_hdr *greh;
+
+       skb_push(skb, hdr_len);
+
+       skb_reset_transport_header(skb);
+       greh = (struct gre_base_hdr *)skb->data;
+       greh->flags = gre_tnl_flags_to_gre_flags(flags);
+       greh->protocol = proto;
+
+       if (flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)) {
+               __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
+
+               if (flags & TUNNEL_SEQ) {
+                       *ptr = seq;
+                       ptr--;
+               }
+               if (flags & TUNNEL_KEY) {
+                       *ptr = key;
+                       ptr--;
+               }
+               if (flags & TUNNEL_CSUM &&
+                   !(skb_shinfo(skb)->gso_type &
+                     (SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) {
+                       *ptr = 0;
+                       *(__sum16 *)ptr = gre_checksum(skb);
+               }
+       }
+}
+
 #endif
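
Note the backward pointer walk in gre_build_header(): ptr starts at the last 32-bit word of the pushed header, so with all three options set (hdr_len = 16) the words land on the wire as checksum, key, sequence right after the base header, matching the RFC 2890 field order. The checksum word is filled in last, and only when GRE GSO will not compute it later.
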
diff --git a/include/net/gtp.h b/include/net/gtp.h
new file mode 100644 (file)
index 0000000..894a37b
--- /dev/null
@@ -0,0 +1,34 @@
+#ifndef _GTP_H_
+#define _GTP_H_
+
+/* General GTP protocol related definitions. */
+
+#define GTP0_PORT      3386
+#define GTP1U_PORT     2152
+
+#define GTP_TPDU       255
+
+struct gtp0_header {   /* According to GSM TS 09.60. */
+       __u8    flags;
+       __u8    type;
+       __be16  length;
+       __be16  seq;
+       __be16  flow;
+       __u8    number;
+       __u8    spare[3];
+       __be64  tid;
+} __attribute__ ((packed));
+
+struct gtp1_header {   /* According to 3GPP TS 29.060. */
+       __u8    flags;
+       __u8    type;
+       __be16  length;
+       __be32  tid;
+} __attribute__ ((packed));
+
+#define GTP1_F_NPDU    0x01
+#define GTP1_F_SEQ     0x02
+#define GTP1_F_EXTHDR  0x04
+#define GTP1_F_MASK    0x07
+
+#endif
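
These definitions describe the wire format only. As a quick illustration (a hypothetical helper, not part of the patch), a receiver could classify a plain GTPv1 T-PDU like this, the version living in the top three bits of the flags octet:

static bool gtp1_is_plain_tpdu(const struct gtp1_header *gtp1)
{
	/* version 1, and none of the optional header fields present */
	return (gtp1->flags >> 5) == 1 &&
	       !(gtp1->flags & GTP1_F_MASK) &&
	       gtp1->type == GTP_TPDU;
}
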
index 970028e13382807108764f8784d9b1f11d7078d7..3ef2743a8eecc742d05b369914af22111122af4f 100644 (file)
@@ -30,9 +30,9 @@ struct icmp_err {
 
 extern const struct icmp_err icmp_err_convert[];
 #define ICMP_INC_STATS(net, field)     SNMP_INC_STATS((net)->mib.icmp_statistics, field)
-#define ICMP_INC_STATS_BH(net, field)  SNMP_INC_STATS_BH((net)->mib.icmp_statistics, field)
+#define __ICMP_INC_STATS(net, field)   __SNMP_INC_STATS((net)->mib.icmp_statistics, field)
 #define ICMPMSGOUT_INC_STATS(net, field)       SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field+256)
-#define ICMPMSGIN_INC_STATS_BH(net, field)     SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field)
+#define ICMPMSGIN_INC_STATS(net, field)                SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field)
 
 struct dst_entry;
 struct net_proto_family;
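
A note on the statistics macro churn here and in the hunks below: the _BH-suffixed SNMP/MIB macros are renamed to double-underscore variants (__ICMP_INC_STATS() and friends). The semantics are unchanged; following the usual kernel convention, the __ form is the cheaper variant for contexts where bottom halves are already disabled, while the plain form is safe anywhere.
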
index 93725e5467582232b68ea23bddaf063786d3ba4a..37165fba3741ac68e5a93a8a22473eae70361e45 100644 (file)
@@ -36,6 +36,7 @@
 struct sock;
 
 struct inet_skb_parm {
+       int                     iif;
        struct ip_options       opt;            /* Compiled IP options          */
        unsigned char           flags;
 
@@ -187,17 +188,15 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
                           unsigned int len);
 
 #define IP_INC_STATS(net, field)       SNMP_INC_STATS64((net)->mib.ip_statistics, field)
-#define IP_INC_STATS_BH(net, field)    SNMP_INC_STATS64_BH((net)->mib.ip_statistics, field)
+#define __IP_INC_STATS(net, field)     __SNMP_INC_STATS64((net)->mib.ip_statistics, field)
 #define IP_ADD_STATS(net, field, val)  SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
-#define IP_ADD_STATS_BH(net, field, val) SNMP_ADD_STATS64_BH((net)->mib.ip_statistics, field, val)
+#define __IP_ADD_STATS(net, field, val) __SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
 #define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
-#define IP_UPD_PO_STATS_BH(net, field, val) SNMP_UPD_PO_STATS64_BH((net)->mib.ip_statistics, field, val)
+#define __IP_UPD_PO_STATS(net, field, val) __SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
 #define NET_INC_STATS(net, field)      SNMP_INC_STATS((net)->mib.net_statistics, field)
-#define NET_INC_STATS_BH(net, field)   SNMP_INC_STATS_BH((net)->mib.net_statistics, field)
-#define NET_INC_STATS_USER(net, field)         SNMP_INC_STATS_USER((net)->mib.net_statistics, field)
+#define __NET_INC_STATS(net, field)    __SNMP_INC_STATS((net)->mib.net_statistics, field)
 #define NET_ADD_STATS(net, field, adnd)        SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
-#define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd)
-#define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd)
+#define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
 
 u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
 unsigned long snmp_fold_field(void __percpu *mib, int offt);
index 295d291269e2c88ed4930041597a28f7c9a7a2f7..54c779416eec654b31d278a3bd77f07b0a3e4ea8 100644 (file)
@@ -101,6 +101,9 @@ void fib6_force_start_gc(struct net *net);
 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
                                    const struct in6_addr *addr, bool anycast);
 
+struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
+                              int flags);
+
 /*
  *     support functions for ND
  *
index 499a707765ea68fa010e03716319dcef57de49fe..fb9e0153f4f247731f52d3c1ce42ee49f2bbeb08 100644 (file)
@@ -42,6 +42,7 @@ struct ip6_tnl {
        struct __ip6_tnl_parm parms;    /* tunnel configuration parameters */
        struct flowi fl;        /* flowi template for xmit */
        struct dst_cache dst_cache;     /* cached dst */
+       struct gro_cells gro_cells;
 
        int err_count;
        unsigned long err_time;
@@ -49,8 +50,10 @@ struct ip6_tnl {
        /* These fields used only by GRE */
        __u32 i_seqno;  /* The last seen seqno  */
        __u32 o_seqno;  /* The last output seqno */
-       int hlen;       /* Precalculated GRE header length */
+       int hlen;       /* tun_hlen + encap_hlen */
+       int tun_hlen;   /* Precalculated header length */
        int mlink;
+
 };
 
 /* Tunnel encapsulation limit destination sub-option */
@@ -63,13 +66,19 @@ struct ipv6_tlv_tnl_enc_lim {
 
 int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
                const struct in6_addr *raddr);
+int ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
+               const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
+               bool log_ecn_error);
 int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
                     const struct in6_addr *raddr);
+int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
+                struct flowi6 *fl6, int encap_limit, __u32 *pmtu, __u8 proto);
 __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw);
 __u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr,
                             const struct in6_addr *raddr);
 struct net *ip6_tnl_get_link_net(const struct net_device *dev);
 int ip6_tnl_get_iflink(const struct net_device *dev);
+int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu);
 
 #ifdef CONFIG_INET
 static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
index 16435d8b1f935180d42dad87e962f64575de1b48..d916b43159033f33ad8f5ad59b0434540ccfe51f 100644 (file)
@@ -105,24 +105,23 @@ struct ip_tunnel {
        struct net_device       *dev;
        struct net              *net;   /* netns for packet i/o */
 
-       int             err_count;      /* Number of arrived ICMP errors */
        unsigned long   err_time;       /* Time when the last ICMP error
                                         * arrived */
+       int             err_count;      /* Number of arrived ICMP errors */
 
        /* These four fields used only by GRE */
        u32             i_seqno;        /* The last seen seqno  */
        u32             o_seqno;        /* The last output seqno */
        int             tun_hlen;       /* Precalculated header length */
-       int             mlink;
 
        struct dst_cache dst_cache;
 
        struct ip_tunnel_parm parms;
 
+       int             mlink;
        int             encap_hlen;     /* Encap header length (FOU,GUE) */
-       struct ip_tunnel_encap encap;
-
        int             hlen;           /* tun_hlen + encap_hlen */
+       struct ip_tunnel_encap encap;
 
        /* for SIT */
 #ifdef CONFIG_IPV6_SIT_6RD
@@ -161,6 +160,7 @@ struct tnl_ptk_info {
 
 #define PACKET_RCVD    0
 #define PACKET_REJECT  1
+#define PACKET_NEXT    2
 
 #define IP_TNL_HASH_BITS   7
 #define IP_TNL_HASH_SIZE   (1 << IP_TNL_HASH_BITS)
@@ -310,7 +310,7 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
 struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
                                             gfp_t flags);
 
-struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);
+int iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);
 
 static inline int iptunnel_pull_offloads(struct sk_buff *skb)
 {
index a6cc576fd467f879054c344c24dede5010c72992..af4c10ebb2414494e75c279b3d9c91da48442982 100644 (file)
@@ -731,6 +731,12 @@ struct ip_vs_pe {
        u32 (*hashkey_raw)(const struct ip_vs_conn_param *p, u32 initval,
                           bool inverse);
        int (*show_pe_data)(const struct ip_vs_conn *cp, char *buf);
+       /* create connections for real-server outgoing packets */
+       struct ip_vs_conn* (*conn_out)(struct ip_vs_service *svc,
+                                      struct ip_vs_dest *dest,
+                                      struct sk_buff *skb,
+                                      const struct ip_vs_iphdr *iph,
+                                      __be16 dport, __be16 cport);
 };
 
 /* The application module object (a.k.a. app incarnation) */
@@ -874,6 +880,7 @@ struct netns_ipvs {
        /* Service counters */
        atomic_t                ftpsvc_counter;
        atomic_t                nullsvc_counter;
+       atomic_t                conn_out_counter;
 
 #ifdef CONFIG_SYSCTL
        /* 1/rate drop and drop-entry variables */
@@ -1147,6 +1154,12 @@ static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs)
  */
 const char *ip_vs_proto_name(unsigned int proto);
 void ip_vs_init_hash_table(struct list_head *table, int rows);
+struct ip_vs_conn *ip_vs_new_conn_out(struct ip_vs_service *svc,
+                                     struct ip_vs_dest *dest,
+                                     struct sk_buff *skb,
+                                     const struct ip_vs_iphdr *iph,
+                                     __be16 dport,
+                                     __be16 cport);
 #define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table((t), ARRAY_SIZE((t)))
 
 #define IP_VS_APP_TYPE_FTP     1
@@ -1378,6 +1391,10 @@ ip_vs_service_find(struct netns_ipvs *ipvs, int af, __u32 fwmark, __u16 protocol
 bool ip_vs_has_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol,
                            const union nf_inet_addr *daddr, __be16 dport);
 
+struct ip_vs_dest *
+ip_vs_find_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol,
+                       const union nf_inet_addr *daddr, __be16 dport);
+
 int ip_vs_use_count_inc(void);
 void ip_vs_use_count_dec(void);
 int ip_vs_register_nl_ioctl(void);
index 55ee1eb7d026df7d56b6ad73b415d912db143373..11a045281948f07e638deb4515e60eaaf8c70774 100644 (file)
@@ -121,21 +121,21 @@ struct frag_hdr {
 extern int sysctl_mld_max_msf;
 extern int sysctl_mld_qrv;
 
-#define _DEVINC(net, statname, modifier, idev, field)                  \
+#define _DEVINC(net, statname, mod, idev, field)                       \
 ({                                                                     \
        struct inet6_dev *_idev = (idev);                               \
        if (likely(_idev != NULL))                                      \
-               SNMP_INC_STATS##modifier((_idev)->stats.statname, (field)); \
-       SNMP_INC_STATS##modifier((net)->mib.statname##_statistics, (field));\
+               mod##SNMP_INC_STATS64((_idev)->stats.statname, (field));\
+       mod##SNMP_INC_STATS64((net)->mib.statname##_statistics, (field));\
 })
 
 /* per device counters are atomic_long_t */
-#define _DEVINCATOMIC(net, statname, modifier, idev, field)            \
+#define _DEVINCATOMIC(net, statname, mod, idev, field)                 \
 ({                                                                     \
        struct inet6_dev *_idev = (idev);                               \
        if (likely(_idev != NULL))                                      \
                SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \
-       SNMP_INC_STATS##modifier((net)->mib.statname##_statistics, (field));\
+       mod##SNMP_INC_STATS((net)->mib.statname##_statistics, (field));\
 })
 
 /* per device and per net counters are atomic_long_t */
@@ -147,46 +147,44 @@ extern int sysctl_mld_qrv;
        SNMP_INC_STATS_ATOMIC_LONG((net)->mib.statname##_statistics, (field));\
 })
 
-#define _DEVADD(net, statname, modifier, idev, field, val)             \
+#define _DEVADD(net, statname, mod, idev, field, val)                  \
 ({                                                                     \
        struct inet6_dev *_idev = (idev);                               \
        if (likely(_idev != NULL))                                      \
-               SNMP_ADD_STATS##modifier((_idev)->stats.statname, (field), (val)); \
-       SNMP_ADD_STATS##modifier((net)->mib.statname##_statistics, (field), (val));\
+               mod##SNMP_ADD_STATS((_idev)->stats.statname, (field), (val)); \
+       mod##SNMP_ADD_STATS((net)->mib.statname##_statistics, (field), (val));\
 })
 
-#define _DEVUPD(net, statname, modifier, idev, field, val)             \
+#define _DEVUPD(net, statname, mod, idev, field, val)                  \
 ({                                                                     \
        struct inet6_dev *_idev = (idev);                               \
        if (likely(_idev != NULL))                                      \
-               SNMP_UPD_PO_STATS##modifier((_idev)->stats.statname, field, (val)); \
-       SNMP_UPD_PO_STATS##modifier((net)->mib.statname##_statistics, field, (val));\
+               mod##SNMP_UPD_PO_STATS((_idev)->stats.statname, field, (val)); \
+       mod##SNMP_UPD_PO_STATS((net)->mib.statname##_statistics, field, (val));\
 })
 
 /* MIBs */
 
 #define IP6_INC_STATS(net, idev,field)         \
-               _DEVINC(net, ipv6, 64, idev, field)
-#define IP6_INC_STATS_BH(net, idev,field)      \
-               _DEVINC(net, ipv6, 64_BH, idev, field)
+               _DEVINC(net, ipv6, , idev, field)
+#define __IP6_INC_STATS(net, idev,field)       \
+               _DEVINC(net, ipv6, __, idev, field)
 #define IP6_ADD_STATS(net, idev,field,val)     \
-               _DEVADD(net, ipv6, 64, idev, field, val)
-#define IP6_ADD_STATS_BH(net, idev,field,val)  \
-               _DEVADD(net, ipv6, 64_BH, idev, field, val)
+               _DEVADD(net, ipv6, , idev, field, val)
+#define __IP6_ADD_STATS(net, idev,field,val)   \
+               _DEVADD(net, ipv6, __, idev, field, val)
 #define IP6_UPD_PO_STATS(net, idev,field,val)   \
-               _DEVUPD(net, ipv6, 64, idev, field, val)
-#define IP6_UPD_PO_STATS_BH(net, idev,field,val)   \
-               _DEVUPD(net, ipv6, 64_BH, idev, field, val)
+               _DEVUPD(net, ipv6, , idev, field, val)
+#define __IP6_UPD_PO_STATS(net, idev,field,val)   \
+               _DEVUPD(net, ipv6, __, idev, field, val)
 #define ICMP6_INC_STATS(net, idev, field)      \
                _DEVINCATOMIC(net, icmpv6, , idev, field)
-#define ICMP6_INC_STATS_BH(net, idev, field)   \
-               _DEVINCATOMIC(net, icmpv6, _BH, idev, field)
+#define __ICMP6_INC_STATS(net, idev, field)    \
+               _DEVINCATOMIC(net, icmpv6, __, idev, field)
 
 #define ICMP6MSGOUT_INC_STATS(net, idev, field)                \
        _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256)
-#define ICMP6MSGOUT_INC_STATS_BH(net, idev, field)     \
-       _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256)
-#define ICMP6MSGIN_INC_STATS_BH(net, idev, field)      \
+#define ICMP6MSGIN_INC_STATS(net, idev, field) \
        _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field)
 
 struct ip6_ra_chain {
@@ -253,6 +251,13 @@ struct ipv6_fl_socklist {
        struct rcu_head                 rcu;
 };
 
+struct ipcm6_cookie {
+       __s16 hlimit;
+       __s16 tclass;
+       __s8  dontfrag;
+       struct ipv6_txoptions *opt;
+};
+
 static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
 {
        struct ipv6_txoptions *opt;
@@ -865,9 +870,9 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
 int ip6_append_data(struct sock *sk,
                    int getfrag(void *from, char *to, int offset, int len,
                                int odd, struct sk_buff *skb),
-                   void *from, int length, int transhdrlen, int hlimit,
-                   int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
-                   struct rt6_info *rt, unsigned int flags, int dontfrag,
+                   void *from, int length, int transhdrlen,
+                   struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
+                   struct rt6_info *rt, unsigned int flags,
                    const struct sockcm_cookie *sockc);
 
 int ip6_push_pending_frames(struct sock *sk);
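
Call sites shrink accordingly: instead of passing hlimit, tclass, opt and dontfrag separately, they fill an ipcm6_cookie. A sketch of the new convention (the surrounding variables are assumed from a hypothetical caller; -1 keeps its historical meaning of "use the socket/route default"):

	struct ipcm6_cookie ipc6;

	ipc6.hlimit = -1;	/* hop limit from route/socket */
	ipc6.tclass = -1;	/* default traffic class */
	ipc6.dontfrag = -1;	/* fragmentation policy from socket */
	ipc6.opt = opt;

	err = ip6_append_data(sk, getfrag, from, length, transhdrlen,
			      &ipc6, &fl6, rt, flags, &sockc);
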
@@ -883,9 +888,8 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
                             int getfrag(void *from, char *to, int offset,
                                         int len, int odd, struct sk_buff *skb),
                             void *from, int length, int transhdrlen,
-                            int hlimit, int tclass, struct ipv6_txoptions *opt,
-                            struct flowi6 *fl6, struct rt6_info *rt,
-                            unsigned int flags, int dontfrag,
+                            struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
+                            struct rt6_info *rt, unsigned int flags,
                             const struct sockcm_cookie *sockc);
 
 static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
@@ -961,6 +965,8 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
 int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len);
 int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr,
                                 int addr_len);
+int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr);
+void ip6_datagram_release_cb(struct sock *sk);
 
 int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
                    int *addr_len);
index c43a9c73de5e972eeacb99ef5ae1b0a1fd2e1af0..374388dc01c8d64a4ea4220bd877aa88e3203160 100644 (file)
@@ -25,6 +25,8 @@
 
 struct l3mdev_ops {
        u32             (*l3mdev_fib_table)(const struct net_device *dev);
+       struct sk_buff * (*l3mdev_l3_rcv)(struct net_device *dev,
+                                         struct sk_buff *skb, u16 proto);
 
        /* IPv4 ops */
        struct rtable * (*l3mdev_get_rtable)(const struct net_device *dev,
@@ -130,51 +132,36 @@ static inline bool netif_index_is_l3_master(struct net *net, int ifindex)
        return rc;
 }
 
-static inline int l3mdev_get_saddr(struct net *net, int ifindex,
-                                  struct flowi4 *fl4)
-{
-       struct net_device *dev;
-       int rc = 0;
+int l3mdev_get_saddr(struct net *net, int ifindex, struct flowi4 *fl4);
 
-       if (ifindex) {
+struct dst_entry *l3mdev_get_rt6_dst(struct net *net, const struct flowi6 *fl6);
 
-               rcu_read_lock();
+static inline
+struct sk_buff *l3mdev_l3_rcv(struct sk_buff *skb, u16 proto)
+{
+       struct net_device *master = NULL;
 
-               dev = dev_get_by_index_rcu(net, ifindex);
-               if (dev && netif_is_l3_master(dev) &&
-                   dev->l3mdev_ops->l3mdev_get_saddr) {
-                       rc = dev->l3mdev_ops->l3mdev_get_saddr(dev, fl4);
-               }
+       if (netif_is_l3_slave(skb->dev))
+               master = netdev_master_upper_dev_get_rcu(skb->dev);
+       else if (netif_is_l3_master(skb->dev))
+               master = skb->dev;
 
-               rcu_read_unlock();
-       }
+       if (master && master->l3mdev_ops->l3mdev_l3_rcv)
+               skb = master->l3mdev_ops->l3mdev_l3_rcv(master, skb, proto);
 
-       return rc;
+       return skb;
 }
 
-static inline struct dst_entry *l3mdev_get_rt6_dst(const struct net_device *dev,
-                                                  const struct flowi6 *fl6)
+static inline
+struct sk_buff *l3mdev_ip_rcv(struct sk_buff *skb)
 {
-       if (netif_is_l3_master(dev) && dev->l3mdev_ops->l3mdev_get_rt6_dst)
-               return dev->l3mdev_ops->l3mdev_get_rt6_dst(dev, fl6);
-
-       return NULL;
+       return l3mdev_l3_rcv(skb, AF_INET);
 }
 
 static inline
-struct dst_entry *l3mdev_rt6_dst_by_oif(struct net *net,
-                                       const struct flowi6 *fl6)
+struct sk_buff *l3mdev_ip6_rcv(struct sk_buff *skb)
 {
-       struct dst_entry *dst = NULL;
-       struct net_device *dev;
-
-       dev = dev_get_by_index(net, fl6->flowi6_oif);
-       if (dev) {
-               dst = l3mdev_get_rt6_dst(dev, fl6);
-               dev_put(dev);
-       }
-
-       return dst;
+       return l3mdev_l3_rcv(skb, AF_INET6);
 }
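
These hooks are meant to be called early in ip_rcv()/ipv6_rcv(): if the ingress device is an L3 slave, the master (e.g. a VRF device) gets a chance to take over the skb, typically by switching skb->dev to itself, before the packet enters the IP stack; a NULL return would indicate the master consumed the packet.
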
 
 #else
@@ -233,16 +220,21 @@ static inline int l3mdev_get_saddr(struct net *net, int ifindex,
 }
 
 static inline
-struct dst_entry *l3mdev_get_rt6_dst(const struct net_device *dev,
-                                    const struct flowi6 *fl6)
+struct dst_entry *l3mdev_get_rt6_dst(struct net *net, const struct flowi6 *fl6)
 {
        return NULL;
 }
+
 static inline
-struct dst_entry *l3mdev_rt6_dst_by_oif(struct net *net,
-                                       const struct flowi6 *fl6)
+struct sk_buff *l3mdev_ip_rcv(struct sk_buff *skb)
 {
-       return NULL;
+       return skb;
+}
+
+static inline
+struct sk_buff *l3mdev_ip6_rcv(struct sk_buff *skb)
+{
+       return skb;
 }
 #endif
 
index a3ee7655979161c65d04eb80cb441f3de83de1b9..07ef9378df2be78dc6617e8d4e02dad405c0f6a7 100644 (file)
@@ -549,7 +549,7 @@ struct ieee80211_bss_conf {
        u8 sync_dtim_count;
        u32 basic_rates;
        struct ieee80211_rate *beacon_rate;
-       int mcast_rate[IEEE80211_NUM_BANDS];
+       int mcast_rate[NUM_NL80211_BANDS];
        u16 ht_operation_mode;
        s32 cqm_rssi_thold;
        u32 cqm_rssi_hyst;
@@ -938,8 +938,8 @@ struct ieee80211_tx_info {
  * @common_ie_len: length of the common_ies
  */
 struct ieee80211_scan_ies {
-       const u8 *ies[IEEE80211_NUM_BANDS];
-       size_t len[IEEE80211_NUM_BANDS];
+       const u8 *ies[NUM_NL80211_BANDS];
+       size_t len[NUM_NL80211_BANDS];
        const u8 *common_ies;
        size_t common_ie_len;
 };
@@ -1007,6 +1007,8 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
  *     flag indicates that the PN was verified for replay protection.
  *     Note that this flag is also currently only supported when a frame
  *     is also decrypted (i.e. @RX_FLAG_DECRYPTED must be set)
+ * @RX_FLAG_DUP_VALIDATED: The driver should set this flag if it did
+ *     de-duplication by itself.
  * @RX_FLAG_FAILED_FCS_CRC: Set this flag if the FCS check failed on
  *     the frame.
  * @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PLCP check failed on
@@ -1752,7 +1754,7 @@ struct ieee80211_sta_rates {
  * @txq: per-TID data TX queues (if driver uses the TXQ abstraction)
  */
 struct ieee80211_sta {
-       u32 supp_rates[IEEE80211_NUM_BANDS];
+       u32 supp_rates[NUM_NL80211_BANDS];
        u8 addr[ETH_ALEN];
        u16 aid;
        struct ieee80211_sta_ht_cap ht_cap;
@@ -4402,7 +4404,7 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
  */
 __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
                                        struct ieee80211_vif *vif,
-                                       enum ieee80211_band band,
+                                       enum nl80211_band band,
                                        size_t frame_len,
                                        struct ieee80211_rate *rate);
 
@@ -5355,7 +5357,7 @@ struct rate_control_ops {
 };
 
 static inline int rate_supported(struct ieee80211_sta *sta,
-                                enum ieee80211_band band,
+                                enum nl80211_band band,
                                 int index)
 {
        return (sta == NULL || sta->supp_rates[band] & BIT(index));
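
All of the mac80211 hunks above are the same mechanical rename: the mac80211-private enum ieee80211_band gives way to the cfg80211 enum nl80211_band, so per-band arrays are sized with NUM_NL80211_BANDS. A minimal sketch of the driver-side idiom after the rename, assuming a struct ieee80211_hw *hw in scope:

	enum nl80211_band band;

	for (band = 0; band < NUM_NL80211_BANDS; band++) {
		struct ieee80211_supported_band *sband;

		sband = hw->wiphy->bands[band];	/* may be NULL */
		if (!sband)
			continue;
		/* ... per-band setup ... */
	}
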
index 6cd7a70706a90a0427920fa2820c5493a2d3f9fb..e465c8551ac3c08061406b521ce13be4c80fdbbc 100644 (file)
@@ -287,6 +287,16 @@ static inline void ieee802154_le16_to_be16(void *be16_dst, const void *le16_src)
        put_unaligned_be16(get_unaligned_le16(le16_src), be16_dst);
 }
 
+/**
+ * ieee802154_be16_to_le16 - copies and converts be16 to le16
+ * @le16_dst: le16 destination pointer
+ * @be16_src: be16 source pointer
+ */
+static inline void ieee802154_be16_to_le16(void *le16_dst, const void *be16_src)
+{
+       put_unaligned_le16(get_unaligned_be16(be16_src), le16_dst);
+}
+
 /**
  * ieee802154_alloc_hw - Allocate a new hardware device
  *
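
The new helper is the mirror image of ieee802154_le16_to_be16() shown in context above; both take void pointers, so they work on unaligned buffers. A small round-trip sketch (values illustrative):

	u8 wire[2];			/* little-endian on-air format */
	__be16 host = cpu_to_be16(0xabcd);

	ieee802154_be16_to_le16(wire, &host);	/* wire[] = cd ab */
	ieee802154_le16_to_be16(&host, wire);	/* back to big endian */
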
index fde4068eec0b2963ca7155503ecae631e925fba9..dd78bea227c8b0baf2fd14eb878f26f265ac76e0 100644 (file)
@@ -289,8 +289,6 @@ struct kernel_param;
 int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
 extern unsigned int nf_conntrack_htable_size;
 extern unsigned int nf_conntrack_max;
-extern unsigned int nf_conntrack_hash_rnd;
-void init_nf_conntrack_hash_rnd(void);
 
 struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
                                 const struct nf_conntrack_zone *zone,
index 62e17d1319ff7423dbcf176815f04cecfcdf3371..3e2f3328945cca94b94411187cec58ee3cb4e9ae 100644 (file)
@@ -81,6 +81,7 @@ print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
 
 #define CONNTRACK_LOCKS 1024
 
+extern struct hlist_nulls_head *nf_conntrack_hash;
 extern spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
 void nf_conntrack_lock(spinlock_t *lock);
 
index 57c880378443d42d1e568f717922434afeb00974..fa36447371c648e5a3f15942216d7c6fe153a639 100644 (file)
@@ -73,6 +73,8 @@ void nf_conntrack_unregister_notifier(struct net *net,
                                      struct nf_ct_event_notifier *nb);
 
 void nf_ct_deliver_cached_events(struct nf_conn *ct);
+int nf_conntrack_eventmask_report(unsigned int eventmask, struct nf_conn *ct,
+                                 u32 portid, int report);
 
 static inline void
 nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
@@ -90,70 +92,26 @@ nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
        set_bit(event, &e->cache);
 }
 
-static inline int
-nf_conntrack_eventmask_report(unsigned int eventmask,
-                             struct nf_conn *ct,
-                             u32 portid,
-                             int report)
-{
-       int ret = 0;
-       struct net *net = nf_ct_net(ct);
-       struct nf_ct_event_notifier *notify;
-       struct nf_conntrack_ecache *e;
-
-       rcu_read_lock();
-       notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
-       if (notify == NULL)
-               goto out_unlock;
-
-       e = nf_ct_ecache_find(ct);
-       if (e == NULL)
-               goto out_unlock;
-
-       if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct)) {
-               struct nf_ct_event item = {
-                       .ct     = ct,
-                       .portid = e->portid ? e->portid : portid,
-                       .report = report
-               };
-               /* This is a resent of a destroy event? If so, skip missed */
-               unsigned long missed = e->portid ? 0 : e->missed;
-
-               if (!((eventmask | missed) & e->ctmask))
-                       goto out_unlock;
-
-               ret = notify->fcn(eventmask | missed, &item);
-               if (unlikely(ret < 0 || missed)) {
-                       spin_lock_bh(&ct->lock);
-                       if (ret < 0) {
-                               /* This is a destroy event that has been
-                                * triggered by a process, we store the PORTID
-                                * to include it in the retransmission. */
-                               if (eventmask & (1 << IPCT_DESTROY) &&
-                                   e->portid == 0 && portid != 0)
-                                       e->portid = portid;
-                               else
-                                       e->missed |= eventmask;
-                       } else
-                               e->missed &= ~missed;
-                       spin_unlock_bh(&ct->lock);
-               }
-       }
-out_unlock:
-       rcu_read_unlock();
-       return ret;
-}
-
 static inline int
 nf_conntrack_event_report(enum ip_conntrack_events event, struct nf_conn *ct,
                          u32 portid, int report)
 {
+       const struct net *net = nf_ct_net(ct);
+
+       if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
+               return 0;
+
        return nf_conntrack_eventmask_report(1 << event, ct, portid, report);
 }
 
 static inline int
 nf_conntrack_event(enum ip_conntrack_events event, struct nf_conn *ct)
 {
+       const struct net *net = nf_ct_net(ct);
+
+       if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
+               return 0;
+
        return nf_conntrack_eventmask_report(1 << event, ct, 0, 0);
 }
 
@@ -172,43 +130,9 @@ int nf_ct_expect_register_notifier(struct net *net,
 void nf_ct_expect_unregister_notifier(struct net *net,
                                      struct nf_exp_event_notifier *nb);
 
-static inline void
-nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
-                         struct nf_conntrack_expect *exp,
-                         u32 portid,
-                         int report)
-{
-       struct net *net = nf_ct_exp_net(exp);
-       struct nf_exp_event_notifier *notify;
-       struct nf_conntrack_ecache *e;
-
-       rcu_read_lock();
-       notify = rcu_dereference(net->ct.nf_expect_event_cb);
-       if (notify == NULL)
-               goto out_unlock;
-
-       e = nf_ct_ecache_find(exp->master);
-       if (e == NULL)
-               goto out_unlock;
-
-       if (e->expmask & (1 << event)) {
-               struct nf_exp_event item = {
-                       .exp    = exp,
-                       .portid = portid,
-                       .report = report
-               };
-               notify->fcn(1 << event, &item);
-       }
-out_unlock:
-       rcu_read_unlock();
-}
-
-static inline void
-nf_ct_expect_event(enum ip_conntrack_expect_events event,
-                  struct nf_conntrack_expect *exp)
-{
-       nf_ct_expect_event_report(event, exp, 0, 0);
-}
+void nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
+                              struct nf_conntrack_expect *exp,
+                              u32 portid, int report);
 
 int nf_conntrack_ecache_pernet_init(struct net *net);
 void nf_conntrack_ecache_pernet_fini(struct net *net);
@@ -245,8 +169,6 @@ static inline int nf_conntrack_event_report(enum ip_conntrack_events event,
                                            u32 portid,
                                            int report) { return 0; }
 static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct) {}
-static inline void nf_ct_expect_event(enum ip_conntrack_expect_events event,
-                                     struct nf_conntrack_expect *exp) {}
 static inline void nf_ct_expect_event_report(enum ip_conntrack_expect_events e,
                                             struct nf_conntrack_expect *exp,
                                             u32 portid,
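
The two event wrappers above keep only a cheap fast path inline: rcu_access_pointer() tests whether a notifier is registered at all without dereferencing the pointer, so no rcu_read_lock() is needed before bailing out, and the now out-of-line nf_conntrack_eventmask_report() does the locked dereference. The same presence-check pattern in isolation (sketch, example_ name illustrative):

static inline bool example_has_event_listener(const struct net *net)
{
	/* No dereference, hence no RCU read-side critical section. */
	return rcu_access_pointer(net->ct.nf_conntrack_event_cb) != NULL;
}
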
index dce56f09ac9aed9c0f7d4a1d99acc9602e33ea5f..5ed33ea4718ef5a101689432825ebb1a48fcc0d9 100644 (file)
@@ -10,6 +10,7 @@
 
 extern unsigned int nf_ct_expect_hsize;
 extern unsigned int nf_ct_expect_max;
+extern struct hlist_head *nf_ct_expect_hash;
 
 struct nf_conntrack_expect {
        /* Conntrack expectation list member */
index 956d8a6ac06935bc151a74ead1090268378eff70..1a5fb36f165fd699e93a52c186f65a14615d0f42 100644 (file)
@@ -23,6 +23,9 @@ struct nf_conntrack_l4proto {
        /* L4 Protocol number. */
        u_int8_t l4proto;
 
+       /* Resolve clashes on insertion races. */
+       bool allow_clash;
+
        /* Try to fill in the third arg: dataoff is offset past network protocol
            hdr.  Return true if possible. */
        bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int dataoff,
index 7e2b1d025f5037fc764a8376693680546c3835f6..c5f8fc736b3d55366e4886e78b41cc24eadd9954 100644 (file)
@@ -45,7 +45,6 @@ static inline struct nf_conn_labels *nf_ct_labels_ext_add(struct nf_conn *ct)
 #endif
 }
 
-bool nf_connlabel_match(const struct nf_conn *ct, u16 bit);
 int nf_connlabel_set(struct nf_conn *ct, u16 bit);
 
 int nf_connlabels_replace(struct nf_conn *ct,
@@ -54,11 +53,11 @@ int nf_connlabels_replace(struct nf_conn *ct,
 #ifdef CONFIG_NF_CONNTRACK_LABELS
 int nf_conntrack_labels_init(void);
 void nf_conntrack_labels_fini(void);
-int nf_connlabels_get(struct net *net, unsigned int n_bits);
+int nf_connlabels_get(struct net *net, unsigned int bit);
 void nf_connlabels_put(struct net *net);
 #else
 static inline int nf_conntrack_labels_init(void) { return 0; }
 static inline void nf_conntrack_labels_fini(void) {}
-static inline int nf_connlabels_get(struct net *net, unsigned int n_bits) { return 0; }
+static inline int nf_connlabels_get(struct net *net, unsigned int bit) { return 0; }
 static inline void nf_connlabels_put(struct net *net) {}
 #endif
index f6b1daf2e69871fa26bd18d56225c0af06017ff1..092235458691fd5eadc09cab3266d100f7140dc6 100644 (file)
@@ -303,7 +303,7 @@ void nft_unregister_set(struct nft_set_ops *ops);
 struct nft_set {
        struct list_head                list;
        struct list_head                bindings;
-       char                            name[IFNAMSIZ];
+       char                            name[NFT_SET_MAXNAMELEN];
        u32                             ktype;
        u32                             dtype;
        u32                             size;
index 0e31727517556dabdc929c496e9a5e6c995de068..e589cb3dcceede6d4bc6e33bdc5b30830a06ba4d 100644 (file)
  *   nla_put_s8(skb, type, value)      add s8 attribute to skb
  *   nla_put_s16(skb, type, value)     add s16 attribute to skb
  *   nla_put_s32(skb, type, value)     add s32 attribute to skb
- *   nla_put_s64(skb, type, value)     add s64 attribute to skb
+ *   nla_put_s64(skb, type, value,
+ *               padattr)              add s64 attribute to skb
  *   nla_put_string(skb, type, str)    add string attribute to skb
  *   nla_put_flag(skb, type)           add flag attribute to skb
- *   nla_put_msecs(skb, type, jiffies) add msecs attribute to skb
+ *   nla_put_msecs(skb, type, jiffies,
+ *                 padattr)            add msecs attribute to skb
  *   nla_put_in_addr(skb, type, addr)  add IPv4 address attribute to skb
  *   nla_put_in6_addr(skb, type, addr) add IPv6 address attribute to skb
  *
@@ -244,13 +246,21 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count);
 int nla_memcmp(const struct nlattr *nla, const void *data, size_t size);
 int nla_strcmp(const struct nlattr *nla, const char *str);
 struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
+struct nlattr *__nla_reserve_64bit(struct sk_buff *skb, int attrtype,
+                                  int attrlen, int padattr);
 void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
 struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
+struct nlattr *nla_reserve_64bit(struct sk_buff *skb, int attrtype,
+                                int attrlen, int padattr);
 void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
 void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
               const void *data);
+void __nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
+                    const void *data, int padattr);
 void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data);
 int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data);
+int nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
+                 const void *data, int padattr);
 int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data);
 int nla_append(struct sk_buff *skb, int attrlen, const void *data);
 
@@ -848,36 +858,56 @@ static inline int nla_put_u64(struct sk_buff *skb, int attrtype, u64 value)
 }
 
 /**
- * nla_put_be64 - Add a __be64 netlink attribute to a socket buffer
+ * nla_put_u64_64bit - Add a u64 netlink attribute to a skb and align it
  * @skb: socket buffer to add attribute to
  * @attrtype: attribute type
  * @value: numeric value
+ * @padattr: attribute type for the padding
  */
-static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value)
+static inline int nla_put_u64_64bit(struct sk_buff *skb, int attrtype,
+                                   u64 value, int padattr)
 {
-       return nla_put(skb, attrtype, sizeof(__be64), &value);
+       return nla_put_64bit(skb, attrtype, sizeof(u64), &value, padattr);
 }
 
 /**
- * nla_put_net64 - Add 64-bit network byte order netlink attribute to a socket buffer
+ * nla_put_be64 - Add a __be64 netlink attribute to a socket buffer and align it
  * @skb: socket buffer to add attribute to
  * @attrtype: attribute type
  * @value: numeric value
+ * @padattr: attribute type for the padding
  */
-static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value)
+static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value,
+                              int padattr)
 {
-       return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, value);
+       return nla_put_64bit(skb, attrtype, sizeof(__be64), &value, padattr);
 }
 
 /**
- * nla_put_le64 - Add a __le64 netlink attribute to a socket buffer
+ * nla_put_net64 - Add 64-bit network byte order nlattr to a skb and align it
  * @skb: socket buffer to add attribute to
  * @attrtype: attribute type
  * @value: numeric value
+ * @padattr: attribute type for the padding
  */
-static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value)
+static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value,
+                               int padattr)
 {
-       return nla_put(skb, attrtype, sizeof(__le64), &value);
+       return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, value,
+                           padattr);
+}
+
+/**
+ * nla_put_le64 - Add a __le64 netlink attribute to a socket buffer and align it
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ * @padattr: attribute type for the padding
+ */
+static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value,
+                              int padattr)
+{
+       return nla_put_64bit(skb, attrtype, sizeof(__le64), &value, padattr);
 }
 
 /**
@@ -914,14 +944,16 @@ static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
 }
 
 /**
- * nla_put_s64 - Add a s64 netlink attribute to a socket buffer
+ * nla_put_s64 - Add a s64 netlink attribute to a socket buffer and align it
  * @skb: socket buffer to add attribute to
  * @attrtype: attribute type
  * @value: numeric value
+ * @padattr: attribute type for the padding
  */
-static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value)
+static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value,
+                             int padattr)
 {
-       return nla_put(skb, attrtype, sizeof(s64), &value);
+       return nla_put_64bit(skb, attrtype, sizeof(s64), &value, padattr);
 }
 
 /**
@@ -947,16 +979,18 @@ static inline int nla_put_flag(struct sk_buff *skb, int attrtype)
 }
 
 /**
- * nla_put_msecs - Add a msecs netlink attribute to a socket buffer
+ * nla_put_msecs - Add a msecs netlink attribute to a skb and align it
  * @skb: socket buffer to add attribute to
  * @attrtype: attribute type
  * @njiffies: number of jiffies to convert to msecs
+ * @padattr: attribute type for the padding
  */
 static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
-                               unsigned long njiffies)
+                               unsigned long njiffies, int padattr)
 {
        u64 tmp = jiffies_to_msecs(njiffies);
-       return nla_put(skb, attrtype, sizeof(u64), &tmp);
+
+       return nla_put_64bit(skb, attrtype, sizeof(u64), &tmp, padattr);
 }
 
 /**
@@ -1230,6 +1264,61 @@ static inline int nla_validate_nested(const struct nlattr *start, int maxtype,
        return nla_validate(nla_data(start), nla_len(start), maxtype, policy);
 }
 
+/**
+ * nla_need_padding_for_64bit - test 64-bit alignment of the next attribute
+ * @skb: socket buffer the message is stored in
+ *
+ * Return true if padding is needed to align the next attribute (nla_data()) to
+ * a 64-bit aligned area.
+ */
+static inline bool nla_need_padding_for_64bit(struct sk_buff *skb)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+       /* The nlattr header is 4 bytes in size; that's why we test
+        * whether the skb tail _is_ aligned.  A NOP attribute, plus the
+        * nlattr header for the next attribute, will make nla_data()
+        * 8-byte aligned.
+        */
+       if (IS_ALIGNED((unsigned long)skb_tail_pointer(skb), 8))
+               return true;
+#endif
+       return false;
+}
+
+/**
+ * nla_align_64bit - 64-bit align the nla_data() of next attribute
+ * @skb: socket buffer the message is stored in
+ * @padattr: attribute type for the padding
+ *
+ * Conditionally emit a padding netlink attribute in order to make
+ * the next attribute we emit have a 64-bit aligned nla_data() area.
+ * This will only be done on architectures that do not have
+ * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS defined.
+ *
+ * Returns zero on success or a negative error code.
+ */
+static inline int nla_align_64bit(struct sk_buff *skb, int padattr)
+{
+       if (nla_need_padding_for_64bit(skb) &&
+           !nla_reserve(skb, padattr, 0))
+               return -EMSGSIZE;
+
+       return 0;
+}
+
+/**
+ * nla_total_size_64bit - total length of attribute including padding
+ * @payload: length of payload
+ */
+static inline int nla_total_size_64bit(int payload)
+{
+       return NLA_ALIGN(nla_attr_size(payload))
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+               + NLA_ALIGN(nla_attr_size(0))
+#endif
+               ;
+}
+
 /**
  * nla_for_each_attr - iterate over a stream of attributes
  * @pos: loop counter, set to current attribute
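
Putting the new 64-bit helpers together: a dump callback reserves room with nla_total_size_64bit() and emits the value with nla_put_u64_64bit(), naming a dedicated padding attribute type that parsers must ignore. A sketch with hypothetical EXAMPLE_ATTR_* names (they are not attributes from this diff):

static int example_fill_stats(struct sk_buff *skb, u64 packets)
{
	/* Message sizing elsewhere would use
	 *	nla_total_size_64bit(sizeof(u64))
	 * which already accounts for the optional padding attribute.
	 */
	if (nla_put_u64_64bit(skb, EXAMPLE_ATTR_STATS, packets,
			      EXAMPLE_ATTR_PAD))
		return -EMSGSIZE;
	return 0;
}
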
index 723b61c82b3f444aee0e7591b8028cb2548e228e..38b1a80517f07152a25a1784bb95b637a9376ffd 100644 (file)
@@ -84,7 +84,6 @@ struct netns_ct {
        struct ctl_table_header *event_sysctl_header;
        struct ctl_table_header *helper_sysctl_header;
 #endif
-       char                    *slabname;
        unsigned int            sysctl_log_invalid; /* Log invalid packets */
        int                     sysctl_events;
        int                     sysctl_acct;
@@ -93,11 +92,6 @@ struct netns_ct {
        int                     sysctl_tstamp;
        int                     sysctl_checksum;
 
-       unsigned int            htable_size;
-       seqcount_t              generation;
-       struct kmem_cache       *nf_conntrack_cachep;
-       struct hlist_nulls_head *hash;
-       struct hlist_head       *expect_hash;
        struct ct_pcpu __percpu *pcpu_lists;
        struct ip_conntrack_stat __percpu *stat;
        struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
@@ -107,9 +101,5 @@ struct netns_ct {
        unsigned int            labels_used;
        u8                      label_words;
 #endif
-#ifdef CONFIG_NF_NAT_NEEDED
-       struct hlist_head       *nat_bysource;
-       unsigned int            nat_htable_size;
-#endif
 };
 #endif
index a69cde3ce4608879ee3e4056d22ce512eb0c3ba5..d061ffeb1e71204c490abed907004804036030e0 100644 (file)
@@ -132,6 +132,9 @@ struct netns_ipv4 {
        struct list_head        mr_tables;
        struct fib_rules_ops    *mr_rules_ops;
 #endif
+#endif
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+       int sysctl_fib_multipath_use_neigh;
 #endif
        atomic_t        rt_genid;
 };
index 730d82ad6ee521beddc3af344d8e99a339f74dd5..24cd3949a9a4f04f78ff31ce681d71833f1bbe54 100644 (file)
@@ -80,6 +80,7 @@ struct netns_xfrm {
        struct flow_cache       flow_cache_global;
        atomic_t                flow_cache_genid;
        struct list_head        flow_cache_gc_list;
+       atomic_t                flow_cache_gc_count;
        spinlock_t              flow_cache_gc_lock;
        struct work_struct      flow_cache_gc_work;
        struct work_struct      flow_cache_flush_work;
index 32cb3e591e07b6a936a14788952c737c9bb277bd..fcab4de49951e91515ee097401815e93ae612755 100644 (file)
@@ -138,6 +138,8 @@ enum nl802154_attrs {
        NL802154_ATTR_SEC_KEY,
 #endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
 
+       NL802154_ATTR_PAD,
+
        __NL802154_ATTR_AFTER_LAST,
        NL802154_ATTR_MAX = __NL802154_ATTR_AFTER_LAST - 1
 };
@@ -295,6 +297,7 @@ enum nl802154_dev_addr_attrs {
        NL802154_DEV_ADDR_ATTR_MODE,
        NL802154_DEV_ADDR_ATTR_SHORT,
        NL802154_DEV_ADDR_ATTR_EXTENDED,
+       NL802154_DEV_ADDR_ATTR_PAD,
 
        /* keep last */
        __NL802154_DEV_ADDR_ATTR_AFTER_LAST,
@@ -320,6 +323,7 @@ enum nl802154_key_id_attrs {
        NL802154_KEY_ID_ATTR_IMPLICIT,
        NL802154_KEY_ID_ATTR_SOURCE_SHORT,
        NL802154_KEY_ID_ATTR_SOURCE_EXTENDED,
+       NL802154_KEY_ID_ATTR_PAD,
 
        /* keep last */
        __NL802154_KEY_ID_ATTR_AFTER_LAST,
@@ -402,6 +406,7 @@ enum nl802154_dev {
        NL802154_DEV_ATTR_EXTENDED_ADDR,
        NL802154_DEV_ATTR_SECLEVEL_EXEMPT,
        NL802154_DEV_ATTR_KEY_MODE,
+       NL802154_DEV_ATTR_PAD,
 
        /* keep last */
        __NL802154_DEV_ATTR_AFTER_LAST,
@@ -414,6 +419,7 @@ enum nl802154_devkey {
        NL802154_DEVKEY_ATTR_FRAME_COUNTER,
        NL802154_DEVKEY_ATTR_EXTENDED_ADDR,
        NL802154_DEVKEY_ATTR_ID,
+       NL802154_DEVKEY_ATTR_PAD,
 
        /* keep last */
        __NL802154_DEVKEY_ATTR_AFTER_LAST,
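
These *_PAD enum values exist so that 64-bit attributes at each nesting level have a padding type to hand to the netlink helpers above. A sketch of the intended call, using attributes that do appear in this header (msg and extended_addr assumed in scope):

	if (nla_put_le64(msg, NL802154_DEV_ATTR_EXTENDED_ADDR,
			 extended_addr, NL802154_DEV_ATTR_PAD))
		goto nla_put_failure;
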
index f4b11eee1754a78c76f15b76537b295f37ae6ac3..ad777d79af945877a9a8214748f5cd6b28824028 100644 (file)
@@ -209,6 +209,9 @@ unsigned int inet_addr_type_dev_table(struct net *net,
 void ip_rt_multicast_event(struct in_device *);
 int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg);
 void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
+struct rtable *rt_dst_alloc(struct net_device *dev,
+                            unsigned int flags, u16 type,
+                            bool nopolicy, bool noxfrm, bool will_cache);
 
 struct in_ifaddr;
 void fib_add_ifaddr(struct in_ifaddr *);
index 2f87c1ba13de639df7b733ad3905c7e47b8d901b..006a7b81d758a29119a3d7e6458db15f83aa66e6 100644 (file)
@@ -47,6 +47,9 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
  *     @get_num_rx_queues: Function to determine number of receive queues
  *                         to create when creating a new device.
  *     @get_link_net: Function to get the i/o netns of the device
+ *     @get_linkxstats_size: Function to calculate the required room for
+ *                           dumping device-specific extended link stats
+ *     @fill_linkxstats: Function to dump device-specific extended link stats
  */
 struct rtnl_link_ops {
        struct list_head        list;
@@ -95,6 +98,10 @@ struct rtnl_link_ops {
                                                   const struct net_device *dev,
                                                   const struct net_device *slave_dev);
        struct net              *(*get_link_net)(const struct net_device *dev);
+       size_t                  (*get_linkxstats_size)(const struct net_device *dev);
+       int                     (*fill_linkxstats)(struct sk_buff *skb,
+                                                  const struct net_device *dev,
+                                                  int *prividx);
 };
 
 int __rtnl_link_register(struct rtnl_link_ops *ops);
index 65521cfdcadeee35d61f280165a387cc2164ab6d..b392ac8382f2bf0be118f797a4444cc0eb4ddeb5 100644 (file)
@@ -116,6 +116,22 @@ extern struct percpu_counter sctp_sockets_allocated;
 int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
 struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
 
+int sctp_transport_walk_start(struct rhashtable_iter *iter);
+void sctp_transport_walk_stop(struct rhashtable_iter *iter);
+struct sctp_transport *sctp_transport_get_next(struct net *net,
+                       struct rhashtable_iter *iter);
+struct sctp_transport *sctp_transport_get_idx(struct net *net,
+                       struct rhashtable_iter *iter, int pos);
+int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
+                                 struct net *net,
+                                 const union sctp_addr *laddr,
+                                 const union sctp_addr *paddr, void *p);
+int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
+                           struct net *net, int pos, void *p);
+int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), void *p);
+int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
+                      struct sctp_info *info);
+
 /*
  * sctp/primitive.c
  */
@@ -189,10 +205,9 @@ extern int sysctl_sctp_wmem[3];
  */
 
 /* SCTP SNMP MIB stats handlers */
-#define SCTP_INC_STATS(net, field)      SNMP_INC_STATS((net)->sctp.sctp_statistics, field)
-#define SCTP_INC_STATS_BH(net, field)   SNMP_INC_STATS_BH((net)->sctp.sctp_statistics, field)
-#define SCTP_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)->sctp.sctp_statistics, field)
-#define SCTP_DEC_STATS(net, field)      SNMP_DEC_STATS((net)->sctp.sctp_statistics, field)
+#define SCTP_INC_STATS(net, field)     SNMP_INC_STATS((net)->sctp.sctp_statistics, field)
+#define __SCTP_INC_STATS(net, field)   __SNMP_INC_STATS((net)->sctp.sctp_statistics, field)
+#define SCTP_DEC_STATS(net, field)     SNMP_DEC_STATS((net)->sctp.sctp_statistics, field)
 
 /* sctp mib definitions */
 enum {
@@ -359,21 +374,6 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp);
 #define sctp_skb_for_each(pos, head, tmp) \
        skb_queue_walk_safe(head, pos, tmp)
 
-/* A helper to append an entire skb list (list) to another (head). */
-static inline void sctp_skb_list_tail(struct sk_buff_head *list,
-                                     struct sk_buff_head *head)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&head->lock, flags);
-       spin_lock(&list->lock);
-
-       skb_queue_splice_tail_init(list, head);
-
-       spin_unlock(&list->lock);
-       spin_unlock_irqrestore(&head->lock, flags);
-}
-
 /**
  *     sctp_list_dequeue - remove from the head of the queue
  *     @list: list to dequeue from
@@ -386,11 +386,9 @@ static inline struct list_head *sctp_list_dequeue(struct list_head *list)
 {
        struct list_head *result = NULL;
 
-       if (list->next != list) {
+       if (!list_empty(list)) {
                result = list->next;
-               list->next = result->next;
-               list->next->prev = list;
-               INIT_LIST_HEAD(result);
+               list_del_init(result);
        }
        return result;
 }
index 6df1ce7a411c548bda4163840a90578b6e1b4cfe..16b013a6191cf1c416e4dd1aeb1707a8569ea49b 100644 (file)
@@ -210,14 +210,15 @@ struct sctp_sock {
        int user_frag;
 
        __u32 autoclose;
-       __u8 nodelay;
-       __u8 disable_fragments;
-       __u8 v4mapped;
-       __u8 frag_interleave;
        __u32 adaptation_ind;
        __u32 pd_point;
-       __u8 recvrcvinfo;
-       __u8 recvnxtinfo;
+       __u16   nodelay:1,
+               disable_fragments:1,
+               v4mapped:1,
+               frag_interleave:1,
+               recvrcvinfo:1,
+               recvnxtinfo:1,
+               data_ready_signalled:1;
 
        atomic_t pd_mode;
        /* Receive to here while partial delivery is in effect. */
@@ -847,6 +848,11 @@ struct sctp_transport {
         */
        ktime_t last_time_heard;
 
+       /* When was the last time that we sent a chunk using this
+        * transport? We use this to check for idle transports
+        */
+       unsigned long last_time_sent;
+
        /* Last time(in jiffies) when cwnd is reduced due to the congestion
         * indication based on ECNE chunk.
         */
@@ -952,7 +958,8 @@ void sctp_transport_route(struct sctp_transport *, union sctp_addr *,
                          struct sctp_sock *);
 void sctp_transport_pmtu(struct sctp_transport *, struct sock *sk);
 void sctp_transport_free(struct sctp_transport *);
-void sctp_transport_reset_timers(struct sctp_transport *);
+void sctp_transport_reset_t3_rtx(struct sctp_transport *);
+void sctp_transport_reset_hb_timer(struct sctp_transport *);
 int sctp_transport_hold(struct sctp_transport *);
 void sctp_transport_put(struct sctp_transport *);
 void sctp_transport_update_rto(struct sctp_transport *, __u32);
index 35512ac6dcfb186884a84a207c5ebab251c2b2db..c9228ad7ee91865cbeff19328e219171dd62b456 100644 (file)
@@ -123,12 +123,9 @@ struct linux_xfrm_mib {
 #define DECLARE_SNMP_STAT(type, name)  \
        extern __typeof__(type) __percpu *name
 
-#define SNMP_INC_STATS_BH(mib, field)  \
+#define __SNMP_INC_STATS(mib, field)   \
                        __this_cpu_inc(mib->mibs[field])
 
-#define SNMP_INC_STATS_USER(mib, field)        \
-                       this_cpu_inc(mib->mibs[field])
-
 #define SNMP_INC_STATS_ATOMIC_LONG(mib, field) \
                        atomic_long_inc(&mib->mibs[field])
 
@@ -138,12 +135,9 @@ struct linux_xfrm_mib {
 #define SNMP_DEC_STATS(mib, field)     \
                        this_cpu_dec(mib->mibs[field])
 
-#define SNMP_ADD_STATS_BH(mib, field, addend)  \
+#define __SNMP_ADD_STATS(mib, field, addend)   \
                        __this_cpu_add(mib->mibs[field], addend)
 
-#define SNMP_ADD_STATS_USER(mib, field, addend)        \
-                       this_cpu_add(mib->mibs[field], addend)
-
 #define SNMP_ADD_STATS(mib, field, addend)     \
                        this_cpu_add(mib->mibs[field], addend)
 #define SNMP_UPD_PO_STATS(mib, basefield, addend)      \
@@ -152,7 +146,7 @@ struct linux_xfrm_mib {
                this_cpu_inc(ptr[basefield##PKTS]);             \
                this_cpu_add(ptr[basefield##OCTETS], addend);   \
        } while (0)
-#define SNMP_UPD_PO_STATS_BH(mib, basefield, addend)   \
+#define __SNMP_UPD_PO_STATS(mib, basefield, addend)    \
        do { \
                __typeof__((mib->mibs) + 0) ptr = mib->mibs;    \
                __this_cpu_inc(ptr[basefield##PKTS]);           \
@@ -162,7 +156,7 @@ struct linux_xfrm_mib {
 
 #if BITS_PER_LONG==32
 
-#define SNMP_ADD_STATS64_BH(mib, field, addend)                        \
+#define __SNMP_ADD_STATS64(mib, field, addend)                                 \
        do {                                                            \
                __typeof__(*mib) *ptr = raw_cpu_ptr(mib);               \
                u64_stats_update_begin(&ptr->syncp);                    \
@@ -170,20 +164,16 @@ struct linux_xfrm_mib {
                u64_stats_update_end(&ptr->syncp);                      \
        } while (0)
 
-#define SNMP_ADD_STATS64_USER(mib, field, addend)                      \
+#define SNMP_ADD_STATS64(mib, field, addend)                           \
        do {                                                            \
                local_bh_disable();                                     \
-               SNMP_ADD_STATS64_BH(mib, field, addend);                \
-               local_bh_enable();                                      \
+               __SNMP_ADD_STATS64(mib, field, addend);                 \
+               local_bh_enable();                                      \
        } while (0)
 
-#define SNMP_ADD_STATS64(mib, field, addend)                           \
-               SNMP_ADD_STATS64_USER(mib, field, addend)
-
-#define SNMP_INC_STATS64_BH(mib, field) SNMP_ADD_STATS64_BH(mib, field, 1)
-#define SNMP_INC_STATS64_USER(mib, field) SNMP_ADD_STATS64_USER(mib, field, 1)
+#define __SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1)
 #define SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1)
-#define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend)                 \
+#define __SNMP_UPD_PO_STATS64(mib, basefield, addend)                  \
        do {                                                            \
                __typeof__(*mib) *ptr;                          \
                ptr = raw_cpu_ptr((mib));                               \
@@ -195,19 +185,17 @@ struct linux_xfrm_mib {
 #define SNMP_UPD_PO_STATS64(mib, basefield, addend)                    \
        do {                                                            \
                local_bh_disable();                                     \
-               SNMP_UPD_PO_STATS64_BH(mib, basefield, addend);         \
-               local_bh_enable();                                      \
+               __SNMP_UPD_PO_STATS64(mib, basefield, addend);          \
+               local_bh_enable();                                      \
        } while (0)
 #else
-#define SNMP_INC_STATS64_BH(mib, field)                SNMP_INC_STATS_BH(mib, field)
-#define SNMP_INC_STATS64_USER(mib, field)      SNMP_INC_STATS_USER(mib, field)
+#define __SNMP_INC_STATS64(mib, field)         __SNMP_INC_STATS(mib, field)
 #define SNMP_INC_STATS64(mib, field)           SNMP_INC_STATS(mib, field)
 #define SNMP_DEC_STATS64(mib, field)           SNMP_DEC_STATS(mib, field)
-#define SNMP_ADD_STATS64_BH(mib, field, addend) SNMP_ADD_STATS_BH(mib, field, addend)
-#define SNMP_ADD_STATS64_USER(mib, field, addend) SNMP_ADD_STATS_USER(mib, field, addend)
+#define __SNMP_ADD_STATS64(mib, field, addend) __SNMP_ADD_STATS(mib, field, addend)
 #define SNMP_ADD_STATS64(mib, field, addend)   SNMP_ADD_STATS(mib, field, addend)
 #define SNMP_UPD_PO_STATS64(mib, basefield, addend) SNMP_UPD_PO_STATS(mib, basefield, addend)
-#define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) SNMP_UPD_PO_STATS_BH(mib, basefield, addend)
+#define __SNMP_UPD_PO_STATS64(mib, basefield, addend) __SNMP_UPD_PO_STATS(mib, basefield, addend)
 #endif
 
 #endif
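
The rename swaps the old _BH suffix for a leading double underscore and drops the _USER variants entirely, matching the usual kernel convention: the __SNMP_*() forms assume BH/softirq context (they use the unprotected __this_cpu ops), while the plain SNMP_*() forms are safe from process context. A sketch, with UDP MIB fields chosen purely for illustration and a struct net *net assumed in scope:

	/* in a receive handler, already in softirq context: */
	__SNMP_INC_STATS(net->mib.udp_statistics, UDP_MIB_INDATAGRAMS);

	/* from a syscall path, where preemption is possible: */
	SNMP_INC_STATS(net->mib.udp_statistics, UDP_MIB_OUTDATAGRAMS);
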
index 81d6fecec0a2c0ec4af0bd8b1e2c87f12a68e555..c9c8b19df27c558354687119db60c0716909ea3f 100644 (file)
@@ -569,7 +569,7 @@ static inline bool __sk_del_node_init(struct sock *sk)
    modifications.
  */
 
-static inline void sock_hold(struct sock *sk)
+static __always_inline void sock_hold(struct sock *sk)
 {
        atomic_inc(&sk->sk_refcnt);
 }
@@ -577,7 +577,7 @@ static inline void sock_hold(struct sock *sk)
 /* Ungrab socket in the context, which assumes that socket refcnt
    cannot hit zero, e.g. it is true in the context of any socketcall.
  */
-static inline void __sock_put(struct sock *sk)
+static __always_inline void __sock_put(struct sock *sk)
 {
        atomic_dec(&sk->sk_refcnt);
 }
@@ -630,12 +630,20 @@ static inline void sk_add_node(struct sock *sk, struct hlist_head *list)
 static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
 {
        sock_hold(sk);
-       hlist_add_head_rcu(&sk->sk_node, list);
+       if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
+           sk->sk_family == AF_INET6)
+               hlist_add_tail_rcu(&sk->sk_node, list);
+       else
+               hlist_add_head_rcu(&sk->sk_node, list);
 }
 
 static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
 {
-       hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
+       if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
+           sk->sk_family == AF_INET6)
+               hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
+       else
+               hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
 }
 
 static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
@@ -918,6 +926,17 @@ void sk_stream_kill_queues(struct sock *sk);
 void sk_set_memalloc(struct sock *sk);
 void sk_clear_memalloc(struct sock *sk);
 
+void __sk_flush_backlog(struct sock *sk);
+
+static inline bool sk_flush_backlog(struct sock *sk)
+{
+       if (unlikely(READ_ONCE(sk->sk_backlog.tail))) {
+               __sk_flush_backlog(sk);
+               return true;
+       }
+       return false;
+}
+
 int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);
 
 struct request_sock_ops;
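
sk_flush_backlog() lets a long-running path that holds the socket lock drain packets which softirq handlers queued to the backlog, instead of letting them pile up until release_sock(). A sketch of the intended call site (loop body elided; sk and msg assumed in scope):

	lock_sock(sk);
	while (msg_data_left(msg)) {
		/* Returns true if the backlog was processed; socket
		 * state may have changed, so re-evaluate before
		 * copying more data.
		 */
		if (sk_flush_backlog(sk))
			continue;

		/* ... copy user data into skbs and transmit ... */
	}
	release_sock(sk);
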
@@ -1316,21 +1335,6 @@ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
        __kfree_skb(skb);
 }
 
-/* Used by processes to "lock" a socket state, so that
- * interrupts and bottom half handlers won't change it
- * from under us. It essentially blocks any incoming
- * packets, so that we won't get any new data or any
- * packets that change the state of the socket.
- *
- * While locked, BH processing will add new packets to
- * the backlog queue.  This queue is processed by the
- * owner of the socket lock right before it is released.
- *
- * Since ~2.3.5 it is also exclusive sleep lock serializing
- * accesses from user process context.
- */
-#define sock_owned_by_user(sk) ((sk)->sk_lock.owned)
-
 static inline void sock_release_ownership(struct sock *sk)
 {
        if (sk->sk_lock.owned) {
@@ -1403,6 +1407,40 @@ static inline void unlock_sock_fast(struct sock *sk, bool slow)
                spin_unlock_bh(&sk->sk_lock.slock);
 }
 
+/* Used by processes to "lock" a socket state, so that
+ * interrupts and bottom half handlers won't change it
+ * from under us. It essentially blocks any incoming
+ * packets, so that we won't get any new data or any
+ * packets that change the state of the socket.
+ *
+ * While locked, BH processing will add new packets to
+ * the backlog queue.  This queue is processed by the
+ * owner of the socket lock right before it is released.
+ *
+ * Since ~2.3.5 it is also exclusive sleep lock serializing
+ * accesses from user process context.
+ */
+
+static inline void sock_owned_by_me(const struct sock *sk)
+{
+#ifdef CONFIG_LOCKDEP
+       WARN_ON_ONCE(!lockdep_sock_is_held(sk) && debug_locks);
+#endif
+}
+
+static inline bool sock_owned_by_user(const struct sock *sk)
+{
+       sock_owned_by_me(sk);
+       return sk->sk_lock.owned;
+}
+
+/* no reclassification while locks are held */
+static inline bool sock_allow_reclassification(const struct sock *csk)
+{
+       struct sock *sk = (struct sock *)csk;
+
+       return !sk->sk_lock.owned && !spin_is_locked(&sk->sk_lock.slock);
+}
 
 struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
                      struct proto *prot, int kern);
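
sock_owned_by_me() turns the old implicit "caller must hold the socket lock" convention into a lockdep-checked assertion, and sock_owned_by_user() now performs the same check before reading the flag. A sketch of a helper documenting its locking requirement (example_ name illustrative):

static void example_update_under_lock(struct sock *sk)
{
	sock_owned_by_me(sk);	/* WARNs once if lockdep disagrees */

	/* ... touch fields protected by lock_sock()/release_sock() ... */
}
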
@@ -1412,6 +1450,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
 
 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
                             gfp_t priority);
+void __sock_wfree(struct sk_buff *skb);
 void sock_wfree(struct sk_buff *skb);
 void skb_orphan_partial(struct sk_buff *skb);
 void sock_rfree(struct sk_buff *skb);
@@ -1918,11 +1957,19 @@ static inline unsigned long sock_wspace(struct sock *sk)
  */
 static inline void sk_set_bit(int nr, struct sock *sk)
 {
+       if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
+           !sock_flag(sk, SOCK_FASYNC))
+               return;
+
        set_bit(nr, &sk->sk_wq_raw->flags);
 }
 
 static inline void sk_clear_bit(int nr, struct sock *sk)
 {
+       if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
+           !sock_flag(sk, SOCK_FASYNC))
+               return;
+
        clear_bit(nr, &sk->sk_wq_raw->flags);
 }
 
index d451122e8404af871aca0ef71db2a6af37d1c2b3..51d77b2ce2b291d6ffa3c3f467ddf8c564892e48 100644 (file)
@@ -54,6 +54,8 @@ struct switchdev_attr {
        struct net_device *orig_dev;
        enum switchdev_attr_id id;
        u32 flags;
+       void *complete_priv;
+       void (*complete)(struct net_device *dev, int err, void *priv);
        union {
                struct netdev_phys_item_id ppid;        /* PORT_PARENT_ID */
                u8 stp_state;                           /* PORT_STP_STATE */
@@ -75,6 +77,8 @@ struct switchdev_obj {
        struct net_device *orig_dev;
        enum switchdev_obj_id id;
        u32 flags;
+       void *complete_priv;
+       void (*complete)(struct net_device *dev, int err, void *priv);
 };
 
 /* SWITCHDEV_OBJ_ID_PORT_VLAN */
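
The new complete/complete_priv pair gives callers of deferred switchdev operations a notification once the op has actually run. A sketch of wiring it up; the example_ names and the surrounding flow are illustrative, not from this diff:

static void example_complete(struct net_device *dev, int err, void *priv)
{
	struct completion *done = priv;

	if (err)
		netdev_err(dev, "deferred switchdev op failed: %d\n", err);
	complete(done);
}

	/* caller side: */
	DECLARE_COMPLETION_ONSTACK(done);

	attr.complete = example_complete;
	attr.complete_priv = &done;
	/* ... issue the deferred switchdev_port_attr_set(), then: */
	wait_for_completion(&done);
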
index dae96bae1c19c2d71fa7c0ea65e74d064e3b3757..e891835eb74e765bbed654362f1bf53439780904 100644 (file)
@@ -2,6 +2,7 @@
 #define __NET_TC_MIR_H
 
 #include <net/act_api.h>
+#include <linux/tc_act/tc_mirred.h>
 
 struct tcf_mirred {
        struct tcf_common       common;
@@ -14,4 +15,18 @@ struct tcf_mirred {
 #define to_mirred(a) \
        container_of(a->priv, struct tcf_mirred, common)
 
+static inline bool is_tcf_mirred_redirect(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+       if (a->ops && a->ops->type == TCA_ACT_MIRRED)
+               return to_mirred(a)->tcfm_eaction == TCA_EGRESS_REDIR;
+#endif
+       return false;
+}
+
+static inline int tcf_mirred_ifindex(const struct tc_action *a)
+{
+       return to_mirred(a)->tcfm_ifindex;
+}
+
 #endif /* __NET_TC_MIR_H */
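
The two helpers give classifier-offload code a stable way to recognize an egress-redirect mirred action and find its target ifindex. A sketch of how a driver walking an action list might consume them; a and indev are assumed in scope, with RTNL held for __dev_get_by_index():

	if (is_tcf_mirred_redirect(a)) {
		int ifindex = tcf_mirred_ifindex(a);
		struct net_device *target;

		target = __dev_get_by_index(dev_net(indev), ifindex);
		if (!target)
			return -EINVAL;
		/* ... program hardware to redirect to 'target' ... */
	}
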
index 74d3ed5eb219047189904627399af094ddd27584..0bcc70f4e1fb7ed574f787d636afaa9a87628d69 100644 (file)
@@ -332,9 +332,8 @@ bool tcp_check_oom(struct sock *sk, int shift);
 extern struct proto tcp_prot;
 
 #define TCP_INC_STATS(net, field)      SNMP_INC_STATS((net)->mib.tcp_statistics, field)
-#define TCP_INC_STATS_BH(net, field)   SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
+#define __TCP_INC_STATS(net, field)    __SNMP_INC_STATS((net)->mib.tcp_statistics, field)
 #define TCP_DEC_STATS(net, field)      SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
-#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
 #define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
 
 void tcp_tasklet_init(void);
@@ -452,10 +451,15 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
 int tcp_connect(struct sock *sk);
+enum tcp_synack_type {
+       TCP_SYNACK_NORMAL,
+       TCP_SYNACK_FASTOPEN,
+       TCP_SYNACK_COOKIE,
+};
 struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
                                struct request_sock *req,
                                struct tcp_fastopen_cookie *foc,
-                               bool attach_req);
+                               enum tcp_synack_type synack_type);
 int tcp_disconnect(struct sock *sk, int flags);
 
 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
@@ -533,8 +537,8 @@ __u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
 void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
                               int nonagle);
 bool tcp_may_send_now(struct sock *sk);
-int __tcp_retransmit_skb(struct sock *, struct sk_buff *);
-int tcp_retransmit_skb(struct sock *, struct sk_buff *);
+int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
+int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
 void tcp_retransmit_timer(struct sock *sk);
 void tcp_xmit_retransmit_queue(struct sock *);
 void tcp_simple_retransmit(struct sock *);
@@ -552,6 +556,8 @@ void tcp_send_ack(struct sock *sk);
 void tcp_send_delayed_ack(struct sock *sk);
 void tcp_send_loss_probe(struct sock *sk);
 bool tcp_schedule_loss_probe(struct sock *sk);
+void tcp_skb_collapse_tstamp(struct sk_buff *skb,
+                            const struct sk_buff *next_skb);
 
 /* tcp_input.c */
 void tcp_resume_early_retransmit(struct sock *sk);
@@ -755,14 +761,20 @@ struct tcp_skb_cb {
 
        __u8            ip_dsfield;     /* IPv4 tos or IPv6 dsfield     */
        __u8            txstamp_ack:1,  /* Record TX timestamp for ack? */
-                       unused:7;
+                       eor:1,          /* Is skb MSG_EOR marked? */
+                       unused:6;
        __u32           ack_seq;        /* Sequence number ACK'd        */
        union {
-               struct inet_skb_parm    h4;
+               struct {
+                       /* There is space for up to 20 bytes */
+               } tx;   /* only used for outgoing skbs */
+               union {
+                       struct inet_skb_parm    h4;
 #if IS_ENABLED(CONFIG_IPV6)
-               struct inet6_skb_parm   h6;
+                       struct inet6_skb_parm   h6;
 #endif
-       } header;       /* For incoming frames          */
+               } header;       /* For incoming skbs */
+       };
 };
 
 #define TCP_SKB_CB(__skb)      ((struct tcp_skb_cb *)&((__skb)->cb[0]))
@@ -774,7 +786,9 @@ struct tcp_skb_cb {
  */
 static inline int tcp_v6_iif(const struct sk_buff *skb)
 {
-       return TCP_SKB_CB(skb)->header.h6.iif;
+       bool l3_slave = skb_l3mdev_slave(TCP_SKB_CB(skb)->header.h6.flags);
+
+       return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
 }
 #endif
 
@@ -802,6 +816,11 @@ static inline int tcp_skb_mss(const struct sk_buff *skb)
        return TCP_SKB_CB(skb)->tcp_gso_size;
 }
 
+static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
+{
+       return likely(!TCP_SKB_CB(skb)->eor);
+}
+
 /* Events passed to congestion control interface */
 enum tcp_ca_event {
        CA_EVENT_TX_START,      /* first transmit when no packets in flight */
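
The new eor bit records a userspace MSG_EOR mark on the skb, and tcp_skb_can_collapse_to() is the guard the write-queue coalescing paths can use so that two records are never merged across that mark. A sketch of the guard at the tail of the write queue (surrounding sendmsg loop elided):

	struct sk_buff *tail = tcp_write_queue_tail(sk);

	/* Userspace marked the record boundary with
	 * send(fd, buf, len, MSG_EOR); never append new data to an
	 * skb whose eor bit is set.
	 */
	if (tail && !tcp_skb_can_collapse_to(tail))
		tail = NULL;	/* force a fresh skb for the next record */
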
@@ -837,6 +856,11 @@ enum tcp_ca_ack_event_flags {
 
 union tcp_cc_info;
 
+struct ack_sample {
+       u32 pkts_acked;
+       s32 rtt_us;
+};
+
 struct tcp_congestion_ops {
        struct list_head        list;
        u32 key;
@@ -860,7 +884,7 @@ struct tcp_congestion_ops {
        /* new value of cwnd after loss (optional) */
        u32  (*undo_cwnd)(struct sock *sk);
        /* hook for packet ack accounting (optional) */
-       void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
+       void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
        /* get info for inet_diag (optional) */
        size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
                           union tcp_cc_info *info);
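
pkts_acked() now receives one aggregated struct ack_sample instead of two scalar arguments, which leaves room to grow the sample later without touching every congestion-control module. A sketch of a module's callback under the new signature; example_pkts_acked() and example_update_rtt() are hypothetical:

static void example_pkts_acked(struct sock *sk,
			       const struct ack_sample *sample)
{
	/* rtt_us < 0 means no valid RTT measurement for this ACK */
	if (sample->rtt_us > 0)
		example_update_rtt(sk, sample->rtt_us);
}
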
@@ -1291,10 +1315,10 @@ bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
 static inline void tcp_mib_init(struct net *net)
 {
        /* See RFC 2012 */
-       TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
-       TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
-       TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
-       TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
+       TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
+       TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
+       TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
+       TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
 }
 
 /* from STCP */
@@ -1728,7 +1752,7 @@ struct tcp_request_sock_ops {
        int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
                           struct flowi *fl, struct request_sock *req,
                           struct tcp_fastopen_cookie *foc,
-                          bool attach_req);
+                          enum tcp_synack_type synack_type);
 };
 
 #ifdef CONFIG_SYN_COOKIES
@@ -1737,7 +1761,7 @@ static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
                                         __u16 *mss)
 {
        tcp_synq_overflow(sk);
-       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
+       __NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
        return ops->cookie_init_seq(skb, mss);
 }
 #else
@@ -1846,7 +1870,7 @@ static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
 static inline void tcp_listendrop(const struct sock *sk)
 {
        atomic_inc(&((struct sock *)sk)->sk_drops);
-       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+       __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
 }
 
 #endif /* _TCP_H */
index 2b1c3450ab20d57f9faa1268291252179b232efb..276f9760ab561c52663338af34f3d6803f38b222 100644 (file)
@@ -41,8 +41,7 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
                                    struct sk_buff *skb);
 
 int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg,
-                         struct flowi6 *fl6, struct ipv6_txoptions *opt,
-                         int *hlimit, int *tclass, int *dontfrag,
+                         struct flowi6 *fl6, struct ipcm6_cookie *ipc6,
                          struct sockcm_cookie *sockc);
 
 void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
index 3c5a65e0946da399d9db0d2a4e9c1529bc203e35..ae07f375370da41271b839a272676ac970c3647e 100644 (file)
@@ -289,32 +289,32 @@ struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
 /*
  *     SNMP statistics for UDP and UDP-Lite
  */
-#define UDP_INC_STATS_USER(net, field, is_udplite)           do { \
-       if (is_udplite) SNMP_INC_STATS_USER((net)->mib.udplite_statistics, field);       \
-       else            SNMP_INC_STATS_USER((net)->mib.udp_statistics, field);  }  while(0)
-#define UDP_INC_STATS_BH(net, field, is_udplite)             do { \
-       if (is_udplite) SNMP_INC_STATS_BH((net)->mib.udplite_statistics, field);         \
-       else            SNMP_INC_STATS_BH((net)->mib.udp_statistics, field);    }  while(0)
-
-#define UDP6_INC_STATS_BH(net, field, is_udplite)          do { \
-       if (is_udplite) SNMP_INC_STATS_BH((net)->mib.udplite_stats_in6, field);\
-       else            SNMP_INC_STATS_BH((net)->mib.udp_stats_in6, field);  \
+#define UDP_INC_STATS(net, field, is_udplite)                do { \
+       if (is_udplite) SNMP_INC_STATS((net)->mib.udplite_statistics, field);       \
+       else            SNMP_INC_STATS((net)->mib.udp_statistics, field);  }  while(0)
+#define __UDP_INC_STATS(net, field, is_udplite)              do { \
+       if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_statistics, field);         \
+       else            __SNMP_INC_STATS((net)->mib.udp_statistics, field);    }  while(0)
+
+#define __UDP6_INC_STATS(net, field, is_udplite)           do { \
+       if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_stats_in6, field);\
+       else            __SNMP_INC_STATS((net)->mib.udp_stats_in6, field);  \
 } while(0)
-#define UDP6_INC_STATS_USER(net, field, __lite)                    do { \
-       if (__lite) SNMP_INC_STATS_USER((net)->mib.udplite_stats_in6, field);  \
-       else        SNMP_INC_STATS_USER((net)->mib.udp_stats_in6, field);      \
+#define UDP6_INC_STATS(net, field, __lite)                 do { \
+       if (__lite) SNMP_INC_STATS((net)->mib.udplite_stats_in6, field);  \
+       else        SNMP_INC_STATS((net)->mib.udp_stats_in6, field);      \
 } while(0)
 
 #if IS_ENABLED(CONFIG_IPV6)
-#define UDPX_INC_STATS_BH(sk, field)                                   \
+#define __UDPX_INC_STATS(sk, field)                                    \
 do {                                                                   \
        if ((sk)->sk_family == AF_INET)                                 \
-               UDP_INC_STATS_BH(sock_net(sk), field, 0);               \
+               __UDP_INC_STATS(sock_net(sk), field, 0);                \
        else                                                            \
-               UDP6_INC_STATS_BH(sock_net(sk), field, 0);              \
+               __UDP6_INC_STATS(sock_net(sk), field, 0);               \
 } while (0)
 #else
-#define UDPX_INC_STATS_BH(sk, field) UDP_INC_STATS_BH(sock_net(sk), field, 0)
+#define __UDPX_INC_STATS(sk, field) __UDP_INC_STATS(sock_net(sk), field, 0)
 #endif
 
 /* /proc */
index 2dcf1de948ac74d9c5b9d5144e413fa12fa5463e..9d14f707e5345dfe8da7523a3e5d6293377725cd 100644 (file)
@@ -105,23 +105,13 @@ struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
                                    __be16 flags, __be64 tunnel_id,
                                    int md_size);
 
-static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb,
-                                                        bool udp_csum)
+static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
 {
        int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
 
        return iptunnel_handle_offloads(skb, type);
 }
 
-static inline void udp_tunnel_gro_complete(struct sk_buff *skb, int nhoff)
-{
-       struct udphdr *uh;
-
-       uh = (struct udphdr *)(skb->data + nhoff - sizeof(struct udphdr));
-       skb_shinfo(skb)->gso_type |= uh->check ?
-                               SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
-}
-
 static inline void udp_tunnel_encap_enable(struct socket *sock)
 {
 #if IS_ENABLED(CONFIG_IPV6)
index 2f168f0ea32c39f70ce8e64a34dbda2668f4ec53..b8803165df9120e77f08e90b6eae313a90669e0e 100644 (file)
@@ -184,9 +184,7 @@ struct vxlan_metadata {
 /* per UDP socket information */
 struct vxlan_sock {
        struct hlist_node hlist;
-       struct work_struct del_work;
        struct socket    *sock;
-       struct rcu_head   rcu;
        struct hlist_head vni_list[VNI_HASH_SIZE];
        atomic_t          refcnt;
        u32               flags;
@@ -319,7 +317,9 @@ static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
            (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
             skb->inner_protocol != htons(ETH_P_TEB) ||
             (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
-             sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
+             sizeof(struct udphdr) + sizeof(struct vxlanhdr)) ||
+            (skb->ip_summed != CHECKSUM_NONE &&
+             !can_checksum_protocol(features, inner_eth_hdr(skb)->h_proto))))
                return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
 
        return features;
@@ -392,13 +392,11 @@ static inline __be32 vxlan_compute_rco(unsigned int start, unsigned int offset)
        return vni_field;
 }
 
-#if IS_ENABLED(CONFIG_VXLAN)
-void vxlan_get_rx_port(struct net_device *netdev);
-#else
 static inline void vxlan_get_rx_port(struct net_device *netdev)
 {
+       ASSERT_RTNL();
+       call_netdevice_notifiers(NETDEV_OFFLOAD_PUSH_VXLAN, netdev);
 }
-#endif
 
 static inline unsigned short vxlan_get_sk_family(struct vxlan_sock *vs)
 {
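
vxlan_get_rx_port() is now an unconditional inline that fans out over a netdevice notifier, so a NIC driver no longer needs the CONFIG_VXLAN stub dance; the vxlan driver answers the notifier by replaying its open UDP ports through the driver's ndo_add_vxlan_port(). A sketch of the call site (example_ name illustrative; ndo_open() already runs under RTNL, satisfying the ASSERT_RTNL() above):

static int example_open(struct net_device *netdev)
{
	/* ... bring up hardware offload engine ... */
	vxlan_get_rx_port(netdev);	/* replay existing VXLAN ports */
	return 0;
}
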
index d6f6e5006ee9e3cb8dcd61e151fcc95ce165eab9..adfebd6f243c14d18b23988aa27c23c2a88f985e 100644 (file)
 
 #ifdef CONFIG_XFRM_STATISTICS
 #define XFRM_INC_STATS(net, field)     SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
-#define XFRM_INC_STATS_BH(net, field)  SNMP_INC_STATS_BH((net)->mib.xfrm_statistics, field)
-#define XFRM_INC_STATS_USER(net, field)        SNMP_INC_STATS_USER((net)-mib.xfrm_statistics, field)
 #else
 #define XFRM_INC_STATS(net, field)     ((void)(net))
-#define XFRM_INC_STATS_BH(net, field)  ((void)(net))
-#define XFRM_INC_STATS_USER(net, field)        ((void)(net))
 #endif
 
 
index cf8f9e700e48939b7633a97df7f815486e0fb8a8..a6b93706b0fc96494d7de3d7408c26fb57436a02 100644 (file)
@@ -34,6 +34,7 @@
 #define _RDMA_IB_H
 
 #include <linux/types.h>
+#include <linux/sched.h>
 
 struct ib_addr {
        union {
@@ -86,4 +87,19 @@ struct sockaddr_ib {
        __u64                   sib_scope_id;
 };
 
+/*
+ * The IB interfaces that use write() as a bi-directional ioctl() are
+ * fundamentally unsafe, since there are lots of ways to trigger "write()"
+ * calls from various contexts with elevated privileges. That includes the
+ * traditional suid executable error message writes, but also various kernel
+ * interfaces that can write to file descriptors.
+ *
+ * This function provides protection for the legacy API by restricting the
+ * calling context.
+ */
+static inline bool ib_safe_file_access(struct file *filp)
+{
+       return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS);
+}
+
 #endif /* _RDMA_IB_H */
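A sketch of how a legacy write()-based handler would use this helper; the handler name is hypothetical, but the pattern matches the callers this guard was introduced for:

static ssize_t my_legacy_write(struct file *filp, const char __user *buf,
			       size_t count, loff_t *off)
{
	if (!ib_safe_file_access(filp)) {
		pr_err_once("%s: unsafe write() context, rejecting\n", __func__);
		return -EACCES;
	}
	/* ... parse and execute the command block from buf ... */
	return count;
}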
index 9ebab3a8cf0aa2f566fda02a9b3e4e42561ca42c..b2017440b7654ab37045fabad95b076d539d95d0 100644 (file)
@@ -68,8 +68,6 @@ struct rxrpc_wire_header {
 
 } __packed;
 
-extern const char *rxrpc_pkts[];
-
 #define RXRPC_SUPPORTED_PACKET_TYPES (                 \
                (1 << RXRPC_PACKET_TYPE_DATA) |         \
                (1 << RXRPC_PACKET_TYPE_ACK) |          \
index c067019ed12a7f4acb6c31391ede379527893098..74d79bde70750c717284b34ab77ac5f1dd7774b6 100644 (file)
@@ -516,6 +516,31 @@ static inline int scsi_device_tpgs(struct scsi_device *sdev)
        return sdev->inquiry ? (sdev->inquiry[5] >> 4) & 0x3 : 0;
 }
 
+/**
+ * scsi_device_supports_vpd - test if a device supports VPD pages
+ * @sdev: the &struct scsi_device to test
+ *
+ * If the 'try_vpd_pages' flag is set, it takes precedence.
+ * Otherwise we will assume VPD pages are supported if the
+ * SCSI level is at least SPC-3 and 'skip_vpd_pages' is not set.
+ */
+static inline int scsi_device_supports_vpd(struct scsi_device *sdev)
+{
+       /* Attempt VPD inquiry if the device blacklist explicitly calls
+        * for it.
+        */
+       if (sdev->try_vpd_pages)
+               return 1;
+       /*
+        * Although VPD inquiries can go to SCSI-2 type devices,
+        * some USB ones crash on receiving them, and the pages
+        * we currently ask for are for SPC-3 and beyond.
+        */
+       if (sdev->scsi_level > SCSI_SPC_2 && !sdev->skip_vpd_pages)
+               return 1;
+       return 0;
+}
+
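Typical use is to gate the VPD fetch on this predicate; a one-line sketch using the existing scsi_attach_vpd() helper:

if (scsi_device_supports_vpd(sdev))
	scsi_attach_vpd(sdev);	/* read the supported VPD pages */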
 #define MODULE_ALIAS_SCSI_DEVICE(type) \
        MODULE_ALIAS("scsi:t-" __stringify(type) "*")
 #define SCSI_DEVICE_MODALIAS_FMT "scsi:t-0x%02x"
index fa341fcb5829ab80114e6b22d69fc06856259cbd..f5842bcd9c94ca633a2673d7a4bdceac76423364 100644 (file)
@@ -9,7 +9,7 @@
 #ifdef CONFIG_SND_HDA_I915
 int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
 int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
-int snd_hdac_get_display_clk(struct hdac_bus *bus);
+void snd_hdac_i915_set_bclk(struct hdac_bus *bus);
 int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, int rate);
 int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid,
                           bool *audio_enabled, char *buffer, int max_bytes);
@@ -25,9 +25,8 @@ static inline int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
 {
        return 0;
 }
-static inline int snd_hdac_get_display_clk(struct hdac_bus *bus)
+static inline void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
 {
-       return 0;
 }
 static inline int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid,
                                           int rate)
index 2767c55a641edd64b7018a0d7693826ae60dbad9..ca64f0f50b4533df080a2daa6b1cba2fdf5f0a12 100644 (file)
@@ -17,6 +17,8 @@ int snd_hdac_regmap_add_vendor_verb(struct hdac_device *codec,
                                    unsigned int verb);
 int snd_hdac_regmap_read_raw(struct hdac_device *codec, unsigned int reg,
                             unsigned int *val);
+int snd_hdac_regmap_read_raw_uncached(struct hdac_device *codec,
+                                     unsigned int reg, unsigned int *val);
 int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg,
                              unsigned int val);
 int snd_hdac_regmap_update_raw(struct hdac_device *codec, unsigned int reg,
index 685a51aa98cca1b52abc08a95905d9598135516c..8ff6d40a294fd5a1fcbd8f10d0139d289c00d741 100644 (file)
@@ -76,6 +76,7 @@ struct target_core_fabric_ops {
        struct se_wwn *(*fabric_make_wwn)(struct target_fabric_configfs *,
                                struct config_group *, const char *);
        void (*fabric_drop_wwn)(struct se_wwn *);
+       void (*add_wwn_groups)(struct se_wwn *);
        struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *,
                                struct config_group *, const char *);
        void (*fabric_drop_tpg)(struct se_portal_group *);
@@ -87,7 +88,6 @@ struct target_core_fabric_ops {
                                struct config_group *, const char *);
        void (*fabric_drop_np)(struct se_tpg_np *);
        int (*fabric_init_nodeacl)(struct se_node_acl *, const char *);
-       void (*fabric_cleanup_nodeacl)(struct se_node_acl *);
 
        struct configfs_attribute **tfc_discovery_attrs;
        struct configfs_attribute **tfc_wwn_attrs;
index 677807f29a1cd18ae142f0f6e95999b0b6e1125e..e90e82ad68754cc8776935961275631507695ac4 100644 (file)
@@ -23,7 +23,7 @@ struct map_lookup;
 struct extent_buffer;
 struct btrfs_work;
 struct __btrfs_workqueue;
-struct btrfs_qgroup_operation;
+struct btrfs_qgroup_extent_record;
 
 #define show_ref_type(type)                                            \
        __print_symbolic(type,                                          \
@@ -1231,6 +1231,93 @@ DEFINE_EVENT(btrfs__qgroup_delayed_ref, btrfs_qgroup_free_delayed_ref,
 
        TP_ARGS(ref_root, reserved)
 );
+
+DECLARE_EVENT_CLASS(btrfs_qgroup_extent,
+       TP_PROTO(struct btrfs_qgroup_extent_record *rec),
+
+       TP_ARGS(rec),
+
+       TP_STRUCT__entry(
+               __field(        u64,  bytenr            )
+               __field(        u64,  num_bytes         )
+       ),
+
+       TP_fast_assign(
+               __entry->bytenr         = rec->bytenr,
+               __entry->bytenr         = rec->bytenr;
+       ),
+
+       TP_printk("bytenr = %llu, num_bytes = %llu",
+                 (unsigned long long)__entry->bytenr,
+                 (unsigned long long)__entry->num_bytes)
+);
+
+DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_account_extents,
+
+       TP_PROTO(struct btrfs_qgroup_extent_record *rec),
+
+       TP_ARGS(rec)
+);
+
+DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_insert_dirty_extent,
+
+       TP_PROTO(struct btrfs_qgroup_extent_record *rec),
+
+       TP_ARGS(rec)
+);
+
+TRACE_EVENT(btrfs_qgroup_account_extent,
+
+       TP_PROTO(u64 bytenr, u64 num_bytes, u64 nr_old_roots, u64 nr_new_roots),
+
+       TP_ARGS(bytenr, num_bytes, nr_old_roots, nr_new_roots),
+
+       TP_STRUCT__entry(
+               __field(        u64,  bytenr                    )
+               __field(        u64,  num_bytes                 )
+               __field(        u64,  nr_old_roots              )
+               __field(        u64,  nr_new_roots              )
+       ),
+
+       TP_fast_assign(
+               __entry->bytenr         = bytenr;
+               __entry->num_bytes      = num_bytes;
+               __entry->nr_old_roots   = nr_old_roots;
+               __entry->nr_new_roots   = nr_new_roots;
+       ),
+
+       TP_printk("bytenr = %llu, num_bytes = %llu, nr_old_roots = %llu, "
+                 "nr_new_roots = %llu",
+                 __entry->bytenr,
+                 __entry->num_bytes,
+                 __entry->nr_old_roots,
+                 __entry->nr_new_roots)
+);
+
+TRACE_EVENT(qgroup_update_counters,
+
+       TP_PROTO(u64 qgid, u64 cur_old_count, u64 cur_new_count),
+
+       TP_ARGS(qgid, cur_old_count, cur_new_count),
+
+       TP_STRUCT__entry(
+               __field(        u64,  qgid                      )
+               __field(        u64,  cur_old_count             )
+               __field(        u64,  cur_new_count             )
+       ),
+
+       TP_fast_assign(
+               __entry->qgid           = qgid;
+               __entry->cur_old_count  = cur_old_count;
+               __entry->cur_new_count  = cur_new_count;
+       ),
+
+       TP_printk("qgid = %llu, cur_old_count = %llu, cur_new_count = %llu",
+                 __entry->qgid,
+                 __entry->cur_old_count,
+                 __entry->cur_new_count)
+);
+
 #endif /* _TRACE_BTRFS_H */
 
 /* This part must be outside protection */
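For orientation, a sketch of how the qgroup accounting path would fire the events declared above; the local variable names (bytenr, nr_old_roots, qg, ...) are illustrative, not taken from this hunk:

trace_btrfs_qgroup_account_extent(bytenr, num_bytes,
				  nr_old_roots, nr_new_roots);
trace_qgroup_update_counters(qg->qgroupid, cur_old_count, cur_new_count);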
index a182306eefd7a825d1f40483dbc1d206ea3ef59a..88de5c205e86f7040854221baa2fe04c737daa3a 100644 (file)
@@ -64,16 +64,9 @@ perf_trace_##call(void *__data, proto)                                       \
                                                                        \
        { assign; }                                                     \
                                                                        \
-       if (prog) {                                                     \
-               *(struct pt_regs **)entry = __regs;                     \
-               if (!trace_call_bpf(prog, entry) || hlist_empty(head)) { \
-                       perf_swevent_put_recursion_context(rctx);       \
-                       return;                                         \
-               }                                                       \
-       }                                                               \
-       perf_trace_buf_submit(entry, __entry_size, rctx,                \
-                             event_call->event.type, __count, __regs,  \
-                             head, __task);                            \
+       perf_trace_run_bpf_submit(entry, __entry_size, rctx,            \
+                                 event_call, __count, __regs,          \
+                                 head, __task);                        \
 }
 
 /*
index 2622b33fb2ec7d0e37e437198ac5d1637eed224d..c51afb71bfabc658b16be2773ad5a0e78ca1d09b 100644 (file)
@@ -717,9 +717,13 @@ __SYSCALL(__NR_membarrier, sys_membarrier)
 __SYSCALL(__NR_mlock2, sys_mlock2)
 #define __NR_copy_file_range 285
 __SYSCALL(__NR_copy_file_range, sys_copy_file_range)
+#define __NR_preadv2 286
+__SC_COMP(__NR_preadv2, sys_preadv2, compat_sys_preadv2)
+#define __NR_pwritev2 287
+__SC_COMP(__NR_pwritev2, sys_pwritev2, compat_sys_pwritev2)
 
 #undef __NR_syscalls
-#define __NR_syscalls 286
+#define __NR_syscalls 288
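Until libc gains wrappers, the new syscalls can be invoked directly (needs <sys/syscall.h>, <sys/uio.h>, <unistd.h>). A userspace sketch; the low/high split of the offset follows the usual p{read,write}v raw calling convention, which should be verified per architecture:

static ssize_t do_preadv2(int fd, const struct iovec *iov, int iovcnt,
			  off_t off)
{
	/* last argument is the flags word; 0 behaves like plain preadv() */
	return syscall(__NR_preadv2, fd, iov, iovcnt,
		       (unsigned long)off, (unsigned long)(off >> 32), 0);
}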
 
 /*
  * All syscalls below here should go away really,
index b71fd0b5cbad7d375f3188114be65431def88907..8bdae34d1f9add25d968182c6540c431d994c7ce 100644 (file)
@@ -96,6 +96,7 @@ header-y += cyclades.h
 header-y += cycx_cfm.h
 header-y += dcbnl.h
 header-y += dccp.h
+header-y += devlink.h
 header-y += dlmconstants.h
 header-y += dlm_device.h
 header-y += dlm.h
@@ -140,6 +141,7 @@ header-y += gfs2_ondisk.h
 header-y += gigaset_dev.h
 header-y += gpio.h
 header-y += gsmmux.h
+header-y += gtp.h
 header-y += hdlcdrv.h
 header-y += hdlc.h
 header-y += hdreg.h
index 70eda5aeb304281cb3462f25445886111726f78a..406459b935a27c4f9b518426a4fd60493432221d 100644 (file)
@@ -347,6 +347,10 @@ enum bpf_func_id {
 #define BPF_F_ZERO_CSUM_TX             (1ULL << 1)
 #define BPF_F_DONT_FRAGMENT            (1ULL << 2)
 
+/* BPF_FUNC_perf_event_output flags. */
+#define BPF_F_INDEX_MASK               0xffffffffULL
+#define BPF_F_CURRENT_CPU              BPF_F_INDEX_MASK
+
 /* user accessible mirror of in-kernel sk_buff.
  * new fields can only be added to the end of this structure
  */
@@ -366,6 +370,8 @@ struct __sk_buff {
        __u32 cb[5];
        __u32 hash;
        __u32 tc_classid;
+       __u32 data;
+       __u32 data_end;
 };
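The new data/data_end fields enable verifier-checked direct packet access from tc BPF programs. A minimal classifier sketch; SEC(), ETH_HLEN and TC_ACT_* come from the usual BPF and uapi headers and are assumed here:

SEC("classifier")
int drop_short_frames(struct __sk_buff *skb)
{
	void *data = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;

	/* the verifier rejects any load not guarded by a bounds check */
	if (data + ETH_HLEN > data_end)
		return TC_ACT_SHOT;
	return TC_ACT_OK;
}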
 
 struct bpf_tunnel_key {
index c9fee5781eb1df82a045ed72d2afe2cd2381e381..ba0073b26fa66fb185353347231b5fdd37a4e1d8 100644 (file)
@@ -33,6 +33,30 @@ enum devlink_command {
        DEVLINK_CMD_PORT_SPLIT,
        DEVLINK_CMD_PORT_UNSPLIT,
 
+       DEVLINK_CMD_SB_GET,             /* can dump */
+       DEVLINK_CMD_SB_SET,
+       DEVLINK_CMD_SB_NEW,
+       DEVLINK_CMD_SB_DEL,
+
+       DEVLINK_CMD_SB_POOL_GET,        /* can dump */
+       DEVLINK_CMD_SB_POOL_SET,
+       DEVLINK_CMD_SB_POOL_NEW,
+       DEVLINK_CMD_SB_POOL_DEL,
+
+       DEVLINK_CMD_SB_PORT_POOL_GET,   /* can dump */
+       DEVLINK_CMD_SB_PORT_POOL_SET,
+       DEVLINK_CMD_SB_PORT_POOL_NEW,
+       DEVLINK_CMD_SB_PORT_POOL_DEL,
+
+       DEVLINK_CMD_SB_TC_POOL_BIND_GET,        /* can dump */
+       DEVLINK_CMD_SB_TC_POOL_BIND_SET,
+       DEVLINK_CMD_SB_TC_POOL_BIND_NEW,
+       DEVLINK_CMD_SB_TC_POOL_BIND_DEL,
+
+       /* Shared buffer occupancy monitoring commands */
+       DEVLINK_CMD_SB_OCC_SNAPSHOT,
+       DEVLINK_CMD_SB_OCC_MAX_CLEAR,
+
        /* add new commands above here */
 
        __DEVLINK_CMD_MAX,
@@ -46,6 +70,31 @@ enum devlink_port_type {
        DEVLINK_PORT_TYPE_IB,
 };
 
+enum devlink_sb_pool_type {
+       DEVLINK_SB_POOL_TYPE_INGRESS,
+       DEVLINK_SB_POOL_TYPE_EGRESS,
+};
+
+/* static threshold - limiting the maximum number of bytes.
+ * dynamic threshold - limiting the maximum number of bytes
+ *   based on the currently available free space in the shared buffer pool.
+ *   In this mode, the maximum quota is calculated based
+ *   on the following formula:
+ *     max_quota = alpha / (1 + alpha) * Free_Buffer
+ *   where Free_Buffer is the amount of non-occupied buffer associated with
+ *   the relevant pool.
+ *   The value range that can be passed is 0-20 and is used to compute
+ *   alpha by the following formula:
+ *     alpha = 2 ^ (passed_value - 10)
+ */
+
+enum devlink_sb_threshold_type {
+       DEVLINK_SB_THRESHOLD_TYPE_STATIC,
+       DEVLINK_SB_THRESHOLD_TYPE_DYNAMIC,
+};
+
+#define DEVLINK_SB_THRESHOLD_TO_ALPHA_MAX 20
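Worked out in integer math, the dynamic threshold above reduces to the helper below (name illustrative): for passed_value >= 10, alpha = 2^(v-10) and max_quota = free * alpha / (alpha + 1); for smaller values alpha is fractional and the quotient becomes free / (2^(10-v) + 1).

static u32 sb_dyn_max_quota(u8 passed_value, u32 free_buffer)
{
	if (passed_value >= 10) {
		u32 alpha = 1U << (passed_value - 10);

		return free_buffer / (alpha + 1) * alpha;
	}
	/* here alpha = 1 / 2^(10 - passed_value) */
	return free_buffer / ((1U << (10 - passed_value)) + 1);
}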
+
 enum devlink_attr {
        /* don't change the order or add anything between, this is ABI! */
        DEVLINK_ATTR_UNSPEC,
@@ -62,6 +111,20 @@ enum devlink_attr {
        DEVLINK_ATTR_PORT_IBDEV_NAME,           /* string */
        DEVLINK_ATTR_PORT_SPLIT_COUNT,          /* u32 */
        DEVLINK_ATTR_PORT_SPLIT_GROUP,          /* u32 */
+       DEVLINK_ATTR_SB_INDEX,                  /* u32 */
+       DEVLINK_ATTR_SB_SIZE,                   /* u32 */
+       DEVLINK_ATTR_SB_INGRESS_POOL_COUNT,     /* u16 */
+       DEVLINK_ATTR_SB_EGRESS_POOL_COUNT,      /* u16 */
+       DEVLINK_ATTR_SB_INGRESS_TC_COUNT,       /* u16 */
+       DEVLINK_ATTR_SB_EGRESS_TC_COUNT,        /* u16 */
+       DEVLINK_ATTR_SB_POOL_INDEX,             /* u16 */
+       DEVLINK_ATTR_SB_POOL_TYPE,              /* u8 */
+       DEVLINK_ATTR_SB_POOL_SIZE,              /* u32 */
+       DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE,    /* u8 */
+       DEVLINK_ATTR_SB_THRESHOLD,              /* u32 */
+       DEVLINK_ATTR_SB_TC_INDEX,               /* u16 */
+       DEVLINK_ATTR_SB_OCC_CUR,                /* u32 */
+       DEVLINK_ATTR_SB_OCC_MAX,                /* u32 */
 
        /* add new attributes above here, update the policy in devlink.c */
 
index 96161b8202b5d026ed39904f15a899659fc39adb..620c8a5ddc0003cb785c1dceb365586b336c763c 100644 (file)
@@ -49,6 +49,7 @@ enum {
        FRA_TABLE,      /* Extended table id */
        FRA_FWMASK,     /* mask for netfilter mark */
        FRA_OIFNAME,
+       FRA_PAD,
        __FRA_MAX
 };
 
index 6487317ea619c41d4ea662f1162417fa6c6e947f..52deccc2128eeb955b412aeef5bdb0ed8098d5f8 100644 (file)
@@ -10,6 +10,7 @@ enum {
        TCA_STATS_QUEUE,
        TCA_STATS_APP,
        TCA_STATS_RATE_EST64,
+       TCA_STATS_PAD,
        __TCA_STATS_MAX,
 };
 #define TCA_STATS_MAX (__TCA_STATS_MAX - 1)
diff --git a/include/uapi/linux/gtp.h b/include/uapi/linux/gtp.h
new file mode 100644 (file)
index 0000000..ca1054d
--- /dev/null
@@ -0,0 +1,33 @@
+#ifndef _UAPI_LINUX_GTP_H_
+#define _UAPI_LINUX_GTP_H_
+
+enum gtp_genl_cmds {
+       GTP_CMD_NEWPDP,
+       GTP_CMD_DELPDP,
+       GTP_CMD_GETPDP,
+
+       GTP_CMD_MAX,
+};
+
+enum gtp_version {
+       GTP_V0 = 0,
+       GTP_V1,
+};
+
+enum gtp_attrs {
+       GTPA_UNSPEC = 0,
+       GTPA_LINK,
+       GTPA_VERSION,
+       GTPA_TID,       /* for GTPv0 only */
+       GTPA_SGSN_ADDRESS,
+       GTPA_MS_ADDRESS,
+       GTPA_FLOW,
+       GTPA_NET_NS_FD,
+       GTPA_I_TEI,     /* for GTPv1 only */
+       GTPA_O_TEI,     /* for GTPv1 only */
+       GTPA_PAD,
+       __GTPA_MAX,
+};
+#define GTPA_MAX (__GTPA_MAX - 1)
+
+#endif /* _UAPI_LINUX_GTP_H_ */
index 0536eefff9bfee41c0de6088f38cf84b1d29a12f..397d503fdedbe979dbd00e326ae55a4f2fb5a09c 100644 (file)
@@ -134,6 +134,16 @@ struct bridge_vlan_info {
        __u16 vid;
 };
 
+struct bridge_vlan_xstats {
+       __u64 rx_bytes;
+       __u64 rx_packets;
+       __u64 tx_bytes;
+       __u64 tx_packets;
+       __u16 vid;
+       __u16 pad1;
+       __u32 pad2;
+};
+
 /* Bridge multicast database attributes
  * [MDBA_MDB] = {
  *     [MDBA_MDB_ENTRY] = {
@@ -233,4 +243,12 @@ enum {
 };
 #define MDBA_SET_ENTRY_MAX (__MDBA_SET_ENTRY_MAX - 1)
 
+/* Embedded inside LINK_XSTATS_TYPE_BRIDGE */
+enum {
+       BRIDGE_XSTATS_UNSPEC,
+       BRIDGE_XSTATS_VLAN,
+       __BRIDGE_XSTATS_MAX
+};
+#define BRIDGE_XSTATS_MAX (__BRIDGE_XSTATS_MAX - 1)
+
 #endif /* _UAPI_LINUX_IF_BRIDGE_H */
index 4a93051c578ce49c78140ae17878b3a3bbeba456..cec849a239f62001154c75edc46b19c623200115 100644 (file)
@@ -92,6 +92,7 @@
 #define ETH_P_TDLS     0x890D          /* TDLS */
 #define ETH_P_FIP      0x8914          /* FCoE Initialization Protocol */
 #define ETH_P_80221    0x8917          /* IEEE 802.21 Media Independent Handover Protocol */
+#define ETH_P_HSR      0x892F          /* IEC 62439-3 HSRv1    */
 #define ETH_P_LOOPBACK 0x9000          /* Ethernet loopback packet, per IEEE 802.3 */
 #define ETH_P_QINQ1    0x9100          /* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */
 #define ETH_P_QINQ2    0x9200          /* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */
index 9427f17d06d6d2c0fa6ccf6b4ddc85598cc1c2ea..bb36bd5675a7ceb641a597aa4d82d428cdaaf529 100644 (file)
@@ -155,6 +155,7 @@ enum {
        IFLA_PROTO_DOWN,
        IFLA_GSO_MAX_SEGS,
        IFLA_GSO_MAX_SIZE,
+       IFLA_PAD,
        __IFLA_MAX
 };
 
@@ -270,6 +271,8 @@ enum {
        IFLA_BR_NF_CALL_IP6TABLES,
        IFLA_BR_NF_CALL_ARPTABLES,
        IFLA_BR_VLAN_DEFAULT_PVID,
+       IFLA_BR_PAD,
+       IFLA_BR_VLAN_STATS_ENABLED,
        __IFLA_BR_MAX,
 };
 
@@ -312,6 +315,7 @@ enum {
        IFLA_BRPORT_HOLD_TIMER,
        IFLA_BRPORT_FLUSH,
        IFLA_BRPORT_MULTICAST_ROUTER,
+       IFLA_BRPORT_PAD,
        __IFLA_BRPORT_MAX
 };
 #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -431,6 +435,7 @@ enum {
        IFLA_MACSEC_SCB,
        IFLA_MACSEC_REPLAY_PROTECT,
        IFLA_MACSEC_VALIDATION,
+       IFLA_MACSEC_PAD,
        __IFLA_MACSEC_MAX,
 };
 
@@ -516,6 +521,24 @@ enum {
 };
 #define IFLA_GENEVE_MAX        (__IFLA_GENEVE_MAX - 1)
 
+/* PPP section */
+enum {
+       IFLA_PPP_UNSPEC,
+       IFLA_PPP_DEV_FD,
+       __IFLA_PPP_MAX
+};
+#define IFLA_PPP_MAX (__IFLA_PPP_MAX - 1)
+
+/* GTP section */
+enum {
+       IFLA_GTP_UNSPEC,
+       IFLA_GTP_FD0,
+       IFLA_GTP_FD1,
+       IFLA_GTP_PDP_HASHSIZE,
+       __IFLA_GTP_MAX,
+};
+#define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1)
+
 /* Bonding section */
 
 enum {
@@ -665,6 +688,7 @@ enum {
        IFLA_VF_STATS_TX_BYTES,
        IFLA_VF_STATS_BROADCAST,
        IFLA_VF_STATS_MULTICAST,
+       IFLA_VF_STATS_PAD,
        __IFLA_VF_STATS_MAX,
 };
 
@@ -775,9 +799,46 @@ enum {
        IFLA_HSR_MULTICAST_SPEC,        /* Last byte of supervision addr */
        IFLA_HSR_SUPERVISION_ADDR,      /* Supervision frame multicast addr */
        IFLA_HSR_SEQ_NR,
+       IFLA_HSR_VERSION,               /* HSR version */
        __IFLA_HSR_MAX,
 };
 
 #define IFLA_HSR_MAX (__IFLA_HSR_MAX - 1)
 
+/* STATS section */
+
+struct if_stats_msg {
+       __u8  family;
+       __u8  pad1;
+       __u16 pad2;
+       __u32 ifindex;
+       __u32 filter_mask;
+};
+
+/* A stats attribute can be netdev specific or a global stat.
+ * For netdev stats, let's use the prefix IFLA_STATS_LINK_*
+ */
+enum {
+       IFLA_STATS_UNSPEC, /* also used as 64bit pad attribute */
+       IFLA_STATS_LINK_64,
+       IFLA_STATS_LINK_XSTATS,
+       __IFLA_STATS_MAX,
+};
+
+#define IFLA_STATS_MAX (__IFLA_STATS_MAX - 1)
+
+#define IFLA_STATS_FILTER_BIT(ATTR)    (1 << (ATTR - 1))
+
+/* These are embedded into IFLA_STATS_LINK_XSTATS:
+ * [IFLA_STATS_LINK_XSTATS]
+ * -> [LINK_XSTATS_TYPE_xxx]
+ *    -> [rtnl link type specific attributes]
+ */
+enum {
+       LINK_XSTATS_TYPE_UNSPEC,
+       LINK_XSTATS_TYPE_BRIDGE,
+       __LINK_XSTATS_TYPE_MAX
+};
+#define LINK_XSTATS_TYPE_MAX (__LINK_XSTATS_TYPE_MAX - 1)
+
 #endif /* _UAPI_LINUX_IF_LINK_H */
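A userspace sketch of the new stats request; the netlink socket setup and reply parsing are elided, and the interface name is an example:

struct {
	struct nlmsghdr nlh;
	struct if_stats_msg ifsm;
} req = {
	.nlh.nlmsg_len    = NLMSG_LENGTH(sizeof(struct if_stats_msg)),
	.nlh.nlmsg_type   = RTM_GETSTATS,
	.nlh.nlmsg_flags  = NLM_F_REQUEST,
	.ifsm.ifindex     = if_nametoindex("eth0"),
	.ifsm.filter_mask = IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64),
};
/* send(nl_fd, &req, req.nlh.nlmsg_len, 0); the reply carries the
 * requested IFLA_STATS_* attributes. */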
index 26b0d1e3e3e7c199c852130fb16008858c5b20a3..f7d4831a2cc7c14b53adbfe8fcf2258cf879ca8e 100644 (file)
 
 #define MACSEC_MAX_KEY_LEN 128
 
-#define DEFAULT_CIPHER_ID   0x0080020001000001ULL
-#define DEFAULT_CIPHER_ALT  0x0080C20001000001ULL
+#define MACSEC_KEYID_LEN 16
+
+#define MACSEC_DEFAULT_CIPHER_ID   0x0080020001000001ULL
+#define MACSEC_DEFAULT_CIPHER_ALT  0x0080C20001000001ULL
 
 #define MACSEC_MIN_ICV_LEN 8
 #define MACSEC_MAX_ICV_LEN 32
@@ -55,6 +57,7 @@ enum macsec_secy_attrs {
        MACSEC_SECY_ATTR_INC_SCI,
        MACSEC_SECY_ATTR_ES,
        MACSEC_SECY_ATTR_SCB,
+       MACSEC_SECY_ATTR_PAD,
        __MACSEC_SECY_ATTR_END,
        NUM_MACSEC_SECY_ATTR = __MACSEC_SECY_ATTR_END,
        MACSEC_SECY_ATTR_MAX = __MACSEC_SECY_ATTR_END - 1,
@@ -66,6 +69,7 @@ enum macsec_rxsc_attrs {
        MACSEC_RXSC_ATTR_ACTIVE,  /* config/dump, u8 0..1 */
        MACSEC_RXSC_ATTR_SA_LIST, /* dump, nested */
        MACSEC_RXSC_ATTR_STATS,   /* dump, nested, macsec_rxsc_stats_attr */
+       MACSEC_RXSC_ATTR_PAD,
        __MACSEC_RXSC_ATTR_END,
        NUM_MACSEC_RXSC_ATTR = __MACSEC_RXSC_ATTR_END,
        MACSEC_RXSC_ATTR_MAX = __MACSEC_RXSC_ATTR_END - 1,
@@ -77,8 +81,9 @@ enum macsec_sa_attrs {
        MACSEC_SA_ATTR_ACTIVE, /* config/dump, u8 0..1 */
        MACSEC_SA_ATTR_PN,     /* config/dump, u32 */
        MACSEC_SA_ATTR_KEY,    /* config, data */
-       MACSEC_SA_ATTR_KEYID,  /* config/dump, u64 */
+       MACSEC_SA_ATTR_KEYID,  /* config/dump, 128-bit */
        MACSEC_SA_ATTR_STATS,  /* dump, nested, macsec_sa_stats_attr */
+       MACSEC_SA_ATTR_PAD,
        __MACSEC_SA_ATTR_END,
        NUM_MACSEC_SA_ATTR = __MACSEC_SA_ATTR_END,
        MACSEC_SA_ATTR_MAX = __MACSEC_SA_ATTR_END - 1,
@@ -110,6 +115,7 @@ enum macsec_rxsc_stats_attr {
        MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
        MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
        MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
+       MACSEC_RXSC_STATS_ATTR_PAD,
        __MACSEC_RXSC_STATS_ATTR_END,
        NUM_MACSEC_RXSC_STATS_ATTR = __MACSEC_RXSC_STATS_ATTR_END,
        MACSEC_RXSC_STATS_ATTR_MAX = __MACSEC_RXSC_STATS_ATTR_END - 1,
@@ -137,6 +143,7 @@ enum macsec_txsc_stats_attr {
        MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
        MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
        MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
+       MACSEC_TXSC_STATS_ATTR_PAD,
        __MACSEC_TXSC_STATS_ATTR_END,
        NUM_MACSEC_TXSC_STATS_ATTR = __MACSEC_TXSC_STATS_ATTR_END,
        MACSEC_TXSC_STATS_ATTR_MAX = __MACSEC_TXSC_STATS_ATTR_END - 1,
@@ -153,6 +160,7 @@ enum macsec_secy_stats_attr {
        MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
        MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
        MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
+       MACSEC_SECY_STATS_ATTR_PAD,
        __MACSEC_SECY_STATS_ATTR_END,
        NUM_MACSEC_SECY_STATS_ATTR = __MACSEC_SECY_STATS_ATTR_END,
        MACSEC_SECY_STATS_ATTR_MAX = __MACSEC_SECY_STATS_ATTR_END - 1,
index abde7bbd6f3b7c2e59b9b7f52baa758731ccde85..948c0a91e11b5bb50353a90f173cc0999c8f9a32 100644 (file)
@@ -14,6 +14,8 @@ enum {
        ILA_ATTR_LOCATOR_MATCH,                 /* u64 */
        ILA_ATTR_IFINDEX,                       /* s32 */
        ILA_ATTR_DIR,                           /* u32 */
+       ILA_ATTR_PAD,
+       ILA_ATTR_CSUM_MODE,                     /* u8 */
 
        __ILA_ATTR_MAX,
 };
@@ -34,4 +36,10 @@ enum {
 #define ILA_DIR_IN     (1 << 0)
 #define ILA_DIR_OUT    (1 << 1)
 
+enum {
+       ILA_CSUM_ADJUST_TRANSPORT,
+       ILA_CSUM_NEUTRAL_MAP,
+       ILA_CSUM_NO_ACTION,
+};
+
 #endif /* _UAPI_LINUX_ILA_H */
index 68a1f71fde9f7bcc90c21f586b57f1eab61de941..a1664370566967fe9f3862921ffab13cc86385d0 100644 (file)
@@ -113,9 +113,13 @@ enum {
        INET_DIAG_DCTCPINFO,
        INET_DIAG_PROTOCOL,  /* response attribute only */
        INET_DIAG_SKV6ONLY,
+       INET_DIAG_LOCALS,
+       INET_DIAG_PEERS,
+       INET_DIAG_PAD,
+       __INET_DIAG_MAX,
 };
 
-#define INET_DIAG_MAX INET_DIAG_SKV6ONLY
+#define INET_DIAG_MAX (__INET_DIAG_MAX - 1)
 
 /* INET_DIAG_MEM */
 
index 391395c06c7ea55d80fb64dfefc77b122b0a56d4..22d69894bc92ac63c6f96b7835ff9f1720f518e0 100644 (file)
@@ -435,6 +435,7 @@ enum {
        IPVS_STATS_ATTR_OUTPPS,         /* current out packet rate */
        IPVS_STATS_ATTR_INBPS,          /* current in byte rate */
        IPVS_STATS_ATTR_OUTBPS,         /* current out byte rate */
+       IPVS_STATS_ATTR_PAD,
        __IPVS_STATS_ATTR_MAX,
 };
 
index 347ef22a964ee4d112fa31e1974609eb6fb525df..4bd27d0270a2af6b63ea4920274aa97088888168 100644 (file)
@@ -126,6 +126,7 @@ enum {
        L2TP_ATTR_IP6_DADDR,            /* struct in6_addr */
        L2TP_ATTR_UDP_ZERO_CSUM6_TX,    /* u8 */
        L2TP_ATTR_UDP_ZERO_CSUM6_RX,    /* u8 */
+       L2TP_ATTR_PAD,
        __L2TP_ATTR_MAX,
 };
 
@@ -142,6 +143,7 @@ enum {
        L2TP_ATTR_RX_SEQ_DISCARDS,      /* u64 */
        L2TP_ATTR_RX_OOS_PACKETS,       /* u64 */
        L2TP_ATTR_RX_ERRORS,            /* u64 */
+       L2TP_ATTR_STATS_PAD,
        __L2TP_ATTR_STATS_MAX,
 };
 
index f8b01887a4958097c9c2fede065391c553683e80..a478fe80e203bdf4865ad9ed5f01343eaacd49ce 100644 (file)
@@ -22,6 +22,7 @@ enum lwtunnel_ip_t {
        LWTUNNEL_IP_TTL,
        LWTUNNEL_IP_TOS,
        LWTUNNEL_IP_FLAGS,
+       LWTUNNEL_IP_PAD,
        __LWTUNNEL_IP_MAX,
 };
 
@@ -35,6 +36,7 @@ enum lwtunnel_ip6_t {
        LWTUNNEL_IP6_HOPLIMIT,
        LWTUNNEL_IP6_TC,
        LWTUNNEL_IP6_FLAGS,
+       LWTUNNEL_IP6_PAD,
        __LWTUNNEL_IP6_MAX,
 };
 
index 788655bfa0f3001d549e90c8fe5dd811600e3a98..bd99a8d80f36e527094f05ceabb55c21ea004ae7 100644 (file)
@@ -128,6 +128,7 @@ enum {
        NDTPA_LOCKTIME,                 /* u64, msecs */
        NDTPA_QUEUE_LENBYTES,           /* u32 */
        NDTPA_MCAST_REPROBES,           /* u32 */
+       NDTPA_PAD,
        __NDTPA_MAX
 };
 #define NDTPA_MAX (__NDTPA_MAX - 1)
@@ -160,6 +161,7 @@ enum {
        NDTA_PARMS,                     /* nested TLV NDTPA_* */
        NDTA_STATS,                     /* struct ndt_stats, read-only */
        NDTA_GC_INTERVAL,               /* u64, msecs */
+       NDTA_PAD,
        __NDTA_MAX
 };
 #define NDTA_MAX (__NDTA_MAX - 1)
index 63b2e34f1b60393b9593513f899d3243055c1e7e..ebb5154976decef074be170d8b15564a42f3c260 100644 (file)
@@ -118,6 +118,7 @@ enum {
        IPSET_ATTR_SKBMARK,
        IPSET_ATTR_SKBPRIO,
        IPSET_ATTR_SKBQUEUE,
+       IPSET_ATTR_PAD,
        __IPSET_ATTR_ADT_MAX,
 };
 #define IPSET_ATTR_ADT_MAX     (__IPSET_ATTR_ADT_MAX - 1)
index eeffde196f804e2ea386eb5227f620b2371ba6f3..6a4dbe04f09ed11c4528db16d749c673eac68fad 100644 (file)
@@ -3,6 +3,7 @@
 
 #define NFT_TABLE_MAXNAMELEN   32
 #define NFT_CHAIN_MAXNAMELEN   32
+#define NFT_SET_MAXNAMELEN     32
 #define NFT_USERDATA_MAXLEN    256
 
 /**
@@ -182,6 +183,7 @@ enum nft_chain_attributes {
        NFTA_CHAIN_USE,
        NFTA_CHAIN_TYPE,
        NFTA_CHAIN_COUNTERS,
+       NFTA_CHAIN_PAD,
        __NFTA_CHAIN_MAX
 };
 #define NFTA_CHAIN_MAX         (__NFTA_CHAIN_MAX - 1)
@@ -206,6 +208,7 @@ enum nft_rule_attributes {
        NFTA_RULE_COMPAT,
        NFTA_RULE_POSITION,
        NFTA_RULE_USERDATA,
+       NFTA_RULE_PAD,
        __NFTA_RULE_MAX
 };
 #define NFTA_RULE_MAX          (__NFTA_RULE_MAX - 1)
@@ -308,6 +311,7 @@ enum nft_set_attributes {
        NFTA_SET_TIMEOUT,
        NFTA_SET_GC_INTERVAL,
        NFTA_SET_USERDATA,
+       NFTA_SET_PAD,
        __NFTA_SET_MAX
 };
 #define NFTA_SET_MAX           (__NFTA_SET_MAX - 1)
@@ -341,6 +345,7 @@ enum nft_set_elem_attributes {
        NFTA_SET_ELEM_EXPIRATION,
        NFTA_SET_ELEM_USERDATA,
        NFTA_SET_ELEM_EXPR,
+       NFTA_SET_ELEM_PAD,
        __NFTA_SET_ELEM_MAX
 };
 #define NFTA_SET_ELEM_MAX      (__NFTA_SET_ELEM_MAX - 1)
@@ -584,6 +589,7 @@ enum nft_dynset_attributes {
        NFTA_DYNSET_SREG_DATA,
        NFTA_DYNSET_TIMEOUT,
        NFTA_DYNSET_EXPR,
+       NFTA_DYNSET_PAD,
        __NFTA_DYNSET_MAX,
 };
 #define NFTA_DYNSET_MAX                (__NFTA_DYNSET_MAX - 1)
@@ -806,6 +812,7 @@ enum nft_limit_attributes {
        NFTA_LIMIT_BURST,
        NFTA_LIMIT_TYPE,
        NFTA_LIMIT_FLAGS,
+       NFTA_LIMIT_PAD,
        __NFTA_LIMIT_MAX
 };
 #define NFTA_LIMIT_MAX         (__NFTA_LIMIT_MAX - 1)
@@ -820,6 +827,7 @@ enum nft_counter_attributes {
        NFTA_COUNTER_UNSPEC,
        NFTA_COUNTER_BYTES,
        NFTA_COUNTER_PACKETS,
+       NFTA_COUNTER_PAD,
        __NFTA_COUNTER_MAX
 };
 #define NFTA_COUNTER_MAX       (__NFTA_COUNTER_MAX - 1)
@@ -1055,6 +1063,7 @@ enum nft_trace_attibutes {
        NFTA_TRACE_MARK,
        NFTA_TRACE_NFPROTO,
        NFTA_TRACE_POLICY,
+       NFTA_TRACE_PAD,
        __NFTA_TRACE_MAX
 };
 #define NFTA_TRACE_MAX (__NFTA_TRACE_MAX - 1)
index f3e34dbbf9666d79f481c9d879cafbc2d8045022..36047ec70f373099c27e0b880438bf2e2af09035 100644 (file)
@@ -29,6 +29,7 @@ enum nfnl_acct_type {
        NFACCT_FLAGS,
        NFACCT_QUOTA,
        NFACCT_FILTER,
+       NFACCT_PAD,
        __NFACCT_MAX
 };
 #define NFACCT_MAX (__NFACCT_MAX - 1)
index c1a4e1441a25416e960349414b6a71e2c4409189..9df789709abe018f76a6b9c581718ac4374dc6da 100644 (file)
@@ -116,6 +116,7 @@ enum ctattr_protoinfo_dccp {
        CTA_PROTOINFO_DCCP_STATE,
        CTA_PROTOINFO_DCCP_ROLE,
        CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ,
+       CTA_PROTOINFO_DCCP_PAD,
        __CTA_PROTOINFO_DCCP_MAX,
 };
 #define CTA_PROTOINFO_DCCP_MAX (__CTA_PROTOINFO_DCCP_MAX - 1)
@@ -135,6 +136,7 @@ enum ctattr_counters {
        CTA_COUNTERS_BYTES,             /* 64bit counters */
        CTA_COUNTERS32_PACKETS,         /* old 32bit counters, unused */
        CTA_COUNTERS32_BYTES,           /* old 32bit counters, unused */
+       CTA_COUNTERS_PAD,
        __CTA_COUNTERS_MAX
 };
 #define CTA_COUNTERS_MAX (__CTA_COUNTERS_MAX - 1)
@@ -143,6 +145,7 @@ enum ctattr_tstamp {
        CTA_TIMESTAMP_UNSPEC,
        CTA_TIMESTAMP_START,
        CTA_TIMESTAMP_STOP,
+       CTA_TIMESTAMP_PAD,
        __CTA_TIMESTAMP_MAX
 };
 #define CTA_TIMESTAMP_MAX (__CTA_TIMESTAMP_MAX - 1)
index b67a853638ffc4046cad2184d244e310d39daa5f..ae30841ff94e4ffbbae6536609be3b431606b40f 100644 (file)
@@ -30,6 +30,14 @@ struct nfqnl_msg_packet_timestamp {
        __aligned_be64  usec;
 };
 
+enum nfqnl_vlan_attr {
+       NFQA_VLAN_UNSPEC,
+       NFQA_VLAN_PROTO,                /* __be16 skb vlan_proto */
+       NFQA_VLAN_TCI,                  /* __be16 skb htons(vlan_tci) */
+       __NFQA_VLAN_MAX,
+};
+#define NFQA_VLAN_MAX (__NFQA_VLAN_MAX - 1)
+
 enum nfqnl_attr_type {
        NFQA_UNSPEC,
        NFQA_PACKET_HDR,
@@ -50,6 +58,8 @@ enum nfqnl_attr_type {
        NFQA_UID,                       /* __u32 sk uid */
        NFQA_GID,                       /* __u32 sk gid */
        NFQA_SECCTX,                    /* security context string */
+       NFQA_VLAN,                      /* nested attribute: packet vlan info */
+       NFQA_L2HDR,                     /* full L2 header */
 
        __NFQA_MAX
 };
index 6da52d7b48c47a7cc7d7f48cda687a0f2479598c..2c55dd1894c3e21e06c3dfe02d4e977267e71b82 100644 (file)
  * @NL80211_CMD_ASSOCIATE: association request and notification; like
  *     NL80211_CMD_AUTHENTICATE but for Association and Reassociation
  *     (similar to MLME-ASSOCIATE.request, MLME-REASSOCIATE.request,
- *     MLME-ASSOCIATE.confirm or MLME-REASSOCIATE.confirm primitives).
+ *     MLME-ASSOCIATE.confirm or MLME-REASSOCIATE.confirm primitives). The
+ *     %NL80211_ATTR_PREV_BSSID attribute is used to specify whether the
+ *     request is for the initial association to an ESS (that attribute not
+ *     included) or for reassociation within the ESS (that attribute is
+ *     included).
  * @NL80211_CMD_DEAUTHENTICATE: deauthentication request and notification; like
  *     NL80211_CMD_AUTHENTICATE but for Deauthentication frames (similar to
  *     MLME-DEAUTHENTICATION.request and MLME-DEAUTHENTICATE.indication
 *     set of BSSID, frequency parameters is used (i.e., either the enforcing
  *     %NL80211_ATTR_MAC,%NL80211_ATTR_WIPHY_FREQ or the less strict
  *     %NL80211_ATTR_MAC_HINT and %NL80211_ATTR_WIPHY_FREQ_HINT).
+ *     %NL80211_ATTR_PREV_BSSID can be used to request a reassociation within
+ *     the ESS in case the device is already associated and an association with
+ *     a different BSS is desired.
  *     Background scan period can optionally be
  *     specified in %NL80211_ATTR_BG_SCAN_PERIOD,
  *     if not specified default background scan configuration
@@ -1287,8 +1294,11 @@ enum nl80211_commands {
  * @NL80211_ATTR_RESP_IE: (Re)association response information elements as
  *     sent by peer, for ROAM and successful CONNECT events.
  *
- * @NL80211_ATTR_PREV_BSSID: previous BSSID, to be used by in ASSOCIATE
- *     commands to specify using a reassociate frame
+ * @NL80211_ATTR_PREV_BSSID: previous BSSID, to be used in ASSOCIATE and CONNECT
+ *     commands to specify a request to reassociate within an ESS, i.e., to use
+ *     a Reassociation Request frame (with the value of this attribute in the
+ *     Current AP address field) instead of an Association Request frame, which
+ *     is used for the initial association to an ESS.
  *
  * @NL80211_ATTR_KEY: key information in a nested attribute with
  *     %NL80211_KEY_* sub-attributes
@@ -2187,6 +2197,8 @@ enum nl80211_attrs {
 
        NL80211_ATTR_STA_SUPPORT_P2P_PS,
 
+       NL80211_ATTR_PAD,
+
        /* add attributes here, update the policy in nl80211.c */
 
        __NL80211_ATTR_AFTER_LAST,
@@ -3013,6 +3025,7 @@ enum nl80211_survey_info {
        NL80211_SURVEY_INFO_TIME_RX,
        NL80211_SURVEY_INFO_TIME_TX,
        NL80211_SURVEY_INFO_TIME_SCAN,
+       NL80211_SURVEY_INFO_PAD,
 
        /* keep last */
        __NL80211_SURVEY_INFO_AFTER_LAST,
@@ -3458,6 +3471,7 @@ enum nl80211_bss {
        NL80211_BSS_BEACON_TSF,
        NL80211_BSS_PRESP_DATA,
        NL80211_BSS_LAST_SEEN_BOOTTIME,
+       NL80211_BSS_PAD,
 
        /* keep last */
        __NL80211_BSS_AFTER_LAST,
@@ -3643,11 +3657,15 @@ enum nl80211_txrate_gi {
  * @NL80211_BAND_2GHZ: 2.4 GHz ISM band
  * @NL80211_BAND_5GHZ: around 5 GHz band (4.9 - 5.7 GHz)
  * @NL80211_BAND_60GHZ: around 60 GHz band (58.32 - 64.80 GHz)
+ * @NUM_NL80211_BANDS: number of bands, avoid using this in userspace
+ *     since newer kernel versions may support more bands
  */
 enum nl80211_band {
        NL80211_BAND_2GHZ,
        NL80211_BAND_5GHZ,
        NL80211_BAND_60GHZ,
+
+       NUM_NL80211_BANDS,
 };
 
 /**
index 616d04761730d1613a9b39977e277a6d26666e87..bb0d515b765458d8a8fa6442fce6c5aab6a0aad5 100644 (file)
@@ -84,6 +84,7 @@ enum ovs_datapath_attr {
        OVS_DP_ATTR_STATS,              /* struct ovs_dp_stats */
        OVS_DP_ATTR_MEGAFLOW_STATS,     /* struct ovs_dp_megaflow_stats */
        OVS_DP_ATTR_USER_FEATURES,      /* OVS_DP_F_*  */
+       OVS_DP_ATTR_PAD,
        __OVS_DP_ATTR_MAX
 };
 
@@ -253,6 +254,7 @@ enum ovs_vport_attr {
        OVS_VPORT_ATTR_UPCALL_PID, /* array of u32 Netlink socket PIDs for */
                                /* receiving upcalls */
        OVS_VPORT_ATTR_STATS,   /* struct ovs_vport_stats */
+       OVS_VPORT_ATTR_PAD,
        __OVS_VPORT_ATTR_MAX
 };
 
@@ -351,6 +353,7 @@ enum ovs_tunnel_key_attr {
        OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS,         /* Nested OVS_VXLAN_EXT_* */
        OVS_TUNNEL_KEY_ATTR_IPV6_SRC,           /* struct in6_addr src IPv6 address. */
        OVS_TUNNEL_KEY_ATTR_IPV6_DST,           /* struct in6_addr dst IPv6 address. */
+       OVS_TUNNEL_KEY_ATTR_PAD,
        __OVS_TUNNEL_KEY_ATTR_MAX
 };
 
@@ -518,6 +521,7 @@ enum ovs_flow_attr {
                                  * logging should be suppressed. */
        OVS_FLOW_ATTR_UFID,      /* Variable length unique flow identifier. */
        OVS_FLOW_ATTR_UFID_FLAGS,/* u32 of OVS_UFID_F_*. */
+       OVS_FLOW_ATTR_PAD,
        __OVS_FLOW_ATTR_MAX
 };
 
index c43c5f78b9c457e9e0d1addc02668b0b65e42a9e..84660905fedf92c0448c218ccf90ee768cb8d5e3 100644 (file)
@@ -66,6 +66,7 @@ enum {
        TCA_ACT_OPTIONS,
        TCA_ACT_INDEX,
        TCA_ACT_STATS,
+       TCA_ACT_PAD,
        __TCA_ACT_MAX
 };
 
@@ -173,6 +174,7 @@ enum {
        TCA_U32_PCNT,
        TCA_U32_MARK,
        TCA_U32_FLAGS,
+       TCA_U32_PAD,
        __TCA_U32_MAX
 };
 
index 8cb18b44968ea5fa49872a934093f28b128604a4..2382eed502783c23cf6f6dca8a4cca6157480d20 100644 (file)
@@ -179,6 +179,7 @@ enum {
        TCA_TBF_PRATE64,
        TCA_TBF_BURST,
        TCA_TBF_PBURST,
+       TCA_TBF_PAD,
        __TCA_TBF_MAX,
 };
 
@@ -368,6 +369,7 @@ enum {
        TCA_HTB_DIRECT_QLEN,
        TCA_HTB_RATE64,
        TCA_HTB_CEIL64,
+       TCA_HTB_PAD,
        __TCA_HTB_MAX,
 };
 
@@ -531,6 +533,7 @@ enum {
        TCA_NETEM_RATE,
        TCA_NETEM_ECN,
        TCA_NETEM_RATE64,
+       TCA_NETEM_PAD,
        __TCA_NETEM_MAX,
 };
 
@@ -715,6 +718,8 @@ enum {
        TCA_FQ_CODEL_FLOWS,
        TCA_FQ_CODEL_QUANTUM,
        TCA_FQ_CODEL_CE_THRESHOLD,
+       TCA_FQ_CODEL_DROP_BATCH_SIZE,
+       TCA_FQ_CODEL_MEMORY_LIMIT,
        __TCA_FQ_CODEL_MAX
 };
 
@@ -739,6 +744,8 @@ struct tc_fq_codel_qd_stats {
        __u32   new_flows_len;  /* count of flows in new list */
        __u32   old_flows_len;  /* count of flows in old list */
        __u32   ce_mark;        /* packets above ce_threshold */
+       __u32   memory_usage;   /* in bytes */
+       __u32   drop_overmemory;
 };
 
 struct tc_fq_codel_cl_stats {
diff --git a/include/uapi/linux/qrtr.h b/include/uapi/linux/qrtr.h
new file mode 100644 (file)
index 0000000..66c0748
--- /dev/null
@@ -0,0 +1,12 @@
+#ifndef _LINUX_QRTR_H
+#define _LINUX_QRTR_H
+
+#include <linux/socket.h>
+
+struct sockaddr_qrtr {
+       __kernel_sa_family_t sq_family;
+       __u32 sq_node;
+       __u32 sq_port;
+};
+
+#endif /* _LINUX_QRTR_H */
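Binding a socket with this address type might look as follows; AF_QIPCRTR and the auto-assignment semantics of port 0 come from the accompanying net/qrtr code and are assumptions here:

int fd = socket(AF_QIPCRTR, SOCK_DGRAM, 0);
struct sockaddr_qrtr sq = {
	.sq_family = AF_QIPCRTR,
	.sq_node   = 1,	/* example node id */
	.sq_port   = 0,	/* assumed to request an ephemeral port */
};
bind(fd, (struct sockaddr *)&sq, sizeof(sq));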
index 38baddb807f503f5f526d93377df677ab80c5ad1..4d2489ef6f1065a2303f05a1450da5e06c50ee67 100644 (file)
@@ -191,6 +191,7 @@ enum {
        QUOTA_NL_A_DEV_MAJOR,
        QUOTA_NL_A_DEV_MINOR,
        QUOTA_NL_A_CAUSED_ID,
+       QUOTA_NL_A_PAD,
        __QUOTA_NL_A_MAX,
 };
 #define QUOTA_NL_A_MAX (__QUOTA_NL_A_MAX - 1)
diff --git a/include/uapi/linux/rio_mport_cdev.h b/include/uapi/linux/rio_mport_cdev.h
new file mode 100644 (file)
index 0000000..5796bf1
--- /dev/null
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2015-2016, Integrated Device Technology Inc.
+ * Copyright (c) 2015, Prodrive Technologies
+ * Copyright (c) 2015, Texas Instruments Incorporated
+ * Copyright (c) 2015, RapidIO Trade Association
+ * All rights reserved.
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License(GPL) Version 2, or the BSD-3 Clause license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RIO_MPORT_CDEV_H_
+#define _RIO_MPORT_CDEV_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+struct rio_mport_maint_io {
+       __u16 rioid;            /* destID of remote device */
+       __u8  hopcount;         /* hopcount to remote device */
+       __u8  pad0[5];
+       __u32 offset;           /* offset in register space */
+       __u32 length;           /* length in bytes */
+       __u64 buffer;           /* pointer to data buffer */
+};
+
+/*
+ * Definitions for RapidIO data transfers:
+ * - memory mapped (MAPPED)
+ * - packet generation from memory (TRANSFER)
+ */
+#define RIO_TRANSFER_MODE_MAPPED       (1 << 0)
+#define RIO_TRANSFER_MODE_TRANSFER     (1 << 1)
+#define RIO_CAP_DBL_SEND               (1 << 2)
+#define RIO_CAP_DBL_RECV               (1 << 3)
+#define RIO_CAP_PW_SEND                        (1 << 4)
+#define RIO_CAP_PW_RECV                        (1 << 5)
+#define RIO_CAP_MAP_OUTB               (1 << 6)
+#define RIO_CAP_MAP_INB                        (1 << 7)
+
+struct rio_mport_properties {
+       __u16 hdid;
+       __u8  id;                       /* Physical port ID */
+       __u8  index;
+       __u32 flags;
+       __u32 sys_size;         /* Default addressing size */
+       __u8  port_ok;
+       __u8  link_speed;
+       __u8  link_width;
+       __u8  pad0;
+       __u32 dma_max_sge;
+       __u32 dma_max_size;
+       __u32 dma_align;
+       __u32 transfer_mode;            /* Default transfer mode */
+       __u32 cap_sys_size;             /* Capable system sizes */
+       __u32 cap_addr_size;            /* Capable addressing sizes */
+       __u32 cap_transfer_mode;        /* Capable transfer modes */
+       __u32 cap_mport;                /* Mport capabilities */
+};
+
+/*
+ * Definitions for RapidIO events:
+ * - incoming port-writes
+ * - incoming doorbells
+ */
+#define RIO_DOORBELL   (1 << 0)
+#define RIO_PORTWRITE  (1 << 1)
+
+struct rio_doorbell {
+       __u16 rioid;
+       __u16 payload;
+};
+
+struct rio_doorbell_filter {
+       __u16 rioid;    /* Use RIO_INVALID_DESTID to match all ids */
+       __u16 low;
+       __u16 high;
+       __u16 pad0;
+};
+
+
+struct rio_portwrite {
+       __u32 payload[16];
+};
+
+struct rio_pw_filter {
+       __u32 mask;
+       __u32 low;
+       __u32 high;
+       __u32 pad0;
+};
+
+/* A RapidIO base address for inbound requests set to the value defined below
+ * indicates that no specific RIO-to-local address translation is requested
+ * and that the driver should use direct (one-to-one) address mapping.
+ */
+#define RIO_MAP_ANY_ADDR       (__u64)(~((__u64) 0))
+
+struct rio_mmap {
+       __u16 rioid;
+       __u16 pad0[3];
+       __u64 rio_addr;
+       __u64 length;
+       __u64 handle;
+       __u64 address;
+};
+
+struct rio_dma_mem {
+       __u64 length;           /* length of DMA memory */
+       __u64 dma_handle;       /* handle associated with this memory */
+       __u64 address;
+};
+
+struct rio_event {
+       __u32 header;   /* event type RIO_DOORBELL or RIO_PORTWRITE */
+       union {
+               struct rio_doorbell doorbell;   /* header for RIO_DOORBELL */
+               struct rio_portwrite portwrite; /* header for RIO_PORTWRITE */
+       } u;
+       __u32 pad0;
+};
+
+enum rio_transfer_sync {
+       RIO_TRANSFER_SYNC,      /* synchronous transfer */
+       RIO_TRANSFER_ASYNC,     /* asynchronous transfer */
+       RIO_TRANSFER_FAF,       /* fire-and-forget transfer */
+};
+
+enum rio_transfer_dir {
+       RIO_TRANSFER_DIR_READ,  /* Read operation */
+       RIO_TRANSFER_DIR_WRITE, /* Write operation */
+};
+
+/*
+ * RapidIO data exchange transactions are lists of individual transfers. Each
+ * transfer exchanges data between two RapidIO devices by remote direct memory
+ * access and has its own completion code.
+ *
+ * The RapidIO specification defines four types of data exchange requests:
+ * NREAD, NWRITE, SWRITE and NWRITE_R. The RapidIO DMA channel interface allows
+ * the caller to specify the required type of write operation, or a combination
+ * of them when only the last data packet requires a response.
+ *
+ * NREAD:    read up to 256 bytes from remote device memory into local memory
+ * NWRITE:   write up to 256 bytes from local memory to remote device memory
+ *           without confirmation
+ * SWRITE:   as NWRITE, but all addresses and payloads must be 64-bit aligned
+ * NWRITE_R: as NWRITE, but expect acknowledgment from remote device.
+ *
+ * The default exchange is chosen from NREAD and any of the WRITE modes as the
+ * driver sees fit. For write requests the user can explicitly choose between
+ * any of the write modes for each transaction.
+ */
+enum rio_exchange {
+       RIO_EXCHANGE_DEFAULT,   /* Default method */
+       RIO_EXCHANGE_NWRITE,    /* All packets using NWRITE */
+       RIO_EXCHANGE_SWRITE,    /* All packets using SWRITE */
+       RIO_EXCHANGE_NWRITE_R,  /* Last packet NWRITE_R, others NWRITE */
+       RIO_EXCHANGE_SWRITE_R,  /* Last packet NWRITE_R, others SWRITE */
+       RIO_EXCHANGE_NWRITE_R_ALL, /* All packets using NWRITE_R */
+};
+
+struct rio_transfer_io {
+       __u64 rio_addr; /* Address in target's RIO mem space */
+       __u64 loc_addr;
+       __u64 handle;
+       __u64 offset;   /* Offset in buffer */
+       __u64 length;   /* Length in bytes */
+       __u16 rioid;    /* Target destID */
+       __u16 method;   /* Data exchange method, one of rio_exchange enum */
+       __u32 completion_code;  /* Completion code for this transfer */
+};
+
+struct rio_transaction {
+       __u64 block;    /* Pointer to array of <count> transfers */
+       __u32 count;    /* Number of transfers */
+       __u32 transfer_mode;    /* Data transfer mode */
+       __u16 sync;     /* Synch method, one of rio_transfer_sync enum */
+       __u16 dir;      /* Transfer direction, one of rio_transfer_dir enum */
+       __u32 pad0;
+};
+
+struct rio_async_tx_wait {
+       __u32 token;    /* DMA transaction ID token */
+       __u32 timeout;  /* Wait timeout in msec, if 0 use default TO */
+};
+
+#define RIO_MAX_DEVNAME_SZ     20
+
+struct rio_rdev_info {
+       __u16 destid;
+       __u8 hopcount;
+       __u8 pad0;
+       __u32 comptag;
+       char name[RIO_MAX_DEVNAME_SZ + 1];
+};
+
+/* Driver IOCTL codes */
+#define RIO_MPORT_DRV_MAGIC           'm'
+
+#define RIO_MPORT_MAINT_HDID_SET       \
+       _IOW(RIO_MPORT_DRV_MAGIC, 1, __u16)
+#define RIO_MPORT_MAINT_COMPTAG_SET    \
+       _IOW(RIO_MPORT_DRV_MAGIC, 2, __u32)
+#define RIO_MPORT_MAINT_PORT_IDX_GET   \
+       _IOR(RIO_MPORT_DRV_MAGIC, 3, __u32)
+#define RIO_MPORT_GET_PROPERTIES \
+       _IOR(RIO_MPORT_DRV_MAGIC, 4, struct rio_mport_properties)
+#define RIO_MPORT_MAINT_READ_LOCAL \
+       _IOR(RIO_MPORT_DRV_MAGIC, 5, struct rio_mport_maint_io)
+#define RIO_MPORT_MAINT_WRITE_LOCAL \
+       _IOW(RIO_MPORT_DRV_MAGIC, 6, struct rio_mport_maint_io)
+#define RIO_MPORT_MAINT_READ_REMOTE \
+       _IOR(RIO_MPORT_DRV_MAGIC, 7, struct rio_mport_maint_io)
+#define RIO_MPORT_MAINT_WRITE_REMOTE \
+       _IOW(RIO_MPORT_DRV_MAGIC, 8, struct rio_mport_maint_io)
+#define RIO_ENABLE_DOORBELL_RANGE      \
+       _IOW(RIO_MPORT_DRV_MAGIC, 9, struct rio_doorbell_filter)
+#define RIO_DISABLE_DOORBELL_RANGE     \
+       _IOW(RIO_MPORT_DRV_MAGIC, 10, struct rio_doorbell_filter)
+#define RIO_ENABLE_PORTWRITE_RANGE     \
+       _IOW(RIO_MPORT_DRV_MAGIC, 11, struct rio_pw_filter)
+#define RIO_DISABLE_PORTWRITE_RANGE    \
+       _IOW(RIO_MPORT_DRV_MAGIC, 12, struct rio_pw_filter)
+#define RIO_SET_EVENT_MASK             \
+       _IOW(RIO_MPORT_DRV_MAGIC, 13, __u32)
+#define RIO_GET_EVENT_MASK             \
+       _IOR(RIO_MPORT_DRV_MAGIC, 14, __u32)
+#define RIO_MAP_OUTBOUND \
+       _IOWR(RIO_MPORT_DRV_MAGIC, 15, struct rio_mmap)
+#define RIO_UNMAP_OUTBOUND \
+       _IOW(RIO_MPORT_DRV_MAGIC, 16, struct rio_mmap)
+#define RIO_MAP_INBOUND \
+       _IOWR(RIO_MPORT_DRV_MAGIC, 17, struct rio_mmap)
+#define RIO_UNMAP_INBOUND \
+       _IOW(RIO_MPORT_DRV_MAGIC, 18, __u64)
+#define RIO_ALLOC_DMA \
+       _IOWR(RIO_MPORT_DRV_MAGIC, 19, struct rio_dma_mem)
+#define RIO_FREE_DMA \
+       _IOW(RIO_MPORT_DRV_MAGIC, 20, __u64)
+#define RIO_TRANSFER \
+       _IOWR(RIO_MPORT_DRV_MAGIC, 21, struct rio_transaction)
+#define RIO_WAIT_FOR_ASYNC \
+       _IOW(RIO_MPORT_DRV_MAGIC, 22, struct rio_async_tx_wait)
+#define RIO_DEV_ADD \
+       _IOW(RIO_MPORT_DRV_MAGIC, 23, struct rio_rdev_info)
+#define RIO_DEV_DEL \
+       _IOW(RIO_MPORT_DRV_MAGIC, 24, struct rio_rdev_info)
+
+#endif /* _RIO_MPORT_CDEV_H_ */
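Userspace reaches these ioctls through the mport character device; a sketch, with the device node name as an assumption:

struct rio_mport_properties props;
int fd = open("/dev/rio_mport0", O_RDWR);	/* node name is an assumption */

if (fd >= 0 && ioctl(fd, RIO_MPORT_GET_PROPERTIES, &props) == 0)
	printf("hdid %u, link width %u\n", props.hdid, props.link_width);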
index ca764b5da86da440c9769693bdb0bdfce1daa623..262f0379d83ac1965d8f792b5f1d3b8634f86d69 100644 (file)
@@ -139,6 +139,11 @@ enum {
        RTM_GETNSID = 90,
 #define RTM_GETNSID RTM_GETNSID
 
+       RTM_NEWSTATS = 92,
+#define RTM_NEWSTATS RTM_NEWSTATS
+       RTM_GETSTATS = 94,
+#define RTM_GETSTATS RTM_GETSTATS
+
        __RTM_MAX,
 #define RTM_MAX                (((__RTM_MAX + 3) & ~3) - 1)
 };
@@ -312,6 +317,7 @@ enum rtattr_type_t {
        RTA_ENCAP_TYPE,
        RTA_ENCAP,
        RTA_EXPIRES,
+       RTA_PAD,
        __RTA_MAX
 };
 
@@ -536,6 +542,7 @@ enum {
        TCA_FCNT,
        TCA_STATS2,
        TCA_STAB,
+       TCA_PAD,
        __TCA_MAX
 };
 
index aa9f104287438aa32c622d006d4d30794b9b220b..621fa8ac44257ced281590206b7dda37b818177f 100644 (file)
@@ -1 +1,5 @@
 #include <linux/compiler.h>
+
+#ifndef __always_inline
+#define __always_inline inline
+#endif
index 3f10e5317b46b3858479b2af949d5a719bc72405..8f3a8f606fd95b723d9ed26e179c8a741250717f 100644 (file)
@@ -45,9 +45,7 @@
 
 static inline __attribute_const__ __u16 __fswab16(__u16 val)
 {
-#ifdef __HAVE_BUILTIN_BSWAP16__
-       return __builtin_bswap16(val);
-#elif defined (__arch_swab16)
+#if defined (__arch_swab16)
        return __arch_swab16(val);
 #else
        return ___constant_swab16(val);
@@ -56,9 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
 
 static inline __attribute_const__ __u32 __fswab32(__u32 val)
 {
-#ifdef __HAVE_BUILTIN_BSWAP32__
-       return __builtin_bswap32(val);
-#elif defined(__arch_swab32)
+#if defined(__arch_swab32)
        return __arch_swab32(val);
 #else
        return ___constant_swab32(val);
@@ -67,9 +63,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
 
 static inline __attribute_const__ __u64 __fswab64(__u64 val)
 {
-#ifdef __HAVE_BUILTIN_BSWAP64__
-       return __builtin_bswap64(val);
-#elif defined (__arch_swab64)
+#if defined (__arch_swab64)
        return __arch_swab64(val);
 #elif defined(__SWAB_64_THRU_32__)
        __u32 h = val >> 32;
@@ -102,28 +96,40 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val)
  * __swab16 - return a byteswapped 16-bit value
  * @x: value to byteswap
  */
+#ifdef __HAVE_BUILTIN_BSWAP16__
+#define __swab16(x) (__u16)__builtin_bswap16((__u16)(x))
+#else
 #define __swab16(x)                            \
        (__builtin_constant_p((__u16)(x)) ?     \
        ___constant_swab16(x) :                 \
        __fswab16(x))
+#endif
 
 /**
  * __swab32 - return a byteswapped 32-bit value
  * @x: value to byteswap
  */
+#ifdef __HAVE_BUILTIN_BSWAP32__
+#define __swab32(x) (__u32)__builtin_bswap32((__u32)(x))
+#else
 #define __swab32(x)                            \
        (__builtin_constant_p((__u32)(x)) ?     \
        ___constant_swab32(x) :                 \
        __fswab32(x))
+#endif
 
 /**
  * __swab64 - return a byteswapped 64-bit value
  * @x: value to byteswap
  */
+#ifdef __HAVE_BUILTIN_BSWAP64__
+#define __swab64(x) (__u64)__builtin_bswap64((__u64)(x))
+#else
 #define __swab64(x)                            \
        (__builtin_constant_p((__u64)(x)) ?     \
        ___constant_swab64(x) :                 \
        __fswab64(x))
+#endif
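Callers are unaffected; constant arguments now fold at compile time and variable ones compile to the builtin. A two-line sketch (port is an illustrative variable):

u32 x = __swab32(0x12345678);	/* 0x78563412, folded at compile time */
u16 y = __swab16(port);		/* emitted as a single bswap/rotate */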
 
 /**
  * __swahw32 - return a word-swapped 32-bit value
index 07f17cc70bb3ee2f8ca7667221679b67f09c3cc6..063d9d465119b2519f87aa4e31961311acf026e0 100644 (file)
@@ -26,6 +26,7 @@ enum {
        TCA_ACT_BPF_OPS,
        TCA_ACT_BPF_FD,
        TCA_ACT_BPF_NAME,
+       TCA_ACT_BPF_PAD,
        __TCA_ACT_BPF_MAX,
 };
 #define TCA_ACT_BPF_MAX (__TCA_ACT_BPF_MAX - 1)
index 994b0971bce2102433b4675270dd77a69fbb5bb6..62a5e944c55485270820d36187adb6cb4d5c5b28 100644 (file)
@@ -15,6 +15,7 @@ enum {
        TCA_CONNMARK_UNSPEC,
        TCA_CONNMARK_PARMS,
        TCA_CONNMARK_TM,
+       TCA_CONNMARK_PAD,
        __TCA_CONNMARK_MAX
 };
 #define TCA_CONNMARK_MAX (__TCA_CONNMARK_MAX - 1)
index a047c49a31531e0df45d29943216689795746584..8ac8041ab5f134b74391f5470cef16f3f435ed1c 100644 (file)
@@ -10,6 +10,7 @@ enum {
        TCA_CSUM_UNSPEC,
        TCA_CSUM_PARMS,
        TCA_CSUM_TM,
+       TCA_CSUM_PAD,
        __TCA_CSUM_MAX
 };
 #define TCA_CSUM_MAX (__TCA_CSUM_MAX - 1)
index 17dddb40f74043e40ac09e36e15c925c5aa13df0..d2a3abb77aebdef99a9166e60b95a982f06a74f4 100644 (file)
@@ -12,6 +12,7 @@ enum {
        TCA_DEF_TM,
        TCA_DEF_PARMS,
        TCA_DEF_DATA,
+       TCA_DEF_PAD,
        __TCA_DEF_MAX
 };
 #define TCA_DEF_MAX (__TCA_DEF_MAX - 1)
index f7bf94eed510d47dec8355939b9fc41f5ee625f6..70b536a8f8b260746faecdb14ffa322f0db5ad1c 100644 (file)
@@ -25,6 +25,7 @@ enum {
        TCA_GACT_TM,
        TCA_GACT_PARMS,
        TCA_GACT_PROB,
+       TCA_GACT_PAD,
        __TCA_GACT_MAX
 };
 #define TCA_GACT_MAX (__TCA_GACT_MAX - 1)
index d648ff66586f255c055d7c45d303d96fea8bb839..4ece02a77b9aa567ca02f859735c815f064bb63f 100644 (file)
@@ -23,6 +23,7 @@ enum {
        TCA_IFE_SMAC,
        TCA_IFE_TYPE,
        TCA_IFE_METALST,
+       TCA_IFE_PAD,
        __TCA_IFE_MAX
 };
 #define TCA_IFE_MAX (__TCA_IFE_MAX - 1)
index 130aaadf6fac9bf419c6ff13c7adae05c34f9b4c..7c6e155dd981d17a9fade88994eb82331a4309a8 100644 (file)
@@ -14,6 +14,7 @@ enum {
        TCA_IPT_CNT,
        TCA_IPT_TM,
        TCA_IPT_TARG,
+       TCA_IPT_PAD,
        __TCA_IPT_MAX
 };
 #define TCA_IPT_MAX (__TCA_IPT_MAX - 1)
index 7561750e8fd69b92e1e1a4929e25ec8709d74ce2..3d7a2b352a62c3dfeb0f077c6207a93d8834481d 100644 (file)
@@ -20,6 +20,7 @@ enum {
        TCA_MIRRED_UNSPEC,
        TCA_MIRRED_TM,
        TCA_MIRRED_PARMS,
+       TCA_MIRRED_PAD,
        __TCA_MIRRED_MAX
 };
 #define TCA_MIRRED_MAX (__TCA_MIRRED_MAX - 1)
index 6663aeba0b9a78fd1c76816c8540fdfa4d0ee747..923457c9ebf0caf761bc24c897ba8a20f38e8c22 100644 (file)
@@ -10,6 +10,7 @@ enum {
        TCA_NAT_UNSPEC,
        TCA_NAT_PARMS,
        TCA_NAT_TM,
+       TCA_NAT_PAD,
        __TCA_NAT_MAX
 };
 #define TCA_NAT_MAX (__TCA_NAT_MAX - 1)
index 716cfabcd5b2fa6064588b118b7721f29477bf30..6389959a5157cf1f43338a3742093f66b14b564e 100644 (file)
@@ -10,6 +10,7 @@ enum {
        TCA_PEDIT_UNSPEC,
        TCA_PEDIT_TM,
        TCA_PEDIT_PARMS,
+       TCA_PEDIT_PAD,
        __TCA_PEDIT_MAX
 };
 #define TCA_PEDIT_MAX (__TCA_PEDIT_MAX - 1)
index 7a2e910a5f0836e0ab92db331acef7780fb2029e..fecb5cc48c40667a5b75d7c06158d91443a11862 100644 (file)
@@ -39,6 +39,7 @@ enum {
        TCA_SKBEDIT_PRIORITY,
        TCA_SKBEDIT_QUEUE_MAPPING,
        TCA_SKBEDIT_MARK,
+       TCA_SKBEDIT_PAD,
        __TCA_SKBEDIT_MAX
 };
 #define TCA_SKBEDIT_MAX (__TCA_SKBEDIT_MAX - 1)
index f7b8d448b9603f9b1154472911f5a4b8ba663a54..31151ff6264f5ca966691c4ad0008c6b09cfe71a 100644 (file)
@@ -28,6 +28,7 @@ enum {
        TCA_VLAN_PARMS,
        TCA_VLAN_PUSH_VLAN_ID,
        TCA_VLAN_PUSH_VLAN_PROTOCOL,
+       TCA_VLAN_PAD,
        __TCA_VLAN_MAX,
 };
 #define TCA_VLAN_MAX (__TCA_VLAN_MAX - 1)
index 93533926035ca8d18e08f04eac3adacfcb266d8d..80ad90d0cfc23b0a67b4c6639eaf6df1d2abca25 100644 (file)
@@ -40,6 +40,7 @@ enum {
        TCP_METRICS_ATTR_FOPEN_COOKIE,          /* binary */
        TCP_METRICS_ATTR_SADDR_IPV4,            /* u32 */
        TCP_METRICS_ATTR_SADDR_IPV6,            /* binary */
+       TCP_METRICS_ATTR_PAD,
 
        __TCP_METRICS_ATTR_MAX,
 };
index 16574ea18f0cf62d743287779ffaed6205d120d0..2c8180f9156faf8c6429bb738e22101ceec11660 100644 (file)
@@ -36,6 +36,7 @@ struct udphdr {
 #define UDP_ENCAP_ESPINUDP_NON_IKE     1 /* draft-ietf-ipsec-nat-t-ike-00/01 */
 #define UDP_ENCAP_ESPINUDP     2 /* draft-ietf-ipsec-udp-encaps-06 */
 #define UDP_ENCAP_L2TPINUDP    3 /* rfc2661 */
-
+#define UDP_ENCAP_GTP0         4 /* GSM TS 09.60 */
+#define UDP_ENCAP_GTP1U                5 /* 3GPP TS 29.060 */
 
 #endif /* _UAPI_LINUX_UDP_H */
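
The two new encapsulation types identify a UDP socket as a GTP tunnel endpoint (GTPv0 traditionally on port 3386, GTPv1-U on port 2152). A hedged sketch of how a kernel driver can mark a socket as such an endpoint, using the generic helper from include/net/udp_tunnel.h; the gtp pointer and gtp_encap_recv() hook are placeholders, and the actual gtp driver's setup may differ:

        struct udp_tunnel_sock_cfg tuncfg = { };

        tuncfg.sk_user_data = gtp;              /* driver-private state (assumed) */
        tuncfg.encap_type   = UDP_ENCAP_GTP1U;  /* GTPv1-U framing */
        tuncfg.encap_rcv    = gtp_encap_recv;   /* placeholder receive hook */
        setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);
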
index 06d6c6228a7a75cfbbd0426d376a441b69597459..d5ce71607972e82f37e7a2aaa83d9fd59c87f919 100644 (file)
@@ -899,7 +899,7 @@ struct usb_ssp_cap_descriptor {
        __le32 bmAttributes;
 #define USB_SSP_SUBLINK_SPEED_ATTRIBS  (0x1f << 0) /* sublink speed entries */
 #define USB_SSP_SUBLINK_SPEED_IDS      (0xf << 5)  /* speed ID entries */
-       __u16  wFunctionalitySupport;
+       __le16  wFunctionalitySupport;
 #define USB_SSP_MIN_SUBLINK_SPEED_ATTRIBUTE_ID (0xf)
 #define USB_SSP_MIN_RX_LANE_COUNT              (0xf << 8)
 #define USB_SSP_MIN_TX_LANE_COUNT              (0xf << 12)
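
The __u16 to __le16 change is an endianness annotation fix: wFunctionalitySupport is little-endian on the wire, so it must be byte-swapped before the bitmasks below are applied. A sketch of correct consumption, where ssp_cap points at a parsed struct usb_ssp_cap_descriptor:

        u16 func = le16_to_cpu(ssp_cap->wFunctionalitySupport);
        u8 min_speed_id = func & USB_SSP_MIN_SUBLINK_SPEED_ATTRIBUTE_ID;
        u8 min_rx_lanes = (func & USB_SSP_MIN_RX_LANE_COUNT) >> 8;
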
index c039f1d68a0929bfac96031d8eaa489be19edba9..086168e18ca83e462dcf088316d8f6c436cde42f 100644 (file)
 
 #define V4L2_DV_BT_CEA_3840X2160P24 { \
        .type = V4L2_DV_BT_656_1120, \
-       V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+       V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+               V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
                297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \
                V4L2_DV_BT_STD_CEA861, \
                V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
 
 #define V4L2_DV_BT_CEA_3840X2160P25 { \
        .type = V4L2_DV_BT_656_1120, \
-       V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+       V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+               V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
                297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
                V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
 }
 
 #define V4L2_DV_BT_CEA_3840X2160P30 { \
        .type = V4L2_DV_BT_656_1120, \
-       V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+       V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+               V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
                297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
                V4L2_DV_BT_STD_CEA861, \
                V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
 
 #define V4L2_DV_BT_CEA_3840X2160P50 { \
        .type = V4L2_DV_BT_656_1120, \
-       V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+       V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+               V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
                594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
                V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
 }
 
 #define V4L2_DV_BT_CEA_3840X2160P60 { \
        .type = V4L2_DV_BT_656_1120, \
-       V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+       V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+               V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
                594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
                V4L2_DV_BT_STD_CEA861, \
                V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
 
 #define V4L2_DV_BT_CEA_4096X2160P24 { \
        .type = V4L2_DV_BT_656_1120, \
-       V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+       V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+               V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
                297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \
                V4L2_DV_BT_STD_CEA861, \
                V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
 
 #define V4L2_DV_BT_CEA_4096X2160P25 { \
        .type = V4L2_DV_BT_656_1120, \
-       V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+       V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+               V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
                297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
                V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
 }
 
 #define V4L2_DV_BT_CEA_4096X2160P30 { \
        .type = V4L2_DV_BT_656_1120, \
-       V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+       V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+               V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
                297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
                V4L2_DV_BT_STD_CEA861, \
                V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
 
 #define V4L2_DV_BT_CEA_4096X2160P50 { \
        .type = V4L2_DV_BT_656_1120, \
-       V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+       V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+               V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
                594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
                V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
 }
 
 #define V4L2_DV_BT_CEA_4096X2160P60 { \
        .type = V4L2_DV_BT_656_1120, \
-       V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+       V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+               V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
                594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
                V4L2_DV_BT_STD_CEA861, \
                V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
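
Every CEA-861 2160p timing above gains V4L2_DV_VSYNC_POS_POL: these modes use positive polarity on both syncs, but the macros previously advertised only the horizontal one. A consumer tests each axis separately, e.g. (sketch; bt points at a struct v4l2_bt_timings):

        bool hsync_pos = bt->polarities & V4L2_DV_HSYNC_POS_POL;
        bool vsync_pos = bt->polarities & V4L2_DV_VSYNC_POS_POL; /* now set here */
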
index c18264df9504c17ce11c84ef1a14f12a132cb35e..4cb65bbfa654852ab0d0c2e8b344731a86b2e132 100644 (file)
@@ -40,6 +40,8 @@
 #define VIRTIO_CONFIG_S_DRIVER_OK      4
 /* Driver has finished configuring features */
 #define VIRTIO_CONFIG_S_FEATURES_OK    8
+/* Device entered invalid state, driver must reset it */
+#define VIRTIO_CONFIG_S_NEEDS_RESET    0x40
 /* We've given up on this device. */
 #define VIRTIO_CONFIG_S_FAILED         0x80
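
NEEDS_RESET (0x40) is the DEVICE_NEEDS_RESET bit from the virtio 1.0 spec: the device sets it to signal it has hit an internal error and is unusable until the driver resets and fully re-initializes it. A hedged driver-side sketch, assuming the standard get_status() config op; recover_device() is hypothetical:

        u8 status = vdev->config->get_status(vdev);

        if (status & VIRTIO_CONFIG_S_NEEDS_RESET)
                recover_device(vdev); /* reset, re-negotiate features, re-setup queues */
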
 
index 2cd9e608d0d17d88985c3e32774e567da3236bb5..143338978b489bf68d4cb316cf2a7f938cdf5054 100644 (file)
@@ -302,6 +302,7 @@ enum xfrm_attr_type_t {
        XFRMA_SA_EXTRA_FLAGS,   /* __u32 */
        XFRMA_PROTO,            /* __u8 */
        XFRMA_ADDRESS_FILTER,   /* struct xfrm_address_filter */
+       XFRMA_PAD,
        __XFRMA_MAX
 
 #define XFRMA_MAX (__XFRMA_MAX - 1)
index eeba75395f7d10b61fabf995d2b0157aec6d9ee5..ad66589f2ae6ec6bd1290fb112b2e8947bef6d24 100644 (file)
@@ -194,8 +194,9 @@ int ipu_cpmem_set_format_rgb(struct ipuv3_channel *ch,
 int ipu_cpmem_set_format_passthrough(struct ipuv3_channel *ch, int width);
 void ipu_cpmem_set_yuv_interleaved(struct ipuv3_channel *ch, u32 pixel_format);
 void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
-                                  u32 pixel_format, int stride,
-                                  int u_offset, int v_offset);
+                                  unsigned int uv_stride,
+                                  unsigned int u_offset,
+                                  unsigned int v_offset);
 void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch,
                              u32 pixel_format, int stride, int height);
 int ipu_cpmem_set_fmt(struct ipuv3_channel *ch, u32 drm_fourcc);
@@ -236,7 +237,7 @@ void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc);
 int ipu_dmfc_alloc_bandwidth(struct dmfc_channel *dmfc,
                unsigned long bandwidth_mbs, int burstsize);
 void ipu_dmfc_free_bandwidth(struct dmfc_channel *dmfc);
-int ipu_dmfc_init_channel(struct dmfc_channel *dmfc, int width);
+void ipu_dmfc_config_wait4eot(struct dmfc_channel *dmfc, int width);
 struct dmfc_channel *ipu_dmfc_get(struct ipu_soc *ipu, int ipuv3_channel);
 void ipu_dmfc_put(struct dmfc_channel *dmfc);
 
index 96294ac937552178531266cb16ce04185e47c99a..9dc46cb8a0fd79be7f4bdae7ae8f5f4dbdcdb7f4 100644 (file)
@@ -15,9 +15,9 @@
  */
 
 #define xen_pfn_to_page(xen_pfn)       \
-       ((pfn_to_page(((unsigned long)(xen_pfn) << XEN_PAGE_SHIFT) >> PAGE_SHIFT)))
+       (pfn_to_page((unsigned long)(xen_pfn) >> (PAGE_SHIFT - XEN_PAGE_SHIFT)))
 #define page_to_xen_pfn(page)          \
-       (((page_to_pfn(page)) << PAGE_SHIFT) >> XEN_PAGE_SHIFT)
+       ((page_to_pfn(page)) << (PAGE_SHIFT - XEN_PAGE_SHIFT))
 
 #define XEN_PFN_PER_PAGE       (PAGE_SIZE / XEN_PAGE_SIZE)
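
Since PAGE_SHIFT >= XEN_PAGE_SHIFT always holds here, a single shift by the difference is arithmetically the same as the old shift-left-then-right form, but it can no longer lose the upper bits of a large frame number in the intermediate value (the left shift could overflow an unsigned long on 32-bit systems whose physical addresses exceed 32 bits). A worked example assuming 64 KiB kernel pages over 4 KiB Xen pages, i.e. PAGE_SHIFT = 16 and XEN_PAGE_SHIFT = 12, so 16 Xen frames per kernel page:

        xen_pfn_to_page(xen_pfn)  /* == pfn_to_page(xen_pfn >> 4) */
        page_to_xen_pfn(page)     /* == page_to_pfn(page) << 4    */
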
 
index 781c1399c6a3b0fa0b81c9567dd3096276a17d65..ade739f67f1df67fc17cf631da2994eef0e3d95c 100644 (file)
@@ -307,8 +307,8 @@ static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
        struct inode *inode;
        struct ipc_namespace *ns = data;
 
-       sb->s_blocksize = PAGE_CACHE_SIZE;
-       sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+       sb->s_blocksize = PAGE_SIZE;
+       sb->s_blocksize_bits = PAGE_SHIFT;
        sb->s_magic = MQUEUE_MAGIC;
        sb->s_op = &mqueue_super_ops;
 
index be0abf669ced3b5b228a10d5ad6ac99576fae0f7..d781b077431f90102af4f8203c2e89ff30d6693a 100644 (file)
@@ -764,14 +764,21 @@ const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
+
 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
 const struct bpf_func_proto bpf_get_current_comm_proto __weak;
+
 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
 {
        return NULL;
 }
 
+const struct bpf_func_proto * __weak bpf_get_event_output_proto(void)
+{
+       return NULL;
+}
+
 /* Always built-in helper functions. */
 const struct bpf_func_proto bpf_tail_call_proto = {
        .func           = NULL,
@@ -787,6 +794,11 @@ void __weak bpf_int_jit_compile(struct bpf_prog *prog)
 {
 }
 
+bool __weak bpf_helper_changes_skb_data(void *func)
+{
+       return false;
+}
+
 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
  * skb_copy_bits(), so provide a weak definition of it for NET-less config.
  */
index 50da680c479f030588314ed664c8d57200c28990..ad7a0573f71bcb26fb3956d8e1c802f3e5e64b87 100644 (file)
@@ -163,17 +163,26 @@ static u64 bpf_get_current_comm(u64 r1, u64 size, u64 r3, u64 r4, u64 r5)
        struct task_struct *task = current;
        char *buf = (char *) (long) r1;
 
-       if (!task)
-               return -EINVAL;
+       if (unlikely(!task))
+               goto err_clear;
 
-       strlcpy(buf, task->comm, min_t(size_t, size, sizeof(task->comm)));
+       strncpy(buf, task->comm, size);
+
+       /* Verifier guarantees that size > 0. For task->comm exceeding
+        * size, guarantee that buf is %NUL-terminated. Unconditionally
+        * done here to save the size test.
+        */
+       buf[size - 1] = 0;
        return 0;
+err_clear:
+       memset(buf, 0, size);
+       return -EINVAL;
 }
 
 const struct bpf_func_proto bpf_get_current_comm_proto = {
        .func           = bpf_get_current_comm,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
-       .arg1_type      = ARG_PTR_TO_STACK,
+       .arg1_type      = ARG_PTR_TO_RAW_STACK,
        .arg2_type      = ARG_CONST_STACK_SIZE,
 };
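
Switching arg1 to ARG_PTR_TO_RAW_STACK tells the verifier the stack buffer need not be initialized before the call, because the helper now guarantees it is fully overwritten: strncpy() NUL-pads up to size on success, and the err_clear path memset()s the whole buffer on failure. That lets programs drop a redundant zeroing pass, roughly (a sketch in restricted BPF C):

        char comm[16];  /* deliberately left uninitialized */

        /* no __builtin_memset(comm, 0, sizeof(comm)) needed anymore */
        bpf_get_current_comm(comm, sizeof(comm));
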
index f2ece3c174a5b540b40492833048d6c8bd9f3f1c..8f94ca1860cfdcdd9509e40d40c3ad7cbf572e2d 100644 (file)
@@ -31,10 +31,10 @@ static void *bpf_any_get(void *raw, enum bpf_type type)
 {
        switch (type) {
        case BPF_TYPE_PROG:
-               atomic_inc(&((struct bpf_prog *)raw)->aux->refcnt);
+               raw = bpf_prog_inc(raw);
                break;
        case BPF_TYPE_MAP:
-               bpf_map_inc(raw, true);
+               raw = bpf_map_inc(raw, true);
                break;
        default:
                WARN_ON_ONCE(1);
@@ -297,7 +297,8 @@ static void *bpf_obj_do_get(const struct filename *pathname,
                goto out;
 
        raw = bpf_any_get(inode->i_private, *type);
-       touch_atime(&path);
+       if (!IS_ERR(raw))
+               touch_atime(&path);
 
        path_put(&path);
        return raw;
index adc5e4bd74f8bc3310cfe4b75257647886b2e11f..cf5e9f7ad13ad13ebb5b6bc5a775beca834c7c86 100644 (file)
@@ -218,11 +218,18 @@ struct bpf_map *__bpf_map_get(struct fd f)
        return f.file->private_data;
 }
 
-void bpf_map_inc(struct bpf_map *map, bool uref)
+/* prog's and map's refcnt limit */
+#define BPF_MAX_REFCNT 32768
+
+struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
 {
-       atomic_inc(&map->refcnt);
+       if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
+               atomic_dec(&map->refcnt);
+               return ERR_PTR(-EBUSY);
+       }
        if (uref)
                atomic_inc(&map->usercnt);
+       return map;
 }
 
 struct bpf_map *bpf_map_get_with_uref(u32 ufd)
@@ -234,7 +241,7 @@ struct bpf_map *bpf_map_get_with_uref(u32 ufd)
        if (IS_ERR(map))
                return map;
 
-       bpf_map_inc(map, true);
+       map = bpf_map_inc(map, true);
        fdput(f);
 
        return map;
@@ -658,6 +665,15 @@ static struct bpf_prog *__bpf_prog_get(struct fd f)
        return f.file->private_data;
 }
 
+struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
+{
+       if (atomic_inc_return(&prog->aux->refcnt) > BPF_MAX_REFCNT) {
+               atomic_dec(&prog->aux->refcnt);
+               return ERR_PTR(-EBUSY);
+       }
+       return prog;
+}
+
 /* called by sockets/tracing/seccomp before attaching program to an event
  * pairs with bpf_prog_put()
  */
@@ -670,7 +686,7 @@ struct bpf_prog *bpf_prog_get(u32 ufd)
        if (IS_ERR(prog))
                return prog;
 
-       atomic_inc(&prog->aux->refcnt);
+       prog = bpf_prog_inc(prog);
        fdput(f);
 
        return prog;
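
With BPF_MAX_REFCNT in place, taking a map or program reference is no longer infallible, which closes a refcount-overflow-to-use-after-free hole; every caller now has to follow the check-the-error-pointer pattern seen above:

        prog = bpf_prog_inc(prog);
        if (IS_ERR(prog))
                return PTR_ERR(prog); /* -EBUSY once 32768 references are held */
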
index 8233021538d34fcf024904eb1e794526d9776e18..84bff68cf80e0c20baf44df53b203c4e3ddd6b8c 100644 (file)
@@ -1,4 +1,5 @@
 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
+ * Copyright (c) 2016 Facebook
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of version 2 of the GNU General Public
@@ -136,13 +137,32 @@ enum bpf_reg_type {
        FRAME_PTR,               /* reg == frame_pointer */
        PTR_TO_STACK,            /* reg == frame_pointer + imm */
        CONST_IMM,               /* constant integer value */
+
+       /* PTR_TO_PACKET represents:
+        * skb->data
+        * skb->data + imm
+        * skb->data + (u16) var
+        * skb->data + (u16) var + imm
+        * if (range > 0) then [ptr, ptr + range - off) is safe to access
+        * if (id > 0) means that some 'var' was added
+        * if (off > 0) means that 'imm' was added
+        */
+       PTR_TO_PACKET,
+       PTR_TO_PACKET_END,       /* skb->data + headlen */
 };
 
 struct reg_state {
        enum bpf_reg_type type;
        union {
-               /* valid when type == CONST_IMM | PTR_TO_STACK */
-               int imm;
+               /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */
+               s64 imm;
+
+               /* valid when type == PTR_TO_PACKET* */
+               struct {
+                       u32 id;
+                       u16 off;
+                       u16 range;
+               };
 
                /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
                 *   PTR_TO_MAP_VALUE_OR_NULL
@@ -205,6 +225,13 @@ struct verifier_env {
 #define BPF_COMPLEXITY_LIMIT_INSNS     65536
 #define BPF_COMPLEXITY_LIMIT_STACK     1024
 
+struct bpf_call_arg_meta {
+       struct bpf_map *map_ptr;
+       bool raw_mode;
+       int regno;
+       int access_size;
+};
+
 /* verbose verifier prints what it's seeing
  * bpf_check() is called under lock, so no race to access these global vars
  */
@@ -240,40 +267,39 @@ static const char * const reg_type_str[] = {
        [FRAME_PTR]             = "fp",
        [PTR_TO_STACK]          = "fp",
        [CONST_IMM]             = "imm",
+       [PTR_TO_PACKET]         = "pkt",
+       [PTR_TO_PACKET_END]     = "pkt_end",
 };
 
-static const struct {
-       int map_type;
-       int func_id;
-} func_limit[] = {
-       {BPF_MAP_TYPE_PROG_ARRAY, BPF_FUNC_tail_call},
-       {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_read},
-       {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_output},
-       {BPF_MAP_TYPE_STACK_TRACE, BPF_FUNC_get_stackid},
-};
-
-static void print_verifier_state(struct verifier_env *env)
+static void print_verifier_state(struct verifier_state *state)
 {
+       struct reg_state *reg;
        enum bpf_reg_type t;
        int i;
 
        for (i = 0; i < MAX_BPF_REG; i++) {
-               t = env->cur_state.regs[i].type;
+               reg = &state->regs[i];
+               t = reg->type;
                if (t == NOT_INIT)
                        continue;
                verbose(" R%d=%s", i, reg_type_str[t]);
                if (t == CONST_IMM || t == PTR_TO_STACK)
-                       verbose("%d", env->cur_state.regs[i].imm);
+                       verbose("%lld", reg->imm);
+               else if (t == PTR_TO_PACKET)
+                       verbose("(id=%d,off=%d,r=%d)",
+                               reg->id, reg->off, reg->range);
+               else if (t == UNKNOWN_VALUE && reg->imm)
+                       verbose("%lld", reg->imm);
                else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE ||
                         t == PTR_TO_MAP_VALUE_OR_NULL)
                        verbose("(ks=%d,vs=%d)",
-                               env->cur_state.regs[i].map_ptr->key_size,
-                               env->cur_state.regs[i].map_ptr->value_size);
+                               reg->map_ptr->key_size,
+                               reg->map_ptr->value_size);
        }
        for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
-               if (env->cur_state.stack_slot_type[i] == STACK_SPILL)
+               if (state->stack_slot_type[i] == STACK_SPILL)
                        verbose(" fp%d=%s", -MAX_BPF_STACK + i,
-                               reg_type_str[env->cur_state.spilled_regs[i / BPF_REG_SIZE].type]);
+                               reg_type_str[state->spilled_regs[i / BPF_REG_SIZE].type]);
        }
        verbose("\n");
 }
@@ -480,7 +506,6 @@ static void init_reg_state(struct reg_state *regs)
        for (i = 0; i < MAX_BPF_REG; i++) {
                regs[i].type = NOT_INIT;
                regs[i].imm = 0;
-               regs[i].map_ptr = NULL;
        }
 
        /* frame pointer */
@@ -495,7 +520,6 @@ static void mark_reg_unknown_value(struct reg_state *regs, u32 regno)
        BUG_ON(regno >= MAX_BPF_REG);
        regs[regno].type = UNKNOWN_VALUE;
        regs[regno].imm = 0;
-       regs[regno].map_ptr = NULL;
 }
 
 enum reg_arg_type {
@@ -551,6 +575,8 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
        case PTR_TO_MAP_VALUE_OR_NULL:
        case PTR_TO_STACK:
        case PTR_TO_CTX:
+       case PTR_TO_PACKET:
+       case PTR_TO_PACKET_END:
        case FRAME_PTR:
        case CONST_PTR_TO_MAP:
                return true;
@@ -650,6 +676,27 @@ static int check_map_access(struct verifier_env *env, u32 regno, int off,
        return 0;
 }
 
+#define MAX_PACKET_OFF 0xffff
+
+static int check_packet_access(struct verifier_env *env, u32 regno, int off,
+                              int size)
+{
+       struct reg_state *regs = env->cur_state.regs;
+       struct reg_state *reg = &regs[regno];
+       int linear_size = (int) reg->range - (int) reg->off;
+
+       if (linear_size < 0 || linear_size >= MAX_PACKET_OFF) {
+               verbose("verifier bug\n");
+               return -EFAULT;
+       }
+       if (off < 0 || off + size > linear_size) {
+               verbose("invalid access to packet, off=%d size=%d, allowed=%d\n",
+                       off, size, linear_size);
+               return -EACCES;
+       }
+       return 0;
+}
+
 /* check access to 'struct bpf_context' fields */
 static int check_ctx_access(struct verifier_env *env, int off, int size,
                            enum bpf_access_type t)
@@ -680,6 +727,45 @@ static bool is_pointer_value(struct verifier_env *env, int regno)
        }
 }
 
+static int check_ptr_alignment(struct verifier_env *env, struct reg_state *reg,
+                              int off, int size)
+{
+       if (reg->type != PTR_TO_PACKET) {
+               if (off % size != 0) {
+                       verbose("misaligned access off %d size %d\n", off, size);
+                       return -EACCES;
+               } else {
+                       return 0;
+               }
+       }
+
+       switch (env->prog->type) {
+       case BPF_PROG_TYPE_SCHED_CLS:
+       case BPF_PROG_TYPE_SCHED_ACT:
+               break;
+       default:
+               verbose("verifier is misconfigured\n");
+               return -EACCES;
+       }
+
+       if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
+               /* misaligned access to packet is ok on x86,arm,arm64 */
+               return 0;
+
+       if (reg->id && size != 1) {
+               verbose("Unknown packet alignment. Only byte-sized access allowed\n");
+               return -EACCES;
+       }
+
+       /* skb->data is NET_IP_ALIGN-ed */
+       if ((NET_IP_ALIGN + reg->off + off) % size != 0) {
+               verbose("misaligned packet access off %d+%d+%d size %d\n",
+                       NET_IP_ALIGN, reg->off, off, size);
+               return -EACCES;
+       }
+       return 0;
+}
+
 /* check whether memory at (regno + off) is accessible for t = (read | write)
  * if t==write, value_regno is a register which value is stored into memory
  * if t==read, value_regno is a register which will receive the value from memory
@@ -691,21 +777,21 @@ static int check_mem_access(struct verifier_env *env, u32 regno, int off,
                            int value_regno)
 {
        struct verifier_state *state = &env->cur_state;
+       struct reg_state *reg = &state->regs[regno];
        int size, err = 0;
 
-       if (state->regs[regno].type == PTR_TO_STACK)
-               off += state->regs[regno].imm;
+       if (reg->type == PTR_TO_STACK)
+               off += reg->imm;
 
        size = bpf_size_to_bytes(bpf_size);
        if (size < 0)
                return size;
 
-       if (off % size != 0) {
-               verbose("misaligned access off %d size %d\n", off, size);
-               return -EACCES;
-       }
+       err = check_ptr_alignment(env, reg, off, size);
+       if (err)
+               return err;
 
-       if (state->regs[regno].type == PTR_TO_MAP_VALUE) {
+       if (reg->type == PTR_TO_MAP_VALUE) {
                if (t == BPF_WRITE && value_regno >= 0 &&
                    is_pointer_value(env, value_regno)) {
                        verbose("R%d leaks addr into map\n", value_regno);
@@ -715,18 +801,25 @@ static int check_mem_access(struct verifier_env *env, u32 regno, int off,
                if (!err && t == BPF_READ && value_regno >= 0)
                        mark_reg_unknown_value(state->regs, value_regno);
 
-       } else if (state->regs[regno].type == PTR_TO_CTX) {
+       } else if (reg->type == PTR_TO_CTX) {
                if (t == BPF_WRITE && value_regno >= 0 &&
                    is_pointer_value(env, value_regno)) {
                        verbose("R%d leaks addr into ctx\n", value_regno);
                        return -EACCES;
                }
                err = check_ctx_access(env, off, size, t);
-               if (!err && t == BPF_READ && value_regno >= 0)
+               if (!err && t == BPF_READ && value_regno >= 0) {
                        mark_reg_unknown_value(state->regs, value_regno);
+                       if (off == offsetof(struct __sk_buff, data) &&
+                           env->allow_ptr_leaks)
+                               /* note that reg.[id|off|range] == 0 */
+                               state->regs[value_regno].type = PTR_TO_PACKET;
+                       else if (off == offsetof(struct __sk_buff, data_end) &&
+                                env->allow_ptr_leaks)
+                               state->regs[value_regno].type = PTR_TO_PACKET_END;
+               }
 
-       } else if (state->regs[regno].type == FRAME_PTR ||
-                  state->regs[regno].type == PTR_TO_STACK) {
+       } else if (reg->type == FRAME_PTR || reg->type == PTR_TO_STACK) {
                if (off >= 0 || off < -MAX_BPF_STACK) {
                        verbose("invalid stack off=%d size=%d\n", off, size);
                        return -EACCES;
@@ -742,11 +835,28 @@ static int check_mem_access(struct verifier_env *env, u32 regno, int off,
                } else {
                        err = check_stack_read(state, off, size, value_regno);
                }
+       } else if (state->regs[regno].type == PTR_TO_PACKET) {
+               if (t == BPF_WRITE) {
+                       verbose("cannot write into packet\n");
+                       return -EACCES;
+               }
+               err = check_packet_access(env, regno, off, size);
+               if (!err && t == BPF_READ && value_regno >= 0)
+                       mark_reg_unknown_value(state->regs, value_regno);
        } else {
                verbose("R%d invalid mem access '%s'\n",
-                       regno, reg_type_str[state->regs[regno].type]);
+                       regno, reg_type_str[reg->type]);
                return -EACCES;
        }
+
+       if (!err && size <= 2 && value_regno >= 0 && env->allow_ptr_leaks &&
+           state->regs[value_regno].type == UNKNOWN_VALUE) {
+               /* 1 or 2 byte load zero-extends, determine the number of
+                * zero upper bits. Not doing it for 4 byte load, since
+                * such values cannot be added to ptr_to_packet anyway.
+                */
+               state->regs[value_regno].imm = 64 - size * 8;
+       }
        return err;
 }
 
@@ -787,7 +897,8 @@ static int check_xadd(struct verifier_env *env, struct bpf_insn *insn)
  * and all elements of stack are initialized
  */
 static int check_stack_boundary(struct verifier_env *env, int regno,
-                               int access_size, bool zero_size_allowed)
+                               int access_size, bool zero_size_allowed,
+                               struct bpf_call_arg_meta *meta)
 {
        struct verifier_state *state = &env->cur_state;
        struct reg_state *regs = state->regs;
@@ -813,6 +924,12 @@ static int check_stack_boundary(struct verifier_env *env, int regno,
                return -EACCES;
        }
 
+       if (meta && meta->raw_mode) {
+               meta->access_size = access_size;
+               meta->regno = regno;
+               return 0;
+       }
+
        for (i = 0; i < access_size; i++) {
                if (state->stack_slot_type[MAX_BPF_STACK + off + i] != STACK_MISC) {
                        verbose("invalid indirect read from stack off %d+%d size %d\n",
@@ -824,7 +941,8 @@ static int check_stack_boundary(struct verifier_env *env, int regno,
 }
 
 static int check_func_arg(struct verifier_env *env, u32 regno,
-                         enum bpf_arg_type arg_type, struct bpf_map **mapp)
+                         enum bpf_arg_type arg_type,
+                         struct bpf_call_arg_meta *meta)
 {
        struct reg_state *reg = env->cur_state.regs + regno;
        enum bpf_reg_type expected_type;
@@ -856,7 +974,8 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
                expected_type = CONST_PTR_TO_MAP;
        } else if (arg_type == ARG_PTR_TO_CTX) {
                expected_type = PTR_TO_CTX;
-       } else if (arg_type == ARG_PTR_TO_STACK) {
+       } else if (arg_type == ARG_PTR_TO_STACK ||
+                  arg_type == ARG_PTR_TO_RAW_STACK) {
                expected_type = PTR_TO_STACK;
                /* One exception here. In case function allows for NULL to be
                 * passed in as argument, it's a CONST_IMM type. Final test
@@ -864,6 +983,7 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
                 */
                if (reg->type == CONST_IMM && reg->imm == 0)
                        expected_type = CONST_IMM;
+               meta->raw_mode = arg_type == ARG_PTR_TO_RAW_STACK;
        } else {
                verbose("unsupported arg_type %d\n", arg_type);
                return -EFAULT;
@@ -877,14 +997,13 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
 
        if (arg_type == ARG_CONST_MAP_PTR) {
                /* bpf_map_xxx(map_ptr) call: remember that map_ptr */
-               *mapp = reg->map_ptr;
-
+               meta->map_ptr = reg->map_ptr;
        } else if (arg_type == ARG_PTR_TO_MAP_KEY) {
                /* bpf_map_xxx(..., map_ptr, ..., key) call:
                 * check that [key, key + map->key_size) are within
                 * stack limits and initialized
                 */
-               if (!*mapp) {
+               if (!meta->map_ptr) {
                        /* in function declaration map_ptr must come before
                         * map_key, so that it's verified and known before
                         * we have to check map_key here. Otherwise it means
@@ -893,19 +1012,20 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
                        verbose("invalid map_ptr to access map->key\n");
                        return -EACCES;
                }
-               err = check_stack_boundary(env, regno, (*mapp)->key_size,
-                                          false);
+               err = check_stack_boundary(env, regno, meta->map_ptr->key_size,
+                                          false, NULL);
        } else if (arg_type == ARG_PTR_TO_MAP_VALUE) {
                /* bpf_map_xxx(..., map_ptr, ..., value) call:
                 * check [value, value + map->value_size) validity
                 */
-               if (!*mapp) {
+               if (!meta->map_ptr) {
                        /* kernel subsystem misconfigured verifier */
                        verbose("invalid map_ptr to access map->value\n");
                        return -EACCES;
                }
-               err = check_stack_boundary(env, regno, (*mapp)->value_size,
-                                          false);
+               err = check_stack_boundary(env, regno,
+                                          meta->map_ptr->value_size,
+                                          false, NULL);
        } else if (arg_type == ARG_CONST_STACK_SIZE ||
                   arg_type == ARG_CONST_STACK_SIZE_OR_ZERO) {
                bool zero_size_allowed = (arg_type == ARG_CONST_STACK_SIZE_OR_ZERO);
@@ -920,7 +1040,7 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
                        return -EACCES;
                }
                err = check_stack_boundary(env, regno - 1, reg->imm,
-                                          zero_size_allowed);
+                                          zero_size_allowed, meta);
        }
 
        return err;
@@ -928,27 +1048,93 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
 
 static int check_map_func_compatibility(struct bpf_map *map, int func_id)
 {
-       bool bool_map, bool_func;
-       int i;
-
        if (!map)
                return 0;
 
-       for (i = 0; i < ARRAY_SIZE(func_limit); i++) {
-               bool_map = (map->map_type == func_limit[i].map_type);
-               bool_func = (func_id == func_limit[i].func_id);
-               /* only when map & func pair match it can continue.
-                * don't allow any other map type to be passed into
-                * the special func;
-                */
-               if (bool_func && bool_map != bool_func) {
-                       verbose("cannot pass map_type %d into func %d\n",
-                               map->map_type, func_id);
-                       return -EINVAL;
-               }
+       /* We need a two way check, first is from map perspective ... */
+       switch (map->map_type) {
+       case BPF_MAP_TYPE_PROG_ARRAY:
+               if (func_id != BPF_FUNC_tail_call)
+                       goto error;
+               break;
+       case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
+               if (func_id != BPF_FUNC_perf_event_read &&
+                   func_id != BPF_FUNC_perf_event_output)
+                       goto error;
+               break;
+       case BPF_MAP_TYPE_STACK_TRACE:
+               if (func_id != BPF_FUNC_get_stackid)
+                       goto error;
+               break;
+       default:
+               break;
+       }
+
+       /* ... and second from the function itself. */
+       switch (func_id) {
+       case BPF_FUNC_tail_call:
+               if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
+                       goto error;
+               break;
+       case BPF_FUNC_perf_event_read:
+       case BPF_FUNC_perf_event_output:
+               if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
+                       goto error;
+               break;
+       case BPF_FUNC_get_stackid:
+               if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
+                       goto error;
+               break;
+       default:
+               break;
        }
 
        return 0;
+error:
+       verbose("cannot pass map_type %d into func %d\n",
+               map->map_type, func_id);
+       return -EINVAL;
+}
+
+static int check_raw_mode(const struct bpf_func_proto *fn)
+{
+       int count = 0;
+
+       if (fn->arg1_type == ARG_PTR_TO_RAW_STACK)
+               count++;
+       if (fn->arg2_type == ARG_PTR_TO_RAW_STACK)
+               count++;
+       if (fn->arg3_type == ARG_PTR_TO_RAW_STACK)
+               count++;
+       if (fn->arg4_type == ARG_PTR_TO_RAW_STACK)
+               count++;
+       if (fn->arg5_type == ARG_PTR_TO_RAW_STACK)
+               count++;
+
+       return count > 1 ? -EINVAL : 0;
+}
+
+static void clear_all_pkt_pointers(struct verifier_env *env)
+{
+       struct verifier_state *state = &env->cur_state;
+       struct reg_state *regs = state->regs, *reg;
+       int i;
+
+       for (i = 0; i < MAX_BPF_REG; i++)
+               if (regs[i].type == PTR_TO_PACKET ||
+                   regs[i].type == PTR_TO_PACKET_END)
+                       mark_reg_unknown_value(regs, i);
+
+       for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
+               if (state->stack_slot_type[i] != STACK_SPILL)
+                       continue;
+               reg = &state->spilled_regs[i / BPF_REG_SIZE];
+               if (reg->type != PTR_TO_PACKET &&
+                   reg->type != PTR_TO_PACKET_END)
+                       continue;
+               reg->type = UNKNOWN_VALUE;
+               reg->imm = 0;
+       }
 }
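
clear_all_pkt_pointers() is what makes helpers flagged by bpf_helper_changes_skb_data() safe: if a helper may reallocate or move skb data, every live packet pointer, in registers and in spilled stack slots, is downgraded to UNKNOWN_VALUE, forcing the program to re-derive and re-bound-check its pointers. Roughly the pattern a program must follow, using bpf_skb_vlan_push() as an assumed data-changing helper:

        bpf_skb_vlan_push(skb, ETH_P_8021Q, vlan_tci);  /* may move skb->data */

        data     = (void *)(long)skb->data;             /* must be re-loaded... */
        data_end = (void *)(long)skb->data_end;
        if (data + sizeof(struct ethhdr) > data_end)    /* ...and re-checked */
                return TC_ACT_SHOT;
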
 
 static int check_call(struct verifier_env *env, int func_id)
@@ -956,8 +1142,9 @@ static int check_call(struct verifier_env *env, int func_id)
        struct verifier_state *state = &env->cur_state;
        const struct bpf_func_proto *fn = NULL;
        struct reg_state *regs = state->regs;
-       struct bpf_map *map = NULL;
        struct reg_state *reg;
+       struct bpf_call_arg_meta meta;
+       bool changes_data;
        int i, err;
 
        /* find function prototype */
@@ -980,23 +1167,45 @@ static int check_call(struct verifier_env *env, int func_id)
                return -EINVAL;
        }
 
+       changes_data = bpf_helper_changes_skb_data(fn->func);
+
+       memset(&meta, 0, sizeof(meta));
+
+       /* We only support one arg being in raw mode at the moment, which
+        * is sufficient for the helper functions we have right now.
+        */
+       err = check_raw_mode(fn);
+       if (err) {
+               verbose("kernel subsystem misconfigured func %d\n", func_id);
+               return err;
+       }
+
        /* check args */
-       err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &map);
+       err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta);
        if (err)
                return err;
-       err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &map);
+       err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
        if (err)
                return err;
-       err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &map);
+       err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
        if (err)
                return err;
-       err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &map);
+       err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta);
        if (err)
                return err;
-       err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &map);
+       err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta);
        if (err)
                return err;
 
+       /* Mark slots with STACK_MISC in case of raw mode, stack offset
+        * is inferred from register state.
+        */
+       for (i = 0; i < meta.access_size; i++) {
+               err = check_mem_access(env, meta.regno, i, BPF_B, BPF_WRITE, -1);
+               if (err)
+                       return err;
+       }
+
        /* reset caller saved regs */
        for (i = 0; i < CALLER_SAVED_REGS; i++) {
                reg = regs + caller_saved[i];
@@ -1015,28 +1224,211 @@ static int check_call(struct verifier_env *env, int func_id)
                 * can check 'value_size' boundary of memory access
                 * to map element returned from bpf_map_lookup_elem()
                 */
-               if (map == NULL) {
+               if (meta.map_ptr == NULL) {
                        verbose("kernel subsystem misconfigured verifier\n");
                        return -EINVAL;
                }
-               regs[BPF_REG_0].map_ptr = map;
+               regs[BPF_REG_0].map_ptr = meta.map_ptr;
        } else {
                verbose("unknown return type %d of func %d\n",
                        fn->ret_type, func_id);
                return -EINVAL;
        }
 
-       err = check_map_func_compatibility(map, func_id);
+       err = check_map_func_compatibility(meta.map_ptr, func_id);
        if (err)
                return err;
 
+       if (changes_data)
+               clear_all_pkt_pointers(env);
+       return 0;
+}
+
+static int check_packet_ptr_add(struct verifier_env *env, struct bpf_insn *insn)
+{
+       struct reg_state *regs = env->cur_state.regs;
+       struct reg_state *dst_reg = &regs[insn->dst_reg];
+       struct reg_state *src_reg = &regs[insn->src_reg];
+       s32 imm;
+
+       if (BPF_SRC(insn->code) == BPF_K) {
+               /* pkt_ptr += imm */
+               imm = insn->imm;
+
+add_imm:
+               if (imm <= 0) {
+                       verbose("addition of negative constant to packet pointer is not allowed\n");
+                       return -EACCES;
+               }
+               if (imm >= MAX_PACKET_OFF ||
+                   imm + dst_reg->off >= MAX_PACKET_OFF) {
+                       verbose("constant %d is too large to add to packet pointer\n",
+                               imm);
+                       return -EACCES;
+               }
+               /* a constant was added to pkt_ptr.
+                * Remember it while keeping the same 'id'
+                */
+               dst_reg->off += imm;
+       } else {
+               if (src_reg->type == CONST_IMM) {
+                       /* pkt_ptr += reg where reg is known constant */
+                       imm = src_reg->imm;
+                       goto add_imm;
+               }
+               /* disallow pkt_ptr += reg
+                * if reg is not unknown_value with guaranteed zero upper bits
+                * otherwise pkt_ptr may overflow and addition will become
+                * subtraction which is not allowed
+                */
+               if (src_reg->type != UNKNOWN_VALUE) {
+                       verbose("cannot add '%s' to ptr_to_packet\n",
+                               reg_type_str[src_reg->type]);
+                       return -EACCES;
+               }
+               if (src_reg->imm < 48) {
+                       verbose("cannot add integer value with %lld upper zero bits to ptr_to_packet\n",
+                               src_reg->imm);
+                       return -EACCES;
+               }
+               /* dst_reg stays as pkt_ptr type and since some positive
+                * integer value was added to the pointer, increment its 'id'
+                */
+               dst_reg->id++;
+
+               /* something was added to pkt_ptr, set range and off to zero */
+               dst_reg->off = 0;
+               dst_reg->range = 0;
+       }
+       return 0;
+}
+
+static int evaluate_reg_alu(struct verifier_env *env, struct bpf_insn *insn)
+{
+       struct reg_state *regs = env->cur_state.regs;
+       struct reg_state *dst_reg = &regs[insn->dst_reg];
+       u8 opcode = BPF_OP(insn->code);
+       s64 imm_log2;
+
+       /* for type == UNKNOWN_VALUE:
+        * imm > 0 -> number of zero upper bits
+        * imm == 0 -> don't track, which is the same as all bits can be non-zero
+        */
+
+       if (BPF_SRC(insn->code) == BPF_X) {
+               struct reg_state *src_reg = &regs[insn->src_reg];
+
+               if (src_reg->type == UNKNOWN_VALUE && src_reg->imm > 0 &&
+                   dst_reg->imm && opcode == BPF_ADD) {
+                       /* dreg += sreg
+                        * where both have zero upper bits. Adding them
+                        * can only result in making one more bit non-zero
+                        * in the larger value.
+                        * Ex. 0xffff (imm=48) + 1 (imm=63) = 0x10000 (imm=47)
+                        *     0xffff (imm=48) + 0xffff = 0x1fffe (imm=47)
+                        */
+                       dst_reg->imm = min(dst_reg->imm, src_reg->imm);
+                       dst_reg->imm--;
+                       return 0;
+               }
+               if (src_reg->type == CONST_IMM && src_reg->imm > 0 &&
+                   dst_reg->imm && opcode == BPF_ADD) {
+                       /* dreg += sreg
+                        * where dreg has zero upper bits and sreg is const.
+                        * Adding them can only result in making one more bit
+                        * non-zero in the larger value.
+                        */
+                       imm_log2 = __ilog2_u64((long long)src_reg->imm);
+                       dst_reg->imm = min(dst_reg->imm, 63 - imm_log2);
+                       dst_reg->imm--;
+                       return 0;
+               }
+               /* all other cases not supported yet, just mark dst_reg */
+               dst_reg->imm = 0;
+               return 0;
+       }
+
+       /* sign extend 32-bit imm into 64-bit to make sure that
+        * negative values occupy bit 63. Note ilog2() would have
+        * been incorrect, since sizeof(insn->imm) == 4
+        */
+       imm_log2 = __ilog2_u64((long long)insn->imm);
+
+       if (dst_reg->imm && opcode == BPF_LSH) {
+               /* reg <<= imm
+                * if reg was a result of 2 byte load, then its imm == 48
+                * which means that upper 48 bits are zero and shifting this reg
+                * left by 4 would mean that upper 44 bits are still zero
+                */
+               dst_reg->imm -= insn->imm;
+       } else if (dst_reg->imm && opcode == BPF_MUL) {
+               /* reg *= imm
+                * if multiplying by 14 subtract 4
+                * This is conservative calculation of upper zero bits.
+                * It's not trying to special case insn->imm == 1 or 0 cases
+                */
+               dst_reg->imm -= imm_log2 + 1;
+       } else if (opcode == BPF_AND) {
+               /* reg &= imm */
+               dst_reg->imm = 63 - imm_log2;
+       } else if (dst_reg->imm && opcode == BPF_ADD) {
+               /* reg += imm */
+               dst_reg->imm = min(dst_reg->imm, 63 - imm_log2);
+               dst_reg->imm--;
+       } else if (opcode == BPF_RSH) {
+               /* reg >>= imm
+                * which means that after right shift, upper bits will be zero
+                * note that verifier already checked that
+                * 0 <= imm < 64 for shift insn
+                */
+               dst_reg->imm += insn->imm;
+               if (unlikely(dst_reg->imm > 64))
+                       /* some dumb code did:
+                        * r2 = *(u32 *)mem;
+                        * r2 >>= 32;
+                        * and all bits are zero now */
+                       dst_reg->imm = 64;
+       } else {
+               /* all other alu ops, means that we don't know what will
+                * happen to the value, mark it with unknown number of zero bits
+                */
+               dst_reg->imm = 0;
+       }
+
+       if (dst_reg->imm < 0) {
+               /* all 64 bits of the register can contain non-zero bits
+                * and such value cannot be added to ptr_to_packet, since it
+                * may overflow, mark it as unknown to avoid further eval
+                */
+               dst_reg->imm = 0;
+       }
+       return 0;
+}
+
+static int evaluate_reg_imm_alu(struct verifier_env *env, struct bpf_insn *insn)
+{
+       struct reg_state *regs = env->cur_state.regs;
+       struct reg_state *dst_reg = &regs[insn->dst_reg];
+       struct reg_state *src_reg = &regs[insn->src_reg];
+       u8 opcode = BPF_OP(insn->code);
+
+       /* dst_reg->type == CONST_IMM here, simulate execution of 'add' insn.
+        * Don't care about overflow or negative values, just add them
+        */
+       if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_K)
+               dst_reg->imm += insn->imm;
+       else if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_X &&
+                src_reg->type == CONST_IMM)
+               dst_reg->imm += src_reg->imm;
+       else
+               mark_reg_unknown_value(regs, insn->dst_reg);
        return 0;
 }
 
 /* check validity of 32-bit and 64-bit arithmetic operations */
 static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn)
 {
-       struct reg_state *regs = env->cur_state.regs;
+       struct reg_state *regs = env->cur_state.regs, *dst_reg;
        u8 opcode = BPF_OP(insn->code);
        int err;
 
@@ -1125,8 +1517,6 @@ static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn)
 
        } else {        /* all other ALU ops: and, sub, xor, add, ... */
 
-               bool stack_relative = false;
-
                if (BPF_SRC(insn->code) == BPF_X) {
                        if (insn->imm != 0 || insn->off != 0) {
                                verbose("BPF_ALU uses reserved fields\n");
@@ -1164,11 +1554,34 @@ static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn)
                        }
                }
 
+               /* check dest operand */
+               err = check_reg_arg(regs, insn->dst_reg, DST_OP_NO_MARK);
+               if (err)
+                       return err;
+
+               dst_reg = &regs[insn->dst_reg];
+
                /* pattern match 'bpf_add Rx, imm' instruction */
                if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 &&
-                   regs[insn->dst_reg].type == FRAME_PTR &&
-                   BPF_SRC(insn->code) == BPF_K) {
-                       stack_relative = true;
+                   dst_reg->type == FRAME_PTR && BPF_SRC(insn->code) == BPF_K) {
+                       dst_reg->type = PTR_TO_STACK;
+                       dst_reg->imm = insn->imm;
+                       return 0;
+               } else if (opcode == BPF_ADD &&
+                          BPF_CLASS(insn->code) == BPF_ALU64 &&
+                          dst_reg->type == PTR_TO_PACKET) {
+                       /* ptr_to_packet += K|X */
+                       return check_packet_ptr_add(env, insn);
+               } else if (BPF_CLASS(insn->code) == BPF_ALU64 &&
+                          dst_reg->type == UNKNOWN_VALUE &&
+                          env->allow_ptr_leaks) {
+                       /* unknown += K|X */
+                       return evaluate_reg_alu(env, insn);
+               } else if (BPF_CLASS(insn->code) == BPF_ALU64 &&
+                          dst_reg->type == CONST_IMM &&
+                          env->allow_ptr_leaks) {
+                       /* reg_imm += K|X */
+                       return evaluate_reg_imm_alu(env, insn);
                } else if (is_pointer_value(env, insn->dst_reg)) {
                        verbose("R%d pointer arithmetic prohibited\n",
                                insn->dst_reg);
@@ -1180,24 +1593,45 @@ static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn)
                        return -EACCES;
                }
 
-               /* check dest operand */
-               err = check_reg_arg(regs, insn->dst_reg, DST_OP);
-               if (err)
-                       return err;
-
-               if (stack_relative) {
-                       regs[insn->dst_reg].type = PTR_TO_STACK;
-                       regs[insn->dst_reg].imm = insn->imm;
-               }
+               /* mark dest operand */
+               mark_reg_unknown_value(regs, insn->dst_reg);
        }
 
        return 0;
 }
 
+static void find_good_pkt_pointers(struct verifier_env *env,
+                                  struct reg_state *dst_reg)
+{
+       struct verifier_state *state = &env->cur_state;
+       struct reg_state *regs = state->regs, *reg;
+       int i;
+       /* r2 = r3;
+        * r2 += 8
+        * if (r2 > pkt_end) goto somewhere
+        * r2 == dst_reg, pkt_end == src_reg,
+        * r2=pkt(id=n,off=8,r=0)
+        * r3=pkt(id=n,off=0,r=0)
+        * find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
+        * so that range of bytes [r3, r3 + 8) is safe to access
+        */
+       for (i = 0; i < MAX_BPF_REG; i++)
+               if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id)
+                       regs[i].range = dst_reg->off;
+
+       for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
+               if (state->stack_slot_type[i] != STACK_SPILL)
+                       continue;
+               reg = &state->spilled_regs[i / BPF_REG_SIZE];
+               if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id)
+                       reg->range = dst_reg->off;
+       }
+}
+
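
Putting the pieces together, this is the source-level idiom the new verifier logic accepts; a hedged sketch of a tc classifier (program type BPF_PROG_TYPE_SCHED_CLS assumed, since per check_ptr_alignment() above only SCHED_CLS/SCHED_ACT may use packet pointers):

        int cls_main(struct __sk_buff *skb)
        {
                void *data     = (void *)(long)skb->data;     /* PTR_TO_PACKET */
                void *data_end = (void *)(long)skb->data_end; /* PTR_TO_PACKET_END */
                struct ethhdr *eth = data;

                if (data + sizeof(*eth) > data_end)   /* the JGT handled above */
                        return TC_ACT_SHOT;
                return eth->h_proto;  /* 2-byte load proven in bounds; return
                                       * value is just for illustration */
        }
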
 static int check_cond_jmp_op(struct verifier_env *env,
                             struct bpf_insn *insn, int *insn_idx)
 {
-       struct reg_state *regs = env->cur_state.regs;
+       struct reg_state *regs = env->cur_state.regs, *dst_reg;
        struct verifier_state *other_branch;
        u8 opcode = BPF_OP(insn->code);
        int err;
@@ -1235,11 +1669,12 @@ static int check_cond_jmp_op(struct verifier_env *env,
        if (err)
                return err;
 
+       dst_reg = &regs[insn->dst_reg];
+
        /* detect if R == 0 where R was initialized to zero earlier */
        if (BPF_SRC(insn->code) == BPF_K &&
            (opcode == BPF_JEQ || opcode == BPF_JNE) &&
-           regs[insn->dst_reg].type == CONST_IMM &&
-           regs[insn->dst_reg].imm == insn->imm) {
+           dst_reg->type == CONST_IMM && dst_reg->imm == insn->imm) {
                if (opcode == BPF_JEQ) {
                        /* if (imm == imm) goto pc+off;
                         * only follow the goto, ignore fall-through
@@ -1261,44 +1696,30 @@ static int check_cond_jmp_op(struct verifier_env *env,
 
        /* detect if R == 0 where R is returned value from bpf_map_lookup_elem() */
        if (BPF_SRC(insn->code) == BPF_K &&
-           insn->imm == 0 && (opcode == BPF_JEQ ||
-                              opcode == BPF_JNE) &&
-           regs[insn->dst_reg].type == PTR_TO_MAP_VALUE_OR_NULL) {
+           insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
+           dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
                if (opcode == BPF_JEQ) {
                        /* next fallthrough insn can access memory via
                         * this register
                         */
                        regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
                        /* branch target cannot access it, since reg == 0 */
-                       other_branch->regs[insn->dst_reg].type = CONST_IMM;
-                       other_branch->regs[insn->dst_reg].imm = 0;
+                       mark_reg_unknown_value(other_branch->regs,
+                                              insn->dst_reg);
                } else {
                        other_branch->regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
-                       regs[insn->dst_reg].type = CONST_IMM;
-                       regs[insn->dst_reg].imm = 0;
+                       mark_reg_unknown_value(regs, insn->dst_reg);
                }
+       } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT &&
+                  dst_reg->type == PTR_TO_PACKET &&
+                  regs[insn->src_reg].type == PTR_TO_PACKET_END) {
+               find_good_pkt_pointers(env, dst_reg);
        } else if (is_pointer_value(env, insn->dst_reg)) {
                verbose("R%d pointer comparison prohibited\n", insn->dst_reg);
                return -EACCES;
-       } else if (BPF_SRC(insn->code) == BPF_K &&
-                  (opcode == BPF_JEQ || opcode == BPF_JNE)) {
-
-               if (opcode == BPF_JEQ) {
-                       /* detect if (R == imm) goto
-                        * and in the target state recognize that R = imm
-                        */
-                       other_branch->regs[insn->dst_reg].type = CONST_IMM;
-                       other_branch->regs[insn->dst_reg].imm = insn->imm;
-               } else {
-                       /* detect if (R != imm) goto
-                        * and in the fall-through state recognize that R = imm
-                        */
-                       regs[insn->dst_reg].type = CONST_IMM;
-                       regs[insn->dst_reg].imm = insn->imm;
-               }
        }
        if (log_level)
-               print_verifier_state(env);
+               print_verifier_state(&env->cur_state);
        return 0;
 }
 
@@ -1376,13 +1797,14 @@ static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn)
        int i, err;
 
        if (!may_access_skb(env->prog->type)) {
-               verbose("BPF_LD_ABS|IND instructions not allowed for this program type\n");
+               verbose("BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
                return -EINVAL;
        }
 
        if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
+           BPF_SIZE(insn->code) == BPF_DW ||
            (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
-               verbose("BPF_LD_ABS uses reserved fields\n");
+               verbose("BPF_LD_[ABS|IND] uses reserved fields\n");
                return -EINVAL;
        }
 
@@ -1615,6 +2037,58 @@ err_free:
        return ret;
 }
 
+/* the following conditions reduce the number of explored insns
+ * from ~140k to ~80k for ultra large programs that use a lot of ptr_to_packet
+ */
+static bool compare_ptrs_to_packet(struct reg_state *old, struct reg_state *cur)
+{
+       if (old->id != cur->id)
+               return false;
+
+       /* old ptr_to_packet is more conservative, since it allows smaller
+        * range. Ex:
+        * old(off=0,r=10) is equal to cur(off=0,r=20), because
+        * old(off=0,r=10) means that with range=10 the verifier proceeded
+        * further and found no issues with the program. Now we're in the same
+        * spot with cur(off=0,r=20), so we're safe too, since anything further
+        * will only be looking at most 10 bytes after this pointer.
+        */
+       if (old->off == cur->off && old->range < cur->range)
+               return true;
+
+       /* old(off=20,r=10) is equal to cur(off=22,r=22 or 5 or 0)
+        * since both cannot be used for packet access and safe(old)
+        * pointer has smaller off that could be used for further
+        * 'if (ptr > data_end)' check
+        * Ex:
+        * old(off=20,r=10) and cur(off=22,r=22) and cur(off=22,r=0) mean
+        * that we cannot access the packet.
+        * The safe range is:
+        * [ptr, ptr + range - off)
+        * so whenever off >= range, it means no safe bytes from this pointer.
+        * When comparing old->off <= cur->off, it means that older code
+        * went with smaller offset and that offset was later
+        * used to figure out the safe range after 'if (ptr > data_end)' check
+        * Say, 'old' state was explored like:
+        * ... R3(off=0, r=0)
+        * R4 = R3 + 20
+        * ... now R4(off=20,r=0)  <-- here
+        * if (R4 > data_end)
+        * ... R4(off=20,r=20), R3(off=0,r=20) and R3 can be used to access.
+        * ... the code further went all the way to bpf_exit.
+        * Now the 'cur' state at the mark 'here' has R4(off=30,r=0).
+        * old_R4(off=20,r=0) equal to cur_R4(off=30,r=0), since if the verifier
+        * goes further, such cur_R4 will give larger safe packet range after
+        * 'if (R4 > data_end)' and all further insn were already good with r=20,
+        * so they will be good with r=30 and we can prune the search.
+        */
+       if (old->off <= cur->off &&
+           old->off >= old->range && cur->off >= cur->range)
+               return true;
+
+       return false;
+}
+
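
The two pruning rules above can be exercised in isolation. Here is a minimal user-space sketch of the same comparison; the struct and function names are invented for illustration and this is not the verifier's code:

    #include <stdbool.h>
    #include <stdio.h>

    /* stand-in for the packet-pointer fields of the verifier's reg_state */
    struct pkt_reg { int id; int off; int range; };

    /* true means the already-verified 'old' state subsumes 'cur',
     * so this branch of the search can be pruned */
    static bool subsumes(const struct pkt_reg *old, const struct pkt_reg *cur)
    {
            if (old->id != cur->id)
                    return false;
            /* rule 1: same offset, old proved safety with a smaller range */
            if (old->off == cur->off && old->range < cur->range)
                    return true;
            /* rule 2: neither pointer is usable for access (off >= range),
             * and old's smaller offset yields a wider safe range after a
             * later 'if (ptr > data_end)' check */
            if (old->off <= cur->off &&
                old->off >= old->range && cur->off >= cur->range)
                    return true;
            return false;
    }

    int main(void)
    {
            struct pkt_reg old = { .id = 1, .off = 20, .range = 0 };
            struct pkt_reg cur = { .id = 1, .off = 30, .range = 0 };

            printf("prune: %d\n", subsumes(&old, &cur)); /* prints "prune: 1" */
            return 0;
    }
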
 /* compare two verifier states
  *
  * all states stored in state_list are known to be valid, since
@@ -1643,17 +2117,25 @@ err_free:
  */
 static bool states_equal(struct verifier_state *old, struct verifier_state *cur)
 {
+       struct reg_state *rold, *rcur;
        int i;
 
        for (i = 0; i < MAX_BPF_REG; i++) {
-               if (memcmp(&old->regs[i], &cur->regs[i],
-                          sizeof(old->regs[0])) != 0) {
-                       if (old->regs[i].type == NOT_INIT ||
-                           (old->regs[i].type == UNKNOWN_VALUE &&
-                            cur->regs[i].type != NOT_INIT))
-                               continue;
-                       return false;
-               }
+               rold = &old->regs[i];
+               rcur = &cur->regs[i];
+
+               if (memcmp(rold, rcur, sizeof(*rold)) == 0)
+                       continue;
+
+               if (rold->type == NOT_INIT ||
+                   (rold->type == UNKNOWN_VALUE && rcur->type != NOT_INIT))
+                       continue;
+
+               if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET &&
+                   compare_ptrs_to_packet(rold, rcur))
+                       continue;
+
+               return false;
        }
 
        for (i = 0; i < MAX_BPF_STACK; i++) {
@@ -1775,7 +2257,7 @@ static int do_check(struct verifier_env *env)
 
                if (log_level && do_print_state) {
                        verbose("\nfrom %d to %d:", prev_insn_idx, insn_idx);
-                       print_verifier_state(env);
+                       print_verifier_state(&env->cur_state);
                        do_print_state = false;
                }
 
@@ -1987,6 +2469,7 @@ process_bpf_exit:
                insn_idx++;
        }
 
+       verbose("processed %d insns\n", insn_processed);
        return 0;
 }
 
@@ -2038,7 +2521,6 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
                        if (IS_ERR(map)) {
                                verbose("fd %d is not pointing to valid bpf_map\n",
                                        insn->imm);
-                               fdput(f);
                                return PTR_ERR(map);
                        }
 
@@ -2058,15 +2540,18 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
                                return -E2BIG;
                        }
 
-                       /* remember this map */
-                       env->used_maps[env->used_map_cnt++] = map;
-
                        /* hold the map. If the program is rejected by verifier,
                         * the map will be released by release_maps() or it
                         * will be used by the valid program until it's unloaded
                         * and all maps are released in free_bpf_prog_info()
                         */
-                       bpf_map_inc(map, false);
+                       map = bpf_map_inc(map, false);
+                       if (IS_ERR(map)) {
+                               fdput(f);
+                               return PTR_ERR(map);
+                       }
+                       env->used_maps[env->used_map_cnt++] = map;
+
                        fdput(f);
 next_insn:
                        insn++;
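
The reordering in this hunk illustrates a general rule: take (and error-check) the reference before publishing the pointer into used_maps[], so the error-unwind path that walks the array never drops a reference that was never taken. A user-space sketch of that pattern, with all names invented:

    #include <errno.h>
    #include <stdio.h>

    struct map { int refcnt; };

    /* stand-in for bpf_map_inc(); assume it can fail, e.g. on a
     * refcount limit (the kernel variant returns an ERR_PTR) */
    static struct map *map_get(struct map *m)
    {
            if (m->refcnt >= 100)
                    return NULL;
            m->refcnt++;
            return m;
    }

    #define MAX_USED 8
    static struct map *used[MAX_USED];
    static int used_cnt;

    static int remember_map(struct map *m)
    {
            m = map_get(m);         /* grab the reference first ... */
            if (!m)
                    return -EBUSY;
            used[used_cnt++] = m;   /* ... then publish it, so unwind code
                                     * only ever sees held references */
            return 0;
    }

    int main(void)
    {
            struct map m = { .refcnt = 0 };

            printf("ret=%d refcnt=%d\n", remember_map(&m), m.refcnt);
            return 0;
    }
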
index 671dc05c0b0fd6b732cf03d6efec6a5dd5a3557d..909a7d31ffd3d3083c2253aefd4253715bcb2278 100644 (file)
@@ -2825,9 +2825,10 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
                                    size_t nbytes, loff_t off, bool threadgroup)
 {
        struct task_struct *tsk;
+       struct cgroup_subsys *ss;
        struct cgroup *cgrp;
        pid_t pid;
-       int ret;
+       int ssid, ret;
 
        if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
                return -EINVAL;
@@ -2875,8 +2876,10 @@ out_unlock_rcu:
        rcu_read_unlock();
 out_unlock_threadgroup:
        percpu_up_write(&cgroup_threadgroup_rwsem);
+       for_each_subsys(ss, ssid)
+               if (ss->post_attach)
+                       ss->post_attach();
        cgroup_kn_unlock(of->kn);
-       cpuset_post_attach_flush();
        return ret ?: nbytes;
 }
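
The loop replaces a hard-coded call into cpuset with a generic per-subsystem hook; the cpuset side of the change, which wires up cpuset_post_attach, appears further below. A toy sketch of the callback iteration, with hypothetical types:

    #include <stdio.h>

    struct subsys { const char *name; void (*post_attach)(void); };

    static void cpuset_flush(void) { puts("cpuset: flush migration workqueue"); }

    static struct subsys subsystems[] = {
            { "cpuset", cpuset_flush },
            { "memory", NULL },             /* no post-attach work needed */
    };

    int main(void)
    {
            /* analogous to for_each_subsys(ss, ssid) in the hunk above */
            for (unsigned int i = 0; i < sizeof(subsystems) / sizeof(*subsystems); i++)
                    if (subsystems[i].post_attach)
                            subsystems[i].post_attach();
            return 0;
    }
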
 
index 6ea42e8da861b05077d01a23e15bb140afffa605..3e3f6e49eabbc0dc62eefc02a201f95508032dab 100644 (file)
@@ -36,6 +36,7 @@
  * @target:    The target state
  * @thread:    Pointer to the hotplug thread
  * @should_run:        Thread should execute
+ * @rollback:  Perform a rollback
  * @cb_stat:   The state for a single callback (install/uninstall)
  * @cb:                Single callback function (install/uninstall)
  * @result:    Result of the operation
@@ -47,6 +48,7 @@ struct cpuhp_cpu_state {
 #ifdef CONFIG_SMP
        struct task_struct      *thread;
        bool                    should_run;
+       bool                    rollback;
        enum cpuhp_state        cb_state;
        int                     (*cb)(unsigned int cpu);
        int                     result;
@@ -301,6 +303,11 @@ static int cpu_notify(unsigned long val, unsigned int cpu)
        return __cpu_notify(val, cpu, -1, NULL);
 }
 
+static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
+{
+       BUG_ON(cpu_notify(val, cpu));
+}
+
 /* Notifier wrappers for transitioning to state machine */
 static int notify_prepare(unsigned int cpu)
 {
@@ -477,6 +484,16 @@ static void cpuhp_thread_fun(unsigned int cpu)
                } else {
                        ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb);
                }
+       } else if (st->rollback) {
+               BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
+
+               undo_cpu_down(cpu, st, cpuhp_ap_states);
+               /*
+                * This is a temporary workaround to keep the notifier users
+                * happy. It will go away once we get rid of the notifiers.
+                */
+               cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
+               st->rollback = false;
        } else {
                /* Cannot happen .... */
                BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
@@ -636,11 +653,6 @@ static inline void check_for_tasks(int dead_cpu)
        read_unlock(&tasklist_lock);
 }
 
-static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
-{
-       BUG_ON(cpu_notify(val, cpu));
-}
-
 static int notify_down_prepare(unsigned int cpu)
 {
        int err, nr_calls = 0;
@@ -721,9 +733,10 @@ static int takedown_cpu(unsigned int cpu)
         */
        err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
        if (err) {
-               /* CPU didn't die: tell everyone.  Can't complain. */
-               cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
+               /* CPU refused to die */
                irq_unlock_sparse();
+               /* Unpark the hotplug thread so we can rollback there */
+               kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
                return err;
        }
        BUG_ON(cpu_online(cpu));
@@ -832,6 +845,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
         * to do the further cleanups.
         */
        ret = cpuhp_down_callbacks(cpu, st, cpuhp_bp_states, target);
+       if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
+               st->target = prev_state;
+               st->rollback = true;
+               cpuhp_kick_ap_work(cpu);
+       }
 
        hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
 out:
@@ -1249,6 +1267,7 @@ static struct cpuhp_step cpuhp_ap_states[] = {
                .name                   = "notify:online",
                .startup                = notify_online,
                .teardown               = notify_down_prepare,
+               .skip_onerr             = true,
        },
 #endif
        /*
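
A rough sketch of the rollback flow added in this file, with invented types and a deliberately failing step; the real state machine and its handoff to the per-CPU hotplug thread are considerably more involved:

    #include <stdbool.h>
    #include <stdio.h>

    enum state { OFFLINE, PREPARED, AP_ONLINE, ONLINE };

    struct cpu_state { enum state state, target; bool rollback; };

    /* pretend the teardown step out of AP_ONLINE fails */
    static int run_teardown_step(struct cpu_state *st)
    {
            return st->state == AP_ONLINE ? -1 : 0;
    }

    int main(void)
    {
            struct cpu_state st = { .state = ONLINE, .target = OFFLINE };
            enum state prev = st.state;

            while (st.state > st.target) {
                    if (run_teardown_step(&st)) {
                            st.target = prev;       /* aim back at the old state */
                            st.rollback = true;     /* the hotplug thread undoes
                                                     * the partial teardown */
                            break;
                    }
                    st.state--;
            }
            printf("rollback=%d target=%d\n", st.rollback, st.target);
            return 0;
    }
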
index 00ab5c2b7c5b9df67dfd57f0b91b16bf3bd80960..1902956baba1fa19d915c2ccad402a64209b4860 100644 (file)
@@ -58,7 +58,6 @@
 #include <asm/uaccess.h>
 #include <linux/atomic.h>
 #include <linux/mutex.h>
-#include <linux/workqueue.h>
 #include <linux/cgroup.h>
 #include <linux/wait.h>
 
@@ -1016,7 +1015,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
        }
 }
 
-void cpuset_post_attach_flush(void)
+static void cpuset_post_attach(void)
 {
        flush_workqueue(cpuset_migrate_mm_wq);
 }
@@ -2087,6 +2086,7 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
        .can_attach     = cpuset_can_attach,
        .cancel_attach  = cpuset_cancel_attach,
        .attach         = cpuset_attach,
+       .post_attach    = cpuset_post_attach,
        .bind           = cpuset_bind,
        .legacy_cftypes = files,
        .early_init     = true,
index 9a01019ff7c83c78676b54af4447226660837d36..0bdc6e7d4908f23ae79f84a13f0bd112e13f9518 100644 (file)
@@ -412,7 +412,8 @@ int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
        if (ret || !write)
                return ret;
 
-       if (sysctl_perf_cpu_time_max_percent == 100) {
+       if (sysctl_perf_cpu_time_max_percent == 100 ||
+           sysctl_perf_cpu_time_max_percent == 0) {
                printk(KERN_WARNING
                       "perf: Dynamic interrupt throttling disabled, can hang your system!\n");
                WRITE_ONCE(perf_sample_allowed_ns, 0);
@@ -1105,6 +1106,7 @@ static void put_ctx(struct perf_event_context *ctx)
  * function.
  *
  * Lock order:
+ *    cred_guard_mutex
  *     task_struct::perf_event_mutex
  *       perf_event_context::mutex
  *         perf_event::child_mutex;
@@ -2417,14 +2419,24 @@ static void ctx_sched_out(struct perf_event_context *ctx,
                        cpuctx->task_ctx = NULL;
        }
 
-       is_active ^= ctx->is_active; /* changed bits */
-
+       /*
+        * Always update time if it was set; not only when it changes.
+        * Otherwise we can 'forget' to update time for any but the last
+        * context we sched out. For example:
+        *
+        *   ctx_sched_out(.event_type = EVENT_FLEXIBLE)
+        *   ctx_sched_out(.event_type = EVENT_PINNED)
+        *
+        * would only update time for the pinned events.
+        */
        if (is_active & EVENT_TIME) {
                /* update (and stop) ctx time */
                update_context_time(ctx);
                update_cgrp_time_from_cpuctx(cpuctx);
        }
 
+       is_active ^= ctx->is_active; /* changed bits */
+
        if (!ctx->nr_active || !(is_active & EVENT_ALL))
                return;
 
@@ -3410,7 +3422,6 @@ static struct task_struct *
 find_lively_task_by_vpid(pid_t vpid)
 {
        struct task_struct *task;
-       int err;
 
        rcu_read_lock();
        if (!vpid)
@@ -3424,16 +3435,7 @@ find_lively_task_by_vpid(pid_t vpid)
        if (!task)
                return ERR_PTR(-ESRCH);
 
-       /* Reuse ptrace permission checks for now. */
-       err = -EACCES;
-       if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
-               goto errout;
-
        return task;
-errout:
-       put_task_struct(task);
-       return ERR_PTR(err);
-
 }
 
 /*
@@ -6731,7 +6733,6 @@ void perf_swevent_put_recursion_context(int rctx)
 
        put_recursion_context(swhash->recursion, rctx);
 }
-EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context);
 
 void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 {
@@ -6988,6 +6989,25 @@ static int perf_tp_event_match(struct perf_event *event,
        return 1;
 }
 
+void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
+                              struct trace_event_call *call, u64 count,
+                              struct pt_regs *regs, struct hlist_head *head,
+                              struct task_struct *task)
+{
+       struct bpf_prog *prog = call->prog;
+
+       if (prog) {
+               *(struct pt_regs **)raw_data = regs;
+               if (!trace_call_bpf(prog, raw_data) || hlist_empty(head)) {
+                       perf_swevent_put_recursion_context(rctx);
+                       return;
+               }
+       }
+       perf_tp_event(call->event.type, count, raw_data, size, regs, head,
+                     rctx, task);
+}
+EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit);
+
 void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
                   struct pt_regs *regs, struct hlist_head *head, int rctx,
                   struct task_struct *task)
@@ -8418,6 +8438,24 @@ SYSCALL_DEFINE5(perf_event_open,
 
        get_online_cpus();
 
+       if (task) {
+               err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
+               if (err)
+                       goto err_cpus;
+
+               /*
+                * Reuse ptrace permission checks for now.
+                *
+                * We must hold cred_guard_mutex across this and any potential
+                * perf_install_in_context() call for this new event to
+                * serialize against exec() altering our credentials (and the
+                * perf_event_exit_task() that could imply).
+                */
+               err = -EACCES;
+               if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
+                       goto err_cred;
+       }
+
        if (flags & PERF_FLAG_PID_CGROUP)
                cgroup_fd = pid;
 
@@ -8425,7 +8463,7 @@ SYSCALL_DEFINE5(perf_event_open,
                                 NULL, NULL, cgroup_fd);
        if (IS_ERR(event)) {
                err = PTR_ERR(event);
-               goto err_cpus;
+               goto err_cred;
        }
 
        if (is_sampling_event(event)) {
@@ -8484,11 +8522,6 @@ SYSCALL_DEFINE5(perf_event_open,
                goto err_context;
        }
 
-       if (task) {
-               put_task_struct(task);
-               task = NULL;
-       }
-
        /*
         * Look up the group leader (we will attach this event to it):
         */
@@ -8547,6 +8580,7 @@ SYSCALL_DEFINE5(perf_event_open,
                                        f_flags);
        if (IS_ERR(event_file)) {
                err = PTR_ERR(event_file);
+               event_file = NULL;
                goto err_context;
        }
 
@@ -8585,6 +8619,11 @@ SYSCALL_DEFINE5(perf_event_open,
 
        WARN_ON_ONCE(ctx->parent_ctx);
 
+       /*
+        * This is the point of no return; we cannot fail hereafter. This is
+        * where we start modifying current state.
+        */
+
        if (move_group) {
                /*
                 * See perf_event_ctx_lock() for comments on the details
@@ -8656,6 +8695,11 @@ SYSCALL_DEFINE5(perf_event_open,
                mutex_unlock(&gctx->mutex);
        mutex_unlock(&ctx->mutex);
 
+       if (task) {
+               mutex_unlock(&task->signal->cred_guard_mutex);
+               put_task_struct(task);
+       }
+
        put_online_cpus();
 
        mutex_lock(&current->perf_event_mutex);
@@ -8688,6 +8732,9 @@ err_alloc:
         */
        if (!event_file)
                free_event(event);
+err_cred:
+       if (task)
+               mutex_unlock(&task->signal->cred_guard_mutex);
 err_cpus:
        put_online_cpus();
 err_task:
@@ -8972,6 +9019,9 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 
 /*
  * When a child task exits, feed back event values to parent events.
+ *
+ * Can be called with cred_guard_mutex held when called from
+ * install_exec_creds().
  */
 void perf_event_exit_task(struct task_struct *child)
 {
index 220fc17b9718856ca906dadfb0fe4c4d4823d689..7edc95edfaee10ee0fe8bdafc8f1caf81d883762 100644 (file)
@@ -321,7 +321,7 @@ retry:
        copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
 
        ret = __replace_page(vma, vaddr, old_page, new_page);
-       page_cache_release(new_page);
+       put_page(new_page);
 put_old:
        put_page(old_page);
 
@@ -539,14 +539,14 @@ static int __copy_insn(struct address_space *mapping, struct file *filp,
         * see uprobe_register().
         */
        if (mapping->a_ops->readpage)
-               page = read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT, filp);
+               page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
        else
-               page = shmem_read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT);
+               page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
        if (IS_ERR(page))
                return PTR_ERR(page);
 
        copy_from_page(page, offset, insn, nbytes);
-       page_cache_release(page);
+       put_page(page);
 
        return 0;
 }
index a5d2e74c89e0b217df98326e5febf3caf687687c..c20f06f38ef35a4776a61c3579a6de813e466ce0 100644 (file)
@@ -1295,10 +1295,20 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
        if (unlikely(should_fail_futex(true)))
                ret = -EFAULT;
 
-       if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
+       if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
                ret = -EFAULT;
-       else if (curval != uval)
-               ret = -EINVAL;
+       } else if (curval != uval) {
+               /*
+                * If an unconditional UNLOCK_PI operation (user space did not
+                * try the TID->0 transition) raced with a waiter setting the
+                * FUTEX_WAITERS flag between get_user() and locking the hash
+                * bucket lock, retry the operation.
+                */
+               if ((FUTEX_TID_MASK & curval) == uval)
+                       ret = -EAGAIN;
+               else
+                       ret = -EINVAL;
+       }
        if (ret) {
                raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
                return ret;
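
The new branch separates two kinds of mismatch: if only the FUTEX_WAITERS bit differs while the TID part still matches, a waiter raced in and the unlock is retried; any other difference means corrupted state. A user-space sketch of the classification, using the mask values from the futex uapi:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FUTEX_WAITERS  0x80000000u
    #define FUTEX_TID_MASK 0x3fffffffu

    static int classify(uint32_t curval, uint32_t expected_tid)
    {
            if ((curval & FUTEX_TID_MASK) == expected_tid)
                    return -EAGAIN; /* only WAITERS flipped: retry the unlock */
            return -EINVAL;         /* owner changed under us: corrupted state */
    }

    int main(void)
    {
            uint32_t tid = 1234;

            printf("%d\n", classify(tid | FUTEX_WAITERS, tid)); /* -EAGAIN */
            printf("%d\n", classify(4321, tid));                /* -EINVAL */
            return 0;
    }
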
@@ -1525,8 +1535,8 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
        if (likely(&hb1->chain != &hb2->chain)) {
                plist_del(&q->list, &hb1->chain);
                hb_waiters_dec(hb1);
-               plist_add(&q->list, &hb2->chain);
                hb_waiters_inc(hb2);
+               plist_add(&q->list, &hb2->chain);
                q->lock_ptr = &hb2->lock;
        }
        get_futex_key_refs(key2);
@@ -2622,6 +2632,15 @@ retry:
                 */
                if (ret == -EFAULT)
                        goto pi_faulted;
+               /*
+                * An unconditional UNLOCK_PI op raced against a waiter
+                * setting the FUTEX_WAITERS bit. Try again.
+                */
+               if (ret == -EAGAIN) {
+                       spin_unlock(&hb->lock);
+                       put_futex_key(&key);
+                       goto retry;
+               }
                /*
                 * wake_futex_pi has detected invalid state. Tell user
                 * space.
index c37f34b00a115e5fd1d69a5268f031fc7cde6862..14777af8e0977aed249e724601a789c7e21ffd0f 100644 (file)
@@ -94,6 +94,7 @@ unsigned int irq_reserve_ipi(struct irq_domain *domain,
                data = irq_get_irq_data(virq + i);
                cpumask_copy(data->common->affinity, dest);
                data->common->ipi_offset = offset;
+               irq_set_status_flags(virq + i, IRQ_NO_BALANCING);
        }
        return virq;
 
index 3efbee0834a85df450bc2156eb96a414527275d0..a02f2dddd1d710b7ea33bd58963e2d9d388266c1 100644 (file)
@@ -1,5 +1,6 @@
 #define pr_fmt(fmt) "kcov: " fmt
 
+#define DISABLE_BRANCH_PROFILING
 #include <linux/compiler.h>
 #include <linux/types.h>
 #include <linux/file.h>
@@ -43,7 +44,7 @@ struct kcov {
  * Entry point from instrumented code.
  * This is called once per basic-block/edge.
  */
-void __sanitizer_cov_trace_pc(void)
+void notrace __sanitizer_cov_trace_pc(void)
 {
        struct task_struct *t;
        enum kcov_mode mode;
index 8d34308ea449ad31bae472f0403c5f1b25324683..1391d3ee3b8666c07b677ffd35a5fd2f2b2882d7 100644 (file)
@@ -1415,6 +1415,9 @@ static int __init crash_save_vmcoreinfo_init(void)
        VMCOREINFO_OFFSET(page, lru);
        VMCOREINFO_OFFSET(page, _mapcount);
        VMCOREINFO_OFFSET(page, private);
+       VMCOREINFO_OFFSET(page, compound_dtor);
+       VMCOREINFO_OFFSET(page, compound_order);
+       VMCOREINFO_OFFSET(page, compound_head);
        VMCOREINFO_OFFSET(pglist_data, node_zones);
        VMCOREINFO_OFFSET(pglist_data, nr_zones);
 #ifdef CONFIG_FLAT_NODE_MEM_MAP
@@ -1447,8 +1450,8 @@ static int __init crash_save_vmcoreinfo_init(void)
 #ifdef CONFIG_X86
        VMCOREINFO_NUMBER(KERNEL_IMAGE_SIZE);
 #endif
-#ifdef CONFIG_HUGETLBFS
-       VMCOREINFO_SYMBOL(free_huge_page);
+#ifdef CONFIG_HUGETLB_PAGE
+       VMCOREINFO_NUMBER(HUGETLB_PAGE_DTOR);
 #endif
 
        arch_crash_save_vmcoreinfo();
index 53ab2f85d77e856942224b22980727c19e7dceae..78c1c0ee6dc1256904e1afb90611818813fc031c 100644 (file)
@@ -1999,6 +1999,79 @@ static inline int get_first_held_lock(struct task_struct *curr,
        return ++i;
 }
 
+#ifdef CONFIG_DEBUG_LOCKDEP
+/*
+ * Returns the next chain_key iteration
+ */
+static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
+{
+       u64 new_chain_key = iterate_chain_key(chain_key, class_idx);
+
+       printk(" class_idx:%d -> chain_key:%016Lx",
+               class_idx,
+               (unsigned long long)new_chain_key);
+       return new_chain_key;
+}
+
+static void
+print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next)
+{
+       struct held_lock *hlock;
+       u64 chain_key = 0;
+       int depth = curr->lockdep_depth;
+       int i;
+
+       printk("depth: %u\n", depth + 1);
+       for (i = get_first_held_lock(curr, hlock_next); i < depth; i++) {
+               hlock = curr->held_locks + i;
+               chain_key = print_chain_key_iteration(hlock->class_idx, chain_key);
+
+               print_lock(hlock);
+       }
+
+       print_chain_key_iteration(hlock_next->class_idx, chain_key);
+       print_lock(hlock_next);
+}
+
+static void print_chain_keys_chain(struct lock_chain *chain)
+{
+       int i;
+       u64 chain_key = 0;
+       int class_id;
+
+       printk("depth: %u\n", chain->depth);
+       for (i = 0; i < chain->depth; i++) {
+               class_id = chain_hlocks[chain->base + i];
+               chain_key = print_chain_key_iteration(class_id + 1, chain_key);
+
+               print_lock_name(lock_classes + class_id);
+               printk("\n");
+       }
+}
+
+static void print_collision(struct task_struct *curr,
+                       struct held_lock *hlock_next,
+                       struct lock_chain *chain)
+{
+       printk("\n");
+       printk("======================\n");
+       printk("[chain_key collision ]\n");
+       print_kernel_ident();
+       printk("----------------------\n");
+       printk("%s/%d: ", current->comm, task_pid_nr(current));
+       printk("Hash chain already cached but the contents don't match!\n");
+
+       printk("Held locks:");
+       print_chain_keys_held_locks(curr, hlock_next);
+
+       printk("Locks in cached chain:");
+       print_chain_keys_chain(chain);
+
+       printk("\nstack backtrace:\n");
+       dump_stack();
+}
+#endif
+
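
The chain key is a fold over the class indices of the held locks, which is why re-walking the held locks (or the cached chain) and printing each intermediate key makes a collision diagnosable. A sketch of the idea; the hash step below is illustrative and is not lockdep's real iterate_chain_key():

    #include <inttypes.h>
    #include <stdio.h>

    /* illustrative fold; the real iterate_chain_key() differs */
    static uint64_t iterate_chain_key(uint64_t key, int class_idx)
    {
            return (key << 7 | key >> 57) ^ (uint64_t)class_idx;
    }

    int main(void)
    {
            int held_classes[] = { 3, 17, 42 };
            uint64_t key = 0;

            for (unsigned int i = 0; i < 3; i++) {
                    key = iterate_chain_key(key, held_classes[i]);
                    printf(" class_idx:%d -> chain_key:%016" PRIx64 "\n",
                           held_classes[i], key);
            }
            return 0;
    }
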
 /*
  * Checks whether the chain and the current held locks are consistent
  * in depth and also in content. If they are not it most likely means
@@ -2014,14 +2087,18 @@ static int check_no_collision(struct task_struct *curr,
 
        i = get_first_held_lock(curr, hlock);
 
-       if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1)))
+       if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) {
+               print_collision(curr, hlock, chain);
                return 0;
+       }
 
        for (j = 0; j < chain->depth - 1; j++, i++) {
                id = curr->held_locks[i].class_idx - 1;
 
-               if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id))
+               if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
+                       print_collision(curr, hlock, chain);
                        return 0;
+               }
        }
 #endif
        return 1;
@@ -2099,15 +2176,37 @@ cache_hit:
        chain->irq_context = hlock->irq_context;
        i = get_first_held_lock(curr, hlock);
        chain->depth = curr->lockdep_depth + 1 - i;
+
+       BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
+       BUILD_BUG_ON((1UL << 6)  <= ARRAY_SIZE(curr->held_locks));
+       BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes));
+
        if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
                chain->base = nr_chain_hlocks;
-               nr_chain_hlocks += chain->depth;
                for (j = 0; j < chain->depth - 1; j++, i++) {
                        int lock_id = curr->held_locks[i].class_idx - 1;
                        chain_hlocks[chain->base + j] = lock_id;
                }
                chain_hlocks[chain->base + j] = class - lock_classes;
        }
+
+       if (nr_chain_hlocks < MAX_LOCKDEP_CHAIN_HLOCKS)
+               nr_chain_hlocks += chain->depth;
+
+#ifdef CONFIG_DEBUG_LOCKDEP
+       /*
+        * Important for check_no_collision().
+        */
+       if (unlikely(nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)) {
+               if (debug_locks_off_graph_unlock())
+                       return 0;
+
+               print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
+               dump_stack();
+               return 0;
+       }
+#endif
+
        hlist_add_head_rcu(&chain->entry, hash_head);
        debug_atomic_inc(chain_lookup_misses);
        inc_chains();
@@ -2855,6 +2954,11 @@ static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
        return 1;
 }
 
+static inline unsigned int task_irq_context(struct task_struct *task)
+{
+       return 2 * !!task->hardirq_context + !!task->softirq_context;
+}
+
 static int separate_irq_context(struct task_struct *curr,
                struct held_lock *hlock)
 {
@@ -2863,8 +2967,6 @@ static int separate_irq_context(struct task_struct *curr,
        /*
         * Keep track of points where we cross into an interrupt context:
         */
-       hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
-                               curr->softirq_context;
        if (depth) {
                struct held_lock *prev_hlock;
 
@@ -2896,6 +2998,11 @@ static inline int mark_irqflags(struct task_struct *curr,
        return 1;
 }
 
+static inline unsigned int task_irq_context(struct task_struct *task)
+{
+       return 0;
+}
+
 static inline int separate_irq_context(struct task_struct *curr,
                struct held_lock *hlock)
 {
@@ -3164,6 +3271,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
        hlock->acquire_ip = ip;
        hlock->instance = lock;
        hlock->nest_lock = nest_lock;
+       hlock->irq_context = task_irq_context(curr);
        hlock->trylock = trylock;
        hlock->read = read;
        hlock->check = check;
index dbb61a3025484b4d38bd090bd5923c21a3960ced..a0f61effad25cf0fa2d1dd27557dccd4a3f3ffc5 100644 (file)
@@ -141,6 +141,8 @@ static int lc_show(struct seq_file *m, void *v)
        int i;
 
        if (v == SEQ_START_TOKEN) {
+               if (nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)
+                       seq_printf(m, "(buggered) ");
                seq_printf(m, "all lock chains:\n");
                return 0;
        }
index eb2a2c9bc3fc15d181c9e5981648974dc7aec65c..d734b750200180afc91d9f5d2015f1cb27aa6032 100644 (file)
@@ -136,10 +136,12 @@ static ssize_t qstat_read(struct file *file, char __user *user_buf,
        }
 
        if (counter == qstat_pv_hash_hops) {
-               u64 frac;
+               u64 frac = 0;
 
-               frac = 100ULL * do_div(stat, kicks);
-               frac = DIV_ROUND_CLOSEST_ULL(frac, kicks);
+               if (kicks) {
+                       frac = 100ULL * do_div(stat, kicks);
+                       frac = DIV_ROUND_CLOSEST_ULL(frac, kicks);
+               }
 
                /*
                 * Return a X.XX decimal number
index 2e78ead309344ce3d00b1c6701c4e452856089be..9b5f04404152c296af3a96132f27cfc80ffa9af9 100644 (file)
@@ -105,16 +105,25 @@ static int r_show(struct seq_file *m, void *v)
 {
        struct resource *root = m->private;
        struct resource *r = v, *p;
+       unsigned long long start, end;
        int width = root->end < 0x10000 ? 4 : 8;
        int depth;
 
        for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
                if (p->parent == root)
                        break;
+
+       if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) {
+               start = r->start;
+               end = r->end;
+       } else {
+               start = end = 0;
+       }
+
        seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
                        depth * 2, "",
-                       width, (unsigned long long) r->start,
-                       width, (unsigned long long) r->end,
+                       width, start,
+                       width, end,
                        r->name ? r->name : "<BAD>");
        return 0;
 }
index d8465eeab8b3d7878866dc1aee7a87bfcb3f1b5e..d1f7149f870439d65b9cfcfbc27d4160bbb1672f 100644 (file)
@@ -321,6 +321,24 @@ static inline void init_hrtick(void)
 }
 #endif /* CONFIG_SCHED_HRTICK */
 
+/*
+ * cmpxchg based fetch_or, macro so it works for different integer types
+ */
+#define fetch_or(ptr, mask)                                            \
+       ({                                                              \
+               typeof(ptr) _ptr = (ptr);                               \
+               typeof(mask) _mask = (mask);                            \
+               typeof(*_ptr) _old, _val = *_ptr;                       \
+                                                                       \
+               for (;;) {                                              \
+                       _old = cmpxchg(_ptr, _val, _val | _mask);       \
+                       if (_old == _val)                               \
+                               break;                                  \
+                       _val = _old;                                    \
+               }                                                       \
+       _old;                                                           \
+})
+
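
The macro is a compare-and-swap loop that atomically ORs a mask into *ptr and returns the previous value, like atomic_fetch_or() but usable with any integer type. A user-space analogue, using a GCC/Clang builtin as a stand-in for the kernel's cmpxchg():

    #include <stdio.h>

    static unsigned long my_fetch_or(unsigned long *ptr, unsigned long mask)
    {
            unsigned long old, val = *ptr;

            for (;;) {
                    old = __sync_val_compare_and_swap(ptr, val, val | mask);
                    if (old == val)
                            break;  /* CAS won; 'old' is the prior value */
                    val = old;      /* lost a race; retry with the new value */
            }
            return old;
    }

    int main(void)
    {
            unsigned long flags = 0x1;

            /* the return value lets callers test "was the bit already set?" */
            printf("prev=%#lx now=%#lx\n", my_fetch_or(&flags, 0x4), flags);
            return 0;
    }
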
 #if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
 /*
  * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
@@ -578,17 +596,8 @@ bool sched_can_stop_tick(struct rq *rq)
                return false;
 
        /*
-        * FIFO realtime policy runs the highest priority task (after DEADLINE).
-        * Other runnable tasks are of a lower priority. The scheduler tick
-        * isn't needed.
-        */
-       fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
-       if (fifo_nr_running)
-               return true;
-
-       /*
-        * Round-robin realtime tasks time slice with other tasks at the same
-        * realtime priority.
+        * If there is more than one RR task, we need the tick to effect the
+        * actual RR behaviour.
         */
        if (rq->rt.rr_nr_running) {
                if (rq->rt.rr_nr_running == 1)
@@ -597,8 +606,20 @@ bool sched_can_stop_tick(struct rq *rq)
                        return false;
        }
 
-       /* Normal multitasking need periodic preemption checks */
-       if (rq->cfs.nr_running > 1)
+       /*
+        * If there are no RR tasks but there are FIFO tasks, we can skip the
+        * tick: there is no forced preemption between FIFO tasks.
+        */
+       fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
+       if (fifo_nr_running)
+               return true;
+
+       /*
+        * If there are no DL, RR, or FIFO tasks, only CFS tasks can be left;
+        * if there's more than one we need the tick for involuntary
+        * preemption.
+        */
+       if (rq->nr_running > 1)
                return false;
 
        return true;
index 21f82c29c9143c9a39e658593b0ac4671ce9c1b9..b3f05ee20d1845736580d3c4fbd55d67e7fc3b51 100644 (file)
@@ -357,10 +357,6 @@ static int parse(struct nlattr *na, struct cpumask *mask)
        return ret;
 }
 
-#if defined(CONFIG_64BIT) && !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
-#define TASKSTATS_NEEDS_PADDING 1
-#endif
-
 static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
 {
        struct nlattr *na, *ret;
@@ -370,29 +366,6 @@ static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
                        ? TASKSTATS_TYPE_AGGR_PID
                        : TASKSTATS_TYPE_AGGR_TGID;
 
-       /*
-        * The taskstats structure is internally aligned on 8 byte
-        * boundaries but the layout of the aggregrate reply, with
-        * two NLA headers and the pid (each 4 bytes), actually
-        * force the entire structure to be unaligned. This causes
-        * the kernel to issue unaligned access warnings on some
-        * architectures like ia64. Unfortunately, some software out there
-        * doesn't properly unroll the NLA packet and assumes that the start
-        * of the taskstats structure will always be 20 bytes from the start
-        * of the netlink payload. Aligning the start of the taskstats
-        * structure breaks this software, which we don't want. So, for now
-        * the alignment only happens on architectures that require it
-        * and those users will have to update to fixed versions of those
-        * packages. Space is reserved in the packet only when needed.
-        * This ifdef should be removed in several years e.g. 2012 once
-        * we can be confident that fixed versions are installed on most
-        * systems. We add the padding before the aggregate since the
-        * aggregate is already a defined type.
-        */
-#ifdef TASKSTATS_NEEDS_PADDING
-       if (nla_put(skb, TASKSTATS_TYPE_NULL, 0, NULL) < 0)
-               goto err;
-#endif
        na = nla_nest_start(skb, aggr);
        if (!na)
                goto err;
@@ -401,7 +374,8 @@ static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
                nla_nest_cancel(skb, na);
                goto err;
        }
-       ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats));
+       ret = nla_reserve_64bit(skb, TASKSTATS_TYPE_STATS,
+                               sizeof(struct taskstats), TASKSTATS_TYPE_NULL);
        if (!ret) {
                nla_nest_cancel(skb, na);
                goto err;
@@ -500,10 +474,9 @@ static size_t taskstats_packet_size(void)
        size_t size;
 
        size = nla_total_size(sizeof(u32)) +
-               nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
-#ifdef TASKSTATS_NEEDS_PADDING
-       size += nla_total_size(0); /* Padding for alignment */
-#endif
+               nla_total_size_64bit(sizeof(struct taskstats)) +
+               nla_total_size(0);
+
        return size;
 }
 
index 084b79f5917e5faa8e8ffc768387ef7011fe94d5..58e3310c9b213617210fb19c10b6eb9b22a405a9 100644 (file)
@@ -157,52 +157,50 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
 cpumask_var_t tick_nohz_full_mask;
 cpumask_var_t housekeeping_mask;
 bool tick_nohz_full_running;
-static unsigned long tick_dep_mask;
+static atomic_t tick_dep_mask;
 
-static void trace_tick_dependency(unsigned long dep)
+static bool check_tick_dependency(atomic_t *dep)
 {
-       if (dep & TICK_DEP_MASK_POSIX_TIMER) {
+       int val = atomic_read(dep);
+
+       if (val & TICK_DEP_MASK_POSIX_TIMER) {
                trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
-               return;
+               return true;
        }
 
-       if (dep & TICK_DEP_MASK_PERF_EVENTS) {
+       if (val & TICK_DEP_MASK_PERF_EVENTS) {
                trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
-               return;
+               return true;
        }
 
-       if (dep & TICK_DEP_MASK_SCHED) {
+       if (val & TICK_DEP_MASK_SCHED) {
                trace_tick_stop(0, TICK_DEP_MASK_SCHED);
-               return;
+               return true;
        }
 
-       if (dep & TICK_DEP_MASK_CLOCK_UNSTABLE)
+       if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) {
                trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
+               return true;
+       }
+
+       return false;
 }
 
 static bool can_stop_full_tick(struct tick_sched *ts)
 {
        WARN_ON_ONCE(!irqs_disabled());
 
-       if (tick_dep_mask) {
-               trace_tick_dependency(tick_dep_mask);
+       if (check_tick_dependency(&tick_dep_mask))
                return false;
-       }
 
-       if (ts->tick_dep_mask) {
-               trace_tick_dependency(ts->tick_dep_mask);
+       if (check_tick_dependency(&ts->tick_dep_mask))
                return false;
-       }
 
-       if (current->tick_dep_mask) {
-               trace_tick_dependency(current->tick_dep_mask);
+       if (check_tick_dependency(&current->tick_dep_mask))
                return false;
-       }
 
-       if (current->signal->tick_dep_mask) {
-               trace_tick_dependency(current->signal->tick_dep_mask);
+       if (check_tick_dependency(&current->signal->tick_dep_mask))
                return false;
-       }
 
        return true;
 }
@@ -259,12 +257,12 @@ static void tick_nohz_full_kick_all(void)
        preempt_enable();
 }
 
-static void tick_nohz_dep_set_all(unsigned long *dep,
+static void tick_nohz_dep_set_all(atomic_t *dep,
                                  enum tick_dep_bits bit)
 {
-       unsigned long prev;
+       int prev;
 
-       prev = fetch_or(dep, BIT_MASK(bit));
+       prev = atomic_fetch_or(dep, BIT(bit));
        if (!prev)
                tick_nohz_full_kick_all();
 }
@@ -280,7 +278,7 @@ void tick_nohz_dep_set(enum tick_dep_bits bit)
 
 void tick_nohz_dep_clear(enum tick_dep_bits bit)
 {
-       clear_bit(bit, &tick_dep_mask);
+       atomic_andnot(BIT(bit), &tick_dep_mask);
 }
 
 /*
@@ -289,12 +287,12 @@ void tick_nohz_dep_clear(enum tick_dep_bits bit)
  */
 void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
 {
-       unsigned long prev;
+       int prev;
        struct tick_sched *ts;
 
        ts = per_cpu_ptr(&tick_cpu_sched, cpu);
 
-       prev = fetch_or(&ts->tick_dep_mask, BIT_MASK(bit));
+       prev = atomic_fetch_or(&ts->tick_dep_mask, BIT(bit));
        if (!prev) {
                preempt_disable();
                /* Perf needs local kick that is NMI safe */
@@ -313,7 +311,7 @@ void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
 {
        struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);
 
-       clear_bit(bit, &ts->tick_dep_mask);
+       atomic_andnot(BIT(bit), &ts->tick_dep_mask);
 }
 
 /*
@@ -331,7 +329,7 @@ void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
 
 void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit)
 {
-       clear_bit(bit, &tsk->tick_dep_mask);
+       atomic_andnot(BIT(bit), &tsk->tick_dep_mask);
 }
 
 /*
@@ -345,7 +343,7 @@ void tick_nohz_dep_set_signal(struct signal_struct *sig, enum tick_dep_bits bit)
 
 void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit)
 {
-       clear_bit(bit, &sig->tick_dep_mask);
+       atomic_andnot(BIT(bit), &sig->tick_dep_mask);
 }
 
 /*
@@ -366,7 +364,8 @@ void __tick_nohz_task_switch(void)
        ts = this_cpu_ptr(&tick_cpu_sched);
 
        if (ts->tick_stopped) {
-               if (current->tick_dep_mask || current->signal->tick_dep_mask)
+               if (atomic_read(&current->tick_dep_mask) ||
+                   atomic_read(&current->signal->tick_dep_mask))
                        tick_nohz_full_kick();
        }
 out:
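
The conversion replaces a bare unsigned long manipulated with fetch_or() and clear_bit() by an atomic_t manipulated with atomic_fetch_or() and atomic_andnot(), so set and clear agree on one atomic type. A C11 user-space analogue of the set path (kick only on the 0 -> nonzero transition):

    #include <stdatomic.h>
    #include <stdio.h>

    #define BIT(n) (1 << (n))

    static atomic_int tick_dep_mask;

    static void dep_set(int bit)
    {
            int prev = atomic_fetch_or(&tick_dep_mask, BIT(bit));

            if (!prev)
                    puts("mask went 0 -> nonzero: kick CPUs to re-evaluate the tick");
    }

    static void dep_clear(int bit)
    {
            atomic_fetch_and(&tick_dep_mask, ~BIT(bit));    /* like atomic_andnot() */
    }

    int main(void)
    {
            dep_set(0);     /* kicks */
            dep_set(1);     /* no kick: the mask was already nonzero */
            dep_clear(0);
            dep_clear(1);
            printf("mask=%d\n", atomic_load(&tick_dep_mask));
            return 0;
    }
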
index eb4e32566a832c27afc6f388121521abab5c5d9b..bf38226e5c17c15e276c2e4e8c5f4fe423e071f3 100644 (file)
@@ -60,7 +60,7 @@ struct tick_sched {
        u64                             next_timer;
        ktime_t                         idle_expires;
        int                             do_timer_last;
-       unsigned long                   tick_dep_mask;
+       atomic_t                        tick_dep_mask;
 };
 
 extern struct tick_sched *tick_get_tick_sched(int cpu);
index 413ec561418014911f01a17f52aec28bd3a7d87d..780bcbe1d4de33bbb24d2328b231bb056bc641b5 100644 (file)
@@ -62,17 +62,21 @@ EXPORT_SYMBOL_GPL(trace_call_bpf);
 static u64 bpf_probe_read(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 {
        void *dst = (void *) (long) r1;
-       int size = (int) r2;
+       int ret, size = (int) r2;
        void *unsafe_ptr = (void *) (long) r3;
 
-       return probe_kernel_read(dst, unsafe_ptr, size);
+       ret = probe_kernel_read(dst, unsafe_ptr, size);
+       if (unlikely(ret < 0))
+               memset(dst, 0, size);
+
+       return ret;
 }
 
 static const struct bpf_func_proto bpf_probe_read_proto = {
        .func           = bpf_probe_read,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
-       .arg1_type      = ARG_PTR_TO_STACK,
+       .arg1_type      = ARG_PTR_TO_RAW_STACK,
        .arg2_type      = ARG_CONST_STACK_SIZE,
        .arg3_type      = ARG_ANYTHING,
 };
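
The ARG_PTR_TO_RAW_STACK change lets the verifier accept an uninitialized stack buffer here, which is only sound because the helper now writes the whole buffer on every path, zeroing it when probe_kernel_read() fails. A sketch of that contract with invented names:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* stand-in for probe_kernel_read(): fails on a bad address */
    static int unsafe_read(void *dst, const void *src, int size)
    {
            if (!src)
                    return -EFAULT;
            memcpy(dst, src, size);
            return 0;
    }

    static int my_probe_read(void *dst, int size, const void *unsafe_ptr)
    {
            int ret = unsafe_read(dst, unsafe_ptr, size);

            if (ret < 0)
                    memset(dst, 0, size);   /* never leave dst uninitialized */
            return ret;
    }

    int main(void)
    {
            char buf[8];    /* deliberately uninitialized */

            printf("ret=%d buf[0]=%d\n", my_probe_read(buf, 8, NULL), buf[0]);
            return 0;
    }
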
@@ -221,11 +225,12 @@ static const struct bpf_func_proto bpf_perf_event_read_proto = {
        .arg2_type      = ARG_ANYTHING,
 };
 
-static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
+static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
 {
        struct pt_regs *regs = (struct pt_regs *) (long) r1;
        struct bpf_map *map = (struct bpf_map *) (long) r2;
        struct bpf_array *array = container_of(map, struct bpf_array, map);
+       u64 index = flags & BPF_F_INDEX_MASK;
        void *data = (void *) (long) r4;
        struct perf_sample_data sample_data;
        struct perf_event *event;
@@ -235,6 +240,10 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
                .data = data,
        };
 
+       if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
+               return -EINVAL;
+       if (index == BPF_F_CURRENT_CPU)
+               index = raw_smp_processor_id();
        if (unlikely(index >= array->map.max_entries))
                return -E2BIG;
 
@@ -268,6 +277,33 @@ static const struct bpf_func_proto bpf_perf_event_output_proto = {
        .arg5_type      = ARG_CONST_STACK_SIZE,
 };
 
+static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);
+
+static u64 bpf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
+{
+       struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
+
+       perf_fetch_caller_regs(regs);
+
+       return bpf_perf_event_output((long)regs, r2, flags, r4, size);
+}
+
+static const struct bpf_func_proto bpf_event_output_proto = {
+       .func           = bpf_event_output,
+       .gpl_only       = true,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_CONST_MAP_PTR,
+       .arg3_type      = ARG_ANYTHING,
+       .arg4_type      = ARG_PTR_TO_STACK,
+       .arg5_type      = ARG_CONST_STACK_SIZE,
+};
+
+const struct bpf_func_proto *bpf_get_event_output_proto(void)
+{
+       return &bpf_event_output_proto;
+}
+
 static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
 {
        switch (func_id) {
@@ -347,7 +383,7 @@ static u64 bpf_perf_event_output_tp(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
         * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
         * from there and call the same bpf_perf_event_output() helper
         */
-       u64 ctx = *(long *)r1;
+       u64 ctx = *(long *)(uintptr_t)r1;
 
        return bpf_perf_event_output(ctx, r2, index, r4, size);
 }
@@ -365,7 +401,7 @@ static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
 
 static u64 bpf_get_stackid_tp(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 {
-       u64 ctx = *(long *)r1;
+       u64 ctx = *(long *)(uintptr_t)r1;
 
        return bpf_get_stackid(ctx, r2, r3, r4, r5);
 }
index ced963049e0aa4b04cdb4314443e48d71135fd44..b7b0760ba6ee9984521691704aa9483c034f54d6 100644 (file)
@@ -2113,8 +2113,13 @@ event_create_dir(struct dentry *parent, struct trace_event_file *file)
        trace_create_file("filter", 0644, file->dir, file,
                          &ftrace_event_filter_fops);
 
-       trace_create_file("trigger", 0644, file->dir, file,
-                         &event_trigger_fops);
+       /*
+        * Only event directories that can be enabled should have
+        * triggers.
+        */
+       if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
+               trace_create_file("trigger", 0644, file->dir, file,
+                                 &event_trigger_fops);
 
        trace_create_file("format", 0444, file->dir, call,
                          &ftrace_event_format_fops);
index 2232ae3e3ad655ad4a697cf4ed0bd3fb07878a43..3bfdff06eea728b38364652808e9f548f0e6fe37 100644 (file)
@@ -666,6 +666,35 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
         */
        smp_wmb();
        set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
+       /*
+        * The following mb guarantees that previous clear of a PENDING bit
+        * will not be reordered with any speculative LOADS or STORES from
+        * work->current_func, which is executed afterwards.  This possible
+        * reordering can lead to a missed execution on an attempt to queue
+        * the same @work.  E.g. consider this case:
+        *
+        *   CPU#0                         CPU#1
+        *   ----------------------------  --------------------------------
+        *
+        * 1  STORE event_indicated
+        * 2  queue_work_on() {
+        * 3    test_and_set_bit(PENDING)
+        * 4 }                             set_..._and_clear_pending() {
+        * 5                                 set_work_data() # clear bit
+        * 6                                 smp_mb()
+        * 7                               work->current_func() {
+        * 8                                  LOAD event_indicated
+        *                                 }
+        *
+        * Without an explicit full barrier speculative LOAD on line 8 can
+        * be executed before CPU#0 does STORE on line 1.  If that happens,
+        * CPU#0 observes the PENDING bit is still set and new execution of
+        * a @work is not queued, in the hope that CPU#1 will eventually
+        * finish the queued @work.  Meanwhile CPU#1 does not see
+        * event_indicated is set, because speculative LOAD was executed
+        * before actual STORE.
+        */
+       smp_mb();
 }
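
A user-space rendering of the scenario in the comment above may help; the fence after clearing PENDING is what forbids the work function's loads from being hoisted before the clear. This is illustrative only, not the workqueue code:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool pending;
    static atomic_bool event_indicated;

    /* CPU#1 side: finish one execution of the work item */
    static void clear_pending_then_run(void)
    {
            atomic_store_explicit(&pending, false, memory_order_relaxed);
            atomic_thread_fence(memory_order_seq_cst);  /* the smp_mb() above */
            /* work->current_func(): this load cannot be hoisted above the
             * fence, so it sees a flag stored before a failed queue attempt */
            if (atomic_load_explicit(&event_indicated, memory_order_relaxed))
                    puts("event handled");
    }

    /* CPU#0 side: signal an event, then try to queue the work */
    static bool signal_and_queue(void)
    {
            atomic_store(&event_indicated, true);
            /* test_and_set_bit(PENDING): if already set, we rely on the
             * running work to observe event_indicated, which is exactly
             * what the fence above guarantees */
            return !atomic_exchange(&pending, true);
    }

    int main(void)
    {
            signal_and_queue();
            clear_pending_then_run();
            return 0;
    }
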
 
 static void clear_work_data(struct work_struct *work)
index 03dd576e67730fb2870c44512f55c1c0c39b77f3..59fd7c0b119cbc3e0b61137732f76cc3759fb7c7 100644 (file)
@@ -524,7 +524,9 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
                        free_slot = i;
                        continue;
                }
-               if (ops->compare_object(assoc_array_ptr_to_leaf(ptr), index_key)) {
+               if (assoc_array_ptr_is_leaf(ptr) &&
+                   ops->compare_object(assoc_array_ptr_to_leaf(ptr),
+                                       index_key)) {
                        pr_devel("replace in slot %d\n", i);
                        edit->leaf_p = &node->slots[i];
                        edit->dead_leaf = node->slots[i];
index abcecdc2d0f23a8ac9884db5297d7b46dfbcea1b..c79d7ea8a38e47b8292d9f9a23bb0744a0efe7c8 100644 (file)
@@ -11,8 +11,7 @@
 /*
  * Detects 64 bits mode
  */
-#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) \
-       || defined(__ppc64__) || defined(__LP64__))
+#if defined(CONFIG_64BIT)
 #define LZ4_ARCH64 1
 #else
 #define LZ4_ARCH64 0
@@ -25,9 +24,7 @@
 typedef struct _U16_S { u16 v; } U16_S;
 typedef struct _U32_S { u32 v; } U32_S;
 typedef struct _U64_S { u64 v; } U64_S;
-#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)            \
-       || defined(CONFIG_ARM) && __LINUX_ARM_ARCH__ >= 6       \
-       && defined(ARM_EFFICIENT_UNALIGNED_ACCESS)
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
 
 #define A16(x) (((U16_S *)(x))->v)
 #define A32(x) (((U32_S *)(x))->v)
@@ -35,6 +32,10 @@ typedef struct _U64_S { u64 v; } U64_S;
 
 #define PUT4(s, d) (A32(d) = A32(s))
 #define PUT8(s, d) (A64(d) = A64(s))
+
+#define LZ4_READ_LITTLEENDIAN_16(d, s, p)      \
+       (d = s - A16(p))
+
 #define LZ4_WRITE_LITTLEENDIAN_16(p, v)        \
        do {    \
                A16(p) = v; \
@@ -51,10 +52,13 @@ typedef struct _U64_S { u64 v; } U64_S;
 #define PUT8(s, d) \
        put_unaligned(get_unaligned((const u64 *) s), (u64 *) d)
 
-#define LZ4_WRITE_LITTLEENDIAN_16(p, v)        \
-       do {    \
-               put_unaligned(v, (u16 *)(p)); \
-               p += 2; \
+#define LZ4_READ_LITTLEENDIAN_16(d, s, p)      \
+       (d = s - get_unaligned_le16(p))
+
+#define LZ4_WRITE_LITTLEENDIAN_16(p, v)                        \
+       do {                                            \
+               put_unaligned_le16(v, (u16 *)(p));      \
+               p += 2;                                 \
        } while (0)
 #endif
 
@@ -140,9 +144,6 @@ typedef struct _U64_S { u64 v; } U64_S;
 
 #endif
 
-#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
-       (d = s - get_unaligned_le16(p))
-
 #define LZ4_WILDCOPY(s, d, e)          \
        do {                            \
                LZ4_COPYPACKET(s, d);   \
index f5907d23272d48562c69b911e5a0a619e3f4c180..fce1e9afc6d97a3a27787d44b48590a5850138aa 100644 (file)
@@ -354,6 +354,30 @@ struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
 }
 EXPORT_SYMBOL(__nla_reserve);
 
+/**
+ * __nla_reserve_64bit - reserve room for attribute on the skb and align it
+ * @skb: socket buffer to reserve room on
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ * @padattr: attribute type for the padding
+ *
+ * Adds a netlink attribute header to a socket buffer and reserves
+ * room for the payload but does not copy it. It also ensures that this
+ * attribute will have a 64-bit aligned nla_data() area.
+ *
+ * The caller is responsible for ensuring that the skb provides enough
+ * tailroom for the attribute header and payload.
+ */
+struct nlattr *__nla_reserve_64bit(struct sk_buff *skb, int attrtype,
+                                  int attrlen, int padattr)
+{
+       if (nla_need_padding_for_64bit(skb))
+               nla_align_64bit(skb, padattr);
+
+       return __nla_reserve(skb, attrtype, attrlen);
+}
+EXPORT_SYMBOL(__nla_reserve_64bit);
+
 /**
  * __nla_reserve_nohdr - reserve room for attribute without header
  * @skb: socket buffer to reserve room on
@@ -396,6 +420,36 @@ struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
 }
 EXPORT_SYMBOL(nla_reserve);
 
+/**
+ * nla_reserve_64bit - reserve room for attribute on the skb and align it
+ * @skb: socket buffer to reserve room on
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ * @padattr: attribute type for the padding
+ *
+ * Adds a netlink attribute header to a socket buffer and reserves
+ * room for the payload but does not copy it. It also ensures that this
+ * attribute will have a 64-bit aligned nla_data() area.
+ *
+ * Returns NULL if the tailroom of the skb is insufficient to store
+ * the attribute header and payload.
+ */
+struct nlattr *nla_reserve_64bit(struct sk_buff *skb, int attrtype, int attrlen,
+                                int padattr)
+{
+       size_t len;
+
+       if (nla_need_padding_for_64bit(skb))
+               len = nla_total_size_64bit(attrlen);
+       else
+               len = nla_total_size(attrlen);
+       if (unlikely(skb_tailroom(skb) < len))
+               return NULL;
+
+       return __nla_reserve_64bit(skb, attrtype, attrlen, padattr);
+}
+EXPORT_SYMBOL(nla_reserve_64bit);
+
 /**
  * nla_reserve_nohdr - reserve room for attribute without header
  * @skb: socket buffer to reserve room on
@@ -435,6 +489,27 @@ void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
 }
 EXPORT_SYMBOL(__nla_put);
 
+/**
+ * __nla_put_64bit - Add a netlink attribute to a socket buffer and align it
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ * @data: head of attribute payload
+ * @padattr: attribute type for the padding
+ *
+ * The caller is responsible for ensuring that the skb provides enough
+ * tailroom for the attribute header and payload.
+ */
+void __nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
+                    const void *data, int padattr)
+{
+       struct nlattr *nla;
+
+       nla = __nla_reserve_64bit(skb, attrtype, attrlen, padattr);
+       memcpy(nla_data(nla), data, attrlen);
+}
+EXPORT_SYMBOL(__nla_put_64bit);
+
 /**
  * __nla_put_nohdr - Add a netlink attribute without header
  * @skb: socket buffer to add attribute to
@@ -473,6 +548,34 @@ int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
 }
 EXPORT_SYMBOL(nla_put);
 
+/**
+ * nla_put_64bit - Add a netlink attribute to a socket buffer and align it
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ * @data: head of attribute payload
+ * @padattr: attribute type for the padding
+ *
+ * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
+ * the attribute header and payload.
+ */
+int nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
+                 const void *data, int padattr)
+{
+       size_t len;
+
+       if (nla_need_padding_for_64bit(skb))
+               len = nla_total_size_64bit(attrlen);
+       else
+               len = nla_total_size(attrlen);
+       if (unlikely(skb_tailroom(skb) < len))
+               return -EMSGSIZE;
+
+       __nla_put_64bit(skb, attrtype, attrlen, data, padattr);
+       return 0;
+}
+EXPORT_SYMBOL(nla_put_64bit);
+
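
A model of the arithmetic behind these helpers may help; the constants match the netlink ABI, while the functions are simplified stand-ins (the kernel additionally skips all padding on architectures with efficient unaligned access):

    #include <stdio.h>

    #define NLA_HDRLEN   4
    #define NLA_ALIGN(x) (((x) + 3) & ~3)

    /* header + payload, rounded to 4 bytes */
    static int total_size(int payload)
    {
            return NLA_ALIGN(NLA_HDRLEN + payload);
    }

    /* same, plus room for one zero-length pad attribute if it is needed */
    static int total_size_64bit(int payload)
    {
            return total_size(payload) + total_size(0);
    }

    /* an attribute placed at offset 'off' has its data at off + NLA_HDRLEN;
     * if that is not 8-byte aligned, a pad attribute must be emitted first */
    static int need_padding(int off)
    {
            return (off + NLA_HDRLEN) % 8 != 0;
    }

    int main(void)
    {
            printf("pad needed at offset 16: %d\n", need_padding(16));  /* 1 */
            printf("pad needed at offset 20: %d\n", need_padding(20));  /* 0 */
            printf("reserve %d bytes for a 48-byte payload\n",
                   total_size_64bit(48));
            return 0;
    }
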
 /**
  * nla_put_nohdr - Add a netlink attribute without header
  * @skb: socket buffer to add attribute to
index 654c9d87e83aac9da685d2626dc437f2be0ac95c..53ad6c0831aebe6d3c9ec7cfc1387fa34ca7ea72 100644 (file)
 
 #define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)
 
+#define STACK_ALLOC_NULL_PROTECTION_BITS 1
 #define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
 #define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
 #define STACK_ALLOC_ALIGN 4
 #define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
                                        STACK_ALLOC_ALIGN)
-#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - STACK_ALLOC_OFFSET_BITS)
+#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
+               STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
 #define STACK_ALLOC_SLABS_CAP 1024
 #define STACK_ALLOC_MAX_SLABS \
        (((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
@@ -59,6 +61,7 @@ union handle_parts {
        struct {
                u32 slabindex : STACK_ALLOC_INDEX_BITS;
                u32 offset : STACK_ALLOC_OFFSET_BITS;
+               u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
        };
 };
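
The extra bit removes an ambiguity: handle value 0 doubles as "no stack saved", so the very first stack stored at slab 0, offset 0 would otherwise encode to 0 and be indistinguishable from failure. A sketch of the encoding, with the bit widths the macros above derive for 4 KiB pages:

    #include <stdint.h>
    #include <stdio.h>

    union handle_parts {
            uint32_t handle;
            struct {
                    uint32_t slabindex : 21;
                    uint32_t offset    : 10;
                    uint32_t valid     : 1;
            };
    };

    int main(void)
    {
            union handle_parts h = { .handle = 0 };

            h.slabindex = 0;        /* very first stack: slab 0 ... */
            h.offset    = 0;        /* ... at offset 0 */
            h.valid     = 1;        /* still yields a nonzero handle */

            printf("handle=%#x (0 stays reserved for 'no stack')\n", h.handle);
            return 0;
    }
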
 
@@ -136,6 +139,7 @@ static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
        stack->size = size;
        stack->handle.slabindex = depot_index;
        stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
+       stack->handle.valid = 1;
        memcpy(stack->entries, entries, size * sizeof(unsigned long));
        depot_offset += required_size;
 
@@ -210,10 +214,6 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
                goto fast_exit;
 
        hash = hash_stack(trace->entries, trace->nr_entries);
-       /* Bad luck, we won't store this stack. */
-       if (hash == 0)
-               goto exit;
-
        bucket = &stack_table[hash & STACK_HASH_MASK];
 
        /*
index 27a7a26b1ece2e145296b144096191ffa6af504e..8f22fbedc3a699d07f05375d0a0832cc9da1d1b0 100644 (file)
@@ -2443,6 +2443,22 @@ static struct bpf_test tests[] = {
                { },
                { { 0, 4294967295U } },
        },
+       {
+               "ALU_ADD_X: 2 + 4294967294 = 0",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 2),
+                       BPF_LD_IMM64(R1, 4294967294U),
+                       BPF_ALU32_REG(BPF_ADD, R0, R1),
+                       BPF_JMP_IMM(BPF_JEQ, R0, 0, 2),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
        {
                "ALU64_ADD_X: 1 + 2 = 3",
                .u.insns_int = {
@@ -2467,6 +2483,23 @@ static struct bpf_test tests[] = {
                { },
                { { 0, 4294967295U } },
        },
+       {
+               "ALU64_ADD_X: 2 + 4294967294 = 4294967296",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 2),
+                       BPF_LD_IMM64(R1, 4294967294U),
+                       BPF_LD_IMM64(R2, 4294967296ULL),
+                       BPF_ALU64_REG(BPF_ADD, R0, R1),
+                       BPF_JMP_REG(BPF_JEQ, R0, R2, 2),
+                       BPF_MOV32_IMM(R0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV32_IMM(R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
        /* BPF_ALU | BPF_ADD | BPF_K */
        {
                "ALU_ADD_K: 1 + 2 = 3",
@@ -2501,6 +2534,21 @@ static struct bpf_test tests[] = {
                { },
                { { 0, 4294967295U } },
        },
+       {
+               "ALU_ADD_K: 4294967294 + 2 = 0",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 4294967294U),
+                       BPF_ALU32_IMM(BPF_ADD, R0, 2),
+                       BPF_JMP_IMM(BPF_JEQ, R0, 0, 2),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
        {
                "ALU_ADD_K: 0 + (-1) = 0x00000000ffffffff",
                .u.insns_int = {
@@ -2517,6 +2565,70 @@ static struct bpf_test tests[] = {
                { },
                { { 0, 0x1 } },
        },
+       {
+               "ALU_ADD_K: 0 + 0xffff = 0xffff",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R2, 0x0),
+                       BPF_LD_IMM64(R3, 0xffff),
+                       BPF_ALU32_IMM(BPF_ADD, R2, 0xffff),
+                       BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+                       BPF_MOV32_IMM(R0, 2),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV32_IMM(R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x1 } },
+       },
+       {
+               "ALU_ADD_K: 0 + 0x7fffffff = 0x7fffffff",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R2, 0x0),
+                       BPF_LD_IMM64(R3, 0x7fffffff),
+                       BPF_ALU32_IMM(BPF_ADD, R2, 0x7fffffff),
+                       BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+                       BPF_MOV32_IMM(R0, 2),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV32_IMM(R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x1 } },
+       },
+       {
+               "ALU_ADD_K: 0 + 0x80000000 = 0x80000000",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R2, 0x0),
+                       BPF_LD_IMM64(R3, 0x80000000),
+                       BPF_ALU32_IMM(BPF_ADD, R2, 0x80000000),
+                       BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+                       BPF_MOV32_IMM(R0, 2),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV32_IMM(R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x1 } },
+       },
+       {
+               "ALU_ADD_K: 0 + 0x80008000 = 0x80008000",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R2, 0x0),
+                       BPF_LD_IMM64(R3, 0x80008000),
+                       BPF_ALU32_IMM(BPF_ADD, R2, 0x80008000),
+                       BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+                       BPF_MOV32_IMM(R0, 2),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV32_IMM(R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x1 } },
+       },
        {
                "ALU64_ADD_K: 1 + 2 = 3",
                .u.insns_int = {
@@ -2550,6 +2662,22 @@ static struct bpf_test tests[] = {
                { },
                { { 0, 2147483647 } },
        },
+       {
+               "ALU64_ADD_K: 4294967294 + 2 = 4294967296",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 4294967294U),
+                       BPF_LD_IMM64(R1, 4294967296ULL),
+                       BPF_ALU64_IMM(BPF_ADD, R0, 2),
+                       BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
        {
                "ALU64_ADD_K: 2147483646 + -2147483647 = -1",
                .u.insns_int = {
@@ -2593,6 +2721,70 @@ static struct bpf_test tests[] = {
                { },
                { { 0, 0x1 } },
        },
+       {
+               "ALU64_ADD_K: 0 + 0xffff = 0xffff",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R2, 0x0),
+                       BPF_LD_IMM64(R3, 0xffff),
+                       BPF_ALU64_IMM(BPF_ADD, R2, 0xffff),
+                       BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+                       BPF_MOV32_IMM(R0, 2),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV32_IMM(R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x1 } },
+       },
+       {
+               "ALU64_ADD_K: 0 + 0x7fffffff = 0x7fffffff",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R2, 0x0),
+                       BPF_LD_IMM64(R3, 0x7fffffff),
+                       BPF_ALU64_IMM(BPF_ADD, R2, 0x7fffffff),
+                       BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+                       BPF_MOV32_IMM(R0, 2),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV32_IMM(R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x1 } },
+       },
+       {
+               "ALU64_ADD_K: 0 + 0x80000000 = 0xffffffff80000000",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R2, 0x0),
+                       BPF_LD_IMM64(R3, 0xffffffff80000000LL),
+                       BPF_ALU64_IMM(BPF_ADD, R2, 0x80000000),
+                       BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+                       BPF_MOV32_IMM(R0, 2),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV32_IMM(R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x1 } },
+       },
+       {
+               "ALU_ADD_K: 0 + 0x80008000 = 0xffffffff80008000",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R2, 0x0),
+                       BPF_LD_IMM64(R3, 0xffffffff80008000LL),
+                       BPF_ALU64_IMM(BPF_ADD, R2, 0x80008000),
+                       BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+                       BPF_MOV32_IMM(R0, 2),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV32_IMM(R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x1 } },
+       },
        /* BPF_ALU | BPF_SUB | BPF_X */
        {
                "ALU_SUB_X: 3 - 1 = 2",
@@ -4222,6 +4414,20 @@ static struct bpf_test tests[] = {
                { },
                { { 0, 1 } },
        },
+       {
+               "JMP_JGT_K: Unsigned jump: if (-1 > 1) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, -1),
+                       BPF_JMP_IMM(BPF_JGT, R1, 1, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
        /* BPF_JMP | BPF_JGE | BPF_K */
        {
                "JMP_JGE_K: if (3 >= 2) return 1",
@@ -4303,7 +4509,7 @@ static struct bpf_test tests[] = {
                .u.insns_int = {
                        BPF_ALU32_IMM(BPF_MOV, R0, 0),
                        BPF_LD_IMM64(R1, 3),
-                       BPF_JMP_IMM(BPF_JNE, R1, 2, 1),
+                       BPF_JMP_IMM(BPF_JSET, R1, 2, 1),
                        BPF_EXIT_INSN(),
                        BPF_ALU32_IMM(BPF_MOV, R0, 1),
                        BPF_EXIT_INSN(),
@@ -4317,7 +4523,7 @@ static struct bpf_test tests[] = {
                .u.insns_int = {
                        BPF_ALU32_IMM(BPF_MOV, R0, 0),
                        BPF_LD_IMM64(R1, 3),
-                       BPF_JMP_IMM(BPF_JNE, R1, 0xffffffff, 1),
+                       BPF_JMP_IMM(BPF_JSET, R1, 0xffffffff, 1),
                        BPF_EXIT_INSN(),
                        BPF_ALU32_IMM(BPF_MOV, R0, 1),
                        BPF_EXIT_INSN(),
@@ -4404,6 +4610,21 @@ static struct bpf_test tests[] = {
                { },
                { { 0, 1 } },
        },
+       {
+               "JMP_JGT_X: Unsigned jump: if (-1 > 1) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, -1),
+                       BPF_LD_IMM64(R2, 1),
+                       BPF_JMP_REG(BPF_JGT, R1, R2, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
        /* BPF_JMP | BPF_JGE | BPF_X */
        {
                "JMP_JGE_X: if (3 >= 2) return 1",
@@ -4474,7 +4695,7 @@ static struct bpf_test tests[] = {
                        BPF_ALU32_IMM(BPF_MOV, R0, 0),
                        BPF_LD_IMM64(R1, 3),
                        BPF_LD_IMM64(R2, 2),
-                       BPF_JMP_REG(BPF_JNE, R1, R2, 1),
+                       BPF_JMP_REG(BPF_JSET, R1, R2, 1),
                        BPF_EXIT_INSN(),
                        BPF_ALU32_IMM(BPF_MOV, R0, 1),
                        BPF_EXIT_INSN(),
@@ -4489,7 +4710,7 @@ static struct bpf_test tests[] = {
                        BPF_ALU32_IMM(BPF_MOV, R0, 0),
                        BPF_LD_IMM64(R1, 3),
                        BPF_LD_IMM64(R2, 0xffffffff),
-                       BPF_JMP_REG(BPF_JNE, R1, R2, 1),
+                       BPF_JMP_REG(BPF_JSET, R1, R2, 1),
                        BPF_EXIT_INSN(),
                        BPF_ALU32_IMM(BPF_MOV, R0, 1),
                        BPF_EXIT_INSN(),
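The new and corrected tests above pin down two instruction semantics: BPF_JGT is an unsigned compare, and BPF_JSET branches on a non-zero bitwise AND (which is why the BPF_JNE opcodes in the JSET-named tests were wrong). In plain C, as a sketch:

	unsigned long long r1 = (unsigned long long)-1;	/* 0xffffffffffffffff */

	int jgt_taken  = r1 > 1;	/* 1: JGT compares unsigned, so -1 > 1  */
	int jset_taken = (3 & 2) != 0;	/* 1: JSET tests dst & src/imm, not != */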
index bfbd7096b6edc3c5e87398e93507cb124661b3ab..0c6317b7db38a086191a043b5ec480e4f5264e27 100644 (file)
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -898,7 +898,7 @@ static atomic_t nr_wb_congested[2];
 void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
 {
        wait_queue_head_t *wqh = &congestion_wqh[sync];
-       enum wb_state bit;
+       enum wb_congested_state bit;
 
        bit = sync ? WB_sync_congested : WB_async_congested;
        if (test_and_clear_bit(bit, &congested->state))
@@ -911,7 +911,7 @@ EXPORT_SYMBOL(clear_wb_congested);
 
 void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
 {
-       enum wb_state bit;
+       enum wb_congested_state bit;
 
        bit = sync ? WB_sync_congested : WB_async_congested;
        if (!test_and_set_bit(bit, &congested->state))
index ccf97b02b85f32d38f6f68a41bedf89b9a1ee596..8fa2540438015c1859724c606072a2770939d954 100644 (file)
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -852,16 +852,8 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
                pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
                                                        ISOLATE_UNEVICTABLE);
 
-               /*
-                * In case of fatal failure, release everything that might
-                * have been isolated in the previous iteration, and signal
-                * the failure back to caller.
-                */
-               if (!pfn) {
-                       putback_movable_pages(&cc->migratepages);
-                       cc->nr_migratepages = 0;
+               if (!pfn)
                        break;
-               }
 
                if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
                        break;
@@ -1741,7 +1733,7 @@ void compaction_unregister_node(struct node *node)
 
 static inline bool kcompactd_work_requested(pg_data_t *pgdat)
 {
-       return pgdat->kcompactd_max_order > 0;
+       return pgdat->kcompactd_max_order > 0 || kthread_should_stop();
 }
 
 static bool kcompactd_node_suitable(pg_data_t *pgdat)
@@ -1805,6 +1797,8 @@ static void kcompactd_do_work(pg_data_t *pgdat)
                INIT_LIST_HEAD(&cc.freepages);
                INIT_LIST_HEAD(&cc.migratepages);
 
+               if (kthread_should_stop())
+                       return;
                status = compact_zone(zone, &cc);
 
                if (zone_watermark_ok(zone, cc.order, low_wmark_pages(zone),
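The kthread_should_stop() checks matter because kcompactd sleeps on a wait queue whose wake condition is kcompactd_work_requested(); without them, a thread being stopped could sleep forever or start new work while exiting. A sketch of the loop being guarded (assumed shape, not a literal hunk):

	while (!kthread_should_stop()) {
		pgdat->kcompactd_max_order = 0;
		wait_event_freezable(pgdat->kcompactd_wait,
				     kcompactd_work_requested(pgdat));
		kcompactd_do_work(pgdat);
	}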
index b8a5bc66b0c09b915237ab600cdeceaa81b8d153..b8024fa7101d9a8ff625606de973b9b670d0ecf4 100644 (file)
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -97,8 +97,8 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
                break;
        case POSIX_FADV_WILLNEED:
                /* First and last PARTIAL page! */
-               start_index = offset >> PAGE_CACHE_SHIFT;
-               end_index = endbyte >> PAGE_CACHE_SHIFT;
+               start_index = offset >> PAGE_SHIFT;
+               end_index = endbyte >> PAGE_SHIFT;
 
                /* Careful about overflow on the "+1" */
                nrpages = end_index - start_index + 1;
@@ -124,8 +124,8 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
                 * preserved on the expectation that it is better to preserve
                 * needed memory than to discard unneeded memory.
                 */
-               start_index = (offset+(PAGE_CACHE_SIZE-1)) >> PAGE_CACHE_SHIFT;
-               end_index = (endbyte >> PAGE_CACHE_SHIFT);
+               start_index = (offset+(PAGE_SIZE-1)) >> PAGE_SHIFT;
+               end_index = (endbyte >> PAGE_SHIFT);
 
                if (end_index >= start_index) {
                        unsigned long count = invalidate_mapping_pages(mapping,
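The PAGE_CACHE_* and page_cache_get()/page_cache_release() conversions here and throughout the mm/ hunks below are mechanical: the page-cache spellings had long been straight aliases in include/linux/pagemap.h, roughly:

	#define PAGE_CACHE_SHIFT	PAGE_SHIFT
	#define PAGE_CACHE_SIZE		PAGE_SIZE
	#define PAGE_CACHE_MASK		PAGE_MASK
	#define page_cache_get(page)	get_page(page)
	#define page_cache_release(page)	put_page(page)

so the substitutions change spelling only, not behavior.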
index a8c69c8c0a90a1e50cfdc822ce2cbc3854e0c8e8..f2479af09da91ae8767b009c294bef2e3e75fc2e 100644 (file)
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -265,7 +265,7 @@ void delete_from_page_cache(struct page *page)
 
        if (freepage)
                freepage(page);
-       page_cache_release(page);
+       put_page(page);
 }
 EXPORT_SYMBOL(delete_from_page_cache);
 
@@ -352,8 +352,8 @@ EXPORT_SYMBOL(filemap_flush);
 static int __filemap_fdatawait_range(struct address_space *mapping,
                                     loff_t start_byte, loff_t end_byte)
 {
-       pgoff_t index = start_byte >> PAGE_CACHE_SHIFT;
-       pgoff_t end = end_byte >> PAGE_CACHE_SHIFT;
+       pgoff_t index = start_byte >> PAGE_SHIFT;
+       pgoff_t end = end_byte >> PAGE_SHIFT;
        struct pagevec pvec;
        int nr_pages;
        int ret = 0;
@@ -550,7 +550,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
                pgoff_t offset = old->index;
                freepage = mapping->a_ops->freepage;
 
-               page_cache_get(new);
+               get_page(new);
                new->mapping = mapping;
                new->index = offset;
 
@@ -572,7 +572,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
                radix_tree_preload_end();
                if (freepage)
                        freepage(old);
-               page_cache_release(old);
+               put_page(old);
        }
 
        return error;
@@ -651,7 +651,7 @@ static int __add_to_page_cache_locked(struct page *page,
                return error;
        }
 
-       page_cache_get(page);
+       get_page(page);
        page->mapping = mapping;
        page->index = offset;
 
@@ -675,7 +675,7 @@ err_insert:
        spin_unlock_irq(&mapping->tree_lock);
        if (!huge)
                mem_cgroup_cancel_charge(page, memcg, false);
-       page_cache_release(page);
+       put_page(page);
        return error;
 }
 
@@ -1083,7 +1083,7 @@ repeat:
                 * include/linux/pagemap.h for details.
                 */
                if (unlikely(page != *pagep)) {
-                       page_cache_release(page);
+                       put_page(page);
                        goto repeat;
                }
        }
@@ -1121,7 +1121,7 @@ repeat:
                /* Has the page been truncated? */
                if (unlikely(page->mapping != mapping)) {
                        unlock_page(page);
-                       page_cache_release(page);
+                       put_page(page);
                        goto repeat;
                }
                VM_BUG_ON_PAGE(page->index != offset, page);
@@ -1168,7 +1168,7 @@ repeat:
        if (fgp_flags & FGP_LOCK) {
                if (fgp_flags & FGP_NOWAIT) {
                        if (!trylock_page(page)) {
-                               page_cache_release(page);
+                               put_page(page);
                                return NULL;
                        }
                } else {
@@ -1178,7 +1178,7 @@ repeat:
                /* Has the page been truncated? */
                if (unlikely(page->mapping != mapping)) {
                        unlock_page(page);
-                       page_cache_release(page);
+                       put_page(page);
                        goto repeat;
                }
                VM_BUG_ON_PAGE(page->index != offset, page);
@@ -1209,7 +1209,7 @@ no_page:
                err = add_to_page_cache_lru(page, mapping, offset,
                                gfp_mask & GFP_RECLAIM_MASK);
                if (unlikely(err)) {
-                       page_cache_release(page);
+                       put_page(page);
                        page = NULL;
                        if (err == -EEXIST)
                                goto repeat;
@@ -1278,7 +1278,7 @@ repeat:
 
                /* Has the page moved? */
                if (unlikely(page != *slot)) {
-                       page_cache_release(page);
+                       put_page(page);
                        goto repeat;
                }
 export:
@@ -1343,7 +1343,7 @@ repeat:
 
                /* Has the page moved? */
                if (unlikely(page != *slot)) {
-                       page_cache_release(page);
+                       put_page(page);
                        goto repeat;
                }
 
@@ -1405,7 +1405,7 @@ repeat:
 
                /* Has the page moved? */
                if (unlikely(page != *slot)) {
-                       page_cache_release(page);
+                       put_page(page);
                        goto repeat;
                }
 
@@ -1415,7 +1415,7 @@ repeat:
                 * negatives, which is just confusing to the caller.
                 */
                if (page->mapping == NULL || page->index != iter.index) {
-                       page_cache_release(page);
+                       put_page(page);
                        break;
                }
 
@@ -1482,7 +1482,7 @@ repeat:
 
                /* Has the page moved? */
                if (unlikely(page != *slot)) {
-                       page_cache_release(page);
+                       put_page(page);
                        goto repeat;
                }
 
@@ -1549,7 +1549,7 @@ repeat:
 
                /* Has the page moved? */
                if (unlikely(page != *slot)) {
-                       page_cache_release(page);
+                       put_page(page);
                        goto repeat;
                }
 export:
@@ -1610,11 +1610,11 @@ static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
        unsigned int prev_offset;
        int error = 0;
 
-       index = *ppos >> PAGE_CACHE_SHIFT;
-       prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
-       prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
-       last_index = (*ppos + iter->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
-       offset = *ppos & ~PAGE_CACHE_MASK;
+       index = *ppos >> PAGE_SHIFT;
+       prev_index = ra->prev_pos >> PAGE_SHIFT;
+       prev_offset = ra->prev_pos & (PAGE_SIZE-1);
+       last_index = (*ppos + iter->count + PAGE_SIZE-1) >> PAGE_SHIFT;
+       offset = *ppos & ~PAGE_MASK;
 
        for (;;) {
                struct page *page;
@@ -1648,7 +1648,7 @@ find_page:
                        if (PageUptodate(page))
                                goto page_ok;
 
-                       if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
+                       if (inode->i_blkbits == PAGE_SHIFT ||
                                        !mapping->a_ops->is_partially_uptodate)
                                goto page_not_up_to_date;
                        if (!trylock_page(page))
@@ -1672,18 +1672,18 @@ page_ok:
                 */
 
                isize = i_size_read(inode);
-               end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
+               end_index = (isize - 1) >> PAGE_SHIFT;
                if (unlikely(!isize || index > end_index)) {
-                       page_cache_release(page);
+                       put_page(page);
                        goto out;
                }
 
                /* nr is the maximum number of bytes to copy from this page */
-               nr = PAGE_CACHE_SIZE;
+               nr = PAGE_SIZE;
                if (index == end_index) {
-                       nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
+                       nr = ((isize - 1) & ~PAGE_MASK) + 1;
                        if (nr <= offset) {
-                               page_cache_release(page);
+                               put_page(page);
                                goto out;
                        }
                }
@@ -1711,11 +1711,11 @@ page_ok:
 
                ret = copy_page_to_iter(page, offset, nr, iter);
                offset += ret;
-               index += offset >> PAGE_CACHE_SHIFT;
-               offset &= ~PAGE_CACHE_MASK;
+               index += offset >> PAGE_SHIFT;
+               offset &= ~PAGE_MASK;
                prev_offset = offset;
 
-               page_cache_release(page);
+               put_page(page);
                written += ret;
                if (!iov_iter_count(iter))
                        goto out;
@@ -1735,7 +1735,7 @@ page_not_up_to_date_locked:
                /* Did it get truncated before we got the lock? */
                if (!page->mapping) {
                        unlock_page(page);
-                       page_cache_release(page);
+                       put_page(page);
                        continue;
                }
 
@@ -1757,7 +1757,7 @@ readpage:
 
                if (unlikely(error)) {
                        if (error == AOP_TRUNCATED_PAGE) {
-                               page_cache_release(page);
+                               put_page(page);
                                error = 0;
                                goto find_page;
                        }
@@ -1774,7 +1774,7 @@ readpage:
                                         * invalidate_mapping_pages got it
                                         */
                                        unlock_page(page);
-                                       page_cache_release(page);
+                                       put_page(page);
                                        goto find_page;
                                }
                                unlock_page(page);
@@ -1789,7 +1789,7 @@ readpage:
 
 readpage_error:
                /* UHHUH! A synchronous read error occurred. Report it */
-               page_cache_release(page);
+               put_page(page);
                goto out;
 
 no_cached_page:
@@ -1805,7 +1805,7 @@ no_cached_page:
                error = add_to_page_cache_lru(page, mapping, index,
                                mapping_gfp_constraint(mapping, GFP_KERNEL));
                if (error) {
-                       page_cache_release(page);
+                       put_page(page);
                        if (error == -EEXIST) {
                                error = 0;
                                goto find_page;
@@ -1817,10 +1817,10 @@ no_cached_page:
 
 out:
        ra->prev_pos = prev_index;
-       ra->prev_pos <<= PAGE_CACHE_SHIFT;
+       ra->prev_pos <<= PAGE_SHIFT;
        ra->prev_pos |= prev_offset;
 
-       *ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
+       *ppos = ((loff_t)index << PAGE_SHIFT) + offset;
        file_accessed(filp);
        return written ? written : error;
 }
@@ -1912,7 +1912,7 @@ static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
                else if (ret == -EEXIST)
                        ret = 0; /* losing race to add is OK */
 
-               page_cache_release(page);
+               put_page(page);
 
        } while (ret == AOP_TRUNCATED_PAGE);
 
@@ -2022,8 +2022,8 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        loff_t size;
        int ret = 0;
 
-       size = round_up(i_size_read(inode), PAGE_CACHE_SIZE);
-       if (offset >= size >> PAGE_CACHE_SHIFT)
+       size = round_up(i_size_read(inode), PAGE_SIZE);
+       if (offset >= size >> PAGE_SHIFT)
                return VM_FAULT_SIGBUS;
 
        /*
@@ -2049,7 +2049,7 @@ retry_find:
        }
 
        if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
-               page_cache_release(page);
+               put_page(page);
                return ret | VM_FAULT_RETRY;
        }
 
@@ -2072,10 +2072,10 @@ retry_find:
         * Found the page and have a reference on it.
         * We must recheck i_size under page lock.
         */
-       size = round_up(i_size_read(inode), PAGE_CACHE_SIZE);
-       if (unlikely(offset >= size >> PAGE_CACHE_SHIFT)) {
+       size = round_up(i_size_read(inode), PAGE_SIZE);
+       if (unlikely(offset >= size >> PAGE_SHIFT)) {
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
                return VM_FAULT_SIGBUS;
        }
 
@@ -2120,7 +2120,7 @@ page_not_uptodate:
                if (!PageUptodate(page))
                        error = -EIO;
        }
-       page_cache_release(page);
+       put_page(page);
 
        if (!error || error == AOP_TRUNCATED_PAGE)
                goto retry_find;
@@ -2164,7 +2164,7 @@ repeat:
 
                /* Has the page moved? */
                if (unlikely(page != *slot)) {
-                       page_cache_release(page);
+                       put_page(page);
                        goto repeat;
                }
 
@@ -2178,8 +2178,8 @@ repeat:
                if (page->mapping != mapping || !PageUptodate(page))
                        goto unlock;
 
-               size = round_up(i_size_read(mapping->host), PAGE_CACHE_SIZE);
-               if (page->index >= size >> PAGE_CACHE_SHIFT)
+               size = round_up(i_size_read(mapping->host), PAGE_SIZE);
+               if (page->index >= size >> PAGE_SHIFT)
                        goto unlock;
 
                pte = vmf->pte + page->index - vmf->pgoff;
@@ -2195,7 +2195,7 @@ repeat:
 unlock:
                unlock_page(page);
 skip:
-               page_cache_release(page);
+               put_page(page);
 next:
                if (iter.index == vmf->max_pgoff)
                        break;
@@ -2278,7 +2278,7 @@ static struct page *wait_on_page_read(struct page *page)
        if (!IS_ERR(page)) {
                wait_on_page_locked(page);
                if (!PageUptodate(page)) {
-                       page_cache_release(page);
+                       put_page(page);
                        page = ERR_PTR(-EIO);
                }
        }
@@ -2301,7 +2301,7 @@ repeat:
                        return ERR_PTR(-ENOMEM);
                err = add_to_page_cache_lru(page, mapping, index, gfp);
                if (unlikely(err)) {
-                       page_cache_release(page);
+                       put_page(page);
                        if (err == -EEXIST)
                                goto repeat;
                        /* Presumably ENOMEM for radix tree node */
@@ -2311,7 +2311,7 @@ repeat:
 filler:
                err = filler(data, page);
                if (err < 0) {
-                       page_cache_release(page);
+                       put_page(page);
                        return ERR_PTR(err);
                }
 
@@ -2364,7 +2364,7 @@ filler:
        /* Case c or d, restart the operation */
        if (!page->mapping) {
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
                goto repeat;
        }
 
@@ -2511,7 +2511,7 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
        struct iov_iter data;
 
        write_len = iov_iter_count(from);
-       end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;
+       end = (pos + write_len - 1) >> PAGE_SHIFT;
 
        written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
        if (written)
@@ -2525,7 +2525,7 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
         */
        if (mapping->nrpages) {
                written = invalidate_inode_pages2_range(mapping,
-                                       pos >> PAGE_CACHE_SHIFT, end);
+                                       pos >> PAGE_SHIFT, end);
                /*
                 * If a page can not be invalidated, return 0 to fall back
                 * to buffered write.
@@ -2550,7 +2550,7 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
         */
        if (mapping->nrpages) {
                invalidate_inode_pages2_range(mapping,
-                                             pos >> PAGE_CACHE_SHIFT, end);
+                                             pos >> PAGE_SHIFT, end);
        }
 
        if (written > 0) {
@@ -2611,8 +2611,8 @@ ssize_t generic_perform_write(struct file *file,
                size_t copied;          /* Bytes copied from user */
                void *fsdata;
 
-               offset = (pos & (PAGE_CACHE_SIZE - 1));
-               bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
+               offset = (pos & (PAGE_SIZE - 1));
+               bytes = min_t(unsigned long, PAGE_SIZE - offset,
                                                iov_iter_count(i));
 
 again:
@@ -2665,7 +2665,7 @@ again:
                         * because not all segments in the iov can be copied at
                         * once without a pagefault.
                         */
-                       bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
+                       bytes = min_t(unsigned long, PAGE_SIZE - offset,
                                                iov_iter_single_seg_count(i));
                        goto again;
                }
@@ -2752,8 +2752,8 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
                        iocb->ki_pos = endbyte + 1;
                        written += status;
                        invalidate_mapping_pages(mapping,
-                                                pos >> PAGE_CACHE_SHIFT,
-                                                endbyte >> PAGE_CACHE_SHIFT);
+                                                pos >> PAGE_SHIFT,
+                                                endbyte >> PAGE_SHIFT);
                } else {
                        /*
                         * We don't know how much we wrote, so just return
index 7f1c4fb77cfa5e9c6ca4fd4c04c83e01a9b273b1..c057784c844456f237adc9065bb4c9a3c230fdc6 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1,4 +1,3 @@
-#define __DISABLE_GUP_DEPRECATED 1
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/err.h>
@@ -839,7 +838,7 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
  *      if (locked)
  *          up_read(&mm->mmap_sem);
  */
-long get_user_pages_locked6(unsigned long start, unsigned long nr_pages,
+long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
                           int write, int force, struct page **pages,
                           int *locked)
 {
@@ -847,7 +846,7 @@ long get_user_pages_locked6(unsigned long start, unsigned long nr_pages,
                                       write, force, pages, NULL, locked, true,
                                       FOLL_TOUCH);
 }
-EXPORT_SYMBOL(get_user_pages_locked6);
+EXPORT_SYMBOL(get_user_pages_locked);
 
 /*
  * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows to
@@ -892,13 +891,13 @@ EXPORT_SYMBOL(__get_user_pages_unlocked);
  * or if "force" shall be set to 1 (get_user_pages_fast misses the
  * "force" parameter).
  */
-long get_user_pages_unlocked5(unsigned long start, unsigned long nr_pages,
+long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
                             int write, int force, struct page **pages)
 {
        return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
                                         write, force, pages, FOLL_TOUCH);
 }
-EXPORT_SYMBOL(get_user_pages_unlocked5);
+EXPORT_SYMBOL(get_user_pages_unlocked);
 
 /*
  * get_user_pages_remote() - pin user pages in memory
@@ -972,7 +971,7 @@ EXPORT_SYMBOL(get_user_pages_remote);
  * and mm being operated on are the current task's.  We also
  * obviously don't pass FOLL_REMOTE in here.
  */
-long get_user_pages6(unsigned long start, unsigned long nr_pages,
+long get_user_pages(unsigned long start, unsigned long nr_pages,
                int write, int force, struct page **pages,
                struct vm_area_struct **vmas)
 {
@@ -980,7 +979,7 @@ long get_user_pages6(unsigned long start, unsigned long nr_pages,
                                       write, force, pages, vmas, NULL, false,
                                       FOLL_TOUCH);
 }
-EXPORT_SYMBOL(get_user_pages6);
+EXPORT_SYMBOL(get_user_pages);
 
 /**
  * populate_vma_page_range() -  populate a range of pages in the vma.
@@ -1107,7 +1106,7 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
  * @addr: user address
  *
  * Returns struct page pointer of user page pinned for dump,
- * to be freed afterwards by page_cache_release() or put_page().
+ * to be freed afterwards by put_page().
  *
  * Returns NULL on any kind of failure - a hole must then be inserted into
  * the corefile, to preserve alignment with its headers; and also returns
@@ -1491,7 +1490,6 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                        struct page **pages)
 {
-       struct mm_struct *mm = current->mm;
        int nr, ret;
 
        start &= PAGE_MASK;
@@ -1503,8 +1501,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                start += nr << PAGE_SHIFT;
                pages += nr;
 
-               ret = get_user_pages_unlocked(current, mm, start,
-                                             nr_pages - nr, write, 0, pages);
+               ret = get_user_pages_unlocked(start, nr_pages - nr, write, 0, pages);
 
                /* Have to be a bit careful with return values */
                if (nr > 0) {
@@ -1519,38 +1516,3 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 }
 
 #endif /* CONFIG_HAVE_GENERIC_RCU_GUP */
-
-long get_user_pages8(struct task_struct *tsk, struct mm_struct *mm,
-                    unsigned long start, unsigned long nr_pages,
-                    int write, int force, struct page **pages,
-                    struct vm_area_struct **vmas)
-{
-       WARN_ONCE(tsk != current, "get_user_pages() called on remote task");
-       WARN_ONCE(mm != current->mm, "get_user_pages() called on remote mm");
-
-       return get_user_pages6(start, nr_pages, write, force, pages, vmas);
-}
-EXPORT_SYMBOL(get_user_pages8);
-
-long get_user_pages_locked8(struct task_struct *tsk, struct mm_struct *mm,
-                           unsigned long start, unsigned long nr_pages,
-                           int write, int force, struct page **pages, int *locked)
-{
-       WARN_ONCE(tsk != current, "get_user_pages_locked() called on remote task");
-       WARN_ONCE(mm != current->mm, "get_user_pages_locked() called on remote mm");
-
-       return get_user_pages_locked6(start, nr_pages, write, force, pages, locked);
-}
-EXPORT_SYMBOL(get_user_pages_locked8);
-
-long get_user_pages_unlocked7(struct task_struct *tsk, struct mm_struct *mm,
-                                 unsigned long start, unsigned long nr_pages,
-                                 int write, int force, struct page **pages)
-{
-       WARN_ONCE(tsk != current, "get_user_pages_unlocked() called on remote task");
-       WARN_ONCE(mm != current->mm, "get_user_pages_unlocked() called on remote mm");
-
-       return get_user_pages_unlocked5(start, nr_pages, write, force, pages);
-}
-EXPORT_SYMBOL(get_user_pages_unlocked7);
-
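With the numbered transition symbols removed, callers use the short names against the signatures exported above. A minimal sketch (addr is a caller-supplied user address; error handling elided):

	struct page *pages[1];
	long n;

	n = get_user_pages(addr, 1, /* write */ 1, /* force */ 0, pages, NULL);
	if (n == 1)
		put_page(pages[0]);	/* drop the reference the pin took */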
index 86f9f8b82f8ecfc47b92554e3a82501346b0fcb8..f7daa7de8f4867dc871edb8ea3a1ec6189e4d54f 100644 (file)
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -232,7 +232,7 @@ retry:
        return READ_ONCE(huge_zero_page);
 }
 
-static void put_huge_zero_page(void)
+void put_huge_zero_page(void)
 {
        /*
         * Counter should never go to zero here. Only shrinker can put
@@ -1684,12 +1684,12 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
        if (vma_is_dax(vma)) {
                spin_unlock(ptl);
                if (is_huge_zero_pmd(orig_pmd))
-                       put_huge_zero_page();
+                       tlb_remove_page(tlb, pmd_page(orig_pmd));
        } else if (is_huge_zero_pmd(orig_pmd)) {
                pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
                atomic_long_dec(&tlb->mm->nr_ptes);
                spin_unlock(ptl);
-               put_huge_zero_page();
+               tlb_remove_page(tlb, pmd_page(orig_pmd));
        } else {
                struct page *page = pmd_page(orig_pmd);
                page_remove_rmap(page, true);
@@ -1960,10 +1960,9 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
                 * page fault if needed.
                 */
                return 0;
-       if (vma->vm_ops)
+       if (vma->vm_ops || (vm_flags & VM_NO_THP))
                /* khugepaged not yet working on file or special mappings */
                return 0;
-       VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
        hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
        hend = vma->vm_end & HPAGE_PMD_MASK;
        if (hstart < hend)
@@ -2352,8 +2351,7 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
                return false;
        if (is_vma_temporary_stack(vma))
                return false;
-       VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
-       return true;
+       return !(vma->vm_flags & VM_NO_THP);
 }
 
 static void collapse_huge_page(struct mm_struct *mm,
@@ -3454,7 +3452,7 @@ next:
                }
        }
 
-       pr_info("%lu of %lu THP split", split, total);
+       pr_info("%lu of %lu THP split\n", split, total);
 
        return 0;
 }
@@ -3465,7 +3463,7 @@ static int __init split_huge_pages_debugfs(void)
 {
        void *ret;
 
-       ret = debugfs_create_file("split_huge_pages", 0644, NULL, NULL,
+       ret = debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
                        &split_huge_pages_fops);
        if (!ret)
                pr_warn("Failed to create split_huge_pages in debugfs");
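Routing the huge zero page through tlb_remove_page() rather than dropping its reference immediately keeps the page alive until the mmu_gather flushes the TLB. A sketch of the ordering this enforces (not the literal call chain):

	/*
	 * zap_huge_pmd()    -> tlb_remove_page(tlb, zero page)   queued
	 * ...more unmapping batched...
	 * tlb_finish_mmu()  -> TLB flush, then the queued references drop,
	 *                      so no CPU still holds a stale translation to a
	 *                      zero page whose refcount has already reached 0.
	 */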
index 06058eaa173bc25b2e3dc0186d0947ae5f1887d8..19d0d08b396fb1356bc4e834d2aaec2977ee173e 100644 (file)
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3346,7 +3346,7 @@ retry_avoidcopy:
                        old_page != pagecache_page)
                outside_reserve = 1;
 
-       page_cache_get(old_page);
+       get_page(old_page);
 
        /*
         * Drop page table lock as buddy allocator may be called. It will
@@ -3364,7 +3364,7 @@ retry_avoidcopy:
                 * may get SIGKILLed if it later faults.
                 */
                if (outside_reserve) {
-                       page_cache_release(old_page);
+                       put_page(old_page);
                        BUG_ON(huge_pte_none(pte));
                        unmap_ref_private(mm, vma, old_page, address);
                        BUG_ON(huge_pte_none(pte));
@@ -3425,9 +3425,9 @@ retry_avoidcopy:
        spin_unlock(ptl);
        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 out_release_all:
-       page_cache_release(new_page);
+       put_page(new_page);
 out_release_old:
-       page_cache_release(old_page);
+       put_page(old_page);
 
        spin_lock(ptl); /* Caller expects lock to be held */
        return ret;
index a01147359f3bb71c9c34b9c14f97ad0a730d33e5..07427d3fcead169febeb41d852adcbe0c2139c8d 100644 (file)
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -170,7 +170,7 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
                page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
                                                                vma, index);
                if (page)
-                       page_cache_release(page);
+                       put_page(page);
        }
 
        return 0;
@@ -204,14 +204,14 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,
                page = find_get_entry(mapping, index);
                if (!radix_tree_exceptional_entry(page)) {
                        if (page)
-                               page_cache_release(page);
+                               put_page(page);
                        continue;
                }
                swap = radix_to_swp_entry(page);
                page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
                                                                NULL, 0);
                if (page)
-                       page_cache_release(page);
+                       put_page(page);
        }
 
        lru_add_drain();        /* Push any new pages onto the LRU now */
index 36db05fa8acb8b49d95130a00de9b22233dac031..fe787f5c41bd1332eeca88e85a3f0bf483bb552b 100644 (file)
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -207,6 +207,7 @@ static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
 /* "mc" and its members are protected by cgroup_mutex */
 static struct move_charge_struct {
        spinlock_t        lock; /* for from, to */
+       struct mm_struct  *mm;
        struct mem_cgroup *from;
        struct mem_cgroup *to;
        unsigned long flags;
@@ -4667,6 +4668,8 @@ static void __mem_cgroup_clear_mc(void)
 
 static void mem_cgroup_clear_mc(void)
 {
+       struct mm_struct *mm = mc.mm;
+
        /*
         * we must clear moving_task before waking up waiters at the end of
         * task migration.
@@ -4676,7 +4679,10 @@ static void mem_cgroup_clear_mc(void)
        spin_lock(&mc.lock);
        mc.from = NULL;
        mc.to = NULL;
+       mc.mm = NULL;
        spin_unlock(&mc.lock);
+
+       mmput(mm);
 }
 
 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
@@ -4733,6 +4739,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
                VM_BUG_ON(mc.moved_swap);
 
                spin_lock(&mc.lock);
+               mc.mm = mm;
                mc.from = from;
                mc.to = memcg;
                mc.flags = move_flags;
@@ -4742,8 +4749,9 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
                ret = mem_cgroup_precharge_mc(mm);
                if (ret)
                        mem_cgroup_clear_mc();
+       } else {
+               mmput(mm);
        }
-       mmput(mm);
        return ret;
 }
 
@@ -4852,11 +4860,11 @@ put:                    /* get_mctgt_type() gets the page */
        return ret;
 }
 
-static void mem_cgroup_move_charge(struct mm_struct *mm)
+static void mem_cgroup_move_charge(void)
 {
        struct mm_walk mem_cgroup_move_charge_walk = {
                .pmd_entry = mem_cgroup_move_charge_pte_range,
-               .mm = mm,
+               .mm = mc.mm,
        };
 
        lru_add_drain_all();
@@ -4868,7 +4876,7 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
        atomic_inc(&mc.from->moving_account);
        synchronize_rcu();
 retry:
-       if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+       if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
                /*
                 * Someone who is holding the mmap_sem might be waiting in
                 * waitq. So we cancel all extra charges, wake up all waiters,
@@ -4885,23 +4893,16 @@ retry:
         * additional charge, the page walk just aborts.
         */
        walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
-       up_read(&mm->mmap_sem);
+       up_read(&mc.mm->mmap_sem);
        atomic_dec(&mc.from->moving_account);
 }
 
-static void mem_cgroup_move_task(struct cgroup_taskset *tset)
+static void mem_cgroup_move_task(void)
 {
-       struct cgroup_subsys_state *css;
-       struct task_struct *p = cgroup_taskset_first(tset, &css);
-       struct mm_struct *mm = get_task_mm(p);
-
-       if (mm) {
-               if (mc.to)
-                       mem_cgroup_move_charge(mm);
-               mmput(mm);
-       }
-       if (mc.to)
+       if (mc.to) {
+               mem_cgroup_move_charge();
                mem_cgroup_clear_mc();
+       }
 }
 #else  /* !CONFIG_MMU */
 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
@@ -4911,7 +4912,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
 {
 }
-static void mem_cgroup_move_task(struct cgroup_taskset *tset)
+static void mem_cgroup_move_task(void)
 {
 }
 #endif
@@ -5195,7 +5196,7 @@ struct cgroup_subsys memory_cgrp_subsys = {
        .css_reset = mem_cgroup_css_reset,
        .can_attach = mem_cgroup_can_attach,
        .cancel_attach = mem_cgroup_cancel_attach,
-       .attach = mem_cgroup_move_task,
+       .post_attach = mem_cgroup_move_task,
        .bind = mem_cgroup_bind,
        .dfl_cftypes = memory_files,
        .legacy_cftypes = mem_cgroup_legacy_files,
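Stashing the mm in struct move_charge_struct is what lets the charge moving run from the new post_attach hook, after the cgroup attach has fully completed. The resulting flow, roughly:

	/*
	 * can_attach():   mc.mm = mm; mc.from = ...; mc.to = memcg;
	 *                 (mm reference held across the attach)
	 * post_attach():  mem_cgroup_move_task()
	 *                   -> mem_cgroup_move_charge()  walks mc.mm
	 *                   -> mem_cgroup_clear_mc()     mmput()s the mm
	 */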
index 5a544c6c0717c4908fe23e17a976847fb22981ce..ca5acee53b7a3b402e24fac99e358323e913e225 100644 (file)
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -538,7 +538,7 @@ static int delete_from_lru_cache(struct page *p)
                /*
                 * drop the page count elevated by isolate_lru_page()
                 */
-               page_cache_release(p);
+               put_page(p);
                return 0;
        }
        return -EIO;
@@ -888,7 +888,15 @@ int get_hwpoison_page(struct page *page)
                }
        }
 
-       return get_page_unless_zero(head);
+       if (get_page_unless_zero(head)) {
+               if (head == compound_head(page))
+                       return 1;
+
+               pr_info("MCE: %#lx cannot catch tail\n", page_to_pfn(page));
+               put_page(head);
+       }
+
+       return 0;
 }
 EXPORT_SYMBOL_GPL(get_hwpoison_page);
 
index 098f00d0546170d6a969d4994b27cfab7a4955dc..52c218e2b724cdb8db1678c4070f31ab033daf65 100644 (file)
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -789,6 +789,46 @@ out:
        return pfn_to_page(pfn);
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
+                               pmd_t pmd)
+{
+       unsigned long pfn = pmd_pfn(pmd);
+
+       /*
+        * There is no pmd_special() but there may be special pmds, e.g.
+        * in a direct-access (dax) mapping, so let's just replicate the
+        * !HAVE_PTE_SPECIAL case from vm_normal_page() here.
+        */
+       if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
+               if (vma->vm_flags & VM_MIXEDMAP) {
+                       if (!pfn_valid(pfn))
+                               return NULL;
+                       goto out;
+               } else {
+                       unsigned long off;
+                       off = (addr - vma->vm_start) >> PAGE_SHIFT;
+                       if (pfn == vma->vm_pgoff + off)
+                               return NULL;
+                       if (!is_cow_mapping(vma->vm_flags))
+                               return NULL;
+               }
+       }
+
+       if (is_zero_pfn(pfn))
+               return NULL;
+       if (unlikely(pfn > highest_memmap_pfn))
+               return NULL;
+
+       /*
+        * NOTE! We still have PageReserved() pages in the page tables.
+        * eg. VDSO mappings can cause them to exist.
+        */
+out:
+       return pfn_to_page(pfn);
+}
+#endif
+
 /*
  * copy one vm_area from one task to the other. Assumes the page tables
  * already present in the new task to be cleared in the whole range
@@ -1182,15 +1222,8 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
                next = pmd_addr_end(addr, end);
                if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
                        if (next - addr != HPAGE_PMD_SIZE) {
-#ifdef CONFIG_DEBUG_VM
-                               if (!rwsem_is_locked(&tlb->mm->mmap_sem)) {
-                                       pr_err("%s: mmap_sem is unlocked! addr=0x%lx end=0x%lx vma->vm_start=0x%lx vma->vm_end=0x%lx\n",
-                                               __func__, addr, end,
-                                               vma->vm_start,
-                                               vma->vm_end);
-                                       BUG();
-                               }
-#endif
+                               VM_BUG_ON_VMA(vma_is_anonymous(vma) &&
+                                   !rwsem_is_locked(&tlb->mm->mmap_sem), vma);
                                split_huge_pmd(vma, pmd, addr);
                        } else if (zap_huge_pmd(tlb, vma, pmd, addr))
                                goto next;
@@ -2054,7 +2087,7 @@ static inline int wp_page_reuse(struct mm_struct *mm,
                VM_BUG_ON_PAGE(PageAnon(page), page);
                mapping = page->mapping;
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
 
                if ((dirtied || page_mkwrite) && mapping) {
                        /*
@@ -2188,7 +2221,7 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
        }
 
        if (new_page)
-               page_cache_release(new_page);
+               put_page(new_page);
 
        pte_unmap_unlock(page_table, ptl);
        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
@@ -2203,14 +2236,14 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
                                munlock_vma_page(old_page);
                        unlock_page(old_page);
                }
-               page_cache_release(old_page);
+               put_page(old_page);
        }
        return page_copied ? VM_FAULT_WRITE : 0;
 oom_free_new:
-       page_cache_release(new_page);
+       put_page(new_page);
 oom:
        if (old_page)
-               page_cache_release(old_page);
+               put_page(old_page);
        return VM_FAULT_OOM;
 }
 
@@ -2258,7 +2291,7 @@ static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma,
 {
        int page_mkwrite = 0;
 
-       page_cache_get(old_page);
+       get_page(old_page);
 
        if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
                int tmp;
@@ -2267,7 +2300,7 @@ static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma,
                tmp = do_page_mkwrite(vma, old_page, address);
                if (unlikely(!tmp || (tmp &
                                      (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
-                       page_cache_release(old_page);
+                       put_page(old_page);
                        return tmp;
                }
                /*
@@ -2281,7 +2314,7 @@ static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma,
                if (!pte_same(*page_table, orig_pte)) {
                        unlock_page(old_page);
                        pte_unmap_unlock(page_table, ptl);
-                       page_cache_release(old_page);
+                       put_page(old_page);
                        return 0;
                }
                page_mkwrite = 1;
@@ -2341,7 +2374,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
         */
        if (PageAnon(old_page) && !PageKsm(old_page)) {
                if (!trylock_page(old_page)) {
-                       page_cache_get(old_page);
+                       get_page(old_page);
                        pte_unmap_unlock(page_table, ptl);
                        lock_page(old_page);
                        page_table = pte_offset_map_lock(mm, pmd, address,
@@ -2349,10 +2382,10 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        if (!pte_same(*page_table, orig_pte)) {
                                unlock_page(old_page);
                                pte_unmap_unlock(page_table, ptl);
-                               page_cache_release(old_page);
+                               put_page(old_page);
                                return 0;
                        }
-                       page_cache_release(old_page);
+                       put_page(old_page);
                }
                if (reuse_swap_page(old_page)) {
                        /*
@@ -2375,7 +2408,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
        /*
         * Ok, we need to copy. Oh, well..
         */
-       page_cache_get(old_page);
+       get_page(old_page);
 
        pte_unmap_unlock(page_table, ptl);
        return wp_page_copy(mm, vma, address, page_table, pmd,
@@ -2400,7 +2433,6 @@ static inline void unmap_mapping_range_tree(struct rb_root *root,
 
                vba = vma->vm_pgoff;
                vea = vba + vma_pages(vma) - 1;
-               /* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
                zba = details->first_index;
                if (zba < vba)
                        zba = vba;
@@ -2619,7 +2651,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                 * parallel locked swapcache.
                 */
                unlock_page(swapcache);
-               page_cache_release(swapcache);
+               put_page(swapcache);
        }
 
        if (flags & FAULT_FLAG_WRITE) {
@@ -2641,10 +2673,10 @@ out_nomap:
 out_page:
        unlock_page(page);
 out_release:
-       page_cache_release(page);
+       put_page(page);
        if (page != swapcache) {
                unlock_page(swapcache);
-               page_cache_release(swapcache);
+               put_page(swapcache);
        }
        return ret;
 }
@@ -2752,7 +2784,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
        if (userfaultfd_missing(vma)) {
                pte_unmap_unlock(page_table, ptl);
                mem_cgroup_cancel_charge(page, memcg, false);
-               page_cache_release(page);
+               put_page(page);
                return handle_userfault(vma, address, flags,
                                        VM_UFFD_MISSING);
        }
@@ -2771,10 +2803,10 @@ unlock:
        return 0;
 release:
        mem_cgroup_cancel_charge(page, memcg, false);
-       page_cache_release(page);
+       put_page(page);
        goto unlock;
 oom_free_page:
-       page_cache_release(page);
+       put_page(page);
 oom:
        return VM_FAULT_OOM;
 }
@@ -2807,7 +2839,7 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
        if (unlikely(PageHWPoison(vmf.page))) {
                if (ret & VM_FAULT_LOCKED)
                        unlock_page(vmf.page);
-               page_cache_release(vmf.page);
+               put_page(vmf.page);
                return VM_FAULT_HWPOISON;
        }
 
@@ -2996,7 +3028,7 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        if (unlikely(!pte_same(*pte, orig_pte))) {
                pte_unmap_unlock(pte, ptl);
                unlock_page(fault_page);
-               page_cache_release(fault_page);
+               put_page(fault_page);
                return ret;
        }
        do_set_pte(vma, address, fault_page, pte, false, false);
@@ -3024,7 +3056,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                return VM_FAULT_OOM;
 
        if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false)) {
-               page_cache_release(new_page);
+               put_page(new_page);
                return VM_FAULT_OOM;
        }
 
@@ -3041,7 +3073,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                pte_unmap_unlock(pte, ptl);
                if (fault_page) {
                        unlock_page(fault_page);
-                       page_cache_release(fault_page);
+                       put_page(fault_page);
                } else {
                        /*
                         * The fault handler has no page to lock, so it holds
@@ -3057,7 +3089,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        pte_unmap_unlock(pte, ptl);
        if (fault_page) {
                unlock_page(fault_page);
-               page_cache_release(fault_page);
+               put_page(fault_page);
        } else {
                /*
                 * The fault handler has no page to lock, so it holds
@@ -3068,7 +3100,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        return ret;
 uncharge_out:
        mem_cgroup_cancel_charge(new_page, memcg, false);
-       page_cache_release(new_page);
+       put_page(new_page);
        return ret;
 }
 
@@ -3096,7 +3128,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                tmp = do_page_mkwrite(vma, fault_page, address);
                if (unlikely(!tmp ||
                                (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
-                       page_cache_release(fault_page);
+                       put_page(fault_page);
                        return tmp;
                }
        }
@@ -3105,7 +3137,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        if (unlikely(!pte_same(*pte, orig_pte))) {
                pte_unmap_unlock(pte, ptl);
                unlock_page(fault_page);
-               page_cache_release(fault_page);
+               put_page(fault_page);
                return ret;
        }
        do_set_pte(vma, address, fault_page, pte, true, false);
@@ -3736,7 +3768,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
                                                    buf, maddr + offset, bytes);
                        }
                        kunmap(page);
-                       page_cache_release(page);
+                       put_page(page);
                }
                len -= bytes;
                buf += bytes;
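
Every mm/ hunk in this stretch of the merge applies the same mechanical conversion: with the page cache now working in PAGE_SIZE units, the page_cache_get()/page_cache_release() wrappers are retired in favour of calling get_page()/put_page() directly. A minimal sketch of the idiom, assuming only <linux/mm.h> (with_page_ref is an illustrative helper, not from the patch):

    #include <linux/mm.h>

    /* Sketch: hold a reference across some use of the page. get_page()
     * bumps the refcount; put_page() drops it and frees the page when
     * the count reaches zero -- exactly what the old wrappers did.
     */
    static void with_page_ref(struct page *page)
    {
            get_page(page);          /* was: page_cache_get(page) */
            /* ... operate on the page ... */
            put_page(page);          /* was: page_cache_release(page) */
    }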
index 6c822a7b27e066148d38fd9ed47f80a3ad4de055..f9dfb18a4ebac9f2f36d798ce6fa6b4ecd4d3c77 100644 (file)
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -975,7 +975,13 @@ out:
                dec_zone_page_state(page, NR_ISOLATED_ANON +
                                page_is_file_cache(page));
                /* Soft-offlined page shouldn't go through lru cache list */
-               if (reason == MR_MEMORY_FAILURE) {
+               if (reason == MR_MEMORY_FAILURE && rc == MIGRATEPAGE_SUCCESS) {
+                       /*
+                        * With this release, we free the successfully migrated
+                        * page and intentionally set PG_HWPoison on the just
+                        * freed page. Although rather weird, this is how the
+                        * HWPoison flag works at the moment.
+                        */
                        put_page(page);
                        if (!test_set_page_hwpoison(page))
                                num_poisoned_pages_inc();
index 563f320454902df04e782f73aa8d0473656034c9..c0b5ba965200942741347500c0b6a739434b715d 100644 (file)
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -75,7 +75,7 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
 #endif
        if (page) {
                present = PageUptodate(page);
-               page_cache_release(page);
+               put_page(page);
        }
 
        return present;
@@ -211,7 +211,7 @@ static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *v
  * return values:
  *  zero    - success
  *  -EFAULT - vec points to an illegal address
- *  -EINVAL - addr is not a multiple of PAGE_CACHE_SIZE
+ *  -EINVAL - addr is not a multiple of PAGE_SIZE
  *  -ENOMEM - Addresses in the range [addr, addr + len] are
  *             invalid for the address space of this process, or
  *             specify one or more pages which are not currently
@@ -226,14 +226,14 @@ SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
        unsigned char *tmp;
 
        /* Check the start address: needs to be page-aligned.. */
-       if (start & ~PAGE_CACHE_MASK)
+       if (start & ~PAGE_MASK)
                return -EINVAL;
 
        /* ..and we need to be passed a valid user-space range */
        if (!access_ok(VERIFY_READ, (void __user *) start, len))
                return -ENOMEM;
 
-       /* This also avoids any overflows on PAGE_CACHE_ALIGN */
+       /* This also avoids any overflows on PAGE_ALIGN */
        pages = len >> PAGE_SHIFT;
        pages += (offset_in_page(len)) != 0;
 
index de8b6b6580c1eddaeca1ab2ee05350e5a6b31620..c8bd59a03c71563b73c146d35e00550b8119d4c0 100644 (file)
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -15,8 +15,6 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#define __DISABLE_GUP_DEPRECATED
-
 #include <linux/export.h>
 #include <linux/mm.h>
 #include <linux/vmacache.h>
@@ -141,7 +139,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                if (pages) {
                        pages[i] = virt_to_page(start);
                        if (pages[i])
-                               page_cache_get(pages[i]);
+                               get_page(pages[i]);
                }
                if (vmas)
                        vmas[i] = vma;
@@ -161,7 +159,7 @@ finish_or_fault:
  *   slab page or a secondary page from a compound page
  * - don't permit access to VMAs that don't support it, such as I/O mappings
  */
-long get_user_pages6(unsigned long start, unsigned long nr_pages,
+long get_user_pages(unsigned long start, unsigned long nr_pages,
                    int write, int force, struct page **pages,
                    struct vm_area_struct **vmas)
 {
@@ -175,15 +173,15 @@ long get_user_pages6(unsigned long start, unsigned long nr_pages,
        return __get_user_pages(current, current->mm, start, nr_pages, flags,
                                pages, vmas, NULL);
 }
-EXPORT_SYMBOL(get_user_pages6);
+EXPORT_SYMBOL(get_user_pages);
 
-long get_user_pages_locked6(unsigned long start, unsigned long nr_pages,
+long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
                            int write, int force, struct page **pages,
                            int *locked)
 {
-       return get_user_pages6(start, nr_pages, write, force, pages, NULL);
+       return get_user_pages(start, nr_pages, write, force, pages, NULL);
 }
-EXPORT_SYMBOL(get_user_pages_locked6);
+EXPORT_SYMBOL(get_user_pages_locked);
 
 long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
                               unsigned long start, unsigned long nr_pages,
@@ -199,13 +197,13 @@ long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 }
 EXPORT_SYMBOL(__get_user_pages_unlocked);
 
-long get_user_pages_unlocked5(unsigned long start, unsigned long nr_pages,
+long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
                             int write, int force, struct page **pages)
 {
        return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
                                         write, force, pages, 0);
 }
-EXPORT_SYMBOL(get_user_pages_unlocked5);
+EXPORT_SYMBOL(get_user_pages_unlocked);
 
 /**
  * follow_pfn - look up PFN at a user virtual address
@@ -1989,31 +1987,3 @@ static int __meminit init_admin_reserve(void)
        return 0;
 }
 subsys_initcall(init_admin_reserve);
-
-long get_user_pages8(struct task_struct *tsk, struct mm_struct *mm,
-                    unsigned long start, unsigned long nr_pages,
-                    int write, int force, struct page **pages,
-                    struct vm_area_struct **vmas)
-{
-       return get_user_pages6(start, nr_pages, write, force, pages, vmas);
-}
-EXPORT_SYMBOL(get_user_pages8);
-
-long get_user_pages_locked8(struct task_struct *tsk, struct mm_struct *mm,
-                           unsigned long start, unsigned long nr_pages,
-                           int write, int force, struct page **pages,
-                           int *locked)
-{
-       return get_user_pages_locked6(start, nr_pages, write,
-                                     force, pages, locked);
-}
-EXPORT_SYMBOL(get_user_pages_locked8);
-
-long get_user_pages_unlocked7(struct task_struct *tsk, struct mm_struct *mm,
-                             unsigned long start, unsigned long nr_pages,
-                             int write, int force, struct page **pages)
-{
-       return get_user_pages_unlocked5(start, nr_pages, write, force, pages);
-}
-EXPORT_SYMBOL(get_user_pages_unlocked7);
-
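
The nommu.c hunks above retire the temporarily numbered GUP symbols (get_user_pages6, get_user_pages_locked8 and friends) that bridged the removal of the tsk/mm arguments, restoring the plain names. A hedged sketch of a caller against the signature shown in this diff (pin_one_page is a hypothetical helper; the caller is assumed to hold mmap_sem for read):

    #include <linux/mm.h>

    /* Sketch: pin a single user page for writing with the restored
     * get_user_pages(start, nr_pages, write, force, pages, vmas)
     * signature, which implicitly operates on current->mm. It returns
     * the number of pages pinned, so 1 means success here.
     */
    static int pin_one_page(unsigned long addr, struct page **pagep)
    {
            long got = get_user_pages(addr & PAGE_MASK, 1, 1, 0, pagep, NULL);

            return got == 1 ? 0 : -EFAULT;
    }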
index 11ff8f75863105b773ee279fee5d6853e4680abb..bc5149d5ec38016da91a8b1c85aeca0193143f0c 100644 (file)
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1910,7 +1910,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
        if (gdtc->dirty > gdtc->bg_thresh)
                return true;
 
-       if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(gdtc))
+       if (wb_stat(wb, WB_RECLAIMABLE) >
+           wb_calc_thresh(gdtc->wb, gdtc->bg_thresh))
                return true;
 
        if (mdtc) {
@@ -1924,7 +1925,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
                if (mdtc->dirty > mdtc->bg_thresh)
                        return true;
 
-               if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(mdtc))
+               if (wb_stat(wb, WB_RECLAIMABLE) >
+                   wb_calc_thresh(mdtc->wb, mdtc->bg_thresh))
                        return true;
        }
 
@@ -2176,8 +2178,8 @@ int write_cache_pages(struct address_space *mapping,
                        cycled = 0;
                end = -1;
        } else {
-               index = wbc->range_start >> PAGE_CACHE_SHIFT;
-               end = wbc->range_end >> PAGE_CACHE_SHIFT;
+               index = wbc->range_start >> PAGE_SHIFT;
+               end = wbc->range_end >> PAGE_SHIFT;
                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                        range_whole = 1;
                cycled = 1; /* ignore range_cyclic tests */
@@ -2382,14 +2384,14 @@ int write_one_page(struct page *page, int wait)
                wait_on_page_writeback(page);
 
        if (clear_page_dirty_for_io(page)) {
-               page_cache_get(page);
+               get_page(page);
                ret = mapping->a_ops->writepage(page, &wbc);
                if (ret == 0 && wait) {
                        wait_on_page_writeback(page);
                        if (PageError(page))
                                ret = -EIO;
                }
-               page_cache_release(page);
+               put_page(page);
        } else {
                unlock_page(page);
        }
@@ -2431,7 +2433,7 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
                __inc_zone_page_state(page, NR_DIRTIED);
                __inc_wb_stat(wb, WB_RECLAIMABLE);
                __inc_wb_stat(wb, WB_DIRTIED);
-               task_io_account_write(PAGE_CACHE_SIZE);
+               task_io_account_write(PAGE_SIZE);
                current->nr_dirtied++;
                this_cpu_inc(bdp_ratelimits);
        }
@@ -2450,7 +2452,7 @@ void account_page_cleaned(struct page *page, struct address_space *mapping,
                mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
                dec_zone_page_state(page, NR_FILE_DIRTY);
                dec_wb_stat(wb, WB_RECLAIMABLE);
-               task_io_account_cancelled_write(PAGE_CACHE_SIZE);
+               task_io_account_cancelled_write(PAGE_SIZE);
        }
 }
 
index 59de90d5d3a362c7293e624e748cd7b6ebab73b3..c1069efcc4d7477a5fc517303b67747f89b77074 100644 (file)
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6485,7 +6485,7 @@ int __meminit init_per_zone_wmark_min(void)
        setup_per_zone_inactive_ratio();
        return 0;
 }
-module_init(init_per_zone_wmark_min)
+core_initcall(init_per_zone_wmark_min)
 
 /*
  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
index 18aac7819cc97b4f2b9fe834600a30614d3e8b33..985f23cfa79b75d3b38fad9bfa5cdf4385f7c8e4 100644 (file)
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -252,7 +252,7 @@ out:
 
 static sector_t swap_page_sector(struct page *page)
 {
-       return (sector_t)__page_file_index(page) << (PAGE_CACHE_SHIFT - 9);
+       return (sector_t)__page_file_index(page) << (PAGE_SHIFT - 9);
 }
 
 int __swap_writepage(struct page *page, struct writeback_control *wbc,
@@ -353,7 +353,11 @@ int swap_readpage(struct page *page)
 
        ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
        if (!ret) {
-               swap_slot_free_notify(page);
+               if (trylock_page(page)) {
+                       swap_slot_free_notify(page);
+                       unlock_page(page);
+               }
+
                count_vm_event(PSWPIN);
                return 0;
        }
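
The swap_readpage() change above only invokes swap_slot_free_notify() with the page lock held, taking it via trylock_page() so the synchronous bdev_read_page() success path never sleeps on a contended lock; if the lock is busy, the notification is simply skipped. The guard pattern on its own (a sketch, not from the patch):

    /* Sketch: opportunistically run a notifier that requires the page
     * lock, skipping it rather than blocking when the lock is held
     * elsewhere -- the notification is an optimization, not required
     * for the correctness of the read itself.
     */
    static void notify_if_lockable(struct page *page)
    {
            if (trylock_page(page)) {
                    swap_slot_free_notify(page);
                    unlock_page(page);
            }
    }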
index 20e58e820e444d630b2ab807a369c53679eaa7ba..40be3ae0afe3061e2bf762cc033f3142972c8ffd 100644 (file)
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -47,11 +47,11 @@ static void read_cache_pages_invalidate_page(struct address_space *mapping,
                if (!trylock_page(page))
                        BUG();
                page->mapping = mapping;
-               do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
+               do_invalidatepage(page, 0, PAGE_SIZE);
                page->mapping = NULL;
                unlock_page(page);
        }
-       page_cache_release(page);
+       put_page(page);
 }
 
 /*
@@ -93,14 +93,14 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
                        read_cache_pages_invalidate_page(mapping, page);
                        continue;
                }
-               page_cache_release(page);
+               put_page(page);
 
                ret = filler(data, page);
                if (unlikely(ret)) {
                        read_cache_pages_invalidate_pages(mapping, pages);
                        break;
                }
-               task_io_account_read(PAGE_CACHE_SIZE);
+               task_io_account_read(PAGE_SIZE);
        }
        return ret;
 }
@@ -130,7 +130,7 @@ static int read_pages(struct address_space *mapping, struct file *filp,
                                mapping_gfp_constraint(mapping, GFP_KERNEL))) {
                        mapping->a_ops->readpage(filp, page);
                }
-               page_cache_release(page);
+               put_page(page);
        }
        ret = 0;
 
@@ -163,7 +163,7 @@ int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
        if (isize == 0)
                goto out;
 
-       end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);
+       end_index = ((isize - 1) >> PAGE_SHIFT);
 
        /*
         * Preallocate as many pages as we will need.
@@ -216,7 +216,7 @@ int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
        while (nr_to_read) {
                int err;
 
-               unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;
+               unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;
 
                if (this_chunk > nr_to_read)
                        this_chunk = nr_to_read;
@@ -425,7 +425,7 @@ ondemand_readahead(struct address_space *mapping,
         * trivial case: (offset - prev_offset) == 1
         * unaligned reads: (offset - prev_offset) == 0
         */
-       prev_offset = (unsigned long long)ra->prev_pos >> PAGE_CACHE_SHIFT;
+       prev_offset = (unsigned long long)ra->prev_pos >> PAGE_SHIFT;
        if (offset - prev_offset <= 1UL)
                goto initial_readahead;
 
@@ -558,8 +558,8 @@ SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
        if (f.file) {
                if (f.file->f_mode & FMODE_READ) {
                        struct address_space *mapping = f.file->f_mapping;
-                       pgoff_t start = offset >> PAGE_CACHE_SHIFT;
-                       pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
+                       pgoff_t start = offset >> PAGE_SHIFT;
+                       pgoff_t end = (offset + count - 1) >> PAGE_SHIFT;
                        unsigned long len = end - start + 1;
                        ret = do_readahead(mapping, f.file, start, len);
                }
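
The readahead(2) path above converts a byte range into an inclusive page range before handing it to do_readahead(). The same conversion as a stand-alone sketch (byte_range_to_pages is a hypothetical helper):

    /* Sketch: map [offset, offset + count) in bytes onto the page
     * indices it touches, mirroring the computation in the syscall.
     */
    static void byte_range_to_pages(loff_t offset, size_t count,
                                    pgoff_t *start, unsigned long *len)
    {
            pgoff_t end = (offset + count - 1) >> PAGE_SHIFT;

            *start = offset >> PAGE_SHIFT;
            *len = end - *start + 1;
    }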
index 395e314b79962696d9be729861bd28300fd7e68b..307b555024efb6787cca6030d1f58060ab4031fe 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1541,7 +1541,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 
 discard:
        page_remove_rmap(page, PageHuge(page));
-       page_cache_release(page);
+       put_page(page);
 
 out_unmap:
        pte_unmap_unlock(pte, ptl);
index 9428c51ab2d6b026ee9a10afc7e21c70e0a33d80..719bd6b88d98eaf0958ec57ddb152f22de0f74d0 100644 (file)
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -75,8 +75,8 @@ static struct vfsmount *shm_mnt;
 
 #include "internal.h"
 
-#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
-#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
+#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
+#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
 
 /* Pretend that each entry is of this size in directory's i_size */
 #define BOGO_DIRENT_SIZE 20
@@ -176,13 +176,13 @@ static inline int shmem_reacct_size(unsigned long flags,
 static inline int shmem_acct_block(unsigned long flags)
 {
        return (flags & VM_NORESERVE) ?
-               security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_CACHE_SIZE)) : 0;
+               security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_SIZE)) : 0;
 }
 
 static inline void shmem_unacct_blocks(unsigned long flags, long pages)
 {
        if (flags & VM_NORESERVE)
-               vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
+               vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
 }
 
 static const struct super_operations shmem_ops;
@@ -300,7 +300,7 @@ static int shmem_add_to_page_cache(struct page *page,
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
 
-       page_cache_get(page);
+       get_page(page);
        page->mapping = mapping;
        page->index = index;
 
@@ -318,7 +318,7 @@ static int shmem_add_to_page_cache(struct page *page,
        } else {
                page->mapping = NULL;
                spin_unlock_irq(&mapping->tree_lock);
-               page_cache_release(page);
+               put_page(page);
        }
        return error;
 }
@@ -338,7 +338,7 @@ static void shmem_delete_from_page_cache(struct page *page, void *radswap)
        __dec_zone_page_state(page, NR_FILE_PAGES);
        __dec_zone_page_state(page, NR_SHMEM);
        spin_unlock_irq(&mapping->tree_lock);
-       page_cache_release(page);
+       put_page(page);
        BUG_ON(error);
 }
 
@@ -474,10 +474,10 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 {
        struct address_space *mapping = inode->i_mapping;
        struct shmem_inode_info *info = SHMEM_I(inode);
-       pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-       pgoff_t end = (lend + 1) >> PAGE_CACHE_SHIFT;
-       unsigned int partial_start = lstart & (PAGE_CACHE_SIZE - 1);
-       unsigned int partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
+       pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       pgoff_t end = (lend + 1) >> PAGE_SHIFT;
+       unsigned int partial_start = lstart & (PAGE_SIZE - 1);
+       unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
        struct pagevec pvec;
        pgoff_t indices[PAGEVEC_SIZE];
        long nr_swaps_freed = 0;
@@ -530,7 +530,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                struct page *page = NULL;
                shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
                if (page) {
-                       unsigned int top = PAGE_CACHE_SIZE;
+                       unsigned int top = PAGE_SIZE;
                        if (start > end) {
                                top = partial_end;
                                partial_end = 0;
@@ -538,7 +538,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                        zero_user_segment(page, partial_start, top);
                        set_page_dirty(page);
                        unlock_page(page);
-                       page_cache_release(page);
+                       put_page(page);
                }
        }
        if (partial_end) {
@@ -548,7 +548,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                        zero_user_segment(page, 0, partial_end);
                        set_page_dirty(page);
                        unlock_page(page);
-                       page_cache_release(page);
+                       put_page(page);
                }
        }
        if (start >= end)
@@ -833,7 +833,7 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
                mem_cgroup_commit_charge(page, memcg, true, false);
 out:
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
        return error;
 }
 
@@ -1080,7 +1080,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
        if (!newpage)
                return -ENOMEM;
 
-       page_cache_get(newpage);
+       get_page(newpage);
        copy_highpage(newpage, oldpage);
        flush_dcache_page(newpage);
 
@@ -1120,8 +1120,8 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
        set_page_private(oldpage, 0);
 
        unlock_page(oldpage);
-       page_cache_release(oldpage);
-       page_cache_release(oldpage);
+       put_page(oldpage);
+       put_page(oldpage);
        return error;
 }
 
@@ -1145,7 +1145,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
        int once = 0;
        int alloced = 0;
 
-       if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
+       if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
                return -EFBIG;
 repeat:
        swap.val = 0;
@@ -1156,7 +1156,7 @@ repeat:
        }
 
        if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
-           ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
+           ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
                error = -EINVAL;
                goto unlock;
        }
@@ -1169,7 +1169,7 @@ repeat:
                if (sgp != SGP_READ)
                        goto clear;
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
                page = NULL;
        }
        if (page || (sgp == SGP_READ && !swap.val)) {
@@ -1327,7 +1327,7 @@ clear:
 
        /* Perhaps the file has been truncated since we checked */
        if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
-           ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
+           ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
                if (alloced) {
                        ClearPageDirty(page);
                        delete_from_page_cache(page);
@@ -1355,7 +1355,7 @@ failed:
 unlock:
        if (page) {
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
        }
        if (error == -ENOSPC && !once++) {
                info = SHMEM_I(inode);
@@ -1577,7 +1577,7 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
 {
        struct inode *inode = mapping->host;
        struct shmem_inode_info *info = SHMEM_I(inode);
-       pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+       pgoff_t index = pos >> PAGE_SHIFT;
 
        /* i_mutex is held by caller */
        if (unlikely(info->seals)) {
@@ -1601,16 +1601,16 @@ shmem_write_end(struct file *file, struct address_space *mapping,
                i_size_write(inode, pos + copied);
 
        if (!PageUptodate(page)) {
-               if (copied < PAGE_CACHE_SIZE) {
-                       unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+               if (copied < PAGE_SIZE) {
+                       unsigned from = pos & (PAGE_SIZE - 1);
                        zero_user_segments(page, 0, from,
-                                       from + copied, PAGE_CACHE_SIZE);
+                                       from + copied, PAGE_SIZE);
                }
                SetPageUptodate(page);
        }
        set_page_dirty(page);
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 
        return copied;
 }
@@ -1635,8 +1635,8 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
        if (!iter_is_iovec(to))
                sgp = SGP_DIRTY;
 
-       index = *ppos >> PAGE_CACHE_SHIFT;
-       offset = *ppos & ~PAGE_CACHE_MASK;
+       index = *ppos >> PAGE_SHIFT;
+       offset = *ppos & ~PAGE_MASK;
 
        for (;;) {
                struct page *page = NULL;
@@ -1644,11 +1644,11 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
                unsigned long nr, ret;
                loff_t i_size = i_size_read(inode);
 
-               end_index = i_size >> PAGE_CACHE_SHIFT;
+               end_index = i_size >> PAGE_SHIFT;
                if (index > end_index)
                        break;
                if (index == end_index) {
-                       nr = i_size & ~PAGE_CACHE_MASK;
+                       nr = i_size & ~PAGE_MASK;
                        if (nr <= offset)
                                break;
                }
@@ -1666,14 +1666,14 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
                 * We must evaluate after, since reads (unlike writes)
                 * are called without i_mutex protection against truncate
                 */
-               nr = PAGE_CACHE_SIZE;
+               nr = PAGE_SIZE;
                i_size = i_size_read(inode);
-               end_index = i_size >> PAGE_CACHE_SHIFT;
+               end_index = i_size >> PAGE_SHIFT;
                if (index == end_index) {
-                       nr = i_size & ~PAGE_CACHE_MASK;
+                       nr = i_size & ~PAGE_MASK;
                        if (nr <= offset) {
                                if (page)
-                                       page_cache_release(page);
+                                       put_page(page);
                                break;
                        }
                }
@@ -1694,7 +1694,7 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
                                mark_page_accessed(page);
                } else {
                        page = ZERO_PAGE(0);
-                       page_cache_get(page);
+                       get_page(page);
                }
 
                /*
@@ -1704,10 +1704,10 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
                ret = copy_page_to_iter(page, offset, nr, to);
                retval += ret;
                offset += ret;
-               index += offset >> PAGE_CACHE_SHIFT;
-               offset &= ~PAGE_CACHE_MASK;
+               index += offset >> PAGE_SHIFT;
+               offset &= ~PAGE_MASK;
 
-               page_cache_release(page);
+               put_page(page);
                if (!iov_iter_count(to))
                        break;
                if (ret < nr) {
@@ -1717,7 +1717,7 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
                cond_resched();
        }
 
-       *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
+       *ppos = ((loff_t) index << PAGE_SHIFT) + offset;
        file_accessed(file);
        return retval ? retval : error;
 }
@@ -1755,9 +1755,9 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
        if (splice_grow_spd(pipe, &spd))
                return -ENOMEM;
 
-       index = *ppos >> PAGE_CACHE_SHIFT;
-       loff = *ppos & ~PAGE_CACHE_MASK;
-       req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+       index = *ppos >> PAGE_SHIFT;
+       loff = *ppos & ~PAGE_MASK;
+       req_pages = (len + loff + PAGE_SIZE - 1) >> PAGE_SHIFT;
        nr_pages = min(req_pages, spd.nr_pages_max);
 
        spd.nr_pages = find_get_pages_contig(mapping, index,
@@ -1774,7 +1774,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
                index++;
        }
 
-       index = *ppos >> PAGE_CACHE_SHIFT;
+       index = *ppos >> PAGE_SHIFT;
        nr_pages = spd.nr_pages;
        spd.nr_pages = 0;
 
@@ -1784,7 +1784,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
                if (!len)
                        break;
 
-               this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
+               this_len = min_t(unsigned long, len, PAGE_SIZE - loff);
                page = spd.pages[page_nr];
 
                if (!PageUptodate(page) || page->mapping != mapping) {
@@ -1793,19 +1793,19 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
                        if (error)
                                break;
                        unlock_page(page);
-                       page_cache_release(spd.pages[page_nr]);
+                       put_page(spd.pages[page_nr]);
                        spd.pages[page_nr] = page;
                }
 
                isize = i_size_read(inode);
-               end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
+               end_index = (isize - 1) >> PAGE_SHIFT;
                if (unlikely(!isize || index > end_index))
                        break;
 
                if (end_index == index) {
                        unsigned int plen;
 
-                       plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
+                       plen = ((isize - 1) & ~PAGE_MASK) + 1;
                        if (plen <= loff)
                                break;
 
@@ -1822,7 +1822,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
        }
 
        while (page_nr < nr_pages)
-               page_cache_release(spd.pages[page_nr++]);
+               put_page(spd.pages[page_nr++]);
 
        if (spd.nr_pages)
                error = splice_to_pipe(pipe, &spd);
@@ -1904,10 +1904,10 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
        else if (offset >= inode->i_size)
                offset = -ENXIO;
        else {
-               start = offset >> PAGE_CACHE_SHIFT;
-               end = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+               start = offset >> PAGE_SHIFT;
+               end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
                new_offset = shmem_seek_hole_data(mapping, start, end, whence);
-               new_offset <<= PAGE_CACHE_SHIFT;
+               new_offset <<= PAGE_SHIFT;
                if (new_offset > offset) {
                        if (new_offset < inode->i_size)
                                offset = new_offset;
@@ -2203,8 +2203,8 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                goto out;
        }
 
-       start = offset >> PAGE_CACHE_SHIFT;
-       end = (offset + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+       start = offset >> PAGE_SHIFT;
+       end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        /* Try to avoid a swapstorm if len is impossible to satisfy */
        if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
                error = -ENOSPC;
@@ -2237,8 +2237,8 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                if (error) {
                        /* Remove the !PageUptodate pages we added */
                        shmem_undo_range(inode,
-                               (loff_t)start << PAGE_CACHE_SHIFT,
-                               (loff_t)index << PAGE_CACHE_SHIFT, true);
+                               (loff_t)start << PAGE_SHIFT,
+                               (loff_t)index << PAGE_SHIFT, true);
                        goto undone;
                }
 
@@ -2259,7 +2259,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                 */
                set_page_dirty(page);
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
                cond_resched();
        }
 
@@ -2280,7 +2280,7 @@ static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
        struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
 
        buf->f_type = TMPFS_MAGIC;
-       buf->f_bsize = PAGE_CACHE_SIZE;
+       buf->f_bsize = PAGE_SIZE;
        buf->f_namelen = NAME_MAX;
        if (sbinfo->max_blocks) {
                buf->f_blocks = sbinfo->max_blocks;
@@ -2523,7 +2523,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
        struct shmem_inode_info *info;
 
        len = strlen(symname) + 1;
-       if (len > PAGE_CACHE_SIZE)
+       if (len > PAGE_SIZE)
                return -ENAMETOOLONG;
 
        inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
@@ -2562,7 +2562,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
                SetPageUptodate(page);
                set_page_dirty(page);
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
        }
        dir->i_size += BOGO_DIRENT_SIZE;
        dir->i_ctime = dir->i_mtime = CURRENT_TIME;
@@ -2835,7 +2835,7 @@ static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
                        if (*rest)
                                goto bad_val;
                        sbinfo->max_blocks =
-                               DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
+                               DIV_ROUND_UP(size, PAGE_SIZE);
                } else if (!strcmp(this_char,"nr_blocks")) {
                        sbinfo->max_blocks = memparse(value, &rest);
                        if (*rest)
@@ -2940,7 +2940,7 @@ static int shmem_show_options(struct seq_file *seq, struct dentry *root)
 
        if (sbinfo->max_blocks != shmem_default_max_blocks())
                seq_printf(seq, ",size=%luk",
-                       sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
+                       sbinfo->max_blocks << (PAGE_SHIFT - 10));
        if (sbinfo->max_inodes != shmem_default_max_inodes())
                seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
        if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
@@ -3082,8 +3082,8 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
        sbinfo->free_inodes = sbinfo->max_inodes;
 
        sb->s_maxbytes = MAX_LFS_FILESIZE;
-       sb->s_blocksize = PAGE_CACHE_SIZE;
-       sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+       sb->s_blocksize = PAGE_SIZE;
+       sb->s_blocksize_bits = PAGE_SHIFT;
        sb->s_magic = TMPFS_MAGIC;
        sb->s_op = &shmem_ops;
        sb->s_time_gran = 1;
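
Most of the shmem.c churn is the same file-position arithmetic rewritten against PAGE_SHIFT and PAGE_MASK. The recurring split of a byte position into a page index plus an in-page offset, as it now appears in shmem_file_read_iter() and shmem_write_begin() (split_pos is a hypothetical helper):

    /* Sketch: decompose a file position into (page index, byte within
     * page); the inverse is pos = ((loff_t)index << PAGE_SHIFT) + offset.
     */
    static void split_pos(loff_t pos, pgoff_t *index, unsigned int *offset)
    {
            *index = pos >> PAGE_SHIFT;    /* which page of the file */
            *offset = pos & ~PAGE_MASK;    /* byte within that page  */
    }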
index 09fe5e97714a2ac9756a537c78b617d9ed7a1ffc..03aacbcb013f2be47efbb15d78387c8b368e2604 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -114,7 +114,7 @@ void put_pages_list(struct list_head *pages)
 
                victim = list_entry(pages->prev, struct page, lru);
                list_del(&victim->lru);
-               page_cache_release(victim);
+               put_page(victim);
        }
 }
 EXPORT_SYMBOL(put_pages_list);
@@ -142,7 +142,7 @@ int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
                        return seg;
 
                pages[seg] = kmap_to_page(kiov[seg].iov_base);
-               page_cache_get(pages[seg]);
+               get_page(pages[seg]);
        }
 
        return seg;
@@ -236,7 +236,7 @@ void rotate_reclaimable_page(struct page *page)
                struct pagevec *pvec;
                unsigned long flags;
 
-               page_cache_get(page);
+               get_page(page);
                local_irq_save(flags);
                pvec = this_cpu_ptr(&lru_rotate_pvecs);
                if (!pagevec_add(pvec, page))
@@ -294,7 +294,7 @@ void activate_page(struct page *page)
        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
 
-               page_cache_get(page);
+               get_page(page);
                if (!pagevec_add(pvec, page))
                        pagevec_lru_move_fn(pvec, __activate_page, NULL);
                put_cpu_var(activate_page_pvecs);
@@ -389,7 +389,7 @@ static void __lru_cache_add(struct page *page)
 {
        struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
 
-       page_cache_get(page);
+       get_page(page);
        if (!pagevec_space(pvec))
                __pagevec_lru_add(pvec);
        pagevec_add(pvec, page);
@@ -646,7 +646,7 @@ void deactivate_page(struct page *page)
        if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
                struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
 
-               page_cache_get(page);
+               get_page(page);
                if (!pagevec_add(pvec, page))
                        pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
                put_cpu_var(lru_deactivate_pvecs);
@@ -698,7 +698,7 @@ void lru_add_drain_all(void)
 }
 
 /**
- * release_pages - batched page_cache_release()
+ * release_pages - batched put_page()
  * @pages: array of pages to release
  * @nr: number of pages
  * @cold: whether the pages are cache cold
@@ -728,6 +728,11 @@ void release_pages(struct page **pages, int nr, bool cold)
                        zone = NULL;
                }
 
+               if (is_huge_zero_page(page)) {
+                       put_huge_zero_page();
+                       continue;
+               }
+
                page = compound_head(page);
                if (!put_page_testzero(page))
                        continue;
index 69cb2464e7dcd598dcf18e8601f97c1c4f8bb627..366ce3518703ecb7cd780b43acd7154adece2b9d 100644 (file)
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -85,7 +85,7 @@ int __add_to_swap_cache(struct page *page, swp_entry_t entry)
        VM_BUG_ON_PAGE(PageSwapCache(page), page);
        VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
 
-       page_cache_get(page);
+       get_page(page);
        SetPageSwapCache(page);
        set_page_private(page, entry.val);
 
@@ -109,7 +109,7 @@ int __add_to_swap_cache(struct page *page, swp_entry_t entry)
                VM_BUG_ON(error == -EEXIST);
                set_page_private(page, 0UL);
                ClearPageSwapCache(page);
-               page_cache_release(page);
+               put_page(page);
        }
 
        return error;
@@ -226,7 +226,7 @@ void delete_from_swap_cache(struct page *page)
        spin_unlock_irq(&address_space->tree_lock);
 
        swapcache_free(entry);
-       page_cache_release(page);
+       put_page(page);
 }
 
 /* 
@@ -252,7 +252,7 @@ static inline void free_swap_cache(struct page *page)
 void free_page_and_swap_cache(struct page *page)
 {
        free_swap_cache(page);
-       page_cache_release(page);
+       put_page(page);
 }
 
 /*
@@ -380,7 +380,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
        } while (err != -ENOMEM);
 
        if (new_page)
-               page_cache_release(new_page);
+               put_page(new_page);
        return found_page;
 }
 
@@ -495,7 +495,7 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                        continue;
                if (offset != entry_offset)
                        SetPageReadahead(page);
-               page_cache_release(page);
+               put_page(page);
        }
        blk_finish_plug(&plug);
 
index 560ad380634c19661f5606d0397ac85aa62c8add..83874eced5bfa0ac4c889cb0d65ecf22cfa24af0 100644 (file)
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -119,7 +119,7 @@ __try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
                ret = try_to_free_swap(page);
                unlock_page(page);
        }
-       page_cache_release(page);
+       put_page(page);
        return ret;
 }
 
@@ -1000,7 +1000,7 @@ int free_swap_and_cache(swp_entry_t entry)
                        page = find_get_page(swap_address_space(entry),
                                                entry.val);
                        if (page && !trylock_page(page)) {
-                               page_cache_release(page);
+                               put_page(page);
                                page = NULL;
                        }
                }
@@ -1017,7 +1017,7 @@ int free_swap_and_cache(swp_entry_t entry)
                        SetPageDirty(page);
                }
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
        }
        return p != NULL;
 }
@@ -1518,7 +1518,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
                }
                if (retval) {
                        unlock_page(page);
-                       page_cache_release(page);
+                       put_page(page);
                        break;
                }
 
@@ -1570,7 +1570,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
                 */
                SetPageDirty(page);
                unlock_page(page);
-               page_cache_release(page);
+               put_page(page);
 
                /*
                 * Make sure that we aren't completely killing
@@ -2574,7 +2574,7 @@ bad_swap:
 out:
        if (page && !IS_ERR(page)) {
                kunmap(page);
-               page_cache_release(page);
+               put_page(page);
        }
        if (name)
                putname(name);
index 7598b552ae0310c6490121422fcc58aae7ff010f..b00272810871e1e62cc9eca33a06b15b3576a8c7 100644 (file)
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -118,7 +118,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
                return -EIO;
 
        if (page_has_private(page))
-               do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
+               do_invalidatepage(page, 0, PAGE_SIZE);
 
        /*
         * Some filesystems seem to re-dirty the page even after
@@ -159,8 +159,8 @@ int truncate_inode_page(struct address_space *mapping, struct page *page)
 {
        if (page_mapped(page)) {
                unmap_mapping_range(mapping,
-                                  (loff_t)page->index << PAGE_CACHE_SHIFT,
-                                  PAGE_CACHE_SIZE, 0);
+                                  (loff_t)page->index << PAGE_SHIFT,
+                                  PAGE_SIZE, 0);
        }
        return truncate_complete_page(mapping, page);
 }
@@ -241,8 +241,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
                return;
 
        /* Offsets within partial pages */
-       partial_start = lstart & (PAGE_CACHE_SIZE - 1);
-       partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
+       partial_start = lstart & (PAGE_SIZE - 1);
+       partial_end = (lend + 1) & (PAGE_SIZE - 1);
 
        /*
         * 'start' and 'end' always covers the range of pages to be fully
@@ -250,7 +250,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
         * start of the range and 'partial_end' at the end of the range.
         * Note that 'end' is exclusive while 'lend' is inclusive.
         */
-       start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+       start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (lend == -1)
                /*
                 * lend == -1 indicates end-of-file so we have to set 'end'
@@ -259,7 +259,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
                 */
                end = -1;
        else
-               end = (lend + 1) >> PAGE_CACHE_SHIFT;
+               end = (lend + 1) >> PAGE_SHIFT;
 
        pagevec_init(&pvec, 0);
        index = start;
@@ -298,7 +298,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
        if (partial_start) {
                struct page *page = find_lock_page(mapping, start - 1);
                if (page) {
-                       unsigned int top = PAGE_CACHE_SIZE;
+                       unsigned int top = PAGE_SIZE;
                        if (start > end) {
                                /* Truncation within a single page */
                                top = partial_end;
@@ -311,7 +311,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
                                do_invalidatepage(page, partial_start,
                                                  top - partial_start);
                        unlock_page(page);
-                       page_cache_release(page);
+                       put_page(page);
                }
        }
        if (partial_end) {
@@ -324,7 +324,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
                                do_invalidatepage(page, 0,
                                                  partial_end);
                        unlock_page(page);
-                       page_cache_release(page);
+                       put_page(page);
                }
        }
        /*
@@ -538,7 +538,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
        if (mapping->a_ops->freepage)
                mapping->a_ops->freepage(page);
 
-       page_cache_release(page);       /* pagecache ref */
+       put_page(page); /* pagecache ref */
        return 1;
 failed:
        spin_unlock_irqrestore(&mapping->tree_lock, flags);
@@ -608,18 +608,18 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
                                         * Zap the rest of the file in one hit.
                                         */
                                        unmap_mapping_range(mapping,
-                                          (loff_t)index << PAGE_CACHE_SHIFT,
+                                          (loff_t)index << PAGE_SHIFT,
                                           (loff_t)(1 + end - index)
-                                                        << PAGE_CACHE_SHIFT,
-                                           0);
+                                                        << PAGE_SHIFT,
+                                                        0);
                                        did_range_unmap = 1;
                                } else {
                                        /*
                                         * Just zap this page
                                         */
                                        unmap_mapping_range(mapping,
-                                          (loff_t)index << PAGE_CACHE_SHIFT,
-                                          PAGE_CACHE_SIZE, 0);
+                                          (loff_t)index << PAGE_SHIFT,
+                                          PAGE_SIZE, 0);
                                }
                        }
                        BUG_ON(page_mapped(page));
@@ -744,14 +744,14 @@ void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
 
        WARN_ON(to > inode->i_size);
 
-       if (from >= to || bsize == PAGE_CACHE_SIZE)
+       if (from >= to || bsize == PAGE_SIZE)
                return;
        /* Page straddling @from will not have any hole block created? */
        rounded_from = round_up(from, bsize);
-       if (to <= rounded_from || !(rounded_from & (PAGE_CACHE_SIZE - 1)))
+       if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
                return;
 
-       index = from >> PAGE_CACHE_SHIFT;
+       index = from >> PAGE_SHIFT;
        page = find_lock_page(inode->i_mapping, index);
        /* Page not cached? Nothing to do */
        if (!page)
@@ -763,7 +763,7 @@ void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
        if (page_mkclean(page))
                set_page_dirty(page);
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 }
 EXPORT_SYMBOL(pagecache_isize_extended);
 
index 9f3a0290b2739665c22ab58087d1737aea91ca59..af817e5060fbfbda2be8ba35024c1ad460055b20 100644 (file)
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -93,7 +93,7 @@ out_release_uncharge_unlock:
        pte_unmap_unlock(dst_pte, ptl);
        mem_cgroup_cancel_charge(page, memcg, false);
 out_release:
-       page_cache_release(page);
+       put_page(page);
        goto out;
 }
 
@@ -287,7 +287,7 @@ out_unlock:
        up_read(&dst_mm->mmap_sem);
 out:
        if (page)
-               page_cache_release(page);
+               put_page(page);
        BUG_ON(copied < 0);
        BUG_ON(err > 0);
        BUG_ON(!copied && !err);
index b934223eaa456aa664cfd5eb07cab634b70e0386..142cb61f4822454bf3819a4c10c4e8b479a70ec8 100644 (file)
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2553,7 +2553,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
                sc->gfp_mask |= __GFP_HIGHMEM;
 
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
-                                       requested_highidx, sc->nodemask) {
+                                       gfp_zone(sc->gfp_mask), sc->nodemask) {
                enum zone_type classzone_idx;
 
                if (!populated_zone(zone))
@@ -3318,6 +3318,20 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order,
        /* Try to sleep for a short interval */
        if (prepare_kswapd_sleep(pgdat, order, remaining,
                                                balanced_classzone_idx)) {
+               /*
+                * Compaction records what page blocks it recently failed to
+                * isolate pages from and skips them in the future scanning.
+                * When kswapd is going to sleep, it is reasonable to assume
+                * that pages and compaction may succeed so reset the cache.
+                */
+               reset_isolation_suitable(pgdat);
+
+               /*
+                * We have freed the memory, now we should compact it to make
+                * allocation of the requested order possible.
+                */
+               wakeup_kcompactd(pgdat, order, classzone_idx);
+
                remaining = schedule_timeout(HZ/10);
                finish_wait(&pgdat->kswapd_wait, &wait);
                prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
@@ -3341,20 +3355,6 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order,
                 */
                set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
 
-               /*
-                * Compaction records what page blocks it recently failed to
-                * isolate pages from and skips them in the future scanning.
-                * When kswapd is going to sleep, it is reasonable to assume
-                * that pages and compaction may succeed so reset the cache.
-                */
-               reset_isolation_suitable(pgdat);
-
-               /*
-                * We have freed the memory, now we should compact it to make
-                * allocation of the requested order possible.
-                */
-               wakeup_kcompactd(pgdat, order, classzone_idx);
-
                if (!kthread_should_stop())
                        schedule();
 
index bf14508afd64573d8310a5a3e12b99b629fc8c02..de0f119b1780b2af14d6bf6868f0a8da0f1ee236 100644 (file)
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -170,6 +170,8 @@ static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
 static LIST_HEAD(zswap_pools);
 /* protects zswap_pools list modification */
 static DEFINE_SPINLOCK(zswap_pools_lock);
+/* pool counter to provide unique names to zpool */
+static atomic_t zswap_pools_count = ATOMIC_INIT(0);
 
 /* used by param callback function */
 static bool zswap_init_started;
@@ -565,6 +567,7 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
 static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 {
        struct zswap_pool *pool;
+       char name[38]; /* 'zswap' + 32 char (max) num + \0 */
        gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
 
        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
@@ -573,7 +576,10 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
                return NULL;
        }
 
-       pool->zpool = zpool_create_pool(type, "zswap", gfp, &zswap_zpool_ops);
+       /* unique name for each pool specifically required by zsmalloc */
+       snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
+
+       pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
        if (!pool->zpool) {
                pr_err("%s zpool not available\n", type);
                goto error;
@@ -869,7 +875,7 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
 
        case ZSWAP_SWAPCACHE_EXIST:
                /* page is already in the swap cache, ignore for now */
-               page_cache_release(page);
+               put_page(page);
                ret = -EEXIST;
                goto fail;
 
@@ -897,7 +903,7 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
 
        /* start writeback */
        __swap_writepage(page, &wbc, end_swap_bio_write);
-       page_cache_release(page);
+       put_page(page);
        zswap_written_back_pages++;
 
        spin_lock(&tree->lock);
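
The pool-naming change earlier in this file exists because zsmalloc requires pool names to be unique, so a second pool plainly called "zswap" would collide with the first. The scheme reduced to its essentials (a sketch; pool_seq and zswap_pool_name are illustrative names):

    static atomic_t pool_seq = ATOMIC_INIT(0);

    /* Sketch: stamp a monotonically increasing hex counter into the
     * pool name, matching the 38-byte bound in the patch
     * ("zswap" + up to 32 hex digits + NUL).
     */
    static void zswap_pool_name(char *buf, size_t len)
    {
            snprintf(buf, len, "zswap%x", atomic_inc_return(&pool_seq));
    }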
index d16bb4b14aa1d8e99fb1c8d215987cf333c5486d..97ecc27aeca695da7de57e66ac741663095886a1 100644 (file)
--- a/net/6lowpan/6lowpan_i.h
+++ b/net/6lowpan/6lowpan_i.h
@@ -3,6 +3,15 @@
 
 #include <linux/netdevice.h>
 
+#include <net/6lowpan.h>
+
+/* the caller needs to be sure that dev->type is ARPHRD_6LOWPAN */
+static inline bool lowpan_is_ll(const struct net_device *dev,
+                               enum lowpan_lltypes lltype)
+{
+       return lowpan_dev(dev)->lltype == lltype;
+}
+
 #ifdef CONFIG_6LOWPAN_DEBUGFS
 int lowpan_dev_debugfs_init(struct net_device *dev);
 void lowpan_dev_debugfs_exit(struct net_device *dev);
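
The new lowpan_is_ll() helper above lets callers branch on which link layer sits behind a 6LoWPAN interface. A hedged usage sketch (is_lowpan_802154 is a hypothetical wrapper; the type constants come from enum lowpan_lltypes):

    /* Sketch: only valid when dev->type == ARPHRD_6LOWPAN, per the
     * comment on lowpan_is_ll().
     */
    static bool is_lowpan_802154(const struct net_device *dev)
    {
            return lowpan_is_ll(dev, LOWPAN_LLTYPE_IEEE802154);
    }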
index 34e44c0c08368eb5699dd101c4a2c3f35adadfab..7a240b3eaed1ff795f99023b64f2ba49802cd7e5 100644 (file)
--- a/net/6lowpan/core.c
+++ b/net/6lowpan/core.c
@@ -27,11 +27,11 @@ int lowpan_register_netdevice(struct net_device *dev,
        dev->mtu = IPV6_MIN_MTU;
        dev->priv_flags |= IFF_NO_QUEUE;
 
-       lowpan_priv(dev)->lltype = lltype;
+       lowpan_dev(dev)->lltype = lltype;
 
-       spin_lock_init(&lowpan_priv(dev)->ctx.lock);
+       spin_lock_init(&lowpan_dev(dev)->ctx.lock);
        for (i = 0; i < LOWPAN_IPHC_CTX_TABLE_SIZE; i++)
-               lowpan_priv(dev)->ctx.table[i].id = i;
+               lowpan_dev(dev)->ctx.table[i].id = i;
 
        ret = register_netdevice(dev);
        if (ret < 0)
@@ -85,7 +85,7 @@ static int lowpan_event(struct notifier_block *unused,
        case NETDEV_DOWN:
                for (i = 0; i < LOWPAN_IPHC_CTX_TABLE_SIZE; i++)
                        clear_bit(LOWPAN_IPHC_CTX_FLAG_ACTIVE,
-                                 &lowpan_priv(dev)->ctx.table[i].flags);
+                                 &lowpan_dev(dev)->ctx.table[i].flags);
                break;
        default:
                return NOTIFY_DONE;
index 0793a815747206629debbbac5d4ac1386958b6c4..acbaa3db493b2085915ab0c2576be778c262ce0f 100644 (file)
--- a/net/6lowpan/debugfs.c
+++ b/net/6lowpan/debugfs.c
@@ -172,7 +172,7 @@ static const struct file_operations lowpan_ctx_pfx_fops = {
 static int lowpan_dev_debugfs_ctx_init(struct net_device *dev,
                                       struct dentry *ctx, u8 id)
 {
-       struct lowpan_priv *lpriv = lowpan_priv(dev);
+       struct lowpan_dev *ldev = lowpan_dev(dev);
        struct dentry *dentry, *root;
        char buf[32];
 
@@ -185,25 +185,25 @@ static int lowpan_dev_debugfs_ctx_init(struct net_device *dev,
                return -EINVAL;
 
        dentry = debugfs_create_file("active", 0644, root,
-                                    &lpriv->ctx.table[id],
+                                    &ldev->ctx.table[id],
                                     &lowpan_ctx_flag_active_fops);
        if (!dentry)
                return -EINVAL;
 
        dentry = debugfs_create_file("compression", 0644, root,
-                                    &lpriv->ctx.table[id],
+                                    &ldev->ctx.table[id],
                                     &lowpan_ctx_flag_c_fops);
        if (!dentry)
                return -EINVAL;
 
        dentry = debugfs_create_file("prefix", 0644, root,
-                                    &lpriv->ctx.table[id],
+                                    &ldev->ctx.table[id],
                                     &lowpan_ctx_pfx_fops);
        if (!dentry)
                return -EINVAL;
 
        dentry = debugfs_create_file("prefix_len", 0644, root,
-                                    &lpriv->ctx.table[id],
+                                    &ldev->ctx.table[id],
                                     &lowpan_ctx_plen_fops);
        if (!dentry)
                return -EINVAL;
@@ -247,21 +247,21 @@ static const struct file_operations lowpan_context_fops = {
 
 int lowpan_dev_debugfs_init(struct net_device *dev)
 {
-       struct lowpan_priv *lpriv = lowpan_priv(dev);
+       struct lowpan_dev *ldev = lowpan_dev(dev);
        struct dentry *contexts, *dentry;
        int ret, i;
 
        /* creating the root */
-       lpriv->iface_debugfs = debugfs_create_dir(dev->name, lowpan_debugfs);
-       if (!lpriv->iface_debugfs)
+       ldev->iface_debugfs = debugfs_create_dir(dev->name, lowpan_debugfs);
+       if (!ldev->iface_debugfs)
                goto fail;
 
-       contexts = debugfs_create_dir("contexts", lpriv->iface_debugfs);
+       contexts = debugfs_create_dir("contexts", ldev->iface_debugfs);
        if (!contexts)
                goto remove_root;
 
        dentry = debugfs_create_file("show", 0644, contexts,
-                                    &lowpan_priv(dev)->ctx,
+                                    &lowpan_dev(dev)->ctx,
                                     &lowpan_context_fops);
        if (!dentry)
                goto remove_root;
@@ -282,7 +282,7 @@ fail:
 
 void lowpan_dev_debugfs_exit(struct net_device *dev)
 {
-       debugfs_remove_recursive(lowpan_priv(dev)->iface_debugfs);
+       debugfs_remove_recursive(lowpan_dev(dev)->iface_debugfs);
 }
 
 int __init lowpan_debugfs_init(void)
index 99bb22aea346d42b55b4fb1dbc7d7cf30bb7c9d8..8501dd532fe1c59f7ba7fc3fe608926fbe2d5f9a 100644 (file)
@@ -53,9 +53,6 @@
 #include <net/6lowpan.h>
 #include <net/ipv6.h>
 
-/* special link-layer handling */
-#include <net/mac802154.h>
-
 #include "6lowpan_i.h"
 #include "nhc.h"
 
         (((a)->s6_addr16[6]) == 0) &&          \
         (((a)->s6_addr[14]) == 0))
 
+#define lowpan_is_linklocal_zero_padded(a)     \
+       (!((a).s6_addr[1] & 0x3f) &&            \
+        !(a).s6_addr16[1] &&                   \
+        !(a).s6_addr32[1])
+
 #define LOWPAN_IPHC_CID_DCI(cid)       (cid & 0x0f)
 #define LOWPAN_IPHC_CID_SCI(cid)       ((cid & 0xf0) >> 4)
 
-static inline void iphc_uncompress_eui64_lladdr(struct in6_addr *ipaddr,
-                                               const void *lladdr)
-{
-       /* fe:80::XXXX:XXXX:XXXX:XXXX
-        *        \_________________/
-        *              hwaddr
-        */
-       ipaddr->s6_addr[0] = 0xFE;
-       ipaddr->s6_addr[1] = 0x80;
-       memcpy(&ipaddr->s6_addr[8], lladdr, EUI64_ADDR_LEN);
-       /* second bit-flip (Universe/Local)
-        * is done according RFC2464
-        */
-       ipaddr->s6_addr[8] ^= 0x02;
-}
-
-static inline void iphc_uncompress_802154_lladdr(struct in6_addr *ipaddr,
-                                                const void *lladdr)
+static inline void
+lowpan_iphc_uncompress_802154_lladdr(struct in6_addr *ipaddr,
+                                    const void *lladdr)
 {
        const struct ieee802154_addr *addr = lladdr;
-       u8 eui64[EUI64_ADDR_LEN] = { };
+       u8 eui64[EUI64_ADDR_LEN];
 
        switch (addr->mode) {
        case IEEE802154_ADDR_LONG:
                ieee802154_le64_to_be64(eui64, &addr->extended_addr);
-               iphc_uncompress_eui64_lladdr(ipaddr, eui64);
+               lowpan_iphc_uncompress_eui64_lladdr(ipaddr, eui64);
                break;
        case IEEE802154_ADDR_SHORT:
                /* fe:80::ff:fe00:XXXX
@@ -202,7 +189,7 @@ static inline void iphc_uncompress_802154_lladdr(struct in6_addr *ipaddr,
 static struct lowpan_iphc_ctx *
 lowpan_iphc_ctx_get_by_id(const struct net_device *dev, u8 id)
 {
-       struct lowpan_iphc_ctx *ret = &lowpan_priv(dev)->ctx.table[id];
+       struct lowpan_iphc_ctx *ret = &lowpan_dev(dev)->ctx.table[id];
 
        if (!lowpan_iphc_ctx_is_active(ret))
                return NULL;
@@ -214,7 +201,7 @@ static struct lowpan_iphc_ctx *
 lowpan_iphc_ctx_get_by_addr(const struct net_device *dev,
                            const struct in6_addr *addr)
 {
-       struct lowpan_iphc_ctx *table = lowpan_priv(dev)->ctx.table;
+       struct lowpan_iphc_ctx *table = lowpan_dev(dev)->ctx.table;
        struct lowpan_iphc_ctx *ret = NULL;
        struct in6_addr addr_pfx;
        u8 addr_plen;
@@ -258,7 +245,7 @@ static struct lowpan_iphc_ctx *
 lowpan_iphc_ctx_get_by_mcast_addr(const struct net_device *dev,
                                  const struct in6_addr *addr)
 {
-       struct lowpan_iphc_ctx *table = lowpan_priv(dev)->ctx.table;
+       struct lowpan_iphc_ctx *table = lowpan_dev(dev)->ctx.table;
        struct lowpan_iphc_ctx *ret = NULL;
        struct in6_addr addr_mcast, network_pfx = {};
        int i;
@@ -296,9 +283,10 @@ lowpan_iphc_ctx_get_by_mcast_addr(const struct net_device *dev,
  *
  * address_mode is the masked value for sam or dam value
  */
-static int uncompress_addr(struct sk_buff *skb, const struct net_device *dev,
-                          struct in6_addr *ipaddr, u8 address_mode,
-                          const void *lladdr)
+static int lowpan_iphc_uncompress_addr(struct sk_buff *skb,
+                                      const struct net_device *dev,
+                                      struct in6_addr *ipaddr,
+                                      u8 address_mode, const void *lladdr)
 {
        bool fail;
 
@@ -327,12 +315,12 @@ static int uncompress_addr(struct sk_buff *skb, const struct net_device *dev,
        case LOWPAN_IPHC_SAM_11:
        case LOWPAN_IPHC_DAM_11:
                fail = false;
-               switch (lowpan_priv(dev)->lltype) {
+               switch (lowpan_dev(dev)->lltype) {
                case LOWPAN_LLTYPE_IEEE802154:
-                       iphc_uncompress_802154_lladdr(ipaddr, lladdr);
+                       lowpan_iphc_uncompress_802154_lladdr(ipaddr, lladdr);
                        break;
                default:
-                       iphc_uncompress_eui64_lladdr(ipaddr, lladdr);
+                       lowpan_iphc_uncompress_eui64_lladdr(ipaddr, lladdr);
                        break;
                }
                break;
@@ -355,11 +343,11 @@ static int uncompress_addr(struct sk_buff *skb, const struct net_device *dev,
 /* Uncompress address function for source context
  * based address(non-multicast).
  */
-static int uncompress_ctx_addr(struct sk_buff *skb,
-                              const struct net_device *dev,
-                              const struct lowpan_iphc_ctx *ctx,
-                              struct in6_addr *ipaddr, u8 address_mode,
-                              const void *lladdr)
+static int lowpan_iphc_uncompress_ctx_addr(struct sk_buff *skb,
+                                          const struct net_device *dev,
+                                          const struct lowpan_iphc_ctx *ctx,
+                                          struct in6_addr *ipaddr,
+                                          u8 address_mode, const void *lladdr)
 {
        bool fail;
 
@@ -388,12 +376,12 @@ static int uncompress_ctx_addr(struct sk_buff *skb,
        case LOWPAN_IPHC_SAM_11:
        case LOWPAN_IPHC_DAM_11:
                fail = false;
-               switch (lowpan_priv(dev)->lltype) {
+               switch (lowpan_dev(dev)->lltype) {
                case LOWPAN_LLTYPE_IEEE802154:
-                       iphc_uncompress_802154_lladdr(ipaddr, lladdr);
+                       lowpan_iphc_uncompress_802154_lladdr(ipaddr, lladdr);
                        break;
                default:
-                       iphc_uncompress_eui64_lladdr(ipaddr, lladdr);
+                       lowpan_iphc_uncompress_eui64_lladdr(ipaddr, lladdr);
                        break;
                }
                ipv6_addr_prefix_copy(ipaddr, &ctx->pfx, ctx->plen);
@@ -652,22 +640,24 @@ int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev,
        }
 
        if (iphc1 & LOWPAN_IPHC_SAC) {
-               spin_lock_bh(&lowpan_priv(dev)->ctx.lock);
+               spin_lock_bh(&lowpan_dev(dev)->ctx.lock);
                ci = lowpan_iphc_ctx_get_by_id(dev, LOWPAN_IPHC_CID_SCI(cid));
                if (!ci) {
-                       spin_unlock_bh(&lowpan_priv(dev)->ctx.lock);
+                       spin_unlock_bh(&lowpan_dev(dev)->ctx.lock);
                        return -EINVAL;
                }
 
                pr_debug("SAC bit is set. Handle context based source address.\n");
-               err = uncompress_ctx_addr(skb, dev, ci, &hdr.saddr,
-                                         iphc1 & LOWPAN_IPHC_SAM_MASK, saddr);
-               spin_unlock_bh(&lowpan_priv(dev)->ctx.lock);
+               err = lowpan_iphc_uncompress_ctx_addr(skb, dev, ci, &hdr.saddr,
+                                                     iphc1 & LOWPAN_IPHC_SAM_MASK,
+                                                     saddr);
+               spin_unlock_bh(&lowpan_dev(dev)->ctx.lock);
        } else {
                /* Source address uncompression */
                pr_debug("source address stateless compression\n");
-               err = uncompress_addr(skb, dev, &hdr.saddr,
-                                     iphc1 & LOWPAN_IPHC_SAM_MASK, saddr);
+               err = lowpan_iphc_uncompress_addr(skb, dev, &hdr.saddr,
+                                                 iphc1 & LOWPAN_IPHC_SAM_MASK,
+                                                 saddr);
        }
 
        /* Check on error of previous branch */
@@ -676,10 +666,10 @@ int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev,
 
        switch (iphc1 & (LOWPAN_IPHC_M | LOWPAN_IPHC_DAC)) {
        case LOWPAN_IPHC_M | LOWPAN_IPHC_DAC:
-               spin_lock_bh(&lowpan_priv(dev)->ctx.lock);
+               spin_lock_bh(&lowpan_dev(dev)->ctx.lock);
                ci = lowpan_iphc_ctx_get_by_id(dev, LOWPAN_IPHC_CID_DCI(cid));
                if (!ci) {
-                       spin_unlock_bh(&lowpan_priv(dev)->ctx.lock);
+                       spin_unlock_bh(&lowpan_dev(dev)->ctx.lock);
                        return -EINVAL;
                }
 
@@ -688,7 +678,7 @@ int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev,
                err = lowpan_uncompress_multicast_ctx_daddr(skb, ci,
                                                            &hdr.daddr,
                                                            iphc1 & LOWPAN_IPHC_DAM_MASK);
-               spin_unlock_bh(&lowpan_priv(dev)->ctx.lock);
+               spin_unlock_bh(&lowpan_dev(dev)->ctx.lock);
                break;
        case LOWPAN_IPHC_M:
                /* multicast */
@@ -696,22 +686,24 @@ int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev,
                                                        iphc1 & LOWPAN_IPHC_DAM_MASK);
                break;
        case LOWPAN_IPHC_DAC:
-               spin_lock_bh(&lowpan_priv(dev)->ctx.lock);
+               spin_lock_bh(&lowpan_dev(dev)->ctx.lock);
                ci = lowpan_iphc_ctx_get_by_id(dev, LOWPAN_IPHC_CID_DCI(cid));
                if (!ci) {
-                       spin_unlock_bh(&lowpan_priv(dev)->ctx.lock);
+                       spin_unlock_bh(&lowpan_dev(dev)->ctx.lock);
                        return -EINVAL;
                }
 
                /* Destination address context based uncompression */
                pr_debug("DAC bit is set. Handle context based destination address.\n");
-               err = uncompress_ctx_addr(skb, dev, ci, &hdr.daddr,
-                                         iphc1 & LOWPAN_IPHC_DAM_MASK, daddr);
-               spin_unlock_bh(&lowpan_priv(dev)->ctx.lock);
+               err = lowpan_iphc_uncompress_ctx_addr(skb, dev, ci, &hdr.daddr,
+                                                     iphc1 & LOWPAN_IPHC_DAM_MASK,
+                                                     daddr);
+               spin_unlock_bh(&lowpan_dev(dev)->ctx.lock);
                break;
        default:
-               err = uncompress_addr(skb, dev, &hdr.daddr,
-                                     iphc1 & LOWPAN_IPHC_DAM_MASK, daddr);
+               err = lowpan_iphc_uncompress_addr(skb, dev, &hdr.daddr,
+                                                 iphc1 & LOWPAN_IPHC_DAM_MASK,
+                                                 daddr);
                pr_debug("dest: stateless compression mode %d dest %pI6c\n",
                         iphc1 & LOWPAN_IPHC_DAM_MASK, &hdr.daddr);
                break;
@@ -731,7 +723,7 @@ int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev,
                        return err;
        }
 
-       switch (lowpan_priv(dev)->lltype) {
+       switch (lowpan_dev(dev)->lltype) {
        case LOWPAN_LLTYPE_IEEE802154:
                if (lowpan_802154_cb(skb)->d_size)
                        hdr.payload_len = htons(lowpan_802154_cb(skb)->d_size -
@@ -1028,7 +1020,7 @@ int lowpan_header_compress(struct sk_buff *skb, const struct net_device *dev,
                       skb->data, skb->len);
 
        ipv6_daddr_type = ipv6_addr_type(&hdr->daddr);
-       spin_lock_bh(&lowpan_priv(dev)->ctx.lock);
+       spin_lock_bh(&lowpan_dev(dev)->ctx.lock);
        if (ipv6_daddr_type & IPV6_ADDR_MULTICAST)
                dci = lowpan_iphc_ctx_get_by_mcast_addr(dev, &hdr->daddr);
        else
@@ -1037,15 +1029,15 @@ int lowpan_header_compress(struct sk_buff *skb, const struct net_device *dev,
                memcpy(&dci_entry, dci, sizeof(*dci));
                cid |= dci->id;
        }
-       spin_unlock_bh(&lowpan_priv(dev)->ctx.lock);
+       spin_unlock_bh(&lowpan_dev(dev)->ctx.lock);
 
-       spin_lock_bh(&lowpan_priv(dev)->ctx.lock);
+       spin_lock_bh(&lowpan_dev(dev)->ctx.lock);
        sci = lowpan_iphc_ctx_get_by_addr(dev, &hdr->saddr);
        if (sci) {
                memcpy(&sci_entry, sci, sizeof(*sci));
                cid |= (sci->id << 4);
        }
-       spin_unlock_bh(&lowpan_priv(dev)->ctx.lock);
+       spin_unlock_bh(&lowpan_dev(dev)->ctx.lock);
 
        /* if cid is zero it will be compressed */
        if (cid) {
@@ -1101,7 +1093,8 @@ int lowpan_header_compress(struct sk_buff *skb, const struct net_device *dev,
                                                          true);
                        iphc1 |= LOWPAN_IPHC_SAC;
                } else {
-                       if (ipv6_saddr_type & IPV6_ADDR_LINKLOCAL) {
+                       if (ipv6_saddr_type & IPV6_ADDR_LINKLOCAL &&
+                           lowpan_is_linklocal_zero_padded(hdr->saddr)) {
                                iphc1 |= lowpan_compress_addr_64(&hc_ptr,
                                                                 &hdr->saddr,
                                                                 saddr, true);
@@ -1135,7 +1128,8 @@ int lowpan_header_compress(struct sk_buff *skb, const struct net_device *dev,
                                                          false);
                        iphc1 |= LOWPAN_IPHC_DAC;
                } else {
-                       if (ipv6_daddr_type & IPV6_ADDR_LINKLOCAL) {
+                       if (ipv6_daddr_type & IPV6_ADDR_LINKLOCAL &&
+                           lowpan_is_linklocal_zero_padded(hdr->daddr)) {
                                iphc1 |= lowpan_compress_addr_64(&hc_ptr,
                                                                 &hdr->daddr,
                                                                 daddr, false);
index 69537a2eaab1317b33a535066a81c7c118aa3d96..225d91906dfad9e958d2106435ad3fb50b20d6f8 100644 (file)
@@ -91,7 +91,7 @@ static int udp_uncompress(struct sk_buff *skb, size_t needed)
         * here, we obtain the hint from the remaining size of the
         * frame
         */
-       switch (lowpan_priv(skb->dev)->lltype) {
+       switch (lowpan_dev(skb->dev)->lltype) {
        case LOWPAN_LLTYPE_IEEE802154:
                if (lowpan_802154_cb(skb)->d_size)
                        uh.len = htons(lowpan_802154_cb(skb)->d_size -
index a8934d8c8fda0c32a22a46919e8147ad60d748bb..b841c42e5c9b7d38b0b62c9d6a31d0a61cc27409 100644 (file)
@@ -236,6 +236,7 @@ source "net/mpls/Kconfig"
 source "net/hsr/Kconfig"
 source "net/switchdev/Kconfig"
 source "net/l3mdev/Kconfig"
+source "net/qrtr/Kconfig"
 
 config RPS
        bool
index 81d14119eab5416e8576f1fffa5109d752784143..bdd14553a774f9e6ffe14adb772bfe9a20ce9bd3 100644 (file)
@@ -78,3 +78,4 @@ endif
 ifneq ($(CONFIG_NET_L3_MASTER_DEV),)
 obj-y                          += l3mdev/
 endif
+obj-$(CONFIG_QRTR)             += qrtr/
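
These two lines are the usual way to wire a new subsystem into the network build: source its Kconfig so the CONFIG_QRTR symbol exists, then descend into the directory only when it is enabled. The Makefile inside net/qrtr/ would pair with this roughly as (a sketch, not part of this excerpt):

        obj-$(CONFIG_QRTR) += qrtr.o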
index cd3b37989057fd0b1c5a8b1f49a224fe96d7ba87..e574a7e9db6fbff393670af25acf0e9d0cc42749 100644 (file)
@@ -194,7 +194,7 @@ lec_send(struct atm_vcc *vcc, struct sk_buff *skb)
 static void lec_tx_timeout(struct net_device *dev)
 {
        pr_info("%s\n", dev->name);
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
        netif_wake_queue(dev);
 }
 
@@ -324,7 +324,7 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
 out:
        if (entry)
                lec_arp_put(entry);
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
        return NETDEV_TX_OK;
 }
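
netif_trans_update() replaces open-coded writes of dev->trans_start; the helper these conversions rely on is roughly (a sketch of the contemporaneous netdevice.h definition):

        static inline void netif_trans_update(struct net_device *dev)
        {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

                /* avoid dirtying the cache line when nothing changed */
                if (txq->trans_start != jiffies)
                        txq->trans_start = jiffies;
        }

This also prepares for trans_start living in the per-queue structure rather than in struct net_device.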
 
index cb2d1b9b034058c454fa0b7c5c14e6491ed41b1d..7f98a9d39883029e427d132f670373e78f0a982d 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/jiffies.h>
 #include <linux/list.h>
 #include <linux/kref.h>
+#include <linux/lockdep.h>
 #include <linux/netdevice.h>
 #include <linux/pkt_sched.h>
 #include <linux/printk.h>
@@ -175,71 +176,107 @@ unlock:
 }
 
 /**
- * batadv_iv_ogm_orig_del_if - change the private structures of the orig_node to
- *  exclude the removed interface
+ * batadv_iv_ogm_drop_bcast_own_entry - drop section of bcast_own
  * @orig_node: the orig_node that has to be changed
 * @max_if_num: the current number of interfaces
  * @del_if_num: the index of the interface being removed
- *
- * Return: 0 on success, a negative error code otherwise.
  */
-static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node,
-                                    int max_if_num, int del_if_num)
+static void
+batadv_iv_ogm_drop_bcast_own_entry(struct batadv_orig_node *orig_node,
+                                  int max_if_num, int del_if_num)
 {
-       int ret = -ENOMEM;
-       size_t chunk_size, if_offset;
-       void *data_ptr = NULL;
-
-       spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+       size_t chunk_size;
+       size_t if_offset;
+       void *data_ptr;
 
-       /* last interface was removed */
-       if (max_if_num == 0)
-               goto free_bcast_own;
+       lockdep_assert_held(&orig_node->bat_iv.ogm_cnt_lock);
 
        chunk_size = sizeof(unsigned long) * BATADV_NUM_WORDS;
        data_ptr = kmalloc_array(max_if_num, chunk_size, GFP_ATOMIC);
        if (!data_ptr)
-               goto unlock;
+               /* use old buffer when new one could not be allocated */
+               data_ptr = orig_node->bat_iv.bcast_own;
 
        /* copy first part */
-       memcpy(data_ptr, orig_node->bat_iv.bcast_own, del_if_num * chunk_size);
+       memmove(data_ptr, orig_node->bat_iv.bcast_own, del_if_num * chunk_size);
 
        /* copy second part */
        if_offset = (del_if_num + 1) * chunk_size;
-       memcpy((char *)data_ptr + del_if_num * chunk_size,
-              (uint8_t *)orig_node->bat_iv.bcast_own + if_offset,
-              (max_if_num - del_if_num) * chunk_size);
+       memmove((char *)data_ptr + del_if_num * chunk_size,
+               (uint8_t *)orig_node->bat_iv.bcast_own + if_offset,
+               (max_if_num - del_if_num) * chunk_size);
 
-free_bcast_own:
-       kfree(orig_node->bat_iv.bcast_own);
-       orig_node->bat_iv.bcast_own = data_ptr;
+       /* bcast_own was shrunk down in new buffer; free old one */
+       if (orig_node->bat_iv.bcast_own != data_ptr) {
+               kfree(orig_node->bat_iv.bcast_own);
+               orig_node->bat_iv.bcast_own = data_ptr;
+       }
+}
+
+/**
+ * batadv_iv_ogm_drop_bcast_own_sum_entry - drop section of bcast_own_sum
+ * @orig_node: the orig_node that has to be changed
+ * @max_if_num: the current number of interfaces
+ * @del_if_num: the index of the interface being removed
+ */
+static void
+batadv_iv_ogm_drop_bcast_own_sum_entry(struct batadv_orig_node *orig_node,
+                                      int max_if_num, int del_if_num)
+{
+       size_t if_offset;
+       void *data_ptr;
 
-       if (max_if_num == 0)
-               goto free_own_sum;
+       lockdep_assert_held(&orig_node->bat_iv.ogm_cnt_lock);
 
        data_ptr = kmalloc_array(max_if_num, sizeof(u8), GFP_ATOMIC);
-       if (!data_ptr) {
-               kfree(orig_node->bat_iv.bcast_own);
-               goto unlock;
-       }
+       if (!data_ptr)
+               /* use old buffer when new one could not be allocated */
+               data_ptr = orig_node->bat_iv.bcast_own_sum;
 
-       memcpy(data_ptr, orig_node->bat_iv.bcast_own_sum,
-              del_if_num * sizeof(u8));
+       memmove(data_ptr, orig_node->bat_iv.bcast_own_sum,
+               del_if_num * sizeof(u8));
 
        if_offset = (del_if_num + 1) * sizeof(u8);
-       memcpy((char *)data_ptr + del_if_num * sizeof(u8),
-              orig_node->bat_iv.bcast_own_sum + if_offset,
-              (max_if_num - del_if_num) * sizeof(u8));
+       memmove((char *)data_ptr + del_if_num * sizeof(u8),
+               orig_node->bat_iv.bcast_own_sum + if_offset,
+               (max_if_num - del_if_num) * sizeof(u8));
+
+       /* bcast_own_sum was shrunk down in new buffer; free old one */
+       if (orig_node->bat_iv.bcast_own_sum != data_ptr) {
+               kfree(orig_node->bat_iv.bcast_own_sum);
+               orig_node->bat_iv.bcast_own_sum = data_ptr;
+       }
+}
 
-free_own_sum:
-       kfree(orig_node->bat_iv.bcast_own_sum);
-       orig_node->bat_iv.bcast_own_sum = data_ptr;
+/**
+ * batadv_iv_ogm_orig_del_if - change the private structures of the orig_node to
+ *  exclude the removed interface
+ * @orig_node: the orig_node that has to be changed
+ * @max_if_num: the current number of interfaces
+ * @del_if_num: the index of the interface being removed
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node,
+                                    int max_if_num, int del_if_num)
+{
+       spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+
+       if (max_if_num == 0) {
+               kfree(orig_node->bat_iv.bcast_own);
+               kfree(orig_node->bat_iv.bcast_own_sum);
+               orig_node->bat_iv.bcast_own = NULL;
+               orig_node->bat_iv.bcast_own_sum = NULL;
+       } else {
+               batadv_iv_ogm_drop_bcast_own_entry(orig_node, max_if_num,
+                                                  del_if_num);
+               batadv_iv_ogm_drop_bcast_own_sum_entry(orig_node, max_if_num,
+                                                      del_if_num);
+       }
 
-       ret = 0;
-unlock:
        spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
 
-       return ret;
+       return 0;
 }
 
 /**
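
The memcpy() to memmove() switch in the two helpers above is not cosmetic: when kmalloc_array() fails, data_ptr now aliases the old buffer, so the source and destination regions of the copies overlap and memcpy() would be undefined behavior. A minimal standalone demonstration of the overlap case:

        #include <stdio.h>
        #include <string.h>

        int main(void)
        {
                char buf[6] = "abcde";

                /* drop element 1 by shifting the tail left inside the same
                 * buffer; dst and src overlap, so only memmove() is safe */
                memmove(&buf[1], &buf[2], 3);
                printf("%s\n", buf);    /* prints "acdee" */
                return 0;
        }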
@@ -644,18 +681,12 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
        unsigned char *skb_buff;
        unsigned int skb_size;
 
-       if (!kref_get_unless_zero(&if_incoming->refcount))
-               return;
-
-       if (!kref_get_unless_zero(&if_outgoing->refcount))
-               goto out_free_incoming;
-
        /* own packet should always be scheduled */
        if (!own_packet) {
                if (!batadv_atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
                        batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                                   "batman packet queue full\n");
-                       goto out_free_outgoing;
+                       return;
                }
        }
 
@@ -681,6 +712,8 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
        forw_packet_aggr->packet_len = packet_len;
        memcpy(skb_buff, packet_buff, packet_len);
 
+       kref_get(&if_incoming->refcount);
+       kref_get(&if_outgoing->refcount);
        forw_packet_aggr->own = own_packet;
        forw_packet_aggr->if_incoming = if_incoming;
        forw_packet_aggr->if_outgoing = if_outgoing;
@@ -710,10 +743,6 @@ out_free_forw_packet:
 out_nomem:
        if (!own_packet)
                atomic_inc(&bat_priv->batman_queue_left);
-out_free_outgoing:
-       batadv_hardif_put(if_outgoing);
-out_free_incoming:
-       batadv_hardif_put(if_incoming);
 }
 
 /* aggregate a new packet into the existing ogm packet */
@@ -950,9 +979,15 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
        list_for_each_entry_rcu(tmp_hard_iface, &batadv_hardif_list, list) {
                if (tmp_hard_iface->soft_iface != hard_iface->soft_iface)
                        continue;
+
+               if (!kref_get_unless_zero(&tmp_hard_iface->refcount))
+                       continue;
+
                batadv_iv_ogm_queue_add(bat_priv, *ogm_buff,
                                        *ogm_buff_len, hard_iface,
                                        tmp_hard_iface, 1, send_time);
+
+               batadv_hardif_put(tmp_hard_iface);
        }
        rcu_read_unlock();
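
The kref_get_unless_zero() guard added to this RCU list walk (and to the analogous walks later in the series) is the standard pattern for objects that may be freed once their refcount reaches zero while the list is still being traversed under rcu_read_lock(). A generic sketch (names illustrative):

        rcu_read_lock();
        list_for_each_entry_rcu(iface, &iface_list, list) {
                /* the entry may already be dying; use it only if a
                 * reference can still be obtained */
                if (!kref_get_unless_zero(&iface->refcount))
                        continue;

                do_work(iface);

                kref_put(&iface->refcount, iface_release);
        }
        rcu_read_unlock();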
 
@@ -1133,13 +1168,13 @@ out:
  * @if_incoming: interface where the packet was received
  * @if_outgoing: interface for which the retransmission should be considered
  *
- * Return: 1 if the link can be considered bidirectional, 0 otherwise
+ * Return: true if the link can be considered bidirectional, false otherwise
  */
-static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
-                                struct batadv_orig_node *orig_neigh_node,
-                                struct batadv_ogm_packet *batadv_ogm_packet,
-                                struct batadv_hard_iface *if_incoming,
-                                struct batadv_hard_iface *if_outgoing)
+static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
+                                 struct batadv_orig_node *orig_neigh_node,
+                                 struct batadv_ogm_packet *batadv_ogm_packet,
+                                 struct batadv_hard_iface *if_incoming,
+                                 struct batadv_hard_iface *if_outgoing)
 {
        struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
        struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node;
@@ -1147,9 +1182,10 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
        u8 total_count;
        u8 orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
        unsigned int neigh_rq_inv_cube, neigh_rq_max_cube;
-       int tq_asym_penalty, inv_asym_penalty, if_num, ret = 0;
+       int tq_asym_penalty, inv_asym_penalty, if_num;
        unsigned int combined_tq;
        int tq_iface_penalty;
+       bool ret = false;
 
        /* find corresponding one hop neighbor */
        rcu_read_lock();
@@ -1261,7 +1297,7 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
         * consider it bidirectional
         */
        if (batadv_ogm_packet->tq >= BATADV_TQ_TOTAL_BIDRECT_LIMIT)
-               ret = 1;
+               ret = true;
 
 out:
        if (neigh_node)
@@ -1290,9 +1326,9 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
        struct batadv_orig_ifinfo *orig_ifinfo = NULL;
        struct batadv_neigh_node *neigh_node;
        struct batadv_neigh_ifinfo *neigh_ifinfo;
-       int is_dup;
+       bool is_dup;
        s32 seq_diff;
-       int need_update = 0;
+       bool need_update = false;
        int set_mark;
        enum batadv_dup_status ret = BATADV_NO_DUP;
        u32 seqno = ntohl(batadv_ogm_packet->seqno);
@@ -1402,7 +1438,7 @@ batadv_iv_ogm_process_per_outif(const struct sk_buff *skb, int ogm_offset,
        struct sk_buff *skb_priv;
        struct ethhdr *ethhdr;
        u8 *prev_sender;
-       int is_bidirect;
+       bool is_bidirect;
 
        /* create a private copy of the skb, as some functions change tq value
         * and/or flags.
@@ -1730,8 +1766,13 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
                if (hard_iface->soft_iface != bat_priv->soft_iface)
                        continue;
 
+               if (!kref_get_unless_zero(&hard_iface->refcount))
+                       continue;
+
                batadv_iv_ogm_process_per_outif(skb, ogm_offset, orig_node,
                                                if_incoming, hard_iface);
+
+               batadv_hardif_put(hard_iface);
        }
        rcu_read_unlock();
 
@@ -1829,9 +1870,8 @@ static void batadv_iv_ogm_orig_print(struct batadv_priv *bat_priv,
        int batman_count = 0;
        u32 i;
 
-       seq_printf(seq, "  %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
-                  "Originator", "last-seen", "#", BATADV_TQ_MAX_VALUE,
-                  "Nexthop", "outgoingIF", "Potential nexthops");
+       seq_puts(seq,
+                "  Originator      last-seen (#/255)           Nexthop [outgoingIF]:   Potential nexthops ...\n");
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
@@ -1911,8 +1951,7 @@ static void batadv_iv_neigh_print(struct batadv_priv *bat_priv,
        struct batadv_hard_iface *hard_iface;
        int batman_count = 0;
 
-       seq_printf(seq, "   %10s        %-13s %s\n",
-                  "IF", "Neighbor", "last-seen");
+       seq_puts(seq, "           IF        Neighbor      last-seen\n");
 
        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
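
The seq_printf() to seq_puts() conversions here are behavior-preserving: every argument was a compile-time constant, so the padding that the field-width specifiers used to compute has been folded into the literal by hand. For the header just above, both forms emit identical bytes:

        /* before: widths applied at run time */
        seq_printf(seq, "   %10s        %-13s %s\n", "IF", "Neighbor", "last-seen");

        /* after: same output, no format parsing */
        seq_puts(seq, "           IF        Neighbor      last-seen\n");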
index 3315b9a598af0f71a95a080d450cf1faf3a70a1d..3ff8bd1b7bdcf6fa9704a5d73461354b7b22a883 100644 (file)
 
 #include "bat_v_elp.h"
 #include "bat_v_ogm.h"
+#include "hard-interface.h"
 #include "hash.h"
 #include "originator.h"
 #include "packet.h"
 
+static void batadv_v_iface_activate(struct batadv_hard_iface *hard_iface)
+{
+       /* B.A.T.M.A.N. V does not use any queuing mechanism, therefore it can
+        * set the interface as ACTIVE right away, without any risk of a race
+        * condition
+        */
+       if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
+               hard_iface->if_status = BATADV_IF_ACTIVE;
+}
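
Without this hook an interface stays in BATADV_IF_TO_BE_ACTIVATED until the algorithm's queued OGM work promotes it; B.A.T.M.A.N. V has no such queue and can flip the state at once. The hard-interface code invokes the new callback along these lines (a sketch; the exact call site is in hard-interface.c and not shown in this excerpt):

        hard_iface->if_status = BATADV_IF_TO_BE_ACTIVATED;

        /* let the routing algorithm activate the interface right away */
        if (bat_priv->bat_algo_ops->bat_iface_activate)
                bat_priv->bat_algo_ops->bat_iface_activate(hard_iface);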
+
 static int batadv_v_iface_enable(struct batadv_hard_iface *hard_iface)
 {
        int ret;
@@ -151,8 +162,8 @@ static void batadv_v_neigh_print(struct batadv_priv *bat_priv,
        struct batadv_hard_iface *hard_iface;
        int batman_count = 0;
 
-       seq_printf(seq, "  %-15s %s (%11s) [%10s]\n", "Neighbor",
-                  "last-seen", "throughput", "IF");
+       seq_puts(seq,
+                "  Neighbor        last-seen ( throughput) [        IF]\n");
 
        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
@@ -191,9 +202,8 @@ static void batadv_v_orig_print(struct batadv_priv *bat_priv,
        int batman_count = 0;
        u32 i;
 
-       seq_printf(seq, "  %-15s %s (%11s) %17s [%10s]: %20s ...\n",
-                  "Originator", "last-seen", "throughput", "Nexthop",
-                  "outgoingIF", "Potential nexthops");
+       seq_puts(seq,
+                "  Originator      last-seen ( throughput)           Nexthop [outgoingIF]:   Potential nexthops ...\n");
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
@@ -274,6 +284,7 @@ static bool batadv_v_neigh_is_sob(struct batadv_neigh_node *neigh1,
 
 static struct batadv_algo_ops batadv_batman_v __read_mostly = {
        .name = "BATMAN_V",
+       .bat_iface_activate = batadv_v_iface_activate,
        .bat_iface_enable = batadv_v_iface_enable,
        .bat_iface_disable = batadv_v_iface_disable,
        .bat_iface_update_mac = batadv_v_iface_update_mac,
index d9bcbe6e7d65a8cbb646e842fdca560d3b4a104a..473ebb9a0e737a23c29c7293cfe193959f1aec07 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/if_ether.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
+#include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/netdevice.h>
 #include <linux/random.h>
@@ -176,6 +177,9 @@ static void batadv_v_ogm_send(struct work_struct *work)
                if (hard_iface->soft_iface != bat_priv->soft_iface)
                        continue;
 
+               if (!kref_get_unless_zero(&hard_iface->refcount))
+                       continue;
+
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "Sending own OGM2 packet (originator %pM, seqno %u, throughput %u, TTL %d) on interface %s [%pM]\n",
                           ogm_packet->orig, ntohl(ogm_packet->seqno),
@@ -185,10 +189,13 @@ static void batadv_v_ogm_send(struct work_struct *work)
 
                /* this skb gets consumed by batadv_v_ogm_send_to_if() */
                skb_tmp = skb_clone(skb, GFP_ATOMIC);
-               if (!skb_tmp)
+               if (!skb_tmp) {
+                       batadv_hardif_put(hard_iface);
                        break;
+               }
 
                batadv_v_ogm_send_to_if(skb_tmp, hard_iface);
+               batadv_hardif_put(hard_iface);
        }
        rcu_read_unlock();
 
@@ -233,73 +240,6 @@ void batadv_v_ogm_primary_iface_set(struct batadv_hard_iface *primary_iface)
        ether_addr_copy(ogm_packet->orig, primary_iface->net_dev->dev_addr);
 }
 
-/**
- * batadv_v_ogm_orig_update - update the originator status based on the received
- *  OGM
- * @bat_priv: the bat priv with all the soft interface information
- * @orig_node: the originator to update
- * @neigh_node: the neighbour the OGM has been received from (to update)
- * @ogm2: the received OGM
- * @if_outgoing: the interface where this OGM is going to be forwarded through
- */
-static void
-batadv_v_ogm_orig_update(struct batadv_priv *bat_priv,
-                        struct batadv_orig_node *orig_node,
-                        struct batadv_neigh_node *neigh_node,
-                        const struct batadv_ogm2_packet *ogm2,
-                        struct batadv_hard_iface *if_outgoing)
-{
-       struct batadv_neigh_ifinfo *router_ifinfo = NULL, *neigh_ifinfo = NULL;
-       struct batadv_neigh_node *router = NULL;
-       s32 neigh_seq_diff;
-       u32 neigh_last_seqno;
-       u32 router_last_seqno;
-       u32 router_throughput, neigh_throughput;
-
-       batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-                  "Searching and updating originator entry of received packet\n");
-
-       /* if this neighbor already is our next hop there is nothing
-        * to change
-        */
-       router = batadv_orig_router_get(orig_node, if_outgoing);
-       if (router == neigh_node)
-               goto out;
-
-       /* don't consider neighbours with worse throughput.
-        * also switch route if this seqno is BATADV_V_MAX_ORIGDIFF newer than
-        * the last received seqno from our best next hop.
-        */
-       if (router) {
-               router_ifinfo = batadv_neigh_ifinfo_get(router, if_outgoing);
-               neigh_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing);
-
-               /* if these are not allocated, something is wrong. */
-               if (!router_ifinfo || !neigh_ifinfo)
-                       goto out;
-
-               neigh_last_seqno = neigh_ifinfo->bat_v.last_seqno;
-               router_last_seqno = router_ifinfo->bat_v.last_seqno;
-               neigh_seq_diff = neigh_last_seqno - router_last_seqno;
-               router_throughput = router_ifinfo->bat_v.throughput;
-               neigh_throughput = neigh_ifinfo->bat_v.throughput;
-
-               if ((neigh_seq_diff < BATADV_OGM_MAX_ORIGDIFF) &&
-                   (router_throughput >= neigh_throughput))
-                       goto out;
-       }
-
-       batadv_update_route(bat_priv, orig_node, if_outgoing, neigh_node);
-
-out:
-       if (router_ifinfo)
-               batadv_neigh_ifinfo_put(router_ifinfo);
-       if (neigh_ifinfo)
-               batadv_neigh_ifinfo_put(neigh_ifinfo);
-       if (router)
-               batadv_neigh_node_put(router);
-}
-
 /**
  * batadv_v_forward_penalty - apply a penalty to the throughput metric forwarded
  *  with B.A.T.M.A.N. V OGMs
@@ -347,10 +287,12 @@ static u32 batadv_v_forward_penalty(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_v_ogm_forward - forward an OGM to the given outgoing interface
+ * batadv_v_ogm_forward - check conditions and forward an OGM to the given
+ *  outgoing interface
  * @bat_priv: the bat priv with all the soft interface information
  * @ogm_received: previously received OGM to be forwarded
- * @throughput: throughput to announce, may vary per outgoing interface
+ * @orig_node: the originator which has been updated
+ * @neigh_node: the neigh_node through which the OGM has been received
  * @if_incoming: the interface on which this OGM was received
  * @if_outgoing: the interface to which the OGM has to be forwarded
  *
@@ -359,28 +301,57 @@ static u32 batadv_v_forward_penalty(struct batadv_priv *bat_priv,
  */
 static void batadv_v_ogm_forward(struct batadv_priv *bat_priv,
                                 const struct batadv_ogm2_packet *ogm_received,
-                                u32 throughput,
+                                struct batadv_orig_node *orig_node,
+                                struct batadv_neigh_node *neigh_node,
                                 struct batadv_hard_iface *if_incoming,
                                 struct batadv_hard_iface *if_outgoing)
 {
+       struct batadv_neigh_ifinfo *neigh_ifinfo = NULL;
+       struct batadv_orig_ifinfo *orig_ifinfo = NULL;
+       struct batadv_neigh_node *router = NULL;
        struct batadv_ogm2_packet *ogm_forward;
        unsigned char *skb_buff;
        struct sk_buff *skb;
        size_t packet_len;
        u16 tvlv_len;
 
+       /* only forward for specific interfaces, not for the default one. */
+       if (if_outgoing == BATADV_IF_DEFAULT)
+               goto out;
+
+       orig_ifinfo = batadv_orig_ifinfo_new(orig_node, if_outgoing);
+       if (!orig_ifinfo)
+               goto out;
+
+       /* acquire possibly updated router */
+       router = batadv_orig_router_get(orig_node, if_outgoing);
+
+       /* strict rule: forward packets coming from the best next hop only */
+       if (neigh_node != router)
+               goto out;
+
+       /* don't forward the same seqno twice on one interface */
+       if (orig_ifinfo->last_seqno_forwarded == ntohl(ogm_received->seqno))
+               goto out;
+
+       orig_ifinfo->last_seqno_forwarded = ntohl(ogm_received->seqno);
+
        if (ogm_received->ttl <= 1) {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n");
-               return;
+               goto out;
        }
 
+       neigh_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing);
+       if (!neigh_ifinfo)
+               goto out;
+
        tvlv_len = ntohs(ogm_received->tvlv_len);
 
        packet_len = BATADV_OGM2_HLEN + tvlv_len;
        skb = netdev_alloc_skb_ip_align(if_outgoing->net_dev,
                                        ETH_HLEN + packet_len);
        if (!skb)
-               return;
+               goto out;
 
        skb_reserve(skb, ETH_HLEN);
        skb_buff = skb_put(skb, packet_len);
@@ -388,15 +359,23 @@ static void batadv_v_ogm_forward(struct batadv_priv *bat_priv,
 
        /* apply forward penalty */
        ogm_forward = (struct batadv_ogm2_packet *)skb_buff;
-       ogm_forward->throughput = htonl(throughput);
+       ogm_forward->throughput = htonl(neigh_ifinfo->bat_v.throughput);
        ogm_forward->ttl--;
 
        batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                   "Forwarding OGM2 packet on %s: throughput %u, ttl %u, received via %s\n",
-                  if_outgoing->net_dev->name, throughput, ogm_forward->ttl,
-                  if_incoming->net_dev->name);
+                  if_outgoing->net_dev->name, ntohl(ogm_forward->throughput),
+                  ogm_forward->ttl, if_incoming->net_dev->name);
 
        batadv_v_ogm_send_to_if(skb, if_outgoing);
+
+out:
+       if (orig_ifinfo)
+               batadv_orig_ifinfo_put(orig_ifinfo);
+       if (router)
+               batadv_neigh_node_put(router);
+       if (neigh_ifinfo)
+               batadv_neigh_ifinfo_put(neigh_ifinfo);
 }
 
 /**
@@ -493,8 +472,10 @@ out:
 * @neigh_node: the neigh_node through which the OGM has been received
  * @if_incoming: the interface where this packet was received
  * @if_outgoing: the interface for which the packet should be considered
+ *
+ * Return: true if the packet should be forwarded, false otherwise
  */
-static void batadv_v_ogm_route_update(struct batadv_priv *bat_priv,
+static bool batadv_v_ogm_route_update(struct batadv_priv *bat_priv,
                                      const struct ethhdr *ethhdr,
                                      const struct batadv_ogm2_packet *ogm2,
                                      struct batadv_orig_node *orig_node,
@@ -503,14 +484,14 @@ static void batadv_v_ogm_route_update(struct batadv_priv *bat_priv,
                                      struct batadv_hard_iface *if_outgoing)
 {
        struct batadv_neigh_node *router = NULL;
-       struct batadv_neigh_ifinfo *neigh_ifinfo = NULL;
        struct batadv_orig_node *orig_neigh_node = NULL;
-       struct batadv_orig_ifinfo *orig_ifinfo = NULL;
        struct batadv_neigh_node *orig_neigh_router = NULL;
-
-       neigh_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing);
-       if (!neigh_ifinfo)
-               goto out;
+       struct batadv_neigh_ifinfo *router_ifinfo = NULL, *neigh_ifinfo = NULL;
+       u32 router_throughput, neigh_throughput;
+       u32 router_last_seqno;
+       u32 neigh_last_seqno;
+       s32 neigh_seq_diff;
+       bool forward = false;
 
        orig_neigh_node = batadv_v_ogm_orig_get(bat_priv, ethhdr->h_source);
        if (!orig_neigh_node)
@@ -529,47 +510,57 @@ static void batadv_v_ogm_route_update(struct batadv_priv *bat_priv,
                goto out;
        }
 
-       if (router)
-               batadv_neigh_node_put(router);
+       /* Mark the OGM to be considered for forwarding, and update routes
+        * if needed.
+        */
+       forward = true;
 
-       /* Update routes, and check if the OGM is from the best next hop */
-       batadv_v_ogm_orig_update(bat_priv, orig_node, neigh_node, ogm2,
-                                if_outgoing);
+       batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+                  "Searching and updating originator entry of received packet\n");
 
-       orig_ifinfo = batadv_orig_ifinfo_new(orig_node, if_outgoing);
-       if (!orig_ifinfo)
+       /* if this neighbor already is our next hop there is nothing
+        * to change
+        */
+       if (router == neigh_node)
                goto out;
 
-       /* don't forward the same seqno twice on one interface */
-       if (orig_ifinfo->last_seqno_forwarded == ntohl(ogm2->seqno))
-               goto out;
+       /* don't consider neighbours with worse throughput.
+        * also switch route if this seqno is BATADV_V_MAX_ORIGDIFF newer than
+        * the last received seqno from our best next hop.
+        */
+       if (router) {
+               router_ifinfo = batadv_neigh_ifinfo_get(router, if_outgoing);
+               neigh_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing);
 
-       /* acquire possibly updated router */
-       router = batadv_orig_router_get(orig_node, if_outgoing);
+               /* if these are not allocated, something is wrong. */
+               if (!router_ifinfo || !neigh_ifinfo)
+                       goto out;
 
-       /* strict rule: forward packets coming from the best next hop only */
-       if (neigh_node != router)
-               goto out;
+               neigh_last_seqno = neigh_ifinfo->bat_v.last_seqno;
+               router_last_seqno = router_ifinfo->bat_v.last_seqno;
+               neigh_seq_diff = neigh_last_seqno - router_last_seqno;
+               router_throughput = router_ifinfo->bat_v.throughput;
+               neigh_throughput = neigh_ifinfo->bat_v.throughput;
 
-       /* only forward for specific interface, not for the default one. */
-       if (if_outgoing != BATADV_IF_DEFAULT) {
-               orig_ifinfo->last_seqno_forwarded = ntohl(ogm2->seqno);
-               batadv_v_ogm_forward(bat_priv, ogm2,
-                                    neigh_ifinfo->bat_v.throughput,
-                                    if_incoming, if_outgoing);
+               if ((neigh_seq_diff < BATADV_OGM_MAX_ORIGDIFF) &&
+                   (router_throughput >= neigh_throughput))
+                       goto out;
        }
 
+       batadv_update_route(bat_priv, orig_node, if_outgoing, neigh_node);
 out:
-       if (orig_ifinfo)
-               batadv_orig_ifinfo_put(orig_ifinfo);
        if (router)
                batadv_neigh_node_put(router);
        if (orig_neigh_router)
                batadv_neigh_node_put(orig_neigh_router);
        if (orig_neigh_node)
                batadv_orig_node_put(orig_neigh_node);
+       if (router_ifinfo)
+               batadv_neigh_ifinfo_put(router_ifinfo);
        if (neigh_ifinfo)
                batadv_neigh_ifinfo_put(neigh_ifinfo);
+
+       return forward;
 }
 
 /**
@@ -592,6 +583,7 @@ batadv_v_ogm_process_per_outif(struct batadv_priv *bat_priv,
                               struct batadv_hard_iface *if_outgoing)
 {
        int seqno_age;
+       bool forward;
 
        /* first, update the metric with according sanity checks */
        seqno_age = batadv_v_ogm_metric_update(bat_priv, ogm2, orig_node,
@@ -610,8 +602,14 @@ batadv_v_ogm_process_per_outif(struct batadv_priv *bat_priv,
                                               ntohs(ogm2->tvlv_len));
 
        /* if the metric update went through, update routes if needed */
-       batadv_v_ogm_route_update(bat_priv, ethhdr, ogm2, orig_node,
-                                 neigh_node, if_incoming, if_outgoing);
+       forward = batadv_v_ogm_route_update(bat_priv, ethhdr, ogm2, orig_node,
+                                           neigh_node, if_incoming,
+                                           if_outgoing);
+
+       /* if the routes have been processed correctly, check and forward */
+       if (forward)
+               batadv_v_ogm_forward(bat_priv, ogm2, orig_node, neigh_node,
+                                    if_incoming, if_outgoing);
 }
 
 /**
@@ -713,9 +711,14 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset,
                if (hard_iface->soft_iface != bat_priv->soft_iface)
                        continue;
 
+               if (!kref_get_unless_zero(&hard_iface->refcount))
+                       continue;
+
                batadv_v_ogm_process_per_outif(bat_priv, ethhdr, ogm_packet,
                                               orig_node, neigh_node,
                                               if_incoming, hard_iface);
+
+               batadv_hardif_put(hard_iface);
        }
        rcu_read_unlock();
 out:
index b56bb000a0abcbb2fe547612d12009a18e69d530..a0c7913837a58af3a293c98720fa8f5480c6450a 100644 (file)
@@ -38,11 +38,11 @@ static void batadv_bitmap_shift_left(unsigned long *seq_bits, s32 n)
  *  the last sequence number
  * @set_mark: whether this packet should be marked in seq_bits
  *
- * Return: 1 if the window was moved (either new or very old),
- *  0 if the window was not moved/shifted.
+ * Return: true if the window was moved (either new or very old),
+ *  false if the window was not moved/shifted.
  */
-int batadv_bit_get_packet(void *priv, unsigned long *seq_bits, s32 seq_num_diff,
-                         int set_mark)
+bool batadv_bit_get_packet(void *priv, unsigned long *seq_bits,
+                          s32 seq_num_diff, int set_mark)
 {
        struct batadv_priv *bat_priv = priv;
 
@@ -52,7 +52,7 @@ int batadv_bit_get_packet(void *priv, unsigned long *seq_bits, s32 seq_num_diff,
        if (seq_num_diff <= 0 && seq_num_diff > -BATADV_TQ_LOCAL_WINDOW_SIZE) {
                if (set_mark)
                        batadv_set_bit(seq_bits, -seq_num_diff);
-               return 0;
+               return false;
        }
 
        /* sequence number is slightly newer, so we shift the window and
@@ -63,7 +63,7 @@ int batadv_bit_get_packet(void *priv, unsigned long *seq_bits, s32 seq_num_diff,
 
                if (set_mark)
                        batadv_set_bit(seq_bits, 0);
-               return 1;
+               return true;
        }
 
        /* sequence number is much newer, probably missed a lot of packets */
@@ -75,7 +75,7 @@ int batadv_bit_get_packet(void *priv, unsigned long *seq_bits, s32 seq_num_diff,
                bitmap_zero(seq_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
                if (set_mark)
                        batadv_set_bit(seq_bits, 0);
-               return 1;
+               return true;
        }
 
        /* received a much older packet. The other host either restarted
@@ -94,5 +94,5 @@ int batadv_bit_get_packet(void *priv, unsigned long *seq_bits, s32 seq_num_diff,
        if (set_mark)
                batadv_set_bit(seq_bits, 0);
 
-       return 1;
+       return true;
 }
index 3e41bb80eb81ac34dce4c3c34fa33a3643b19bdc..0e6e9d09078cf176e7b426615666e5f51def5fbd 100644 (file)
@@ -22,6 +22,7 @@
 
 #include <linux/bitops.h>
 #include <linux/compiler.h>
+#include <linux/stddef.h>
 #include <linux/types.h>
 
 /**
  * @last_seqno: latest sequence number in seq_bits
  * @curr_seqno: sequence number to test for
  *
- * Return: 1 if the corresponding bit in the given seq_bits indicates true
- * and curr_seqno is within range of last_seqno. Otherwise returns 0.
+ * Return: true if the corresponding bit in the given seq_bits indicates true
+ * and curr_seqno is within range of last_seqno. Otherwise returns false.
  */
-static inline int batadv_test_bit(const unsigned long *seq_bits,
-                                 u32 last_seqno, u32 curr_seqno)
+static inline bool batadv_test_bit(const unsigned long *seq_bits,
+                                  u32 last_seqno, u32 curr_seqno)
 {
        s32 diff;
 
        diff = last_seqno - curr_seqno;
        if (diff < 0 || diff >= BATADV_TQ_LOCAL_WINDOW_SIZE)
-               return 0;
+               return false;
        return test_bit(diff, seq_bits) != 0;
 }
 
@@ -55,7 +56,7 @@ static inline void batadv_set_bit(unsigned long *seq_bits, s32 n)
        set_bit(n, seq_bits); /* turn the position on */
 }
 
-int batadv_bit_get_packet(void *priv, unsigned long *seq_bits, s32 seq_num_diff,
-                         int set_mark);
+bool batadv_bit_get_packet(void *priv, unsigned long *seq_bits,
+                          s32 seq_num_diff, int set_mark);
 
 #endif /* _NET_BATMAN_ADV_BITARRAY_H_ */
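
batadv_test_bit() answers whether curr_seqno lies inside the sliding window ending at last_seqno and, if so, whether that packet was already seen. A hypothetical use (BATADV_TQ_LOCAL_WINDOW_SIZE is 64 in batman-adv):

        /* newest seqno seen is 5000; was seqno 4994 already received? */
        if (batadv_test_bit(seq_bits, 5000, 4994))
                /* bit 6 of the window is set: duplicate packet */
                return BATADV_NEIGH_DUP;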
index 0a6c8b824a007a61077771d154462757431e810e..748a9ead7ce50fd65a0a09f2b19eaa035a566327 100644 (file)
@@ -50,6 +50,7 @@
 #include "hash.h"
 #include "originator.h"
 #include "packet.h"
+#include "sysfs.h"
 #include "translation-table.h"
 
 static const u8 batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
@@ -100,10 +101,10 @@ static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
  * @node: list node of the first entry to compare
  * @data2: pointer to the second backbone gateway
  *
- * Return: 1 if the backbones have the same data, 0 otherwise
+ * Return: true if the backbones have the same data, false otherwise
  */
-static int batadv_compare_backbone_gw(const struct hlist_node *node,
-                                     const void *data2)
+static bool batadv_compare_backbone_gw(const struct hlist_node *node,
+                                      const void *data2)
 {
        const void *data1 = container_of(node, struct batadv_bla_backbone_gw,
                                         hash_entry);
@@ -111,23 +112,23 @@ static int batadv_compare_backbone_gw(const struct hlist_node *node,
        const struct batadv_bla_backbone_gw *gw2 = data2;
 
        if (!batadv_compare_eth(gw1->orig, gw2->orig))
-               return 0;
+               return false;
 
        if (gw1->vid != gw2->vid)
-               return 0;
+               return false;
 
-       return 1;
+       return true;
 }
 
 /**
- * batadv_compare_backbone_gw - compare address and vid of two claims
+ * batadv_compare_claim - compare address and vid of two claims
  * @node: list node of the first entry to compare
  * @data2: pointer to the second claims
  *
- * Return: 1 if the claim have the same data, 0 otherwise
+ * Return: true if the claims have the same data, false otherwise
  */
-static int batadv_compare_claim(const struct hlist_node *node,
-                               const void *data2)
+static bool batadv_compare_claim(const struct hlist_node *node,
+                                const void *data2)
 {
        const void *data1 = container_of(node, struct batadv_bla_claim,
                                         hash_entry);
@@ -135,12 +136,12 @@ static int batadv_compare_claim(const struct hlist_node *node,
        const struct batadv_bla_claim *cl2 = data2;
 
        if (!batadv_compare_eth(cl1->addr, cl2->addr))
-               return 0;
+               return false;
 
        if (cl1->vid != cl2->vid)
-               return 0;
+               return false;
 
-       return 1;
+       return true;
 }
 
 /**
@@ -200,9 +201,9 @@ static void batadv_claim_put(struct batadv_bla_claim *claim)
  *
  * Return: claim if found or NULL otherwise.
  */
-static struct batadv_bla_claim
-*batadv_claim_hash_find(struct batadv_priv *bat_priv,
-                       struct batadv_bla_claim *data)
+static struct batadv_bla_claim *
+batadv_claim_hash_find(struct batadv_priv *bat_priv,
+                      struct batadv_bla_claim *data)
 {
        struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
        struct hlist_head *head;
@@ -407,6 +408,14 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
                           ethhdr->h_source, ethhdr->h_dest,
                           BATADV_PRINT_VID(vid));
                break;
+       case BATADV_CLAIM_TYPE_LOOPDETECT:
+               ether_addr_copy(ethhdr->h_source, mac);
+               batadv_dbg(BATADV_DBG_BLA, bat_priv,
+                          "bla_send_claim(): LOOPDETECT of %pM to %pM on vid %d\n",
+                          ethhdr->h_source, ethhdr->h_dest,
+                          BATADV_PRINT_VID(vid));
+
+               break;
        }
 
        if (vid & BATADV_VLAN_HAS_TAG)
@@ -426,6 +435,36 @@ out:
                batadv_hardif_put(primary_if);
 }
 
+/**
+ * batadv_bla_loopdetect_report - worker for reporting the loop
+ * @work: work queue item
+ *
+ * Throws an uevent on behalf of the loopdetect check function, which can't
+ * do so itself because throwing a uevent may sleep and the check runs in
+ * atomic context.
+static void batadv_bla_loopdetect_report(struct work_struct *work)
+{
+       struct batadv_bla_backbone_gw *backbone_gw;
+       struct batadv_priv *bat_priv;
+       char vid_str[6] = { '\0' };
+
+       backbone_gw = container_of(work, struct batadv_bla_backbone_gw,
+                                  report_work);
+       bat_priv = backbone_gw->bat_priv;
+
+       batadv_info(bat_priv->soft_iface,
+                   "Possible loop on VLAN %d detected which can't be handled by BLA - please check your network setup!\n",
+                   BATADV_PRINT_VID(backbone_gw->vid));
+       snprintf(vid_str, sizeof(vid_str), "%d",
+                BATADV_PRINT_VID(backbone_gw->vid));
+       vid_str[sizeof(vid_str) - 1] = 0;
+
+       batadv_throw_uevent(bat_priv, BATADV_UEV_BLA, BATADV_UEV_LOOPDETECT,
+                           vid_str);
+
+       batadv_backbone_gw_put(backbone_gw);
+}
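
The report is bounced to a worker because loop detection runs in the atomic packet-processing path while emitting a uevent may sleep. The producer side of this pattern (a sketch; the actual queueing is added elsewhere in this series) pairs a reference with the queued work, which the worker above then drops:

        /* atomic context: keep the backbone_gw alive for the worker */
        kref_get(&backbone_gw->refcount);
        queue_work(batadv_event_workqueue, &backbone_gw->report_work);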
+
 /**
  * batadv_bla_get_backbone_gw - finds or creates a backbone gateway
  * @bat_priv: the bat priv with all the soft interface information
@@ -464,6 +503,7 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
        atomic_set(&entry->request_sent, 0);
        atomic_set(&entry->wait_periods, 0);
        ether_addr_copy(entry->orig, orig);
+       INIT_WORK(&entry->report_work, batadv_bla_loopdetect_report);
 
        /* one for the hash, one for returning */
        kref_init(&entry->refcount);
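
Note the reference hand-off around report_work: the receive path takes a reference on the backbone_gw before queueing, and the worker drops it after the uevent is out, so the object cannot disappear in between. A rough userspace model of that hand-off, with a pthread standing in for queue_work() and a plain atomic counter in place of kref (illustrative only; build with cc -pthread):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* stand-in for a refcounted backbone_gw with an embedded work item */
    struct gw {
            atomic_int refcount;
    };

    static void gw_put(struct gw *g)
    {
            /* free when the last reference is dropped */
            if (atomic_fetch_sub(&g->refcount, 1) == 1) {
                    printf("freeing gw\n");
                    free(g);
            }
    }

    /* the "report work": runs in a context that is allowed to sleep */
    static void *report_work(void *arg)
    {
            struct gw *g = arg;

            printf("reporting loop (may sleep here)\n");
            gw_put(g); /* drop the reference taken before queueing */
            return NULL;
    }

    int main(void)
    {
            struct gw *g = malloc(sizeof(*g));
            pthread_t worker;

            atomic_init(&g->refcount, 1); /* creation reference */

            /* hotpath side: take a reference, then hand off to the worker */
            atomic_fetch_add(&g->refcount, 1);
            pthread_create(&worker, NULL, report_work, g);

            pthread_join(worker, NULL);
            gw_put(g); /* drop our own reference */
            return 0;
    }
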
@@ -735,22 +775,22 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
  * @backbone_addr: originator address of the sender (Ethernet source MAC)
  * @vid: the VLAN ID of the frame
  *
- * Return: 1 if handled
+ * Return: true if handled
  */
-static int batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
-                                 u8 *backbone_addr, unsigned short vid)
+static bool batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
+                                  u8 *backbone_addr, unsigned short vid)
 {
        struct batadv_bla_backbone_gw *backbone_gw;
        u16 backbone_crc, crc;
 
        if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
-               return 0;
+               return false;
 
        backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
                                                 false);
 
        if (unlikely(!backbone_gw))
-               return 1;
+               return true;
 
        /* handle as ANNOUNCE frame */
        backbone_gw->lasttime = jiffies;
@@ -783,7 +823,7 @@ static int batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
        }
 
        batadv_backbone_gw_put(backbone_gw);
-       return 1;
+       return true;
 }
 
 /**
@@ -794,29 +834,29 @@ static int batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
  * @ethhdr: ethernet header of a packet
  * @vid: the VLAN ID of the frame
  *
- * Return: 1 if handled
+ * Return: true if handled
  */
-static int batadv_handle_request(struct batadv_priv *bat_priv,
-                                struct batadv_hard_iface *primary_if,
-                                u8 *backbone_addr, struct ethhdr *ethhdr,
-                                unsigned short vid)
+static bool batadv_handle_request(struct batadv_priv *bat_priv,
+                                 struct batadv_hard_iface *primary_if,
+                                 u8 *backbone_addr, struct ethhdr *ethhdr,
+                                 unsigned short vid)
 {
        /* check for REQUEST frame */
        if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest))
-               return 0;
+               return false;
 
        /* sanity check, this should not happen on a normal switch,
         * we ignore it in this case.
         */
        if (!batadv_compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
-               return 1;
+               return true;
 
        batadv_dbg(BATADV_DBG_BLA, bat_priv,
                   "handle_request(): REQUEST vid %d (sent by %pM)...\n",
                   BATADV_PRINT_VID(vid), ethhdr->h_source);
 
        batadv_bla_answer_request(bat_priv, primary_if, vid);
-       return 1;
+       return true;
 }
 
 /**
@@ -827,12 +867,12 @@ static int batadv_handle_request(struct batadv_priv *bat_priv,
  * @claim_addr: Client to be unclaimed (ARP sender HW MAC)
  * @vid: the VLAN ID of the frame
  *
- * Return: 1 if handled
+ * Return: true if handled
  */
-static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
-                                struct batadv_hard_iface *primary_if,
-                                u8 *backbone_addr, u8 *claim_addr,
-                                unsigned short vid)
+static bool batadv_handle_unclaim(struct batadv_priv *bat_priv,
+                                 struct batadv_hard_iface *primary_if,
+                                 u8 *backbone_addr, u8 *claim_addr,
+                                 unsigned short vid)
 {
        struct batadv_bla_backbone_gw *backbone_gw;
 
@@ -845,7 +885,7 @@ static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
        backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);
 
        if (!backbone_gw)
-               return 1;
+               return true;
 
        /* this must be an UNCLAIM frame */
        batadv_dbg(BATADV_DBG_BLA, bat_priv,
@@ -854,7 +894,7 @@ static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
 
        batadv_bla_del_claim(bat_priv, claim_addr, vid);
        batadv_backbone_gw_put(backbone_gw);
-       return 1;
+       return true;
 }
 
 /**
@@ -865,12 +905,12 @@ static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
  * @claim_addr: client mac address to be claimed (ARP sender HW MAC)
  * @vid: the VLAN ID of the frame
  *
- * Return: 1 if handled
+ * Return: true if handled
  */
-static int batadv_handle_claim(struct batadv_priv *bat_priv,
-                              struct batadv_hard_iface *primary_if,
-                              u8 *backbone_addr, u8 *claim_addr,
-                              unsigned short vid)
+static bool batadv_handle_claim(struct batadv_priv *bat_priv,
+                               struct batadv_hard_iface *primary_if,
+                               u8 *backbone_addr, u8 *claim_addr,
+                               unsigned short vid)
 {
        struct batadv_bla_backbone_gw *backbone_gw;
 
@@ -880,7 +920,7 @@ static int batadv_handle_claim(struct batadv_priv *bat_priv,
                                                 false);
 
        if (unlikely(!backbone_gw))
-               return 1;
+               return true;
 
        /* this must be a CLAIM frame */
        batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
@@ -891,7 +931,7 @@ static int batadv_handle_claim(struct batadv_priv *bat_priv,
        /* TODO: we could call something like tt_local_del() here. */
 
        batadv_backbone_gw_put(backbone_gw);
-       return 1;
+       return true;
 }
 
 /**
@@ -975,12 +1015,12 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
  * @primary_if: the primary hard interface of this batman soft interface
  * @skb: the frame to be checked
  *
- * Return: 1 if it was a claim frame, otherwise return 0 to
+ * Return: true if it was a claim frame, otherwise return false to
  * tell the callee that it can use the frame on its own.
  */
-static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
-                                   struct batadv_hard_iface *primary_if,
-                                   struct sk_buff *skb)
+static bool batadv_bla_process_claim(struct batadv_priv *bat_priv,
+                                    struct batadv_hard_iface *primary_if,
+                                    struct sk_buff *skb)
 {
        struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
        u8 *hw_src, *hw_dst;
@@ -1011,7 +1051,7 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
                        vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
                                                  &vhdr_buf);
                        if (!vhdr)
-                               return 0;
+                               return false;
 
                        proto = vhdr->h_vlan_encapsulated_proto;
                        headlen += VLAN_HLEN;
@@ -1020,12 +1060,12 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
        }
 
        if (proto != htons(ETH_P_ARP))
-               return 0; /* not a claim frame */
+               return false; /* not a claim frame */
 
        /* this must be an ARP frame. check if it is a claim. */
 
        if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
-               return 0;
+               return false;
 
        /* pskb_may_pull() may have modified the pointers, get ethhdr again */
        ethhdr = eth_hdr(skb);
@@ -1035,13 +1075,13 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
         * IP information
         */
        if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
-               return 0;
+               return false;
        if (arphdr->ar_pro != htons(ETH_P_IP))
-               return 0;
+               return false;
        if (arphdr->ar_hln != ETH_ALEN)
-               return 0;
+               return false;
        if (arphdr->ar_pln != 4)
-               return 0;
+               return false;
 
        hw_src = (u8 *)arphdr + sizeof(struct arphdr);
        hw_dst = hw_src + ETH_ALEN + 4;
@@ -1051,14 +1091,18 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
        /* check if it is a claim frame in general */
        if (memcmp(bla_dst->magic, bla_dst_own->magic,
                   sizeof(bla_dst->magic)) != 0)
-               return 0;
+               return false;
 
        /* check if there is a claim frame encapsulated deeper in (QinQ) and
         * drop that, as this is not supported by BLA but should also not be
         * sent via the mesh.
         */
        if (vlan_depth > 1)
-               return 1;
+               return true;
+
+       /* Let the loopdetect frames pass on the mesh in any case. */
+       if (bla_dst->type == BATADV_CLAIM_TYPE_LOOPDETECT)
+               return false;
 
        /* check if it is a claim frame. */
        ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
@@ -1070,7 +1114,7 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
                           hw_dst);
 
        if (ret < 2)
-               return ret;
+               return !!ret;
 
        /* become a backbone gw ourselves on this vlan if not happened yet */
        batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
@@ -1080,30 +1124,30 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
        case BATADV_CLAIM_TYPE_CLAIM:
                if (batadv_handle_claim(bat_priv, primary_if, hw_src,
                                        ethhdr->h_source, vid))
-                       return 1;
+                       return true;
                break;
        case BATADV_CLAIM_TYPE_UNCLAIM:
                if (batadv_handle_unclaim(bat_priv, primary_if,
                                          ethhdr->h_source, hw_src, vid))
-                       return 1;
+                       return true;
                break;
 
        case BATADV_CLAIM_TYPE_ANNOUNCE:
                if (batadv_handle_announce(bat_priv, hw_src, ethhdr->h_source,
                                           vid))
-                       return 1;
+                       return true;
                break;
        case BATADV_CLAIM_TYPE_REQUEST:
                if (batadv_handle_request(bat_priv, primary_if, hw_src, ethhdr,
                                          vid))
-                       return 1;
+                       return true;
                break;
        }
 
        batadv_dbg(BATADV_DBG_BLA, bat_priv,
                   "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
                   ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src, hw_dst);
-       return 1;
+       return true;
 }
 
 /**
@@ -1264,6 +1308,26 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
        }
 }
 
+/**
+ * batadv_bla_send_loopdetect - send a loopdetect frame
+ * @bat_priv: the bat priv with all the soft interface information
+ * @backbone_gw: the backbone gateway for which a loop should be detected
+ *
+ * To detect loops that the bridge loop avoidance can't handle, send a loop
+ * detection packet on the backbone. Unlike other BLA frames, this frame will
+ * be allowed on the mesh by other nodes. If it is received on the mesh, this
+ * indicates that there is a loop.
+ */
+static void
+batadv_bla_send_loopdetect(struct batadv_priv *bat_priv,
+                          struct batadv_bla_backbone_gw *backbone_gw)
+{
+       batadv_dbg(BATADV_DBG_BLA, bat_priv, "Send loopdetect frame for vid %d\n",
+                  backbone_gw->vid);
+       batadv_bla_send_claim(bat_priv, bat_priv->bla.loopdetect_addr,
+                             backbone_gw->vid, BATADV_CLAIM_TYPE_LOOPDETECT);
+}
+
 /**
  * batadv_bla_status_update - purge bla interfaces if necessary
  * @net_dev: the soft interface net device
@@ -1301,9 +1365,10 @@ static void batadv_bla_periodic_work(struct work_struct *work)
        struct batadv_bla_backbone_gw *backbone_gw;
        struct batadv_hashtable *hash;
        struct batadv_hard_iface *primary_if;
+       bool send_loopdetect = false;
        int i;
 
-       delayed_work = container_of(work, struct delayed_work, work);
+       delayed_work = to_delayed_work(work);
        priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
        bat_priv = container_of(priv_bla, struct batadv_priv, bla);
        primary_if = batadv_primary_if_get_selected(bat_priv);
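
The container_of(work, struct delayed_work, work) expression replaced here (and in the DAT and NC workers below) is exactly what the to_delayed_work() helper wraps. In plain C the pattern reduces to pointer arithmetic with offsetof():

    #include <stddef.h>
    #include <stdio.h>

    /* container_of: recover the enclosing struct from a member pointer */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work {
            int pending;
    };

    struct delayed_work {
            struct work work;
            long timer;
    };

    /* to_delayed_work() is container_of() packaged as a typed helper */
    static struct delayed_work *to_delayed_work(struct work *w)
    {
            return container_of(w, struct delayed_work, work);
    }

    int main(void)
    {
            struct delayed_work dw = { { 0 }, 42 };
            struct work *w = &dw.work; /* what the callback receives */

            printf("timer = %ld\n", to_delayed_work(w)->timer);
            return 0;
    }
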
@@ -1316,6 +1381,22 @@ static void batadv_bla_periodic_work(struct work_struct *work)
        if (!atomic_read(&bat_priv->bridge_loop_avoidance))
                goto out;
 
+       if (atomic_dec_and_test(&bat_priv->bla.loopdetect_next)) {
+               /* set a new random mac address for the next bridge loop
+                * detection frames. Set the locally administered bit to avoid
+                * collisions with users' MAC addresses.
+                */
+               random_ether_addr(bat_priv->bla.loopdetect_addr);
+               bat_priv->bla.loopdetect_addr[0] = 0xba;
+               bat_priv->bla.loopdetect_addr[1] = 0xbe;
+               bat_priv->bla.loopdetect_lasttime = jiffies;
+               atomic_set(&bat_priv->bla.loopdetect_next,
+                          BATADV_BLA_LOOPDETECT_PERIODS);
+
+               /* mark for sending loop detect on all VLANs */
+               send_loopdetect = true;
+       }
+
        hash = bat_priv->bla.backbone_hash;
        if (!hash)
                goto out;
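
Every BATADV_BLA_LOOPDETECT_PERIODS periods, a fresh random source MAC is rolled for the probe frames. The ba:be prefix is more than a marker: 0xba has the locally administered bit (0x02) set and the multicast bit (0x01) clear, so the address can never collide with a vendor-assigned unicast MAC. A standalone sketch of the generation step, with rand() standing in for the kernel's random_ether_addr():

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    #define ETH_ALEN 6

    /* fill addr with random bytes, then force the ba:be prefix */
    static void make_loopdetect_addr(unsigned char addr[ETH_ALEN])
    {
            int i;

            for (i = 0; i < ETH_ALEN; i++)
                    addr[i] = rand() & 0xff;

            /* 0xba = 10111010b: unicast (bit 0 clear) and locally
             * administered (bit 1 set)
             */
            addr[0] = 0xba;
            addr[1] = 0xbe;
    }

    int main(void)
    {
            unsigned char addr[ETH_ALEN];

            srand(time(NULL));
            make_loopdetect_addr(addr);
            printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
                   addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
            return 0;
    }
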
@@ -1332,6 +1413,9 @@ static void batadv_bla_periodic_work(struct work_struct *work)
                        backbone_gw->lasttime = jiffies;
 
                        batadv_bla_send_announce(bat_priv, backbone_gw);
+                       if (send_loopdetect)
+                               batadv_bla_send_loopdetect(bat_priv,
+                                                          backbone_gw);
 
                        /* request_sent is only set after creation to avoid
                         * problems when we are not yet known as backbone gw
@@ -1405,6 +1489,9 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
                bat_priv->bla.bcast_duplist[i].entrytime = entrytime;
        bat_priv->bla.bcast_duplist_curr = 0;
 
+       atomic_set(&bat_priv->bla.loopdetect_next,
+                  BATADV_BLA_LOOPDETECT_PERIODS);
+
        if (bat_priv->bla.claim_hash)
                return 0;
 
@@ -1442,15 +1529,16 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
  * sent by another host, drop it. We allow equal packets from
  * the same host however as this might be intended.
  *
- * Return: 1 if a packet is in the duplicate list, 0 otherwise.
+ * Return: true if a packet is in the duplicate list, false otherwise.
  */
-int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
-                                  struct sk_buff *skb)
+bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
+                                   struct sk_buff *skb)
 {
-       int i, curr, ret = 0;
+       int i, curr;
        __be32 crc;
        struct batadv_bcast_packet *bcast_packet;
        struct batadv_bcast_duplist_entry *entry;
+       bool ret = false;
 
        bcast_packet = (struct batadv_bcast_packet *)skb->data;
 
@@ -1478,9 +1566,9 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
                        continue;
 
                /* this entry seems to match: same crc, not too old,
-                * and from another gw. therefore return 1 to forbid it.
+                * and from another gw. therefore return true to forbid it.
                 */
-               ret = 1;
+               ret = true;
                goto out;
        }
        /* not found, add a new entry (overwrite the oldest entry)
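
The duplicate list is a small ring buffer: expired entries are skipped, a matching CRC from the same gateway is tolerated, and a match from a different gateway marks the broadcast as a duplicate. A simplified userspace sketch of that logic, with time() in place of jiffies and a bare unsigned int in place of the skb payload CRC:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>
    #include <time.h>

    #define DUPLIST_SIZE    16
    #define DUPLIST_TIMEOUT 1 /* seconds; batman-adv uses 500 ms */

    struct dup_entry {
            unsigned int crc;
            time_t entrytime;
            unsigned char orig[6];
    };

    static struct dup_entry duplist[DUPLIST_SIZE];
    static int duplist_curr;

    /* true if an equal packet from ANOTHER gateway was seen recently */
    static bool check_dup(unsigned int crc, const unsigned char *orig)
    {
            time_t now = time(NULL);
            int i, curr;

            for (i = 0; i < DUPLIST_SIZE; i++) {
                    struct dup_entry *e = &duplist[i];

                    if (now - e->entrytime > DUPLIST_TIMEOUT)
                            continue; /* entry expired */
                    if (e->crc != crc)
                            continue; /* different payload */
                    if (memcmp(e->orig, orig, 6) == 0)
                            continue; /* same host: allowed */
                    return true; /* duplicate from another gateway */
            }

            /* not found: overwrite the oldest slot, ring-buffer style */
            curr = (duplist_curr + DUPLIST_SIZE - 1) % DUPLIST_SIZE;
            duplist[curr].crc = crc;
            duplist[curr].entrytime = now;
            memcpy(duplist[curr].orig, orig, 6);
            duplist_curr = curr;
            return false;
    }

    int main(void)
    {
            unsigned char gw1[6] = { 1, 2, 3, 4, 5, 6 };
            unsigned char gw2[6] = { 6, 5, 4, 3, 2, 1 };

            printf("%d\n", check_dup(0xabcd, gw1)); /* 0: first sighting */
            printf("%d\n", check_dup(0xabcd, gw1)); /* 0: same gateway */
            printf("%d\n", check_dup(0xabcd, gw2)); /* 1: duplicate */
            return 0;
    }
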
@@ -1546,21 +1634,21 @@ bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
  * @orig_node: the orig_node of the frame
  * @hdr_size: maximum length of the frame
  *
- * Return: 1 if the orig_node is also a gateway on the soft interface, otherwise
- * it returns 0.
+ * Return: true if the orig_node is also a gateway on the soft interface,
+ * otherwise it returns false.
  */
-int batadv_bla_is_backbone_gw(struct sk_buff *skb,
-                             struct batadv_orig_node *orig_node, int hdr_size)
+bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
+                              struct batadv_orig_node *orig_node, int hdr_size)
 {
        struct batadv_bla_backbone_gw *backbone_gw;
        unsigned short vid;
 
        if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
-               return 0;
+               return false;
 
        /* first, find out the vid. */
        if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
-               return 0;
+               return false;
 
        vid = batadv_get_vid(skb, hdr_size);
 
@@ -1568,14 +1656,14 @@ int batadv_bla_is_backbone_gw(struct sk_buff *skb,
        backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv,
                                                orig_node->orig, vid);
        if (!backbone_gw)
-               return 0;
+               return false;
 
        batadv_backbone_gw_put(backbone_gw);
-       return 1;
+       return true;
 }
 
 /**
- * batadv_bla_init - free all bla structures
+ * batadv_bla_free - free all bla structures
  * @bat_priv: the bat priv with all the soft interface information
  *
  * for softinterface free or module unload
@@ -1601,6 +1689,55 @@ void batadv_bla_free(struct batadv_priv *bat_priv)
                batadv_hardif_put(primary_if);
 }
 
+/**
+ * batadv_bla_loopdetect_check - check and handle a detected loop
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the packet to check
+ * @primary_if: interface where the request came on
+ * @vid: the VLAN ID of the frame
+ *
+ * Checks if this packet is a loop detect frame which has been sent by us,
+ * throws an uevent and logs the event if that is the case.
+ *
+ * Return: true if it is a loop detect frame which is to be dropped, false
+ * otherwise.
+ */
+static bool
+batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
+                           struct batadv_hard_iface *primary_if,
+                           unsigned short vid)
+{
+       struct batadv_bla_backbone_gw *backbone_gw;
+       struct ethhdr *ethhdr;
+
+       ethhdr = eth_hdr(skb);
+
+       /* Only check for the MAC address and skip more checks here for
+        * performance reasons - this function is on the hotpath, after all.
+        */
+       if (!batadv_compare_eth(ethhdr->h_source,
+                               bat_priv->bla.loopdetect_addr))
+               return false;
+
+       /* If the packet came too late, don't forward it on the mesh
+        * but don't consider it a loop. It might be a coincidence.
+        */
+       if (batadv_has_timed_out(bat_priv->bla.loopdetect_lasttime,
+                                BATADV_BLA_LOOPDETECT_TIMEOUT))
+               return true;
+
+       backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
+                                                primary_if->net_dev->dev_addr,
+                                                vid, true);
+       if (unlikely(!backbone_gw))
+               return true;
+
+       queue_work(batadv_event_workqueue, &backbone_gw->report_work);
+       /* backbone_gw is unreferenced in the report work function */
+
+       return true;
+}
+
 /**
  * batadv_bla_rx - check packets coming from the mesh.
  * @bat_priv: the bat priv with all the soft interface information
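
A loopdetect frame that arrives later than BATADV_BLA_LOOPDETECT_TIMEOUT (3 seconds) after it was sent is still swallowed, but no longer counted as a loop. The timeout test is the usual wrap-safe jiffies idiom; a userspace sketch with a monotonic millisecond clock standing in for jiffies:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    /* monotonic milliseconds, a userspace stand-in for jiffies */
    static unsigned long now_ms(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return ts.tv_sec * 1000UL + ts.tv_nsec / 1000000UL;
    }

    /* same shape as batadv_has_timed_out(): unsigned subtraction keeps
     * the comparison correct even when the counter wraps around
     */
    static bool has_timed_out(unsigned long timestamp, unsigned long timeout)
    {
            return (now_ms() - timestamp) > timeout;
    }

    int main(void)
    {
            unsigned long lasttime = now_ms();

            /* 3000 ms mirrors BATADV_BLA_LOOPDETECT_TIMEOUT */
            printf("timed out: %d\n", has_timed_out(lasttime, 3000)); /* 0 */
            printf("timed out: %d\n", has_timed_out(lasttime - 4000, 3000)); /* 1 */
            return 0;
    }
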
@@ -1614,16 +1751,16 @@ void batadv_bla_free(struct batadv_priv *bat_priv)
  *
  * in these cases, the skb is further handled by this function
  *
- * Return: 1 if handled, otherwise it returns 0 and the caller shall further
- * process the skb.
+ * Return: true if handled, otherwise it returns false and the caller shall
+ * further process the skb.
  */
-int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
-                 unsigned short vid, bool is_bcast)
+bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
+                  unsigned short vid, bool is_bcast)
 {
        struct ethhdr *ethhdr;
        struct batadv_bla_claim search_claim, *claim = NULL;
        struct batadv_hard_iface *primary_if;
-       int ret;
+       bool ret;
 
        ethhdr = eth_hdr(skb);
 
@@ -1634,6 +1771,9 @@ int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
        if (!atomic_read(&bat_priv->bridge_loop_avoidance))
                goto allow;
 
+       if (batadv_bla_loopdetect_check(bat_priv, skb, primary_if, vid))
+               goto handled;
+
        if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
                /* don't allow broadcasts while requests are in flight */
                if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
@@ -1682,12 +1822,12 @@ int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
        }
 allow:
        batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
-       ret = 0;
+       ret = false;
        goto out;
 
 handled:
        kfree_skb(skb);
-       ret = 1;
+       ret = true;
 
 out:
        if (primary_if)
@@ -1711,16 +1851,16 @@ out:
  *
  * This call might reallocate skb data.
  *
- * Return: 1 if handled, otherwise it returns 0 and the caller shall further
- * process the skb.
+ * Return: true if handled, otherwise it returns false and the caller shall
+ * further process the skb.
  */
-int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
-                 unsigned short vid)
+bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
+                  unsigned short vid)
 {
        struct ethhdr *ethhdr;
        struct batadv_bla_claim search_claim, *claim = NULL;
        struct batadv_hard_iface *primary_if;
-       int ret = 0;
+       bool ret = false;
 
        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if)
@@ -1774,10 +1914,10 @@ int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
        }
 allow:
        batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
-       ret = 0;
+       ret = false;
        goto out;
 handled:
-       ret = 1;
+       ret = true;
 out:
        if (primary_if)
                batadv_hardif_put(primary_if);
@@ -1815,8 +1955,8 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
                   "Claims announced for the mesh %s (orig %pM, group id %#.4x)\n",
                   net_dev->name, primary_addr,
                   ntohs(bat_priv->bla.claim_dest.group));
-       seq_printf(seq, "   %-17s    %-5s    %-17s [o] (%-6s)\n",
-                  "Client", "VID", "Originator", "CRC");
+       seq_puts(seq,
+                "   Client               VID      Originator        [o] (CRC   )\n");
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
@@ -1873,8 +2013,7 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
                   "Backbones announced for the mesh %s (orig %pM, group id %#.4x)\n",
                   net_dev->name, primary_addr,
                   ntohs(bat_priv->bla.claim_dest.group));
-       seq_printf(seq, "   %-17s    %-5s %-9s (%-6s)\n",
-                  "Originator", "VID", "last seen", "CRC");
+       seq_puts(seq, "   Originator           VID   last seen (CRC   )\n");
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
index 579f0fa6fe6a47c7fd1c7bba73dd4496cec24ca7..0f01daeb359e74ae2f69ae678e97a4c09c767511 100644 (file)
@@ -27,19 +27,20 @@ struct seq_file;
 struct sk_buff;
 
 #ifdef CONFIG_BATMAN_ADV_BLA
-int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
-                 unsigned short vid, bool is_bcast);
-int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
-                 unsigned short vid);
-int batadv_bla_is_backbone_gw(struct sk_buff *skb,
-                             struct batadv_orig_node *orig_node, int hdr_size);
+bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
+                  unsigned short vid, bool is_bcast);
+bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
+                  unsigned short vid);
+bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
+                              struct batadv_orig_node *orig_node,
+                              int hdr_size);
 int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset);
 int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq,
                                             void *offset);
 bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
                                    unsigned short vid);
-int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
-                                  struct sk_buff *skb);
+bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
+                                   struct sk_buff *skb);
 void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
                                    struct batadv_hard_iface *primary_if,
                                    struct batadv_hard_iface *oldif);
@@ -50,24 +51,24 @@ void batadv_bla_free(struct batadv_priv *bat_priv);
 #define BATADV_BLA_CRC_INIT    0
 #else /* ifdef CONFIG_BATMAN_ADV_BLA */
 
-static inline int batadv_bla_rx(struct batadv_priv *bat_priv,
-                               struct sk_buff *skb, unsigned short vid,
-                               bool is_bcast)
+static inline bool batadv_bla_rx(struct batadv_priv *bat_priv,
+                                struct sk_buff *skb, unsigned short vid,
+                                bool is_bcast)
 {
-       return 0;
+       return false;
 }
 
-static inline int batadv_bla_tx(struct batadv_priv *bat_priv,
-                               struct sk_buff *skb, unsigned short vid)
+static inline bool batadv_bla_tx(struct batadv_priv *bat_priv,
+                                struct sk_buff *skb, unsigned short vid)
 {
-       return 0;
+       return false;
 }
 
-static inline int batadv_bla_is_backbone_gw(struct sk_buff *skb,
-                                           struct batadv_orig_node *orig_node,
-                                           int hdr_size)
+static inline bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
+                                            struct batadv_orig_node *orig_node,
+                                            int hdr_size)
 {
-       return 0;
+       return false;
 }
 
 static inline int batadv_bla_claim_table_seq_print_text(struct seq_file *seq,
@@ -88,11 +89,11 @@ static inline bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv,
        return false;
 }
 
-static inline int
+static inline bool
 batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
                               struct sk_buff *skb)
 {
-       return 0;
+       return false;
 }
 
 static inline void
index 48253cf8341bd82d70d0cd218571741b9257fa37..952900466d8829bc158357ea06caf9432718c0c9 100644 (file)
@@ -134,7 +134,7 @@ static int batadv_log_release(struct inode *inode, struct file *file)
        return 0;
 }
 
-static int batadv_log_empty(struct batadv_priv_debug_log *debug_log)
+static bool batadv_log_empty(struct batadv_priv_debug_log *debug_log)
 {
        return !(debug_log->log_start - debug_log->log_end);
 }
@@ -365,14 +365,17 @@ static int batadv_nc_nodes_open(struct inode *inode, struct file *file)
 
 #define BATADV_DEBUGINFO(_name, _mode, _open)          \
 struct batadv_debuginfo batadv_debuginfo_##_name = {   \
-       .attr = { .name = __stringify(_name),           \
-                 .mode = _mode, },                     \
-       .fops = { .owner = THIS_MODULE,                 \
-                 .open = _open,                        \
-                 .read = seq_read,                     \
-                 .llseek = seq_lseek,                  \
-                 .release = single_release,            \
-               }                                       \
+       .attr = {                                       \
+               .name = __stringify(_name),             \
+               .mode = _mode,                          \
+       },                                              \
+       .fops = {                                       \
+               .owner = THIS_MODULE,                   \
+               .open = _open,                          \
+               .read = seq_read,                       \
+               .llseek = seq_lseek,                    \
+               .release = single_release,              \
+       },                                              \
 }
 
 /* the following attributes are general and therefore they will be directly
index e96d7c745b4a1de6444284f38207ed599925ac2e..278800a99c69458c083da635bd39bcd19e788842 100644 (file)
@@ -152,7 +152,7 @@ static void batadv_dat_purge(struct work_struct *work)
        struct batadv_priv_dat *priv_dat;
        struct batadv_priv *bat_priv;
 
-       delayed_work = container_of(work, struct delayed_work, work);
+       delayed_work = to_delayed_work(work);
        priv_dat = container_of(delayed_work, struct batadv_priv_dat, work);
        bat_priv = container_of(priv_dat, struct batadv_priv, dat);
 
@@ -165,14 +165,14 @@ static void batadv_dat_purge(struct work_struct *work)
  * @node: node in the local table
  * @data2: second object to compare the node to
  *
- * Return: 1 if the two entries are the same, 0 otherwise.
+ * Return: true if the two entries are the same, false otherwise.
  */
-static int batadv_compare_dat(const struct hlist_node *node, const void *data2)
+static bool batadv_compare_dat(const struct hlist_node *node, const void *data2)
 {
        const void *data1 = container_of(node, struct batadv_dat_entry,
                                         hash_entry);
 
-       return memcmp(data1, data2, sizeof(__be32)) == 0 ? 1 : 0;
+       return memcmp(data1, data2, sizeof(__be32)) == 0;
 }
 
 /**
@@ -568,6 +568,7 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
  * be sent to
  * @bat_priv: the bat priv with all the soft interface information
  * @ip_dst: ipv4 to look up in the DHT
+ * @vid: VLAN identifier
  *
  * An originator O is selected if and only if its DHT_ID value is one of three
  * closest values (from the LEFT, with wrap around if needed) then the hash
@@ -576,7 +577,8 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
  * Return: the candidate array of size BATADV_DAT_CANDIDATE_NUM.
  */
 static struct batadv_dat_candidate *
-batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
+batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst,
+                            unsigned short vid)
 {
        int select;
        batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key;
@@ -592,7 +594,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
                return NULL;
 
        dat.ip = ip_dst;
-       dat.vid = 0;
+       dat.vid = vid;
        ip_key = (batadv_dat_addr_t)batadv_hash_dat(&dat,
                                                    BATADV_DAT_ADDR_MAX);
 
@@ -612,6 +614,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: payload to send
  * @ip: the DHT key
+ * @vid: VLAN identifier
  * @packet_subtype: unicast4addr packet subtype to use
  *
  * This function copies the skb with pskb_copy() and is sent as unicast packet
@@ -622,7 +625,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
  */
 static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
                                 struct sk_buff *skb, __be32 ip,
-                                int packet_subtype)
+                                unsigned short vid, int packet_subtype)
 {
        int i;
        bool ret = false;
@@ -631,7 +634,7 @@ static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
        struct sk_buff *tmp_skb;
        struct batadv_dat_candidate *cand;
 
-       cand = batadv_dat_select_candidates(bat_priv, ip);
+       cand = batadv_dat_select_candidates(bat_priv, ip, vid);
        if (!cand)
                goto out;
 
@@ -717,7 +720,7 @@ void batadv_dat_status_update(struct net_device *net_dev)
 }
 
 /**
- * batadv_gw_tvlv_ogm_handler_v1 - process incoming dat tvlv container
+ * batadv_dat_tvlv_ogm_handler_v1 - process incoming dat tvlv container
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the orig_node of the ogm
  * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
@@ -814,8 +817,8 @@ int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset)
                goto out;
 
        seq_printf(seq, "Distributed ARP Table (%s):\n", net_dev->name);
-       seq_printf(seq, "          %-7s          %-9s %4s %11s\n", "IPv4",
-                  "MAC", "VID", "last-seen");
+       seq_puts(seq,
+                "          IPv4             MAC        VID   last-seen\n");
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
@@ -1022,7 +1025,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
                ret = true;
        } else {
                /* Send the request to the DHT */
-               ret = batadv_dat_send_data(bat_priv, skb, ip_dst,
+               ret = batadv_dat_send_data(bat_priv, skb, ip_dst, vid,
                                           BATADV_P_DAT_DHT_GET);
        }
 out:
@@ -1150,8 +1153,8 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
        /* Send the ARP reply to the candidates for both the IP addresses that
         * the node obtained from the ARP reply
         */
-       batadv_dat_send_data(bat_priv, skb, ip_src, BATADV_P_DAT_DHT_PUT);
-       batadv_dat_send_data(bat_priv, skb, ip_dst, BATADV_P_DAT_DHT_PUT);
+       batadv_dat_send_data(bat_priv, skb, ip_src, vid, BATADV_P_DAT_DHT_PUT);
+       batadv_dat_send_data(bat_priv, skb, ip_dst, vid, BATADV_P_DAT_DHT_PUT);
 }
 
 /**
index e6956d0746a20f91c51d256c231ddfe4f2626c26..65536db1bff7642ea261493bcceceeab067ba2c4 100644 (file)
@@ -407,8 +407,8 @@ static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
                                          unsigned int mtu)
 {
        struct sk_buff *skb_fragment;
-       unsigned header_size = sizeof(*frag_head);
-       unsigned fragment_size = mtu - header_size;
+       unsigned int header_size = sizeof(*frag_head);
+       unsigned int fragment_size = mtu - header_size;
 
        skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN);
        if (!skb_fragment)
@@ -444,15 +444,15 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
        struct batadv_hard_iface *primary_if = NULL;
        struct batadv_frag_packet frag_header;
        struct sk_buff *skb_fragment;
-       unsigned mtu = neigh_node->if_incoming->net_dev->mtu;
-       unsigned header_size = sizeof(frag_header);
-       unsigned max_fragment_size, max_packet_size;
+       unsigned int mtu = neigh_node->if_incoming->net_dev->mtu;
+       unsigned int header_size = sizeof(frag_header);
+       unsigned int max_fragment_size, max_packet_size;
        bool ret = false;
 
        /* To avoid merge and refragmentation at next-hops we never send
         * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
         */
-       mtu = min_t(unsigned, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
+       mtu = min_t(unsigned int, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
        max_fragment_size = mtu - header_size;
        max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;
 
index c59aff5ccac8a63f191a669eafcfdba7a270fd75..5839c569f769ef7137fca9b92915799b78e21123 100644 (file)
@@ -135,8 +135,8 @@ static void batadv_gw_select(struct batadv_priv *bat_priv,
 
        spin_lock_bh(&bat_priv->gw.list_lock);
 
-       if (new_gw_node && !kref_get_unless_zero(&new_gw_node->refcount))
-               new_gw_node = NULL;
+       if (new_gw_node)
+               kref_get(&new_gw_node->refcount);
 
        curr_gw_node = rcu_dereference_protected(bat_priv->gw.curr_gw, 1);
        rcu_assign_pointer(bat_priv->gw.curr_gw, new_gw_node);
@@ -440,15 +440,11 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
        if (gateway->bandwidth_down == 0)
                return;
 
-       if (!kref_get_unless_zero(&orig_node->refcount))
-               return;
-
        gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC);
-       if (!gw_node) {
-               batadv_orig_node_put(orig_node);
+       if (!gw_node)
                return;
-       }
 
+       kref_get(&orig_node->refcount);
        INIT_HLIST_NODE(&gw_node->list);
        gw_node->orig_node = orig_node;
        gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
index b22b2775a0a5ff706c17ea06c39c9bb092da1f9a..8c2f39962fa591ab02afacb2338feaefd698779f 100644 (file)
@@ -36,7 +36,6 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
-#include <net/net_namespace.h>
 
 #include "bridge_loop_avoidance.h"
 #include "debugfs.h"
@@ -121,6 +120,7 @@ static bool batadv_mutual_parents(const struct net_device *dev1,
 static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
 {
        struct net_device *parent_dev;
+       struct net *net = dev_net(net_dev);
        bool ret;
 
        /* check if this is a batman-adv mesh interface */
@@ -133,7 +133,7 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
                return false;
 
        /* recurse over the parent device */
-       parent_dev = __dev_get_by_index(&init_net, dev_get_iflink(net_dev));
+       parent_dev = __dev_get_by_index(net, dev_get_iflink(net_dev));
        /* if we got a NULL parent_dev there is something broken.. */
        if (WARN(!parent_dev, "Cannot find parent device"))
                return false;
@@ -146,22 +146,22 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
        return ret;
 }
 
-static int batadv_is_valid_iface(const struct net_device *net_dev)
+static bool batadv_is_valid_iface(const struct net_device *net_dev)
 {
        if (net_dev->flags & IFF_LOOPBACK)
-               return 0;
+               return false;
 
        if (net_dev->type != ARPHRD_ETHER)
-               return 0;
+               return false;
 
        if (net_dev->addr_len != ETH_ALEN)
-               return 0;
+               return false;
 
        /* no batman over batman */
        if (batadv_is_on_batman_iface(net_dev))
-               return 0;
+               return false;
 
-       return 1;
+       return true;
 }
 
 /**
@@ -236,8 +236,8 @@ static void batadv_primary_if_select(struct batadv_priv *bat_priv,
 
        ASSERT_RTNL();
 
-       if (new_hard_iface && !kref_get_unless_zero(&new_hard_iface->refcount))
-               new_hard_iface = NULL;
+       if (new_hard_iface)
+               kref_get(&new_hard_iface->refcount);
 
        curr_hard_iface = rcu_dereference_protected(bat_priv->primary_if, 1);
        rcu_assign_pointer(bat_priv->primary_if, new_hard_iface);
@@ -407,6 +407,9 @@ batadv_hardif_activate_interface(struct batadv_hard_iface *hard_iface)
 
        batadv_update_min_mtu(hard_iface->soft_iface);
 
+       if (bat_priv->bat_algo_ops->bat_iface_activate)
+               bat_priv->bat_algo_ops->bat_iface_activate(hard_iface);
+
 out:
        if (primary_if)
                batadv_hardif_put(primary_if);
@@ -453,7 +456,7 @@ static int batadv_master_del_slave(struct batadv_hard_iface *slave,
 }
 
 int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
-                                  const char *iface_name)
+                                  struct net *net, const char *iface_name)
 {
        struct batadv_priv *bat_priv;
        struct net_device *soft_iface, *master;
@@ -464,13 +467,12 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
        if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
                goto out;
 
-       if (!kref_get_unless_zero(&hard_iface->refcount))
-               goto out;
+       kref_get(&hard_iface->refcount);
 
-       soft_iface = dev_get_by_name(&init_net, iface_name);
+       soft_iface = dev_get_by_name(net, iface_name);
 
        if (!soft_iface) {
-               soft_iface = batadv_softif_create(iface_name);
+               soft_iface = batadv_softif_create(net, iface_name);
 
                if (!soft_iface) {
                        ret = -ENOMEM;
@@ -519,6 +521,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
                goto err_upper;
        }
 
+       kref_get(&hard_iface->refcount);
        hard_iface->batman_adv_ptype.type = ethertype;
        hard_iface->batman_adv_ptype.func = batadv_batman_skb_recv;
        hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
@@ -572,8 +575,7 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct batadv_hard_iface *primary_if = NULL;
 
-       if (hard_iface->if_status == BATADV_IF_ACTIVE)
-               batadv_hardif_deactivate_interface(hard_iface);
+       batadv_hardif_deactivate_interface(hard_iface);
 
        if (hard_iface->if_status != BATADV_IF_INACTIVE)
                goto out;
@@ -581,6 +583,7 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
        batadv_info(hard_iface->soft_iface, "Removing interface: %s\n",
                    hard_iface->net_dev->name);
        dev_remove_pack(&hard_iface->batman_adv_ptype);
+       batadv_hardif_put(hard_iface);
 
        bat_priv->num_ifaces--;
        batadv_orig_hash_del_if(hard_iface, bat_priv->num_ifaces);
@@ -650,8 +653,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
 
        ASSERT_RTNL();
 
-       ret = batadv_is_valid_iface(net_dev);
-       if (ret != 1)
+       if (!batadv_is_valid_iface(net_dev))
                goto out;
 
        dev_hold(net_dev);
index d74f1983f33e1d430dbc332da510f6d24247aab7..a76724d369bfda14a772b56af2bff2b9cb836e80 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/types.h>
 
 struct net_device;
+struct net;
 
 enum batadv_hard_if_state {
        BATADV_IF_NOT_IN_USE,
@@ -55,7 +56,7 @@ bool batadv_is_wifi_iface(int ifindex);
 struct batadv_hard_iface*
 batadv_hardif_get_by_netdev(const struct net_device *net_dev);
 int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
-                                  const char *iface_name);
+                                  struct net *net, const char *iface_name);
 void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
                                     enum batadv_hard_if_cleanup autodel);
 void batadv_hardif_remove_interfaces(void);
index 9bb57b87447cc0ca43c8c9bf31625c3a1f00303c..cbbf87075f06fc7c68103a9a65147748b706e40b 100644 (file)
@@ -32,10 +32,10 @@ struct lock_class_key;
 /* callback to a compare function. Should compare the data of 2 elements
  * by their keys
  *
- * Return: 0 if same and not 0 if not same
+ * Return: true if same and false if not same
  */
-typedef int (*batadv_hashdata_compare_cb)(const struct hlist_node *,
-                                         const void *);
+typedef bool (*batadv_hashdata_compare_cb)(const struct hlist_node *,
+                                          const void *);
 
 /* the hashfunction
  *
index 14d0013b387ed4cfbf797884857414ae8bf0e7f7..777aea10cd8fcb22ce25861ba39e52f68dbcc602 100644 (file)
@@ -104,25 +104,21 @@ static int batadv_socket_open(struct inode *inode, struct file *file)
 
 static int batadv_socket_release(struct inode *inode, struct file *file)
 {
-       struct batadv_socket_client *socket_client = file->private_data;
-       struct batadv_socket_packet *socket_packet;
-       struct list_head *list_pos, *list_pos_tmp;
+       struct batadv_socket_client *client = file->private_data;
+       struct batadv_socket_packet *packet, *tmp;
 
-       spin_lock_bh(&socket_client->lock);
+       spin_lock_bh(&client->lock);
 
        /* for all packets in the queue ... */
-       list_for_each_safe(list_pos, list_pos_tmp, &socket_client->queue_list) {
-               socket_packet = list_entry(list_pos,
-                                          struct batadv_socket_packet, list);
-
-               list_del(list_pos);
-               kfree(socket_packet);
+       list_for_each_entry_safe(packet, tmp, &client->queue_list, list) {
+               list_del(&packet->list);
+               kfree(packet);
        }
 
-       batadv_socket_client_hash[socket_client->index] = NULL;
-       spin_unlock_bh(&socket_client->lock);
+       batadv_socket_client_hash[client->index] = NULL;
+       spin_unlock_bh(&client->lock);
 
-       kfree(socket_client);
+       kfree(client);
        module_put(THIS_MODULE);
 
        return 0;
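
The switch from list_for_each_safe() plus list_entry() to list_for_each_entry_safe() removes the boilerplate while keeping the property that matters here: the next element is cached, so the current one can be unlinked and freed inside the loop body. A self-contained model of the macro (typeof is a GNU C extension, as in the kernel; simplified list helpers, not the kernel's list.h):

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct list_head {
            struct list_head *next, *prev;
    };

    /* entry-based safe iteration: "tmp" caches the next element so the
     * current one may be unlinked and freed inside the loop body
     */
    #define list_for_each_entry_safe(pos, tmp, head, member)                 \
            for (pos = container_of((head)->next, typeof(*pos), member),     \
                 tmp = container_of(pos->member.next, typeof(*pos), member); \
                 &pos->member != (head);                                     \
                 pos = tmp,                                                  \
                 tmp = container_of(tmp->member.next, typeof(*tmp), member))

    struct packet {
            int id;
            struct list_head list;
    };

    static void list_add_tail(struct list_head *new, struct list_head *head)
    {
            new->prev = head->prev;
            new->next = head;
            head->prev->next = new;
            head->prev = new;
    }

    int main(void)
    {
            struct list_head queue = { &queue, &queue };
            struct packet *pkt, *tmp;
            int i;

            for (i = 0; i < 3; i++) {
                    pkt = malloc(sizeof(*pkt));
                    pkt->id = i;
                    list_add_tail(&pkt->list, &queue);
            }

            /* drain the queue, unlinking and freeing as we go */
            list_for_each_entry_safe(pkt, tmp, &queue, list) {
                    printf("freeing packet %d\n", pkt->id);
                    pkt->list.prev->next = pkt->list.next; /* list_del */
                    pkt->list.next->prev = pkt->list.prev;
                    free(pkt);
            }
            return 0;
    }
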
@@ -337,7 +333,7 @@ err:
 }
 
 /**
- * batadv_socket_receive_packet - schedule an icmp packet to be sent to
+ * batadv_socket_add_packet - schedule an icmp packet to be sent to
  *  userspace on an icmp socket.
  * @socket_client: the socket this packet belongs to
  * @icmph: pointer to the header of the icmp packet
index d64ddb961979ae083ba99907b07fc090a3dfe775..5f2974bd1227f32301dc607039d3271add649281 100644 (file)
@@ -401,11 +401,19 @@ int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
 
        hard_iface = container_of(ptype, struct batadv_hard_iface,
                                  batman_adv_ptype);
+
+       /* Prevent processing a packet received on an interface which is getting
+        * shut down; otherwise the packet may trigger de-reference errors
+        * further down in the receive path.
+        */
+       if (!kref_get_unless_zero(&hard_iface->refcount))
+               goto err_out;
+
        skb = skb_share_check(skb, GFP_ATOMIC);
 
        /* skb was released by skb_share_check() */
        if (!skb)
-               goto err_out;
+               goto err_put;
 
        /* packet should hold at least type and version */
        if (unlikely(!pskb_may_pull(skb, 2)))
@@ -448,6 +456,8 @@ int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
        if (ret == NET_RX_DROP)
                kfree_skb(skb);
 
+       batadv_hardif_put(hard_iface);
+
        /* return NET_RX_SUCCESS in any case as we
         * most probably dropped the packet for
         * routing-logical reasons.
@@ -456,6 +466,8 @@ int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
 
 err_free:
        kfree_skb(skb);
+err_put:
+       batadv_hardif_put(hard_iface);
 err_out:
        return NET_RX_DROP;
 }
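
The new guard works because kref_get_unless_zero() refuses to revive an object whose count has already dropped to zero: a packet racing against interface teardown is simply dropped instead of dereferencing a dying hard_iface. A userspace model of that primitive with C11 atomics:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* model of kref_get_unless_zero(): only take a reference while
     * the object is not already on its way to being freed
     */
    static bool get_unless_zero(atomic_int *refcount)
    {
            int old = atomic_load(refcount);

            while (old != 0) {
                    /* on failure, "old" is reloaded for the retry */
                    if (atomic_compare_exchange_weak(refcount, &old, old + 1))
                            return true;
            }
            return false;
    }

    int main(void)
    {
            atomic_int refcount;

            atomic_init(&refcount, 1);
            printf("%d\n", get_unless_zero(&refcount)); /* 1: alive */

            atomic_store(&refcount, 0); /* simulate the final put */
            printf("%d\n", get_unless_zero(&refcount)); /* 0: too late */
            return 0;
    }
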
@@ -663,8 +675,8 @@ static void batadv_tvlv_handler_put(struct batadv_tvlv_handler *tvlv_handler)
  *
  * Return: tvlv handler if found or NULL otherwise.
  */
-static struct batadv_tvlv_handler
-*batadv_tvlv_handler_get(struct batadv_priv *bat_priv, u8 type, u8 version)
+static struct batadv_tvlv_handler *
+batadv_tvlv_handler_get(struct batadv_priv *bat_priv, u8 type, u8 version)
 {
        struct batadv_tvlv_handler *tvlv_handler_tmp, *tvlv_handler = NULL;
 
@@ -722,8 +734,8 @@ static void batadv_tvlv_container_put(struct batadv_tvlv_container *tvlv)
  *
  * Return: tvlv container if found or NULL otherwise.
  */
-static struct batadv_tvlv_container
-*batadv_tvlv_container_get(struct batadv_priv *bat_priv, u8 type, u8 version)
+static struct batadv_tvlv_container *
+batadv_tvlv_container_get(struct batadv_priv *bat_priv, u8 type, u8 version)
 {
        struct batadv_tvlv_container *tvlv_tmp, *tvlv = NULL;
 
@@ -736,9 +748,7 @@ static struct batadv_tvlv_container
                if (tvlv_tmp->tvlv_hdr.version != version)
                        continue;
 
-               if (!kref_get_unless_zero(&tvlv_tmp->refcount))
-                       continue;
-
+               kref_get(&tvlv_tmp->refcount);
                tvlv = tvlv_tmp;
                break;
        }
index db453363183445843c7fd210ed3bd0d9cbd9dd1f..76925266deed1b572e1afd796b3df9cdef56d8a1 100644 (file)
@@ -24,7 +24,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2016.1"
+#define BATADV_SOURCE_VERSION "2016.2"
 #endif
 
 /* B.A.T.M.A.N. parameters */
 #define BATADV_BLA_BACKBONE_TIMEOUT    (BATADV_BLA_PERIOD_LENGTH * 6)
 #define BATADV_BLA_CLAIM_TIMEOUT       (BATADV_BLA_PERIOD_LENGTH * 10)
 #define BATADV_BLA_WAIT_PERIODS                3
+#define BATADV_BLA_LOOPDETECT_PERIODS  6
+#define BATADV_BLA_LOOPDETECT_TIMEOUT  3000    /* 3 seconds */
 
 #define BATADV_DUPLIST_SIZE            16
 #define BATADV_DUPLIST_TIMEOUT         500     /* 500 ms */
@@ -142,10 +144,12 @@ enum batadv_uev_action {
        BATADV_UEV_ADD = 0,
        BATADV_UEV_DEL,
        BATADV_UEV_CHANGE,
+       BATADV_UEV_LOOPDETECT,
 };
 
 enum batadv_uev_type {
        BATADV_UEV_GW = 0,
+       BATADV_UEV_BLA,
 };
 
 #define BATADV_GW_THRESHOLD    50
@@ -288,7 +292,7 @@ static inline void _batadv_dbg(int type __always_unused,
  *
  * note: can't use ether_addr_equal() as it requires aligned memory
  *
- * Return: 1 if they are the same ethernet addr
+ * Return: true if they are the same ethernet addr
  */
 static inline bool batadv_compare_eth(const void *data1, const void *data2)
 {
@@ -296,7 +300,8 @@ static inline bool batadv_compare_eth(const void *data1, const void *data2)
 }
 
 /**
- * has_timed_out - compares current time (jiffies) and timestamp + timeout
+ * batadv_has_timed_out - compares current time (jiffies) and timestamp +
+ *  timeout
  * @timestamp:         base value to compare with (in jiffies)
  * @timeout:           added to base value before comparing (in milliseconds)
  *
index 8caa2c72efa33511108a2b2e5f3e867bd6ebe59f..c32f24fafe67e68b0b2ddc99c87b550f5c2892d1 100644 (file)
@@ -394,7 +394,8 @@ static int batadv_mcast_forw_mode_check(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_want_all_ip_count - count nodes with unspecific mcast interest
+ * batadv_mcast_forw_want_all_ip_count - count nodes with unspecific mcast
+ *  interest
  * @bat_priv: the bat priv with all the soft interface information
  * @ethhdr: ethernet header of a packet
  *
@@ -433,7 +434,7 @@ batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_want_forw_ipv4_node_get - get a node with an ipv4 flag
+ * batadv_mcast_forw_ipv4_node_get - get a node with an ipv4 flag
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 flag set and
@@ -460,7 +461,7 @@ batadv_mcast_forw_ipv4_node_get(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_mcast_want_forw_ipv6_node_get - get a node with an ipv6 flag
+ * batadv_mcast_forw_ipv6_node_get - get a node with an ipv6 flag
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV6 flag set
@@ -487,7 +488,7 @@ batadv_mcast_forw_ipv6_node_get(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_mcast_want_forw_ip_node_get - get a node with an ipv4/ipv6 flag
+ * batadv_mcast_forw_ip_node_get - get a node with an ipv4/ipv6 flag
  * @bat_priv: the bat priv with all the soft interface information
  * @ethhdr: an ethernet header to determine the protocol family from
  *
@@ -511,7 +512,7 @@ batadv_mcast_forw_ip_node_get(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_want_forw_unsnoop_node_get - get a node with an unsnoopable flag
+ * batadv_mcast_forw_unsnoop_node_get - get a node with an unsnoopable flag
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag
index b41719b6487a40cd8571461b8eec03a60c7f4cfe..678f0686531254ba057490c479e9f6e1ad74c7b1 100644 (file)
@@ -510,10 +510,10 @@ static u32 batadv_nc_hash_choose(const void *data, u32 size)
  * @node: node in the local table
  * @data2: second object to compare the node to
  *
- * Return: 1 if the two entry are the same, 0 otherwise
+ * Return: true if the two entries are the same, false otherwise
  */
-static int batadv_nc_hash_compare(const struct hlist_node *node,
-                                 const void *data2)
+static bool batadv_nc_hash_compare(const struct hlist_node *node,
+                                  const void *data2)
 {
        const struct batadv_nc_path *nc_path1, *nc_path2;
 
@@ -521,15 +521,13 @@ static int batadv_nc_hash_compare(const struct hlist_node *node,
        nc_path2 = data2;
 
        /* Return true if the two keys are identical */
-       if (memcmp(nc_path1->prev_hop, nc_path2->prev_hop,
-                  sizeof(nc_path1->prev_hop)) != 0)
-               return 0;
+       if (!batadv_compare_eth(nc_path1->prev_hop, nc_path2->prev_hop))
+               return false;
 
-       if (memcmp(nc_path1->next_hop, nc_path2->next_hop,
-                  sizeof(nc_path1->next_hop)) != 0)
-               return 0;
+       if (!batadv_compare_eth(nc_path1->next_hop, nc_path2->next_hop))
+               return false;
 
-       return 1;
+       return true;
 }
 
 /**
@@ -714,7 +712,7 @@ static void batadv_nc_worker(struct work_struct *work)
        struct batadv_priv *bat_priv;
        unsigned long timeout;
 
-       delayed_work = container_of(work, struct delayed_work, work);
+       delayed_work = to_delayed_work(work);
        priv_nc = container_of(delayed_work, struct batadv_priv_nc, work);
        bat_priv = container_of(priv_nc, struct batadv_priv, nc);
 
@@ -793,10 +791,10 @@ static bool batadv_can_nc_with_orig(struct batadv_priv *bat_priv,
  *
  * Return: the nc_node if found, NULL otherwise.
  */
-static struct batadv_nc_node
-*batadv_nc_find_nc_node(struct batadv_orig_node *orig_node,
-                       struct batadv_orig_node *orig_neigh_node,
-                       bool in_coding)
+static struct batadv_nc_node *
+batadv_nc_find_nc_node(struct batadv_orig_node *orig_node,
+                      struct batadv_orig_node *orig_neigh_node,
+                      bool in_coding)
 {
        struct batadv_nc_node *nc_node, *nc_node_out = NULL;
        struct list_head *list;
@@ -835,11 +833,11 @@ static struct batadv_nc_node
  *
  * Return: the nc_node if found or created, NULL in case of an error.
  */
-static struct batadv_nc_node
-*batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
-                      struct batadv_orig_node *orig_node,
-                      struct batadv_orig_node *orig_neigh_node,
-                      bool in_coding)
+static struct batadv_nc_node *
+batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
+                     struct batadv_orig_node *orig_node,
+                     struct batadv_orig_node *orig_neigh_node,
+                     bool in_coding)
 {
        struct batadv_nc_node *nc_node;
        spinlock_t *lock; /* Used to lock list selected by "int in_coding" */
@@ -856,8 +854,7 @@ static struct batadv_nc_node
        if (!nc_node)
                return NULL;
 
-       if (!kref_get_unless_zero(&orig_neigh_node->refcount))
-               goto free;
+       kref_get(&orig_neigh_node->refcount);
 
        /* Initialize nc_node */
        INIT_LIST_HEAD(&nc_node->list);
@@ -884,10 +881,6 @@ static struct batadv_nc_node
        spin_unlock_bh(lock);
 
        return nc_node;
-
-free:
-       kfree(nc_node);
-       return NULL;
 }
 
 /**
index e4cbb0753e37ff6681a20e8912df1f4ad1e74a05..1ff4ee473966d23328ee0dbb926cbb72945e6a99 100644 (file)
@@ -54,9 +54,9 @@ static void batadv_purge_orig(struct work_struct *work);
  * @node: node in the local table
  * @data2: second object to compare the node to
  *
- * Return: 1 if they are the same originator
+ * Return: true if they are the same originator
  */
-int batadv_compare_orig(const struct hlist_node *node, const void *data2)
+bool batadv_compare_orig(const struct hlist_node *node, const void *data2)
 {
        const void *data1 = container_of(node, struct batadv_orig_node,
                                         hash_entry);
@@ -250,7 +250,6 @@ static void batadv_neigh_node_release(struct kref *ref)
 {
        struct hlist_node *node_tmp;
        struct batadv_neigh_node *neigh_node;
-       struct batadv_hardif_neigh_node *hardif_neigh;
        struct batadv_neigh_ifinfo *neigh_ifinfo;
        struct batadv_algo_ops *bao;
 
@@ -262,13 +261,7 @@ static void batadv_neigh_node_release(struct kref *ref)
                batadv_neigh_ifinfo_put(neigh_ifinfo);
        }
 
-       hardif_neigh = batadv_hardif_neigh_get(neigh_node->if_incoming,
-                                              neigh_node->addr);
-       if (hardif_neigh) {
-               /* batadv_hardif_neigh_get() increases refcount too */
-               batadv_hardif_neigh_put(hardif_neigh);
-               batadv_hardif_neigh_put(hardif_neigh);
-       }
+       batadv_hardif_neigh_put(neigh_node->hardif_neigh);
 
        if (bao->bat_neigh_free)
                bao->bat_neigh_free(neigh_node);
@@ -289,7 +282,7 @@ void batadv_neigh_node_put(struct batadv_neigh_node *neigh_node)
 }
 
 /**
- * batadv_orig_node_get_router - router to the originator depending on iface
+ * batadv_orig_router_get - router to the originator depending on iface
  * @orig_node: the orig node for the router
  * @if_outgoing: the interface where the payload packet has been received or
  *  the OGM should be sent to
@@ -381,12 +374,8 @@ batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node,
        if (!orig_ifinfo)
                goto out;
 
-       if (if_outgoing != BATADV_IF_DEFAULT &&
-           !kref_get_unless_zero(&if_outgoing->refcount)) {
-               kfree(orig_ifinfo);
-               orig_ifinfo = NULL;
-               goto out;
-       }
+       if (if_outgoing != BATADV_IF_DEFAULT)
+               kref_get(&if_outgoing->refcount);
 
        reset_time = jiffies - 1;
        reset_time -= msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
@@ -462,11 +451,8 @@ batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
        if (!neigh_ifinfo)
                goto out;
 
-       if (if_outgoing && !kref_get_unless_zero(&if_outgoing->refcount)) {
-               kfree(neigh_ifinfo);
-               neigh_ifinfo = NULL;
-               goto out;
-       }
+       if (if_outgoing)
+               kref_get(&if_outgoing->refcount);
 
        INIT_HLIST_NODE(&neigh_ifinfo->list);
        kref_init(&neigh_ifinfo->refcount);
@@ -539,15 +525,11 @@ batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface,
        if (hardif_neigh)
                goto out;
 
-       if (!kref_get_unless_zero(&hard_iface->refcount))
-               goto out;
-
        hardif_neigh = kzalloc(sizeof(*hardif_neigh), GFP_ATOMIC);
-       if (!hardif_neigh) {
-               batadv_hardif_put(hard_iface);
+       if (!hardif_neigh)
                goto out;
-       }
 
+       kref_get(&hard_iface->refcount);
        INIT_HLIST_NODE(&hardif_neigh->list);
        ether_addr_copy(hardif_neigh->addr, neigh_addr);
        hardif_neigh->if_incoming = hard_iface;
@@ -650,19 +632,19 @@ batadv_neigh_node_new(struct batadv_orig_node *orig_node,
        if (!neigh_node)
                goto out;
 
-       if (!kref_get_unless_zero(&hard_iface->refcount)) {
-               kfree(neigh_node);
-               neigh_node = NULL;
-               goto out;
-       }
-
        INIT_HLIST_NODE(&neigh_node->list);
        INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
        spin_lock_init(&neigh_node->ifinfo_lock);
 
+       kref_get(&hard_iface->refcount);
        ether_addr_copy(neigh_node->addr, neigh_addr);
        neigh_node->if_incoming = hard_iface;
        neigh_node->orig_node = orig_node;
+       neigh_node->last_seen = jiffies;
+
+       /* increment unique neighbor refcount */
+       kref_get(&hardif_neigh->refcount);
+       neigh_node->hardif_neigh = hardif_neigh;
 
        /* extra reference for return */
        kref_init(&neigh_node->refcount);
@@ -672,9 +654,6 @@ batadv_neigh_node_new(struct batadv_orig_node *orig_node,
        hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
        spin_unlock_bh(&orig_node->neigh_list_lock);
 
-       /* increment unique neighbor refcount */
-       kref_get(&hardif_neigh->refcount);
-
        batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv,
                   "Creating new neighbor %pM for orig_node %pM on interface %s\n",
                   neigh_addr, orig_node->orig, hard_iface->net_dev->name);
@@ -1165,6 +1144,9 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
                if (hard_iface->soft_iface != bat_priv->soft_iface)
                        continue;
 
+               if (!kref_get_unless_zero(&hard_iface->refcount))
+                       continue;
+
                best_neigh_node = batadv_find_best_neighbor(bat_priv,
                                                            orig_node,
                                                            hard_iface);
@@ -1172,6 +1154,8 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
                                    best_neigh_node);
                if (best_neigh_node)
                        batadv_neigh_node_put(best_neigh_node);
+
+               batadv_hardif_put(hard_iface);
        }
        rcu_read_unlock();
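
The kref_get_unless_zero()/batadv_hardif_put() pair added above is the standard pattern for using an RCU-protected object beyond plain read access; a condensed sketch (list name as used in batman-adv):

rcu_read_lock();
list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
	if (!kref_get_unless_zero(&hard_iface->refcount))
		continue;	/* interface is being freed, skip it */
	/* ... work that relies on hard_iface staying alive ... */
	batadv_hardif_put(hard_iface);
}
rcu_read_unlock();

rcu_read_lock() only keeps the memory from being reclaimed inside the section; anything stronger needs a real reference, and taking one may fail.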
 
@@ -1222,7 +1206,7 @@ static void batadv_purge_orig(struct work_struct *work)
        struct delayed_work *delayed_work;
        struct batadv_priv *bat_priv;
 
-       delayed_work = container_of(work, struct delayed_work, work);
+       delayed_work = to_delayed_work(work);
        bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
        _batadv_purge_orig(bat_priv);
        queue_delayed_work(batadv_event_workqueue,
index 4e8b67f110511302b42add7d5aa517510b6f7380..64a8951e58446fc11974622ab59b3b6a2d78dc34 100644 (file)
@@ -33,7 +33,7 @@
 
 struct seq_file;
 
-int batadv_compare_orig(const struct hlist_node *node, const void *data2);
+bool batadv_compare_orig(const struct hlist_node *node, const void *data2);
 int batadv_originator_init(struct batadv_priv *bat_priv);
 void batadv_originator_free(struct batadv_priv *bat_priv);
 void batadv_purge_orig_ref(struct batadv_priv *bat_priv);
index 8a8d7ca1a5cf16542bcee9e21ac7a5673aaf11f2..372128ddb474de09b8e60538f499a8bf5d463c9c 100644 (file)
@@ -175,6 +175,7 @@ enum batadv_bla_claimframe {
        BATADV_CLAIM_TYPE_UNCLAIM       = 0x01,
        BATADV_CLAIM_TYPE_ANNOUNCE      = 0x02,
        BATADV_CLAIM_TYPE_REQUEST       = 0x03,
+       BATADV_CLAIM_TYPE_LOOPDETECT    = 0x04,
 };
 
 /**
@@ -501,7 +502,7 @@ struct batadv_coded_packet {
 #pragma pack()
 
 /**
- * struct batadv_unicast_tvlv - generic unicast packet with tvlv payload
+ * struct batadv_unicast_tvlv_packet - generic unicast packet with tvlv payload
  * @packet_type: batman-adv packet type, part of the general header
  * @version: batman-adv protocol version, part of the general header
  * @ttl: time to live for this packet, part of the general header
index 4dd646a52f1a16ca297a3cdb1abeff56206df166..ae850f2d11cba30230a2d6ed881c3b2ab3c245f1 100644 (file)
@@ -100,11 +100,20 @@ static void _batadv_update_route(struct batadv_priv *bat_priv,
        if (curr_router)
                batadv_neigh_node_put(curr_router);
 
+       spin_lock_bh(&orig_node->neigh_list_lock);
+       /* curr_router used earlier may not be the current orig_ifinfo->router
+        * anymore because it was dereferenced outside of the neigh_list_lock
+        * protected region. After the new best neighbor has replaced the
+        * current best neighbor, the reference counter needs to be decreased.
+        * Consequently, the code needs to ensure the curr_router variable
+        * contains a pointer to the replaced best neighbor.
+        */
+       curr_router = rcu_dereference_protected(orig_ifinfo->router, true);
+
        /* increase refcount of new best neighbor */
-       if (neigh_node && !kref_get_unless_zero(&neigh_node->refcount))
-               neigh_node = NULL;
+       if (neigh_node)
+               kref_get(&neigh_node->refcount);
 
-       spin_lock_bh(&orig_node->neigh_list_lock);
        rcu_assign_pointer(orig_ifinfo->router, neigh_node);
        spin_unlock_bh(&orig_node->neigh_list_lock);
        batadv_orig_ifinfo_put(orig_ifinfo);
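
The fix re-reads the RCU-managed pointer under the writer-side lock so the value seen is the one actually being replaced; rcu_dereference_protected(ptr, cond) documents that protection instead of issuing a read-side barrier. A condensed sketch of the pattern:

spin_lock_bh(&orig_node->neigh_list_lock);
curr = rcu_dereference_protected(orig_ifinfo->router, true); /* lock held */
rcu_assign_pointer(orig_ifinfo->router, new_router);
spin_unlock_bh(&orig_node->neigh_list_lock);
/* curr is now guaranteed to be the neighbor that was replaced */

Passing true as the condition is the usual shorthand when, as here, the protecting lock is taken immediately beforehand.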
@@ -154,18 +163,18 @@ out:
  *   doesn't change otherwise.
  *
  * Return:
- *  0 if the packet is to be accepted.
- *  1 if the packet is to be ignored.
+ *  false if the packet is to be accepted.
+ *  true if the packet is to be ignored.
  */
-int batadv_window_protected(struct batadv_priv *bat_priv, s32 seq_num_diff,
-                           s32 seq_old_max_diff, unsigned long *last_reset,
-                           bool *protection_started)
+bool batadv_window_protected(struct batadv_priv *bat_priv, s32 seq_num_diff,
+                            s32 seq_old_max_diff, unsigned long *last_reset,
+                            bool *protection_started)
 {
        if (seq_num_diff <= -seq_old_max_diff ||
            seq_num_diff >= BATADV_EXPECTED_SEQNO_RANGE) {
                if (!batadv_has_timed_out(*last_reset,
                                          BATADV_RESET_PROTECTION_MS))
-                       return 1;
+                       return true;
 
                *last_reset = jiffies;
                if (protection_started)
@@ -174,7 +183,7 @@ int batadv_window_protected(struct batadv_priv *bat_priv, s32 seq_num_diff,
                           "old packet received, start protection\n");
        }
 
-       return 0;
+       return false;
 }
 
 bool batadv_check_management_packet(struct sk_buff *skb,
@@ -709,8 +718,9 @@ out:
        return ret;
 }
 
-static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
-                                    struct sk_buff *skb, int hdr_len) {
+static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
+                                     struct sk_buff *skb, int hdr_len)
+{
        struct batadv_unicast_packet *unicast_packet;
        struct batadv_hard_iface *primary_if;
        struct batadv_orig_node *orig_node;
@@ -721,11 +731,11 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
 
        /* check if there is enough data before accessing it */
        if (!pskb_may_pull(skb, hdr_len + ETH_HLEN))
-               return 0;
+               return false;
 
        /* create a copy of the skb (in case of re-routing) to modify it. */
        if (skb_cow(skb, sizeof(*unicast_packet)) < 0)
-               return 0;
+               return false;
 
        unicast_packet = (struct batadv_unicast_packet *)skb->data;
        vid = batadv_get_vid(skb, hdr_len);
@@ -749,7 +759,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
                 * table. If not, let the packet go untouched anyway because
                 * there is nothing the node can do
                 */
-               return 1;
+               return true;
        }
 
        /* retrieve the TTVN known by this node for the packet destination. This
@@ -765,7 +775,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
                 * not be possible to deliver it
                 */
                if (!orig_node)
-                       return 0;
+                       return false;
 
                curr_ttvn = (u8)atomic_read(&orig_node->last_ttvn);
                batadv_orig_node_put(orig_node);
@@ -776,7 +786,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
         */
        is_old_ttvn = batadv_seq_before(unicast_packet->ttvn, curr_ttvn);
        if (!is_old_ttvn)
-               return 1;
+               return true;
 
        old_ttvn = unicast_packet->ttvn;
        /* the packet was forged based on outdated network information. Its
@@ -789,7 +799,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
                                       "Rerouting unicast packet to %pM (dst=%pM): TTVN mismatch old_ttvn=%u new_ttvn=%u\n",
                                       unicast_packet->dest, ethhdr->h_dest,
                                       old_ttvn, curr_ttvn);
-               return 1;
+               return true;
        }
 
        /* the packet has not been re-routed: either the destination is
@@ -797,14 +807,14 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
         * it is possible to drop the packet
         */
        if (!batadv_is_my_client(bat_priv, ethhdr->h_dest, vid))
-               return 0;
+               return false;
 
        /* update the header in order to let the packet be delivered to this
         * node's soft interface
         */
        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if)
-               return 0;
+               return false;
 
        ether_addr_copy(unicast_packet->dest, primary_if->net_dev->dev_addr);
 
@@ -812,7 +822,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
 
        unicast_packet->ttvn = curr_ttvn;
 
-       return 1;
+       return true;
 }
 
 /**
@@ -903,7 +913,7 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
                                                        hdr_size))
                        goto rx_success;
 
-               batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size,
+               batadv_interface_rx(recv_if->soft_iface, skb, hdr_size,
                                    orig_node);
 
 rx_success:
@@ -1113,8 +1123,7 @@ int batadv_recv_bcast_packet(struct sk_buff *skb,
                goto rx_success;
 
        /* broadcast for me */
-       batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size,
-                           orig_node);
+       batadv_interface_rx(recv_if->soft_iface, skb, hdr_size, orig_node);
 
 rx_success:
        ret = NET_RX_SUCCESS;
index 02a5caa84127e4dc2ffb4b675cb76486bda075b0..05c3ff42e1816743989ad0c6ed4bc6eea943e448 100644 (file)
@@ -51,8 +51,8 @@ struct batadv_neigh_node *
 batadv_find_router(struct batadv_priv *bat_priv,
                   struct batadv_orig_node *orig_node,
                   struct batadv_hard_iface *recv_if);
-int batadv_window_protected(struct batadv_priv *bat_priv, s32 seq_num_diff,
-                           s32 seq_old_max_diff, unsigned long *last_reset,
-                           bool *protection_started);
+bool batadv_window_protected(struct batadv_priv *bat_priv, s32 seq_num_diff,
+                            s32 seq_old_max_diff, unsigned long *last_reset,
+                            bool *protection_started);
 
 #endif /* _NET_BATMAN_ADV_ROUTING_H_ */
index 3ce06e0a91b1c125cdf5ff594db57529a2deaacf..f2f125684ed9c199e1c4f7f7b3b6109b3b9a51c2 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/if.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
+#include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/netdevice.h>
 #include <linux/printk.h>
@@ -552,7 +553,7 @@ static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
        struct net_device *soft_iface;
        struct batadv_priv *bat_priv;
 
-       delayed_work = container_of(work, struct delayed_work, work);
+       delayed_work = to_delayed_work(work);
        forw_packet = container_of(delayed_work, struct batadv_forw_packet,
                                   delayed_work);
        soft_iface = forw_packet->if_incoming->soft_iface;
@@ -577,10 +578,15 @@ static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
                if (forw_packet->num_packets >= hard_iface->num_bcasts)
                        continue;
 
+               if (!kref_get_unless_zero(&hard_iface->refcount))
+                       continue;
+
                /* send a copy of the saved skb */
                skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
                if (skb1)
                        batadv_send_broadcast_skb(skb1, hard_iface);
+
+               batadv_hardif_put(hard_iface);
        }
        rcu_read_unlock();
 
@@ -604,7 +610,7 @@ void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
        struct batadv_forw_packet *forw_packet;
        struct batadv_priv *bat_priv;
 
-       delayed_work = container_of(work, struct delayed_work, work);
+       delayed_work = to_delayed_work(work);
        forw_packet = container_of(delayed_work, struct batadv_forw_packet,
                                   delayed_work);
        bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
@@ -675,6 +681,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
 
                if (pending) {
                        hlist_del(&forw_packet->list);
+                       if (!forw_packet->own)
+                               atomic_inc(&bat_priv->bcast_queue_left);
+
                        batadv_forw_packet_free(forw_packet);
                }
        }
@@ -702,6 +711,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
 
                if (pending) {
                        hlist_del(&forw_packet->list);
+                       if (!forw_packet->own)
+                               atomic_inc(&bat_priv->batman_queue_left);
+
                        batadv_forw_packet_free(forw_packet);
                }
        }
index 0710379491bffc0627c35f0ed5ba6bbf535bb1bb..343d2c90439928cfd986654de265eeb10ca9a10a 100644 (file)
@@ -186,7 +186,6 @@ static int batadv_interface_tx(struct sk_buff *skb,
        struct batadv_priv *bat_priv = netdev_priv(soft_iface);
        struct batadv_hard_iface *primary_if = NULL;
        struct batadv_bcast_packet *bcast_packet;
-       __be16 ethertype = htons(ETH_P_BATMAN);
        static const u8 stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00,
                                              0x00, 0x00};
        static const u8 ectp_addr[ETH_ALEN] = {0xCF, 0x00, 0x00, 0x00,
@@ -208,7 +207,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
        if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
                goto dropped;
 
-       soft_iface->trans_start = jiffies;
+       netif_trans_update(soft_iface);
        vid = batadv_get_vid(skb, 0);
        ethhdr = eth_hdr(skb);
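
netif_trans_update() replaces open-coded writes to dev->trans_start; a sketch of the helper as introduced for 4.7 in include/linux/netdevice.h:

static inline void netif_trans_update(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	if (txq->trans_start != jiffies)
		txq->trans_start = jiffies;
}

It stamps queue 0 and avoids dirtying the cache line when jiffies has not advanced; the same substitution appears again in the bnep hunk further down.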
 
@@ -216,7 +215,8 @@ static int batadv_interface_tx(struct sk_buff *skb,
        case ETH_P_8021Q:
                vhdr = vlan_eth_hdr(skb);
 
-               if (vhdr->h_vlan_encapsulated_proto != ethertype) {
+               /* drop batman-in-batman packets to prevent loops */
+               if (vhdr->h_vlan_encapsulated_proto != htons(ETH_P_BATMAN)) {
                        network_offset += VLAN_HLEN;
                        break;
                }
@@ -381,13 +381,29 @@ end:
        return NETDEV_TX_OK;
 }
 
+/**
+ * batadv_interface_rx - receive ethernet frame on local batman-adv interface
+ * @soft_iface: local interface which will receive the ethernet frame
+ * @skb: ethernet frame for @soft_iface
+ * @hdr_size: size of already parsed batman-adv header
+ * @orig_node: originator from which the batman-adv packet was sent
+ *
+ * Sends an ethernet frame to the receive path of the local @soft_iface.
+ * skb->data still points to the batman-adv header of size @hdr_size.
+ * The caller must have parsed this header already and made sure that at
+ * least @hdr_size bytes are still available for pull in @skb.
+ *
+ * The packet may still get dropped. This can happen when the encapsulated
+ * ethernet frame is invalid or again contains a batman-adv packet. Also,
+ * unicast packets are dropped directly when they were sent between two
+ * isolated clients.
+ */
 void batadv_interface_rx(struct net_device *soft_iface,
-                        struct sk_buff *skb, struct batadv_hard_iface *recv_if,
-                        int hdr_size, struct batadv_orig_node *orig_node)
+                        struct sk_buff *skb, int hdr_size,
+                        struct batadv_orig_node *orig_node)
 {
        struct batadv_bcast_packet *batadv_bcast_packet;
        struct batadv_priv *bat_priv = netdev_priv(soft_iface);
-       __be16 ethertype = htons(ETH_P_BATMAN);
        struct vlan_ethhdr *vhdr;
        struct ethhdr *ethhdr;
        unsigned short vid;
@@ -396,10 +412,6 @@ void batadv_interface_rx(struct net_device *soft_iface,
        batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data;
        is_bcast = (batadv_bcast_packet->packet_type == BATADV_BCAST);
 
-       /* check if enough space is available for pulling, and pull */
-       if (!pskb_may_pull(skb, hdr_size))
-               goto dropped;
-
        skb_pull_rcsum(skb, hdr_size);
        skb_reset_mac_header(skb);
 
@@ -408,14 +420,21 @@ void batadv_interface_rx(struct net_device *soft_iface,
         */
        nf_reset(skb);
 
+       if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
+               goto dropped;
+
        vid = batadv_get_vid(skb, 0);
        ethhdr = eth_hdr(skb);
 
        switch (ntohs(ethhdr->h_proto)) {
        case ETH_P_8021Q:
+               if (!pskb_may_pull(skb, VLAN_ETH_HLEN))
+                       goto dropped;
+
                vhdr = (struct vlan_ethhdr *)skb->data;
 
-               if (vhdr->h_vlan_encapsulated_proto != ethertype)
+               /* drop batman-in-batman packets to prevent loops */
+               if (vhdr->h_vlan_encapsulated_proto != htons(ETH_P_BATMAN))
                        break;
 
                /* fall through */
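
The reordered length checks follow the pskb_may_pull() contract: skb->data may only be dereferenced for bytes known to be in the linear area. A minimal sketch of the rule:

if (!pskb_may_pull(skb, VLAN_ETH_HLEN))
	goto dropped;	/* frame too short, or header not linearizable */

vhdr = (struct vlan_ethhdr *)skb->data;	/* now safe to read */

This is why the ETH_HLEN check moved up before eth_hdr() and a VLAN_ETH_HLEN check now guards the VLAN header access.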
@@ -424,8 +443,6 @@ void batadv_interface_rx(struct net_device *soft_iface,
        }
 
        /* skb->dev & skb->pkt_type are set here */
-       if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
-               goto dropped;
        skb->protocol = eth_type_trans(skb, soft_iface);
 
        /* should not be necessary anymore as we use skb_pull_rcsum()
@@ -539,7 +556,7 @@ struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_create_vlan - allocate the needed resources for a new vlan
+ * batadv_softif_create_vlan - allocate the needed resources for a new vlan
  * @bat_priv: the bat priv with all the soft interface information
  * @vid: the VLAN identifier
  *
@@ -868,13 +885,14 @@ static int batadv_softif_slave_add(struct net_device *dev,
                                   struct net_device *slave_dev)
 {
        struct batadv_hard_iface *hard_iface;
+       struct net *net = dev_net(dev);
        int ret = -EINVAL;
 
        hard_iface = batadv_hardif_get_by_netdev(slave_dev);
        if (!hard_iface || hard_iface->soft_iface)
                goto out;
 
-       ret = batadv_hardif_enable_interface(hard_iface, dev->name);
+       ret = batadv_hardif_enable_interface(hard_iface, net, dev->name);
 
 out:
        if (hard_iface)
@@ -955,7 +973,7 @@ static void batadv_softif_init_early(struct net_device *dev)
 
        dev->netdev_ops = &batadv_netdev_ops;
        dev->destructor = batadv_softif_free;
-       dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+       dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_NETNS_LOCAL;
        dev->priv_flags |= IFF_NO_QUEUE;
 
        /* can't call min_mtu, because the needed variables
@@ -971,7 +989,7 @@ static void batadv_softif_init_early(struct net_device *dev)
        memset(priv, 0, sizeof(*priv));
 }
 
-struct net_device *batadv_softif_create(const char *name)
+struct net_device *batadv_softif_create(struct net *net, const char *name)
 {
        struct net_device *soft_iface;
        int ret;
@@ -981,6 +999,8 @@ struct net_device *batadv_softif_create(const char *name)
        if (!soft_iface)
                return NULL;
 
+       dev_net_set(soft_iface, net);
+
        soft_iface->rtnl_link_ops = &batadv_link_ops;
 
        ret = register_netdevice(soft_iface);
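
dev_net_set() has to run before register_netdevice() so the interface is created directly in the caller's network namespace; the NETIF_F_NETNS_LOCAL flag set in batadv_softif_init_early() then pins it there. A condensed sketch of the ordering, roughly as in this function:

soft_iface = alloc_netdev(sizeof(struct batadv_priv), name,
			  NET_NAME_UNKNOWN, batadv_softif_init_early);
if (!soft_iface)
	return NULL;

dev_net_set(soft_iface, net);	/* must precede registration */
ret = register_netdevice(soft_iface);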
@@ -1025,12 +1045,12 @@ static void batadv_softif_destroy_netlink(struct net_device *soft_iface,
        unregister_netdevice_queue(soft_iface, head);
 }
 
-int batadv_softif_is_valid(const struct net_device *net_dev)
+bool batadv_softif_is_valid(const struct net_device *net_dev)
 {
        if (net_dev->netdev_ops->ndo_start_xmit == batadv_interface_tx)
-               return 1;
+               return true;
 
-       return 0;
+       return false;
 }
 
 struct rtnl_link_ops batadv_link_ops __read_mostly = {
index 9ae265703d237462541b356b6cacaa47774c27ba..ec303ddbf647828947f8c83c7edde06582735ad6 100644 (file)
 
 #include "main.h"
 
+#include <linux/types.h>
 #include <net/rtnetlink.h>
 
 struct net_device;
+struct net;
 struct sk_buff;
 
 int batadv_skb_head_push(struct sk_buff *skb, unsigned int len);
 void batadv_interface_rx(struct net_device *soft_iface,
-                        struct sk_buff *skb, struct batadv_hard_iface *recv_if,
-                        int hdr_size, struct batadv_orig_node *orig_node);
-struct net_device *batadv_softif_create(const char *name);
+                        struct sk_buff *skb, int hdr_size,
+                        struct batadv_orig_node *orig_node);
+struct net_device *batadv_softif_create(struct net *net, const char *name);
 void batadv_softif_destroy_sysfs(struct net_device *soft_iface);
-int batadv_softif_is_valid(const struct net_device *net_dev);
+bool batadv_softif_is_valid(const struct net_device *net_dev);
 extern struct rtnl_link_ops batadv_link_ops;
 int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid);
 void batadv_softif_vlan_put(struct batadv_softif_vlan *softif_vlan);
index e7cf51333a36455b6288f3c1b029eb3a187939d5..414b2074165f3368d1cd307ad72c477b2489c6d7 100644 (file)
@@ -116,11 +116,13 @@ batadv_kobj_to_vlan(struct batadv_priv *bat_priv, struct kobject *obj)
 static char *batadv_uev_action_str[] = {
        "add",
        "del",
-       "change"
+       "change",
+       "loopdetect",
 };
 
 static char *batadv_uev_type_str[] = {
-       "gw"
+       "gw",
+       "bla",
 };
 
 /* Use this, if you have customized show and store functions for vlan attrs */
@@ -830,6 +832,7 @@ static ssize_t batadv_store_mesh_iface(struct kobject *kobj,
                                       size_t count)
 {
        struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
+       struct net *net = dev_net(net_dev);
        struct batadv_hard_iface *hard_iface;
        int status_tmp = -1;
        int ret = count;
@@ -873,7 +876,7 @@ static ssize_t batadv_store_mesh_iface(struct kobject *kobj,
                batadv_hardif_disable_interface(hard_iface,
                                                BATADV_IF_CLEANUP_AUTO);
 
-       ret = batadv_hardif_enable_interface(hard_iface, buff);
+       ret = batadv_hardif_enable_interface(hard_iface, net, buff);
 
 unlock:
        rtnl_unlock();
index 0b43e86328a59bb42f7200380b840b0209fd26a1..feaf492b01ca011b221fbfb74bba295da794ac15 100644 (file)
@@ -43,7 +43,6 @@
 #include <linux/stddef.h>
 #include <linux/string.h>
 #include <linux/workqueue.h>
-#include <net/net_namespace.h>
 
 #include "bridge_loop_avoidance.h"
 #include "hard-interface.h"
@@ -76,9 +75,9 @@ static void batadv_tt_global_del(struct batadv_priv *bat_priv,
  *
  * Compare the MAC address and the VLAN ID of the two TT entries and check if
  * they are the same TT client.
- * Return: 1 if the two TT clients are the same, 0 otherwise
+ * Return: true if the two TT clients are the same, false otherwise
  */
-static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
+static bool batadv_compare_tt(const struct hlist_node *node, const void *data2)
 {
        const void *data1 = container_of(node, struct batadv_tt_common_entry,
                                         hash_entry);
@@ -215,6 +214,8 @@ static void batadv_tt_local_entry_release(struct kref *ref)
        tt_local_entry = container_of(ref, struct batadv_tt_local_entry,
                                      common.refcount);
 
+       batadv_softif_vlan_put(tt_local_entry->vlan);
+
        kfree_rcu(tt_local_entry, common.rcu);
 }
 
@@ -583,6 +584,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
        struct batadv_priv *bat_priv = netdev_priv(soft_iface);
        struct batadv_tt_local_entry *tt_local;
        struct batadv_tt_global_entry *tt_global = NULL;
+       struct net *net = dev_net(soft_iface);
        struct batadv_softif_vlan *vlan;
        struct net_device *in_dev = NULL;
        struct hlist_head *head;
@@ -594,7 +596,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
        u32 match_mark;
 
        if (ifindex != BATADV_NULL_IFINDEX)
-               in_dev = dev_get_by_index(&init_net, ifindex);
+               in_dev = dev_get_by_index(net, ifindex);
 
        tt_local = batadv_tt_local_hash_find(bat_priv, addr, vid);
 
@@ -673,6 +675,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
        kref_get(&tt_local->common.refcount);
        tt_local->last_seen = jiffies;
        tt_local->common.added_at = tt_local->last_seen;
+       tt_local->vlan = vlan;
 
        /* the batman interface mac and multicast addresses should never be
         * purged
@@ -991,7 +994,6 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_tt_local_entry *tt_local;
        struct batadv_hard_iface *primary_if;
-       struct batadv_softif_vlan *vlan;
        struct hlist_head *head;
        unsigned short vid;
        u32 i;
@@ -1008,8 +1010,8 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
        seq_printf(seq,
                   "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
                   net_dev->name, (u8)atomic_read(&bat_priv->tt.vn));
-       seq_printf(seq, "       %-13s  %s %-8s %-9s (%-10s)\n", "Client", "VID",
-                  "Flags", "Last seen", "CRC");
+       seq_puts(seq,
+                "       Client         VID Flags    Last seen (CRC       )\n");
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
@@ -1027,14 +1029,6 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
                        last_seen_msecs = last_seen_msecs % 1000;
 
                        no_purge = tt_common_entry->flags & np_flag;
-
-                       vlan = batadv_softif_vlan_get(bat_priv, vid);
-                       if (!vlan) {
-                               seq_printf(seq, "Cannot retrieve VLAN %d\n",
-                                          BATADV_PRINT_VID(vid));
-                               continue;
-                       }
-
                        seq_printf(seq,
                                   " * %pM %4i [%c%c%c%c%c%c] %3u.%03u   (%#.8x)\n",
                                   tt_common_entry->addr,
@@ -1052,9 +1046,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
                                     BATADV_TT_CLIENT_ISOLA) ? 'I' : '.'),
                                   no_purge ? 0 : last_seen_secs,
                                   no_purge ? 0 : last_seen_msecs,
-                                  vlan->tt.crc);
-
-                       batadv_softif_vlan_put(vlan);
+                                  tt_local->vlan->tt.crc);
                }
                rcu_read_unlock();
        }
@@ -1099,7 +1091,6 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
 {
        struct batadv_tt_local_entry *tt_local_entry;
        u16 flags, curr_flags = BATADV_NO_FLAGS;
-       struct batadv_softif_vlan *vlan;
        void *tt_entry_exists;
 
        tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
@@ -1139,14 +1130,6 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
        /* extra call to free the local tt entry */
        batadv_tt_local_entry_put(tt_local_entry);
 
-       /* decrease the reference held for this vlan */
-       vlan = batadv_softif_vlan_get(bat_priv, vid);
-       if (!vlan)
-               goto out;
-
-       batadv_softif_vlan_put(vlan);
-       batadv_softif_vlan_put(vlan);
-
 out:
        if (tt_local_entry)
                batadv_tt_local_entry_put(tt_local_entry);
@@ -1219,7 +1202,6 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
        spinlock_t *list_lock; /* protects write access to the hash lists */
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_tt_local_entry *tt_local;
-       struct batadv_softif_vlan *vlan;
        struct hlist_node *node_tmp;
        struct hlist_head *head;
        u32 i;
@@ -1241,14 +1223,6 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
                                                struct batadv_tt_local_entry,
                                                common);
 
-                       /* decrease the reference held for this vlan */
-                       vlan = batadv_softif_vlan_get(bat_priv,
-                                                     tt_common_entry->vid);
-                       if (vlan) {
-                               batadv_softif_vlan_put(vlan);
-                               batadv_softif_vlan_put(vlan);
-                       }
-
                        batadv_tt_local_entry_put(tt_local);
                }
                spin_unlock_bh(list_lock);
@@ -1706,9 +1680,8 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
        seq_printf(seq,
                   "Globally announced TT entries received via the mesh %s\n",
                   net_dev->name);
-       seq_printf(seq, "       %-13s  %s  %s       %-15s %s (%-10s) %s\n",
-                  "Client", "VID", "(TTVN)", "Originator", "(Curr TTVN)",
-                  "CRC", "Flags");
+       seq_puts(seq,
+                "       Client         VID  (TTVN)       Originator      (Curr TTVN) (CRC       ) Flags\n");
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
@@ -2388,19 +2361,19 @@ unlock:
  * @entry_ptr: to be checked local tt entry
  * @data_ptr: not used but definition required to satisfy the callback prototype
  *
- * Return: 1 if the entry is a valid, 0 otherwise.
+ * Return: true if the entry is valid, false otherwise.
  */
-static int batadv_tt_local_valid(const void *entry_ptr, const void *data_ptr)
+static bool batadv_tt_local_valid(const void *entry_ptr, const void *data_ptr)
 {
        const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
 
        if (tt_common_entry->flags & BATADV_TT_CLIENT_NEW)
-               return 0;
-       return 1;
+               return false;
+       return true;
 }
 
-static int batadv_tt_global_valid(const void *entry_ptr,
-                                 const void *data_ptr)
+static bool batadv_tt_global_valid(const void *entry_ptr,
+                                  const void *data_ptr)
 {
        const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
        const struct batadv_tt_global_entry *tt_global_entry;
@@ -2408,7 +2381,7 @@ static int batadv_tt_global_valid(const void *entry_ptr,
 
        if (tt_common_entry->flags & BATADV_TT_CLIENT_ROAM ||
            tt_common_entry->flags & BATADV_TT_CLIENT_TEMP)
-               return 0;
+               return false;
 
        tt_global_entry = container_of(tt_common_entry,
                                       struct batadv_tt_global_entry,
@@ -2430,7 +2403,8 @@ static int batadv_tt_global_valid(const void *entry_ptr,
 static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
                                    struct batadv_hashtable *hash,
                                    void *tvlv_buff, u16 tt_len,
-                                   int (*valid_cb)(const void *, const void *),
+                                   bool (*valid_cb)(const void *,
+                                                    const void *),
                                    void *cb_data)
 {
        struct batadv_tt_common_entry *tt_common_entry;
@@ -2579,11 +2553,11 @@ static void batadv_tt_global_update_crc(struct batadv_priv *bat_priv,
  *
  * Return: true if the TT Request was sent, false otherwise
  */
-static int batadv_send_tt_request(struct batadv_priv *bat_priv,
-                                 struct batadv_orig_node *dst_orig_node,
-                                 u8 ttvn,
-                                 struct batadv_tvlv_tt_vlan_data *tt_vlan,
-                                 u16 num_vlan, bool full_table)
+static bool batadv_send_tt_request(struct batadv_priv *bat_priv,
+                                  struct batadv_orig_node *dst_orig_node,
+                                  u8 ttvn,
+                                  struct batadv_tvlv_tt_vlan_data *tt_vlan,
+                                  u16 num_vlan, bool full_table)
 {
        struct batadv_tvlv_tt_data *tvlv_tt_data = NULL;
        struct batadv_tt_req_node *tt_req_node = NULL;
@@ -3227,7 +3201,7 @@ static void batadv_tt_purge(struct work_struct *work)
        struct batadv_priv_tt *priv_tt;
        struct batadv_priv *bat_priv;
 
-       delayed_work = container_of(work, struct delayed_work, work);
+       delayed_work = to_delayed_work(work);
        priv_tt = container_of(delayed_work, struct batadv_priv_tt, work);
        bat_priv = container_of(priv_tt, struct batadv_priv, tt);
 
@@ -3309,7 +3283,6 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
        struct batadv_hashtable *hash = bat_priv->tt.local_hash;
        struct batadv_tt_common_entry *tt_common;
        struct batadv_tt_local_entry *tt_local;
-       struct batadv_softif_vlan *vlan;
        struct hlist_node *node_tmp;
        struct hlist_head *head;
        spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -3339,13 +3312,6 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
                                                struct batadv_tt_local_entry,
                                                common);
 
-                       /* decrease the reference held for this vlan */
-                       vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid);
-                       if (vlan) {
-                               batadv_softif_vlan_put(vlan);
-                               batadv_softif_vlan_put(vlan);
-                       }
-
                        batadv_tt_local_entry_put(tt_local);
                }
                spin_unlock_bh(list_lock);
index 9abfb3e73c3448a9612c9c282166fe5e18b44327..6a577f4f8ba77ce1b2b923c55deada8a050802e8 100644 (file)
@@ -433,6 +433,7 @@ struct batadv_hardif_neigh_node {
  * @ifinfo_lock: lock protecting private ifinfo members and list
  * @if_incoming: pointer to incoming hard-interface
  * @last_seen: when last packet via this neighbor was received
+ * @hardif_neigh: hardif_neigh of this neighbor
  * @refcount: number of contexts the object is used
  * @rcu: struct used for freeing in an RCU-safe manner
  */
@@ -444,6 +445,7 @@ struct batadv_neigh_node {
        spinlock_t ifinfo_lock; /* protects ifinfo_list and its members */
        struct batadv_hard_iface *if_incoming;
        unsigned long last_seen;
+       struct batadv_hardif_neigh_node *hardif_neigh;
        struct kref refcount;
        struct rcu_head rcu;
 };
@@ -655,6 +657,9 @@ struct batadv_priv_tt {
  * @num_requests: number of bla requests in flight
  * @claim_hash: hash table containing mesh nodes this host has claimed
  * @backbone_hash: hash table containing all detected backbone gateways
+ * @loopdetect_addr: MAC address used for own loopdetection frames
+ * @loopdetect_lasttime: time when the loopdetection frames were sent
+ * @loopdetect_next: how many periods to wait for the next loopdetect process
  * @bcast_duplist: recently received broadcast packets array (for broadcast
  *  duplicate suppression)
  * @bcast_duplist_curr: index of last broadcast packet added to bcast_duplist
@@ -666,6 +671,9 @@ struct batadv_priv_bla {
        atomic_t num_requests;
        struct batadv_hashtable *claim_hash;
        struct batadv_hashtable *backbone_hash;
+       u8 loopdetect_addr[ETH_ALEN];
+       unsigned long loopdetect_lasttime;
+       atomic_t loopdetect_next;
        struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE];
        int bcast_duplist_curr;
        /* protects bcast_duplist & bcast_duplist_curr */
@@ -1010,6 +1018,7 @@ struct batadv_socket_packet {
  *  resolved
  * @crc: crc16 checksum over all claims
  * @crc_lock: lock protecting crc
+ * @report_work: work struct for reporting detected loops
  * @refcount: number of contexts the object is used
  * @rcu: struct used for freeing in an RCU-safe manner
  */
@@ -1023,6 +1032,7 @@ struct batadv_bla_backbone_gw {
        atomic_t request_sent;
        u16 crc;
        spinlock_t crc_lock; /* protects crc */
+       struct work_struct report_work;
        struct kref refcount;
        struct rcu_head rcu;
 };
@@ -1073,10 +1083,12 @@ struct batadv_tt_common_entry {
  * struct batadv_tt_local_entry - translation table local entry data
  * @common: general translation table data
  * @last_seen: timestamp used for purging stale tt local entries
+ * @vlan: soft-interface vlan of the entry
  */
 struct batadv_tt_local_entry {
        struct batadv_tt_common_entry common;
        unsigned long last_seen;
+       struct batadv_softif_vlan *vlan;
 };
 
 /**
@@ -1250,6 +1262,8 @@ struct batadv_forw_packet {
  * struct batadv_algo_ops - mesh algorithm callbacks
  * @list: list node for the batadv_algo_list
  * @name: name of the algorithm
+ * @bat_iface_activate: start routing mechanisms when hard-interface is brought
+ *  up
  * @bat_iface_enable: init routing info when hard-interface is enabled
  * @bat_iface_disable: de-init routing info when hard-interface is disabled
  * @bat_iface_update_mac: (re-)init mac addresses of the protocol information
@@ -1277,6 +1291,7 @@ struct batadv_forw_packet {
 struct batadv_algo_ops {
        struct hlist_node list;
        char *name;
+       void (*bat_iface_activate)(struct batadv_hard_iface *hard_iface);
        int (*bat_iface_enable)(struct batadv_hard_iface *hard_iface);
        void (*bat_iface_disable)(struct batadv_hard_iface *hard_iface);
        void (*bat_iface_update_mac)(struct batadv_hard_iface *hard_iface);
index 8a4cc2f7f0db2a277e4281d5be62d76c7446a716..780089d75915b30649d322003a420c3cad70f3c0 100644 (file)
@@ -68,7 +68,7 @@ struct lowpan_peer {
        struct in6_addr peer_addr;
 };
 
-struct lowpan_dev {
+struct lowpan_btle_dev {
        struct list_head list;
 
        struct hci_dev *hdev;
@@ -80,18 +80,21 @@ struct lowpan_dev {
        struct delayed_work notify_peers;
 };
 
-static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev)
+static inline struct lowpan_btle_dev *
+lowpan_btle_dev(const struct net_device *netdev)
 {
-       return (struct lowpan_dev *)lowpan_priv(netdev)->priv;
+       return (struct lowpan_btle_dev *)lowpan_dev(netdev)->priv;
 }
 
-static inline void peer_add(struct lowpan_dev *dev, struct lowpan_peer *peer)
+static inline void peer_add(struct lowpan_btle_dev *dev,
+                           struct lowpan_peer *peer)
 {
        list_add_rcu(&peer->list, &dev->peers);
        atomic_inc(&dev->peer_count);
 }
 
-static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer)
+static inline bool peer_del(struct lowpan_btle_dev *dev,
+                           struct lowpan_peer *peer)
 {
        list_del_rcu(&peer->list);
        kfree_rcu(peer, rcu);
@@ -106,7 +109,7 @@ static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer)
        return false;
 }
 
-static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_dev *dev,
+static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_btle_dev *dev,
                                                 bdaddr_t *ba, __u8 type)
 {
        struct lowpan_peer *peer;
@@ -134,8 +137,8 @@ static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_dev *dev,
        return NULL;
 }
 
-static inline struct lowpan_peer *__peer_lookup_chan(struct lowpan_dev *dev,
-                                                    struct l2cap_chan *chan)
+static inline struct lowpan_peer *
+__peer_lookup_chan(struct lowpan_btle_dev *dev, struct l2cap_chan *chan)
 {
        struct lowpan_peer *peer;
 
@@ -147,8 +150,8 @@ static inline struct lowpan_peer *__peer_lookup_chan(struct lowpan_dev *dev,
        return NULL;
 }
 
-static inline struct lowpan_peer *__peer_lookup_conn(struct lowpan_dev *dev,
-                                                    struct l2cap_conn *conn)
+static inline struct lowpan_peer *
+__peer_lookup_conn(struct lowpan_btle_dev *dev, struct l2cap_conn *conn)
 {
        struct lowpan_peer *peer;
 
@@ -160,7 +163,7 @@ static inline struct lowpan_peer *__peer_lookup_conn(struct lowpan_dev *dev,
        return NULL;
 }
 
-static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_dev *dev,
+static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_btle_dev *dev,
                                                  struct in6_addr *daddr,
                                                  struct sk_buff *skb)
 {
@@ -220,7 +223,7 @@ static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_dev *dev,
 
 static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn)
 {
-       struct lowpan_dev *entry;
+       struct lowpan_btle_dev *entry;
        struct lowpan_peer *peer = NULL;
 
        rcu_read_lock();
@@ -236,10 +239,10 @@ static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn)
        return peer;
 }
 
-static struct lowpan_dev *lookup_dev(struct l2cap_conn *conn)
+static struct lowpan_btle_dev *lookup_dev(struct l2cap_conn *conn)
 {
-       struct lowpan_dev *entry;
-       struct lowpan_dev *dev = NULL;
+       struct lowpan_btle_dev *entry;
+       struct lowpan_btle_dev *dev = NULL;
 
        rcu_read_lock();
 
@@ -270,10 +273,10 @@ static int iphc_decompress(struct sk_buff *skb, struct net_device *netdev,
                           struct l2cap_chan *chan)
 {
        const u8 *saddr, *daddr;
-       struct lowpan_dev *dev;
+       struct lowpan_btle_dev *dev;
        struct lowpan_peer *peer;
 
-       dev = lowpan_dev(netdev);
+       dev = lowpan_btle_dev(netdev);
 
        rcu_read_lock();
        peer = __peer_lookup_chan(dev, chan);
@@ -375,7 +378,7 @@ drop:
 /* Packet from BT LE device */
 static int chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
 {
-       struct lowpan_dev *dev;
+       struct lowpan_btle_dev *dev;
        struct lowpan_peer *peer;
        int err;
 
@@ -431,15 +434,18 @@ static int setup_header(struct sk_buff *skb, struct net_device *netdev,
                        bdaddr_t *peer_addr, u8 *peer_addr_type)
 {
        struct in6_addr ipv6_daddr;
-       struct lowpan_dev *dev;
+       struct ipv6hdr *hdr;
+       struct lowpan_btle_dev *dev;
        struct lowpan_peer *peer;
        bdaddr_t addr, *any = BDADDR_ANY;
        u8 *daddr = any->b;
        int err, status = 0;
 
-       dev = lowpan_dev(netdev);
+       hdr = ipv6_hdr(skb);
+
+       dev = lowpan_btle_dev(netdev);
 
-       memcpy(&ipv6_daddr, &lowpan_cb(skb)->addr, sizeof(ipv6_daddr));
+       memcpy(&ipv6_daddr, &hdr->daddr, sizeof(ipv6_daddr));
 
        if (ipv6_addr_is_multicast(&ipv6_daddr)) {
                lowpan_cb(skb)->chan = NULL;
@@ -489,15 +495,9 @@ static int header_create(struct sk_buff *skb, struct net_device *netdev,
                         unsigned short type, const void *_daddr,
                         const void *_saddr, unsigned int len)
 {
-       struct ipv6hdr *hdr;
-
        if (type != ETH_P_IPV6)
                return -EINVAL;
 
-       hdr = ipv6_hdr(skb);
-
-       memcpy(&lowpan_cb(skb)->addr, &hdr->daddr, sizeof(struct in6_addr));
-
        return 0;
 }
 
@@ -543,19 +543,19 @@ static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb,
 static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
 {
        struct sk_buff *local_skb;
-       struct lowpan_dev *entry;
+       struct lowpan_btle_dev *entry;
        int err = 0;
 
        rcu_read_lock();
 
        list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
                struct lowpan_peer *pentry;
-               struct lowpan_dev *dev;
+               struct lowpan_btle_dev *dev;
 
                if (entry->netdev != netdev)
                        continue;
 
-               dev = lowpan_dev(entry->netdev);
+               dev = lowpan_btle_dev(entry->netdev);
 
                list_for_each_entry_rcu(pentry, &dev->peers, list) {
                        int ret;
@@ -723,8 +723,8 @@ static void ifdown(struct net_device *netdev)
 
 static void do_notify_peers(struct work_struct *work)
 {
-       struct lowpan_dev *dev = container_of(work, struct lowpan_dev,
-                                             notify_peers.work);
+       struct lowpan_btle_dev *dev = container_of(work, struct lowpan_btle_dev,
+                                                  notify_peers.work);
 
        netdev_notify_peers(dev->netdev); /* send neighbour adv at startup */
 }
@@ -766,7 +766,7 @@ static void set_ip_addr_bits(u8 addr_type, u8 *addr)
 }
 
 static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan,
-                                       struct lowpan_dev *dev)
+                                       struct lowpan_btle_dev *dev)
 {
        struct lowpan_peer *peer;
 
@@ -803,12 +803,12 @@ static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan,
        return peer->chan;
 }
 
-static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
+static int setup_netdev(struct l2cap_chan *chan, struct lowpan_btle_dev **dev)
 {
        struct net_device *netdev;
        int err = 0;
 
-       netdev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_dev)),
+       netdev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_btle_dev)),
                              IFACE_NAME_TEMPLATE, NET_NAME_UNKNOWN,
                              netdev_setup);
        if (!netdev)
@@ -820,7 +820,7 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
        SET_NETDEV_DEV(netdev, &chan->conn->hcon->hdev->dev);
        SET_NETDEV_DEVTYPE(netdev, &bt_type);
 
-       *dev = lowpan_dev(netdev);
+       *dev = lowpan_btle_dev(netdev);
        (*dev)->netdev = netdev;
        (*dev)->hdev = chan->conn->hcon->hdev;
        INIT_LIST_HEAD(&(*dev)->peers);
@@ -853,7 +853,7 @@ out:
 
 static inline void chan_ready_cb(struct l2cap_chan *chan)
 {
-       struct lowpan_dev *dev;
+       struct lowpan_btle_dev *dev;
 
        dev = lookup_dev(chan->conn);
 
@@ -890,8 +890,9 @@ static inline struct l2cap_chan *chan_new_conn_cb(struct l2cap_chan *pchan)
 
 static void delete_netdev(struct work_struct *work)
 {
-       struct lowpan_dev *entry = container_of(work, struct lowpan_dev,
-                                               delete_netdev);
+       struct lowpan_btle_dev *entry = container_of(work,
+                                                    struct lowpan_btle_dev,
+                                                    delete_netdev);
 
        lowpan_unregister_netdev(entry->netdev);
 
@@ -900,8 +901,8 @@ static void delete_netdev(struct work_struct *work)
 
 static void chan_close_cb(struct l2cap_chan *chan)
 {
-       struct lowpan_dev *entry;
-       struct lowpan_dev *dev = NULL;
+       struct lowpan_btle_dev *entry;
+       struct lowpan_btle_dev *dev = NULL;
        struct lowpan_peer *peer;
        int err = -ENOENT;
        bool last = false, remove = true;
@@ -921,7 +922,7 @@ static void chan_close_cb(struct l2cap_chan *chan)
        spin_lock(&devices_lock);
 
        list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
-               dev = lowpan_dev(entry->netdev);
+               dev = lowpan_btle_dev(entry->netdev);
                peer = __peer_lookup_chan(dev, chan);
                if (peer) {
                        last = peer_del(dev, peer);
@@ -1131,7 +1132,7 @@ static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
 
 static void disconnect_all_peers(void)
 {
-       struct lowpan_dev *entry;
+       struct lowpan_btle_dev *entry;
        struct lowpan_peer *peer, *tmp_peer, *new_peer;
        struct list_head peers;
 
@@ -1291,7 +1292,7 @@ static ssize_t lowpan_control_write(struct file *fp,
 
 static int lowpan_control_show(struct seq_file *f, void *ptr)
 {
-       struct lowpan_dev *entry;
+       struct lowpan_btle_dev *entry;
        struct lowpan_peer *peer;
 
        spin_lock(&devices_lock);
@@ -1322,7 +1323,7 @@ static const struct file_operations lowpan_control_fops = {
 
 static void disconnect_devices(void)
 {
-       struct lowpan_dev *entry, *tmp, *new_dev;
+       struct lowpan_btle_dev *entry, *tmp, *new_dev;
        struct list_head devices;
 
        INIT_LIST_HEAD(&devices);
@@ -1360,7 +1361,7 @@ static int device_event(struct notifier_block *unused,
                        unsigned long event, void *ptr)
 {
        struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
-       struct lowpan_dev *entry;
+       struct lowpan_btle_dev *entry;
 
        if (netdev->type != ARPHRD_6LOWPAN)
                return NOTIFY_DONE;
index 955eda93e66f32b055277a93226ca66b0075fbaf..3df7aefb7663356aeb1140f49e8e62317c5c050b 100644 (file)
@@ -65,7 +65,7 @@ static const char *const bt_slock_key_strings[BT_MAX_PROTO] = {
 void bt_sock_reclassify_lock(struct sock *sk, int proto)
 {
        BUG_ON(!sk);
-       BUG_ON(sock_owned_by_user(sk));
+       BUG_ON(!sock_allow_reclassification(sk));
 
        sock_lock_init_class_and_name(sk,
                        bt_slock_key_strings[proto], &bt_slock_key[proto],
index 6ceb5d36a32bdc375e635d34085a9b016568e16a..f4fcb4a9d5c1ed6e3fe81190ece8aa245808b395 100644 (file)
@@ -188,7 +188,7 @@ static netdev_tx_t bnep_net_xmit(struct sk_buff *skb,
         * So we have to queue them and wake up session thread which is sleeping
         * on the sk_sleep(sk).
         */
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
        skb_queue_tail(&sk->sk_write_queue, skb);
        wake_up_interruptible(sk_sleep(sk));
 
index c162af5d16bf3d4367a058f920d95cb1747ab925..d4b3dd5413bef561c3c273d354b6d25ef7465153 100644 (file)
@@ -4727,6 +4727,19 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
        u32 flags;
        u8 *ptr, real_len;
 
+       switch (type) {
+       case LE_ADV_IND:
+       case LE_ADV_DIRECT_IND:
+       case LE_ADV_SCAN_IND:
+       case LE_ADV_NONCONN_IND:
+       case LE_ADV_SCAN_RSP:
+               break;
+       default:
+                       BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x",
+                                  type);
+               return;
+       }
+
        /* Find the end of the data in case the report contains padded zero
         * bytes at the end causing an invalid length value.
         *
index 6e125d76df0d4ff539abe8a1fe2ccbfc83883afd..c045b3c54768e478f49f600fa8ffdae4a59456fc 100644 (file)
@@ -1065,6 +1065,9 @@ static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
        if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
                flags |= LE_AD_LIMITED;
 
+       if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
+               flags |= LE_AD_NO_BREDR;
+
        if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
                /* If a discovery flag wasn't provided, simply use the global
                 * settings.
@@ -1072,9 +1075,6 @@ static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
                if (!flags)
                        flags |= mgmt_get_adv_discov_flags(hdev);
 
-               if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
-                       flags |= LE_AD_NO_BREDR;
-
                /* If flags would still be empty, then there is no need to
                 * include the "Flags" AD field".
                 */
index e4cae72895a72bebc4c95b430d688e83cfebfada..388ee8b5914506d0a02757fa00def796ce3ede4e 100644 (file)
@@ -778,7 +778,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
                }
 
                if (sec.level < BT_SECURITY_LOW ||
-                   sec.level > BT_SECURITY_HIGH) {
+                   sec.level > BT_SECURITY_FIPS) {
                        err = -EINVAL;
                        break;
                }
index 263b4de4de57ccf4beddbfc48e62967236450b9b..d99b2009771a61df50d185b4e1d46ce6a3c40f94 100644 (file)
 #include <asm/uaccess.h>
 #include "br_private.h"
 
-/* called with RTNL */
 static int get_bridge_ifindices(struct net *net, int *indices, int num)
 {
        struct net_device *dev;
        int i = 0;
 
-       for_each_netdev(net, dev) {
+       rcu_read_lock();
+       for_each_netdev_rcu(net, dev) {
                if (i >= num)
                        break;
                if (dev->priv_flags & IFF_EBRIDGE)
                        indices[i++] = dev->ifindex;
        }
+       rcu_read_unlock();
 
        return i;
 }
@@ -112,7 +113,9 @@ static int add_del_if(struct net_bridge *br, int ifindex, int isadd)
 static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
        struct net_bridge *br = netdev_priv(dev);
+       struct net_bridge_port *p = NULL;
        unsigned long args[4];
+       int ret = -EOPNOTSUPP;
 
        if (copy_from_user(args, rq->ifr_data, sizeof(args)))
                return -EFAULT;
@@ -182,25 +185,29 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
                if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
                        return -EPERM;
 
-               return br_set_forward_delay(br, args[1]);
+               ret = br_set_forward_delay(br, args[1]);
+               break;
 
        case BRCTL_SET_BRIDGE_HELLO_TIME:
                if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
                        return -EPERM;
 
-               return br_set_hello_time(br, args[1]);
+               ret = br_set_hello_time(br, args[1]);
+               break;
 
        case BRCTL_SET_BRIDGE_MAX_AGE:
                if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
                        return -EPERM;
 
-               return br_set_max_age(br, args[1]);
+               ret = br_set_max_age(br, args[1]);
+               break;
 
        case BRCTL_SET_AGEING_TIME:
                if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
                        return -EPERM;
 
-               return br_set_ageing_time(br, args[1]);
+               ret = br_set_ageing_time(br, args[1]);
+               break;
 
        case BRCTL_GET_PORT_INFO:
        {
@@ -240,20 +247,19 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
                        return -EPERM;
 
                br_stp_set_enabled(br, args[1]);
-               return 0;
+               ret = 0;
+               break;
 
        case BRCTL_SET_BRIDGE_PRIORITY:
                if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
                        return -EPERM;
 
                br_stp_set_bridge_priority(br, args[1]);
-               return 0;
+               ret = 0;
+               break;
 
        case BRCTL_SET_PORT_PRIORITY:
        {
-               struct net_bridge_port *p;
-               int ret;
-
                if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
                        return -EPERM;
 
@@ -263,14 +269,11 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
                else
                        ret = br_stp_set_port_priority(p, args[2]);
                spin_unlock_bh(&br->lock);
-               return ret;
+               break;
        }
 
        case BRCTL_SET_PATH_COST:
        {
-               struct net_bridge_port *p;
-               int ret;
-
                if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
                        return -EPERM;
 
@@ -280,8 +283,7 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
                else
                        ret = br_stp_set_path_cost(p, args[2]);
                spin_unlock_bh(&br->lock);
-
-               return ret;
+               break;
        }
 
        case BRCTL_GET_FDB_ENTRIES:
@@ -289,7 +291,14 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
                                       args[2], args[3]);
        }
 
-       return -EOPNOTSUPP;
+       if (!ret) {
+               if (p)
+                       br_ifinfo_notify(RTM_NEWLINK, p);
+               else
+                       netdev_state_change(br->dev);
+       }
+
+       return ret;
 }
 
 static int old_deviceless(struct net *net, void __user *uarg)
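The shape of the br_ioctl.c refactor above: every early return in the switch becomes "ret = ...; break;" so control always reaches a single exit point, and that exit point is the one place that tells userspace a change succeeded. In outline (names from the hunks):

    /* Sketch: single exit point so every successful setter notifies */
    int ret = -EOPNOTSUPP;
    struct net_bridge_port *p = NULL;

    switch (args[0]) {
    case BRCTL_SET_AGEING_TIME:
            ret = br_set_ageing_time(br, args[1]);
            break;
    /* ... remaining cases set 'ret' (and 'p' for per-port ops) ... */
    }

    if (!ret) {
            if (p)
                    br_ifinfo_notify(RTM_NEWLINK, p);   /* per-port change */
            else
                    netdev_state_change(br->dev);       /* bridge-wide change */
    }
    return ret;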
index 253bc77eda3bd1106021a7d93ba6854964f0a935..7dbc80d01eb00ab69fc06a0d613167651a9b15f2 100644 (file)
@@ -61,6 +61,19 @@ static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
                e->flags |= MDB_FLAGS_OFFLOAD;
 }
 
+static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip)
+{
+       memset(ip, 0, sizeof(struct br_ip));
+       ip->vid = entry->vid;
+       ip->proto = entry->addr.proto;
+       if (ip->proto == htons(ETH_P_IP))
+               ip->u.ip4 = entry->addr.u.ip4;
+#if IS_ENABLED(CONFIG_IPV6)
+       else
+               ip->u.ip6 = entry->addr.u.ip6;
+#endif
+}
+
 static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
                            struct net_device *dev)
 {
@@ -243,9 +256,45 @@ static inline size_t rtnl_mdb_nlmsg_size(void)
                + nla_total_size(sizeof(struct br_mdb_entry));
 }
 
-static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry,
-                           int type, struct net_bridge_port_group *pg)
+struct br_mdb_complete_info {
+       struct net_bridge_port *port;
+       struct br_ip ip;
+};
+
+static void br_mdb_complete(struct net_device *dev, int err, void *priv)
 {
+       struct br_mdb_complete_info *data = priv;
+       struct net_bridge_port_group __rcu **pp;
+       struct net_bridge_port_group *p;
+       struct net_bridge_mdb_htable *mdb;
+       struct net_bridge_mdb_entry *mp;
+       struct net_bridge_port *port = data->port;
+       struct net_bridge *br = port->br;
+
+       if (err)
+               goto err;
+
+       spin_lock_bh(&br->multicast_lock);
+       mdb = mlock_dereference(br->mdb, br);
+       mp = br_mdb_ip_get(mdb, &data->ip);
+       if (!mp)
+               goto out;
+       for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
+            pp = &p->next) {
+               if (p->port != port)
+                       continue;
+               p->flags |= MDB_PG_FLAGS_OFFLOAD;
+       }
+out:
+       spin_unlock_bh(&br->multicast_lock);
+err:
+       kfree(priv);
+}
+
+static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p,
+                           struct br_mdb_entry *entry, int type)
+{
+       struct br_mdb_complete_info *complete_info;
        struct switchdev_obj_port_mdb mdb = {
                .obj = {
                        .id = SWITCHDEV_OBJ_ID_PORT_MDB,
@@ -268,9 +317,14 @@ static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry,
 
        mdb.obj.orig_dev = port_dev;
        if (port_dev && type == RTM_NEWMDB) {
-               err = switchdev_port_obj_add(port_dev, &mdb.obj);
-               if (!err && pg)
-                       pg->flags |= MDB_PG_FLAGS_OFFLOAD;
+               complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
+               if (complete_info) {
+                       complete_info->port = p;
+                       __mdb_entry_to_br_ip(entry, &complete_info->ip);
+                       mdb.obj.complete_priv = complete_info;
+                       mdb.obj.complete = br_mdb_complete;
+                       switchdev_port_obj_add(port_dev, &mdb.obj);
+               }
        } else if (port_dev && type == RTM_DELMDB) {
                switchdev_port_obj_del(port_dev, &mdb.obj);
        }
@@ -291,21 +345,21 @@ errout:
        rtnl_set_sk_err(net, RTNLGRP_MDB, err);
 }
 
-void br_mdb_notify(struct net_device *dev, struct net_bridge_port_group *pg,
-                  int type)
+void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
+                  struct br_ip *group, int type, u8 flags)
 {
        struct br_mdb_entry entry;
 
        memset(&entry, 0, sizeof(entry));
-       entry.ifindex = pg->port->dev->ifindex;
-       entry.addr.proto = pg->addr.proto;
-       entry.addr.u.ip4 = pg->addr.u.ip4;
+       entry.ifindex = port->dev->ifindex;
+       entry.addr.proto = group->proto;
+       entry.addr.u.ip4 = group->u.ip4;
 #if IS_ENABLED(CONFIG_IPV6)
-       entry.addr.u.ip6 = pg->addr.u.ip6;
+       entry.addr.u.ip6 = group->u.ip6;
 #endif
-       entry.vid = pg->addr.vid;
-       __mdb_entry_fill_flags(&entry, pg->flags);
-       __br_mdb_notify(dev, &entry, type, pg);
+       entry.vid = group->vid;
+       __mdb_entry_fill_flags(&entry, flags);
+       __br_mdb_notify(dev, port, &entry, type);
 }
 
 static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
@@ -450,8 +504,7 @@ static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
 }
 
 static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
-                           struct br_ip *group, unsigned char state,
-                           struct net_bridge_port_group **pg)
+                           struct br_ip *group, unsigned char state)
 {
        struct net_bridge_mdb_entry *mp;
        struct net_bridge_port_group *p;
@@ -482,7 +535,6 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
        if (unlikely(!p))
                return -ENOMEM;
        rcu_assign_pointer(*pp, p);
-       *pg = p;
        if (state == MDB_TEMPORARY)
                mod_timer(&p->timer, now + br->multicast_membership_interval);
 
@@ -490,8 +542,7 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
 }
 
 static int __br_mdb_add(struct net *net, struct net_bridge *br,
-                       struct br_mdb_entry *entry,
-                       struct net_bridge_port_group **pg)
+                       struct br_mdb_entry *entry)
 {
        struct br_ip ip;
        struct net_device *dev;
@@ -509,18 +560,10 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
        if (!p || p->br != br || p->state == BR_STATE_DISABLED)
                return -EINVAL;
 
-       memset(&ip, 0, sizeof(ip));
-       ip.vid = entry->vid;
-       ip.proto = entry->addr.proto;
-       if (ip.proto == htons(ETH_P_IP))
-               ip.u.ip4 = entry->addr.u.ip4;
-#if IS_ENABLED(CONFIG_IPV6)
-       else
-               ip.u.ip6 = entry->addr.u.ip6;
-#endif
+       __mdb_entry_to_br_ip(entry, &ip);
 
        spin_lock_bh(&br->multicast_lock);
-       ret = br_mdb_add_group(br, p, &ip, entry->state, pg);
+       ret = br_mdb_add_group(br, p, &ip, entry->state);
        spin_unlock_bh(&br->multicast_lock);
        return ret;
 }
@@ -528,7 +571,6 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
 static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
        struct net *net = sock_net(skb->sk);
-       struct net_bridge_port_group *pg;
        struct net_bridge_vlan_group *vg;
        struct net_device *dev, *pdev;
        struct br_mdb_entry *entry;
@@ -558,15 +600,15 @@ static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
        if (br_vlan_enabled(br) && vg && entry->vid == 0) {
                list_for_each_entry(v, &vg->vlan_list, vlist) {
                        entry->vid = v->vid;
-                       err = __br_mdb_add(net, br, entry, &pg);
+                       err = __br_mdb_add(net, br, entry);
                        if (err)
                                break;
-                       __br_mdb_notify(dev, entry, RTM_NEWMDB, pg);
+                       __br_mdb_notify(dev, p, entry, RTM_NEWMDB);
                }
        } else {
-               err = __br_mdb_add(net, br, entry, &pg);
+               err = __br_mdb_add(net, br, entry);
                if (!err)
-                       __br_mdb_notify(dev, entry, RTM_NEWMDB, pg);
+                       __br_mdb_notify(dev, p, entry, RTM_NEWMDB);
        }
 
        return err;
@@ -584,15 +626,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
        if (!netif_running(br->dev) || br->multicast_disabled)
                return -EINVAL;
 
-       memset(&ip, 0, sizeof(ip));
-       ip.vid = entry->vid;
-       ip.proto = entry->addr.proto;
-       if (ip.proto == htons(ETH_P_IP))
-               ip.u.ip4 = entry->addr.u.ip4;
-#if IS_ENABLED(CONFIG_IPV6)
-       else
-               ip.u.ip6 = entry->addr.u.ip6;
-#endif
+       __mdb_entry_to_br_ip(entry, &ip);
 
        spin_lock_bh(&br->multicast_lock);
        mdb = mlock_dereference(br->mdb, br);
@@ -662,12 +696,12 @@ static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
                        entry->vid = v->vid;
                        err = __br_mdb_del(br, entry);
                        if (!err)
-                               __br_mdb_notify(dev, entry, RTM_DELMDB, NULL);
+                               __br_mdb_notify(dev, p, entry, RTM_DELMDB);
                }
        } else {
                err = __br_mdb_del(br, entry);
                if (!err)
-                       __br_mdb_notify(dev, entry, RTM_DELMDB, NULL);
+                       __br_mdb_notify(dev, p, entry, RTM_DELMDB);
        }
 
        return err;
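br_mdb_complete() is the second half of an asynchronous switchdev add: instead of setting MDB_PG_FLAGS_OFFLOAD synchronously after switchdev_port_obj_add() as before, the caller stashes just enough state (port plus a copy of the group address) in complete_priv, and the callback re-looks the entry up under multicast_lock before marking it offloaded, since the port group pointer could have become stale while the hardware operation was in flight. The handshake in outline, condensed from the hunks above:

    /* Sketch of the deferred-completion handshake */
    struct br_mdb_complete_info *ci = kmalloc(sizeof(*ci), GFP_ATOMIC);

    if (ci) {
            ci->port = p;
            __mdb_entry_to_br_ip(entry, &ci->ip);   /* copy, don't point */
            mdb.obj.complete_priv = ci;
            mdb.obj.complete = br_mdb_complete;     /* runs when hw finishes */
            switchdev_port_obj_add(port_dev, &mdb.obj);
    }
    /* br_mdb_complete(): take multicast_lock, br_mdb_ip_get(&ci->ip),
     * set MDB_PG_FLAGS_OFFLOAD on the matching port entry, kfree(ci).
     */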
index a4c15df2b7920301f7d12918dc21a1d6f2474352..6852f3c7009c2b1cc2cbfa9e08b6b15104b4d12b 100644 (file)
@@ -283,7 +283,8 @@ static void br_multicast_del_pg(struct net_bridge *br,
                rcu_assign_pointer(*pp, p->next);
                hlist_del_init(&p->mglist);
                del_timer(&p->timer);
-               br_mdb_notify(br->dev, p, RTM_DELMDB);
+               br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
+                             p->flags);
                call_rcu_bh(&p->rcu, br_multicast_free_pg);
 
                if (!mp->ports && !mp->mglist &&
@@ -705,7 +706,7 @@ static int br_multicast_add_group(struct net_bridge *br,
        if (unlikely(!p))
                goto err;
        rcu_assign_pointer(*pp, p);
-       br_mdb_notify(br->dev, p, RTM_NEWMDB);
+       br_mdb_notify(br->dev, port, group, RTM_NEWMDB, 0);
 
 found:
        mod_timer(&p->timer, now + br->multicast_membership_interval);
@@ -1278,6 +1279,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
        struct br_ip saddr;
        unsigned long max_delay;
        unsigned long now = jiffies;
+       unsigned int offset = skb_transport_offset(skb);
        __be32 group;
        int err = 0;
 
@@ -1288,14 +1290,14 @@ static int br_ip4_multicast_query(struct net_bridge *br,
 
        group = ih->group;
 
-       if (skb->len == sizeof(*ih)) {
+       if (skb->len == offset + sizeof(*ih)) {
                max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
 
                if (!max_delay) {
                        max_delay = 10 * HZ;
                        group = 0;
                }
-       } else if (skb->len >= sizeof(*ih3)) {
+       } else if (skb->len >= offset + sizeof(*ih3)) {
                ih3 = igmpv3_query_hdr(skb);
                if (ih3->nsrcs)
                        goto out;
@@ -1356,6 +1358,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
        struct br_ip saddr;
        unsigned long max_delay;
        unsigned long now = jiffies;
+       unsigned int offset = skb_transport_offset(skb);
        const struct in6_addr *group = NULL;
        bool is_general_query;
        int err = 0;
@@ -1365,8 +1368,8 @@ static int br_ip6_multicast_query(struct net_bridge *br,
            (port && port->state == BR_STATE_DISABLED))
                goto out;
 
-       if (skb->len == sizeof(*mld)) {
-               if (!pskb_may_pull(skb, sizeof(*mld))) {
+       if (skb->len == offset + sizeof(*mld)) {
+               if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
                        err = -EINVAL;
                        goto out;
                }
@@ -1375,7 +1378,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
                if (max_delay)
                        group = &mld->mld_mca;
        } else {
-               if (!pskb_may_pull(skb, sizeof(*mld2q))) {
+               if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
                        err = -EINVAL;
                        goto out;
                }
@@ -1461,7 +1464,8 @@ br_multicast_leave_group(struct net_bridge *br,
                        hlist_del_init(&p->mglist);
                        del_timer(&p->timer);
                        call_rcu_bh(&p->rcu, br_multicast_free_pg);
-                       br_mdb_notify(br->dev, p, RTM_DELMDB);
+                       br_mdb_notify(br->dev, port, group, RTM_DELMDB,
+                                     p->flags);
 
                        if (!mp->ports && !mp->mglist &&
                            netif_running(br->dev))
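The offset changes in the query parsers fix a subtle bug: skb->len at this point still includes the IP/IPv6 header, so comparing it directly against the bare IGMP/MLD header size misclassifies the query version. Adding skb_transport_offset() anchors every length test at the start of the transport header, roughly:

    /* Sketch: length tests made relative to the transport header */
    unsigned int offset = skb_transport_offset(skb);  /* IP/IPv6 hdr len */

    if (skb->len == offset + sizeof(struct igmphdr)) {
            /* IGMPv1/v2 query */
    } else if (skb->len >= offset + sizeof(struct igmpv3_query)) {
            /* IGMPv3 query */
    }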
index 44114a94c576ac8bc55aed54d3b3a22dbf0a4301..2d25979273a6f57378da645460d9d6c2a0d91e5c 100644 (file)
@@ -217,13 +217,13 @@ static int br_validate_ipv4(struct net *net, struct sk_buff *skb)
 
        len = ntohs(iph->tot_len);
        if (skb->len < len) {
-               IP_INC_STATS_BH(net, IPSTATS_MIB_INTRUNCATEDPKTS);
+               __IP_INC_STATS(net, IPSTATS_MIB_INTRUNCATEDPKTS);
                goto drop;
        } else if (len < (iph->ihl*4))
                goto inhdr_error;
 
        if (pskb_trim_rcsum(skb, len)) {
-               IP_INC_STATS_BH(net, IPSTATS_MIB_INDISCARDS);
+               __IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
                goto drop;
        }
 
@@ -236,7 +236,7 @@ static int br_validate_ipv4(struct net *net, struct sk_buff *skb)
        return 0;
 
 inhdr_error:
-       IP_INC_STATS_BH(net, IPSTATS_MIB_INHDRERRORS);
+       __IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
 drop:
        return -1;
 }
index d61f56efc8dc3a2bc7ca440ae07f301a7cb6eaed..5e59a8457e7b19ddbd80a0f295a206e9202f3ccd 100644 (file)
@@ -122,13 +122,13 @@ int br_validate_ipv6(struct net *net, struct sk_buff *skb)
 
        if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
                if (pkt_len + ip6h_len > skb->len) {
-                       IP6_INC_STATS_BH(net, idev,
-                                        IPSTATS_MIB_INTRUNCATEDPKTS);
+                       __IP6_INC_STATS(net, idev,
+                                       IPSTATS_MIB_INTRUNCATEDPKTS);
                        goto drop;
                }
                if (pskb_trim_rcsum(skb, pkt_len + ip6h_len)) {
-                       IP6_INC_STATS_BH(net, idev,
-                                        IPSTATS_MIB_INDISCARDS);
+                       __IP6_INC_STATS(net, idev,
+                                       IPSTATS_MIB_INDISCARDS);
                        goto drop;
                }
        }
@@ -142,7 +142,7 @@ int br_validate_ipv6(struct net *net, struct sk_buff *skb)
        return 0;
 
 inhdr_error:
-       IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS);
+       __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
 drop:
        return -1;
 }
index e9c635eae24de298ac028b8d3d80034dd6d93391..a5343c7232bfbf37ca95580a70a740950354e691 100644 (file)
@@ -135,9 +135,9 @@ static inline size_t br_port_info_size(void)
                + nla_total_size(sizeof(u16))   /* IFLA_BRPORT_NO */
                + nla_total_size(sizeof(u8))    /* IFLA_BRPORT_TOPOLOGY_CHANGE_ACK */
                + nla_total_size(sizeof(u8))    /* IFLA_BRPORT_CONFIG_PENDING */
-               + nla_total_size(sizeof(u64))   /* IFLA_BRPORT_MESSAGE_AGE_TIMER */
-               + nla_total_size(sizeof(u64))   /* IFLA_BRPORT_FORWARD_DELAY_TIMER */
-               + nla_total_size(sizeof(u64))   /* IFLA_BRPORT_HOLD_TIMER */
+               + nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_MESSAGE_AGE_TIMER */
+               + nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_FORWARD_DELAY_TIMER */
+               + nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_HOLD_TIMER */
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
                + nla_total_size(sizeof(u8))    /* IFLA_BRPORT_MULTICAST_ROUTER */
 #endif
@@ -190,13 +190,16 @@ static int br_port_fill_attrs(struct sk_buff *skb,
                return -EMSGSIZE;
 
        timerval = br_timer_value(&p->message_age_timer);
-       if (nla_put_u64(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval))
+       if (nla_put_u64_64bit(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval,
+                             IFLA_BRPORT_PAD))
                return -EMSGSIZE;
        timerval = br_timer_value(&p->forward_delay_timer);
-       if (nla_put_u64(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval))
+       if (nla_put_u64_64bit(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval,
+                             IFLA_BRPORT_PAD))
                return -EMSGSIZE;
        timerval = br_timer_value(&p->hold_timer);
-       if (nla_put_u64(skb, IFLA_BRPORT_HOLD_TIMER, timerval))
+       if (nla_put_u64_64bit(skb, IFLA_BRPORT_HOLD_TIMER, timerval,
+                             IFLA_BRPORT_PAD))
                return -EMSGSIZE;
 
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
@@ -847,6 +850,7 @@ static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
        [IFLA_BR_NF_CALL_IP6TABLES] = { .type = NLA_U8 },
        [IFLA_BR_NF_CALL_ARPTABLES] = { .type = NLA_U8 },
        [IFLA_BR_VLAN_DEFAULT_PVID] = { .type = NLA_U16 },
+       [IFLA_BR_VLAN_STATS_ENABLED] = { .type = NLA_U8 },
 };
 
 static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
@@ -918,6 +922,14 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
                if (err)
                        return err;
        }
+
+       if (data[IFLA_BR_VLAN_STATS_ENABLED]) {
+               __u8 vlan_stats = nla_get_u8(data[IFLA_BR_VLAN_STATS_ENABLED]);
+
+               err = br_vlan_set_stats(br, vlan_stats);
+               if (err)
+                       return err;
+       }
 #endif
 
        if (data[IFLA_BR_GROUP_FWD_MASK]) {
@@ -1079,6 +1091,7 @@ static size_t br_get_size(const struct net_device *brdev)
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
               nla_total_size(sizeof(__be16)) + /* IFLA_BR_VLAN_PROTOCOL */
               nla_total_size(sizeof(u16)) +    /* IFLA_BR_VLAN_DEFAULT_PVID */
+              nla_total_size(sizeof(u8)) +     /* IFLA_BR_VLAN_STATS_ENABLED */
 #endif
               nla_total_size(sizeof(u16)) +    /* IFLA_BR_GROUP_FWD_MASK */
               nla_total_size(sizeof(struct ifla_bridge_id)) +   /* IFLA_BR_ROOT_ID */
@@ -1087,10 +1100,10 @@ static size_t br_get_size(const struct net_device *brdev)
               nla_total_size(sizeof(u32)) +    /* IFLA_BR_ROOT_PATH_COST */
               nla_total_size(sizeof(u8)) +     /* IFLA_BR_TOPOLOGY_CHANGE */
               nla_total_size(sizeof(u8)) +     /* IFLA_BR_TOPOLOGY_CHANGE_DETECTED */
-              nla_total_size(sizeof(u64)) +    /* IFLA_BR_HELLO_TIMER */
-              nla_total_size(sizeof(u64)) +    /* IFLA_BR_TCN_TIMER */
-              nla_total_size(sizeof(u64)) +    /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */
-              nla_total_size(sizeof(u64)) +    /* IFLA_BR_GC_TIMER */
+              nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_HELLO_TIMER */
+              nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TCN_TIMER */
+              nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */
+              nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_GC_TIMER */
               nla_total_size(ETH_ALEN) +       /* IFLA_BR_GROUP_ADDR */
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
               nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_ROUTER */
@@ -1101,12 +1114,12 @@ static size_t br_get_size(const struct net_device *brdev)
               nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_HASH_MAX */
               nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_LAST_MEMBER_CNT */
               nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_STARTUP_QUERY_CNT */
-              nla_total_size(sizeof(u64)) +    /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */
-              nla_total_size(sizeof(u64)) +    /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */
-              nla_total_size(sizeof(u64)) +    /* IFLA_BR_MCAST_QUERIER_INTVL */
-              nla_total_size(sizeof(u64)) +    /* IFLA_BR_MCAST_QUERY_INTVL */
-              nla_total_size(sizeof(u64)) +    /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */
-              nla_total_size(sizeof(u64)) +    /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */
+              nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */
+              nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */
+              nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERIER_INTVL */
+              nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_INTVL */
+              nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */
+              nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */
 #endif
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
               nla_total_size(sizeof(u8)) +     /* IFLA_BR_NF_CALL_IPTABLES */
@@ -1129,16 +1142,17 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
        u64 clockval;
 
        clockval = br_timer_value(&br->hello_timer);
-       if (nla_put_u64(skb, IFLA_BR_HELLO_TIMER, clockval))
+       if (nla_put_u64_64bit(skb, IFLA_BR_HELLO_TIMER, clockval, IFLA_BR_PAD))
                return -EMSGSIZE;
        clockval = br_timer_value(&br->tcn_timer);
-       if (nla_put_u64(skb, IFLA_BR_TCN_TIMER, clockval))
+       if (nla_put_u64_64bit(skb, IFLA_BR_TCN_TIMER, clockval, IFLA_BR_PAD))
                return -EMSGSIZE;
        clockval = br_timer_value(&br->topology_change_timer);
-       if (nla_put_u64(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval))
+       if (nla_put_u64_64bit(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval,
+                             IFLA_BR_PAD))
                return -EMSGSIZE;
        clockval = br_timer_value(&br->gc_timer);
-       if (nla_put_u64(skb, IFLA_BR_GC_TIMER, clockval))
+       if (nla_put_u64_64bit(skb, IFLA_BR_GC_TIMER, clockval, IFLA_BR_PAD))
                return -EMSGSIZE;
 
        if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
@@ -1163,7 +1177,8 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
 
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
        if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto) ||
-           nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid))
+           nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid) ||
+           nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED, br->vlan_stats_enabled))
                return -EMSGSIZE;
 #endif
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
@@ -1182,22 +1197,28 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
                return -EMSGSIZE;
 
        clockval = jiffies_to_clock_t(br->multicast_last_member_interval);
-       if (nla_put_u64(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval))
+       if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval,
+                             IFLA_BR_PAD))
                return -EMSGSIZE;
        clockval = jiffies_to_clock_t(br->multicast_membership_interval);
-       if (nla_put_u64(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval))
+       if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval,
+                             IFLA_BR_PAD))
                return -EMSGSIZE;
        clockval = jiffies_to_clock_t(br->multicast_querier_interval);
-       if (nla_put_u64(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval))
+       if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval,
+                             IFLA_BR_PAD))
                return -EMSGSIZE;
        clockval = jiffies_to_clock_t(br->multicast_query_interval);
-       if (nla_put_u64(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval))
+       if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval,
+                             IFLA_BR_PAD))
                return -EMSGSIZE;
        clockval = jiffies_to_clock_t(br->multicast_query_response_interval);
-       if (nla_put_u64(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval))
+       if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval,
+                             IFLA_BR_PAD))
                return -EMSGSIZE;
        clockval = jiffies_to_clock_t(br->multicast_startup_query_interval);
-       if (nla_put_u64(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval))
+       if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval,
+                             IFLA_BR_PAD))
                return -EMSGSIZE;
 #endif
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
@@ -1213,6 +1234,69 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
        return 0;
 }
 
+static size_t br_get_linkxstats_size(const struct net_device *dev)
+{
+       struct net_bridge *br = netdev_priv(dev);
+       struct net_bridge_vlan_group *vg;
+       struct net_bridge_vlan *v;
+       int numvls = 0;
+
+       vg = br_vlan_group(br);
+       if (!vg)
+               return 0;
+
+       /* we need to count all, even placeholder entries */
+       list_for_each_entry(v, &vg->vlan_list, vlist)
+               numvls++;
+
+       /* account for the vlans and the link xstats type nest attribute */
+       return numvls * nla_total_size(sizeof(struct bridge_vlan_xstats)) +
+              nla_total_size(0);
+}
+
+static int br_fill_linkxstats(struct sk_buff *skb, const struct net_device *dev,
+                             int *prividx)
+{
+       struct net_bridge *br = netdev_priv(dev);
+       struct net_bridge_vlan_group *vg;
+       struct net_bridge_vlan *v;
+       struct nlattr *nest;
+       int vl_idx = 0;
+
+       vg = br_vlan_group(br);
+       if (!vg)
+               goto out;
+       nest = nla_nest_start(skb, LINK_XSTATS_TYPE_BRIDGE);
+       if (!nest)
+               return -EMSGSIZE;
+       list_for_each_entry(v, &vg->vlan_list, vlist) {
+               struct bridge_vlan_xstats vxi;
+               struct br_vlan_stats stats;
+
+               if (vl_idx++ < *prividx)
+                       continue;
+               memset(&vxi, 0, sizeof(vxi));
+               vxi.vid = v->vid;
+               br_vlan_get_stats(v, &stats);
+               vxi.rx_bytes = stats.rx_bytes;
+               vxi.rx_packets = stats.rx_packets;
+               vxi.tx_bytes = stats.tx_bytes;
+               vxi.tx_packets = stats.tx_packets;
+
+               if (nla_put(skb, BRIDGE_XSTATS_VLAN, sizeof(vxi), &vxi))
+                       goto nla_put_failure;
+       }
+       nla_nest_end(skb, nest);
+       *prividx = 0;
+out:
+       return 0;
+
+nla_put_failure:
+       nla_nest_end(skb, nest);
+       *prividx = vl_idx;
+
+       return -EMSGSIZE;
+}
 
 static struct rtnl_af_ops br_af_ops __read_mostly = {
        .family                 = AF_BRIDGE,
@@ -1231,6 +1315,8 @@ struct rtnl_link_ops br_link_ops __read_mostly = {
        .dellink                = br_dev_delete,
        .get_size               = br_get_size,
        .fill_info              = br_fill_info,
+       .fill_linkxstats        = br_fill_linkxstats,
+       .get_linkxstats_size    = br_get_linkxstats_size,
 
        .slave_maxtype          = IFLA_BRPORT_MAX,
        .slave_policy           = br_port_policy,
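The nla_put_u64() to nla_put_u64_64bit() conversions in this file all follow one recipe: a bare 64-bit netlink attribute payload can land on a 4-byte boundary in the message, which some architectures cannot read with a single aligned load, so the _64bit variants emit a zero-length pad attribute (IFLA_BR_PAD or IFLA_BRPORT_PAD) first when alignment requires it, and the size estimates switch to nla_total_size_64bit() to budget for that pad. The recipe for any 64-bit attribute, as a sketch:

    /* Sketch: emitting an 8-byte-aligned u64 attribute */
    if (nla_put_u64_64bit(skb, IFLA_BR_HELLO_TIMER, clockval, IFLA_BR_PAD))
            return -EMSGSIZE;

    /* ...with matching worst-case accounting in the get_size() callback: */
    size += nla_total_size_64bit(sizeof(u64));      /* IFLA_BR_HELLO_TIMER */

Separately, the new br_fill_linkxstats() uses *prividx as a resume cursor: if the dump runs out of skb room mid-list, the failing VLAN index is recorded so the next pass can skip the entries already sent.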
index 1b5d145dfcbf28beb5d7dfd3a28fcdaf919e26aa..c7fb5d7a7218c2c286160874fc30278108083ca6 100644 (file)
@@ -77,12 +77,21 @@ struct bridge_mcast_querier {
 };
 #endif
 
+struct br_vlan_stats {
+       u64 rx_bytes;
+       u64 rx_packets;
+       u64 tx_bytes;
+       u64 tx_packets;
+       struct u64_stats_sync syncp;
+};
+
 /**
  * struct net_bridge_vlan - per-vlan entry
  *
  * @vnode: rhashtable member
  * @vid: VLAN id
  * @flags: bridge vlan flags
+ * @stats: per-cpu VLAN statistics
  * @br: if MASTER flag set, this points to a bridge struct
  * @port: if MASTER flag unset, this points to a port struct
  * @refcnt: if MASTER flag set, this is bumped for each port referencing it
@@ -100,6 +109,7 @@ struct net_bridge_vlan {
        struct rhash_head               vnode;
        u16                             vid;
        u16                             flags;
+       struct br_vlan_stats __percpu   *stats;
        union {
                struct net_bridge       *br;
                struct net_bridge_port  *port;
@@ -342,6 +352,7 @@ struct net_bridge
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
        struct net_bridge_vlan_group    __rcu *vlgrp;
        u8                              vlan_enabled;
+       u8                              vlan_stats_enabled;
        __be16                          vlan_proto;
        u16                             default_pvid;
 #endif
@@ -560,8 +571,8 @@ br_multicast_new_port_group(struct net_bridge_port *port, struct br_ip *group,
                            unsigned char flags);
 void br_mdb_init(void);
 void br_mdb_uninit(void);
-void br_mdb_notify(struct net_device *dev, struct net_bridge_port_group *pg,
-                  int type);
+void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
+                  struct br_ip *group, int type, u8 flags);
 void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
                   int type);
 
@@ -691,6 +702,7 @@ int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
 int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
 int __br_vlan_set_proto(struct net_bridge *br, __be16 proto);
 int br_vlan_set_proto(struct net_bridge *br, unsigned long val);
+int br_vlan_set_stats(struct net_bridge *br, unsigned long val);
 int br_vlan_init(struct net_bridge *br);
 int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val);
 int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid);
@@ -699,6 +711,8 @@ int nbp_vlan_delete(struct net_bridge_port *port, u16 vid);
 void nbp_vlan_flush(struct net_bridge_port *port);
 int nbp_vlan_init(struct net_bridge_port *port);
 int nbp_get_num_vlan_infos(struct net_bridge_port *p, u32 filter_mask);
+void br_vlan_get_stats(const struct net_bridge_vlan *v,
+                      struct br_vlan_stats *stats);
 
 static inline struct net_bridge_vlan_group *br_vlan_group(
                                        const struct net_bridge *br)
@@ -881,6 +895,10 @@ static inline struct net_bridge_vlan_group *nbp_vlan_group_rcu(
        return NULL;
 }
 
+static inline void br_vlan_get_stats(const struct net_bridge_vlan *v,
+                                    struct br_vlan_stats *stats)
+{
+}
 #endif
 
 struct nf_br_ops {
index 6b8091407ca3126b333c76c5104f5a8ca6803e49..beb47071e38df5761dc96c1138833ed89b285657 100644 (file)
@@ -43,7 +43,14 @@ static ssize_t store_bridge_parm(struct device *d,
        if (endp == buf)
                return -EINVAL;
 
+       if (!rtnl_trylock())
+               return restart_syscall();
+
        err = (*set)(br, val);
+       if (!err)
+               netdev_state_change(br->dev);
+       rtnl_unlock();
+
        return err ? err : len;
 }
 
@@ -101,15 +108,7 @@ static ssize_t ageing_time_show(struct device *d,
 
 static int set_ageing_time(struct net_bridge *br, unsigned long val)
 {
-       int ret;
-
-       if (!rtnl_trylock())
-               return restart_syscall();
-
-       ret = br_set_ageing_time(br, val);
-       rtnl_unlock();
-
-       return ret;
+       return br_set_ageing_time(br, val);
 }
 
 static ssize_t ageing_time_store(struct device *d,
@@ -128,27 +127,18 @@ static ssize_t stp_state_show(struct device *d,
 }
 
 
+static int set_stp_state(struct net_bridge *br, unsigned long val)
+{
+       br_stp_set_enabled(br, val);
+
+       return 0;
+}
+
 static ssize_t stp_state_store(struct device *d,
                               struct device_attribute *attr, const char *buf,
                               size_t len)
 {
-       struct net_bridge *br = to_bridge(d);
-       char *endp;
-       unsigned long val;
-
-       if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN))
-               return -EPERM;
-
-       val = simple_strtoul(buf, &endp, 0);
-       if (endp == buf)
-               return -EINVAL;
-
-       if (!rtnl_trylock())
-               return restart_syscall();
-       br_stp_set_enabled(br, val);
-       rtnl_unlock();
-
-       return len;
+       return store_bridge_parm(d, buf, len, set_stp_state);
 }
 static DEVICE_ATTR_RW(stp_state);
 
@@ -160,29 +150,22 @@ static ssize_t group_fwd_mask_show(struct device *d,
        return sprintf(buf, "%#x\n", br->group_fwd_mask);
 }
 
-
-static ssize_t group_fwd_mask_store(struct device *d,
-                                   struct device_attribute *attr,
-                                   const char *buf,
-                                   size_t len)
+static int set_group_fwd_mask(struct net_bridge *br, unsigned long val)
 {
-       struct net_bridge *br = to_bridge(d);
-       char *endp;
-       unsigned long val;
-
-       if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN))
-               return -EPERM;
-
-       val = simple_strtoul(buf, &endp, 0);
-       if (endp == buf)
-               return -EINVAL;
-
        if (val & BR_GROUPFWD_RESTRICTED)
                return -EINVAL;
 
        br->group_fwd_mask = val;
 
-       return len;
+       return 0;
+}
+
+static ssize_t group_fwd_mask_store(struct device *d,
+                                   struct device_attribute *attr,
+                                   const char *buf,
+                                   size_t len)
+{
+       return store_bridge_parm(d, buf, len, set_group_fwd_mask);
 }
 static DEVICE_ATTR_RW(group_fwd_mask);
 
@@ -328,6 +311,7 @@ static ssize_t group_addr_store(struct device *d,
 
        br->group_addr_set = true;
        br_recalculate_fwd_mask(br);
+       netdev_state_change(br->dev);
 
        rtnl_unlock();
 
@@ -336,17 +320,17 @@ static ssize_t group_addr_store(struct device *d,
 
 static DEVICE_ATTR_RW(group_addr);
 
+static int set_flush(struct net_bridge *br, unsigned long val)
+{
+       br_fdb_flush(br);
+       return 0;
+}
+
 static ssize_t flush_store(struct device *d,
                           struct device_attribute *attr,
                           const char *buf, size_t len)
 {
-       struct net_bridge *br = to_bridge(d);
-
-       if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN))
-               return -EPERM;
-
-       br_fdb_flush(br);
-       return len;
+       return store_bridge_parm(d, buf, len, set_flush);
 }
 static DEVICE_ATTR_WO(flush);
 
@@ -747,6 +731,22 @@ static ssize_t default_pvid_store(struct device *d,
        return store_bridge_parm(d, buf, len, br_vlan_set_default_pvid);
 }
 static DEVICE_ATTR_RW(default_pvid);
+
+static ssize_t vlan_stats_enabled_show(struct device *d,
+                                      struct device_attribute *attr,
+                                      char *buf)
+{
+       struct net_bridge *br = to_bridge(d);
+       return sprintf(buf, "%u\n", br->vlan_stats_enabled);
+}
+
+static ssize_t vlan_stats_enabled_store(struct device *d,
+                                       struct device_attribute *attr,
+                                       const char *buf, size_t len)
+{
+       return store_bridge_parm(d, buf, len, br_vlan_set_stats);
+}
+static DEVICE_ATTR_RW(vlan_stats_enabled);
 #endif
 
 static struct attribute *bridge_attrs[] = {
@@ -794,6 +794,7 @@ static struct attribute *bridge_attrs[] = {
        &dev_attr_vlan_filtering.attr,
        &dev_attr_vlan_protocol.attr,
        &dev_attr_default_pvid.attr,
+       &dev_attr_vlan_stats_enabled.attr,
 #endif
        NULL
 };
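With input parsing, rtnl_trylock() and the netdev_state_change() notification hoisted into store_bridge_parm() (the capability check lives there too, outside the hunk shown), adding a bridge sysfs knob reduces to writing a setter that runs under RTNL. A minimal sketch with hypothetical names (set_example, example_store and example_flag are not in the patch):

    /* Hypothetical example of the store_bridge_parm() pattern */
    static int set_example(struct net_bridge *br, unsigned long val)
    {
            if (val > 1)
                    return -EINVAL;     /* validation runs under RTNL */
            br->example_flag = val;     /* hypothetical field */
            return 0;
    }

    static ssize_t example_store(struct device *d,
                                 struct device_attribute *attr,
                                 const char *buf, size_t len)
    {
            return store_bridge_parm(d, buf, len, set_example);
    }
    static DEVICE_ATTR_WO(example);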
index efe415ad842a4fad159fb24590d11f6308a88f83..1e04d4d44273f50e60c4eee4b3d40d63a1f03500 100644 (file)
@@ -61,7 +61,6 @@ static int store_flag(struct net_bridge_port *p, unsigned long v,
        if (flags != p->flags) {
                p->flags = flags;
                br_port_flags_change(p, mask);
-               br_ifinfo_notify(RTM_NEWLINK, p);
        }
        return 0;
 }
@@ -253,8 +252,10 @@ static ssize_t brport_store(struct kobject *kobj,
                        spin_lock_bh(&p->br->lock);
                        ret = brport_attr->store(p, val);
                        spin_unlock_bh(&p->br->lock);
-                       if (ret == 0)
+                       if (!ret) {
+                               br_ifinfo_notify(RTM_NEWLINK, p);
                                ret = count;
+                       }
                }
                rtnl_unlock();
        }
index 9309bb4f2a5b240edcea6f9c3a4a45d2c6413874..b6de4f45716184d04f84886f8fdb0481c31012ad 100644 (file)
@@ -162,6 +162,17 @@ static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid
        return masterv;
 }
 
+static void br_master_vlan_rcu_free(struct rcu_head *rcu)
+{
+       struct net_bridge_vlan *v;
+
+       v = container_of(rcu, struct net_bridge_vlan, rcu);
+       WARN_ON(!br_vlan_is_master(v));
+       free_percpu(v->stats);
+       v->stats = NULL;
+       kfree(v);
+}
+
 static void br_vlan_put_master(struct net_bridge_vlan *masterv)
 {
        struct net_bridge_vlan_group *vg;
@@ -174,7 +185,7 @@ static void br_vlan_put_master(struct net_bridge_vlan *masterv)
                rhashtable_remove_fast(&vg->vlan_hash,
                                       &masterv->vnode, br_vlan_rht_params);
                __vlan_del_list(masterv);
-               kfree_rcu(masterv, rcu);
+               call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
        }
 }
 
@@ -230,6 +241,7 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
                if (!masterv)
                        goto out_filt;
                v->brvlan = masterv;
+               v->stats = masterv->stats;
        }
 
        /* Add the dev mac and count the vlan only if it's usable */
@@ -329,6 +341,7 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br,
                               struct net_bridge_vlan_group *vg,
                               struct sk_buff *skb)
 {
+       struct br_vlan_stats *stats;
        struct net_bridge_vlan *v;
        u16 vid;
 
@@ -355,18 +368,27 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br,
                        return NULL;
                }
        }
+       if (br->vlan_stats_enabled) {
+               stats = this_cpu_ptr(v->stats);
+               u64_stats_update_begin(&stats->syncp);
+               stats->tx_bytes += skb->len;
+               stats->tx_packets++;
+               u64_stats_update_end(&stats->syncp);
+       }
+
        if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
                skb->vlan_tci = 0;
-
 out:
        return skb;
 }
 
 /* Called under RCU */
-static bool __allowed_ingress(struct net_bridge_vlan_group *vg, __be16 proto,
+static bool __allowed_ingress(const struct net_bridge *br,
+                             struct net_bridge_vlan_group *vg,
                              struct sk_buff *skb, u16 *vid)
 {
-       const struct net_bridge_vlan *v;
+       struct br_vlan_stats *stats;
+       struct net_bridge_vlan *v;
        bool tagged;
 
        BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
@@ -375,7 +397,7 @@ static bool __allowed_ingress(struct net_bridge_vlan_group *vg, __be16 proto,
         * HW accelerated vlan tag.
         */
        if (unlikely(!skb_vlan_tag_present(skb) &&
-                    skb->protocol == proto)) {
+                    skb->protocol == br->vlan_proto)) {
                skb = skb_vlan_untag(skb);
                if (unlikely(!skb))
                        return false;
@@ -383,7 +405,7 @@ static bool __allowed_ingress(struct net_bridge_vlan_group *vg, __be16 proto,
 
        if (!br_vlan_get_tag(skb, vid)) {
                /* Tagged frame */
-               if (skb->vlan_proto != proto) {
+               if (skb->vlan_proto != br->vlan_proto) {
                        /* Protocol-mismatch, empty out vlan_tci for new tag */
                        skb_push(skb, ETH_HLEN);
                        skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
@@ -419,7 +441,7 @@ static bool __allowed_ingress(struct net_bridge_vlan_group *vg, __be16 proto,
                *vid = pvid;
                if (likely(!tagged))
                        /* Untagged Frame. */
-                       __vlan_hwaccel_put_tag(skb, proto, pvid);
+                       __vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
                else
                        /* Priority-tagged Frame.
                         * At this point, We know that skb->vlan_tci had
@@ -428,13 +450,24 @@ static bool __allowed_ingress(struct net_bridge_vlan_group *vg, __be16 proto,
                         */
                        skb->vlan_tci |= pvid;
 
-               return true;
+               /* if stats are disabled we can avoid the lookup */
+               if (!br->vlan_stats_enabled)
+                       return true;
        }
-
-       /* Frame had a valid vlan tag.  See if vlan is allowed */
        v = br_vlan_find(vg, *vid);
-       if (v && br_vlan_should_use(v))
-               return true;
+       if (!v || !br_vlan_should_use(v))
+               goto drop;
+
+       if (br->vlan_stats_enabled) {
+               stats = this_cpu_ptr(v->stats);
+               u64_stats_update_begin(&stats->syncp);
+               stats->rx_bytes += skb->len;
+               stats->rx_packets++;
+               u64_stats_update_end(&stats->syncp);
+       }
+
+       return true;
+
 drop:
        kfree_skb(skb);
        return false;
@@ -452,7 +485,7 @@ bool br_allowed_ingress(const struct net_bridge *br,
                return true;
        }
 
-       return __allowed_ingress(vg, br->vlan_proto, skb, vid);
+       return __allowed_ingress(br, vg, skb, vid);
 }
 
 /* Called under RCU. */
@@ -542,6 +575,11 @@ int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
        if (!vlan)
                return -ENOMEM;
 
+       vlan->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
+       if (!vlan->stats) {
+               kfree(vlan);
+               return -ENOMEM;
+       }
        vlan->vid = vid;
        vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
        vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
@@ -549,8 +587,10 @@ int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
        if (flags & BRIDGE_VLAN_INFO_BRENTRY)
                atomic_set(&vlan->refcnt, 1);
        ret = __vlan_add(vlan, flags);
-       if (ret)
+       if (ret) {
+               free_percpu(vlan->stats);
                kfree(vlan);
+       }
 
        return ret;
 }
@@ -651,15 +691,7 @@ int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
 
 int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
 {
-       int err;
-
-       if (!rtnl_trylock())
-               return restart_syscall();
-
-       err = __br_vlan_filter_toggle(br, val);
-       rtnl_unlock();
-
-       return err;
+       return __br_vlan_filter_toggle(br, val);
 }
 
 int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
@@ -713,18 +745,24 @@ err_filt:
 
 int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
 {
-       int err;
-
        if (val != ETH_P_8021Q && val != ETH_P_8021AD)
                return -EPROTONOSUPPORT;
 
-       if (!rtnl_trylock())
-               return restart_syscall();
+       return __br_vlan_set_proto(br, htons(val));
+}
 
-       err = __br_vlan_set_proto(br, htons(val));
-       rtnl_unlock();
+int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
+{
+       switch (val) {
+       case 0:
+       case 1:
+               br->vlan_stats_enabled = val;
+               break;
+       default:
+               return -EINVAL;
+       }
 
-       return err;
+       return 0;
 }
 
 static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
@@ -855,21 +893,17 @@ int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
        if (val >= VLAN_VID_MASK)
                return -EINVAL;
 
-       if (!rtnl_trylock())
-               return restart_syscall();
-
        if (pvid == br->default_pvid)
-               goto unlock;
+               goto out;
 
        /* Only allow default pvid change when filtering is disabled */
        if (br->vlan_enabled) {
                pr_info_once("Please disable vlan filtering to change default_pvid\n");
                err = -EPERM;
-               goto unlock;
+               goto out;
        }
        err = __br_vlan_set_default_pvid(br, pvid);
-unlock:
-       rtnl_unlock();
+out:
        return err;
 }
 
@@ -1020,3 +1054,30 @@ void nbp_vlan_flush(struct net_bridge_port *port)
        synchronize_rcu();
        __vlan_group_free(vg);
 }
+
+void br_vlan_get_stats(const struct net_bridge_vlan *v,
+                      struct br_vlan_stats *stats)
+{
+       int i;
+
+       memset(stats, 0, sizeof(*stats));
+       for_each_possible_cpu(i) {
+               u64 rxpackets, rxbytes, txpackets, txbytes;
+               struct br_vlan_stats *cpu_stats;
+               unsigned int start;
+
+               cpu_stats = per_cpu_ptr(v->stats, i);
+               do {
+                       start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+                       rxpackets = cpu_stats->rx_packets;
+                       rxbytes = cpu_stats->rx_bytes;
+                       txbytes = cpu_stats->tx_bytes;
+                       txpackets = cpu_stats->tx_packets;
+               } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
+
+               stats->rx_packets += rxpackets;
+               stats->rx_bytes += rxbytes;
+               stats->tx_bytes += txbytes;
+               stats->tx_packets += txpackets;
+       }
+}
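br_vlan_get_stats() above is the reader half of the classic per-CPU counter pattern: each CPU updates only its own br_vlan_stats copy inside a u64_stats_update_begin()/end() pair, and the reader loops with u64_stats_fetch_begin_irq()/retry_irq() until it gets a tear-free snapshot, then sums across CPUs. The writer side, condensed from the ingress and egress hunks earlier in this file:

    /* Writer side of the per-CPU stats pattern (condensed) */
    struct br_vlan_stats *stats = this_cpu_ptr(v->stats);

    u64_stats_update_begin(&stats->syncp);  /* seqcount is a no-op on 64-bit */
    stats->rx_bytes += skb->len;
    stats->rx_packets++;
    u64_stats_update_end(&stats->syncp);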
index 8570bc7744c25cc92e2301f8686cd21a1f26baab..5a61f35412a0595845da4feb2cbd0bc09a1cdbeb 100644 (file)
@@ -370,7 +370,11 @@ ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
            left - sizeof(struct ebt_entry_match) < m->match_size)
                return -EINVAL;
 
-       match = xt_request_find_match(NFPROTO_BRIDGE, m->u.name, 0);
+       match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
+       if (IS_ERR(match) || match->family != NFPROTO_BRIDGE) {
+               request_module("ebt_%s", m->u.name);
+               match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
+       }
        if (IS_ERR(match))
                return PTR_ERR(match);
        m->u.match = match;
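The two-step lookup replaces xt_request_find_match(), whose generic autoload can be defeated when an extension of the same name exists for a different protocol family. Instead, the code does a plain lookup, checks that what it found really is a bridge match, and only then requests the ebt_-prefixed module and retries. Wrapped up as a helper (ebt_find_match here is a hypothetical name, not from the patch):

    /* Sketch: resolve a bridge match, autoloading via the ebt_ prefix */
    static struct xt_match *ebt_find_match_sketch(const char *name)
    {
            struct xt_match *match = xt_find_match(NFPROTO_BRIDGE, name, 0);

            if (IS_ERR(match) || match->family != NFPROTO_BRIDGE) {
                    request_module("ebt_%s", name);     /* ebt_, not xt_ */
                    match = xt_find_match(NFPROTO_BRIDGE, name, 0);
            }
            return match;
    }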
index 7fcdd7261d880276f5d21a7b5bbdaf4622dbdfbf..a78c4e2826e5a56d509635e2ca8f9ec4eeac481f 100644 (file)
@@ -162,15 +162,57 @@ static const struct nf_chain_type filter_bridge = {
                          (1 << NF_BR_POST_ROUTING),
 };
 
+static void nf_br_saveroute(const struct sk_buff *skb,
+                           struct nf_queue_entry *entry)
+{
+}
+
+static int nf_br_reroute(struct net *net, struct sk_buff *skb,
+                        const struct nf_queue_entry *entry)
+{
+       return 0;
+}
+
+static __sum16 nf_br_checksum(struct sk_buff *skb, unsigned int hook,
+                             unsigned int dataoff, u_int8_t protocol)
+{
+       return 0;
+}
+
+static __sum16 nf_br_checksum_partial(struct sk_buff *skb, unsigned int hook,
+                                     unsigned int dataoff, unsigned int len,
+                                     u_int8_t protocol)
+{
+       return 0;
+}
+
+static int nf_br_route(struct net *net, struct dst_entry **dst,
+                      struct flowi *fl, bool strict __always_unused)
+{
+       return 0;
+}
+
+static const struct nf_afinfo nf_br_afinfo = {
+       .family                 = AF_BRIDGE,
+       .checksum               = nf_br_checksum,
+       .checksum_partial       = nf_br_checksum_partial,
+       .route                  = nf_br_route,
+       .saveroute              = nf_br_saveroute,
+       .reroute                = nf_br_reroute,
+       .route_key_size         = 0,
+};
+
 static int __init nf_tables_bridge_init(void)
 {
        int ret;
 
+       nf_register_afinfo(&nf_br_afinfo);
        nft_register_chain_type(&filter_bridge);
        ret = register_pernet_subsys(&nf_tables_bridge_net_ops);
-       if (ret < 0)
+       if (ret < 0) {
                nft_unregister_chain_type(&filter_bridge);
-
+               nf_unregister_afinfo(&nf_br_afinfo);
+       }
        return ret;
 }
 
@@ -178,6 +220,7 @@ static void __exit nf_tables_bridge_exit(void)
 {
        unregister_pernet_subsys(&nf_tables_bridge_net_ops);
        nft_unregister_chain_type(&filter_bridge);
+       nf_unregister_afinfo(&nf_br_afinfo);
 }
 
 module_init(nf_tables_bridge_init);
index 6b923bcaa2a481a75f0be2024d7b4f54288d26b6..2bc5965fdd1ecf464af37b533ed14cb1fafea83c 100644 (file)
@@ -293,13 +293,9 @@ int ceph_auth_create_authorizer(struct ceph_auth_client *ac,
 }
 EXPORT_SYMBOL(ceph_auth_create_authorizer);
 
-void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac,
-                                 struct ceph_authorizer *a)
+void ceph_auth_destroy_authorizer(struct ceph_authorizer *a)
 {
-       mutex_lock(&ac->mutex);
-       if (ac->ops && ac->ops->destroy_authorizer)
-               ac->ops->destroy_authorizer(ac, a);
-       mutex_unlock(&ac->mutex);
+       a->destroy(a);
 }
 EXPORT_SYMBOL(ceph_auth_destroy_authorizer);
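The ceph_auth_destroy_authorizer() signature change is the visible edge of a wider refactor: each authorizer now embeds its own destructor (the base.destroy assignments in the none and x flavors below), so teardown needs neither the ceph_auth_client nor its mutex, and destroy_authorizer drops out of the ops table entirely. The shape of it, as suggested by the hunks:

    /* Sketch of the embedded-destructor shape */
    struct ceph_authorizer {
            void (*destroy)(struct ceph_authorizer *);
    };

    /* flavor-specific authorizers embed it as their first member: */
    struct ceph_none_authorizer {
            struct ceph_authorizer base;
            /* ... */
    };

With this layout, a->destroy(a) dispatches to the right cleanup without any knowledge of which auth protocol built the authorizer.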
 
index 8c93fa8d81bc45b2d4bf1e9e1fd1eb3b96e3de53..5f836f02ae36c1e1356fd0a92b8a676f96fc709f 100644 (file)
@@ -16,7 +16,6 @@ static void reset(struct ceph_auth_client *ac)
        struct ceph_auth_none_info *xi = ac->private;
 
        xi->starting = true;
-       xi->built_authorizer = false;
 }
 
 static void destroy(struct ceph_auth_client *ac)
@@ -39,6 +38,27 @@ static int should_authenticate(struct ceph_auth_client *ac)
        return xi->starting;
 }
 
+static int ceph_auth_none_build_authorizer(struct ceph_auth_client *ac,
+                                          struct ceph_none_authorizer *au)
+{
+       void *p = au->buf;
+       void *const end = p + sizeof(au->buf);
+       int ret;
+
+       ceph_encode_8_safe(&p, end, 1, e_range);
+       ret = ceph_entity_name_encode(ac->name, &p, end);
+       if (ret < 0)
+               return ret;
+
+       ceph_encode_64_safe(&p, end, ac->global_id, e_range);
+       au->buf_len = p - (void *)au->buf;
+       dout("%s built authorizer len %d\n", __func__, au->buf_len);
+       return 0;
+
+e_range:
+       return -ERANGE;
+}
+
 static int build_request(struct ceph_auth_client *ac, void *buf, void *end)
 {
        return 0;
@@ -57,32 +77,32 @@ static int handle_reply(struct ceph_auth_client *ac, int result,
        return result;
 }
 
+static void ceph_auth_none_destroy_authorizer(struct ceph_authorizer *a)
+{
+       kfree(a);
+}
+
 /*
- * build an 'authorizer' with our entity_name and global_id.  we can
- * reuse a single static copy since it is identical for all services
- * we connect to.
+ * build an 'authorizer' with our entity_name and global_id.  it is
+ * identical for all services we connect to.
  */
 static int ceph_auth_none_create_authorizer(
        struct ceph_auth_client *ac, int peer_type,
        struct ceph_auth_handshake *auth)
 {
-       struct ceph_auth_none_info *ai = ac->private;
-       struct ceph_none_authorizer *au = &ai->au;
-       void *p, *end;
+       struct ceph_none_authorizer *au;
        int ret;
 
-       if (!ai->built_authorizer) {
-               p = au->buf;
-               end = p + sizeof(au->buf);
-               ceph_encode_8(&p, 1);
-               ret = ceph_entity_name_encode(ac->name, &p, end - 8);
-               if (ret < 0)
-                       goto bad;
-               ceph_decode_need(&p, end, sizeof(u64), bad2);
-               ceph_encode_64(&p, ac->global_id);
-               au->buf_len = p - (void *)au->buf;
-               ai->built_authorizer = true;
-               dout("built authorizer len %d\n", au->buf_len);
+       au = kmalloc(sizeof(*au), GFP_NOFS);
+       if (!au)
+               return -ENOMEM;
+
+       au->base.destroy = ceph_auth_none_destroy_authorizer;
+
+       ret = ceph_auth_none_build_authorizer(ac, au);
+       if (ret) {
+               kfree(au);
+               return ret;
        }
 
        auth->authorizer = (struct ceph_authorizer *) au;
@@ -92,17 +112,6 @@ static int ceph_auth_none_create_authorizer(
        auth->authorizer_reply_buf_len = sizeof (au->reply_buf);
 
        return 0;
-
-bad2:
-       ret = -ERANGE;
-bad:
-       return ret;
-}
-
-static void ceph_auth_none_destroy_authorizer(struct ceph_auth_client *ac,
-                                     struct ceph_authorizer *a)
-{
-       /* nothing to do */
 }
 
 static const struct ceph_auth_client_ops ceph_auth_none_ops = {
@@ -114,7 +123,6 @@ static const struct ceph_auth_client_ops ceph_auth_none_ops = {
        .build_request = build_request,
        .handle_reply = handle_reply,
        .create_authorizer = ceph_auth_none_create_authorizer,
-       .destroy_authorizer = ceph_auth_none_destroy_authorizer,
 };
 
 int ceph_auth_none_init(struct ceph_auth_client *ac)
@@ -127,7 +135,6 @@ int ceph_auth_none_init(struct ceph_auth_client *ac)
                return -ENOMEM;
 
        xi->starting = true;
-       xi->built_authorizer = false;
 
        ac->protocol = CEPH_AUTH_NONE;
        ac->private = xi;
index 059a3ce4b53f48425336bf4b792d2ac6a5c6e849..62021535ae4a8f22c922a3c7a8b0d67cb5361eac 100644 (file)
@@ -12,6 +12,7 @@
  */
 
 struct ceph_none_authorizer {
+       struct ceph_authorizer base;
        char buf[128];
        int buf_len;
        char reply_buf[0];
@@ -19,8 +20,6 @@ struct ceph_none_authorizer {
 
 struct ceph_auth_none_info {
        bool starting;
-       bool built_authorizer;
-       struct ceph_none_authorizer au;   /* we only need one; it's static */
 };
 
 int ceph_auth_none_init(struct ceph_auth_client *ac);
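
The refactor above inverts authorizer ownership: instead of a client-wide destroy_authorizer op, each authorizer now embeds a base struct carrying its own destroy callback, so a connection can free the authorizer without knowing which auth scheme produced it. A minimal standalone sketch of that pattern (plain userspace C; every name below is a hypothetical stand-in, not the kernel API):

#include <stdlib.h>

/* generic handle; concrete authorizers embed this as their FIRST member
 * so a pointer to the base is also a pointer to the whole object */
struct authorizer {
        void (*destroy)(struct authorizer *a);
};

struct none_authorizer {
        struct authorizer base;   /* must stay first for the casts below */
        char buf[128];
        int buf_len;
};

static void none_destroy(struct authorizer *a)
{
        free(a);                  /* a == &(struct none_authorizer).base */
}

static struct authorizer *none_create(void)
{
        struct none_authorizer *au = calloc(1, sizeof(*au));

        if (!au)
                return NULL;
        au->base.destroy = none_destroy;  /* scheme-specific teardown */
        return &au->base;
}

int main(void)
{
        struct authorizer *a = none_create();

        if (a)
                a->destroy(a);    /* caller never names the concrete type */
        return 0;
}
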
index 9e43a315e6622028ba2b61fd8d36ab20305d611b..a0905f04bd13f3250f51de5a7600f4089d3053a4 100644 (file)
@@ -565,6 +565,14 @@ static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result,
        return -EAGAIN;
 }
 
+static void ceph_x_destroy_authorizer(struct ceph_authorizer *a)
+{
+       struct ceph_x_authorizer *au = (void *)a;
+
+       ceph_x_authorizer_cleanup(au);
+       kfree(au);
+}
+
 static int ceph_x_create_authorizer(
        struct ceph_auth_client *ac, int peer_type,
        struct ceph_auth_handshake *auth)
@@ -581,6 +589,8 @@ static int ceph_x_create_authorizer(
        if (!au)
                return -ENOMEM;
 
+       au->base.destroy = ceph_x_destroy_authorizer;
+
        ret = ceph_x_build_authorizer(ac, th, au);
        if (ret) {
                kfree(au);
@@ -643,16 +653,6 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
        return ret;
 }
 
-static void ceph_x_destroy_authorizer(struct ceph_auth_client *ac,
-                                     struct ceph_authorizer *a)
-{
-       struct ceph_x_authorizer *au = (void *)a;
-
-       ceph_x_authorizer_cleanup(au);
-       kfree(au);
-}
-
-
 static void ceph_x_reset(struct ceph_auth_client *ac)
 {
        struct ceph_x_info *xi = ac->private;
@@ -770,7 +770,6 @@ static const struct ceph_auth_client_ops ceph_x_ops = {
        .create_authorizer = ceph_x_create_authorizer,
        .update_authorizer = ceph_x_update_authorizer,
        .verify_authorizer_reply = ceph_x_verify_authorizer_reply,
-       .destroy_authorizer = ceph_x_destroy_authorizer,
        .invalidate_authorizer = ceph_x_invalidate_authorizer,
        .reset =  ceph_x_reset,
        .destroy = ceph_x_destroy,
index 40b1a3cf7397352e4673becccdbbb083c88f04d8..21a5af904bae751391bf7d508dd75b1d2ee426e2 100644 (file)
@@ -26,6 +26,7 @@ struct ceph_x_ticket_handler {
 
 
 struct ceph_x_authorizer {
+       struct ceph_authorizer base;
        struct ceph_crypto_key session_key;
        struct ceph_buffer *buf;
        unsigned int service;
index 1831f63536225bbe120dbc5215b26d2d6518f3fb..a5502898ea33b0e4d74cfa8f12947c1d9466013c 100644 (file)
@@ -269,7 +269,7 @@ static void _ceph_msgr_exit(void)
        }
 
        BUG_ON(zero_page == NULL);
-       page_cache_release(zero_page);
+       put_page(zero_page);
        zero_page = NULL;
 
        ceph_msgr_slab_exit();
@@ -282,7 +282,7 @@ int ceph_msgr_init(void)
 
        BUG_ON(zero_page != NULL);
        zero_page = ZERO_PAGE(0);
-       page_cache_get(zero_page);
+       get_page(zero_page);
 
        /*
         * The number of active work items is limited by the number of
@@ -1602,7 +1602,7 @@ static int write_partial_skip(struct ceph_connection *con)
 
        dout("%s %p %d left\n", __func__, con, con->out_skip);
        while (con->out_skip > 0) {
-               size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE);
+               size_t size = min(con->out_skip, (int) PAGE_SIZE);
 
                ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, true);
                if (ret <= 0)
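
In the hunks above, page_cache_get() and page_cache_release() were plain aliases for get_page() and put_page(), so the substitution is behavior-preserving: the messenger still takes a reference on the shared zero page at init and drops it at exit. A toy refcount model of that pairing (userspace C, not the kernel's struct page):

#include <assert.h>

struct page { int refcount; };

static void get_page(struct page *p) { p->refcount++; }
static int put_page(struct page *p) { return --p->refcount == 0; } /* 1 == freed */

int main(void)
{
        struct page zero_page = { .refcount = 1 }; /* base ref held by the core */

        get_page(&zero_page);           /* ceph_msgr_init() pins it */
        assert(!put_page(&zero_page));  /* _ceph_msgr_exit(): unpin, page survives */
        return 0;
}
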
index 32355d9d0103a827a82f3bfe27493d9a2b47b339..40a53a70efdffe89085f4cfce89f8d59c5420521 100644 (file)
@@ -1087,10 +1087,8 @@ static void put_osd(struct ceph_osd *osd)
        dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
             atomic_read(&osd->o_ref) - 1);
        if (atomic_dec_and_test(&osd->o_ref)) {
-               struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;
-
                if (osd->o_auth.authorizer)
-                       ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer);
+                       ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
                kfree(osd);
        }
 }
@@ -2984,7 +2982,7 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
        struct ceph_auth_handshake *auth = &o->o_auth;
 
        if (force_new && auth->authorizer) {
-               ceph_auth_destroy_authorizer(ac, auth->authorizer);
+               ceph_auth_destroy_authorizer(auth->authorizer);
                auth->authorizer = NULL;
        }
        if (!auth->authorizer) {
index c7c220a736e50bc7ec78c48eb4c6e5a80cf1b500..6864007e64fc3236f6d118f8a4a1869255bbba92 100644 (file)
@@ -56,7 +56,7 @@ int ceph_pagelist_append(struct ceph_pagelist *pl, const void *buf, size_t len)
                size_t bit = pl->room;
                int ret;
 
-               memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK),
+               memcpy(pl->mapped_tail + (pl->length & ~PAGE_MASK),
                       buf, bit);
                pl->length += bit;
                pl->room -= bit;
@@ -67,7 +67,7 @@ int ceph_pagelist_append(struct ceph_pagelist *pl, const void *buf, size_t len)
                        return ret;
        }
 
-       memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK), buf, len);
+       memcpy(pl->mapped_tail + (pl->length & ~PAGE_MASK), buf, len);
        pl->length += len;
        pl->room -= len;
        return 0;
index 10297f7a89babc58541d0ee11b016e2531fa3a1e..00d2601407c5dfd5b8e8a94bf5de2160ae29cc38 100644 (file)
@@ -95,19 +95,19 @@ int ceph_copy_user_to_page_vector(struct page **pages,
                                         loff_t off, size_t len)
 {
        int i = 0;
-       int po = off & ~PAGE_CACHE_MASK;
+       int po = off & ~PAGE_MASK;
        int left = len;
        int l, bad;
 
        while (left > 0) {
-               l = min_t(int, PAGE_CACHE_SIZE-po, left);
+               l = min_t(int, PAGE_SIZE-po, left);
                bad = copy_from_user(page_address(pages[i]) + po, data, l);
                if (bad == l)
                        return -EFAULT;
                data += l - bad;
                left -= l - bad;
                po += l - bad;
-               if (po == PAGE_CACHE_SIZE) {
+               if (po == PAGE_SIZE) {
                        po = 0;
                        i++;
                }
@@ -121,17 +121,17 @@ void ceph_copy_to_page_vector(struct page **pages,
                                    loff_t off, size_t len)
 {
        int i = 0;
-       size_t po = off & ~PAGE_CACHE_MASK;
+       size_t po = off & ~PAGE_MASK;
        size_t left = len;
 
        while (left > 0) {
-               size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
+               size_t l = min_t(size_t, PAGE_SIZE-po, left);
 
                memcpy(page_address(pages[i]) + po, data, l);
                data += l;
                left -= l;
                po += l;
-               if (po == PAGE_CACHE_SIZE) {
+               if (po == PAGE_SIZE) {
                        po = 0;
                        i++;
                }
@@ -144,17 +144,17 @@ void ceph_copy_from_page_vector(struct page **pages,
                                    loff_t off, size_t len)
 {
        int i = 0;
-       size_t po = off & ~PAGE_CACHE_MASK;
+       size_t po = off & ~PAGE_MASK;
        size_t left = len;
 
        while (left > 0) {
-               size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
+               size_t l = min_t(size_t, PAGE_SIZE-po, left);
 
                memcpy(data, page_address(pages[i]) + po, l);
                data += l;
                left -= l;
                po += l;
-               if (po == PAGE_CACHE_SIZE) {
+               if (po == PAGE_SIZE) {
                        po = 0;
                        i++;
                }
@@ -168,25 +168,25 @@ EXPORT_SYMBOL(ceph_copy_from_page_vector);
  */
 void ceph_zero_page_vector_range(int off, int len, struct page **pages)
 {
-       int i = off >> PAGE_CACHE_SHIFT;
+       int i = off >> PAGE_SHIFT;
 
-       off &= ~PAGE_CACHE_MASK;
+       off &= ~PAGE_MASK;
 
        dout("zero_page_vector_page %u~%u\n", off, len);
 
        /* leading partial page? */
        if (off) {
-               int end = min((int)PAGE_CACHE_SIZE, off + len);
+               int end = min((int)PAGE_SIZE, off + len);
                dout("zeroing %d %p head from %d\n", i, pages[i],
                     (int)off);
                zero_user_segment(pages[i], off, end);
                len -= (end - off);
                i++;
        }
-       while (len >= PAGE_CACHE_SIZE) {
+       while (len >= PAGE_SIZE) {
                dout("zeroing %d %p len=%d\n", i, pages[i], len);
-               zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
-               len -= PAGE_CACHE_SIZE;
+               zero_user_segment(pages[i], 0, PAGE_SIZE);
+               len -= PAGE_SIZE;
                i++;
        }
        /* trailing partial page? */
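
PAGE_CACHE_SIZE, PAGE_CACHE_SHIFT and PAGE_CACHE_MASK were likewise defined as their PAGE_* counterparts, so none of the arithmetic above changes. A standalone illustration of the offset math this file leans on (userspace C, assuming a 4 KiB page):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long off = 9000;               /* arbitrary byte offset */
        unsigned long idx = off >> PAGE_SHIFT;  /* page index: 2 */
        unsigned long po  = off & ~PAGE_MASK;   /* offset within page: 808 */

        printf("page %lu, offset %lu, room %lu\n", idx, po, PAGE_SIZE - po);
        return 0;
}
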
index 273f10d1e30618a99a172ec96d2bd017486f35d6..12436d1312cabf1596704e70c2586f1623ce38f6 100644 (file)
@@ -1741,7 +1741,7 @@ static inline void net_timestamp_set(struct sk_buff *skb)
                        __net_timestamp(SKB);           \
        }                                               \
 
-bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
+bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
 {
        unsigned int len;
 
@@ -1850,7 +1850,7 @@ static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
  *     taps currently in use.
  */
 
-static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
+void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 {
        struct packet_type *ptype;
        struct sk_buff *skb2 = NULL;
@@ -1907,6 +1907,7 @@ out_unlock:
                pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
        rcu_read_unlock();
 }
+EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
 
 /**
  * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
@@ -2711,6 +2712,19 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
                        return ERR_PTR(err);
        }
 
+       /* Only report GSO partial support if it will enable us to
+        * support segmentation on this frame without needing additional
+        * work.
+        */
+       if (features & NETIF_F_GSO_PARTIAL) {
+               netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
+               struct net_device *dev = skb->dev;
+
+               partial_features |= dev->features & dev->gso_partial_features;
+               if (!skb_gso_ok(skb, features | partial_features))
+                       features &= ~NETIF_F_GSO_PARTIAL;
+       }
+
        BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
                     sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
 
@@ -2802,7 +2816,7 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
 
        if (skb->ip_summed != CHECKSUM_NONE &&
            !can_checksum_protocol(features, type)) {
-               features &= ~NETIF_F_CSUM_MASK;
+               features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
        } else if (illegal_highdma(skb->dev, skb)) {
                features &= ~NETIF_F_SG;
        }
@@ -2825,14 +2839,45 @@ static netdev_features_t dflt_features_check(const struct sk_buff *skb,
        return vlan_features_check(skb, features);
 }
 
+static netdev_features_t gso_features_check(const struct sk_buff *skb,
+                                           struct net_device *dev,
+                                           netdev_features_t features)
+{
+       u16 gso_segs = skb_shinfo(skb)->gso_segs;
+
+       if (gso_segs > dev->gso_max_segs)
+               return features & ~NETIF_F_GSO_MASK;
+
+       /* Support for GSO partial features requires software
+        * intervention before we can actually process the packets,
+        * so strip any partial features now; they are pulled back
+        * in after the frame has been partially segmented.
+        */
+       if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
+               features &= ~dev->gso_partial_features;
+
+       /* Make sure to clear the IPv4 ID mangling feature if the
+        * IPv4 header has the potential to be fragmented.
+        */
+       if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
+               struct iphdr *iph = skb->encapsulation ?
+                                   inner_ip_hdr(skb) : ip_hdr(skb);
+
+               if (!(iph->frag_off & htons(IP_DF)))
+                       features &= ~NETIF_F_TSO_MANGLEID;
+       }
+
+       return features;
+}
+
 netdev_features_t netif_skb_features(struct sk_buff *skb)
 {
        struct net_device *dev = skb->dev;
        netdev_features_t features = dev->features;
-       u16 gso_segs = skb_shinfo(skb)->gso_segs;
 
-       if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
-               features &= ~NETIF_F_GSO_MASK;
+       if (skb_is_gso(skb))
+               features = gso_features_check(skb, dev, features);
 
        /* If encapsulation offload request, verify we are testing
         * hardware encapsulation features instead of standard
@@ -2915,9 +2960,6 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
 {
        netdev_features_t features;
 
-       if (skb->next)
-               return skb;
-
        features = netif_skb_features(skb);
        skb = validate_xmit_vlan(skb, features);
        if (unlikely(!skb))
@@ -2960,6 +3002,7 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
 out_kfree_skb:
        kfree_skb(skb);
 out_null:
+       atomic_long_inc(&dev->tx_dropped);
        return NULL;
 }
 
@@ -3349,7 +3392,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 
                        skb = validate_xmit_skb(skb, dev);
                        if (!skb)
-                               goto drop;
+                               goto out;
 
                        HARD_TX_LOCK(dev, txq, cpu);
 
@@ -3376,7 +3419,6 @@ recursion_alert:
        }
 
        rc = -ENETDOWN;
-drop:
        rcu_read_unlock_bh();
 
        atomic_long_inc(&dev->tx_dropped);
@@ -3428,6 +3470,7 @@ u32 rps_cpu_mask __read_mostly;
 EXPORT_SYMBOL(rps_cpu_mask);
 
 struct static_key rps_needed __read_mostly;
+EXPORT_SYMBOL(rps_needed);
 
 static struct rps_dev_flow *
 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
@@ -3914,9 +3957,11 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
                break;
        case TC_ACT_SHOT:
                qdisc_qstats_cpu_drop(cl->q);
+               kfree_skb(skb);
+               return NULL;
        case TC_ACT_STOLEN:
        case TC_ACT_QUEUED:
-               kfree_skb(skb);
+               consume_skb(skb);
                return NULL;
        case TC_ACT_REDIRECT:
                /* skb_mac_header check was done by cls/act_bpf, so
@@ -4439,6 +4484,8 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
                NAPI_GRO_CB(skb)->flush = 0;
                NAPI_GRO_CB(skb)->free = 0;
                NAPI_GRO_CB(skb)->encap_mark = 0;
+               NAPI_GRO_CB(skb)->is_fou = 0;
+               NAPI_GRO_CB(skb)->is_atomic = 1;
                NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
 
                /* Setup for GRO checksum validation */
@@ -4939,8 +4986,8 @@ bool sk_busy_loop(struct sock *sk, int nonblock)
                        netpoll_poll_unlock(have);
                }
                if (rc > 0)
-                       NET_ADD_STATS_BH(sock_net(sk),
-                                        LINUX_MIB_BUSYPOLLRXPACKETS, rc);
+                       __NET_ADD_STATS(sock_net(sk),
+                                       LINUX_MIB_BUSYPOLLRXPACKETS, rc);
                local_bh_enable();
 
                if (rc == LL_FLUSH_FAILED)
@@ -6677,6 +6724,10 @@ static netdev_features_t netdev_fix_features(struct net_device *dev,
                features &= ~NETIF_F_TSO6;
        }
 
+       /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
+       if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
+               features &= ~NETIF_F_TSO_MANGLEID;
+
        /* TSO ECN requires that TSO is present as well. */
        if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
                features &= ~NETIF_F_TSO_ECN;
@@ -6705,6 +6756,14 @@ static netdev_features_t netdev_fix_features(struct net_device *dev,
                }
        }
 
+       /* GSO partial features require GSO partial be set */
+       if ((features & dev->gso_partial_features) &&
+           !(features & NETIF_F_GSO_PARTIAL)) {
+               netdev_dbg(dev,
+                          "Dropping partially supported GSO features since GSO partial is not set.\n");
+               features &= ~dev->gso_partial_features;
+       }
+
 #ifdef CONFIG_NET_RX_BUSY_POLL
        if (dev->netdev_ops->ndo_busy_poll)
                features |= NETIF_F_BUSY_POLL;
@@ -6975,9 +7034,22 @@ int register_netdevice(struct net_device *dev)
        dev->features |= NETIF_F_SOFT_FEATURES;
        dev->wanted_features = dev->features & dev->hw_features;
 
-       if (!(dev->flags & IFF_LOOPBACK)) {
+       if (!(dev->flags & IFF_LOOPBACK))
                dev->hw_features |= NETIF_F_NOCACHE_COPY;
-       }
+
+       /* If IPv4 TCP segmentation offload is supported, we should also
+        * allow the device to segment the frame with the option of
+        * ignoring a static IP ID value.  This doesn't enable the
+        * feature itself, but allows the user to enable it later.
+        */
+       if (dev->hw_features & NETIF_F_TSO)
+               dev->hw_features |= NETIF_F_TSO_MANGLEID;
+       if (dev->vlan_features & NETIF_F_TSO)
+               dev->vlan_features |= NETIF_F_TSO_MANGLEID;
+       if (dev->mpls_features & NETIF_F_TSO)
+               dev->mpls_features |= NETIF_F_TSO_MANGLEID;
+       if (dev->hw_enc_features & NETIF_F_TSO)
+               dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
 
        /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
         */
@@ -6985,7 +7057,7 @@ int register_netdevice(struct net_device *dev)
 
        /* Make NETIF_F_SG inheritable to tunnel devices.
         */
-       dev->hw_enc_features |= NETIF_F_SG;
+       dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
 
        /* Make NETIF_F_SG inheritable to MPLS.
         */
@@ -7428,7 +7500,6 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 
        dev->gso_max_size = GSO_MAX_SIZE;
        dev->gso_max_segs = GSO_MAX_SEGS;
-       dev->gso_min_segs = 0;
 
        INIT_LIST_HEAD(&dev->napi_list);
        INIT_LIST_HEAD(&dev->unreg_list);
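
Several hunks above make one offload feature conditional on another: NETIF_F_TSO_MANGLEID is dropped without NETIF_F_TSO, and the device's gso_partial_features are dropped without NETIF_F_GSO_PARTIAL. A minimal model of that dependent-feature fix-up (userspace C; the flag values are hypothetical, only the masking logic mirrors netdev_fix_features()):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t features_t;

#define F_TSO           (1ULL << 0)
#define F_TSO_MANGLEID  (1ULL << 1)
#define F_GSO_PARTIAL   (1ULL << 2)

static features_t fix_features(features_t f, features_t gso_partial_features)
{
        /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
        if ((f & F_TSO_MANGLEID) && !(f & F_TSO))
                f &= ~F_TSO_MANGLEID;

        /* GSO partial features require GSO partial be set */
        if ((f & gso_partial_features) && !(f & F_GSO_PARTIAL))
                f &= ~gso_partial_features;

        return f;
}

int main(void)
{
        features_t f = fix_features(F_TSO_MANGLEID, 0);

        printf("mangleid kept? %s\n", (f & F_TSO_MANGLEID) ? "yes" : "no");
        return 0;
}
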
index b84cf0df4a0eb0d7e79663103287274eea1229ae..933e8d4d3968a858807616abf68363d4805a455a 100644 (file)
@@ -119,8 +119,171 @@ static struct devlink_port *devlink_port_get_from_info(struct devlink *devlink,
        return devlink_port_get_from_attrs(devlink, info->attrs);
 }
 
+struct devlink_sb {
+       struct list_head list;
+       unsigned int index;
+       u32 size;
+       u16 ingress_pools_count;
+       u16 egress_pools_count;
+       u16 ingress_tc_count;
+       u16 egress_tc_count;
+};
+
+static u16 devlink_sb_pool_count(struct devlink_sb *devlink_sb)
+{
+       return devlink_sb->ingress_pools_count + devlink_sb->egress_pools_count;
+}
+
+static struct devlink_sb *devlink_sb_get_by_index(struct devlink *devlink,
+                                                 unsigned int sb_index)
+{
+       struct devlink_sb *devlink_sb;
+
+       list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
+               if (devlink_sb->index == sb_index)
+                       return devlink_sb;
+       }
+       return NULL;
+}
+
+static bool devlink_sb_index_exists(struct devlink *devlink,
+                                   unsigned int sb_index)
+{
+       return devlink_sb_get_by_index(devlink, sb_index);
+}
+
+static struct devlink_sb *devlink_sb_get_from_attrs(struct devlink *devlink,
+                                                   struct nlattr **attrs)
+{
+       if (attrs[DEVLINK_ATTR_SB_INDEX]) {
+               u32 sb_index = nla_get_u32(attrs[DEVLINK_ATTR_SB_INDEX]);
+               struct devlink_sb *devlink_sb;
+
+               devlink_sb = devlink_sb_get_by_index(devlink, sb_index);
+               if (!devlink_sb)
+                       return ERR_PTR(-ENODEV);
+               return devlink_sb;
+       }
+       return ERR_PTR(-EINVAL);
+}
+
+static struct devlink_sb *devlink_sb_get_from_info(struct devlink *devlink,
+                                                  struct genl_info *info)
+{
+       return devlink_sb_get_from_attrs(devlink, info->attrs);
+}
+
+static int devlink_sb_pool_index_get_from_attrs(struct devlink_sb *devlink_sb,
+                                               struct nlattr **attrs,
+                                               u16 *p_pool_index)
+{
+       u16 val;
+
+       if (!attrs[DEVLINK_ATTR_SB_POOL_INDEX])
+               return -EINVAL;
+
+       val = nla_get_u16(attrs[DEVLINK_ATTR_SB_POOL_INDEX]);
+       if (val >= devlink_sb_pool_count(devlink_sb))
+               return -EINVAL;
+       *p_pool_index = val;
+       return 0;
+}
+
+static int devlink_sb_pool_index_get_from_info(struct devlink_sb *devlink_sb,
+                                              struct genl_info *info,
+                                              u16 *p_pool_index)
+{
+       return devlink_sb_pool_index_get_from_attrs(devlink_sb, info->attrs,
+                                                   p_pool_index);
+}
+
+static int
+devlink_sb_pool_type_get_from_attrs(struct nlattr **attrs,
+                                   enum devlink_sb_pool_type *p_pool_type)
+{
+       u8 val;
+
+       if (!attrs[DEVLINK_ATTR_SB_POOL_TYPE])
+               return -EINVAL;
+
+       val = nla_get_u8(attrs[DEVLINK_ATTR_SB_POOL_TYPE]);
+       if (val != DEVLINK_SB_POOL_TYPE_INGRESS &&
+           val != DEVLINK_SB_POOL_TYPE_EGRESS)
+               return -EINVAL;
+       *p_pool_type = val;
+       return 0;
+}
+
+static int
+devlink_sb_pool_type_get_from_info(struct genl_info *info,
+                                  enum devlink_sb_pool_type *p_pool_type)
+{
+       return devlink_sb_pool_type_get_from_attrs(info->attrs, p_pool_type);
+}
+
+static int
+devlink_sb_th_type_get_from_attrs(struct nlattr **attrs,
+                                 enum devlink_sb_threshold_type *p_th_type)
+{
+       u8 val;
+
+       if (!attrs[DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE])
+               return -EINVAL;
+
+       val = nla_get_u8(attrs[DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE]);
+       if (val != DEVLINK_SB_THRESHOLD_TYPE_STATIC &&
+           val != DEVLINK_SB_THRESHOLD_TYPE_DYNAMIC)
+               return -EINVAL;
+       *p_th_type = val;
+       return 0;
+}
+
+static int
+devlink_sb_th_type_get_from_info(struct genl_info *info,
+                                enum devlink_sb_threshold_type *p_th_type)
+{
+       return devlink_sb_th_type_get_from_attrs(info->attrs, p_th_type);
+}
+
+static int
+devlink_sb_tc_index_get_from_attrs(struct devlink_sb *devlink_sb,
+                                  struct nlattr **attrs,
+                                  enum devlink_sb_pool_type pool_type,
+                                  u16 *p_tc_index)
+{
+       u16 val;
+
+       if (!attrs[DEVLINK_ATTR_SB_TC_INDEX])
+               return -EINVAL;
+
+       val = nla_get_u16(attrs[DEVLINK_ATTR_SB_TC_INDEX]);
+       if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS &&
+           val >= devlink_sb->ingress_tc_count)
+               return -EINVAL;
+       if (pool_type == DEVLINK_SB_POOL_TYPE_EGRESS &&
+           val >= devlink_sb->egress_tc_count)
+               return -EINVAL;
+       *p_tc_index = val;
+       return 0;
+}
+
+static int
+devlink_sb_tc_index_get_from_info(struct devlink_sb *devlink_sb,
+                                 struct genl_info *info,
+                                 enum devlink_sb_pool_type pool_type,
+                                 u16 *p_tc_index)
+{
+       return devlink_sb_tc_index_get_from_attrs(devlink_sb, info->attrs,
+                                                 pool_type, p_tc_index);
+}
+
 #define DEVLINK_NL_FLAG_NEED_DEVLINK   BIT(0)
 #define DEVLINK_NL_FLAG_NEED_PORT      BIT(1)
+#define DEVLINK_NL_FLAG_NEED_SB                BIT(2)
+#define DEVLINK_NL_FLAG_LOCK_PORTS     BIT(3)
+       /* port is not needed, but we must make sure that ports don't
+        * change in the middle of the command
+        */
 
 static int devlink_nl_pre_doit(const struct genl_ops *ops,
                               struct sk_buff *skb, struct genl_info *info)
@@ -147,13 +310,29 @@ static int devlink_nl_pre_doit(const struct genl_ops *ops,
                }
                info->user_ptr[0] = devlink_port;
        }
+       if (ops->internal_flags & DEVLINK_NL_FLAG_LOCK_PORTS)
+               mutex_lock(&devlink_port_mutex);
+       if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_SB) {
+               struct devlink_sb *devlink_sb;
+
+               devlink_sb = devlink_sb_get_from_info(devlink, info);
+               if (IS_ERR(devlink_sb)) {
+                       /* drop the port lock taken for NEED_PORT or LOCK_PORTS */
+                       if (ops->internal_flags & (DEVLINK_NL_FLAG_NEED_PORT |
+                                                  DEVLINK_NL_FLAG_LOCK_PORTS))
+                               mutex_unlock(&devlink_port_mutex);
+                       mutex_unlock(&devlink_mutex);
+                       return PTR_ERR(devlink_sb);
+               }
+               info->user_ptr[1] = devlink_sb;
+       }
        return 0;
 }
 
 static void devlink_nl_post_doit(const struct genl_ops *ops,
                                 struct sk_buff *skb, struct genl_info *info)
 {
-       if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT)
+       if (ops->internal_flags & (DEVLINK_NL_FLAG_NEED_PORT |
+                                  DEVLINK_NL_FLAG_LOCK_PORTS))
                mutex_unlock(&devlink_port_mutex);
        mutex_unlock(&devlink_mutex);
 }
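
The locking here is flag-driven: whatever devlink_nl_pre_doit() takes for an op must be released both by devlink_nl_post_doit() and by every pre_doit error path. A sketch of that pairing (userspace C with pthreads, simplified to two locks and two flag bits):

#include <pthread.h>

#define FLAG_NEED_PORT  (1 << 1)
#define FLAG_LOCK_PORTS (1 << 3)

static pthread_mutex_t main_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;

static int pre_doit(int flags, int fail_lookup)
{
        pthread_mutex_lock(&main_lock);
        if (flags & (FLAG_NEED_PORT | FLAG_LOCK_PORTS))
                pthread_mutex_lock(&port_lock);
        if (fail_lookup) {              /* e.g. no such shared buffer */
                if (flags & (FLAG_NEED_PORT | FLAG_LOCK_PORTS))
                        pthread_mutex_unlock(&port_lock);
                pthread_mutex_unlock(&main_lock);
                return -1;              /* caller skips doit/post_doit */
        }
        return 0;
}

static void post_doit(int flags)
{
        if (flags & (FLAG_NEED_PORT | FLAG_LOCK_PORTS))
                pthread_mutex_unlock(&port_lock);
        pthread_mutex_unlock(&main_lock);
}

int main(void)
{
        if (pre_doit(FLAG_LOCK_PORTS, 0) == 0)
                post_doit(FLAG_LOCK_PORTS);
        return 0;
}
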
@@ -499,126 +678,928 @@ static int devlink_nl_cmd_port_unsplit_doit(struct sk_buff *skb,
        return devlink_port_unsplit(devlink, port_index);
 }
 
-static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
-       [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING },
-       [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING },
-       [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32 },
-       [DEVLINK_ATTR_PORT_TYPE] = { .type = NLA_U16 },
-       [DEVLINK_ATTR_PORT_SPLIT_COUNT] = { .type = NLA_U32 },
-};
+static int devlink_nl_sb_fill(struct sk_buff *msg, struct devlink *devlink,
+                             struct devlink_sb *devlink_sb,
+                             enum devlink_command cmd, u32 portid,
+                             u32 seq, int flags)
+{
+       void *hdr;
 
-static const struct genl_ops devlink_nl_ops[] = {
-       {
-               .cmd = DEVLINK_CMD_GET,
-               .doit = devlink_nl_cmd_get_doit,
-               .dumpit = devlink_nl_cmd_get_dumpit,
-               .policy = devlink_nl_policy,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
-               /* can be retrieved by unprivileged users */
-       },
-       {
-               .cmd = DEVLINK_CMD_PORT_GET,
-               .doit = devlink_nl_cmd_port_get_doit,
-               .dumpit = devlink_nl_cmd_port_get_dumpit,
-               .policy = devlink_nl_policy,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
-               /* can be retrieved by unprivileged users */
-       },
-       {
-               .cmd = DEVLINK_CMD_PORT_SET,
-               .doit = devlink_nl_cmd_port_set_doit,
-               .policy = devlink_nl_policy,
-               .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
-       },
-       {
-               .cmd = DEVLINK_CMD_PORT_SPLIT,
-               .doit = devlink_nl_cmd_port_split_doit,
-               .policy = devlink_nl_policy,
-               .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
-       },
-       {
-               .cmd = DEVLINK_CMD_PORT_UNSPLIT,
-               .doit = devlink_nl_cmd_port_unsplit_doit,
-               .policy = devlink_nl_policy,
-               .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
-       },
-};
+       hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+       if (!hdr)
+               return -EMSGSIZE;
 
-/**
- *     devlink_alloc - Allocate new devlink instance resources
- *
- *     @ops: ops
- *     @priv_size: size of user private data
- *
- *     Allocate new devlink instance resources, including devlink index
- *     and name.
- */
-struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size)
-{
-       struct devlink *devlink;
+       if (devlink_nl_put_handle(msg, devlink))
+               goto nla_put_failure;
+       if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index))
+               goto nla_put_failure;
+       if (nla_put_u32(msg, DEVLINK_ATTR_SB_SIZE, devlink_sb->size))
+               goto nla_put_failure;
+       if (nla_put_u16(msg, DEVLINK_ATTR_SB_INGRESS_POOL_COUNT,
+                       devlink_sb->ingress_pools_count))
+               goto nla_put_failure;
+       if (nla_put_u16(msg, DEVLINK_ATTR_SB_EGRESS_POOL_COUNT,
+                       devlink_sb->egress_pools_count))
+               goto nla_put_failure;
+       if (nla_put_u16(msg, DEVLINK_ATTR_SB_INGRESS_TC_COUNT,
+                       devlink_sb->ingress_tc_count))
+               goto nla_put_failure;
+       if (nla_put_u16(msg, DEVLINK_ATTR_SB_EGRESS_TC_COUNT,
+                       devlink_sb->egress_tc_count))
+               goto nla_put_failure;
 
-       devlink = kzalloc(sizeof(*devlink) + priv_size, GFP_KERNEL);
-       if (!devlink)
-               return NULL;
-       devlink->ops = ops;
-       devlink_net_set(devlink, &init_net);
-       INIT_LIST_HEAD(&devlink->port_list);
-       return devlink;
+       genlmsg_end(msg, hdr);
+       return 0;
+
+nla_put_failure:
+       genlmsg_cancel(msg, hdr);
+       return -EMSGSIZE;
 }
-EXPORT_SYMBOL_GPL(devlink_alloc);
 
-/**
- *     devlink_register - Register devlink instance
- *
- *     @devlink: devlink
- */
-int devlink_register(struct devlink *devlink, struct device *dev)
+static int devlink_nl_cmd_sb_get_doit(struct sk_buff *skb,
+                                     struct genl_info *info)
 {
-       mutex_lock(&devlink_mutex);
-       devlink->dev = dev;
-       list_add_tail(&devlink->list, &devlink_list);
-       devlink_notify(devlink, DEVLINK_CMD_NEW);
-       mutex_unlock(&devlink_mutex);
-       return 0;
+       struct devlink *devlink = info->user_ptr[0];
+       struct devlink_sb *devlink_sb = info->user_ptr[1];
+       struct sk_buff *msg;
+       int err;
+
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!msg)
+               return -ENOMEM;
+
+       err = devlink_nl_sb_fill(msg, devlink, devlink_sb,
+                                DEVLINK_CMD_SB_NEW,
+                                info->snd_portid, info->snd_seq, 0);
+       if (err) {
+               nlmsg_free(msg);
+               return err;
+       }
+
+       return genlmsg_reply(msg, info);
 }
-EXPORT_SYMBOL_GPL(devlink_register);
 
-/**
- *     devlink_unregister - Unregister devlink instance
- *
- *     @devlink: devlink
- */
-void devlink_unregister(struct devlink *devlink)
+static int devlink_nl_cmd_sb_get_dumpit(struct sk_buff *msg,
+                                       struct netlink_callback *cb)
 {
+       struct devlink *devlink;
+       struct devlink_sb *devlink_sb;
+       int start = cb->args[0];
+       int idx = 0;
+       int err;
+
        mutex_lock(&devlink_mutex);
-       devlink_notify(devlink, DEVLINK_CMD_DEL);
-       list_del(&devlink->list);
+       list_for_each_entry(devlink, &devlink_list, list) {
+               if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
+                       continue;
+               list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
+                       if (idx < start) {
+                               idx++;
+                               continue;
+                       }
+                       err = devlink_nl_sb_fill(msg, devlink, devlink_sb,
+                                                DEVLINK_CMD_SB_NEW,
+                                                NETLINK_CB(cb->skb).portid,
+                                                cb->nlh->nlmsg_seq,
+                                                NLM_F_MULTI);
+                       if (err)
+                               goto out;
+                       idx++;
+               }
+       }
+out:
        mutex_unlock(&devlink_mutex);
+
+       cb->args[0] = idx;
+       return msg->len;
 }
-EXPORT_SYMBOL_GPL(devlink_unregister);
 
-/**
- *     devlink_free - Free devlink instance resources
- *
- *     @devlink: devlink
- */
-void devlink_free(struct devlink *devlink)
+static int devlink_nl_sb_pool_fill(struct sk_buff *msg, struct devlink *devlink,
+                                  struct devlink_sb *devlink_sb,
+                                  u16 pool_index, enum devlink_command cmd,
+                                  u32 portid, u32 seq, int flags)
 {
-       kfree(devlink);
+       struct devlink_sb_pool_info pool_info;
+       void *hdr;
+       int err;
+
+       err = devlink->ops->sb_pool_get(devlink, devlink_sb->index,
+                                       pool_index, &pool_info);
+       if (err)
+               return err;
+
+       hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+       if (!hdr)
+               return -EMSGSIZE;
+
+       if (devlink_nl_put_handle(msg, devlink))
+               goto nla_put_failure;
+       if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index))
+               goto nla_put_failure;
+       if (nla_put_u16(msg, DEVLINK_ATTR_SB_POOL_INDEX, pool_index))
+               goto nla_put_failure;
+       if (nla_put_u8(msg, DEVLINK_ATTR_SB_POOL_TYPE, pool_info.pool_type))
+               goto nla_put_failure;
+       if (nla_put_u32(msg, DEVLINK_ATTR_SB_POOL_SIZE, pool_info.size))
+               goto nla_put_failure;
+       if (nla_put_u8(msg, DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE,
+                      pool_info.threshold_type))
+               goto nla_put_failure;
+
+       genlmsg_end(msg, hdr);
+       return 0;
+
+nla_put_failure:
+       genlmsg_cancel(msg, hdr);
+       return -EMSGSIZE;
 }
-EXPORT_SYMBOL_GPL(devlink_free);
 
-/**
- *     devlink_port_register - Register devlink port
- *
- *     @devlink: devlink
- *     @devlink_port: devlink port
- *     @port_index
- *
- *     Register devlink port with provided port index. User can use
+static int devlink_nl_cmd_sb_pool_get_doit(struct sk_buff *skb,
+                                          struct genl_info *info)
+{
+       struct devlink *devlink = info->user_ptr[0];
+       struct devlink_sb *devlink_sb = info->user_ptr[1];
+       struct sk_buff *msg;
+       u16 pool_index;
+       int err;
+
+       err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
+                                                 &pool_index);
+       if (err)
+               return err;
+
+       if (!devlink->ops || !devlink->ops->sb_pool_get)
+               return -EOPNOTSUPP;
+
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!msg)
+               return -ENOMEM;
+
+       err = devlink_nl_sb_pool_fill(msg, devlink, devlink_sb, pool_index,
+                                     DEVLINK_CMD_SB_POOL_NEW,
+                                     info->snd_portid, info->snd_seq, 0);
+       if (err) {
+               nlmsg_free(msg);
+               return err;
+       }
+
+       return genlmsg_reply(msg, info);
+}
+
+static int __sb_pool_get_dumpit(struct sk_buff *msg, int start, int *p_idx,
+                               struct devlink *devlink,
+                               struct devlink_sb *devlink_sb,
+                               u32 portid, u32 seq)
+{
+       u16 pool_count = devlink_sb_pool_count(devlink_sb);
+       u16 pool_index;
+       int err;
+
+       for (pool_index = 0; pool_index < pool_count; pool_index++) {
+               if (*p_idx < start) {
+                       (*p_idx)++;
+                       continue;
+               }
+               err = devlink_nl_sb_pool_fill(msg, devlink,
+                                             devlink_sb,
+                                             pool_index,
+                                             DEVLINK_CMD_SB_POOL_NEW,
+                                             portid, seq, NLM_F_MULTI);
+               if (err)
+                       return err;
+               (*p_idx)++;
+       }
+       return 0;
+}
+
+static int devlink_nl_cmd_sb_pool_get_dumpit(struct sk_buff *msg,
+                                            struct netlink_callback *cb)
+{
+       struct devlink *devlink;
+       struct devlink_sb *devlink_sb;
+       int start = cb->args[0];
+       int idx = 0;
+       int err;
+
+       mutex_lock(&devlink_mutex);
+       list_for_each_entry(devlink, &devlink_list, list) {
+               if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) ||
+                   !devlink->ops || !devlink->ops->sb_pool_get)
+                       continue;
+               list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
+                       err = __sb_pool_get_dumpit(msg, start, &idx, devlink,
+                                                  devlink_sb,
+                                                  NETLINK_CB(cb->skb).portid,
+                                                  cb->nlh->nlmsg_seq);
+                       if (err && err != -EOPNOTSUPP)
+                               goto out;
+               }
+       }
+out:
+       mutex_unlock(&devlink_mutex);
+
+       cb->args[0] = idx;
+       return msg->len;
+}
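
All the dumpit callbacks above share one pagination idiom: cb->args[0] persists across invocations as a resume cursor, entries below it are skipped, and the dump is complete when a pass emits nothing. A self-contained model (userspace C; fill() and the per-message budget are hypothetical stand-ins for the netlink message filling up):

#include <stdio.h>

#define N_ENTRIES 5
#define BUDGET    2             /* entries that fit per message */

static void fill(int idx) { printf("entry %d\n", idx); }

static int dumpit(long *args)
{
        int start = (int)args[0];
        int idx, emitted = 0;

        for (idx = 0; idx < N_ENTRIES; idx++) {
                if (idx < start)
                        continue;       /* already sent last round */
                if (emitted == BUDGET)
                        break;          /* message full: stop here */
                fill(idx);
                emitted++;
        }
        args[0] = idx;                  /* resume point for next call */
        return emitted;                 /* 0 => dump complete */
}

int main(void)
{
        long args[1] = { 0 };

        while (dumpit(args) > 0)
                printf("-- next message --\n");
        return 0;
}
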
+
+static int devlink_sb_pool_set(struct devlink *devlink, unsigned int sb_index,
+                              u16 pool_index, u32 size,
+                              enum devlink_sb_threshold_type threshold_type)
+{
+       const struct devlink_ops *ops = devlink->ops;
+
+       if (ops && ops->sb_pool_set)
+               return ops->sb_pool_set(devlink, sb_index, pool_index,
+                                       size, threshold_type);
+       return -EOPNOTSUPP;
+}
+
+static int devlink_nl_cmd_sb_pool_set_doit(struct sk_buff *skb,
+                                          struct genl_info *info)
+{
+       struct devlink *devlink = info->user_ptr[0];
+       struct devlink_sb *devlink_sb = info->user_ptr[1];
+       enum devlink_sb_threshold_type threshold_type;
+       u16 pool_index;
+       u32 size;
+       int err;
+
+       err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
+                                                 &pool_index);
+       if (err)
+               return err;
+
+       err = devlink_sb_th_type_get_from_info(info, &threshold_type);
+       if (err)
+               return err;
+
+       if (!info->attrs[DEVLINK_ATTR_SB_POOL_SIZE])
+               return -EINVAL;
+
+       size = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_POOL_SIZE]);
+       return devlink_sb_pool_set(devlink, devlink_sb->index,
+                                  pool_index, size, threshold_type);
+}
+
+static int devlink_nl_sb_port_pool_fill(struct sk_buff *msg,
+                                       struct devlink *devlink,
+                                       struct devlink_port *devlink_port,
+                                       struct devlink_sb *devlink_sb,
+                                       u16 pool_index,
+                                       enum devlink_command cmd,
+                                       u32 portid, u32 seq, int flags)
+{
+       const struct devlink_ops *ops = devlink->ops;
+       u32 threshold;
+       void *hdr;
+       int err;
+
+       err = ops->sb_port_pool_get(devlink_port, devlink_sb->index,
+                                   pool_index, &threshold);
+       if (err)
+               return err;
+
+       hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+       if (!hdr)
+               return -EMSGSIZE;
+
+       if (devlink_nl_put_handle(msg, devlink))
+               goto nla_put_failure;
+       if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index))
+               goto nla_put_failure;
+       if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index))
+               goto nla_put_failure;
+       if (nla_put_u16(msg, DEVLINK_ATTR_SB_POOL_INDEX, pool_index))
+               goto nla_put_failure;
+       if (nla_put_u32(msg, DEVLINK_ATTR_SB_THRESHOLD, threshold))
+               goto nla_put_failure;
+
+       if (ops->sb_occ_port_pool_get) {
+               u32 cur;
+               u32 max;
+
+               err = ops->sb_occ_port_pool_get(devlink_port, devlink_sb->index,
+                                               pool_index, &cur, &max);
+               if (err && err != -EOPNOTSUPP)
+                       return err;
+               if (!err) {
+                       if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_CUR, cur))
+                               goto nla_put_failure;
+                       if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_MAX, max))
+                               goto nla_put_failure;
+               }
+       }
+
+       genlmsg_end(msg, hdr);
+       return 0;
+
+nla_put_failure:
+       genlmsg_cancel(msg, hdr);
+       return -EMSGSIZE;
+}
+
+static int devlink_nl_cmd_sb_port_pool_get_doit(struct sk_buff *skb,
+                                               struct genl_info *info)
+{
+       struct devlink_port *devlink_port = info->user_ptr[0];
+       struct devlink *devlink = devlink_port->devlink;
+       struct devlink_sb *devlink_sb = info->user_ptr[1];
+       struct sk_buff *msg;
+       u16 pool_index;
+       int err;
+
+       err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
+                                                 &pool_index);
+       if (err)
+               return err;
+
+       if (!devlink->ops || !devlink->ops->sb_port_pool_get)
+               return -EOPNOTSUPP;
+
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!msg)
+               return -ENOMEM;
+
+       err = devlink_nl_sb_port_pool_fill(msg, devlink, devlink_port,
+                                          devlink_sb, pool_index,
+                                          DEVLINK_CMD_SB_PORT_POOL_NEW,
+                                          info->snd_portid, info->snd_seq, 0);
+       if (err) {
+               nlmsg_free(msg);
+               return err;
+       }
+
+       return genlmsg_reply(msg, info);
+}
+
+static int __sb_port_pool_get_dumpit(struct sk_buff *msg, int start, int *p_idx,
+                                    struct devlink *devlink,
+                                    struct devlink_sb *devlink_sb,
+                                    u32 portid, u32 seq)
+{
+       struct devlink_port *devlink_port;
+       u16 pool_count = devlink_sb_pool_count(devlink_sb);
+       u16 pool_index;
+       int err;
+
+       list_for_each_entry(devlink_port, &devlink->port_list, list) {
+               for (pool_index = 0; pool_index < pool_count; pool_index++) {
+                       if (*p_idx < start) {
+                               (*p_idx)++;
+                               continue;
+                       }
+                       err = devlink_nl_sb_port_pool_fill(msg, devlink,
+                                                          devlink_port,
+                                                          devlink_sb,
+                                                          pool_index,
+                                                          DEVLINK_CMD_SB_PORT_POOL_NEW,
+                                                          portid, seq,
+                                                          NLM_F_MULTI);
+                       if (err)
+                               return err;
+                       (*p_idx)++;
+               }
+       }
+       return 0;
+}
+
+static int devlink_nl_cmd_sb_port_pool_get_dumpit(struct sk_buff *msg,
+                                                 struct netlink_callback *cb)
+{
+       struct devlink *devlink;
+       struct devlink_sb *devlink_sb;
+       int start = cb->args[0];
+       int idx = 0;
+       int err;
+
+       mutex_lock(&devlink_mutex);
+       mutex_lock(&devlink_port_mutex);
+       list_for_each_entry(devlink, &devlink_list, list) {
+               if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) ||
+                   !devlink->ops || !devlink->ops->sb_port_pool_get)
+                       continue;
+               list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
+                       err = __sb_port_pool_get_dumpit(msg, start, &idx,
+                                                       devlink, devlink_sb,
+                                                       NETLINK_CB(cb->skb).portid,
+                                                       cb->nlh->nlmsg_seq);
+                       if (err && err != -EOPNOTSUPP)
+                               goto out;
+               }
+       }
+out:
+       mutex_unlock(&devlink_port_mutex);
+       mutex_unlock(&devlink_mutex);
+
+       cb->args[0] = idx;
+       return msg->len;
+}
+
+static int devlink_sb_port_pool_set(struct devlink_port *devlink_port,
+                                   unsigned int sb_index, u16 pool_index,
+                                   u32 threshold)
+{
+       const struct devlink_ops *ops = devlink_port->devlink->ops;
+
+       if (ops && ops->sb_port_pool_set)
+               return ops->sb_port_pool_set(devlink_port, sb_index,
+                                            pool_index, threshold);
+       return -EOPNOTSUPP;
+}
+
+static int devlink_nl_cmd_sb_port_pool_set_doit(struct sk_buff *skb,
+                                               struct genl_info *info)
+{
+       struct devlink_port *devlink_port = info->user_ptr[0];
+       struct devlink_sb *devlink_sb = info->user_ptr[1];
+       u16 pool_index;
+       u32 threshold;
+       int err;
+
+       err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
+                                                 &pool_index);
+       if (err)
+               return err;
+
+       if (!info->attrs[DEVLINK_ATTR_SB_THRESHOLD])
+               return -EINVAL;
+
+       threshold = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_THRESHOLD]);
+       return devlink_sb_port_pool_set(devlink_port, devlink_sb->index,
+                                       pool_index, threshold);
+}
+
+static int
+devlink_nl_sb_tc_pool_bind_fill(struct sk_buff *msg, struct devlink *devlink,
+                               struct devlink_port *devlink_port,
+                               struct devlink_sb *devlink_sb, u16 tc_index,
+                               enum devlink_sb_pool_type pool_type,
+                               enum devlink_command cmd,
+                               u32 portid, u32 seq, int flags)
+{
+       const struct devlink_ops *ops = devlink->ops;
+       u16 pool_index;
+       u32 threshold;
+       void *hdr;
+       int err;
+
+       err = ops->sb_tc_pool_bind_get(devlink_port, devlink_sb->index,
+                                      tc_index, pool_type,
+                                      &pool_index, &threshold);
+       if (err)
+               return err;
+
+       hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+       if (!hdr)
+               return -EMSGSIZE;
+
+       if (devlink_nl_put_handle(msg, devlink))
+               goto nla_put_failure;
+       if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index))
+               goto nla_put_failure;
+       if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index))
+               goto nla_put_failure;
+       if (nla_put_u16(msg, DEVLINK_ATTR_SB_TC_INDEX, tc_index))
+               goto nla_put_failure;
+       if (nla_put_u8(msg, DEVLINK_ATTR_SB_POOL_TYPE, pool_type))
+               goto nla_put_failure;
+       if (nla_put_u16(msg, DEVLINK_ATTR_SB_POOL_INDEX, pool_index))
+               goto nla_put_failure;
+       if (nla_put_u32(msg, DEVLINK_ATTR_SB_THRESHOLD, threshold))
+               goto nla_put_failure;
+
+       if (ops->sb_occ_tc_port_bind_get) {
+               u32 cur;
+               u32 max;
+
+               err = ops->sb_occ_tc_port_bind_get(devlink_port,
+                                                  devlink_sb->index,
+                                                  tc_index, pool_type,
+                                                  &cur, &max);
+               if (err && err != -EOPNOTSUPP)
+                       return err;
+               if (!err) {
+                       if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_CUR, cur))
+                               goto nla_put_failure;
+                       if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_MAX, max))
+                               goto nla_put_failure;
+               }
+       }
+
+       genlmsg_end(msg, hdr);
+       return 0;
+
+nla_put_failure:
+       genlmsg_cancel(msg, hdr);
+       return -EMSGSIZE;
+}
+
+static int devlink_nl_cmd_sb_tc_pool_bind_get_doit(struct sk_buff *skb,
+                                                  struct genl_info *info)
+{
+       struct devlink_port *devlink_port = info->user_ptr[0];
+       struct devlink *devlink = devlink_port->devlink;
+       struct devlink_sb *devlink_sb = info->user_ptr[1];
+       struct sk_buff *msg;
+       enum devlink_sb_pool_type pool_type;
+       u16 tc_index;
+       int err;
+
+       err = devlink_sb_pool_type_get_from_info(info, &pool_type);
+       if (err)
+               return err;
+
+       err = devlink_sb_tc_index_get_from_info(devlink_sb, info,
+                                               pool_type, &tc_index);
+       if (err)
+               return err;
+
+       if (!devlink->ops || !devlink->ops->sb_tc_pool_bind_get)
+               return -EOPNOTSUPP;
+
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!msg)
+               return -ENOMEM;
+
+       err = devlink_nl_sb_tc_pool_bind_fill(msg, devlink, devlink_port,
+                                             devlink_sb, tc_index, pool_type,
+                                             DEVLINK_CMD_SB_TC_POOL_BIND_NEW,
+                                             info->snd_portid,
+                                             info->snd_seq, 0);
+       if (err) {
+               nlmsg_free(msg);
+               return err;
+       }
+
+       return genlmsg_reply(msg, info);
+}
+
+static int __sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
+                                       int start, int *p_idx,
+                                       struct devlink *devlink,
+                                       struct devlink_sb *devlink_sb,
+                                       u32 portid, u32 seq)
+{
+       struct devlink_port *devlink_port;
+       u16 tc_index;
+       int err;
+
+       list_for_each_entry(devlink_port, &devlink->port_list, list) {
+               for (tc_index = 0;
+                    tc_index < devlink_sb->ingress_tc_count; tc_index++) {
+                       if (*p_idx < start) {
+                               (*p_idx)++;
+                               continue;
+                       }
+                       err = devlink_nl_sb_tc_pool_bind_fill(msg, devlink,
+                                                             devlink_port,
+                                                             devlink_sb,
+                                                             tc_index,
+                                                             DEVLINK_SB_POOL_TYPE_INGRESS,
+                                                             DEVLINK_CMD_SB_TC_POOL_BIND_NEW,
+                                                             portid, seq,
+                                                             NLM_F_MULTI);
+                       if (err)
+                               return err;
+                       (*p_idx)++;
+               }
+               for (tc_index = 0;
+                    tc_index < devlink_sb->egress_tc_count; tc_index++) {
+                       if (*p_idx < start) {
+                               (*p_idx)++;
+                               continue;
+                       }
+                       err = devlink_nl_sb_tc_pool_bind_fill(msg, devlink,
+                                                             devlink_port,
+                                                             devlink_sb,
+                                                             tc_index,
+                                                             DEVLINK_SB_POOL_TYPE_EGRESS,
+                                                             DEVLINK_CMD_SB_TC_POOL_BIND_NEW,
+                                                             portid, seq,
+                                                             NLM_F_MULTI);
+                       if (err)
+                               return err;
+                       (*p_idx)++;
+               }
+       }
+       return 0;
+}
+
+static int
+devlink_nl_cmd_sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
+                                         struct netlink_callback *cb)
+{
+       struct devlink *devlink;
+       struct devlink_sb *devlink_sb;
+       int start = cb->args[0];
+       int idx = 0;
+       int err;
+
+       mutex_lock(&devlink_mutex);
+       mutex_lock(&devlink_port_mutex);
+       list_for_each_entry(devlink, &devlink_list, list) {
+               if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) ||
+                   !devlink->ops || !devlink->ops->sb_tc_pool_bind_get)
+                       continue;
+               list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
+                       err = __sb_tc_pool_bind_get_dumpit(msg, start, &idx,
+                                                          devlink,
+                                                          devlink_sb,
+                                                          NETLINK_CB(cb->skb).portid,
+                                                          cb->nlh->nlmsg_seq);
+                       if (err && err != -EOPNOTSUPP)
+                               goto out;
+               }
+       }
+out:
+       mutex_unlock(&devlink_port_mutex);
+       mutex_unlock(&devlink_mutex);
+
+       cb->args[0] = idx;
+       return msg->len;
+}
+
+static int devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
+                                      unsigned int sb_index, u16 tc_index,
+                                      enum devlink_sb_pool_type pool_type,
+                                      u16 pool_index, u32 threshold)
+{
+       const struct devlink_ops *ops = devlink_port->devlink->ops;
+
+       if (ops && ops->sb_tc_pool_bind_set)
+               return ops->sb_tc_pool_bind_set(devlink_port, sb_index,
+                                               tc_index, pool_type,
+                                               pool_index, threshold);
+       return -EOPNOTSUPP;
+}
+
+static int devlink_nl_cmd_sb_tc_pool_bind_set_doit(struct sk_buff *skb,
+                                                  struct genl_info *info)
+{
+       struct devlink_port *devlink_port = info->user_ptr[0];
+       struct devlink_sb *devlink_sb = info->user_ptr[1];
+       enum devlink_sb_pool_type pool_type;
+       u16 tc_index;
+       u16 pool_index;
+       u32 threshold;
+       int err;
+
+       err = devlink_sb_pool_type_get_from_info(info, &pool_type);
+       if (err)
+               return err;
+
+       err = devlink_sb_tc_index_get_from_info(devlink_sb, info,
+                                               pool_type, &tc_index);
+       if (err)
+               return err;
+
+       err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
+                                                 &pool_index);
+       if (err)
+               return err;
+
+       if (!info->attrs[DEVLINK_ATTR_SB_THRESHOLD])
+               return -EINVAL;
+
+       threshold = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_THRESHOLD]);
+       return devlink_sb_tc_pool_bind_set(devlink_port, devlink_sb->index,
+                                          tc_index, pool_type,
+                                          pool_index, threshold);
+}
+
+static int devlink_nl_cmd_sb_occ_snapshot_doit(struct sk_buff *skb,
+                                              struct genl_info *info)
+{
+       struct devlink *devlink = info->user_ptr[0];
+       struct devlink_sb *devlink_sb = info->user_ptr[1];
+       const struct devlink_ops *ops = devlink->ops;
+
+       if (ops && ops->sb_occ_snapshot)
+               return ops->sb_occ_snapshot(devlink, devlink_sb->index);
+       return -EOPNOTSUPP;
+}
+
+static int devlink_nl_cmd_sb_occ_max_clear_doit(struct sk_buff *skb,
+                                               struct genl_info *info)
+{
+       struct devlink *devlink = info->user_ptr[0];
+       struct devlink_sb *devlink_sb = info->user_ptr[1];
+       const struct devlink_ops *ops = devlink->ops;
+
+       if (ops && ops->sb_occ_max_clear)
+               return ops->sb_occ_max_clear(devlink, devlink_sb->index);
+       return -EOPNOTSUPP;
+}
+
+static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
+       [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING },
+       [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING },
+       [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32 },
+       [DEVLINK_ATTR_PORT_TYPE] = { .type = NLA_U16 },
+       [DEVLINK_ATTR_PORT_SPLIT_COUNT] = { .type = NLA_U32 },
+       [DEVLINK_ATTR_SB_INDEX] = { .type = NLA_U32 },
+       [DEVLINK_ATTR_SB_POOL_INDEX] = { .type = NLA_U16 },
+       [DEVLINK_ATTR_SB_POOL_TYPE] = { .type = NLA_U8 },
+       [DEVLINK_ATTR_SB_POOL_SIZE] = { .type = NLA_U32 },
+       [DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE] = { .type = NLA_U8 },
+       [DEVLINK_ATTR_SB_THRESHOLD] = { .type = NLA_U32 },
+       [DEVLINK_ATTR_SB_TC_INDEX] = { .type = NLA_U16 },
+};
+
+static const struct genl_ops devlink_nl_ops[] = {
+       {
+               .cmd = DEVLINK_CMD_GET,
+               .doit = devlink_nl_cmd_get_doit,
+               .dumpit = devlink_nl_cmd_get_dumpit,
+               .policy = devlink_nl_policy,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+               /* can be retrieved by unprivileged users */
+       },
+       {
+               .cmd = DEVLINK_CMD_PORT_GET,
+               .doit = devlink_nl_cmd_port_get_doit,
+               .dumpit = devlink_nl_cmd_port_get_dumpit,
+               .policy = devlink_nl_policy,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
+               /* can be retrieved by unprivileged users */
+       },
+       {
+               .cmd = DEVLINK_CMD_PORT_SET,
+               .doit = devlink_nl_cmd_port_set_doit,
+               .policy = devlink_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
+       },
+       {
+               .cmd = DEVLINK_CMD_PORT_SPLIT,
+               .doit = devlink_nl_cmd_port_split_doit,
+               .policy = devlink_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+       },
+       {
+               .cmd = DEVLINK_CMD_PORT_UNSPLIT,
+               .doit = devlink_nl_cmd_port_unsplit_doit,
+               .policy = devlink_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+       },
+       {
+               .cmd = DEVLINK_CMD_SB_GET,
+               .doit = devlink_nl_cmd_sb_get_doit,
+               .dumpit = devlink_nl_cmd_sb_get_dumpit,
+               .policy = devlink_nl_policy,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+                                 DEVLINK_NL_FLAG_NEED_SB,
+               /* can be retrieved by unprivileged users */
+       },
+       {
+               .cmd = DEVLINK_CMD_SB_POOL_GET,
+               .doit = devlink_nl_cmd_sb_pool_get_doit,
+               .dumpit = devlink_nl_cmd_sb_pool_get_dumpit,
+               .policy = devlink_nl_policy,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+                                 DEVLINK_NL_FLAG_NEED_SB,
+               /* can be retrieved by unprivileged users */
+       },
+       {
+               .cmd = DEVLINK_CMD_SB_POOL_SET,
+               .doit = devlink_nl_cmd_sb_pool_set_doit,
+               .policy = devlink_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+                                 DEVLINK_NL_FLAG_NEED_SB,
+       },
+       {
+               .cmd = DEVLINK_CMD_SB_PORT_POOL_GET,
+               .doit = devlink_nl_cmd_sb_port_pool_get_doit,
+               .dumpit = devlink_nl_cmd_sb_port_pool_get_dumpit,
+               .policy = devlink_nl_policy,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_PORT |
+                                 DEVLINK_NL_FLAG_NEED_SB,
+               /* can be retrieved by unprivileged users */
+       },
+       {
+               .cmd = DEVLINK_CMD_SB_PORT_POOL_SET,
+               .doit = devlink_nl_cmd_sb_port_pool_set_doit,
+               .policy = devlink_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_PORT |
+                                 DEVLINK_NL_FLAG_NEED_SB,
+       },
+       {
+               .cmd = DEVLINK_CMD_SB_TC_POOL_BIND_GET,
+               .doit = devlink_nl_cmd_sb_tc_pool_bind_get_doit,
+               .dumpit = devlink_nl_cmd_sb_tc_pool_bind_get_dumpit,
+               .policy = devlink_nl_policy,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_PORT |
+                                 DEVLINK_NL_FLAG_NEED_SB,
+               /* can be retrieved by unprivileged users */
+       },
+       {
+               .cmd = DEVLINK_CMD_SB_TC_POOL_BIND_SET,
+               .doit = devlink_nl_cmd_sb_tc_pool_bind_set_doit,
+               .policy = devlink_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_PORT |
+                                 DEVLINK_NL_FLAG_NEED_SB,
+       },
+       {
+               .cmd = DEVLINK_CMD_SB_OCC_SNAPSHOT,
+               .doit = devlink_nl_cmd_sb_occ_snapshot_doit,
+               .policy = devlink_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+                                 DEVLINK_NL_FLAG_NEED_SB |
+                                 DEVLINK_NL_FLAG_LOCK_PORTS,
+       },
+       {
+               .cmd = DEVLINK_CMD_SB_OCC_MAX_CLEAR,
+               .doit = devlink_nl_cmd_sb_occ_max_clear_doit,
+               .policy = devlink_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+                                 DEVLINK_NL_FLAG_NEED_SB |
+                                 DEVLINK_NL_FLAG_LOCK_PORTS,
+       },
+};
+
+/**
+ *     devlink_alloc - Allocate new devlink instance resources
+ *
+ *     @ops: devlink operations
+ *     @priv_size: size of user private data
+ *
+ *     Allocate a new devlink instance together with @priv_size bytes
+ *     of driver private data.
+ */
+struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size)
+{
+       struct devlink *devlink;
+
+       devlink = kzalloc(sizeof(*devlink) + priv_size, GFP_KERNEL);
+       if (!devlink)
+               return NULL;
+       devlink->ops = ops;
+       devlink_net_set(devlink, &init_net);
+       INIT_LIST_HEAD(&devlink->port_list);
+       INIT_LIST_HEAD(&devlink->sb_list);
+       return devlink;
+}
+EXPORT_SYMBOL_GPL(devlink_alloc);
+
+/**
+ *     devlink_register - Register devlink instance
+ *
+ *     @devlink: devlink
+ *     @dev: parent device
+ */
+int devlink_register(struct devlink *devlink, struct device *dev)
+{
+       mutex_lock(&devlink_mutex);
+       devlink->dev = dev;
+       list_add_tail(&devlink->list, &devlink_list);
+       devlink_notify(devlink, DEVLINK_CMD_NEW);
+       mutex_unlock(&devlink_mutex);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_register);
+
+/**
+ *     devlink_unregister - Unregister devlink instance
+ *
+ *     @devlink: devlink
+ */
+void devlink_unregister(struct devlink *devlink)
+{
+       mutex_lock(&devlink_mutex);
+       devlink_notify(devlink, DEVLINK_CMD_DEL);
+       list_del(&devlink->list);
+       mutex_unlock(&devlink_mutex);
+}
+EXPORT_SYMBOL_GPL(devlink_unregister);
+
+/**
+ *     devlink_free - Free devlink instance resources
+ *
+ *     @devlink: devlink
+ */
+void devlink_free(struct devlink *devlink)
+{
+       kfree(devlink);
+}
+EXPORT_SYMBOL_GPL(devlink_free);
+
+/**
+ *     devlink_port_register - Register devlink port
+ *
+ *     @devlink: devlink
+ *     @devlink_port: devlink port
+ *     @port_index: driver-specific port index
+ *
+ *     Register devlink port with the provided port index. The driver may
  *     use any indexing scheme, even a hardware-related one. The devlink_port
  *     structure is meant to be embedded in the driver's private structure.
  *     Note that the caller should take care of zeroing the devlink_port
@@ -721,6 +1702,51 @@ void devlink_port_split_set(struct devlink_port *devlink_port,
 }
 EXPORT_SYMBOL_GPL(devlink_port_split_set);
 
+int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
+                       u32 size, u16 ingress_pools_count,
+                       u16 egress_pools_count, u16 ingress_tc_count,
+                       u16 egress_tc_count)
+{
+       struct devlink_sb *devlink_sb;
+       int err = 0;
+
+       mutex_lock(&devlink_mutex);
+       if (devlink_sb_index_exists(devlink, sb_index)) {
+               err = -EEXIST;
+               goto unlock;
+       }
+
+       devlink_sb = kzalloc(sizeof(*devlink_sb), GFP_KERNEL);
+       if (!devlink_sb) {
+               err = -ENOMEM;
+               goto unlock;
+       }
+       devlink_sb->index = sb_index;
+       devlink_sb->size = size;
+       devlink_sb->ingress_pools_count = ingress_pools_count;
+       devlink_sb->egress_pools_count = egress_pools_count;
+       devlink_sb->ingress_tc_count = ingress_tc_count;
+       devlink_sb->egress_tc_count = egress_tc_count;
+       list_add_tail(&devlink_sb->list, &devlink->sb_list);
+unlock:
+       mutex_unlock(&devlink_mutex);
+       return err;
+}
+EXPORT_SYMBOL_GPL(devlink_sb_register);
+
+void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index)
+{
+       struct devlink_sb *devlink_sb;
+
+       mutex_lock(&devlink_mutex);
+       devlink_sb = devlink_sb_get_by_index(devlink, sb_index);
+       WARN_ON(!devlink_sb);
+       list_del(&devlink_sb->list);
+       mutex_unlock(&devlink_mutex);
+       kfree(devlink_sb);
+}
+EXPORT_SYMBOL_GPL(devlink_sb_unregister);
+
 static int __init devlink_module_init(void)
 {
        return genl_register_family_with_ops_groups(&devlink_nl_family,
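
Taken together, the hooks above form a small driver-facing API: allocate an
instance, register it against the parent struct device, then describe each
hardware shared buffer. A minimal probe-path sketch, assuming a hypothetical
mydrv driver and an illustrative 1 MB buffer geometry (none of the mydrv_*
names come from this patch):

#include <net/devlink.h>

struct mydrv_priv { int dummy; };

static int mydrv_sb_occ_snapshot(struct devlink *devlink,
                                 unsigned int sb_index)
{
        /* a real driver would trigger a hardware occupancy snapshot here */
        return 0;
}

static const struct devlink_ops mydrv_devlink_ops = {
        .sb_occ_snapshot = mydrv_sb_occ_snapshot,
};

static int mydrv_probe(struct device *dev)
{
        struct devlink *devlink;
        int err;

        devlink = devlink_alloc(&mydrv_devlink_ops,
                                sizeof(struct mydrv_priv));
        if (!devlink)
                return -ENOMEM;
        err = devlink_register(devlink, dev);
        if (err)
                goto err_free;
        /* buffer 0: 1 MB, 4 ingress/4 egress pools, 8 ingress/8 egress TCs */
        err = devlink_sb_register(devlink, 0, 1 << 20, 4, 4, 8, 8);
        if (err)
                goto err_unregister;
        return 0;

err_unregister:
        devlink_unregister(devlink);
err_free:
        devlink_free(devlink);
        return err;
}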
index f426c5ad6149deefb55f9aa9fccd3b1392206357..bdb4013581b1ada73a5957f250c821cca1efb65d 100644 (file)
@@ -79,12 +79,16 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
        [NETIF_F_UFO_BIT] =              "tx-udp-fragmentation",
        [NETIF_F_GSO_ROBUST_BIT] =       "tx-gso-robust",
        [NETIF_F_TSO_ECN_BIT] =          "tx-tcp-ecn-segmentation",
+       [NETIF_F_TSO_MANGLEID_BIT] =     "tx-tcp-mangleid-segmentation",
        [NETIF_F_TSO6_BIT] =             "tx-tcp6-segmentation",
        [NETIF_F_FSO_BIT] =              "tx-fcoe-segmentation",
        [NETIF_F_GSO_GRE_BIT] =          "tx-gre-segmentation",
+       [NETIF_F_GSO_GRE_CSUM_BIT] =     "tx-gre-csum-segmentation",
        [NETIF_F_GSO_IPIP_BIT] =         "tx-ipip-segmentation",
        [NETIF_F_GSO_SIT_BIT] =          "tx-sit-segmentation",
        [NETIF_F_GSO_UDP_TUNNEL_BIT] =   "tx-udp_tnl-segmentation",
+       [NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT] = "tx-udp_tnl-csum-segmentation",
+       [NETIF_F_GSO_PARTIAL_BIT] =      "tx-gso-partial",
 
        [NETIF_F_FCOE_CRC_BIT] =         "tx-checksum-fcoe-crc",
        [NETIF_F_SCTP_CRC_BIT] =        "tx-checksum-sctp",
@@ -387,15 +391,17 @@ static int __ethtool_set_flags(struct net_device *dev, u32 data)
        return 0;
 }
 
-static void convert_legacy_u32_to_link_mode(unsigned long *dst, u32 legacy_u32)
+void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst,
+                                            u32 legacy_u32)
 {
        bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS);
        dst[0] = legacy_u32;
 }
+EXPORT_SYMBOL(ethtool_convert_legacy_u32_to_link_mode);
 
 /* return false if src had higher bits set. lower bits always updated. */
-static bool convert_link_mode_to_legacy_u32(u32 *legacy_u32,
-                                           const unsigned long *src)
+bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
+                                            const unsigned long *src)
 {
        bool retval = true;
 
@@ -415,6 +421,7 @@ static bool convert_link_mode_to_legacy_u32(u32 *legacy_u32,
        *legacy_u32 = src[0];
        return retval;
 }
+EXPORT_SYMBOL(ethtool_convert_link_mode_to_legacy_u32);
 
 /* return false if legacy contained non-0 deprecated fields
  * transceiver/maxtxpkt/maxrxpkt. rest of ksettings always updated
@@ -437,13 +444,13 @@ convert_legacy_settings_to_link_ksettings(
            legacy_settings->maxrxpkt)
                retval = false;
 
-       convert_legacy_u32_to_link_mode(
+       ethtool_convert_legacy_u32_to_link_mode(
                link_ksettings->link_modes.supported,
                legacy_settings->supported);
-       convert_legacy_u32_to_link_mode(
+       ethtool_convert_legacy_u32_to_link_mode(
                link_ksettings->link_modes.advertising,
                legacy_settings->advertising);
-       convert_legacy_u32_to_link_mode(
+       ethtool_convert_legacy_u32_to_link_mode(
                link_ksettings->link_modes.lp_advertising,
                legacy_settings->lp_advertising);
        link_ksettings->base.speed
@@ -482,13 +489,13 @@ convert_link_ksettings_to_legacy_settings(
         * __u32        maxrxpkt;
         */
 
-       retval &= convert_link_mode_to_legacy_u32(
+       retval &= ethtool_convert_link_mode_to_legacy_u32(
                &legacy_settings->supported,
                link_ksettings->link_modes.supported);
-       retval &= convert_link_mode_to_legacy_u32(
+       retval &= ethtool_convert_link_mode_to_legacy_u32(
                &legacy_settings->advertising,
                link_ksettings->link_modes.advertising);
-       retval &= convert_link_mode_to_legacy_u32(
+       retval &= ethtool_convert_link_mode_to_legacy_u32(
                &legacy_settings->lp_advertising,
                link_ksettings->link_modes.lp_advertising);
        ethtool_cmd_speed_set(legacy_settings, link_ksettings->base.speed);
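
Exporting the two converters lets drivers reuse them when wiring up the new
link_ksettings paths. A short sketch of the intended use; the mydev_* name
and the legacy advertising mask are made up for illustration:

#include <linux/ethtool.h>
#include <linux/printk.h>

static void mydev_fill_advertising(struct ethtool_link_ksettings *ks)
{
        u32 legacy = ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg;
        u32 back;

        /* expand the legacy u32 into the new link-mode bitmap */
        ethtool_convert_legacy_u32_to_link_mode(ks->link_modes.advertising,
                                                legacy);

        /* the reverse helper reports whether any mode could not fit */
        if (!ethtool_convert_link_mode_to_legacy_u32(&back,
                                                     ks->link_modes.advertising))
                pr_warn("link modes beyond the legacy u32 were dropped\n");
}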
index 365de66436aca8dba3868aa565d4cb77353b58d1..840acebbb80cdf238c32ba3684bef94e19a7c51a 100644 (file)
@@ -549,7 +549,7 @@ static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
                         + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
                         + nla_total_size(4) /* FRA_FWMARK */
                         + nla_total_size(4) /* FRA_FWMASK */
-                        + nla_total_size(8); /* FRA_TUN_ID */
+                        + nla_total_size_64bit(8); /* FRA_TUN_ID */
 
        if (ops->nlmsg_payload)
                payload += ops->nlmsg_payload(rule);
@@ -607,7 +607,7 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
            (rule->target &&
             nla_put_u32(skb, FRA_GOTO, rule->target)) ||
            (rule->tun_id &&
-            nla_put_be64(skb, FRA_TUN_ID, rule->tun_id)))
+            nla_put_be64(skb, FRA_TUN_ID, rule->tun_id, FRA_PAD)))
                goto nla_put_failure;
 
        if (rule->suppress_ifgroup != -1) {
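
FRA_PAD follows the new 64-bit netlink convention: attribute payloads are
only guaranteed 4-byte alignment, so the *_64bit putters insert a zero-length
pad attribute of the given type whenever an 8-byte value would otherwise land
unaligned. A sketch of the pattern, with hypothetical MYATTR_* types:

#include <net/netlink.h>

enum { MYATTR_UNSPEC, MYATTR_COUNTER, MYATTR_PAD }; /* hypothetical */

static int put_counter(struct sk_buff *skb, u64 value)
{
        /* emits a MYATTR_PAD attribute first when needed so that the
         * 64-bit payload of MYATTR_COUNTER starts 8-byte aligned
         */
        if (nla_put_u64_64bit(skb, MYATTR_COUNTER, value, MYATTR_PAD))
                return -EMSGSIZE;
        return 0;
}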
index e8486ba601eae71c9f54416cfd1a4dc1945ecb4c..71c2a1f473adb82a77974cc377f88b5c26f5d42a 100644 (file)
@@ -1344,6 +1344,21 @@ struct bpf_scratchpad {
 
 static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);
 
+static inline int bpf_try_make_writable(struct sk_buff *skb,
+                                       unsigned int write_len)
+{
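+       /* Same job as skb_try_make_writable(), but if pskb_expand_head()
+        * reallocated the head, the data_end pointer cached in skb->cb
+        * must be recomputed so direct packet access stays in sync.
+        */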
+       int err;
+
+       if (!skb_cloned(skb))
+               return 0;
+       if (skb_clone_writable(skb, write_len))
+               return 0;
+       err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+       if (!err)
+               bpf_compute_data_end(skb);
+       return err;
+}
+
 static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
 {
        struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
@@ -1366,7 +1381,7 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
         */
        if (unlikely((u32) offset > 0xffff || len > sizeof(sp->buff)))
                return -EFAULT;
-       if (unlikely(skb_try_make_writable(skb, offset + len)))
+       if (unlikely(bpf_try_make_writable(skb, offset + len)))
                return -EFAULT;
 
        ptr = skb_header_pointer(skb, offset, len, sp->buff);
@@ -1409,16 +1424,19 @@ static u64 bpf_skb_load_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
        unsigned int len = (unsigned int) r4;
        void *ptr;
 
-       if (unlikely((u32) offset > 0xffff || len > MAX_BPF_STACK))
-               return -EFAULT;
+       if (unlikely((u32) offset > 0xffff))
+               goto err_clear;
 
        ptr = skb_header_pointer(skb, offset, len, to);
        if (unlikely(!ptr))
-               return -EFAULT;
+               goto err_clear;
        if (ptr != to)
                memcpy(to, ptr, len);
 
        return 0;
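+       /* With ARG_PTR_TO_RAW_STACK the verifier no longer requires the
+        * buffer to be initialized before the call, so the helper must
+        * write all of it on every path: zero it on failure.
+        */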
+err_clear:
+       memset(to, 0, len);
+       return -EFAULT;
 }
 
 static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
@@ -1427,7 +1445,7 @@ static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_ANYTHING,
-       .arg3_type      = ARG_PTR_TO_STACK,
+       .arg3_type      = ARG_PTR_TO_RAW_STACK,
        .arg4_type      = ARG_CONST_STACK_SIZE,
 };
 
@@ -1441,7 +1459,7 @@ static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
                return -EINVAL;
        if (unlikely((u32) offset > 0xffff))
                return -EFAULT;
-       if (unlikely(skb_try_make_writable(skb, offset + sizeof(sum))))
+       if (unlikely(bpf_try_make_writable(skb, offset + sizeof(sum))))
                return -EFAULT;
 
        ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
@@ -1496,7 +1514,7 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
                return -EINVAL;
        if (unlikely((u32) offset > 0xffff))
                return -EFAULT;
-       if (unlikely(skb_try_make_writable(skb, offset + sizeof(sum))))
+       if (unlikely(bpf_try_make_writable(skb, offset + sizeof(sum))))
                return -EFAULT;
 
        ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
@@ -1696,12 +1714,15 @@ static u64 bpf_skb_vlan_push(u64 r1, u64 r2, u64 vlan_tci, u64 r4, u64 r5)
 {
        struct sk_buff *skb = (struct sk_buff *) (long) r1;
        __be16 vlan_proto = (__force __be16) r2;
+       int ret;
 
        if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
                     vlan_proto != htons(ETH_P_8021AD)))
                vlan_proto = htons(ETH_P_8021Q);
 
-       return skb_vlan_push(skb, vlan_proto, vlan_tci);
+       ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
+       bpf_compute_data_end(skb);
+       return ret;
 }
 
 const struct bpf_func_proto bpf_skb_vlan_push_proto = {
@@ -1717,8 +1738,11 @@ EXPORT_SYMBOL_GPL(bpf_skb_vlan_push_proto);
 static u64 bpf_skb_vlan_pop(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 {
        struct sk_buff *skb = (struct sk_buff *) (long) r1;
+       int ret;
 
-       return skb_vlan_pop(skb);
+       ret = skb_vlan_pop(skb);
+       bpf_compute_data_end(skb);
+       return ret;
 }
 
 const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
@@ -1756,12 +1780,19 @@ static u64 bpf_skb_get_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
        struct bpf_tunnel_key *to = (struct bpf_tunnel_key *) (long) r2;
        const struct ip_tunnel_info *info = skb_tunnel_info(skb);
        u8 compat[sizeof(struct bpf_tunnel_key)];
+       void *to_orig = to;
+       int err;
 
-       if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6))))
-               return -EINVAL;
-       if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags))
-               return -EPROTO;
+       if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6)))) {
+               err = -EINVAL;
+               goto err_clear;
+       }
+       if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags)) {
+               err = -EPROTO;
+               goto err_clear;
+       }
        if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
+               err = -EINVAL;
                switch (size) {
                case offsetof(struct bpf_tunnel_key, tunnel_label):
                case offsetof(struct bpf_tunnel_key, tunnel_ext):
@@ -1771,12 +1802,12 @@ static u64 bpf_skb_get_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
                         * a common path later on.
                         */
                        if (ip_tunnel_info_af(info) != AF_INET)
-                               return -EINVAL;
+                               goto err_clear;
 set_compat:
                        to = (struct bpf_tunnel_key *)compat;
                        break;
                default:
-                       return -EINVAL;
+                       goto err_clear;
                }
        }
 
@@ -1793,9 +1824,12 @@ set_compat:
        }
 
        if (unlikely(size != sizeof(struct bpf_tunnel_key)))
-               memcpy((void *)(long) r2, to, size);
+               memcpy(to_orig, to, size);
 
        return 0;
+err_clear:
+       memset(to_orig, 0, size);
+       return err;
 }
 
 static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
@@ -1803,7 +1837,7 @@ static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
-       .arg2_type      = ARG_PTR_TO_STACK,
+       .arg2_type      = ARG_PTR_TO_RAW_STACK,
        .arg3_type      = ARG_CONST_STACK_SIZE,
        .arg4_type      = ARG_ANYTHING,
 };
@@ -1813,16 +1847,26 @@ static u64 bpf_skb_get_tunnel_opt(u64 r1, u64 r2, u64 size, u64 r4, u64 r5)
        struct sk_buff *skb = (struct sk_buff *) (long) r1;
        u8 *to = (u8 *) (long) r2;
        const struct ip_tunnel_info *info = skb_tunnel_info(skb);
+       int err;
 
        if (unlikely(!info ||
-                    !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT)))
-               return -ENOENT;
-       if (unlikely(size < info->options_len))
-               return -ENOMEM;
+                    !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) {
+               err = -ENOENT;
+               goto err_clear;
+       }
+       if (unlikely(size < info->options_len)) {
+               err = -ENOMEM;
+               goto err_clear;
+       }
 
        ip_tunnel_info_opts_get(to, info);
+       if (size > info->options_len)
+               memset(to + info->options_len, 0, size - info->options_len);
 
        return info->options_len;
+err_clear:
+       memset(to, 0, size);
+       return err;
 }
 
 static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = {
@@ -1830,7 +1874,7 @@ static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = {
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
-       .arg2_type      = ARG_PTR_TO_STACK,
+       .arg2_type      = ARG_PTR_TO_RAW_STACK,
        .arg3_type      = ARG_CONST_STACK_SIZE,
 };
 
@@ -2016,6 +2060,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
                return &bpf_redirect_proto;
        case BPF_FUNC_get_route_realm:
                return &bpf_get_route_realm_proto;
+       case BPF_FUNC_perf_event_output:
+               return bpf_get_event_output_proto();
        default:
                return sk_filter_func_proto(func_id);
        }
@@ -2041,8 +2087,12 @@ static bool __is_valid_access(int off, int size, enum bpf_access_type type)
 static bool sk_filter_is_valid_access(int off, int size,
                                      enum bpf_access_type type)
 {
-       if (off == offsetof(struct __sk_buff, tc_classid))
+       switch (off) {
+       case offsetof(struct __sk_buff, tc_classid):
+       case offsetof(struct __sk_buff, data):
+       case offsetof(struct __sk_buff, data_end):
                return false;
+       }
 
        if (type == BPF_WRITE) {
                switch (off) {
@@ -2190,6 +2240,20 @@ static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
                        *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, ctx_off);
                break;
 
+       case offsetof(struct __sk_buff, data):
+               *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, data)),
+                                     dst_reg, src_reg,
+                                     offsetof(struct sk_buff, data));
+               break;
+
+       case offsetof(struct __sk_buff, data_end):
+               ctx_off -= offsetof(struct __sk_buff, data_end);
+               ctx_off += offsetof(struct sk_buff, cb);
+               ctx_off += offsetof(struct bpf_skb_data_end, data_end);
+               *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(sizeof(void *)),
+                                     dst_reg, src_reg, ctx_off);
+               break;
+
        case offsetof(struct __sk_buff, tc_index):
 #ifdef CONFIG_NET_SCHED
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tc_index) != 2);
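
The new data/data_end context fields exist so that cls_bpf programs can do
bounds-checked direct packet access instead of going through
bpf_skb_load_bytes(). A sketch of a classifier body written against the
4.7-era uapi; names and byte-order helpers follow the samples/bpf
conventions of the time and are not part of this patch:

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>

int drop_non_tcp(struct __sk_buff *skb)
{
        void *data = (void *)(long)skb->data;
        void *data_end = (void *)(long)skb->data_end;
        struct ethhdr *eth = data;
        struct iphdr *iph = data + sizeof(*eth);

        /* the verifier rejects any access not preceded by a bounds check */
        if (data + sizeof(*eth) + sizeof(*iph) > data_end)
                return 0;
        if (eth->h_proto != __constant_htons(ETH_P_IP))
                return 0;
        return iph->protocol == IPPROTO_TCP ? 0 : 2; /* 2 == TC_ACT_SHOT */
}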
index 1033725be40bd8f254ce27680e3b8abd09ad1546..3937b1b68d5bc7ad50691716ac1612332a5dc997 100644 (file)
@@ -92,8 +92,11 @@ static void flow_cache_gc_task(struct work_struct *work)
        list_splice_tail_init(&xfrm->flow_cache_gc_list, &gc_list);
        spin_unlock_bh(&xfrm->flow_cache_gc_lock);
 
-       list_for_each_entry_safe(fce, n, &gc_list, u.gc_list)
+       list_for_each_entry_safe(fce, n, &gc_list, u.gc_list) {
                flow_entry_kill(fce, xfrm);
+               atomic_dec(&xfrm->flow_cache_gc_count);
+               WARN_ON(atomic_read(&xfrm->flow_cache_gc_count) < 0);
+       }
 }
 
 static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
@@ -101,6 +104,7 @@ static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
                                     struct netns_xfrm *xfrm)
 {
        if (deleted) {
+               atomic_add(deleted, &xfrm->flow_cache_gc_count);
                fcp->hash_count -= deleted;
                spin_lock_bh(&xfrm->flow_cache_gc_lock);
                list_splice_tail(gc_list, &xfrm->flow_cache_gc_list);
@@ -232,6 +236,13 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
                if (fcp->hash_count > fc->high_watermark)
                        flow_cache_shrink(fc, fcp);
 
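+               /* Too many entries are pending garbage collection: fail
+                * this lookup with ENOBUFS instead of allocating another
+                * entry, and bump the genid so cached entries get
+                * revalidated.
+                */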
+               if (fcp->hash_count > 2 * fc->high_watermark ||
+                   atomic_read(&net->xfrm.flow_cache_gc_count) > fc->high_watermark) {
+                       atomic_inc(&net->xfrm.flow_cache_genid);
+                       flo = ERR_PTR(-ENOBUFS);
+                       goto ret_object;
+               }
+
                fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
                if (fle) {
                        fle->net = net;
@@ -446,6 +457,7 @@ int flow_cache_init(struct net *net)
        INIT_WORK(&net->xfrm.flow_cache_gc_work, flow_cache_gc_task);
        INIT_WORK(&net->xfrm.flow_cache_flush_work, flow_cache_flush_task);
        mutex_init(&net->xfrm.flow_flush_sem);
+       atomic_set(&net->xfrm.flow_cache_gc_count, 0);
 
        fc->hash_shift = 10;
        fc->low_watermark = 2 * flow_cache_hash_size(fc);
index e640462ea8bf55bdd1645bbe9a68f459c2afe271..f96ee8b9478d8a752e78e1ca39c2351f5863f054 100644 (file)
@@ -25,9 +25,9 @@
 
 
 static inline int
-gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size)
+gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size, int padattr)
 {
-       if (nla_put(d->skb, type, size, buf))
+       if (nla_put_64bit(d->skb, type, size, buf, padattr))
                goto nla_put_failure;
        return 0;
 
@@ -59,7 +59,8 @@ nla_put_failure:
  */
 int
 gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type,
-       int xstats_type, spinlock_t *lock, struct gnet_dump *d)
+                            int xstats_type, spinlock_t *lock,
+                            struct gnet_dump *d, int padattr)
        __acquires(lock)
 {
        memset(d, 0, sizeof(*d));
@@ -71,16 +72,17 @@ gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type,
        d->skb = skb;
        d->compat_tc_stats = tc_stats_type;
        d->compat_xstats = xstats_type;
+       d->padattr = padattr;
 
        if (d->tail)
-               return gnet_stats_copy(d, type, NULL, 0);
+               return gnet_stats_copy(d, type, NULL, 0, padattr);
 
        return 0;
 }
 EXPORT_SYMBOL(gnet_stats_start_copy_compat);
 
 /**
- * gnet_stats_start_copy_compat - start dumping procedure in compatibility mode
+ * gnet_stats_start_copy - start dumping procedure
  * @skb: socket buffer to put statistics TLVs into
  * @type: TLV type for top level statistic TLV
  * @lock: statistics lock
@@ -94,9 +96,9 @@ EXPORT_SYMBOL(gnet_stats_start_copy_compat);
  */
 int
 gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
-       struct gnet_dump *d)
+                     struct gnet_dump *d, int padattr)
 {
-       return gnet_stats_start_copy_compat(skb, type, 0, 0, lock, d);
+       return gnet_stats_start_copy_compat(skb, type, 0, 0, lock, d, padattr);
 }
 EXPORT_SYMBOL(gnet_stats_start_copy);
 
@@ -169,7 +171,8 @@ gnet_stats_copy_basic(struct gnet_dump *d,
                memset(&sb, 0, sizeof(sb));
                sb.bytes = bstats.bytes;
                sb.packets = bstats.packets;
-               return gnet_stats_copy(d, TCA_STATS_BASIC, &sb, sizeof(sb));
+               return gnet_stats_copy(d, TCA_STATS_BASIC, &sb, sizeof(sb),
+                                      TCA_STATS_PAD);
        }
        return 0;
 }
@@ -208,11 +211,13 @@ gnet_stats_copy_rate_est(struct gnet_dump *d,
        }
 
        if (d->tail) {
-               res = gnet_stats_copy(d, TCA_STATS_RATE_EST, &est, sizeof(est));
+               res = gnet_stats_copy(d, TCA_STATS_RATE_EST, &est, sizeof(est),
+                                     TCA_STATS_PAD);
                if (res < 0 || est.bps == r->bps)
                        return res;
                /* emit 64bit stats only if needed */
-               return gnet_stats_copy(d, TCA_STATS_RATE_EST64, r, sizeof(*r));
+               return gnet_stats_copy(d, TCA_STATS_RATE_EST64, r, sizeof(*r),
+                                      TCA_STATS_PAD);
        }
 
        return 0;
@@ -286,7 +291,8 @@ gnet_stats_copy_queue(struct gnet_dump *d,
 
        if (d->tail)
                return gnet_stats_copy(d, TCA_STATS_QUEUE,
-                                      &qstats, sizeof(qstats));
+                                      &qstats, sizeof(qstats),
+                                      TCA_STATS_PAD);
 
        return 0;
 }
@@ -316,7 +322,8 @@ gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
        }
 
        if (d->tail)
-               return gnet_stats_copy(d, TCA_STATS_APP, st, len);
+               return gnet_stats_copy(d, TCA_STATS_APP, st, len,
+                                      TCA_STATS_PAD);
 
        return 0;
 
@@ -347,12 +354,12 @@ gnet_stats_finish_copy(struct gnet_dump *d)
 
        if (d->compat_tc_stats)
                if (gnet_stats_copy(d, d->compat_tc_stats, &d->tc_stats,
-                       sizeof(d->tc_stats)) < 0)
+                                   sizeof(d->tc_stats), d->padattr) < 0)
                        return -1;
 
        if (d->compat_xstats && d->xstats) {
                if (gnet_stats_copy(d, d->compat_xstats, d->xstats,
-                       d->xstats_len) < 0)
+                                   d->xstats_len, d->padattr) < 0)
                        return -1;
        }
 
index f18ae91b652e971ccba5c03177301f1a46a8da57..29dd8cc22bbf87a05d555ff16d62e6dee0b536d4 100644 (file)
@@ -1763,21 +1763,22 @@ static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
                        NEIGH_VAR(parms, MCAST_PROBES)) ||
            nla_put_u32(skb, NDTPA_MCAST_REPROBES,
                        NEIGH_VAR(parms, MCAST_REPROBES)) ||
-           nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time) ||
+           nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
+                         NDTPA_PAD) ||
            nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
-                         NEIGH_VAR(parms, BASE_REACHABLE_TIME)) ||
+                         NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
            nla_put_msecs(skb, NDTPA_GC_STALETIME,
-                         NEIGH_VAR(parms, GC_STALETIME)) ||
+                         NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
            nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
-                         NEIGH_VAR(parms, DELAY_PROBE_TIME)) ||
+                         NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
            nla_put_msecs(skb, NDTPA_RETRANS_TIME,
-                         NEIGH_VAR(parms, RETRANS_TIME)) ||
+                         NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
            nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
-                         NEIGH_VAR(parms, ANYCAST_DELAY)) ||
+                         NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
            nla_put_msecs(skb, NDTPA_PROXY_DELAY,
-                         NEIGH_VAR(parms, PROXY_DELAY)) ||
+                         NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
            nla_put_msecs(skb, NDTPA_LOCKTIME,
-                         NEIGH_VAR(parms, LOCKTIME)))
+                         NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD))
                goto nla_put_failure;
        return nla_nest_end(skb, nest);
 
@@ -1804,7 +1805,7 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
        ndtmsg->ndtm_pad2   = 0;
 
        if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
-           nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval) ||
+           nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
            nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
            nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
            nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
@@ -1856,7 +1857,8 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
                        ndst.ndts_table_fulls           += st->table_fulls;
                }
 
-               if (nla_put(skb, NDTA_STATS, sizeof(ndst), &ndst))
+               if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
+                                 NDTA_PAD))
                        goto nla_put_failure;
        }
 
index 2bf83299600a4ac8eb56069295bbc755709642b1..14d09345f00d96ca6a54328c3da2564a5d51044a 100644 (file)
@@ -162,7 +162,8 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
                   "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
                   sd->processed, sd->dropped, sd->time_squeeze, 0,
                   0, 0, 0, 0, /* was fastroute */
-                  sd->cpu_collision, sd->received_rps, flow_limit_count);
+                  0,   /* was cpu_collision */
+                  sd->received_rps, flow_limit_count);
        return 0;
 }
 
index a57bd17805b494be077f1c9d0d358ae7373ee18f..94acfc89ad976da245bed577511bd7a1c079f6f5 100644 (file)
@@ -603,6 +603,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
        const struct net_device_ops *ops;
        int err;
 
+       np->dev = ndev;
        strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
        INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);
 
@@ -669,7 +670,6 @@ int netpoll_setup(struct netpoll *np)
                goto unlock;
        }
        dev_hold(ndev);
-       np->dev = ndev;
 
        if (netdev_master_upper_dev_get(ndev)) {
                np_err(np, "%s is a slave device, aborting\n", np->dev_name);
@@ -770,7 +770,6 @@ int netpoll_setup(struct netpoll *np)
        return 0;
 
 put:
-       np->dev = NULL;
        dev_put(ndev);
 unlock:
        rtnl_unlock();
index 20999aa596dd0296bbfec23f1ea5dda0a4adf244..8604ae2459608f36c2666fed3b2db2d06eda98af 100644 (file)
@@ -3472,7 +3472,6 @@ xmit_more:
                                     pkt_dev->odevname, ret);
                pkt_dev->errors++;
                /* fallthru */
-       case NETDEV_TX_LOCKED:
        case NETDEV_TX_BUSY:
                /* Retry it next time */
                atomic_dec(&(pkt_dev->skb->users));
index a75f7e94b4456eed264a8d52c460f0b6bbf72569..d69c4644f8f2ca5a4e1bda711672af2dafcf604c 100644 (file)
@@ -808,11 +808,6 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
        a->rx_nohandler = b->rx_nohandler;
 }
 
-static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b)
-{
-       memcpy(v, b, sizeof(*b));
-}
-
 /* All VF info */
 static inline int rtnl_vfinfo_size(const struct net_device *dev,
                                   u32 ext_filter_mask)
@@ -830,17 +825,17 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
                         nla_total_size(sizeof(struct ifla_vf_link_state)) +
                         nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
                         /* IFLA_VF_STATS_RX_PACKETS */
-                        nla_total_size(sizeof(__u64)) +
+                        nla_total_size_64bit(sizeof(__u64)) +
                         /* IFLA_VF_STATS_TX_PACKETS */
-                        nla_total_size(sizeof(__u64)) +
+                        nla_total_size_64bit(sizeof(__u64)) +
                         /* IFLA_VF_STATS_RX_BYTES */
-                        nla_total_size(sizeof(__u64)) +
+                        nla_total_size_64bit(sizeof(__u64)) +
                         /* IFLA_VF_STATS_TX_BYTES */
-                        nla_total_size(sizeof(__u64)) +
+                        nla_total_size_64bit(sizeof(__u64)) +
                         /* IFLA_VF_STATS_BROADCAST */
-                        nla_total_size(sizeof(__u64)) +
+                        nla_total_size_64bit(sizeof(__u64)) +
                         /* IFLA_VF_STATS_MULTICAST */
-                        nla_total_size(sizeof(__u64)) +
+                        nla_total_size_64bit(sizeof(__u64)) +
                         nla_total_size(sizeof(struct ifla_vf_trust)));
                return size;
        } else
@@ -881,9 +876,9 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
               + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
               + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
               + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
-              + nla_total_size(sizeof(struct rtnl_link_ifmap))
+              + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
               + nla_total_size(sizeof(struct rtnl_link_stats))
-              + nla_total_size(sizeof(struct rtnl_link_stats64))
+              + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
               + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
               + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
               + nla_total_size(4) /* IFLA_TXQLEN */
@@ -1054,25 +1049,23 @@ static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
 static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
                                              struct net_device *dev)
 {
-       const struct rtnl_link_stats64 *stats;
-       struct rtnl_link_stats64 temp;
+       struct rtnl_link_stats64 *sp;
        struct nlattr *attr;
 
-       stats = dev_get_stats(dev, &temp);
-
-       attr = nla_reserve(skb, IFLA_STATS,
-                          sizeof(struct rtnl_link_stats));
+       attr = nla_reserve_64bit(skb, IFLA_STATS64,
+                                sizeof(struct rtnl_link_stats64), IFLA_PAD);
        if (!attr)
                return -EMSGSIZE;
 
-       copy_rtnl_link_stats(nla_data(attr), stats);
+       sp = nla_data(attr);
+       dev_get_stats(dev, sp);
 
-       attr = nla_reserve(skb, IFLA_STATS64,
-                          sizeof(struct rtnl_link_stats64));
+       attr = nla_reserve(skb, IFLA_STATS,
+                          sizeof(struct rtnl_link_stats));
        if (!attr)
                return -EMSGSIZE;
 
-       copy_rtnl_link_stats64(nla_data(attr), stats);
+       copy_rtnl_link_stats(nla_data(attr), sp);
 
        return 0;
 }
@@ -1160,18 +1153,18 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
                nla_nest_cancel(skb, vfinfo);
                return -EMSGSIZE;
        }
-       if (nla_put_u64(skb, IFLA_VF_STATS_RX_PACKETS,
-                       vf_stats.rx_packets) ||
-           nla_put_u64(skb, IFLA_VF_STATS_TX_PACKETS,
-                       vf_stats.tx_packets) ||
-           nla_put_u64(skb, IFLA_VF_STATS_RX_BYTES,
-                       vf_stats.rx_bytes) ||
-           nla_put_u64(skb, IFLA_VF_STATS_TX_BYTES,
-                       vf_stats.tx_bytes) ||
-           nla_put_u64(skb, IFLA_VF_STATS_BROADCAST,
-                       vf_stats.broadcast) ||
-           nla_put_u64(skb, IFLA_VF_STATS_MULTICAST,
-                       vf_stats.multicast))
+       if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
+                             vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
+           nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
+                             vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
+           nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
+                             vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
+           nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
+                             vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
+           nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
+                             vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
+           nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
+                             vf_stats.multicast, IFLA_VF_STATS_PAD))
                return -EMSGSIZE;
        nla_nest_end(skb, vfstats);
        nla_nest_end(skb, vf);
@@ -1180,15 +1173,17 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
 
 static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
 {
-       struct rtnl_link_ifmap map = {
-               .mem_start   = dev->mem_start,
-               .mem_end     = dev->mem_end,
-               .base_addr   = dev->base_addr,
-               .irq         = dev->irq,
-               .dma         = dev->dma,
-               .port        = dev->if_port,
-       };
-       if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
+       struct rtnl_link_ifmap map;
+
+       memset(&map, 0, sizeof(map));
+       map.mem_start   = dev->mem_start;
+       map.mem_end     = dev->mem_end;
+       map.base_addr   = dev->base_addr;
+       map.irq         = dev->irq;
+       map.dma         = dev->dma;
+       map.port        = dev->if_port;
+
+       if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
                return -EMSGSIZE;
 
        return 0;
@@ -3451,6 +3446,202 @@ out:
        return err;
 }
 
+static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr)
+{
+       return (mask & IFLA_STATS_FILTER_BIT(attrid)) &&
+              (!idxattr || idxattr == attrid);
+}
+
+static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
+                              int type, u32 pid, u32 seq, u32 change,
+                              unsigned int flags, unsigned int filter_mask,
+                              int *idxattr, int *prividx)
+{
+       struct if_stats_msg *ifsm;
+       struct nlmsghdr *nlh;
+       struct nlattr *attr;
+       int s_prividx = *prividx;
+
+       ASSERT_RTNL();
+
+       nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags);
+       if (!nlh)
+               return -EMSGSIZE;
+
+       ifsm = nlmsg_data(nlh);
+       ifsm->ifindex = dev->ifindex;
+       ifsm->filter_mask = filter_mask;
+
+       if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) {
+               struct rtnl_link_stats64 *sp;
+
+               attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
+                                        sizeof(struct rtnl_link_stats64),
+                                        IFLA_STATS_UNSPEC);
+               if (!attr)
+                       goto nla_put_failure;
+
+               sp = nla_data(attr);
+               dev_get_stats(dev, sp);
+       }
+
+       if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) {
+               const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
+
+               if (ops && ops->fill_linkxstats) {
+                       int err;
+
+                       *idxattr = IFLA_STATS_LINK_XSTATS;
+                       attr = nla_nest_start(skb,
+                                             IFLA_STATS_LINK_XSTATS);
+                       if (!attr)
+                               goto nla_put_failure;
+
+                       err = ops->fill_linkxstats(skb, dev, prividx);
+                       nla_nest_end(skb, attr);
+                       if (err)
+                               goto nla_put_failure;
+                       *idxattr = 0;
+               }
+       }
+
+       nlmsg_end(skb, nlh);
+
+       return 0;
+
+nla_put_failure:
+       /* not a multi message or no progress means a real error */
+       if (!(flags & NLM_F_MULTI) || s_prividx == *prividx)
+               nlmsg_cancel(skb, nlh);
+       else
+               nlmsg_end(skb, nlh);
+
+       return -EMSGSIZE;
+}
+
+static const struct nla_policy ifla_stats_policy[IFLA_STATS_MAX + 1] = {
+       [IFLA_STATS_LINK_64]    = { .len = sizeof(struct rtnl_link_stats64) },
+};
+
+static size_t if_nlmsg_stats_size(const struct net_device *dev,
+                                 u32 filter_mask)
+{
+       size_t size = 0;
+
+       if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
+               size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));
+
+       if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) {
+               const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
+
+               if (ops && ops->get_linkxstats_size) {
+                       size += nla_total_size(ops->get_linkxstats_size(dev));
+                       /* for IFLA_STATS_LINK_XSTATS */
+                       size += nla_total_size(0);
+               }
+       }
+
+       return size;
+}
+
+static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+       struct net *net = sock_net(skb->sk);
+       struct net_device *dev = NULL;
+       int idxattr = 0, prividx = 0;
+       struct if_stats_msg *ifsm;
+       struct sk_buff *nskb;
+       u32 filter_mask;
+       int err;
+
+       ifsm = nlmsg_data(nlh);
+       if (ifsm->ifindex > 0)
+               dev = __dev_get_by_index(net, ifsm->ifindex);
+       else
+               return -EINVAL;
+
+       if (!dev)
+               return -ENODEV;
+
+       filter_mask = ifsm->filter_mask;
+       if (!filter_mask)
+               return -EINVAL;
+
+       nskb = nlmsg_new(if_nlmsg_stats_size(dev, filter_mask), GFP_KERNEL);
+       if (!nskb)
+               return -ENOBUFS;
+
+       err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
+                                 NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
+                                 0, filter_mask, &idxattr, &prividx);
+       if (err < 0) {
+               /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */
+               WARN_ON(err == -EMSGSIZE);
+               kfree_skb(nskb);
+       } else {
+               err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
+       }
+
+       return err;
+}
+
+static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       int h, s_h, err, s_idx, s_idxattr, s_prividx;
+       struct net *net = sock_net(skb->sk);
+       unsigned int flags = NLM_F_MULTI;
+       struct if_stats_msg *ifsm;
+       struct hlist_head *head;
+       struct net_device *dev;
+       u32 filter_mask = 0;
+       int idx = 0;
+
+       s_h = cb->args[0];
+       s_idx = cb->args[1];
+       s_idxattr = cb->args[2];
+       s_prividx = cb->args[3];
+
+       cb->seq = net->dev_base_seq;
+
+       ifsm = nlmsg_data(cb->nlh);
+       filter_mask = ifsm->filter_mask;
+       if (!filter_mask)
+               return -EINVAL;
+
+       for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
+               idx = 0;
+               head = &net->dev_index_head[h];
+               hlist_for_each_entry(dev, head, index_hlist) {
+                       if (idx < s_idx)
+                               goto cont;
+                       err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
+                                                 NETLINK_CB(cb->skb).portid,
+                                                 cb->nlh->nlmsg_seq, 0,
+                                                 flags, filter_mask,
+                                                 &s_idxattr, &s_prividx);
+                       /* If we ran out of room on the first message,
+                        * we're in trouble
+                        */
+                       WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
+
+                       if (err < 0)
+                               goto out;
+                       s_prividx = 0;
+                       s_idxattr = 0;
+                       nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+cont:
+                       idx++;
+               }
+       }
+out:
+       cb->args[3] = s_prividx;
+       cb->args[2] = s_idxattr;
+       cb->args[1] = idx;
+       cb->args[0] = h;
+
+       return skb->len;
+}
+
 /* Process one rtnetlink message. */
 
 static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
@@ -3600,4 +3791,7 @@ void __init rtnetlink_init(void)
        rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, NULL);
        rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, NULL);
        rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, NULL);
+
+       rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,
+                     NULL);
 }
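
From userspace the new message is a plain rtnetlink request: an if_stats_msg
header plus a filter mask built with IFLA_STATS_FILTER_BIT(). A minimal query
sketch against the 4.7 uapi headers (error handling trimmed; nothing here is
part of this patch):

#include <linux/if_link.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int request_link_stats(int ifindex)
{
        struct {
                struct nlmsghdr nlh;
                struct if_stats_msg ifsm;
        } req;
        struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

        if (fd < 0)
                return -1;
        memset(&req, 0, sizeof(req));
        req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(req.ifsm));
        req.nlh.nlmsg_type = RTM_GETSTATS;
        req.nlh.nlmsg_flags = NLM_F_REQUEST;
        req.ifsm.ifindex = ifindex;
        req.ifsm.filter_mask = IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64);
        if (sendto(fd, &req, req.nlh.nlmsg_len, 0,
                   (struct sockaddr *)&sa, sizeof(sa)) < 0) {
                close(fd);
                return -1;
        }
        /* a real client would recv() here and walk IFLA_STATS_LINK_64 */
        close(fd);
        return 0;
}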
index d04c2d1c8c87d79e89fa9f2a4de97ea9262a7787..f2b77e549c03a771909cd9c87c40ec2b7826cd31 100644 (file)
@@ -3076,11 +3076,11 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
        struct sk_buff *frag_skb = head_skb;
        unsigned int offset = doffset;
        unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
+       unsigned int partial_segs = 0;
        unsigned int headroom;
-       unsigned int len;
+       unsigned int len = head_skb->len;
        __be16 proto;
-       bool csum;
-       int sg = !!(features & NETIF_F_SG);
+       bool csum, sg;
        int nfrags = skb_shinfo(head_skb)->nr_frags;
        int err = -ENOMEM;
        int i = 0;
@@ -3092,8 +3092,21 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
        if (unlikely(!proto))
                return ERR_PTR(-EINVAL);
 
+       sg = !!(features & NETIF_F_SG);
        csum = !!can_checksum_protocol(features, proto);
 
+       /* GSO partial only requires that we trim off any excess that
+        * doesn't fit into an MSS-sized block, so take care of that
+        * now.
+        */
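+       /* Illustrative numbers: len = 65000 and mss = 1400 give
+        * partial_segs = 46, so mss below becomes 64400 and the loop
+        * emits one 64400 byte chunk plus a 600 byte remainder; the
+        * wire-level gso_size/gso_segs are restored on the first
+        * segment near the end of this function.
+        */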
+       if (sg && csum && (features & NETIF_F_GSO_PARTIAL)) {
+               partial_segs = len / mss;
+               if (partial_segs > 1)
+                       mss *= partial_segs;
+               else
+                       partial_segs = 0;
+       }
+
        headroom = skb_headroom(head_skb);
        pos = skb_headlen(head_skb);
 
@@ -3281,6 +3294,23 @@ perform_csum_check:
         */
        segs->prev = tail;
 
+       /* Update GSO info on first skb in partial sequence. */
+       if (partial_segs) {
+               int type = skb_shinfo(head_skb)->gso_type;
+
+               /* Update type to add partial and then remove dodgy if set */
+               type |= SKB_GSO_PARTIAL;
+               type &= ~SKB_GSO_DODGY;
+
+               /* Update GSO info and prepare to start updating headers on
+                * our way back down the stack of protocols.
+                */
+               skb_shinfo(segs)->gso_size = skb_shinfo(head_skb)->gso_size;
+               skb_shinfo(segs)->gso_segs = partial_segs;
+               skb_shinfo(segs)->gso_type = type;
+               SKB_GSO_CB(segs)->data_offset = skb_headroom(segs) + doffset;
+       }
+
        /* Following permits correct backpressure, for protocols
         * using skb_set_owner_w().
         * Idea is to transfer ownership from head_skb to last segment.
@@ -4502,13 +4532,16 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
                __skb_push(skb, offset);
                err = __vlan_insert_tag(skb, skb->vlan_proto,
                                        skb_vlan_tag_get(skb));
-               if (err)
+               if (err) {
+                       __skb_pull(skb, offset);
                        return err;
+               }
+
                skb->protocol = skb->vlan_proto;
                skb->mac_len += VLAN_HLEN;
-               __skb_pull(skb, offset);
 
                skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
+               __skb_pull(skb, offset);
        }
        __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
        return 0;
@@ -4592,3 +4625,239 @@ failure:
        return NULL;
 }
 EXPORT_SYMBOL(alloc_skb_with_frags);
+
+/* Carve out the first off bytes from skb when off < headlen: allocate a
+ * fresh head, copy over the surviving headlen - off bytes of linear data,
+ * keep the frags and swap the new head in.
+ */
+static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
+                                   const int headlen, gfp_t gfp_mask)
+{
+       int i;
+       int size = skb_end_offset(skb);
+       int new_hlen = headlen - off;
+       u8 *data;
+
+       size = SKB_DATA_ALIGN(size);
+
+       if (skb_pfmemalloc(skb))
+               gfp_mask |= __GFP_MEMALLOC;
+       data = kmalloc_reserve(size +
+                              SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+                              gfp_mask, NUMA_NO_NODE, NULL);
+       if (!data)
+               return -ENOMEM;
+
+       size = SKB_WITH_OVERHEAD(ksize(data));
+
+       /* Copy real data, and all frags */
+       skb_copy_from_linear_data_offset(skb, off, data, new_hlen);
+       skb->len -= off;
+
+       memcpy((struct skb_shared_info *)(data + size),
+              skb_shinfo(skb),
+              offsetof(struct skb_shared_info,
+                       frags[skb_shinfo(skb)->nr_frags]));
+       if (skb_cloned(skb)) {
+               /* drop the old head gracefully */
+               if (skb_orphan_frags(skb, gfp_mask)) {
+                       kfree(data);
+                       return -ENOMEM;
+               }
+               for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+                       skb_frag_ref(skb, i);
+               if (skb_has_frag_list(skb))
+                       skb_clone_fraglist(skb);
+               skb_release_data(skb);
+       } else {
+               /* we can reuse the existing refcount - all we did was
+                * relocate values
+                */
+               skb_free_head(skb);
+       }
+
+       skb->head = data;
+       skb->data = data;
+       skb->head_frag = 0;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+       skb->end = size;
+#else
+       skb->end = skb->head + size;
+#endif
+       skb_set_tail_pointer(skb, skb_headlen(skb));
+       skb_headers_offset_update(skb, 0);
+       skb->cloned = 0;
+       skb->hdr_len = 0;
+       skb->nohdr = 0;
+       atomic_set(&skb_shinfo(skb)->dataref, 1);
+
+       return 0;
+}
+
+static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);
+
+/* carve out the first eat bytes from skb's frag_list. May recurse into
+ * pskb_carve()
+ */
+static int pskb_carve_frag_list(struct sk_buff *skb,
+                               struct skb_shared_info *shinfo, int eat,
+                               gfp_t gfp_mask)
+{
+       struct sk_buff *list = shinfo->frag_list;
+       struct sk_buff *clone = NULL;
+       struct sk_buff *insp = NULL;
+
+       do {
+               if (!list) {
+                       pr_err("Not enough bytes to eat. Want %d\n", eat);
+                       return -EFAULT;
+               }
+               if (list->len <= eat) {
+                       /* Eaten as whole. */
+                       eat -= list->len;
+                       list = list->next;
+                       insp = list;
+               } else {
+                       /* Eaten partially. */
+                       if (skb_shared(list)) {
+                               clone = skb_clone(list, gfp_mask);
+                               if (!clone)
+                                       return -ENOMEM;
+                               insp = list->next;
+                               list = clone;
+                       } else {
+                               /* This may be pulled without problems. */
+                               insp = list;
+                       }
+                       if (pskb_carve(list, eat, gfp_mask) < 0) {
+                               kfree_skb(clone);
+                               return -ENOMEM;
+                       }
+                       break;
+               }
+       } while (eat);
+
+       /* Free pulled out fragments. */
+       while ((list = shinfo->frag_list) != insp) {
+               shinfo->frag_list = list->next;
+               kfree_skb(list);
+       }
+       /* And insert new clone at head. */
+       if (clone) {
+               clone->next = list;
+               shinfo->frag_list = clone;
+       }
+       return 0;
+}
+
+/* carve off the first off bytes from skb. Split line (off) is in the
+ * non-linear part of skb
+ */
+static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
+                                      int pos, gfp_t gfp_mask)
+{
+       int i, k = 0;
+       int size = skb_end_offset(skb);
+       u8 *data;
+       const int nfrags = skb_shinfo(skb)->nr_frags;
+       struct skb_shared_info *shinfo;
+
+       size = SKB_DATA_ALIGN(size);
+
+       if (skb_pfmemalloc(skb))
+               gfp_mask |= __GFP_MEMALLOC;
+       data = kmalloc_reserve(size +
+                              SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+                              gfp_mask, NUMA_NO_NODE, NULL);
+       if (!data)
+               return -ENOMEM;
+
+       size = SKB_WITH_OVERHEAD(ksize(data));
+
+       memcpy((struct skb_shared_info *)(data + size),
+              skb_shinfo(skb), offsetof(struct skb_shared_info,
+                                        frags[skb_shinfo(skb)->nr_frags]));
+       if (skb_orphan_frags(skb, gfp_mask)) {
+               kfree(data);
+               return -ENOMEM;
+       }
+       shinfo = (struct skb_shared_info *)(data + size);
+       for (i = 0; i < nfrags; i++) {
+               int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+
+               if (pos + fsize > off) {
+                       shinfo->frags[k] = skb_shinfo(skb)->frags[i];
+
+                       if (pos < off) {
+                               /* Split frag.
+                                * We have two variants in this case:
+                                * 1. Move the whole frag to the second
+                                *    part, if it is possible. F.e.
+                                *    this approach is mandatory for TUX,
+                                *    where splitting is expensive.
+                                * 2. Split the frag accurately. This is
+                                *    what we do here.
+                                */
+                               shinfo->frags[0].page_offset += off - pos;
+                               skb_frag_size_sub(&shinfo->frags[0], off - pos);
+                       }
+                       skb_frag_ref(skb, i);
+                       k++;
+               }
+               pos += fsize;
+       }
+       shinfo->nr_frags = k;
+       if (skb_has_frag_list(skb))
+               skb_clone_fraglist(skb);
+
+       if (k == 0) {
+               /* split line is in frag list */
+               pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask);
+       }
+       skb_release_data(skb);
+
+       skb->head = data;
+       skb->head_frag = 0;
+       skb->data = data;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+       skb->end = size;
+#else
+       skb->end = skb->head + size;
+#endif
+       skb_reset_tail_pointer(skb);
+       skb_headers_offset_update(skb, 0);
+       skb->cloned   = 0;
+       skb->hdr_len  = 0;
+       skb->nohdr    = 0;
+       skb->len -= off;
+       skb->data_len = skb->len;
+       atomic_set(&skb_shinfo(skb)->dataref, 1);
+       return 0;
+}
+
+/* remove len bytes from the beginning of the skb */
+static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp)
+{
+       int headlen = skb_headlen(skb);
+
+       if (len < headlen)
+               return pskb_carve_inside_header(skb, len, headlen, gfp);
+       else
+               return pskb_carve_inside_nonlinear(skb, len, headlen, gfp);
+}
+
+/* Extract to_copy bytes starting at off from skb, and return this in
+ * a new skb
+ */
+struct sk_buff *pskb_extract(struct sk_buff *skb, int off,
+                            int to_copy, gfp_t gfp)
+{
+       struct sk_buff  *clone = skb_clone(skb, gfp);
+
+       if (!clone)
+               return NULL;
+
+       if (pskb_carve(clone, off, gfp) < 0 ||
+           pskb_trim(clone, to_copy)) {
+               kfree_skb(clone);
+               return NULL;
+       }
+       return clone;
+}
+EXPORT_SYMBOL(pskb_extract);
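
pskb_extract() clones the skb and then carves and trims the clone, so the
original skb is left intact apart from frag reference counts. A minimal
usage sketch; the surrounding function and its names are illustrative,
not part of this tree:

/* Sketch: peel the payload out of an incoming packet into its own skb.
 * 'hdr_len' and 'payload_len' are assumed to have been validated against
 * skb->len by the caller.
 */
static struct sk_buff *extract_payload(struct sk_buff *skb, int hdr_len,
				       int payload_len)
{
	struct sk_buff *payload;

	payload = pskb_extract(skb, hdr_len, payload_len, GFP_ATOMIC);
	if (!payload)
		return NULL;	/* clone/carve/trim failed */

	/* 'payload' now holds bytes [hdr_len, hdr_len + payload_len) of
	 * the original packet and can be queued independently of 'skb'.
	 */
	return payload;
}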
index 152274d188ef39f3a674d57e3606d9a97338c5d5..08bf97eceeb3827d0b237d8c01910e5e0a0f5d6a 100644 (file)
@@ -221,7 +221,8 @@ static const char *const af_family_key_strings[AF_MAX+1] = {
   "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
   "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
   "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
-  "sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_MAX"
+  "sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_KCM"      ,
+  "sk_lock-AF_MAX"
 };
 static const char *const af_family_slock_key_strings[AF_MAX+1] = {
   "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
@@ -237,7 +238,8 @@ static const char *const af_family_slock_key_strings[AF_MAX+1] = {
   "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
   "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
   "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
-  "slock-AF_NFC"   , "slock-AF_VSOCK"    ,"slock-AF_MAX"
+  "slock-AF_NFC"   , "slock-AF_VSOCK"    ,"slock-AF_KCM"       ,
+  "slock-AF_MAX"
 };
 static const char *const af_family_clock_key_strings[AF_MAX+1] = {
   "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
@@ -253,7 +255,8 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = {
   "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
   "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
   "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
-  "clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_MAX"
+  "clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_KCM"      ,
+  "clock-AF_MAX"
 };
 
 /*
@@ -1652,6 +1655,17 @@ void sock_wfree(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(sock_wfree);
 
+/* This variant of sock_wfree() is used by TCP,
+ * since it sets SOCK_USE_WRITE_QUEUE.
+ */
+void __sock_wfree(struct sk_buff *skb)
+{
+       struct sock *sk = skb->sk;
+
+       if (atomic_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
+               __sk_free(sk);
+}
+
 void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
 {
        skb_orphan(skb);
@@ -1674,8 +1688,21 @@ void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
 }
 EXPORT_SYMBOL(skb_set_owner_w);
 
+/* This helper is used by netem, as it can hold packets in its
+ * delay queue. We want to allow the owner socket to send more
+ * packets, as if they were already TX completed by a typical driver.
+ * But we also want to keep skb->sk set because some packet schedulers
+ * rely on it (sch_fq for example). So we set skb->truesize to a small
+ * amount (1) and decrease sk_wmem_alloc accordingly.
+ */
 void skb_orphan_partial(struct sk_buff *skb)
 {
+       /* If this skb is a TCP pure ACK or has already been through here,
+        * we have nothing to do. 2 is already a very small truesize.
+        */
+       if (skb->truesize <= 2)
+               return;
+
        /* TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
         * so we do not completely orphan skb, but transfer all
         * accounted bytes but one, to avoid unexpected reorders.
@@ -2016,33 +2043,27 @@ static void __release_sock(struct sock *sk)
        __releases(&sk->sk_lock.slock)
        __acquires(&sk->sk_lock.slock)
 {
-       struct sk_buff *skb = sk->sk_backlog.head;
+       struct sk_buff *skb, *next;
 
-       do {
+       while ((skb = sk->sk_backlog.head) != NULL) {
                sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
-               bh_unlock_sock(sk);
 
-               do {
-                       struct sk_buff *next = skb->next;
+               spin_unlock_bh(&sk->sk_lock.slock);
 
+               do {
+                       next = skb->next;
                        prefetch(next);
                        WARN_ON_ONCE(skb_dst_is_noref(skb));
                        skb->next = NULL;
                        sk_backlog_rcv(sk, skb);
 
-                       /*
-                        * We are in process context here with softirqs
-                        * disabled, use cond_resched_softirq() to preempt.
-                        * This is safe to do because we've taken the backlog
-                        * queue private:
-                        */
-                       cond_resched_softirq();
+                       cond_resched();
 
                        skb = next;
                } while (skb != NULL);
 
-               bh_lock_sock(sk);
-       } while ((skb = sk->sk_backlog.head) != NULL);
+               spin_lock_bh(&sk->sk_lock.slock);
+       }
 
        /*
         * Doing the zeroing here guarantees we cannot loop forever
@@ -2051,6 +2072,13 @@ static void __release_sock(struct sock *sk)
        sk->sk_backlog.len = 0;
 }
 
+void __sk_flush_backlog(struct sock *sk)
+{
+       spin_lock_bh(&sk->sk_lock.slock);
+       __release_sock(sk);
+       spin_unlock_bh(&sk->sk_lock.slock);
+}
+
 /**
  * sk_wait_data - wait for data to arrive at sk_receive_queue
  * @sk:    sock to wait on
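
With the backlog now processed under the plain slock, __sk_flush_backlog()
lets a sender that owns the socket lock drain packets queued by softirq
without a full release_sock()/lock_sock() round trip. A minimal sketch of
the intended call pattern; the loop body is illustrative:

/* Sketch, assuming the caller holds the socket lock via lock_sock().
 * Draining the backlog inside a long transmit loop keeps softirq-queued
 * packets from piling up until release_sock().
 */
static void long_sendmsg_loop(struct sock *sk, int chunks)
{
	int i;

	for (i = 0; i < chunks; i++) {
		/* ... queue the next chunk of data here ... */

		if (sk->sk_backlog.tail)	/* anything queued by softirq? */
			__sk_flush_backlog(sk);
	}
}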
index ca9e35bbe13ca1f6f7100f755c046ea60f624a06..6b10573cc9faa790fe261b452b85f3b774c3ec21 100644 (file)
@@ -120,7 +120,7 @@ static size_t sock_diag_nlmsg_size(void)
 {
        return NLMSG_ALIGN(sizeof(struct inet_diag_msg)
               + nla_total_size(sizeof(u8)) /* INET_DIAG_PROTOCOL */
-              + nla_total_size(sizeof(struct tcp_info))); /* INET_DIAG_INFO */
+              + nla_total_size_64bit(sizeof(struct tcp_info))); /* INET_DIAG_INFO */
 }
 
 static void sock_diag_broadcast_destroy_work(struct work_struct *work)
index b0e28d24e1a749ce1cfa7204a7cbdd201da8368f..0c55ffb859bf5ddcb34d08ff707113f48e4d500d 100644 (file)
@@ -198,9 +198,9 @@ struct dccp_mib {
 };
 
 DECLARE_SNMP_STAT(struct dccp_mib, dccp_statistics);
-#define DCCP_INC_STATS(field)      SNMP_INC_STATS(dccp_statistics, field)
-#define DCCP_INC_STATS_BH(field)    SNMP_INC_STATS_BH(dccp_statistics, field)
-#define DCCP_DEC_STATS(field)      SNMP_DEC_STATS(dccp_statistics, field)
+#define DCCP_INC_STATS(field)  SNMP_INC_STATS(dccp_statistics, field)
+#define __DCCP_INC_STATS(field)        __SNMP_INC_STATS(dccp_statistics, field)
+#define DCCP_DEC_STATS(field)  SNMP_DEC_STATS(dccp_statistics, field)
 
 /*
  *     Checksumming routines
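
The _BH suffix gives way to a double-underscore convention across these MIB
macros: the plain form is safe from any context, while the __ form relies on
the caller already running with softirqs disabled (typically the receive
path) and can use the cheaper per-cpu ops. A sketch of the two contexts,
assuming that convention:

/* Sketch of the naming convention introduced here:
 *   DCCP_INC_STATS(f)   - any context; preemption-safe per-cpu increment.
 *   __DCCP_INC_STATS(f) - caller guarantees a non-preemptible context,
 *                         e.g. softirq, so the lockless variant suffices.
 */
static void softirq_context_example(void)
{
	__DCCP_INC_STATS(DCCP_MIB_INERRS);	/* packet receive path */
}

static void process_context_example(void)
{
	DCCP_INC_STATS(DCCP_MIB_INVALIDOPT);	/* may be preempted */
}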
index 3bd14e8853969d8e69f1b67b6eaecd4cf16864e1..ba347184bda9b3fee1f86db847352ab133ddaa3f 100644 (file)
@@ -359,7 +359,7 @@ send_sync:
                goto discard;
        }
 
-       DCCP_INC_STATS_BH(DCCP_MIB_INERRS);
+       DCCP_INC_STATS(DCCP_MIB_INERRS);
 discard:
        __kfree_skb(skb);
        return 0;
index f6d183f8f3322218fda743952ce3124d5f3b3e4f..5c7e413a3ae407e67565b48a8bd6f43e3b02de4d 100644 (file)
@@ -205,7 +205,7 @@ void dccp_req_err(struct sock *sk, u64 seq)
         * socket here.
         */
        if (!between48(seq, dccp_rsk(req)->dreq_iss, dccp_rsk(req)->dreq_gss)) {
-               NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+               __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
        } else {
                /*
                 * Still in RESPOND, just remove it silently.
@@ -247,7 +247,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
 
        if (skb->len < offset + sizeof(*dh) ||
            skb->len < offset + __dccp_basic_hdr_len(dh)) {
-               ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
+               __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
                return;
        }
 
@@ -256,7 +256,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
                                       iph->saddr, ntohs(dh->dccph_sport),
                                       inet_iif(skb));
        if (!sk) {
-               ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
+               __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
                return;
        }
 
@@ -273,7 +273,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
         * servers this needs to be solved differently.
         */
        if (sock_owned_by_user(sk))
-               NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
+               __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 
        if (sk->sk_state == DCCP_CLOSED)
                goto out;
@@ -281,7 +281,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
        dp = dccp_sk(sk);
        if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
            !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
-               NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+               __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
                goto out;
        }
 
@@ -318,7 +318,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
        case DCCP_REQUESTING:
        case DCCP_RESPOND:
                if (!sock_owned_by_user(sk)) {
-                       DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
+                       __DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS);
                        sk->sk_err = err;
 
                        sk->sk_error_report(sk);
@@ -431,11 +431,11 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
        return newsk;
 
 exit_overflow:
-       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+       __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 exit_nonewsk:
        dst_release(dst);
 exit:
-       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+       __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
        return NULL;
 put_and_exit:
        inet_csk_prepare_forced_close(newsk);
@@ -462,7 +462,7 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
        security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
        rt = ip_route_output_flow(net, &fl4, sk);
        if (IS_ERR(rt)) {
-               IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
+               __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
                return NULL;
        }
 
@@ -533,8 +533,8 @@ static void dccp_v4_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
        bh_unlock_sock(ctl_sk);
 
        if (net_xmit_eval(err) == 0) {
-               DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
-               DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
+               DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
+               DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
        }
 out:
         dst_release(dst);
@@ -637,7 +637,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 drop_and_free:
        reqsk_free(req);
 drop:
-       DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
+       __DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS);
        return -1;
 }
 EXPORT_SYMBOL_GPL(dccp_v4_conn_request);
index 8ceb3cebcad4b6510f04049813cf31a6a7530053..d176f4e66369a399f5fe8a440eb864dbac9c7542 100644 (file)
@@ -80,8 +80,8 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
        if (skb->len < offset + sizeof(*dh) ||
            skb->len < offset + __dccp_basic_hdr_len(dh)) {
-               ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
-                                  ICMP6_MIB_INERRORS);
+               __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
+                                 ICMP6_MIB_INERRORS);
                return;
        }
 
@@ -91,8 +91,8 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                                        inet6_iif(skb));
 
        if (!sk) {
-               ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
-                                  ICMP6_MIB_INERRORS);
+               __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
+                                 ICMP6_MIB_INERRORS);
                return;
        }
 
@@ -106,7 +106,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
        bh_lock_sock(sk);
        if (sock_owned_by_user(sk))
-               NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
+               __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 
        if (sk->sk_state == DCCP_CLOSED)
                goto out;
@@ -114,7 +114,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        dp = dccp_sk(sk);
        if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
            !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
-               NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+               __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
                goto out;
        }
 
@@ -156,7 +156,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        case DCCP_RESPOND:  /* Cannot happen.
                               It can, if SYNs are crossed. --ANK */
                if (!sock_owned_by_user(sk)) {
-                       DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
+                       __DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS);
                        sk->sk_err = err;
                        /*
                         * Wake people up to see the error
@@ -277,8 +277,8 @@ static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
        if (!IS_ERR(dst)) {
                skb_dst_set(skb, dst);
                ip6_xmit(ctl_sk, skb, &fl6, NULL, 0);
-               DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
-               DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
+               DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
+               DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
                return;
        }
 
@@ -378,7 +378,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 drop_and_free:
        reqsk_free(req);
 drop:
-       DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
+       __DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS);
        return -1;
 }
 
@@ -527,11 +527,11 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
        return newsk;
 
 out_overflow:
-       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+       __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 out_nonewsk:
        dst_release(dst);
 out:
-       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+       __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
        return NULL;
 }
 
index 1994f8af646b15fe668c01b207567f9016865c4f..53eddf99e4f6eb7f40f4a69359956e0d25803add 100644 (file)
@@ -127,7 +127,7 @@ struct sock *dccp_create_openreq_child(const struct sock *sk,
                }
                dccp_init_xmit_timers(newsk);
 
-               DCCP_INC_STATS_BH(DCCP_MIB_PASSIVEOPENS);
+               __DCCP_INC_STATS(DCCP_MIB_PASSIVEOPENS);
        }
        return newsk;
 }
index 9bce31886bda4e285d89e0f5d8d344956f01ce00..74d29c56c36709fd4e31f0e63a1f8b1aa38a32cd 100644 (file)
@@ -253,7 +253,7 @@ out_nonsensical_length:
        return 0;
 
 out_invalid_option:
-       DCCP_INC_STATS_BH(DCCP_MIB_INVALIDOPT);
+       DCCP_INC_STATS(DCCP_MIB_INVALIDOPT);
        rc = DCCP_RESET_CODE_OPTION_ERROR;
 out_featneg_failed:
        DCCP_WARN("DCCP(%p): Option %d (len=%d) error=%u\n", sk, opt, len, rc);
index 3ef7acef3ce8c17f3a2e873b8178b7ee2f7cd619..3a2c3402775860b3d1aeeffaf52f951b1b9c9272 100644 (file)
@@ -28,7 +28,7 @@ static void dccp_write_err(struct sock *sk)
 
        dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
        dccp_done(sk);
-       DCCP_INC_STATS_BH(DCCP_MIB_ABORTONTIMEOUT);
+       __DCCP_INC_STATS(DCCP_MIB_ABORTONTIMEOUT);
 }
 
 /* A write timeout has occurred. Process the after effects. */
@@ -100,7 +100,7 @@ static void dccp_retransmit_timer(struct sock *sk)
         * total number of retransmissions of clones of original packets.
         */
        if (icsk->icsk_retransmits == 0)
-               DCCP_INC_STATS_BH(DCCP_MIB_TIMEOUTS);
+               __DCCP_INC_STATS(DCCP_MIB_TIMEOUTS);
 
        if (dccp_retransmit_skb(sk) != 0) {
                /*
@@ -179,7 +179,7 @@ static void dccp_delack_timer(unsigned long data)
        if (sock_owned_by_user(sk)) {
                /* Try again later. */
                icsk->icsk_ack.blocked = 1;
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
+               __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
                sk_reset_timer(sk, &icsk->icsk_delack_timer,
                               jiffies + TCP_DELACK_MIN);
                goto out;
@@ -209,7 +209,7 @@ static void dccp_delack_timer(unsigned long data)
                        icsk->icsk_ack.ato = TCP_ATO_MIN;
                }
                dccp_send_ack(sk);
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
+               __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
        }
 out:
        bh_unlock_sock(sk);
index 607a14f20d88011e6de8540b21a69e6527d49df0..b1dc096d22f8c83e771b1df68d7815661ac51bae 100644 (file)
@@ -1034,10 +1034,13 @@ source_ok:
        if (!fld.daddr) {
                fld.daddr = fld.saddr;
 
-               err = -EADDRNOTAVAIL;
                if (dev_out)
                        dev_put(dev_out);
+               err = -EINVAL;
                dev_out = init_net.loopback_dev;
+               if (!dev_out->dn_ptr)
+                       goto out;
+               err = -EADDRNOTAVAIL;
                dev_hold(dev_out);
                if (!fld.daddr) {
                        fld.daddr =
@@ -1110,6 +1113,8 @@ source_ok:
                if (dev_out == NULL)
                        goto out;
                dn_db = rcu_dereference_raw(dev_out->dn_ptr);
+               if (!dn_db)
+                       goto e_inval;
                /* Possible improvement - check all devices for local addr */
                if (dn_dev_islocal(dev_out, fld.daddr)) {
                        dev_put(dev_out);
@@ -1151,6 +1156,8 @@ select_source:
                        dev_put(dev_out);
                dev_out = init_net.loopback_dev;
                dev_hold(dev_out);
+               if (!dev_out->dn_ptr)
+                       goto e_inval;
                fld.flowidn_oif = dev_out->ifindex;
                if (res.fi)
                        dn_fib_info_put(res.fi);
index c28c47463b7edfc58b08e2d059e0e69b66a3fff7..eff5dfc2e33fc6c8c5cc50b6f3d6901219c95757 100644 (file)
@@ -51,11 +51,12 @@ void unregister_switch_driver(struct dsa_switch_driver *drv)
 EXPORT_SYMBOL_GPL(unregister_switch_driver);
 
 static struct dsa_switch_driver *
-dsa_switch_probe(struct device *host_dev, int sw_addr, char **_name)
+dsa_switch_probe(struct device *parent, struct device *host_dev, int sw_addr,
+                const char **_name, void **priv)
 {
        struct dsa_switch_driver *ret;
        struct list_head *list;
-       char *name;
+       const char *name;
 
        ret = NULL;
        name = NULL;
@@ -66,7 +67,7 @@ dsa_switch_probe(struct device *host_dev, int sw_addr, char **_name)
 
                drv = list_entry(list, struct dsa_switch_driver, list);
 
-               name = drv->probe(host_dev, sw_addr);
+               name = drv->probe(parent, host_dev, sw_addr, priv);
                if (name != NULL) {
                        ret = drv;
                        break;
@@ -181,7 +182,7 @@ __ATTRIBUTE_GROUPS(dsa_hwmon);
 /* basic switch operations **************************************************/
 static int dsa_cpu_dsa_setup(struct dsa_switch *ds, struct net_device *master)
 {
-       struct dsa_chip_data *cd = ds->pd;
+       struct dsa_chip_data *cd = ds->cd;
        struct device_node *port_dn;
        struct phy_device *phydev;
        int ret, port, mode;
@@ -218,7 +219,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
 {
        struct dsa_switch_driver *drv = ds->drv;
        struct dsa_switch_tree *dst = ds->dst;
-       struct dsa_chip_data *pd = ds->pd;
+       struct dsa_chip_data *cd = ds->cd;
        bool valid_name_found = false;
        int index = ds->index;
        int i, ret;
@@ -229,7 +230,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
        for (i = 0; i < DSA_MAX_PORTS; i++) {
                char *name;
 
-               name = pd->port_names[i];
+               name = cd->port_names[i];
                if (name == NULL)
                        continue;
 
@@ -245,7 +246,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
                } else if (!strcmp(name, "dsa")) {
                        ds->dsa_port_mask |= 1 << i;
                } else {
-                       ds->phys_port_mask |= 1 << i;
+                       ds->enabled_port_mask |= 1 << i;
                }
                valid_name_found = true;
        }
@@ -258,7 +259,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
        /* Make the built-in MII bus mask match the number of ports;
         * switch drivers can override this later
         */
-       ds->phys_mii_mask = ds->phys_port_mask;
+       ds->phys_mii_mask = ds->enabled_port_mask;
 
        /*
         * If the CPU connects to this switch, set the switch tree
@@ -266,7 +267,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
         * switch.
         */
        if (dst->cpu_switch == index) {
-               switch (ds->tag_protocol) {
+               switch (drv->tag_protocol) {
 #ifdef CONFIG_NET_DSA_TAG_DSA
                case DSA_TAG_PROTO_DSA:
                        dst->rcv = dsa_netdev_ops.rcv;
@@ -294,7 +295,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
                        goto out;
                }
 
-               dst->tag_protocol = ds->tag_protocol;
+               dst->tag_protocol = drv->tag_protocol;
        }
 
        /*
@@ -324,13 +325,13 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
         * Create network devices for physical switch ports.
         */
        for (i = 0; i < DSA_MAX_PORTS; i++) {
-               if (!(ds->phys_port_mask & (1 << i)))
+               if (!(ds->enabled_port_mask & (1 << i)))
                        continue;
 
-               ret = dsa_slave_create(ds, parent, i, pd->port_names[i]);
+               ret = dsa_slave_create(ds, parent, i, cd->port_names[i]);
                if (ret < 0) {
                        netdev_err(dst->master_netdev, "[%d]: can't create dsa slave device for port %d(%s): %d\n",
-                                  index, i, pd->port_names[i], ret);
+                                  index, i, cd->port_names[i], ret);
                        ret = 0;
                }
        }
@@ -378,16 +379,17 @@ static struct dsa_switch *
 dsa_switch_setup(struct dsa_switch_tree *dst, int index,
                 struct device *parent, struct device *host_dev)
 {
-       struct dsa_chip_data *pd = dst->pd->chip + index;
+       struct dsa_chip_data *cd = dst->pd->chip + index;
        struct dsa_switch_driver *drv;
        struct dsa_switch *ds;
        int ret;
-       char *name;
+       const char *name;
+       void *priv;
 
        /*
         * Probe for switch model.
         */
-       drv = dsa_switch_probe(host_dev, pd->sw_addr, &name);
+       drv = dsa_switch_probe(parent, host_dev, cd->sw_addr, &name, &priv);
        if (drv == NULL) {
                netdev_err(dst->master_netdev, "[%d]: could not detect attached switch\n",
                           index);
@@ -400,16 +402,16 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
        /*
         * Allocate and initialise switch state.
         */
-       ds = devm_kzalloc(parent, sizeof(*ds) + drv->priv_size, GFP_KERNEL);
+       ds = devm_kzalloc(parent, sizeof(*ds), GFP_KERNEL);
        if (ds == NULL)
                return ERR_PTR(-ENOMEM);
 
        ds->dst = dst;
        ds->index = index;
-       ds->pd = pd;
+       ds->cd = cd;
        ds->drv = drv;
-       ds->tag_protocol = drv->tag_protocol;
-       ds->master_dev = host_dev;
+       ds->priv = priv;
+       ds->dev = parent;
 
        ret = dsa_switch_setup_one(ds, parent);
        if (ret)
@@ -422,7 +424,7 @@ static void dsa_switch_destroy(struct dsa_switch *ds)
 {
        struct device_node *port_dn;
        struct phy_device *phydev;
-       struct dsa_chip_data *cd = ds->pd;
+       struct dsa_chip_data *cd = ds->cd;
        int port;
 
 #ifdef CONFIG_NET_DSA_HWMON
@@ -432,7 +434,7 @@ static void dsa_switch_destroy(struct dsa_switch *ds)
 
        /* Destroy network devices for physical switch ports. */
        for (port = 0; port < DSA_MAX_PORTS; port++) {
-               if (!(ds->phys_port_mask & (1 << port)))
+               if (!(ds->enabled_port_mask & (1 << port)))
                        continue;
 
                if (!ds->ports[port])
@@ -657,9 +659,6 @@ static int dsa_of_probe(struct device *dev)
        const char *port_name;
        int chip_index, port_index;
        const unsigned int *sw_addr, *port_reg;
-       int gpio;
-       enum of_gpio_flags of_flags;
-       unsigned long flags;
        u32 eeprom_len;
        int ret;
 
@@ -738,19 +737,6 @@ static int dsa_of_probe(struct device *dev)
                        put_device(cd->host_dev);
                        cd->host_dev = &mdio_bus_switch->dev;
                }
-               gpio = of_get_named_gpio_flags(child, "reset-gpios", 0,
-                                              &of_flags);
-               if (gpio_is_valid(gpio)) {
-                       flags = (of_flags == OF_GPIO_ACTIVE_LOW ?
-                                GPIOF_ACTIVE_LOW : 0);
-                       ret = devm_gpio_request_one(dev, gpio, flags,
-                                                   "switch_reset");
-                       if (ret)
-                               goto out_free_chip;
-
-                       cd->reset = gpio_to_desc(gpio);
-                       gpiod_direction_output(cd->reset, 0);
-               }
 
                for_each_available_child_of_node(child, port) {
                        port_reg = of_get_property(port, "reg", NULL);
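
With the extended probe signature, a switch driver allocates its own
private state and returns it through @priv instead of asking the core for
priv_size bytes. A hypothetical driver probe under the new API; the mychip
names and fields are illustrative only:

/* Hypothetical driver-side view of the new probe signature. */
struct mychip_priv {
	struct mii_bus *bus;
	int sw_addr;
};

static const char *mychip_drv_probe(struct device *dsa_dev,
				    struct device *host_dev, int sw_addr,
				    void **priv)
{
	struct mychip_priv *ps;

	ps = devm_kzalloc(dsa_dev, sizeof(*ps), GFP_KERNEL);
	if (!ps)
		return NULL;	/* treated as "no switch found" */

	ps->bus = dsa_host_dev_to_mii_bus(host_dev);
	ps->sw_addr = sw_addr;

	*priv = ps;		/* handed back to the core */
	return "mychip";	/* switch model name */
}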
index 1d1a54687e4abc1b1eced0b665bc501e068e2d63..dfa33779d49cc2f012fd3c2b3ff27dfc580be84f 100644 (file)
@@ -22,11 +22,6 @@ struct dsa_device_ops {
 };
 
 struct dsa_slave_priv {
-       /*
-        * The linux network interface corresponding to this
-        * switch port.
-        */
-       struct net_device       *dev;
        struct sk_buff *        (*xmit)(struct sk_buff *skb,
                                        struct net_device *dev);
 
index 2dae0d06435982a795c4cdb26992304e7d86b99e..152436cdab30ac351b0e24d8db9148940ceb3496 100644 (file)
@@ -50,8 +50,8 @@ void dsa_slave_mii_bus_init(struct dsa_switch *ds)
        ds->slave_mii_bus->read = dsa_slave_phy_read;
        ds->slave_mii_bus->write = dsa_slave_phy_write;
        snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d:%.2x",
-                       ds->index, ds->pd->sw_addr);
-       ds->slave_mii_bus->parent = ds->master_dev;
+                       ds->index, ds->cd->sw_addr);
+       ds->slave_mii_bus->parent = ds->dev;
        ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
 }
 
@@ -615,8 +615,8 @@ static int dsa_slave_get_eeprom_len(struct net_device *dev)
        struct dsa_slave_priv *p = netdev_priv(dev);
        struct dsa_switch *ds = p->parent;
 
-       if (ds->pd->eeprom_len)
-               return ds->pd->eeprom_len;
+       if (ds->cd->eeprom_len)
+               return ds->cd->eeprom_len;
 
        if (ds->drv->get_eeprom_len)
                return ds->drv->get_eeprom_len(ds);
@@ -666,6 +666,78 @@ static void dsa_slave_get_strings(struct net_device *dev,
        }
 }
 
+static void dsa_cpu_port_get_ethtool_stats(struct net_device *dev,
+                                          struct ethtool_stats *stats,
+                                          uint64_t *data)
+{
+       struct dsa_switch_tree *dst = dev->dsa_ptr;
+       struct dsa_switch *ds = dst->ds[0];
+       s8 cpu_port = dst->cpu_port;
+       int count = 0;
+
+       if (dst->master_ethtool_ops.get_sset_count) {
+               count = dst->master_ethtool_ops.get_sset_count(dev,
+                                                              ETH_SS_STATS);
+               dst->master_ethtool_ops.get_ethtool_stats(dev, stats, data);
+       }
+
+       if (ds->drv->get_ethtool_stats)
+               ds->drv->get_ethtool_stats(ds, cpu_port, data + count);
+}
+
+static int dsa_cpu_port_get_sset_count(struct net_device *dev, int sset)
+{
+       struct dsa_switch_tree *dst = dev->dsa_ptr;
+       struct dsa_switch *ds = dst->ds[0];
+       int count = 0;
+
+       if (dst->master_ethtool_ops.get_sset_count)
+               count += dst->master_ethtool_ops.get_sset_count(dev, sset);
+
+       if (sset == ETH_SS_STATS && ds->drv->get_sset_count)
+               count += ds->drv->get_sset_count(ds);
+
+       return count;
+}
+
+static void dsa_cpu_port_get_strings(struct net_device *dev,
+                                    uint32_t stringset, uint8_t *data)
+{
+       struct dsa_switch_tree *dst = dev->dsa_ptr;
+       struct dsa_switch *ds = dst->ds[0];
+       s8 cpu_port = dst->cpu_port;
+       int len = ETH_GSTRING_LEN;
+       int mcount = 0, count;
+       unsigned int i;
+       uint8_t pfx[4];
+       uint8_t *ndata;
+
+       snprintf(pfx, sizeof(pfx), "p%.2d", cpu_port);
+       /* We do not want to be NULL-terminated, since this is a prefix */
+       pfx[sizeof(pfx) - 1] = '_';
+
+       if (dst->master_ethtool_ops.get_sset_count) {
+               mcount = dst->master_ethtool_ops.get_sset_count(dev,
+                                                               ETH_SS_STATS);
+               dst->master_ethtool_ops.get_strings(dev, stringset, data);
+       }
+
+       if (stringset == ETH_SS_STATS && ds->drv->get_strings) {
+               ndata = data + mcount * len;
+               /* This function copies ETH_GSTRING_LEN bytes; we then mangle
+                * the output to prepend the CPU port prefix constructed
+                * earlier
+                */
+               ds->drv->get_strings(ds, cpu_port, ndata);
+               count = ds->drv->get_sset_count(ds);
+               for (i = 0; i < count; i++) {
+                       memmove(ndata + (i * len + sizeof(pfx)),
+                               ndata + i * len, len - sizeof(pfx));
+                       memcpy(ndata + i * len, pfx, sizeof(pfx));
+               }
+       }
+}
+
 static void dsa_slave_get_ethtool_stats(struct net_device *dev,
                                        struct ethtool_stats *stats,
                                        uint64_t *data)
@@ -673,10 +745,10 @@ static void dsa_slave_get_ethtool_stats(struct net_device *dev,
        struct dsa_slave_priv *p = netdev_priv(dev);
        struct dsa_switch *ds = p->parent;
 
-       data[0] = p->dev->stats.tx_packets;
-       data[1] = p->dev->stats.tx_bytes;
-       data[2] = p->dev->stats.rx_packets;
-       data[3] = p->dev->stats.rx_bytes;
+       data[0] = dev->stats.tx_packets;
+       data[1] = dev->stats.tx_bytes;
+       data[2] = dev->stats.rx_packets;
+       data[3] = dev->stats.rx_bytes;
        if (ds->drv->get_ethtool_stats != NULL)
                ds->drv->get_ethtool_stats(ds, p->port, data + 4);
 }
@@ -821,6 +893,8 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
        .get_eee                = dsa_slave_get_eee,
 };
 
+static struct ethtool_ops dsa_cpu_port_ethtool_ops;
+
 static const struct net_device_ops dsa_slave_netdev_ops = {
        .ndo_open               = dsa_slave_open,
        .ndo_stop               = dsa_slave_close,
@@ -925,7 +999,7 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
                                struct net_device *slave_dev)
 {
        struct dsa_switch *ds = p->parent;
-       struct dsa_chip_data *cd = ds->pd;
+       struct dsa_chip_data *cd = ds->cd;
        struct device_node *phy_dn, *port_dn;
        bool phy_is_fixed = false;
        u32 phy_flags = 0;
@@ -1038,6 +1112,7 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
                     int port, char *name)
 {
        struct net_device *master = ds->dst->master_netdev;
+       struct dsa_switch_tree *dst = ds->dst;
        struct net_device *slave_dev;
        struct dsa_slave_priv *p;
        int ret;
@@ -1049,6 +1124,19 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
 
        slave_dev->features = master->vlan_features;
        slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
+       if (master->ethtool_ops != &dsa_cpu_port_ethtool_ops) {
+               memcpy(&dst->master_ethtool_ops, master->ethtool_ops,
+                      sizeof(struct ethtool_ops));
+               memcpy(&dsa_cpu_port_ethtool_ops, &dst->master_ethtool_ops,
+                      sizeof(struct ethtool_ops));
+               dsa_cpu_port_ethtool_ops.get_sset_count =
+                                       dsa_cpu_port_get_sset_count;
+               dsa_cpu_port_ethtool_ops.get_ethtool_stats =
+                                       dsa_cpu_port_get_ethtool_stats;
+               dsa_cpu_port_ethtool_ops.get_strings =
+                                       dsa_cpu_port_get_strings;
+               master->ethtool_ops = &dsa_cpu_port_ethtool_ops;
+       }
        eth_hw_addr_inherit(slave_dev, master);
        slave_dev->priv_flags |= IFF_NO_QUEUE;
        slave_dev->netdev_ops = &dsa_slave_netdev_ops;
@@ -1059,11 +1147,10 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
                                 NULL);
 
        SET_NETDEV_DEV(slave_dev, parent);
-       slave_dev->dev.of_node = ds->pd->port_dn[port];
+       slave_dev->dev.of_node = ds->cd->port_dn[port];
        slave_dev->vlan_features = master->vlan_features;
 
        p = netdev_priv(slave_dev);
-       p->dev = slave_dev;
        p->parent = ds;
        p->port = port;
 
index 0d3d709052cac91a807d38bc3e971bc2ae8c6a36..4b683fd0abf128d617b15f6e0722058de81c08b8 100644 (file)
@@ -18,8 +18,9 @@ config HSR
          earlier.
 
          This code is a "best effort" to comply with the HSR standard as
-         described in IEC 62439-3:2010 (HSRv0), but no compliancy tests have
-         been made.
+         described in IEC 62439-3:2010 (HSRv0) and IEC 62439-3:2012 (HSRv1),
+         but no compliance tests have been made. Use iproute2 to select
+         the version you desire.
 
          You need to perform any and all necessary tests yourself before
          relying on this code in a safety critical system!
index c7d1adca30d891b183b0832712e0d57aa1f33201..386cbce7bc518117ddd6bff8703aebe00bfeb325 100644 (file)
@@ -90,7 +90,8 @@ static void hsr_check_announce(struct net_device *hsr_dev,
 
        hsr = netdev_priv(hsr_dev);
 
-       if ((hsr_dev->operstate == IF_OPER_UP) && (old_operstate != IF_OPER_UP)) {
+       if ((hsr_dev->operstate == IF_OPER_UP)
+                       && (old_operstate != IF_OPER_UP)) {
                /* Went up */
                hsr->announce_count = 0;
                hsr->announce_timer.expires = jiffies +
@@ -250,31 +251,22 @@ static const struct header_ops hsr_header_ops = {
        .parse   = eth_header_parse,
 };
 
-
-/* HSR:2010 supervision frames should be padded so that the whole frame,
- * including headers and FCS, is 64 bytes (without VLAN).
- */
-static int hsr_pad(int size)
-{
-       const int min_size = ETH_ZLEN - HSR_HLEN - ETH_HLEN;
-
-       if (size >= min_size)
-               return size;
-       return min_size;
-}
-
-static void send_hsr_supervision_frame(struct hsr_port *master, u8 type)
+static void send_hsr_supervision_frame(struct hsr_port *master,
+               u8 type, u8 hsrVer)
 {
        struct sk_buff *skb;
        int hlen, tlen;
+       struct hsr_tag *hsr_tag;
        struct hsr_sup_tag *hsr_stag;
        struct hsr_sup_payload *hsr_sp;
        unsigned long irqflags;
 
        hlen = LL_RESERVED_SPACE(master->dev);
        tlen = master->dev->needed_tailroom;
-       skb = alloc_skb(hsr_pad(sizeof(struct hsr_sup_payload)) + hlen + tlen,
-                       GFP_ATOMIC);
+       skb = dev_alloc_skb(
+                       sizeof(struct hsr_tag) +
+                       sizeof(struct hsr_sup_tag) +
+                       sizeof(struct hsr_sup_payload) + hlen + tlen);
 
        if (skb == NULL)
                return;
@@ -282,32 +274,48 @@ static void send_hsr_supervision_frame(struct hsr_port *master, u8 type)
        skb_reserve(skb, hlen);
 
        skb->dev = master->dev;
-       skb->protocol = htons(ETH_P_PRP);
+       skb->protocol = htons(hsrVer ? ETH_P_HSR : ETH_P_PRP);
        skb->priority = TC_PRIO_CONTROL;
 
-       if (dev_hard_header(skb, skb->dev, ETH_P_PRP,
+       if (dev_hard_header(skb, skb->dev, (hsrVer ? ETH_P_HSR : ETH_P_PRP),
                            master->hsr->sup_multicast_addr,
                            skb->dev->dev_addr, skb->len) <= 0)
                goto out;
        skb_reset_mac_header(skb);
 
-       hsr_stag = (typeof(hsr_stag)) skb_put(skb, sizeof(*hsr_stag));
+       if (hsrVer > 0) {
+               hsr_tag = (typeof(hsr_tag)) skb_put(skb, sizeof(struct hsr_tag));
+               hsr_tag->encap_proto = htons(ETH_P_PRP);
+               set_hsr_tag_LSDU_size(hsr_tag, HSR_V1_SUP_LSDUSIZE);
+       }
 
-       set_hsr_stag_path(hsr_stag, 0xf);
-       set_hsr_stag_HSR_Ver(hsr_stag, 0);
+       hsr_stag = (typeof(hsr_stag)) skb_put(skb, sizeof(struct hsr_sup_tag));
+       set_hsr_stag_path(hsr_stag, (hsrVer ? 0x0 : 0xf));
+       set_hsr_stag_HSR_Ver(hsr_stag, hsrVer);
 
+       /* From HSRv1 on we have separate supervision sequence numbers. */
        spin_lock_irqsave(&master->hsr->seqnr_lock, irqflags);
-       hsr_stag->sequence_nr = htons(master->hsr->sequence_nr);
-       master->hsr->sequence_nr++;
+       if (hsrVer > 0) {
+               hsr_stag->sequence_nr = htons(master->hsr->sup_sequence_nr);
+               hsr_tag->sequence_nr = htons(master->hsr->sequence_nr);
+               master->hsr->sup_sequence_nr++;
+               master->hsr->sequence_nr++;
+       } else {
+               hsr_stag->sequence_nr = htons(master->hsr->sequence_nr);
+               master->hsr->sequence_nr++;
+       }
        spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags);
 
        hsr_stag->HSR_TLV_Type = type;
-       hsr_stag->HSR_TLV_Length = 12;
+       /* TODO: Why 12 in HSRv0? */
+       hsr_stag->HSR_TLV_Length = hsrVer ? sizeof(struct hsr_sup_payload) : 12;
 
        /* Payload: MacAddressA */
-       hsr_sp = (typeof(hsr_sp)) skb_put(skb, sizeof(*hsr_sp));
+       hsr_sp = (typeof(hsr_sp)) skb_put(skb, sizeof(struct hsr_sup_payload));
        ether_addr_copy(hsr_sp->MacAddressA, master->dev->dev_addr);
 
+       skb_put_padto(skb, ETH_ZLEN + HSR_HLEN);
+
        hsr_forward_skb(skb, master);
        return;
 
@@ -329,19 +337,20 @@ static void hsr_announce(unsigned long data)
        rcu_read_lock();
        master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
 
-       if (hsr->announce_count < 3) {
-               send_hsr_supervision_frame(master, HSR_TLV_ANNOUNCE);
+       if (hsr->announce_count < 3 && hsr->protVersion == 0) {
+               send_hsr_supervision_frame(master, HSR_TLV_ANNOUNCE,
+                               hsr->protVersion);
                hsr->announce_count++;
-       } else {
-               send_hsr_supervision_frame(master, HSR_TLV_LIFE_CHECK);
-       }
 
-       if (hsr->announce_count < 3)
                hsr->announce_timer.expires = jiffies +
                                msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
-       else
+       } else {
+               send_hsr_supervision_frame(master, HSR_TLV_LIFE_CHECK,
+                               hsr->protVersion);
+
                hsr->announce_timer.expires = jiffies +
                                msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
+       }
 
        if (is_admin_up(master->dev))
                add_timer(&hsr->announce_timer);
@@ -428,7 +437,7 @@ static const unsigned char def_multicast_addr[ETH_ALEN] __aligned(2) = {
 };
 
 int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
-                    unsigned char multicast_spec)
+                    unsigned char multicast_spec, u8 protocol_version)
 {
        struct hsr_priv *hsr;
        struct hsr_port *port;
@@ -450,6 +459,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
        spin_lock_init(&hsr->seqnr_lock);
        /* Overflow soon to find bugs easier: */
        hsr->sequence_nr = HSR_SEQNR_START;
+       hsr->sup_sequence_nr = HSR_SUP_SEQNR_START;
 
        init_timer(&hsr->announce_timer);
        hsr->announce_timer.function = hsr_announce;
@@ -462,6 +472,8 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
        ether_addr_copy(hsr->sup_multicast_addr, def_multicast_addr);
        hsr->sup_multicast_addr[ETH_ALEN - 1] = multicast_spec;
 
+       hsr->protVersion = protocol_version;
+
        /* FIXME: should I modify the value of these?
         *
         * - hsr_dev->flags - i.e.
index 108a5d59d2a6433d27960815fa446bddf0e7a304..9975e31bbb827b7f7617372a992e596addaaf6c6 100644 (file)
@@ -17,7 +17,7 @@
 
 void hsr_dev_setup(struct net_device *dev);
 int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
-                    unsigned char multicast_spec);
+                    unsigned char multicast_spec, u8 protocol_version);
 void hsr_check_carrier_and_operstate(struct hsr_priv *hsr);
 bool is_hsr_master(struct net_device *dev);
 int hsr_get_max_mtu(struct hsr_priv *hsr);
index 7871ed6d38256c3cdaed0733744af58750372dd5..5ee1d43f13100849c6ac28a75082d65f69350d72 100644 (file)
@@ -50,21 +50,40 @@ struct hsr_frame_info {
  */
 static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
 {
-       struct hsr_ethhdr_sp *hdr;
+       struct ethhdr *ethHdr;
+       struct hsr_sup_tag *hsrSupTag;
+       struct hsrv1_ethhdr_sp *hsrV1Hdr;
 
        WARN_ON_ONCE(!skb_mac_header_was_set(skb));
-       hdr = (struct hsr_ethhdr_sp *) skb_mac_header(skb);
+       ethHdr = (struct ethhdr *) skb_mac_header(skb);
 
-       if (!ether_addr_equal(hdr->ethhdr.h_dest,
+       /* Correct addr? */
+       if (!ether_addr_equal(ethHdr->h_dest,
                              hsr->sup_multicast_addr))
                return false;
 
-       if (get_hsr_stag_path(&hdr->hsr_sup) != 0x0f)
+       /* Correct ether type? */
+       if (!(ethHdr->h_proto == htons(ETH_P_PRP)
+                       || ethHdr->h_proto == htons(ETH_P_HSR)))
                return false;
-       if ((hdr->hsr_sup.HSR_TLV_Type != HSR_TLV_ANNOUNCE) &&
-           (hdr->hsr_sup.HSR_TLV_Type != HSR_TLV_LIFE_CHECK))
+
+       /* Get the supervision header from correct location. */
+       if (ethHdr->h_proto == htons(ETH_P_HSR)) { /* Okay HSRv1. */
+               hsrV1Hdr = (struct hsrv1_ethhdr_sp *) skb_mac_header(skb);
+               if (hsrV1Hdr->hsr.encap_proto != htons(ETH_P_PRP))
+                       return false;
+
+               hsrSupTag = &hsrV1Hdr->hsr_sup;
+       } else {
+               hsrSupTag = &((struct hsrv0_ethhdr_sp *) skb_mac_header(skb))->hsr_sup;
+       }
+
+       if ((hsrSupTag->HSR_TLV_Type != HSR_TLV_ANNOUNCE) &&
+           (hsrSupTag->HSR_TLV_Type != HSR_TLV_LIFE_CHECK))
                return false;
-       if (hdr->hsr_sup.HSR_TLV_Length != 12)
+       if ((hsrSupTag->HSR_TLV_Length != 12) &&
+                       (hsrSupTag->HSR_TLV_Length !=
+                                       sizeof(struct hsr_sup_payload)))
                return false;
 
        return true;
@@ -110,7 +129,7 @@ static struct sk_buff *frame_get_stripped_skb(struct hsr_frame_info *frame,
 
 
 static void hsr_fill_tag(struct sk_buff *skb, struct hsr_frame_info *frame,
-                        struct hsr_port *port)
+                        struct hsr_port *port, u8 protoVersion)
 {
        struct hsr_ethhdr *hsr_ethhdr;
        int lane_id;
@@ -131,7 +150,8 @@ static void hsr_fill_tag(struct sk_buff *skb, struct hsr_frame_info *frame,
        set_hsr_tag_LSDU_size(&hsr_ethhdr->hsr_tag, lsdu_size);
        hsr_ethhdr->hsr_tag.sequence_nr = htons(frame->sequence_nr);
        hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto;
-       hsr_ethhdr->ethhdr.h_proto = htons(ETH_P_PRP);
+       hsr_ethhdr->ethhdr.h_proto = htons(protoVersion ?
+                       ETH_P_HSR : ETH_P_PRP);
 }
 
 static struct sk_buff *create_tagged_skb(struct sk_buff *skb_o,
@@ -160,7 +180,7 @@ static struct sk_buff *create_tagged_skb(struct sk_buff *skb_o,
        memmove(dst, src, movelen);
        skb_reset_mac_header(skb);
 
-       hsr_fill_tag(skb, frame, port);
+       hsr_fill_tag(skb, frame, port, port->hsr->protVersion);
 
        return skb;
 }
@@ -320,7 +340,8 @@ static int hsr_fill_frame_info(struct hsr_frame_info *frame,
                /* FIXME: */
                WARN_ONCE(1, "HSR: VLAN not yet supported");
        }
-       if (ethhdr->h_proto == htons(ETH_P_PRP)) {
+       if (ethhdr->h_proto == htons(ETH_P_PRP)
+                       || ethhdr->h_proto == htons(ETH_P_HSR)) {
                frame->skb_std = NULL;
                frame->skb_hsr = skb;
                frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
index bace124d14ef570375b96fbde362b2f6f059f630..7ea925816f79d8a0e547a0feb1257fda23097ba4 100644 (file)
@@ -177,17 +177,17 @@ struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb,
                        return node;
        }
 
-       if (!is_sup)
-               return NULL; /* Only supervision frame may create node entry */
+       /* Everyone may create a node entry; any sender is a node connected to an HSR device. */
 
-       if (ethhdr->h_proto == htons(ETH_P_PRP)) {
+       if (ethhdr->h_proto == htons(ETH_P_PRP)
+                       || ethhdr->h_proto == htons(ETH_P_HSR)) {
                /* Use the existing sequence_nr from the tag as starting point
                 * for filtering duplicate frames.
                 */
                seq_out = hsr_get_skb_sequence_nr(skb) - 1;
        } else {
                WARN_ONCE(1, "%s: Non-HSR frame\n", __func__);
-               seq_out = 0;
+               seq_out = HSR_SEQNR_START;
        }
 
        return hsr_add_node(node_db, ethhdr->h_source, seq_out);
@@ -200,17 +200,25 @@ struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb,
 void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
                          struct hsr_port *port_rcv)
 {
+       struct ethhdr *ethhdr;
        struct hsr_node *node_real;
        struct hsr_sup_payload *hsr_sp;
        struct list_head *node_db;
        int i;
 
-       skb_pull(skb, sizeof(struct hsr_ethhdr_sp));
-       hsr_sp = (struct hsr_sup_payload *) skb->data;
+       ethhdr = (struct ethhdr *) skb_mac_header(skb);
 
-       if (ether_addr_equal(eth_hdr(skb)->h_source, hsr_sp->MacAddressA))
-               /* Not sent from MacAddressB of a PICS_SUBS capable node */
-               goto done;
+       /* Pull past the ethernet header. */
+       skb_pull(skb, sizeof(struct ethhdr));
+
+       /* And past the HSR tag, if present. */
+       if (ethhdr->h_proto == htons(ETH_P_HSR))
+               skb_pull(skb, sizeof(struct hsr_tag));
+
+       /* And past the HSR supervision tag. */
+       skb_pull(skb, sizeof(struct hsr_sup_tag));
+
+       hsr_sp = (struct hsr_sup_payload *) skb->data;
 
        /* Merge node_curr (registered on MacAddressB) into node_real */
        node_db = &port_rcv->hsr->node_db;
@@ -225,7 +233,7 @@ void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
                /* Node has already been merged */
                goto done;
 
-       ether_addr_copy(node_real->MacAddressB, eth_hdr(skb)->h_source);
+       ether_addr_copy(node_real->MacAddressB, ethhdr->h_source);
        for (i = 0; i < HSR_PT_PORTS; i++) {
                if (!node_curr->time_in_stale[i] &&
                    time_after(node_curr->time_in[i], node_real->time_in[i])) {
@@ -241,7 +249,7 @@ void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
        kfree_rcu(node_curr, rcu_head);
 
 done:
-       skb_push(skb, sizeof(struct hsr_ethhdr_sp));
+       skb_push(skb, sizeof(struct hsrv1_ethhdr_sp));
 }
 
 
index 5a9c69962ded0284c62f6b0ab7767ceabecc6aa8..9b9909e89e9e855851b2fa6ab8bed321cdcfafc7 100644 (file)
@@ -30,6 +30,7 @@
  */
 #define MAX_SLAVE_DIFF                  3000 /* ms */
 #define HSR_SEQNR_START                        (USHRT_MAX - 1024)
+#define HSR_SUP_SEQNR_START            (HSR_SEQNR_START / 2)
 
 
 /* How often shall we check for broken ring and remove node entries older than
@@ -58,6 +59,8 @@ struct hsr_tag {
 
 #define HSR_HLEN       6
 
+#define HSR_V1_SUP_LSDUSIZE            52
+
 /* The helper functions below assume that 'path' occupies the 4 most
  * significant bits of the 16-bit field shared by 'path' and 'LSDU_size' (or
  * equivalently, the 4 most significant bits of HSR tag byte 14).
@@ -131,8 +134,14 @@ static inline void set_hsr_stag_HSR_Ver(struct hsr_sup_tag *hst, u16 HSR_Ver)
        set_hsr_tag_LSDU_size((struct hsr_tag *) hst, HSR_Ver);
 }
 
-struct hsr_ethhdr_sp {
+struct hsrv0_ethhdr_sp {
+       struct ethhdr           ethhdr;
+       struct hsr_sup_tag      hsr_sup;
+} __packed;
+
+struct hsrv1_ethhdr_sp {
        struct ethhdr           ethhdr;
+       struct hsr_tag          hsr;
        struct hsr_sup_tag      hsr_sup;
 } __packed;
 
@@ -162,6 +171,8 @@ struct hsr_priv {
        struct timer_list       prune_timer;
        int announce_count;
        u16 sequence_nr;
+       u16 sup_sequence_nr;                    /* HSRv1: separate seq_nr for supervision frames */
+       u8 protVersion;                                 /* Indicates HSRv0 or HSRv1. */
        spinlock_t seqnr_lock;                  /* locking for sequence_nr */
        unsigned char           sup_multicast_addr[ETH_ALEN];
 };
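
Because HSRv1 inserts a struct hsr_tag between the ethernet header and the
supervision tag, the offset of the supervision payload depends on the
protocol version. A minimal sketch of locating it, derived from the
__packed structs above; the helper name is illustrative:

/* Sketch: supervision payload offset for HSRv0 vs HSRv1 frames. */
static struct hsr_sup_payload *sup_payload_of(struct sk_buff *skb, u8 hsr_ver)
{
	unsigned int off = sizeof(struct ethhdr) +
			   sizeof(struct hsr_sup_tag);

	if (hsr_ver > 0)
		off += sizeof(struct hsr_tag);	/* HSR_HLEN extra bytes */

	return (struct hsr_sup_payload *)(skb_mac_header(skb) + off);
}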
index a2c7e4c0ac1ed8929f14786a09965663333f9619..d4d1617f43a8bfb842a8017dc51655ae1065942b 100644 (file)
@@ -23,7 +23,8 @@ static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = {
        [IFLA_HSR_SLAVE1]               = { .type = NLA_U32 },
        [IFLA_HSR_SLAVE2]               = { .type = NLA_U32 },
        [IFLA_HSR_MULTICAST_SPEC]       = { .type = NLA_U8 },
-       [IFLA_HSR_SUPERVISION_ADDR]     = { .type = NLA_BINARY, .len = ETH_ALEN },
+       [IFLA_HSR_VERSION]      = { .type = NLA_U8 },
+       [IFLA_HSR_SUPERVISION_ADDR]     = { .len = ETH_ALEN },
        [IFLA_HSR_SEQ_NR]               = { .type = NLA_U16 },
 };
 
@@ -35,7 +36,7 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
                       struct nlattr *tb[], struct nlattr *data[])
 {
        struct net_device *link[2];
-       unsigned char multicast_spec;
+       unsigned char multicast_spec, hsr_version;
 
        if (!data) {
                netdev_info(dev, "HSR: No slave devices specified\n");
@@ -62,7 +63,12 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
        else
                multicast_spec = nla_get_u8(data[IFLA_HSR_MULTICAST_SPEC]);
 
-       return hsr_dev_finalize(dev, link, multicast_spec);
+       if (!data[IFLA_HSR_VERSION])
+               hsr_version = 0;
+       else
+               hsr_version = nla_get_u8(data[IFLA_HSR_VERSION]);
+
+       return hsr_dev_finalize(dev, link, multicast_spec, hsr_version);
 }
 
 static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
@@ -115,10 +121,9 @@ static struct rtnl_link_ops hsr_link_ops __read_mostly = {
 
 
 /* attribute policy */
-/* NLA_BINARY missing in libnl; use NLA_UNSPEC in userspace instead. */
 static const struct nla_policy hsr_genl_policy[HSR_A_MAX + 1] = {
-       [HSR_A_NODE_ADDR] = { .type = NLA_BINARY, .len = ETH_ALEN },
-       [HSR_A_NODE_ADDR_B] = { .type = NLA_BINARY, .len = ETH_ALEN },
+       [HSR_A_NODE_ADDR] = { .len = ETH_ALEN },
+       [HSR_A_NODE_ADDR_B] = { .len = ETH_ALEN },
        [HSR_A_IFINDEX] = { .type = NLA_U32 },
        [HSR_A_IF1_AGE] = { .type = NLA_U32 },
        [HSR_A_IF2_AGE] = { .type = NLA_U32 },
index 7d37366cc695554ae243f940869b46d26f598b65..f5b60388d02fa255eb20362872be62eb96ce73ea 100644 (file)
@@ -22,6 +22,7 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
 {
        struct sk_buff *skb = *pskb;
        struct hsr_port *port;
+       u16 protocol;
 
        if (!skb_mac_header_was_set(skb)) {
                WARN_ONCE(1, "%s: skb invalid", __func__);
@@ -37,7 +38,8 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
                goto finish_consume;
        }
 
-       if (eth_hdr(skb)->h_proto != htons(ETH_P_PRP))
+       protocol = eth_hdr(skb)->h_proto;
+       if (protocol != htons(ETH_P_PRP) && protocol != htons(ETH_P_HSR))
                goto finish_pass;
 
        skb_push(skb, ETH_HLEN);
index b4e17a7c0df0838f7aed6ac4fa4705a70699a7a8..5ac778962e4ee4895903f707c00d7750a104b3bd 100644 (file)
@@ -41,24 +41,12 @@ static inline u32 ieee802154_addr_hash(const struct ieee802154_addr *a)
                return (((__force u64)a->extended_addr) >> 32) ^
                        (((__force u64)a->extended_addr) & 0xffffffff);
        case IEEE802154_ADDR_SHORT:
-               return (__force u32)(a->short_addr);
+               return (__force u32)(a->short_addr + (a->pan_id << 16));
        default:
                return 0;
        }
 }
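
Folding the PAN ID into the short-address hash keeps nodes that reuse the
same short address in different PANs from colliding; a sketch of the
resulting mapping (editorial):

    /* e.g. pan_id 0x0002, short_addr 0x0001 -> 0x00020001 */
    static inline u32 example_short_hash(u16 pan_id, u16 short_addr)
    {
            return (u32)short_addr + ((u32)pan_id << 16);
    }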
 
-/* private device info */
-struct lowpan_dev_info {
-       struct net_device       *wdev; /* wpan device ptr */
-       u16                     fragment_tag;
-};
-
-static inline struct
-lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
-{
-       return (struct lowpan_dev_info *)lowpan_priv(dev)->priv;
-}
-
 int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type);
 void lowpan_net_frag_exit(void);
 int lowpan_net_frag_init(void);
index 0023c904881246ab00432c1bcb3454df1c79636a..dd085db8580ee79a1a87e0c30b33d83df4a14e6a 100644 (file)
@@ -148,7 +148,7 @@ static int lowpan_newlink(struct net *src_net, struct net_device *ldev,
                return -EBUSY;
        }
 
-       lowpan_dev_info(ldev)->wdev = wdev;
+       lowpan_802154_dev(ldev)->wdev = wdev;
        /* Set the lowpan hardware address to the wpan hardware address. */
        memcpy(ldev->dev_addr, wdev->dev_addr, IEEE802154_ADDR_LEN);
        /* We need headroom for possible wpan_dev_hard_header call and tailroom
@@ -173,7 +173,7 @@ static int lowpan_newlink(struct net *src_net, struct net_device *ldev,
 
 static void lowpan_dellink(struct net_device *ldev, struct list_head *head)
 {
-       struct net_device *wdev = lowpan_dev_info(ldev)->wdev;
+       struct net_device *wdev = lowpan_802154_dev(ldev)->wdev;
 
        ASSERT_RTNL();
 
@@ -184,7 +184,7 @@ static void lowpan_dellink(struct net_device *ldev, struct list_head *head)
 
 static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
        .kind           = "lowpan",
-       .priv_size      = LOWPAN_PRIV_SIZE(sizeof(struct lowpan_dev_info)),
+       .priv_size      = LOWPAN_PRIV_SIZE(sizeof(struct lowpan_802154_dev)),
        .setup          = lowpan_setup,
        .newlink        = lowpan_newlink,
        .dellink        = lowpan_dellink,
index d4353faced35cf64f98a68fe6a749120cff411d2..e459afd16bb309ddb386f6fb762fb88abe3c4026 100644 (file)
@@ -84,7 +84,7 @@ static struct sk_buff*
 lowpan_alloc_frag(struct sk_buff *skb, int size,
                  const struct ieee802154_hdr *master_hdr, bool frag1)
 {
-       struct net_device *wdev = lowpan_dev_info(skb->dev)->wdev;
+       struct net_device *wdev = lowpan_802154_dev(skb->dev)->wdev;
        struct sk_buff *frag;
        int rc;
 
@@ -148,8 +148,8 @@ lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *ldev,
        int frag_cap, frag_len, payload_cap, rc;
        int skb_unprocessed, skb_offset;
 
-       frag_tag = htons(lowpan_dev_info(ldev)->fragment_tag);
-       lowpan_dev_info(ldev)->fragment_tag++;
+       frag_tag = htons(lowpan_802154_dev(ldev)->fragment_tag);
+       lowpan_802154_dev(ldev)->fragment_tag++;
 
        frag_hdr[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07);
        frag_hdr[1] = dgram_size & 0xff;
@@ -208,7 +208,7 @@ err:
 static int lowpan_header(struct sk_buff *skb, struct net_device *ldev,
                         u16 *dgram_size, u16 *dgram_offset)
 {
-       struct wpan_dev *wpan_dev = lowpan_dev_info(ldev)->wdev->ieee802154_ptr;
+       struct wpan_dev *wpan_dev = lowpan_802154_dev(ldev)->wdev->ieee802154_ptr;
        struct ieee802154_addr sa, da;
        struct ieee802154_mac_cb *cb = mac_cb_init(skb);
        struct lowpan_addr_info info;
@@ -248,8 +248,8 @@ static int lowpan_header(struct sk_buff *skb, struct net_device *ldev,
                cb->ackreq = wpan_dev->ackreq;
        }
 
-       return wpan_dev_hard_header(skb, lowpan_dev_info(ldev)->wdev, &da, &sa,
-                                   0);
+       return wpan_dev_hard_header(skb, lowpan_802154_dev(ldev)->wdev, &da,
+                                   &sa, 0);
 }
 
 netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev)
@@ -283,7 +283,7 @@ netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev)
        max_single = ieee802154_max_payload(&wpan_hdr);
 
        if (skb_tail_pointer(skb) - skb_network_header(skb) <= max_single) {
-               skb->dev = lowpan_dev_info(ldev)->wdev;
+               skb->dev = lowpan_802154_dev(ldev)->wdev;
                ldev->stats.tx_packets++;
                ldev->stats.tx_bytes += dgram_size;
                return dev_queue_xmit(skb);
index 3503c38954f9f9e09e88706357c838a37b46ee17..d3cbb325871872649180937a53e269010476e9e1 100644 (file)
 
 #include "ieee802154.h"
 
-static int nla_put_hwaddr(struct sk_buff *msg, int type, __le64 hwaddr)
+static int nla_put_hwaddr(struct sk_buff *msg, int type, __le64 hwaddr,
+                         int padattr)
 {
-       return nla_put_u64(msg, type, swab64((__force u64)hwaddr));
+       return nla_put_u64_64bit(msg, type, swab64((__force u64)hwaddr),
+                                padattr);
 }
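
nla_put_u64_64bit() differs from plain nla_put_u64() in that it can first
emit a zero-payload attribute of the given pad type, keeping the 64-bit
payload naturally aligned on architectures that require it. The caller
pattern used throughout this patch is then simply:

    if (nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, hwaddr,
                       IEEE802154_ATTR_PAD))
            return -EMSGSIZE;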
 
 static __le64 nla_get_hwaddr(const struct nlattr *nla)
@@ -623,7 +625,8 @@ ieee802154_llsec_fill_key_id(struct sk_buff *msg,
 
                if (desc->device_addr.mode == IEEE802154_ADDR_LONG &&
                    nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR,
-                                  desc->device_addr.extended_addr))
+                                  desc->device_addr.extended_addr,
+                                  IEEE802154_ATTR_PAD))
                        return -EMSGSIZE;
        }
 
@@ -638,7 +641,7 @@ ieee802154_llsec_fill_key_id(struct sk_buff *msg,
 
        if (desc->mode == IEEE802154_SCF_KEY_HW_INDEX &&
            nla_put_hwaddr(msg, IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED,
-                          desc->extended_source))
+                          desc->extended_source, IEEE802154_ATTR_PAD))
                return -EMSGSIZE;
 
        return 0;
@@ -1063,7 +1066,8 @@ ieee802154_nl_fill_dev(struct sk_buff *msg, u32 portid, u32 seq,
            nla_put_shortaddr(msg, IEEE802154_ATTR_PAN_ID, desc->pan_id) ||
            nla_put_shortaddr(msg, IEEE802154_ATTR_SHORT_ADDR,
                              desc->short_addr) ||
-           nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, desc->hwaddr) ||
+           nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, desc->hwaddr,
+                          IEEE802154_ATTR_PAD) ||
            nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
                        desc->frame_counter) ||
            nla_put_u8(msg, IEEE802154_ATTR_LLSEC_DEV_OVERRIDE,
@@ -1167,7 +1171,8 @@ ieee802154_nl_fill_devkey(struct sk_buff *msg, u32 portid, u32 seq,
 
        if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
            nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
-           nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, devaddr) ||
+           nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, devaddr,
+                          IEEE802154_ATTR_PAD) ||
            nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
                        devkey->frame_counter) ||
            ieee802154_llsec_fill_key_id(msg, &devkey->key_id))
index 16ef0d9f566e8436e55f0263c51585ae5ab751b9..ca207dbf673bc0d203005ea90c7b5f1d787a4840 100644 (file)
@@ -722,7 +722,8 @@ ieee802154_llsec_send_key_id(struct sk_buff *msg,
                        break;
                case NL802154_DEV_ADDR_EXTENDED:
                        if (nla_put_le64(msg, NL802154_DEV_ADDR_ATTR_EXTENDED,
-                                        desc->device_addr.extended_addr))
+                                        desc->device_addr.extended_addr,
+                                        NL802154_DEV_ADDR_ATTR_PAD))
                                return -ENOBUFS;
                        break;
                default:
@@ -742,7 +743,8 @@ ieee802154_llsec_send_key_id(struct sk_buff *msg,
                break;
        case NL802154_KEY_ID_MODE_INDEX_EXTENDED:
                if (nla_put_le64(msg, NL802154_KEY_ID_ATTR_SOURCE_EXTENDED,
-                                desc->extended_source))
+                                desc->extended_source,
+                                NL802154_KEY_ID_ATTR_PAD))
                        return -ENOBUFS;
                break;
        default:
@@ -811,7 +813,8 @@ nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags,
 
        if (nla_put_u32(msg, NL802154_ATTR_WPAN_PHY, rdev->wpan_phy_idx) ||
            nla_put_u32(msg, NL802154_ATTR_IFTYPE, wpan_dev->iftype) ||
-           nla_put_u64(msg, NL802154_ATTR_WPAN_DEV, wpan_dev_id(wpan_dev)) ||
+           nla_put_u64_64bit(msg, NL802154_ATTR_WPAN_DEV,
+                             wpan_dev_id(wpan_dev), NL802154_ATTR_PAD) ||
            nla_put_u32(msg, NL802154_ATTR_GENERATION,
                        rdev->devlist_generation ^
                        (cfg802154_rdev_list_generation << 2)))
@@ -819,7 +822,8 @@ nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags,
 
        /* address settings */
        if (nla_put_le64(msg, NL802154_ATTR_EXTENDED_ADDR,
-                        wpan_dev->extended_addr) ||
+                        wpan_dev->extended_addr,
+                        NL802154_ATTR_PAD) ||
            nla_put_le16(msg, NL802154_ATTR_SHORT_ADDR,
                         wpan_dev->short_addr) ||
            nla_put_le16(msg, NL802154_ATTR_PAN_ID, wpan_dev->pan_id))
@@ -1074,6 +1078,11 @@ static int nl802154_set_pan_id(struct sk_buff *skb, struct genl_info *info)
        if (netif_running(dev))
                return -EBUSY;
 
+       if (wpan_dev->lowpan_dev) {
+               if (netif_running(wpan_dev->lowpan_dev))
+                       return -EBUSY;
+       }
+
        /* don't change address fields on monitor */
        if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR ||
            !info->attrs[NL802154_ATTR_PAN_ID])
@@ -1105,6 +1114,11 @@ static int nl802154_set_short_addr(struct sk_buff *skb, struct genl_info *info)
        if (netif_running(dev))
                return -EBUSY;
 
+       if (wpan_dev->lowpan_dev) {
+               if (netif_running(wpan_dev->lowpan_dev))
+                       return -EBUSY;
+       }
+
        /* don't change address fields on monitor */
        if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR ||
            !info->attrs[NL802154_ATTR_SHORT_ADDR])
@@ -1614,7 +1628,7 @@ static int nl802154_send_device(struct sk_buff *msg, u32 cmd, u32 portid,
            nla_put_le16(msg, NL802154_DEV_ATTR_SHORT_ADDR,
                         dev_desc->short_addr) ||
            nla_put_le64(msg, NL802154_DEV_ATTR_EXTENDED_ADDR,
-                        dev_desc->hwaddr) ||
+                        dev_desc->hwaddr, NL802154_DEV_ATTR_PAD) ||
            nla_put_u8(msg, NL802154_DEV_ATTR_SECLEVEL_EXEMPT,
                       dev_desc->seclevel_exempt) ||
            nla_put_u32(msg, NL802154_DEV_ATTR_KEY_MODE, dev_desc->key_mode))
@@ -1778,7 +1792,7 @@ static int nl802154_send_devkey(struct sk_buff *msg, u32 cmd, u32 portid,
                goto nla_put_failure;
 
        if (nla_put_le64(msg, NL802154_DEVKEY_ATTR_EXTENDED_ADDR,
-                        extended_addr) ||
+                        extended_addr, NL802154_DEVKEY_ATTR_PAD) ||
            nla_put_u32(msg, NL802154_DEVKEY_ATTR_FRAME_COUNTER,
                        devkey->frame_counter))
                goto nla_put_failure;
index 8217cd22f921d1eef025f2cda12bcb7f4b751384..2e6e65fc4d203b91a06075e02d2dd1ac8141f3db 100644 (file)
@@ -1195,12 +1195,12 @@ EXPORT_SYMBOL(inet_sk_rebuild_header);
 static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
                                        netdev_features_t features)
 {
+       bool udpfrag = false, fixedid = false, encap;
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        const struct net_offload *ops;
        unsigned int offset = 0;
-       bool udpfrag, encap;
        struct iphdr *iph;
-       int proto;
+       int proto, tot_len;
        int nhoff;
        int ihl;
        int id;
@@ -1217,7 +1217,9 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
                       SKB_GSO_TCPV6 |
                       SKB_GSO_UDP_TUNNEL |
                       SKB_GSO_UDP_TUNNEL_CSUM |
+                      SKB_GSO_TCP_FIXEDID |
                       SKB_GSO_TUNNEL_REMCSUM |
+                      SKB_GSO_PARTIAL |
                       0)))
                goto out;
 
@@ -1248,11 +1250,14 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
 
        segs = ERR_PTR(-EPROTONOSUPPORT);
 
-       if (skb->encapsulation &&
-           skb_shinfo(skb)->gso_type & (SKB_GSO_SIT|SKB_GSO_IPIP))
-               udpfrag = proto == IPPROTO_UDP && encap;
-       else
-               udpfrag = proto == IPPROTO_UDP && !skb->encapsulation;
+       if (!skb->encapsulation || encap) {
+               udpfrag = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
+               fixedid = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID);
+
+               /* fixed ID is invalid if DF bit is not set */
+               if (fixedid && !(iph->frag_off & htons(IP_DF)))
+                       goto out;
+       }
 
        ops = rcu_dereference(inet_offloads[proto]);
        if (likely(ops && ops->callbacks.gso_segment))
@@ -1265,15 +1270,25 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
        do {
                iph = (struct iphdr *)(skb_mac_header(skb) + nhoff);
                if (udpfrag) {
-                       iph->id = htons(id);
                        iph->frag_off = htons(offset >> 3);
                        if (skb->next)
                                iph->frag_off |= htons(IP_MF);
                        offset += skb->len - nhoff - ihl;
+                       tot_len = skb->len - nhoff;
+               } else if (skb_is_gso(skb)) {
+                       if (!fixedid) {
+                               iph->id = htons(id);
+                               id += skb_shinfo(skb)->gso_segs;
+                       }
+                       tot_len = skb_shinfo(skb)->gso_size +
+                                 SKB_GSO_CB(skb)->data_offset +
+                                 skb->head - (unsigned char *)iph;
                } else {
-                       iph->id = htons(id++);
+                       if (!fixedid)
+                               iph->id = htons(id++);
+                       tot_len = skb->len - nhoff;
                }
-               iph->tot_len = htons(skb->len - nhoff);
+               iph->tot_len = htons(tot_len);
                ip_send_check(iph);
                if (encap)
                        skb_reset_inner_headers(skb);
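
For GSO_PARTIAL skbs the device replicates the headers per segment, so
iph->tot_len has to describe one MSS-sized segment rather than the whole
super-packet; the expression above works out to gso_size plus the header
bytes between the IP header and the payload start. A rough illustration
(editorial):

    /* 20B IPv4 + 20B TCP, gso_size (MSS) = 1448:
     *   tot_len = 1448 + 40 = 1488 for every segment, not skb->len - nhoff
     */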
@@ -1325,6 +1340,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
 
        for (p = *head; p; p = p->next) {
                struct iphdr *iph2;
+               u16 flush_id;
 
                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;
@@ -1348,16 +1364,36 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
                        (iph->tos ^ iph2->tos) |
                        ((iph->frag_off ^ iph2->frag_off) & htons(IP_DF));
 
-               /* Save the IP ID check to be included later when we get to
-                * the transport layer so only the inner most IP ID is checked.
-                * This is because some GSO/TSO implementations do not
-                * correctly increment the IP ID for the outer hdrs.
-                */
-               NAPI_GRO_CB(p)->flush_id =
-                           ((u16)(ntohs(iph2->id) + NAPI_GRO_CB(p)->count) ^ id);
                NAPI_GRO_CB(p)->flush |= flush;
+
+               /* We need to store the IP ID check to be included later
+                * when we can verify that this packet does in fact belong
+                * to a given flow.
+                */
+               flush_id = (u16)(id - ntohs(iph2->id));
+
+               /* This bit of code makes it much easier for us to identify
+                * the cases where we are doing atomic vs non-atomic IP ID
+                * checks.  Specifically, an atomic check can return IP ID
+                * values 0 through 0xFFFF, while a non-atomic check can only
+                * return 0 or 0xFFFF.
+                */
+               if (!NAPI_GRO_CB(p)->is_atomic ||
+                   !(iph->frag_off & htons(IP_DF))) {
+                       flush_id ^= NAPI_GRO_CB(p)->count;
+                       flush_id = flush_id ? 0xFFFF : 0;
+               }
+
+               /* If the previous IP ID value was based on an atomic
+                * datagram we can overwrite the value and ignore it.
+                */
+               if (NAPI_GRO_CB(skb)->is_atomic)
+                       NAPI_GRO_CB(p)->flush_id = flush_id;
+               else
+                       NAPI_GRO_CB(p)->flush_id |= flush_id;
        }
 
+       NAPI_GRO_CB(skb)->is_atomic = !!(iph->frag_off & htons(IP_DF));
        NAPI_GRO_CB(skb)->flush |= flush;
        skb_set_network_header(skb, off);
        /* The above will be needed by the transport layer if there is one
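
The two check modes can be restated compactly (editorial sketch mirroring
the logic above):

    static u16 example_flush_id(u16 id, u16 prev_id, u16 count,
                                bool atomic, bool df_set)
    {
            u16 flush_id = id - prev_id;

            /* non-atomic IDs collapse to 0 (consistent) or 0xFFFF (flush) */
            if (!atomic || !df_set) {
                    flush_id ^= count;
                    flush_id = flush_id ? 0xFFFF : 0;
            }
            return flush_id;
    }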
index c34c7544d1db2ac1c580585a204b8f459f9a3a43..89a8cac4726a5e354371bb0c76a7e60cd2d7026e 100644 (file)
@@ -436,7 +436,7 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
        if (IS_ERR(rt))
                return 1;
        if (rt->dst.dev != dev) {
-               NET_INC_STATS_BH(net, LINUX_MIB_ARPFILTER);
+               __NET_INC_STATS(net, LINUX_MIB_ARPFILTER);
                flag = 1;
        }
        ip_rt_put(rt);
index 8a9246deccfeb0c7ed6a4272c60a907750e907e8..ef2ebeb89d0fb0cb9cb5da4aea13183097baec8c 100644 (file)
@@ -110,6 +110,7 @@ struct fib_table *fib_new_table(struct net *net, u32 id)
        hlist_add_head_rcu(&tb->tb_hlist, &net->ipv4.fib_table_hash[h]);
        return tb;
 }
+EXPORT_SYMBOL_GPL(fib_new_table);
 
 /* caller must hold either rtnl or rcu read lock */
 struct fib_table *fib_get_table(struct net *net, u32 id)
@@ -904,7 +905,11 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
        if (ifa->ifa_flags & IFA_F_SECONDARY) {
                prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
                if (!prim) {
-                       pr_warn("%s: bug: prim == NULL\n", __func__);
+                       /* if the device has been deleted, we don't perform
+                        * address promotion
+                        */
+                       if (!in_dev->dead)
+                               pr_warn("%s: bug: prim == NULL\n", __func__);
                        return;
                }
                if (iprim && iprim != prim) {
index d97268e8ff103bd229de4fb99bbb40f51e61082d..ab64d9f2eef91dc22829a2eb442a6c887c9a8838 100644 (file)
@@ -1559,21 +1559,45 @@ int fib_sync_up(struct net_device *dev, unsigned int nh_flags)
 }
 
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
+static bool fib_good_nh(const struct fib_nh *nh)
+{
+       int state = NUD_REACHABLE;
+
+       if (nh->nh_scope == RT_SCOPE_LINK) {
+               struct neighbour *n;
+
+               rcu_read_lock_bh();
+
+               n = __ipv4_neigh_lookup_noref(nh->nh_dev, nh->nh_gw);
+               if (n)
+                       state = n->nud_state;
+
+               rcu_read_unlock_bh();
+       }
+
+       return !!(state & NUD_VALID);
+}
 
 void fib_select_multipath(struct fib_result *res, int hash)
 {
        struct fib_info *fi = res->fi;
+       struct net *net = fi->fib_net;
+       bool first = false;
 
        for_nexthops(fi) {
                if (hash > atomic_read(&nh->nh_upper_bound))
                        continue;
 
-               res->nh_sel = nhsel;
-               return;
+               if (!net->ipv4.sysctl_fib_multipath_use_neigh ||
+                   fib_good_nh(nh)) {
+                       res->nh_sel = nhsel;
+                       return;
+               }
+               if (!first) {
+                       res->nh_sel = nhsel;
+                       first = true;
+               }
        } endfor_nexthops(fi);
-
-       /* Race condition: route has just become dead. */
-       res->nh_sel = 0;
 }
 #endif
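
When the sysctl backing net->ipv4.sysctl_fib_multipath_use_neigh is enabled
(e.g. sysctl -w net.ipv4.fib_multipath_use_neigh=1, assuming the standard
name), nexthop selection skips gateways without a valid neighbour entry and
falls back to the first in-range nexthop when none qualifies.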
 
index 5738b97710676237ef2def3e149c4b82b7eebd66..eeec7d60e5fd171bde955111c62491d6ce3b4ec5 100644 (file)
@@ -202,6 +202,9 @@ static struct sk_buff **fou_gro_receive(struct sock *sk,
         */
        NAPI_GRO_CB(skb)->encap_mark = 0;
 
+       /* Flag this frame as already having an outer encap header */
+       NAPI_GRO_CB(skb)->is_fou = 1;
+
        rcu_read_lock();
        offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
        ops = rcu_dereference(offloads[proto]);
@@ -224,8 +227,6 @@ static int fou_gro_complete(struct sock *sk, struct sk_buff *skb,
        int err = -ENOSYS;
        const struct net_offload **offloads;
 
-       udp_tunnel_gro_complete(skb, nhoff);
-
        rcu_read_lock();
        offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
        ops = rcu_dereference(offloads[proto]);
@@ -234,6 +235,8 @@ static int fou_gro_complete(struct sock *sk, struct sk_buff *skb,
 
        err = ops->callbacks.gro_complete(skb, nhoff);
 
+       skb_set_inner_mac_header(skb, nhoff);
+
 out_unlock:
        rcu_read_unlock();
 
@@ -367,6 +370,9 @@ static struct sk_buff **gue_gro_receive(struct sock *sk,
         */
        NAPI_GRO_CB(skb)->encap_mark = 0;
 
+       /* Flag this frame as already having an outer encap header */
+       NAPI_GRO_CB(skb)->is_fou = 1;
+
        rcu_read_lock();
        offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
        ops = rcu_dereference(offloads[guehdr->proto_ctype]);
@@ -406,6 +412,8 @@ static int gue_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
 
        err = ops->callbacks.gro_complete(skb, nhoff + guehlen);
 
+       skb_set_inner_mac_header(skb, nhoff + guehlen);
+
 out_unlock:
        rcu_read_unlock();
        return err;
@@ -796,11 +804,11 @@ int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
        int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM ? SKB_GSO_UDP_TUNNEL_CSUM :
                                                       SKB_GSO_UDP_TUNNEL;
        __be16 sport;
+       int err;
 
-       skb = iptunnel_handle_offloads(skb, type);
-
-       if (IS_ERR(skb))
-               return PTR_ERR(skb);
+       err = iptunnel_handle_offloads(skb, type);
+       if (err)
+               return err;
 
        sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
                                               skb, 0, 0, false);
@@ -820,6 +828,7 @@ int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
        __be16 sport;
        void *data;
        bool need_priv = false;
+       int err;
 
        if ((e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) &&
            skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -830,10 +839,9 @@ int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
 
        optlen += need_priv ? GUE_LEN_PRIV : 0;
 
-       skb = iptunnel_handle_offloads(skb, type);
-
-       if (IS_ERR(skb))
-               return PTR_ERR(skb);
+       err = iptunnel_handle_offloads(skb, type);
+       if (err)
+               return err;
 
        /* Get source port (based on flow hash) before skb_push */
        sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
index d9c552a721fcb19f17ac02c1972be935d8f2f11e..d78e2eefc0f7348fc721f81341c5de766f99492f 100644 (file)
@@ -60,6 +60,67 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version)
 }
 EXPORT_SYMBOL_GPL(gre_del_protocol);
 
+/* Fills in tpi and returns header length to be pulled. */
+int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
+                    bool *csum_err)
+{
+       const struct gre_base_hdr *greh;
+       __be32 *options;
+       int hdr_len;
+
+       if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
+               return -EINVAL;
+
+       greh = (struct gre_base_hdr *)skb_transport_header(skb);
+       if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
+               return -EINVAL;
+
+       tpi->flags = gre_flags_to_tnl_flags(greh->flags);
+       hdr_len = gre_calc_hlen(tpi->flags);
+
+       if (!pskb_may_pull(skb, hdr_len))
+               return -EINVAL;
+
+       greh = (struct gre_base_hdr *)skb_transport_header(skb);
+       tpi->proto = greh->protocol;
+
+       options = (__be32 *)(greh + 1);
+       if (greh->flags & GRE_CSUM) {
+               if (skb_checksum_simple_validate(skb)) {
+                       *csum_err = true;
+                       return -EINVAL;
+               }
+
+               skb_checksum_try_convert(skb, IPPROTO_GRE, 0,
+                                        null_compute_pseudo);
+               options++;
+       }
+
+       if (greh->flags & GRE_KEY) {
+               tpi->key = *options;
+               options++;
+       } else {
+               tpi->key = 0;
+       }
+       if (unlikely(greh->flags & GRE_SEQ)) {
+               tpi->seq = *options;
+               options++;
+       } else {
+               tpi->seq = 0;
+       }
+       /* WCCP version 1 and 2 protocol decoding.
+        * - Change the protocol to IP.
+        * - When dealing with WCCPv2, skip the extra 4 bytes in the GRE header.
+        */
+       if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
+               tpi->proto = htons(ETH_P_IP);
+               if ((*(u8 *)options & 0xF0) != 0x40)
+                       hdr_len += 4;
+       }
+       return hdr_len;
+}
+EXPORT_SYMBOL(gre_parse_header);
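
Unlike the ipgre-private parser it replaces (removed later in this patch),
this helper leaves the header in place and only reports its length, so each
caller decides when to pull it; a minimal caller sketch:

    hdr_len = gre_parse_header(skb, &tpi, &csum_err);
    if (hdr_len < 0)
            goto drop;
    if (iptunnel_pull_header(skb, hdr_len, tpi->proto, false))
            goto drop;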
+
 static int gre_rcv(struct sk_buff *skb)
 {
        const struct gre_protocol *proto;
index c47539d04b88a78457e1f6badc5770cc931b469c..e88190a8699ad9c900be71d80d8ff3f9e12471ff 100644 (file)
@@ -32,10 +32,12 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
                                  SKB_GSO_UDP |
                                  SKB_GSO_DODGY |
                                  SKB_GSO_TCP_ECN |
+                                 SKB_GSO_TCP_FIXEDID |
                                  SKB_GSO_GRE |
                                  SKB_GSO_GRE_CSUM |
                                  SKB_GSO_IPIP |
-                                 SKB_GSO_SIT)))
+                                 SKB_GSO_SIT |
+                                 SKB_GSO_PARTIAL)))
                goto out;
 
        if (!skb->encapsulation)
@@ -86,7 +88,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
        skb = segs;
        do {
                struct gre_base_hdr *greh;
-               __be32 *pcsum;
+               __sum16 *pcsum;
 
                /* Set up inner headers if we are offloading inner checksum */
                if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -106,10 +108,25 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
                        continue;
 
                greh = (struct gre_base_hdr *)skb_transport_header(skb);
-               pcsum = (__be32 *)(greh + 1);
+               pcsum = (__sum16 *)(greh + 1);
+
+               if (skb_is_gso(skb)) {
+                       unsigned int partial_adj;
+
+                       /* Adjust checksum to account for the fact that
+                        * the partial checksum is based on actual size
+                        * whereas headers should be based on MSS size.
+                        */
+                       partial_adj = skb->len + skb_headroom(skb) -
+                                     SKB_GSO_CB(skb)->data_offset -
+                                     skb_shinfo(skb)->gso_size;
+                       *pcsum = ~csum_fold((__force __wsum)htonl(partial_adj));
+               } else {
+                       *pcsum = 0;
+               }
 
-               *pcsum = 0;
-               *(__sum16 *)pcsum = gso_make_checksum(skb, 0);
+               *(pcsum + 1) = 0;
+               *pcsum = gso_make_checksum(skb, 0);
        } while ((skb = skb->next));
 out:
        return segs;
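
Checksum arithmetic is linear, so seeding the field with
~csum_fold(htonl(partial_adj)) pre-subtracts the bytes beyond one MSS before
gso_make_checksum() folds in the rest; roughly (editorial):

    /* 4 coalesced 1448-byte segments: the partial checksum covers 5792
     * payload bytes, but each emitted segment carries 1448, so the
     * surplus partial_adj = 5792 - 1448 = 4344 is cancelled from the seed.
     */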
@@ -150,6 +167,14 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
        if ((greh->flags & ~(GRE_KEY|GRE_CSUM)) != 0)
                goto out;
 
+       /* We can only support GRE_CSUM if we can track the location of
+        * the GRE header.  In the case of FOU/GUE we cannot because the
+        * outer UDP header displaces the GRE header, leaving us in a state
+        * of limbo.
+        */
+       if ((greh->flags & GRE_CSUM) && NAPI_GRO_CB(skb)->is_fou)
+               goto out;
+
        type = greh->protocol;
 
        rcu_read_lock();
@@ -267,6 +292,18 @@ static const struct net_offload gre_offload = {
 
 static int __init gre_offload_init(void)
 {
-       return inet_add_offload(&gre_offload, IPPROTO_GRE);
+       int err;
+
+       err = inet_add_offload(&gre_offload, IPPROTO_GRE);
+#if IS_ENABLED(CONFIG_IPV6)
+       if (err)
+               return err;
+
+       err = inet6_add_offload(&gre_offload, IPPROTO_GRE);
+       if (err)
+               inet_del_offload(&gre_offload, IPPROTO_GRE);
+#endif
+
+       return err;
 }
 device_initcall(gre_offload_init);
index 6333489771ed07bb2509500a2e48fed05643281a..38abe70e595fabf472aa8fe094e71d070f781164 100644 (file)
@@ -363,7 +363,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
                           icmp_param->data_len+icmp_param->head_len,
                           icmp_param->head_len,
                           ipc, rt, MSG_DONTWAIT) < 0) {
-               ICMP_INC_STATS_BH(sock_net(sk), ICMP_MIB_OUTERRORS);
+               __ICMP_INC_STATS(sock_net(sk), ICMP_MIB_OUTERRORS);
                ip_flush_pending_frames(sk);
        } else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
                struct icmphdr *icmph = icmp_hdr(skb);
@@ -744,7 +744,7 @@ static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
         * avoid additional coding at protocol handlers.
         */
        if (!pskb_may_pull(skb, iph->ihl * 4 + 8)) {
-               ICMP_INC_STATS_BH(dev_net(skb->dev), ICMP_MIB_INERRORS);
+               __ICMP_INC_STATS(dev_net(skb->dev), ICMP_MIB_INERRORS);
                return;
        }
 
@@ -865,7 +865,7 @@ static bool icmp_unreach(struct sk_buff *skb)
 out:
        return true;
 out_err:
-       ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
+       __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
        return false;
 }
 
@@ -877,7 +877,7 @@ out_err:
 static bool icmp_redirect(struct sk_buff *skb)
 {
        if (skb->len < sizeof(struct iphdr)) {
-               ICMP_INC_STATS_BH(dev_net(skb->dev), ICMP_MIB_INERRORS);
+               __ICMP_INC_STATS(dev_net(skb->dev), ICMP_MIB_INERRORS);
                return false;
        }
 
@@ -956,7 +956,7 @@ static bool icmp_timestamp(struct sk_buff *skb)
        return true;
 
 out_err:
-       ICMP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS);
+       __ICMP_INC_STATS(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS);
        return false;
 }
 
@@ -996,7 +996,7 @@ int icmp_rcv(struct sk_buff *skb)
                skb_set_network_header(skb, nh);
        }
 
-       ICMP_INC_STATS_BH(net, ICMP_MIB_INMSGS);
+       __ICMP_INC_STATS(net, ICMP_MIB_INMSGS);
 
        if (skb_checksum_simple_validate(skb))
                goto csum_error;
@@ -1006,7 +1006,7 @@ int icmp_rcv(struct sk_buff *skb)
 
        icmph = icmp_hdr(skb);
 
-       ICMPMSGIN_INC_STATS_BH(net, icmph->type);
+       ICMPMSGIN_INC_STATS(net, icmph->type);
        /*
         *      18 is the highest 'known' ICMP type. Anything else is a mystery
         *
@@ -1052,9 +1052,9 @@ drop:
        kfree_skb(skb);
        return 0;
 csum_error:
-       ICMP_INC_STATS_BH(net, ICMP_MIB_CSUMERRORS);
+       __ICMP_INC_STATS(net, ICMP_MIB_CSUMERRORS);
 error:
-       ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
+       __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
        goto drop;
 }
 
index ab69da2d2a77bf4a54eec468cd7f0b8ddea9bb60..fa8c39804bdbae867dd5c08f1e308202c1aa1c52 100644 (file)
@@ -427,7 +427,7 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
 route_err:
        ip_rt_put(rt);
 no_route:
-       IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
+       __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
        return NULL;
 }
 EXPORT_SYMBOL_GPL(inet_csk_route_req);
@@ -466,7 +466,7 @@ route_err:
        ip_rt_put(rt);
 no_route:
        rcu_read_unlock();
-       IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
+       __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
        return NULL;
 }
 EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);
@@ -706,7 +706,9 @@ void inet_csk_destroy_sock(struct sock *sk)
 
        sk_refcnt_debug_release(sk);
 
+       local_bh_disable();
        percpu_counter_dec(sk->sk_prot->orphan_count);
+       local_bh_enable();
        sock_put(sk);
 }
 EXPORT_SYMBOL(inet_csk_destroy_sock);
index bd591eb81ec9c5b995965b3c9f8199d1d8563066..25af1243649b7083a5b20ef7116635acf0f4ceef 100644 (file)
@@ -66,7 +66,7 @@ static void inet_diag_unlock_handler(const struct inet_diag_handler *handler)
        mutex_unlock(&inet_diag_table_mutex);
 }
 
-static void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk)
+void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk)
 {
        r->idiag_family = sk->sk_family;
 
@@ -89,6 +89,7 @@ static void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk)
        r->id.idiag_dst[0] = sk->sk_daddr;
        }
 }
+EXPORT_SYMBOL_GPL(inet_diag_msg_common_fill);
 
 static size_t inet_sk_attr_size(void)
 {
@@ -104,13 +105,50 @@ static size_t inet_sk_attr_size(void)
                + 64;
 }
 
+int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
+                            struct inet_diag_msg *r, int ext,
+                            struct user_namespace *user_ns)
+{
+       const struct inet_sock *inet = inet_sk(sk);
+
+       if (nla_put_u8(skb, INET_DIAG_SHUTDOWN, sk->sk_shutdown))
+               goto errout;
+
+       /* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
+        * hence this needs to be included regardless of socket family.
+        */
+       if (ext & (1 << (INET_DIAG_TOS - 1)))
+               if (nla_put_u8(skb, INET_DIAG_TOS, inet->tos) < 0)
+                       goto errout;
+
+#if IS_ENABLED(CONFIG_IPV6)
+       if (r->idiag_family == AF_INET6) {
+               if (ext & (1 << (INET_DIAG_TCLASS - 1)))
+                       if (nla_put_u8(skb, INET_DIAG_TCLASS,
+                                      inet6_sk(sk)->tclass) < 0)
+                               goto errout;
+
+               if (((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) &&
+                   nla_put_u8(skb, INET_DIAG_SKV6ONLY, ipv6_only_sock(sk)))
+                       goto errout;
+       }
+#endif
+
+       r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
+       r->idiag_inode = sock_i_ino(sk);
+
+       return 0;
+errout:
+       return 1;
+}
+EXPORT_SYMBOL_GPL(inet_diag_msg_attrs_fill);
+
 int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
                      struct sk_buff *skb, const struct inet_diag_req_v2 *req,
                      struct user_namespace *user_ns,
                      u32 portid, u32 seq, u16 nlmsg_flags,
                      const struct nlmsghdr *unlh)
 {
-       const struct inet_sock *inet = inet_sk(sk);
        const struct tcp_congestion_ops *ca_ops;
        const struct inet_diag_handler *handler;
        int ext = req->idiag_ext;
@@ -135,32 +173,9 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
        r->idiag_timer = 0;
        r->idiag_retrans = 0;
 
-       if (nla_put_u8(skb, INET_DIAG_SHUTDOWN, sk->sk_shutdown))
+       if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns))
                goto errout;
 
-       /* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
-        * hence this needs to be included regardless of socket family.
-        */
-       if (ext & (1 << (INET_DIAG_TOS - 1)))
-               if (nla_put_u8(skb, INET_DIAG_TOS, inet->tos) < 0)
-                       goto errout;
-
-#if IS_ENABLED(CONFIG_IPV6)
-       if (r->idiag_family == AF_INET6) {
-               if (ext & (1 << (INET_DIAG_TCLASS - 1)))
-                       if (nla_put_u8(skb, INET_DIAG_TCLASS,
-                                      inet6_sk(sk)->tclass) < 0)
-                               goto errout;
-
-               if (((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) &&
-                   nla_put_u8(skb, INET_DIAG_SKV6ONLY, ipv6_only_sock(sk)))
-                       goto errout;
-       }
-#endif
-
-       r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
-       r->idiag_inode = sock_i_ino(sk);
-
        if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
                struct inet_diag_meminfo minfo = {
                        .idiag_rmem = sk_rmem_alloc_get(sk),
@@ -182,31 +197,32 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
                goto out;
        }
 
-#define EXPIRES_IN_MS(tmo)  DIV_ROUND_UP((tmo - jiffies) * 1000, HZ)
-
        if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
            icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
            icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
                r->idiag_timer = 1;
                r->idiag_retrans = icsk->icsk_retransmits;
-               r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
+               r->idiag_expires =
+                       jiffies_to_msecs(icsk->icsk_timeout - jiffies);
        } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
                r->idiag_timer = 4;
                r->idiag_retrans = icsk->icsk_probes_out;
-               r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
+               r->idiag_expires =
+                       jiffies_to_msecs(icsk->icsk_timeout - jiffies);
        } else if (timer_pending(&sk->sk_timer)) {
                r->idiag_timer = 2;
                r->idiag_retrans = icsk->icsk_probes_out;
-               r->idiag_expires = EXPIRES_IN_MS(sk->sk_timer.expires);
+               r->idiag_expires =
+                       jiffies_to_msecs(sk->sk_timer.expires - jiffies);
        } else {
                r->idiag_timer = 0;
                r->idiag_expires = 0;
        }
-#undef EXPIRES_IN_MS
 
        if ((ext & (1 << (INET_DIAG_INFO - 1))) && handler->idiag_info_size) {
-               attr = nla_reserve(skb, INET_DIAG_INFO,
-                                  handler->idiag_info_size);
+               attr = nla_reserve_64bit(skb, INET_DIAG_INFO,
+                                        handler->idiag_info_size,
+                                        INET_DIAG_PAD);
                if (!attr)
                        goto errout;
 
@@ -1063,7 +1079,9 @@ int inet_diag_handler_get_info(struct sk_buff *skb, struct sock *sk)
        }
 
        attr = handler->idiag_info_size
-               ? nla_reserve(skb, INET_DIAG_INFO, handler->idiag_info_size)
+               ? nla_reserve_64bit(skb, INET_DIAG_INFO,
+                                   handler->idiag_info_size,
+                                   INET_DIAG_PAD)
                : NULL;
        if (attr)
                info = nla_data(attr);
index fcadb670f50b8fd3d0739111da014cd0abbc13e1..77c20a489218c9cf1865f397b83f43bc58457dc6 100644 (file)
@@ -360,7 +360,7 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
        __sk_nulls_add_node_rcu(sk, &head->chain);
        if (tw) {
                sk_nulls_del_node_init_rcu((struct sock *)tw);
-               NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
+               __NET_INC_STATS(net, LINUX_MIB_TIMEWAITRECYCLED);
        }
        spin_unlock(lock);
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
@@ -438,6 +438,7 @@ static int inet_reuseport_add_sock(struct sock *sk,
                                                     const struct sock *sk2,
                                                     bool match_wildcard))
 {
+       struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
        struct sock *sk2;
        kuid_t uid = sock_i_uid(sk);
 
@@ -446,6 +447,7 @@ static int inet_reuseport_add_sock(struct sock *sk,
                    sk2->sk_family == sk->sk_family &&
                    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
                    sk2->sk_bound_dev_if == sk->sk_bound_dev_if &&
+                   inet_csk(sk2)->icsk_bind_hash == tb &&
                    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
                    saddr_same(sk, sk2, false))
                        return reuseport_add_sock(sk, sk2);
@@ -479,7 +481,11 @@ int __inet_hash(struct sock *sk, struct sock *osk,
                if (err)
                        goto unlock;
        }
-       hlist_add_head_rcu(&sk->sk_node, &ilb->head);
+       if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
+               sk->sk_family == AF_INET6)
+               hlist_add_tail_rcu(&sk->sk_node, &ilb->head);
+       else
+               hlist_add_head_rcu(&sk->sk_node, &ilb->head);
        sock_set_flag(sk, SOCK_RCU_FREE);
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 unlock:
index c67f9bd7699c5a1d210f214fd54aeea6944ccecb..2065816748066986f0356df168c2d76fe2d53d85 100644 (file)
@@ -94,7 +94,7 @@ static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
 }
 
 /*
- * Enter the time wait state. This is called with locally disabled BH.
+ * Enter the time wait state.
  * Essentially we whip up a timewait bucket, copy the relevant info into it
  * from the SK, and mess with hash chains and list linkage.
  */
@@ -112,7 +112,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
         */
        bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
                        hashinfo->bhash_size)];
-       spin_lock(&bhead->lock);
+       spin_lock_bh(&bhead->lock);
        tw->tw_tb = icsk->icsk_bind_hash;
        WARN_ON(!icsk->icsk_bind_hash);
        inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
@@ -138,7 +138,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
        if (__sk_nulls_del_node_init_rcu(sk))
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 
-       spin_unlock(lock);
+       spin_unlock_bh(lock);
 }
 EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
 
@@ -147,9 +147,9 @@ static void tw_timer_handler(unsigned long data)
        struct inet_timewait_sock *tw = (struct inet_timewait_sock *)data;
 
        if (tw->tw_kill)
-               NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
+               __NET_INC_STATS(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
        else
-               NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITED);
+               __NET_INC_STATS(twsk_net(tw), LINUX_MIB_TIMEWAITED);
        inet_twsk_kill(tw);
 }
 
index af18f1e4889eec42a162a370c2d0d049fe2a82bb..cbfb1808fcc490b94dc0bbdab6142acb8fa37815 100644 (file)
@@ -65,8 +65,8 @@ static int ip_forward_finish(struct net *net, struct sock *sk, struct sk_buff *s
 {
        struct ip_options *opt  = &(IPCB(skb)->opt);
 
-       IP_INC_STATS_BH(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
-       IP_ADD_STATS_BH(net, IPSTATS_MIB_OUTOCTETS, skb->len);
+       __IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
+       __IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len);
 
        if (unlikely(opt->optlen))
                ip_forward_options(skb);
@@ -157,7 +157,7 @@ sr_failed:
 
 too_many_hops:
        /* Tell the sender its packet died... */
-       IP_INC_STATS_BH(net, IPSTATS_MIB_INHDRERRORS);
+       __IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
        icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
 drop:
        kfree_skb(skb);
index efbd47d1a53155496e236eb7d4d0c893ef533d1c..bbe7f72db9c157ba2d6c5292637c2f58ad39a123 100644 (file)
@@ -204,14 +204,14 @@ static void ip_expire(unsigned long arg)
                goto out;
 
        ipq_kill(qp);
-       IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
+       __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
 
        if (!inet_frag_evicting(&qp->q)) {
                struct sk_buff *head = qp->q.fragments;
                const struct iphdr *iph;
                int err;
 
-               IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT);
+               __IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);
 
                if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments)
                        goto out;
@@ -291,7 +291,7 @@ static int ip_frag_too_far(struct ipq *qp)
                struct net *net;
 
                net = container_of(qp->q.net, struct net, ipv4.frags);
-               IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
+               __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
        }
 
        return rc;
@@ -635,7 +635,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
 
        ip_send_check(iph);
 
-       IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
+       __IP_INC_STATS(net, IPSTATS_MIB_REASMOKS);
        qp->q.fragments = NULL;
        qp->q.fragments_tail = NULL;
        return 0;
@@ -647,7 +647,7 @@ out_nomem:
 out_oversize:
        net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr);
 out_fail:
-       IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
+       __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
        return err;
 }
 
@@ -658,7 +658,7 @@ int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
        int vif = l3mdev_master_ifindex_rcu(dev);
        struct ipq *qp;
 
-       IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
+       __IP_INC_STATS(net, IPSTATS_MIB_REASMREQDS);
        skb_orphan(skb);
 
        /* Lookup (or create) queue header */
@@ -675,7 +675,7 @@ int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
                return ret;
        }
 
-       IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
+       __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
        kfree_skb(skb);
        return -ENOMEM;
 }
index 31936d387cfd58b75218d340f683880e486c7351..2b267e71ebf5f25dc38587c6b7922b93e890694f 100644 (file)
@@ -122,125 +122,6 @@ static int ipgre_tunnel_init(struct net_device *dev);
 static int ipgre_net_id __read_mostly;
 static int gre_tap_net_id __read_mostly;
 
-static int ip_gre_calc_hlen(__be16 o_flags)
-{
-       int addend = 4;
-
-       if (o_flags & TUNNEL_CSUM)
-               addend += 4;
-       if (o_flags & TUNNEL_KEY)
-               addend += 4;
-       if (o_flags & TUNNEL_SEQ)
-               addend += 4;
-       return addend;
-}
-
-static __be16 gre_flags_to_tnl_flags(__be16 flags)
-{
-       __be16 tflags = 0;
-
-       if (flags & GRE_CSUM)
-               tflags |= TUNNEL_CSUM;
-       if (flags & GRE_ROUTING)
-               tflags |= TUNNEL_ROUTING;
-       if (flags & GRE_KEY)
-               tflags |= TUNNEL_KEY;
-       if (flags & GRE_SEQ)
-               tflags |= TUNNEL_SEQ;
-       if (flags & GRE_STRICT)
-               tflags |= TUNNEL_STRICT;
-       if (flags & GRE_REC)
-               tflags |= TUNNEL_REC;
-       if (flags & GRE_VERSION)
-               tflags |= TUNNEL_VERSION;
-
-       return tflags;
-}
-
-static __be16 tnl_flags_to_gre_flags(__be16 tflags)
-{
-       __be16 flags = 0;
-
-       if (tflags & TUNNEL_CSUM)
-               flags |= GRE_CSUM;
-       if (tflags & TUNNEL_ROUTING)
-               flags |= GRE_ROUTING;
-       if (tflags & TUNNEL_KEY)
-               flags |= GRE_KEY;
-       if (tflags & TUNNEL_SEQ)
-               flags |= GRE_SEQ;
-       if (tflags & TUNNEL_STRICT)
-               flags |= GRE_STRICT;
-       if (tflags & TUNNEL_REC)
-               flags |= GRE_REC;
-       if (tflags & TUNNEL_VERSION)
-               flags |= GRE_VERSION;
-
-       return flags;
-}
-
-static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
-                           bool *csum_err)
-{
-       const struct gre_base_hdr *greh;
-       __be32 *options;
-       int hdr_len;
-
-       if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
-               return -EINVAL;
-
-       greh = (struct gre_base_hdr *)skb_transport_header(skb);
-       if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
-               return -EINVAL;
-
-       tpi->flags = gre_flags_to_tnl_flags(greh->flags);
-       hdr_len = ip_gre_calc_hlen(tpi->flags);
-
-       if (!pskb_may_pull(skb, hdr_len))
-               return -EINVAL;
-
-       greh = (struct gre_base_hdr *)skb_transport_header(skb);
-       tpi->proto = greh->protocol;
-
-       options = (__be32 *)(greh + 1);
-       if (greh->flags & GRE_CSUM) {
-               if (skb_checksum_simple_validate(skb)) {
-                       *csum_err = true;
-                       return -EINVAL;
-               }
-
-               skb_checksum_try_convert(skb, IPPROTO_GRE, 0,
-                                        null_compute_pseudo);
-               options++;
-       }
-
-       if (greh->flags & GRE_KEY) {
-               tpi->key = *options;
-               options++;
-       } else {
-               tpi->key = 0;
-       }
-       if (unlikely(greh->flags & GRE_SEQ)) {
-               tpi->seq = *options;
-               options++;
-       } else {
-               tpi->seq = 0;
-       }
-       /* WCCP version 1 and 2 protocol decoding.
-        * - Change protocol to IP
-        * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
-        */
-       if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
-               tpi->proto = htons(ETH_P_IP);
-               if ((*(u8 *)options & 0xF0) != 0x40) {
-                       hdr_len += 4;
-                       if (!pskb_may_pull(skb, hdr_len))
-                               return -EINVAL;
-               }
-       }
-       return iptunnel_pull_header(skb, hdr_len, tpi->proto, false);
-}
-
 static void ipgre_err(struct sk_buff *skb, u32 info,
                      const struct tnl_ptk_info *tpi)
 {
@@ -341,7 +222,7 @@ static void gre_err(struct sk_buff *skb, u32 info)
        struct tnl_ptk_info tpi;
        bool csum_err = false;
 
-       if (parse_gre_header(skb, &tpi, &csum_err)) {
+       if (gre_parse_header(skb, &tpi, &csum_err) < 0) {
                if (!csum_err)          /* ignore csum errors. */
                        return;
        }
@@ -379,24 +260,22 @@ static __be32 tunnel_id_to_key(__be64 x)
 #endif
 }
 
-static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
+static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
+                      struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
 {
-       struct net *net = dev_net(skb->dev);
        struct metadata_dst *tun_dst = NULL;
-       struct ip_tunnel_net *itn;
        const struct iphdr *iph;
        struct ip_tunnel *tunnel;
 
-       if (tpi->proto == htons(ETH_P_TEB))
-               itn = net_generic(net, gre_tap_net_id);
-       else
-               itn = net_generic(net, ipgre_net_id);
-
        iph = ip_hdr(skb);
        tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
                                  iph->saddr, iph->daddr, tpi->key);
 
        if (tunnel) {
+               if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
+                                          raw_proto, false) < 0)
+                       goto drop;
+
                skb_pop_mac_header(skb);
                if (tunnel->collect_md) {
                        __be16 flags;
@@ -412,13 +291,41 @@ static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
                ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
                return PACKET_RCVD;
        }
-       return PACKET_REJECT;
+       return PACKET_NEXT;
+
+drop:
+       kfree_skb(skb);
+       return PACKET_RCVD;
+}
+
+static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
+                    int hdr_len)
+{
+       struct net *net = dev_net(skb->dev);
+       struct ip_tunnel_net *itn;
+       int res;
+
+       if (tpi->proto == htons(ETH_P_TEB))
+               itn = net_generic(net, gre_tap_net_id);
+       else
+               itn = net_generic(net, ipgre_net_id);
+
+       res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
+       if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
+               /* ipgre tunnels in collect-metadata mode should also
+                * receive ETH_P_TEB traffic.
+                */
+               itn = net_generic(net, ipgre_net_id);
+               res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
+       }
+       return res;
 }
 
 static int gre_rcv(struct sk_buff *skb)
 {
        struct tnl_ptk_info tpi;
        bool csum_err = false;
+       int hdr_len;
 
 #ifdef CONFIG_NET_IPGRE_BROADCAST
        if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
@@ -428,10 +335,11 @@ static int gre_rcv(struct sk_buff *skb)
        }
 #endif
 
-       if (parse_gre_header(skb, &tpi, &csum_err) < 0)
+       hdr_len = gre_parse_header(skb, &tpi, &csum_err);
+       if (hdr_len < 0)
                goto drop;
 
-       if (ipgre_rcv(skb, &tpi) == PACKET_RCVD)
+       if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
                return 0;
 
        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
@@ -440,49 +348,6 @@ drop:
        return 0;
 }
 
-static __sum16 gre_checksum(struct sk_buff *skb)
-{
-       __wsum csum;
-
-       if (skb->ip_summed == CHECKSUM_PARTIAL)
-               csum = lco_csum(skb);
-       else
-               csum = skb_checksum(skb, 0, skb->len, 0);
-       return csum_fold(csum);
-}
-
-static void build_header(struct sk_buff *skb, int hdr_len, __be16 flags,
-                        __be16 proto, __be32 key, __be32 seq)
-{
-       struct gre_base_hdr *greh;
-
-       skb_push(skb, hdr_len);
-
-       skb_reset_transport_header(skb);
-       greh = (struct gre_base_hdr *)skb->data;
-       greh->flags = tnl_flags_to_gre_flags(flags);
-       greh->protocol = proto;
-
-       if (flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)) {
-               __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
-
-               if (flags & TUNNEL_SEQ) {
-                       *ptr = seq;
-                       ptr--;
-               }
-               if (flags & TUNNEL_KEY) {
-                       *ptr = key;
-                       ptr--;
-               }
-               if (flags & TUNNEL_CSUM &&
-                   !(skb_shinfo(skb)->gso_type &
-                     (SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) {
-                       *ptr = 0;
-                       *(__sum16 *)ptr = gre_checksum(skb);
-               }
-       }
-}
-
 static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
                       const struct iphdr *tnl_params,
                       __be16 proto)
@@ -493,15 +358,15 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
                tunnel->o_seqno++;
 
        /* Push GRE header. */
-       build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
-                    proto, tunnel->parms.o_key, htonl(tunnel->o_seqno));
+       gre_build_header(skb, tunnel->tun_hlen,
+                        tunnel->parms.o_flags, proto, tunnel->parms.o_key,
+                        htonl(tunnel->o_seqno));
 
        skb_set_inner_protocol(skb, proto);
        ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
 }
 
-static struct sk_buff *gre_handle_offloads(struct sk_buff *skb,
-                                          bool csum)
+static int gre_handle_offloads(struct sk_buff *skb, bool csum)
 {
        return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
 }
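
This is the core of the offload-handling cleanup in this series:
gre_handle_offloads(), via iptunnel_handle_offloads(), no longer returns a
possibly-reallocated skb wrapped in ERR_PTR(); it returns 0 or a negative
errno and never consumes the skb, so callers retain ownership on failure. A
userspace sketch of the two calling conventions, assuming a toy buffer type
(IS_ERR and ERR_PTR are restated here for the sketch, not pulled from a
kernel header):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct buf { size_t len; };

#define IS_ERR(p)  ((uintptr_t)(p) >= (uintptr_t)-4095)
#define ERR_PTR(e) ((void *)(intptr_t)(e))

/* old style: consumes the buffer on failure, returns an encoded error */
static struct buf *buf_transform_old(struct buf *b, int fail)
{
        if (fail) {
                free(b);
                return ERR_PTR(-EINVAL);
        }
        return b;
}

/* new style: 0/-errno, ownership always stays with the caller */
static int buf_transform_new(struct buf *b, int fail)
{
        (void)b;
        return fail ? -EINVAL : 0;
}

int main(void)
{
        struct buf *b = calloc(1, sizeof(*b));

        b = buf_transform_old(b, 0);
        if (IS_ERR(b))
                return 1;

        if (buf_transform_new(b, 1))   /* error: we still own b */
                free(b);
        puts("ok");
        return 0;
}
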
@@ -523,7 +388,8 @@ static struct rtable *gre_get_rt(struct sk_buff *skb,
        return ip_route_output_key(net, fl);
 }
 
-static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
+static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
+                       __be16 proto)
 {
        struct ip_tunnel_info *tun_info;
        const struct ip_tunnel_key *key;
@@ -553,7 +419,7 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
                                          fl.saddr);
        }
 
-       tunnel_hlen = ip_gre_calc_hlen(key->tun_flags);
+       tunnel_hlen = gre_calc_hlen(key->tun_flags);
 
        min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
                        + tunnel_hlen + sizeof(struct iphdr);
@@ -568,15 +434,12 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        /* Push Tunnel header. */
-       skb = gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM));
-       if (IS_ERR(skb)) {
-               skb = NULL;
+       if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
                goto err_free_rt;
-       }
 
        flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
-       build_header(skb, tunnel_hlen, flags, htons(ETH_P_TEB),
-                    tunnel_id_to_key(tun_info->key.tun_id), 0);
+       gre_build_header(skb, tunnel_hlen, flags, proto,
+                        tunnel_id_to_key(tun_info->key.tun_id), 0);
 
        df = key->tun_flags & TUNNEL_DONT_FRAGMENT ?  htons(IP_DF) : 0;
 
@@ -616,7 +479,7 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
        const struct iphdr *tnl_params;
 
        if (tunnel->collect_md) {
-               gre_fb_xmit(skb, dev);
+               gre_fb_xmit(skb, dev, skb->protocol);
                return NETDEV_TX_OK;
        }
 
@@ -640,16 +503,14 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
                tnl_params = &tunnel->parms.iph;
        }
 
-       skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
-       if (IS_ERR(skb))
-               goto out;
+       if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
+               goto free_skb;
 
        __gre_xmit(skb, dev, tnl_params, skb->protocol);
        return NETDEV_TX_OK;
 
 free_skb:
        kfree_skb(skb);
-out:
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
 }
@@ -660,13 +521,12 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
        struct ip_tunnel *tunnel = netdev_priv(dev);
 
        if (tunnel->collect_md) {
-               gre_fb_xmit(skb, dev);
+               gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
                return NETDEV_TX_OK;
        }
 
-       skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
-       if (IS_ERR(skb))
-               goto out;
+       if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
+               goto free_skb;
 
        if (skb_cow_head(skb, dev->needed_headroom))
                goto free_skb;
@@ -676,7 +536,6 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
 
 free_skb:
        kfree_skb(skb);
-out:
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
 }
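
gre_fb_xmit() now takes the GRE protocol field from its caller: gre_tap_xmit()
pins ETH_P_TEB, since a tap device hands down complete Ethernet frames, while
ipgre_xmit() above passes skb->protocol. Trivially sketched (the ETH_P_*
values are the standard ethertypes; the function itself is illustrative):

#include <stdint.h>
#include <stdio.h>

#define ETH_P_TEB 0x6558   /* transparent Ethernet bridging */
#define ETH_P_IP  0x0800

static uint16_t fb_xmit_proto(int is_tap, uint16_t skb_proto)
{
        return is_tap ? ETH_P_TEB : skb_proto;
}

int main(void)
{
        printf("tap: %#x\n", fb_xmit_proto(1, ETH_P_IP)); /* 0x6558 */
        printf("gre: %#x\n", fb_xmit_proto(0, ETH_P_IP)); /* 0x800 */
        return 0;
}
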
@@ -702,8 +561,8 @@ static int ipgre_tunnel_ioctl(struct net_device *dev,
        if (err)
                return err;
 
-       p.i_flags = tnl_flags_to_gre_flags(p.i_flags);
-       p.o_flags = tnl_flags_to_gre_flags(p.o_flags);
+       p.i_flags = gre_tnl_flags_to_gre_flags(p.i_flags);
+       p.o_flags = gre_tnl_flags_to_gre_flags(p.o_flags);
 
        if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
                return -EFAULT;
@@ -747,7 +606,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
 
        iph = (struct iphdr *)skb_push(skb, t->hlen + sizeof(*iph));
        greh = (struct gre_base_hdr *)(iph+1);
-       greh->flags = tnl_flags_to_gre_flags(t->parms.o_flags);
+       greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
        greh->protocol = htons(type);
 
        memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
@@ -848,7 +707,7 @@ static void __gre_tunnel_init(struct net_device *dev)
        int t_hlen;
 
        tunnel = netdev_priv(dev);
-       tunnel->tun_hlen = ip_gre_calc_hlen(tunnel->parms.o_flags);
+       tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
        tunnel->parms.iph.protocol = IPPROTO_GRE;
 
        tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
@@ -862,9 +721,16 @@ static void __gre_tunnel_init(struct net_device *dev)
        dev->hw_features        |= GRE_FEATURES;
 
        if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
-               /* TCP offload with GRE SEQ is not supported. */
-               dev->features    |= NETIF_F_GSO_SOFTWARE;
-               dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+               /* TCP offload with GRE SEQ is not supported, nor
+                * can we support 2 levels of outer headers requiring
+                * an update.
+                */
+               if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
+                   (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
+                       dev->features    |= NETIF_F_GSO_SOFTWARE;
+                       dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+               }
+
                /* Can use a lockless transmit, unless we generate
                 * output sequences
                 */
@@ -886,7 +752,7 @@ static int ipgre_tunnel_init(struct net_device *dev)
        netif_keep_dst(dev);
        dev->addr_len           = 4;
 
-       if (iph->daddr) {
+       if (iph->daddr && !tunnel->collect_md) {
 #ifdef CONFIG_NET_IPGRE_BROADCAST
                if (ipv4_is_multicast(iph->daddr)) {
                        if (!iph->saddr)
@@ -895,8 +761,9 @@ static int ipgre_tunnel_init(struct net_device *dev)
                        dev->header_ops = &ipgre_header_ops;
                }
 #endif
-       } else
+       } else if (!tunnel->collect_md) {
                dev->header_ops = &ipgre_header_ops;
+       }
 
        return ip_tunnel_init(dev);
 }
@@ -939,6 +806,11 @@ static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
        if (flags & (GRE_VERSION|GRE_ROUTING))
                return -EINVAL;
 
+       if (data[IFLA_GRE_COLLECT_METADATA] &&
+           data[IFLA_GRE_ENCAP_TYPE] &&
+           nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
+               return -EINVAL;
+
        return 0;
 }
 
@@ -1156,8 +1028,10 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
        struct ip_tunnel_parm *p = &t->parms;
 
        if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
-           nla_put_be16(skb, IFLA_GRE_IFLAGS, tnl_flags_to_gre_flags(p->i_flags)) ||
-           nla_put_be16(skb, IFLA_GRE_OFLAGS, tnl_flags_to_gre_flags(p->o_flags)) ||
+           nla_put_be16(skb, IFLA_GRE_IFLAGS,
+                        gre_tnl_flags_to_gre_flags(p->i_flags)) ||
+           nla_put_be16(skb, IFLA_GRE_OFLAGS,
+                        gre_tnl_flags_to_gre_flags(p->o_flags)) ||
            nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
            nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
            nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
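
Throughout this file ip_gre_calc_hlen() has become the shared gre_calc_hlen();
the arithmetic itself is unchanged: a 4-byte base GRE header plus 4 bytes for
each optional field the flags select. Restated as a standalone sketch (the
TUNNEL_* values are stand-ins, not the kernel's bit definitions):

#include <stdio.h>

#define TUNNEL_CSUM 0x01
#define TUNNEL_KEY  0x04
#define TUNNEL_SEQ  0x08

static int calc_hlen(unsigned int flags)
{
        int addend = 4;              /* struct gre_base_hdr */

        if (flags & TUNNEL_CSUM)
                addend += 4;         /* checksum + reserved */
        if (flags & TUNNEL_KEY)
                addend += 4;
        if (flags & TUNNEL_SEQ)
                addend += 4;
        return addend;
}

int main(void)
{
        printf("%d\n", calc_hlen(0));                       /* 4 */
        printf("%d\n", calc_hlen(TUNNEL_KEY | TUNNEL_SEQ)); /* 12 */
        return 0;
}
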
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index e3d782746d9db79b0ad03d1059316bebb73207a4..4b351af3e67b60ba89ab38fb28dac42b6648274d 100644
@@ -218,17 +218,17 @@ static int ip_local_deliver_finish(struct net *net, struct sock *sk, struct sk_b
                                protocol = -ret;
                                goto resubmit;
                        }
-                       IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS);
+                       __IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
                } else {
                        if (!raw) {
                                if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
-                                       IP_INC_STATS_BH(net, IPSTATS_MIB_INUNKNOWNPROTOS);
+                                       __IP_INC_STATS(net, IPSTATS_MIB_INUNKNOWNPROTOS);
                                        icmp_send(skb, ICMP_DEST_UNREACH,
                                                  ICMP_PROT_UNREACH, 0);
                                }
                                kfree_skb(skb);
                        } else {
-                               IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS);
+                               __IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
                                consume_skb(skb);
                        }
                }
@@ -273,7 +273,7 @@ static inline bool ip_rcv_options(struct sk_buff *skb)
                                              --ANK (980813)
        */
        if (skb_cow(skb, skb_headroom(skb))) {
-               IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
+               __IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INDISCARDS);
                goto drop;
        }
 
@@ -282,7 +282,7 @@ static inline bool ip_rcv_options(struct sk_buff *skb)
        opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
 
        if (ip_options_compile(dev_net(dev), opt, skb)) {
-               IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
+               __IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
                goto drop;
        }
 
@@ -313,6 +313,13 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
        const struct iphdr *iph = ip_hdr(skb);
        struct rtable *rt;
 
+       /* if ingress device is enslaved to an L3 master device pass the
+        * skb to its handler for processing
+        */
+       skb = l3mdev_ip_rcv(skb);
+       if (!skb)
+               return NET_RX_SUCCESS;
+
        if (net->ipv4.sysctl_ip_early_demux &&
            !skb_dst(skb) &&
            !skb->sk &&
@@ -337,7 +344,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
                                               iph->tos, skb->dev);
                if (unlikely(err)) {
                        if (err == -EXDEV)
-                               NET_INC_STATS_BH(net, LINUX_MIB_IPRPFILTER);
+                               __NET_INC_STATS(net, LINUX_MIB_IPRPFILTER);
                        goto drop;
                }
        }
@@ -358,9 +365,9 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 
        rt = skb_rtable(skb);
        if (rt->rt_type == RTN_MULTICAST) {
-               IP_UPD_PO_STATS_BH(net, IPSTATS_MIB_INMCAST, skb->len);
+               __IP_UPD_PO_STATS(net, IPSTATS_MIB_INMCAST, skb->len);
        } else if (rt->rt_type == RTN_BROADCAST) {
-               IP_UPD_PO_STATS_BH(net, IPSTATS_MIB_INBCAST, skb->len);
+               __IP_UPD_PO_STATS(net, IPSTATS_MIB_INBCAST, skb->len);
        } else if (skb->pkt_type == PACKET_BROADCAST ||
                   skb->pkt_type == PACKET_MULTICAST) {
                struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
@@ -409,11 +416,11 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
 
 
        net = dev_net(dev);
-       IP_UPD_PO_STATS_BH(net, IPSTATS_MIB_IN, skb->len);
+       __IP_UPD_PO_STATS(net, IPSTATS_MIB_IN, skb->len);
 
        skb = skb_share_check(skb, GFP_ATOMIC);
        if (!skb) {
-               IP_INC_STATS_BH(net, IPSTATS_MIB_INDISCARDS);
+               __IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
                goto out;
        }
 
@@ -439,9 +446,9 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
        BUILD_BUG_ON(IPSTATS_MIB_ECT1PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_1);
        BUILD_BUG_ON(IPSTATS_MIB_ECT0PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_0);
        BUILD_BUG_ON(IPSTATS_MIB_CEPKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_CE);
-       IP_ADD_STATS_BH(net,
-                       IPSTATS_MIB_NOECTPKTS + (iph->tos & INET_ECN_MASK),
-                       max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
+       __IP_ADD_STATS(net,
+                      IPSTATS_MIB_NOECTPKTS + (iph->tos & INET_ECN_MASK),
+                      max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
 
        if (!pskb_may_pull(skb, iph->ihl*4))
                goto inhdr_error;
@@ -453,7 +460,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
 
        len = ntohs(iph->tot_len);
        if (skb->len < len) {
-               IP_INC_STATS_BH(net, IPSTATS_MIB_INTRUNCATEDPKTS);
+               __IP_INC_STATS(net, IPSTATS_MIB_INTRUNCATEDPKTS);
                goto drop;
        } else if (len < (iph->ihl*4))
                goto inhdr_error;
@@ -463,7 +470,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
         * Note this now means skb->len holds ntohs(iph->tot_len).
         */
        if (pskb_trim_rcsum(skb, len)) {
-               IP_INC_STATS_BH(net, IPSTATS_MIB_INDISCARDS);
+               __IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
                goto drop;
        }
 
@@ -471,6 +478,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
 
        /* Remove any debris in the socket control block */
        memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+       IPCB(skb)->iif = skb->skb_iif;
 
        /* Must drop socket now because of tproxy. */
        skb_orphan(skb);
@@ -480,9 +488,9 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
                       ip_rcv_finish);
 
 csum_error:
-       IP_INC_STATS_BH(net, IPSTATS_MIB_CSUMERRORS);
+       __IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS);
 inhdr_error:
-       IP_INC_STATS_BH(net, IPSTATS_MIB_INHDRERRORS);
+       __IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
 drop:
        kfree_skb(skb);
 out:
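
The s/IP_INC_STATS_BH/__IP_INC_STATS/ churn in this file follows the 4.7 SNMP
counter convention: the double-underscore variants assume the caller already
runs in a non-preemptible context (the BH-disabled receive path here) and so
may use the cheaper, non-preemption-safe per-cpu increment. A rough userspace
analogue, with a mutex standing in for disabled preemption (names are
illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long in_delivers;

/* caller already holds stats_lock, just as __IP_INC_STATS assumes a
 * non-preemptible context */
static void __inc_stat(unsigned long *ctr)
{
        (*ctr)++;
}

/* safe from any context: takes the lock itself */
static void inc_stat(unsigned long *ctr)
{
        pthread_mutex_lock(&stats_lock);
        __inc_stat(ctr);
        pthread_mutex_unlock(&stats_lock);
}

int main(void)
{
        inc_stat(&in_delivers);

        pthread_mutex_lock(&stats_lock);   /* "softirq" section */
        __inc_stat(&in_delivers);
        pthread_mutex_unlock(&stats_lock);

        printf("%lu\n", in_delivers);      /* 2 */
        return 0;
}
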
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 89b5f3bd669436f7215703d92aaf7700145cdc8d..5805762d7fc79a702f1d67e802992b822f66f000 100644
@@ -106,7 +106,8 @@ static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
                return;
 
        if (offset != 0)
-               csum = csum_sub(csum, csum_partial(skb->data, offset, 0));
+               csum = csum_sub(csum, csum_partial(skb_transport_header(skb),
+                                                  offset, 0));
 
        put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
 }
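
The fix above matters because the offset handed to ip_cmsg_recv_checksum() is
relative to the transport header, not to skb->data; subtracting the checksum
of the wrong prefix corrupts the value reported through IP_CHECKSUM. The
underlying one's-complement identity, restated in standalone C (csum and
csum_sub are simplified 16-bit versions, not the kernel's __wsum helpers):

#include <stdint.h>
#include <stdio.h>

static uint16_t csum(const uint8_t *p, int len, uint32_t sum)
{
        for (int i = 0; i < len; i++)
                sum += (i & 1) ? p[i] : (uint32_t)p[i] << 8;
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

/* one's-complement a - b, as csum_sub() does on __wsum values */
static uint16_t csum_sub(uint16_t a, uint16_t b)
{
        uint32_t d = (uint32_t)a + (uint16_t)~b;

        while (d >> 16)
                d = (d & 0xffff) + (d >> 16);
        return (uint16_t)d;
}

int main(void)
{
        uint8_t pkt[] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        int off = 4;   /* must be taken from the right base */

        uint16_t whole  = csum(pkt, sizeof(pkt), 0);
        uint16_t prefix = csum(pkt, off, 0);
        uint16_t tail   = csum(pkt + off, sizeof(pkt) - off, 0);

        /* checksum of [off, len) == csum(whole) - csum([0, off)) */
        printf("%04x %04x\n", csum_sub(whole, prefix), tail);
        return 0;
}
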
@@ -509,9 +510,10 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
                copied = len;
        }
        err = skb_copy_datagram_msg(skb, 0, msg, copied);
-       if (err)
-               goto out_free_skb;
-
+       if (unlikely(err)) {
+               kfree_skb(skb);
+               return err;
+       }
        sock_recv_timestamp(msg, sk, skb);
 
        serr = SKB_EXT_ERR(skb);
@@ -543,8 +545,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
        msg->msg_flags |= MSG_ERRQUEUE;
        err = copied;
 
-out_free_skb:
-       kfree_skb(skb);
+       consume_skb(skb);
 out:
        return err;
 }
@@ -1192,7 +1193,12 @@ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
                       ipv6_sk_rxinfo(sk);
 
        if (prepare && skb_rtable(skb)) {
-               pktinfo->ipi_ifindex = inet_iif(skb);
+               /* skb->cb is overloaded: prior to this point it is IP{6}CB
+                * which has interface index (iif) as the first member of the
+                * underlying inet{6}_skb_parm struct. This code then overlays
+                * PKTINFO_SKB_CB and in_pktinfo also has iif as the first
+                * element so the iif is picked up from the prior IPCB
+                * element so the iif is picked up from the prior IPCB
+                */
                pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
        } else {
                pktinfo->ipi_ifindex = 0;
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 6aad0192443d49966785f0de67ee30934775dfca..a69ed94bda1b107634f0aacb6dbedb3d03cc87b0 100644
@@ -326,12 +326,12 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
 
                if (!IS_ERR(rt)) {
                        tdev = rt->dst.dev;
-                       dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst,
-                                         fl4.saddr);
                        ip_rt_put(rt);
                }
                if (dev->type != ARPHRD_ETHER)
                        dev->flags |= IFF_POINTOPOINT;
+
+               dst_cache_reset(&tunnel->dst_cache);
        }
 
        if (!tdev && tunnel->parms.link)
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 43445df61efd984a56f3746f598d8f24ff04c718..9118b0e640ba3a96850cf28aa8caab4e615254d9 100644
@@ -146,8 +146,8 @@ struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
 }
 EXPORT_SYMBOL_GPL(iptunnel_metadata_reply);
 
-struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb,
-                                        int gso_type_mask)
+int iptunnel_handle_offloads(struct sk_buff *skb,
+                            int gso_type_mask)
 {
        int err;
 
@@ -157,11 +157,11 @@ struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb,
        }
 
        if (skb_is_gso(skb)) {
-               err = skb_unclone(skb, GFP_ATOMIC);
+               err = skb_header_unclone(skb, GFP_ATOMIC);
                if (unlikely(err))
-                       goto error;
+                       return err;
                skb_shinfo(skb)->gso_type |= gso_type_mask;
-               return skb;
+               return 0;
        }
 
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
@@ -174,10 +174,7 @@ struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb,
                skb->encapsulation = 0;
        }
 
-       return skb;
-error:
-       kfree_skb(skb);
-       return ERR_PTR(err);
+       return 0;
 }
 EXPORT_SYMBOL_GPL(iptunnel_handle_offloads);
 
@@ -274,7 +271,8 @@ static int ip_tun_fill_encap_info(struct sk_buff *skb,
 {
        struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);
 
-       if (nla_put_be64(skb, LWTUNNEL_IP_ID, tun_info->key.tun_id) ||
+       if (nla_put_be64(skb, LWTUNNEL_IP_ID, tun_info->key.tun_id,
+                        LWTUNNEL_IP_PAD) ||
            nla_put_in_addr(skb, LWTUNNEL_IP_DST, tun_info->key.u.ipv4.dst) ||
            nla_put_in_addr(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) ||
            nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) ||
@@ -287,7 +285,7 @@ static int ip_tun_fill_encap_info(struct sk_buff *skb,
 
 static int ip_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
 {
-       return nla_total_size(8)        /* LWTUNNEL_IP_ID */
+       return nla_total_size_64bit(8)  /* LWTUNNEL_IP_ID */
                + nla_total_size(4)     /* LWTUNNEL_IP_DST */
                + nla_total_size(4)     /* LWTUNNEL_IP_SRC */
                + nla_total_size(1)     /* LWTUNNEL_IP_TOS */
@@ -369,7 +367,8 @@ static int ip6_tun_fill_encap_info(struct sk_buff *skb,
 {
        struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);
 
-       if (nla_put_be64(skb, LWTUNNEL_IP6_ID, tun_info->key.tun_id) ||
+       if (nla_put_be64(skb, LWTUNNEL_IP6_ID, tun_info->key.tun_id,
+                        LWTUNNEL_IP6_PAD) ||
            nla_put_in6_addr(skb, LWTUNNEL_IP6_DST, &tun_info->key.u.ipv6.dst) ||
            nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) ||
            nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.tos) ||
@@ -382,7 +381,7 @@ static int ip6_tun_fill_encap_info(struct sk_buff *skb,
 
 static int ip6_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
 {
-       return nla_total_size(8)        /* LWTUNNEL_IP6_ID */
+       return nla_total_size_64bit(8)  /* LWTUNNEL_IP6_ID */
                + nla_total_size(16)    /* LWTUNNEL_IP6_DST */
                + nla_total_size(16)    /* LWTUNNEL_IP6_SRC */
                + nla_total_size(1)     /* LWTUNNEL_IP6_HOPLIMIT */
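
The extra argument to nla_put_be64() and the switch to nla_total_size_64bit()
come from the 4.7 netlink 64-bit-alignment work: a zero-length pad attribute
(LWTUNNEL_IP_PAD / LWTUNNEL_IP6_PAD) may be inserted so the 8-byte payload
lands naturally aligned, and the size estimate must reserve room for it. The
size arithmetic, sketched (NLA_HDRLEN and NLA_ALIGN() follow the netlink ABI;
the helper bodies are restatements, not kernel source):

#include <stdio.h>

#define NLA_ALIGNTO  4
#define NLA_ALIGN(n) (((n) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN   NLA_ALIGN(4)   /* struct nlattr */

static int nla_total_size(int payload)
{
        return NLA_ALIGN(NLA_HDRLEN + payload);
}

static int nla_total_size_64bit(int payload)
{
        /* worst case: a zero-payload pad attribute precedes the real
         * one so the 8-byte value can be naturally aligned */
        return NLA_ALIGN(NLA_HDRLEN + payload) + NLA_HDRLEN;
}

int main(void)
{
        printf("%d\n", nla_total_size(8));       /* 12 */
        printf("%d\n", nla_total_size_64bit(8)); /* 16 */
        return 0;
}
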
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 5cf10b777b7e4bb8b1037201a2c8bfeeb031c7cc..a917903d5e9742fb07bac1b2a7fa94ee069c0d54 100644
@@ -156,6 +156,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
        struct dst_entry *dst = skb_dst(skb);
        struct net_device *tdev;        /* Device to other host */
        int err;
+       int mtu;
 
        if (!dst) {
                dev->stats.tx_carrier_errors++;
@@ -192,6 +193,23 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
                        tunnel->err_count = 0;
        }
 
+       mtu = dst_mtu(dst);
+       if (skb->len > mtu) {
+               skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+               if (skb->protocol == htons(ETH_P_IP)) {
+                       icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+                                 htonl(mtu));
+               } else {
+                       if (mtu < IPV6_MIN_MTU)
+                               mtu = IPV6_MIN_MTU;
+
+                       icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+               }
+
+               dst_release(dst);
+               goto tx_error;
+       }
+
        skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
        skb_dst_set(skb, dst);
        skb->dev = skb_dst(skb)->dev;
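
vti_xmit() now enforces path MTU itself: an oversized packet updates the
route's PMTU, triggers the family-appropriate "too big" signal (ICMP
fragmentation-needed for IPv4, ICMPv6 packet-too-big with the IPV6_MIN_MTU
floor), and is dropped. The decision logic as a standalone sketch (the enum
and names are illustrative; IPV6_MIN_MTU is the IPv6 protocol minimum, 1280):

#include <stdio.h>

#define IPV6_MIN_MTU 1280

enum verdict { PASS, SEND_FRAG_NEEDED, SEND_PKT_TOOBIG };

static enum verdict pmtu_check(int len, int mtu, int is_ipv4, int *adv_mtu)
{
        if (len <= mtu)
                return PASS;
        if (is_ipv4) {
                *adv_mtu = mtu;
                return SEND_FRAG_NEEDED;
        }
        /* never advertise an IPv6 MTU below the protocol minimum */
        *adv_mtu = mtu < IPV6_MIN_MTU ? IPV6_MIN_MTU : mtu;
        return SEND_PKT_TOOBIG;
}

int main(void)
{
        int adv = 0;

        printf("%d\n", pmtu_check(1400, 1500, 1, &adv));         /* 0 */
        printf("%d %d\n", pmtu_check(1400, 1000, 0, &adv), adv); /* 2 1280 */
        return 0;
}
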
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index ec51d02166de66744f27092f1490bb635c9a70bc..92827483ee3d7f03881e5cededec380fa3f15885 100644
@@ -219,9 +219,8 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
        if (unlikely(skb->protocol != htons(ETH_P_IP)))
                goto tx_error;
 
-       skb = iptunnel_handle_offloads(skb, SKB_GSO_IPIP);
-       if (IS_ERR(skb))
-               goto out;
+       if (iptunnel_handle_offloads(skb, SKB_GSO_IPIP))
+               goto tx_error;
 
        skb_set_inner_ipproto(skb, IPPROTO_IPIP);
 
@@ -230,7 +229,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 
 tx_error:
        kfree_skb(skb);
-out:
+
        dev->stats.tx_errors++;
        return NETDEV_TX_OK;
 }
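
With iptunnel_handle_offloads() no longer consuming the skb on failure, the
separate out: label disappears here too, and every failure funnels through the
single tx_error: path, which frees exactly once. The resulting shape as a
standalone sketch (illustrative names; the always-failing transform just
exercises the error path):

#include <stdio.h>
#include <stdlib.h>

static unsigned long tx_errors;

static int handle_offloads(void *buf)
{
        (void)buf;
        return -1;   /* pretend the offload setup failed */
}

static int xmit(void)
{
        void *buf = malloc(64);

        if (!buf)
                return 0;
        if (handle_offloads(buf))
                goto tx_error;   /* buffer is still ours */

        free(buf);               /* "transmitted" */
        return 0;

tx_error:
        free(buf);               /* the one and only free on error */
        tx_errors++;
        printf("dropped (%lu)\n", tx_errors);
        return 0;
}

int main(void)
{
        return xmit();
}
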
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 395e2814a46d9995b63b6bc703e28433ecc26c2b..21a38e296fe2da2c4a095c94ab204362d9eacc89 100644
@@ -2104,7 +2104,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
        mfcs.mfcs_packets = c->mfc_un.res.pkt;
        mfcs.mfcs_bytes = c->mfc_un.res.bytes;
        mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
-       if (nla_put(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs) < 0)
+       if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) < 0)
                return -EMSGSIZE;
 
        rtm->rtm_type = RTN_MULTICAST;
@@ -2237,7 +2237,7 @@ static size_t mroute_msgsize(bool unresolved, int maxvif)
                      + nla_total_size(0)       /* RTA_MULTIPATH */
                      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
                                                /* RTA_MFC_STATS */
-                     + nla_total_size(sizeof(struct rta_mfc_stats))
+                     + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
                ;
 
        return len;
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 4133b0f513afe5a40e523511f24b4652c68b1b57..2033f929aa662026391900aeb074ee7388e1a96e 100644
@@ -34,27 +34,6 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
 MODULE_DESCRIPTION("arptables core");
 
-/*#define DEBUG_ARP_TABLES*/
-/*#define DEBUG_ARP_TABLES_USER*/
-
-#ifdef DEBUG_ARP_TABLES
-#define dprintf(format, args...)  pr_debug(format, ## args)
-#else
-#define dprintf(format, args...)
-#endif
-
-#ifdef DEBUG_ARP_TABLES_USER
-#define duprintf(format, args...) pr_debug(format, ## args)
-#else
-#define duprintf(format, args...)
-#endif
-
-#ifdef CONFIG_NETFILTER_DEBUG
-#define ARP_NF_ASSERT(x)       WARN_ON(!(x))
-#else
-#define ARP_NF_ASSERT(x)
-#endif
-
 void *arpt_alloc_initial_table(const struct xt_table *info)
 {
        return xt_alloc_initial_table(arpt, ARPT);
@@ -113,36 +92,20 @@ static inline int arp_packet_match(const struct arphdr *arphdr,
 #define FWINV(bool, invflg) ((bool) ^ !!(arpinfo->invflags & (invflg)))
 
        if (FWINV((arphdr->ar_op & arpinfo->arpop_mask) != arpinfo->arpop,
-                 ARPT_INV_ARPOP)) {
-               dprintf("ARP operation field mismatch.\n");
-               dprintf("ar_op: %04x info->arpop: %04x info->arpop_mask: %04x\n",
-                       arphdr->ar_op, arpinfo->arpop, arpinfo->arpop_mask);
+                 ARPT_INV_ARPOP))
                return 0;
-       }
 
        if (FWINV((arphdr->ar_hrd & arpinfo->arhrd_mask) != arpinfo->arhrd,
-                 ARPT_INV_ARPHRD)) {
-               dprintf("ARP hardware address format mismatch.\n");
-               dprintf("ar_hrd: %04x info->arhrd: %04x info->arhrd_mask: %04x\n",
-                       arphdr->ar_hrd, arpinfo->arhrd, arpinfo->arhrd_mask);
+                 ARPT_INV_ARPHRD))
                return 0;
-       }
 
        if (FWINV((arphdr->ar_pro & arpinfo->arpro_mask) != arpinfo->arpro,
-                 ARPT_INV_ARPPRO)) {
-               dprintf("ARP protocol address format mismatch.\n");
-               dprintf("ar_pro: %04x info->arpro: %04x info->arpro_mask: %04x\n",
-                       arphdr->ar_pro, arpinfo->arpro, arpinfo->arpro_mask);
+                 ARPT_INV_ARPPRO))
                return 0;
-       }
 
        if (FWINV((arphdr->ar_hln & arpinfo->arhln_mask) != arpinfo->arhln,
-                 ARPT_INV_ARPHLN)) {
-               dprintf("ARP hardware address length mismatch.\n");
-               dprintf("ar_hln: %02x info->arhln: %02x info->arhln_mask: %02x\n",
-                       arphdr->ar_hln, arpinfo->arhln, arpinfo->arhln_mask);
+                 ARPT_INV_ARPHLN))
                return 0;
-       }
 
        src_devaddr = arpptr;
        arpptr += dev->addr_len;
@@ -155,49 +118,25 @@ static inline int arp_packet_match(const struct arphdr *arphdr,
        if (FWINV(arp_devaddr_compare(&arpinfo->src_devaddr, src_devaddr, dev->addr_len),
                  ARPT_INV_SRCDEVADDR) ||
            FWINV(arp_devaddr_compare(&arpinfo->tgt_devaddr, tgt_devaddr, dev->addr_len),
-                 ARPT_INV_TGTDEVADDR)) {
-               dprintf("Source or target device address mismatch.\n");
-
+                 ARPT_INV_TGTDEVADDR))
                return 0;
-       }
 
        if (FWINV((src_ipaddr & arpinfo->smsk.s_addr) != arpinfo->src.s_addr,
                  ARPT_INV_SRCIP) ||
            FWINV(((tgt_ipaddr & arpinfo->tmsk.s_addr) != arpinfo->tgt.s_addr),
-                 ARPT_INV_TGTIP)) {
-               dprintf("Source or target IP address mismatch.\n");
-
-               dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
-                       &src_ipaddr,
-                       &arpinfo->smsk.s_addr,
-                       &arpinfo->src.s_addr,
-                       arpinfo->invflags & ARPT_INV_SRCIP ? " (INV)" : "");
-               dprintf("TGT: %pI4 Mask: %pI4 Target: %pI4.%s\n",
-                       &tgt_ipaddr,
-                       &arpinfo->tmsk.s_addr,
-                       &arpinfo->tgt.s_addr,
-                       arpinfo->invflags & ARPT_INV_TGTIP ? " (INV)" : "");
+                 ARPT_INV_TGTIP))
                return 0;
-       }
 
        /* Look for ifname matches.  */
        ret = ifname_compare(indev, arpinfo->iniface, arpinfo->iniface_mask);
 
-       if (FWINV(ret != 0, ARPT_INV_VIA_IN)) {
-               dprintf("VIA in mismatch (%s vs %s).%s\n",
-                       indev, arpinfo->iniface,
-                       arpinfo->invflags & ARPT_INV_VIA_IN ? " (INV)" : "");
+       if (FWINV(ret != 0, ARPT_INV_VIA_IN))
                return 0;
-       }
 
        ret = ifname_compare(outdev, arpinfo->outiface, arpinfo->outiface_mask);
 
-       if (FWINV(ret != 0, ARPT_INV_VIA_OUT)) {
-               dprintf("VIA out mismatch (%s vs %s).%s\n",
-                       outdev, arpinfo->outiface,
-                       arpinfo->invflags & ARPT_INV_VIA_OUT ? " (INV)" : "");
+       if (FWINV(ret != 0, ARPT_INV_VIA_OUT))
                return 0;
-       }
 
        return 1;
 #undef FWINV
@@ -205,16 +144,10 @@ static inline int arp_packet_match(const struct arphdr *arphdr,
 
 static inline int arp_checkentry(const struct arpt_arp *arp)
 {
-       if (arp->flags & ~ARPT_F_MASK) {
-               duprintf("Unknown flag bits set: %08X\n",
-                        arp->flags & ~ARPT_F_MASK);
+       if (arp->flags & ~ARPT_F_MASK)
                return 0;
-       }
-       if (arp->invflags & ~ARPT_INV_MASK) {
-               duprintf("Unknown invflag bits set: %08X\n",
-                        arp->invflags & ~ARPT_INV_MASK);
+       if (arp->invflags & ~ARPT_INV_MASK)
                return 0;
-       }
 
        return 1;
 }
@@ -367,6 +300,18 @@ static inline bool unconditional(const struct arpt_entry *e)
               memcmp(&e->arp, &uncond, sizeof(uncond)) == 0;
 }
 
+static bool find_jump_target(const struct xt_table_info *t,
+                            const struct arpt_entry *target)
+{
+       struct arpt_entry *iter;
+
+       xt_entry_foreach(iter, t->entries, t->size) {
+                if (iter == target)
+                       return true;
+       }
+       return false;
+}
+
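
find_jump_target() replaces the old arithmetic bound on verdicts: a jump is
legal only if it lands exactly on the start of an entry reachable by walking
next_offset from the table base. The same check over a toy record layout
(hypothetical struct, not the arpt types; real entries also need next_offset
bounded, as the mark_source_chains() hunk below enforces):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct entry { unsigned short next_offset; };

static bool find_jump_target(const char *base, unsigned int size,
                             unsigned int off)
{
        unsigned int pos = 0;

        while (pos < size) {
                const struct entry *e = (const struct entry *)(base + pos);

                if (pos == off)
                        return true;    /* off is an entry boundary */
                pos += e->next_offset;
        }
        return false;
}

int main(void)
{
        char table[32];
        struct entry e = { .next_offset = 16 };

        memset(table, 0, sizeof(table));
        memcpy(table, &e, sizeof(e));        /* entry at offset 0 */
        memcpy(table + 16, &e, sizeof(e));   /* entry at offset 16 */

        printf("%d %d\n",
               find_jump_target(table, sizeof(table), 16),  /* 1 */
               find_jump_target(table, sizeof(table), 8));  /* 0 */
        return 0;
}
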
 /* Figures out from what hook each rule can be called: returns 0 if
  * there are loops.  Puts hook bitmask in comefrom.
  */
@@ -394,11 +339,9 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
                                = (void *)arpt_get_target_c(e);
                        int visited = e->comefrom & (1 << hook);
 
-                       if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) {
-                               pr_notice("arptables: loop hook %u pos %u %08X.\n",
-                                      hook, pos, e->comefrom);
+                       if (e->comefrom & (1 << NF_ARP_NUMHOOKS))
                                return 0;
-                       }
+
                        e->comefrom
                                |= ((1 << hook) | (1 << NF_ARP_NUMHOOKS));
 
@@ -411,12 +354,8 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
 
                                if ((strcmp(t->target.u.user.name,
                                            XT_STANDARD_TARGET) == 0) &&
-                                   t->verdict < -NF_MAX_VERDICT - 1) {
-                                       duprintf("mark_source_chains: bad "
-                                               "negative verdict (%i)\n",
-                                                               t->verdict);
+                                   t->verdict < -NF_MAX_VERDICT - 1)
                                        return 0;
-                               }
 
                                /* Return: backtrack through the last
                                 * big jump.
@@ -439,6 +378,8 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
                                size = e->next_offset;
                                e = (struct arpt_entry *)
                                        (entry0 + pos + size);
+                               if (pos + size >= newinfo->size)
+                                       return 0;
                                e->counters.pcnt = pos;
                                pos += size;
                        } else {
@@ -447,20 +388,16 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
                                if (strcmp(t->target.u.user.name,
                                           XT_STANDARD_TARGET) == 0 &&
                                    newpos >= 0) {
-                                       if (newpos > newinfo->size -
-                                               sizeof(struct arpt_entry)) {
-                                               duprintf("mark_source_chains: "
-                                                       "bad verdict (%i)\n",
-                                                               newpos);
-                                               return 0;
-                                       }
-
                                        /* This a jump; chase it. */
-                                       duprintf("Jump rule %u -> %u\n",
-                                                pos, newpos);
+                                       e = (struct arpt_entry *)
+                                               (entry0 + newpos);
+                                       if (!find_jump_target(newinfo, e))
+                                               return 0;
                                } else {
                                        /* ... this is a fallthru */
                                        newpos = pos + e->next_offset;
+                                       if (newpos >= newinfo->size)
+                                               return 0;
                                }
                                e = (struct arpt_entry *)
                                        (entry0 + newpos);
@@ -468,33 +405,14 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
                                pos = newpos;
                        }
                }
-next:
-               duprintf("Finished chain %u\n", hook);
+next:          ;
        }
        return 1;
 }
 
-static inline int check_entry(const struct arpt_entry *e)
-{
-       const struct xt_entry_target *t;
-
-       if (!arp_checkentry(&e->arp))
-               return -EINVAL;
-
-       if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset)
-               return -EINVAL;
-
-       t = arpt_get_target_c(e);
-       if (e->target_offset + t->u.target_size > e->next_offset)
-               return -EINVAL;
-
-       return 0;
-}
-
 static inline int check_target(struct arpt_entry *e, const char *name)
 {
        struct xt_entry_target *t = arpt_get_target(e);
-       int ret;
        struct xt_tgchk_param par = {
                .table     = name,
                .entryinfo = e,
@@ -504,13 +422,7 @@ static inline int check_target(struct arpt_entry *e, const char *name)
                .family    = NFPROTO_ARP,
        };
 
-       ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
-       if (ret < 0) {
-               duprintf("arp_tables: check failed for `%s'.\n",
-                        t->u.kernel.target->name);
-               return ret;
-       }
-       return 0;
+       return xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
 }
 
 static inline int
@@ -518,17 +430,18 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
 {
        struct xt_entry_target *t;
        struct xt_target *target;
+       unsigned long pcnt;
        int ret;
 
-       e->counters.pcnt = xt_percpu_counter_alloc();
-       if (IS_ERR_VALUE(e->counters.pcnt))
+       pcnt = xt_percpu_counter_alloc();
+       if (IS_ERR_VALUE(pcnt))
                return -ENOMEM;
+       e->counters.pcnt = pcnt;
 
        t = arpt_get_target(e);
        target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
                                        t->u.user.revision);
        if (IS_ERR(target)) {
-               duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
                ret = PTR_ERR(target);
                goto out;
        }
@@ -574,19 +487,18 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
 
        if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 ||
            (unsigned char *)e + sizeof(struct arpt_entry) >= limit ||
-           (unsigned char *)e + e->next_offset > limit) {
-               duprintf("Bad offset %p\n", e);
+           (unsigned char *)e + e->next_offset > limit)
                return -EINVAL;
-       }
 
        if (e->next_offset
-           < sizeof(struct arpt_entry) + sizeof(struct xt_entry_target)) {
-               duprintf("checking: element %p size %u\n",
-                        e, e->next_offset);
+           < sizeof(struct arpt_entry) + sizeof(struct xt_entry_target))
                return -EINVAL;
-       }
 
-       err = check_entry(e);
+       if (!arp_checkentry(&e->arp))
+               return -EINVAL;
+
+       err = xt_check_entry_offsets(e, e->elems, e->target_offset,
+                                    e->next_offset);
        if (err)
                return err;
 
@@ -597,12 +509,9 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
                if ((unsigned char *)e - base == hook_entries[h])
                        newinfo->hook_entry[h] = hook_entries[h];
                if ((unsigned char *)e - base == underflows[h]) {
-                       if (!check_underflow(e)) {
-                               pr_debug("Underflows must be unconditional and "
-                                        "use the STANDARD target with "
-                                        "ACCEPT/DROP\n");
+                       if (!check_underflow(e))
                                return -EINVAL;
-                       }
+
                        newinfo->underflow[h] = underflows[h];
                }
        }
@@ -647,7 +556,6 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
                newinfo->underflow[i] = 0xFFFFFFFF;
        }
 
-       duprintf("translate_table: size %u\n", newinfo->size);
        i = 0;
 
        /* Walk through entries, checking offsets. */
@@ -664,37 +572,25 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
                    XT_ERROR_TARGET) == 0)
                        ++newinfo->stacksize;
        }
-       duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret);
        if (ret != 0)
                return ret;
 
-       if (i != repl->num_entries) {
-               duprintf("translate_table: %u not %u entries\n",
-                        i, repl->num_entries);
+       if (i != repl->num_entries)
                return -EINVAL;
-       }
 
        /* Check hooks all assigned */
        for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
                /* Only hooks which are valid */
                if (!(repl->valid_hooks & (1 << i)))
                        continue;
-               if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
-                       duprintf("Invalid hook entry %u %u\n",
-                                i, repl->hook_entry[i]);
+               if (newinfo->hook_entry[i] == 0xFFFFFFFF)
                        return -EINVAL;
-               }
-               if (newinfo->underflow[i] == 0xFFFFFFFF) {
-                       duprintf("Invalid underflow %u %u\n",
-                                i, repl->underflow[i]);
+               if (newinfo->underflow[i] == 0xFFFFFFFF)
                        return -EINVAL;
-               }
        }
 
-       if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) {
-               duprintf("Looping hook\n");
+       if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
                return -ELOOP;
-       }
 
        /* Finally, each sanity check must pass */
        i = 0;
@@ -898,11 +794,8 @@ static int get_info(struct net *net, void __user *user,
        struct xt_table *t;
        int ret;
 
-       if (*len != sizeof(struct arpt_getinfo)) {
-               duprintf("length %u != %Zu\n", *len,
-                        sizeof(struct arpt_getinfo));
+       if (*len != sizeof(struct arpt_getinfo))
                return -EINVAL;
-       }
 
        if (copy_from_user(name, user, sizeof(name)) != 0)
                return -EFAULT;
@@ -958,33 +851,25 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
        struct arpt_get_entries get;
        struct xt_table *t;
 
-       if (*len < sizeof(get)) {
-               duprintf("get_entries: %u < %Zu\n", *len, sizeof(get));
+       if (*len < sizeof(get))
                return -EINVAL;
-       }
        if (copy_from_user(&get, uptr, sizeof(get)) != 0)
                return -EFAULT;
-       if (*len != sizeof(struct arpt_get_entries) + get.size) {
-               duprintf("get_entries: %u != %Zu\n", *len,
-                        sizeof(struct arpt_get_entries) + get.size);
+       if (*len != sizeof(struct arpt_get_entries) + get.size)
                return -EINVAL;
-       }
+
        get.name[sizeof(get.name) - 1] = '\0';
 
        t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
        if (!IS_ERR_OR_NULL(t)) {
                const struct xt_table_info *private = t->private;
 
-               duprintf("t->private->number = %u\n",
-                        private->number);
                if (get.size == private->size)
                        ret = copy_entries_to_user(private->size,
                                                   t, uptr->entrytable);
-               else {
-                       duprintf("get_entries: I've got %u not %u!\n",
-                                private->size, get.size);
+               else
                        ret = -EAGAIN;
-               }
+
                module_put(t->me);
                xt_table_unlock(t);
        } else
@@ -1022,8 +907,6 @@ static int __do_replace(struct net *net, const char *name,
 
        /* You lied! */
        if (valid_hooks != t->valid_hooks) {
-               duprintf("Valid hook crap: %08X vs %08X\n",
-                        valid_hooks, t->valid_hooks);
                ret = -EINVAL;
                goto put_module;
        }
@@ -1033,8 +916,6 @@ static int __do_replace(struct net *net, const char *name,
                goto put_module;
 
        /* Update module usage count based on number of rules */
-       duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
-               oldinfo->number, oldinfo->initial_entries, newinfo->number);
        if ((oldinfo->number > oldinfo->initial_entries) ||
            (newinfo->number <= oldinfo->initial_entries))
                module_put(t->me);
@@ -1104,8 +985,6 @@ static int do_replace(struct net *net, const void __user *user,
        if (ret != 0)
                goto free_newinfo;
 
-       duprintf("arp_tables: Translated table\n");
-
        ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
                           tmp.num_counters, tmp.counters);
        if (ret)
@@ -1126,55 +1005,17 @@ static int do_add_counters(struct net *net, const void __user *user,
        unsigned int i;
        struct xt_counters_info tmp;
        struct xt_counters *paddc;
-       unsigned int num_counters;
-       const char *name;
-       int size;
-       void *ptmp;
        struct xt_table *t;
        const struct xt_table_info *private;
        int ret = 0;
        struct arpt_entry *iter;
        unsigned int addend;
-#ifdef CONFIG_COMPAT
-       struct compat_xt_counters_info compat_tmp;
 
-       if (compat) {
-               ptmp = &compat_tmp;
-               size = sizeof(struct compat_xt_counters_info);
-       } else
-#endif
-       {
-               ptmp = &tmp;
-               size = sizeof(struct xt_counters_info);
-       }
-
-       if (copy_from_user(ptmp, user, size) != 0)
-               return -EFAULT;
-
-#ifdef CONFIG_COMPAT
-       if (compat) {
-               num_counters = compat_tmp.num_counters;
-               name = compat_tmp.name;
-       } else
-#endif
-       {
-               num_counters = tmp.num_counters;
-               name = tmp.name;
-       }
-
-       if (len != size + num_counters * sizeof(struct xt_counters))
-               return -EINVAL;
-
-       paddc = vmalloc(len - size);
-       if (!paddc)
-               return -ENOMEM;
-
-       if (copy_from_user(paddc, user + size, len - size) != 0) {
-               ret = -EFAULT;
-               goto free;
-       }
+       paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
+       if (IS_ERR(paddc))
+               return PTR_ERR(paddc);
 
-       t = xt_find_table_lock(net, NFPROTO_ARP, name);
+       t = xt_find_table_lock(net, NFPROTO_ARP, tmp.name);
        if (IS_ERR_OR_NULL(t)) {
                ret = t ? PTR_ERR(t) : -ENOENT;
                goto free;
@@ -1182,7 +1023,7 @@ static int do_add_counters(struct net *net, const void __user *user,
 
        local_bh_disable();
        private = t->private;
-       if (private->number != num_counters) {
+       if (private->number != tmp.num_counters) {
                ret = -EINVAL;
                goto unlock_up_free;
        }
@@ -1209,6 +1050,18 @@ static int do_add_counters(struct net *net, const void __user *user,
 }
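
do_add_counters() sheds its duplicated native/compat header parsing:
xt_copy_counters_from_user() now reads whichever layout applies, normalizes it
into struct xt_counters_info, validates the total length, and returns the
counter array (or an ERR_PTR). The consolidation pattern, sketched with
simplified layouts (nothing below is the real xtables ABI):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct counter     { uint64_t pcnt, bcnt; };
struct info        { char name[32]; uint64_t num_counters; };
struct compat_info { char name[32]; uint32_t num_counters; };

static struct counter *copy_counters(const void *u, size_t len,
                                     struct info *out, int compat)
{
        size_t hdr = compat ? sizeof(struct compat_info) : sizeof(*out);
        struct counter *p;

        if (len < hdr)
                return NULL;
        if (compat) {            /* widen the 32-bit layout */
                const struct compat_info *c = u;

                memcpy(out->name, c->name, sizeof(out->name));
                out->num_counters = c->num_counters;
        } else {
                memcpy(out, u, sizeof(*out));
        }
        if (len != hdr + out->num_counters * sizeof(struct counter))
                return NULL;     /* length must match the header */

        p = malloc(len - hdr);
        if (p)
                memcpy(p, (const char *)u + hdr, len - hdr);
        return p;
}

int main(void)
{
        struct compat_info ci = { "filter", 1 };
        struct counter c = { 1, 64 };
        unsigned char msg[sizeof(ci) + sizeof(c)];
        struct info out;
        struct counter *ctrs;

        memcpy(msg, &ci, sizeof(ci));
        memcpy(msg + sizeof(ci), &c, sizeof(c));

        ctrs = copy_counters(msg, sizeof(msg), &out, 1);
        printf("%s %llu\n", ctrs ? "ok" : "bad",
               (unsigned long long)out.num_counters);
        free(ctrs);
        return 0;
}
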
 
 #ifdef CONFIG_COMPAT
+struct compat_arpt_replace {
+       char                            name[XT_TABLE_MAXNAMELEN];
+       u32                             valid_hooks;
+       u32                             num_entries;
+       u32                             size;
+       u32                             hook_entry[NF_ARP_NUMHOOKS];
+       u32                             underflow[NF_ARP_NUMHOOKS];
+       u32                             num_counters;
+       compat_uptr_t                   counters;
+       struct compat_arpt_entry        entries[0];
+};
+
 static inline void compat_release_entry(struct compat_arpt_entry *e)
 {
        struct xt_entry_target *t;
@@ -1217,38 +1070,32 @@ static inline void compat_release_entry(struct compat_arpt_entry *e)
        module_put(t->u.kernel.target->me);
 }
 
-static inline int
+static int
 check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
                                  struct xt_table_info *newinfo,
                                  unsigned int *size,
                                  const unsigned char *base,
-                                 const unsigned char *limit,
-                                 const unsigned int *hook_entries,
-                                 const unsigned int *underflows,
-                                 const char *name)
+                                 const unsigned char *limit)
 {
        struct xt_entry_target *t;
        struct xt_target *target;
        unsigned int entry_offset;
-       int ret, off, h;
+       int ret, off;
 
-       duprintf("check_compat_entry_size_and_hooks %p\n", e);
        if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 ||
            (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit ||
-           (unsigned char *)e + e->next_offset > limit) {
-               duprintf("Bad offset %p, limit = %p\n", e, limit);
+           (unsigned char *)e + e->next_offset > limit)
                return -EINVAL;
-       }
 
        if (e->next_offset < sizeof(struct compat_arpt_entry) +
-                            sizeof(struct compat_xt_entry_target)) {
-               duprintf("checking: element %p size %u\n",
-                        e, e->next_offset);
+                            sizeof(struct compat_xt_entry_target))
                return -EINVAL;
-       }
 
-       /* For purposes of check_entry casting the compat entry is fine */
-       ret = check_entry((struct arpt_entry *)e);
+       if (!arp_checkentry(&e->arp))
+               return -EINVAL;
+
+       ret = xt_compat_check_entry_offsets(e, e->elems, e->target_offset,
+                                           e->next_offset);
        if (ret)
                return ret;
 
@@ -1259,8 +1106,6 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
        target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
                                        t->u.user.revision);
        if (IS_ERR(target)) {
-               duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
-                        t->u.user.name);
                ret = PTR_ERR(target);
                goto out;
        }
@@ -1272,17 +1117,6 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
        if (ret)
                goto release_target;
 
-       /* Check hooks & underflows */
-       for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
-               if ((unsigned char *)e - base == hook_entries[h])
-                       newinfo->hook_entry[h] = hook_entries[h];
-               if ((unsigned char *)e - base == underflows[h])
-                       newinfo->underflow[h] = underflows[h];
-       }
-
-       /* Clear counters and comefrom */
-       memset(&e->counters, 0, sizeof(e->counters));
-       e->comefrom = 0;
        return 0;
 
 release_target:
@@ -1291,18 +1125,17 @@ out:
        return ret;
 }
 
-static int
+static void
 compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
-                           unsigned int *size, const char *name,
+                           unsigned int *size,
                            struct xt_table_info *newinfo, unsigned char *base)
 {
        struct xt_entry_target *t;
        struct xt_target *target;
        struct arpt_entry *de;
        unsigned int origsize;
-       int ret, h;
+       int h;
 
-       ret = 0;
        origsize = *size;
        de = (struct arpt_entry *)*dstptr;
        memcpy(de, e, sizeof(struct arpt_entry));
@@ -1323,148 +1156,78 @@ compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
                if ((unsigned char *)de - base < newinfo->underflow[h])
                        newinfo->underflow[h] -= origsize - *size;
        }
-       return ret;
 }
 
-static int translate_compat_table(const char *name,
-                                 unsigned int valid_hooks,
-                                 struct xt_table_info **pinfo,
+static int translate_compat_table(struct xt_table_info **pinfo,
                                  void **pentry0,
-                                 unsigned int total_size,
-                                 unsigned int number,
-                                 unsigned int *hook_entries,
-                                 unsigned int *underflows)
+                                 const struct compat_arpt_replace *compatr)
 {
        unsigned int i, j;
        struct xt_table_info *newinfo, *info;
        void *pos, *entry0, *entry1;
        struct compat_arpt_entry *iter0;
-       struct arpt_entry *iter1;
+       struct arpt_replace repl;
        unsigned int size;
        int ret = 0;
 
        info = *pinfo;
        entry0 = *pentry0;
-       size = total_size;
-       info->number = number;
+       size = compatr->size;
+       info->number = compatr->num_entries;
 
-       /* Init all hooks to impossible value. */
-       for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
-               info->hook_entry[i] = 0xFFFFFFFF;
-               info->underflow[i] = 0xFFFFFFFF;
-       }
-
-       duprintf("translate_compat_table: size %u\n", info->size);
        j = 0;
        xt_compat_lock(NFPROTO_ARP);
-       xt_compat_init_offsets(NFPROTO_ARP, number);
+       xt_compat_init_offsets(NFPROTO_ARP, compatr->num_entries);
        /* Walk through entries, checking offsets. */
-       xt_entry_foreach(iter0, entry0, total_size) {
+       xt_entry_foreach(iter0, entry0, compatr->size) {
                ret = check_compat_entry_size_and_hooks(iter0, info, &size,
                                                        entry0,
-                                                       entry0 + total_size,
-                                                       hook_entries,
-                                                       underflows,
-                                                       name);
+                                                       entry0 + compatr->size);
                if (ret != 0)
                        goto out_unlock;
                ++j;
        }
 
        ret = -EINVAL;
-       if (j != number) {
-               duprintf("translate_compat_table: %u not %u entries\n",
-                        j, number);
+       if (j != compatr->num_entries)
                goto out_unlock;
-       }
-
-       /* Check hooks all assigned */
-       for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
-               /* Only hooks which are valid */
-               if (!(valid_hooks & (1 << i)))
-                       continue;
-               if (info->hook_entry[i] == 0xFFFFFFFF) {
-                       duprintf("Invalid hook entry %u %u\n",
-                                i, hook_entries[i]);
-                       goto out_unlock;
-               }
-               if (info->underflow[i] == 0xFFFFFFFF) {
-                       duprintf("Invalid underflow %u %u\n",
-                                i, underflows[i]);
-                       goto out_unlock;
-               }
-       }
 
        ret = -ENOMEM;
        newinfo = xt_alloc_table_info(size);
        if (!newinfo)
                goto out_unlock;
 
-       newinfo->number = number;
+       newinfo->number = compatr->num_entries;
        for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
                newinfo->hook_entry[i] = info->hook_entry[i];
                newinfo->underflow[i] = info->underflow[i];
        }
        entry1 = newinfo->entries;
        pos = entry1;
-       size = total_size;
-       xt_entry_foreach(iter0, entry0, total_size) {
-               ret = compat_copy_entry_from_user(iter0, &pos, &size,
-                                                 name, newinfo, entry1);
-               if (ret != 0)
-                       break;
-       }
+       size = compatr->size;
+       xt_entry_foreach(iter0, entry0, compatr->size)
+               compat_copy_entry_from_user(iter0, &pos, &size,
+                                           newinfo, entry1);
+
+       /* all module references in entry0 are now gone */
+
        xt_compat_flush_offsets(NFPROTO_ARP);
        xt_compat_unlock(NFPROTO_ARP);
-       if (ret)
-               goto free_newinfo;
 
-       ret = -ELOOP;
-       if (!mark_source_chains(newinfo, valid_hooks, entry1))
-               goto free_newinfo;
+       memcpy(&repl, compatr, sizeof(*compatr));
 
-       i = 0;
-       xt_entry_foreach(iter1, entry1, newinfo->size) {
-               iter1->counters.pcnt = xt_percpu_counter_alloc();
-               if (IS_ERR_VALUE(iter1->counters.pcnt)) {
-                       ret = -ENOMEM;
-                       break;
-               }
-
-               ret = check_target(iter1, name);
-               if (ret != 0) {
-                       xt_percpu_counter_free(iter1->counters.pcnt);
-                       break;
-               }
-               ++i;
-               if (strcmp(arpt_get_target(iter1)->u.user.name,
-                   XT_ERROR_TARGET) == 0)
-                       ++newinfo->stacksize;
-       }
-       if (ret) {
-               /*
-                * The first i matches need cleanup_entry (calls ->destroy)
-                * because they had called ->check already. The other j-i
-                * entries need only release.
-                */
-               int skip = i;
-               j -= i;
-               xt_entry_foreach(iter0, entry0, newinfo->size) {
-                       if (skip-- > 0)
-                               continue;
-                       if (j-- == 0)
-                               break;
-                       compat_release_entry(iter0);
-               }
-               xt_entry_foreach(iter1, entry1, newinfo->size) {
-                       if (i-- == 0)
-                               break;
-                       cleanup_entry(iter1);
-               }
-               xt_free_table_info(newinfo);
-               return ret;
+       for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
+               repl.hook_entry[i] = newinfo->hook_entry[i];
+               repl.underflow[i] = newinfo->underflow[i];
        }
 
+       repl.num_counters = 0;
+       repl.counters = NULL;
+       repl.size = newinfo->size;
+       ret = translate_table(newinfo, entry1, &repl);
+       if (ret)
+               goto free_newinfo;
+
        *pinfo = newinfo;
        *pentry0 = entry1;
        xt_free_table_info(info);
@@ -1472,31 +1235,18 @@ static int translate_compat_table(const char *name,
 
 free_newinfo:
        xt_free_table_info(newinfo);
-out:
-       xt_entry_foreach(iter0, entry0, total_size) {
+       return ret;
+out_unlock:
+       xt_compat_flush_offsets(NFPROTO_ARP);
+       xt_compat_unlock(NFPROTO_ARP);
+       xt_entry_foreach(iter0, entry0, compatr->size) {
                if (j-- == 0)
                        break;
                compat_release_entry(iter0);
        }
        return ret;
-out_unlock:
-       xt_compat_flush_offsets(NFPROTO_ARP);
-       xt_compat_unlock(NFPROTO_ARP);
-       goto out;
 }
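
The rewritten translate_compat_table() stops re-implementing hook validation,
loop detection, and target checking: it widens the 32-bit request into a
native struct arpt_replace and calls the one translate_table(), so both entry
points share a single validator. The convert-then-delegate shape as a sketch
(simplified fields; the validator body is a trivial stand-in):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NUMHOOKS 3

struct replace {
        char     name[32];
        uint64_t num_entries, size;
        uint64_t hook_entry[NUMHOOKS], underflow[NUMHOOKS];
};

struct compat_replace {
        char     name[32];
        uint32_t num_entries, size;
        uint32_t hook_entry[NUMHOOKS], underflow[NUMHOOKS];
};

/* the one real validator; everything funnels through here */
static int translate_table(const struct replace *r)
{
        return (r->num_entries && r->size) ? 0 : -1;
}

static int translate_compat_table(const struct compat_replace *cr)
{
        struct replace r;
        int i;

        memset(&r, 0, sizeof(r));
        memcpy(r.name, cr->name, sizeof(r.name));
        r.num_entries = cr->num_entries;
        r.size = cr->size;
        for (i = 0; i < NUMHOOKS; i++) {
                r.hook_entry[i] = cr->hook_entry[i];
                r.underflow[i] = cr->underflow[i];
        }
        return translate_table(&r);   /* shared validation path */
}

int main(void)
{
        struct compat_replace cr = { "filter", 2, 128, { 0 }, { 0 } };

        printf("%d\n", translate_compat_table(&cr));   /* 0 */
        return 0;
}
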
 
-struct compat_arpt_replace {
-       char                            name[XT_TABLE_MAXNAMELEN];
-       u32                             valid_hooks;
-       u32                             num_entries;
-       u32                             size;
-       u32                             hook_entry[NF_ARP_NUMHOOKS];
-       u32                             underflow[NF_ARP_NUMHOOKS];
-       u32                             num_counters;
-       compat_uptr_t                   counters;
-       struct compat_arpt_entry        entries[0];
-};
-
 static int compat_do_replace(struct net *net, void __user *user,
                             unsigned int len)
 {
@@ -1510,8 +1260,6 @@ static int compat_do_replace(struct net *net, void __user *user,
                return -EFAULT;
 
        /* overflow check */
-       if (tmp.size >= INT_MAX / num_possible_cpus())
-               return -ENOMEM;
        if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
                return -ENOMEM;
        if (tmp.num_counters == 0)
@@ -1529,15 +1277,10 @@ static int compat_do_replace(struct net *net, void __user *user,
                goto free_newinfo;
        }
 
-       ret = translate_compat_table(tmp.name, tmp.valid_hooks,
-                                    &newinfo, &loc_cpu_entry, tmp.size,
-                                    tmp.num_entries, tmp.hook_entry,
-                                    tmp.underflow);
+       ret = translate_compat_table(&newinfo, &loc_cpu_entry, &tmp);
        if (ret != 0)
                goto free_newinfo;
 
-       duprintf("compat_do_replace: Translated table\n");
-
        ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
                           tmp.num_counters, compat_ptr(tmp.counters));
        if (ret)
@@ -1570,7 +1313,6 @@ static int compat_do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user,
                break;
 
        default:
-               duprintf("do_arpt_set_ctl:  unknown request %i\n", cmd);
                ret = -EINVAL;
        }
 
@@ -1653,17 +1395,13 @@ static int compat_get_entries(struct net *net,
        struct compat_arpt_get_entries get;
        struct xt_table *t;
 
-       if (*len < sizeof(get)) {
-               duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
+       if (*len < sizeof(get))
                return -EINVAL;
-       }
        if (copy_from_user(&get, uptr, sizeof(get)) != 0)
                return -EFAULT;
-       if (*len != sizeof(struct compat_arpt_get_entries) + get.size) {
-               duprintf("compat_get_entries: %u != %zu\n",
-                        *len, sizeof(get) + get.size);
+       if (*len != sizeof(struct compat_arpt_get_entries) + get.size)
                return -EINVAL;
-       }
+
        get.name[sizeof(get.name) - 1] = '\0';
 
        xt_compat_lock(NFPROTO_ARP);
@@ -1672,16 +1410,13 @@ static int compat_get_entries(struct net *net,
                const struct xt_table_info *private = t->private;
                struct xt_table_info info;
 
-               duprintf("t->private->number = %u\n", private->number);
                ret = compat_table_info(private, &info);
                if (!ret && get.size == info.size) {
                        ret = compat_copy_entries_to_user(private->size,
                                                          t, uptr->entrytable);
-               } else if (!ret) {
-                       duprintf("compat_get_entries: I've got %u not %u!\n",
-                                private->size, get.size);
+               } else if (!ret)
                        ret = -EAGAIN;
-               }
+
                xt_compat_flush_offsets(NFPROTO_ARP);
                module_put(t->me);
                xt_table_unlock(t);
@@ -1733,7 +1468,6 @@ static int do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned
                break;
 
        default:
-               duprintf("do_arpt_set_ctl:  unknown request %i\n", cmd);
                ret = -EINVAL;
        }
 
@@ -1776,7 +1510,6 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
        }
 
        default:
-               duprintf("do_arpt_get_ctl: unknown request %i\n", cmd);
                ret = -EINVAL;
        }
 
@@ -1821,7 +1554,6 @@ int arpt_register_table(struct net *net,
        memcpy(loc_cpu_entry, repl->entries, repl->size);
 
        ret = translate_table(newinfo, loc_cpu_entry, repl);
-       duprintf("arpt_register_table: translate table gives %d\n", ret);
        if (ret != 0)
                goto out_free;
 
index dd8c80dc32a2216d3705b75cc761bdb156806dc9..8f8713b4388fbfa9a0d36298603995b02718d21d 100644 (file)
@@ -81,6 +81,12 @@ static int __init arptable_filter_init(void)
                return ret;
        }
 
+       ret = arptable_filter_table_init(&init_net);
+       if (ret) {
+               unregister_pernet_subsys(&arptable_filter_net_ops);
+               kfree(arpfilter_ops);
+       }
+
        return ret;
 }
 
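This fix registers the filter table for the initial namespace at module load and, if that fails, unwinds the pernet registration done just above it and frees the hook ops array allocated earlier in the function. The shape of the pattern, as a sketch with hypothetical names:

    static int __init example_init(void)
    {
            int ret;

            ret = register_pernet_subsys(&example_net_ops);    /* step 1 */
            if (ret)
                    return ret;

            ret = example_table_init(&init_net);               /* step 2 */
            if (ret)                                           /* undo step 1 */
                    unregister_pernet_subsys(&example_net_ops);

            return ret;
    }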
index 631c100a13384203c06ec531ef901cd3aac74f90..54906e0e8e0c093a8680e6b1b0b45fc5dff9387d 100644 (file)
@@ -35,34 +35,12 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
 MODULE_DESCRIPTION("IPv4 packet filter");
 
-/*#define DEBUG_IP_FIREWALL*/
-/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
-/*#define DEBUG_IP_FIREWALL_USER*/
-
-#ifdef DEBUG_IP_FIREWALL
-#define dprintf(format, args...) pr_info(format , ## args)
-#else
-#define dprintf(format, args...)
-#endif
-
-#ifdef DEBUG_IP_FIREWALL_USER
-#define duprintf(format, args...) pr_info(format , ## args)
-#else
-#define duprintf(format, args...)
-#endif
-
 #ifdef CONFIG_NETFILTER_DEBUG
 #define IP_NF_ASSERT(x)                WARN_ON(!(x))
 #else
 #define IP_NF_ASSERT(x)
 #endif
 
-#if 0
-/* All the better to debug you with... */
-#define static
-#define inline
-#endif
-
 void *ipt_alloc_initial_table(const struct xt_table *info)
 {
        return xt_alloc_initial_table(ipt, IPT);
@@ -85,52 +63,28 @@ ip_packet_match(const struct iphdr *ip,
        if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
                  IPT_INV_SRCIP) ||
            FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
-                 IPT_INV_DSTIP)) {
-               dprintf("Source or dest mismatch.\n");
-
-               dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
-                       &ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr,
-                       ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
-               dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n",
-                       &ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr,
-                       ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
+                 IPT_INV_DSTIP))
                return false;
-       }
 
        ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);
 
-       if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
-               dprintf("VIA in mismatch (%s vs %s).%s\n",
-                       indev, ipinfo->iniface,
-                       ipinfo->invflags & IPT_INV_VIA_IN ? " (INV)" : "");
+       if (FWINV(ret != 0, IPT_INV_VIA_IN))
                return false;
-       }
 
        ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);
 
-       if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
-               dprintf("VIA out mismatch (%s vs %s).%s\n",
-                       outdev, ipinfo->outiface,
-                       ipinfo->invflags & IPT_INV_VIA_OUT ? " (INV)" : "");
+       if (FWINV(ret != 0, IPT_INV_VIA_OUT))
                return false;
-       }
 
        /* Check specific protocol */
        if (ipinfo->proto &&
-           FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
-               dprintf("Packet protocol %hi does not match %hi.%s\n",
-                       ip->protocol, ipinfo->proto,
-                       ipinfo->invflags & IPT_INV_PROTO ? " (INV)" : "");
+           FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO))
                return false;
-       }
 
        /* If we have a fragment rule but the packet is not a fragment
         * then we return zero */
-       if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
-               dprintf("Fragment rule but not fragment.%s\n",
-                       ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
+       if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG))
                return false;
-       }
 
        return true;
 }
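
For reference while reading the slimmed-down checks above: FWINV() folds the rule's invert flags into each test. Its definition earlier in this file is essentially

    #define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))

so a test such as FWINV(ret != 0, IPT_INV_VIA_IN) fails the match on an interface mismatch normally, but fails it on an interface match when the user inverted the option (e.g. "! -i eth0").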
@@ -138,16 +92,10 @@ ip_packet_match(const struct iphdr *ip,
 static bool
 ip_checkentry(const struct ipt_ip *ip)
 {
-       if (ip->flags & ~IPT_F_MASK) {
-               duprintf("Unknown flag bits set: %08X\n",
-                        ip->flags & ~IPT_F_MASK);
+       if (ip->flags & ~IPT_F_MASK)
                return false;
-       }
-       if (ip->invflags & ~IPT_INV_MASK) {
-               duprintf("Unknown invflag bits set: %08X\n",
-                        ip->invflags & ~IPT_INV_MASK);
+       if (ip->invflags & ~IPT_INV_MASK)
                return false;
-       }
        return true;
 }
 
@@ -346,10 +294,6 @@ ipt_do_table(struct sk_buff *skb,
 
        e = get_entry(table_base, private->hook_entry[hook]);
 
-       pr_debug("Entering %s(hook %u), UF %p\n",
-                table->name, hook,
-                get_entry(table_base, private->underflow[hook]));
-
        do {
                const struct xt_entry_target *t;
                const struct xt_entry_match *ematch;
@@ -396,22 +340,15 @@ ipt_do_table(struct sk_buff *skb,
                                if (stackidx == 0) {
                                        e = get_entry(table_base,
                                            private->underflow[hook]);
-                                       pr_debug("Underflow (this is normal) "
-                                                "to %p\n", e);
                                } else {
                                        e = jumpstack[--stackidx];
-                                       pr_debug("Pulled %p out from pos %u\n",
-                                                e, stackidx);
                                        e = ipt_next_entry(e);
                                }
                                continue;
                        }
                        if (table_base + v != ipt_next_entry(e) &&
-                           !(e->ip.flags & IPT_F_GOTO)) {
+                           !(e->ip.flags & IPT_F_GOTO))
                                jumpstack[stackidx++] = e;
-                               pr_debug("Pushed %p into pos %u\n",
-                                        e, stackidx - 1);
-                       }
 
                        e = get_entry(table_base, v);
                        continue;
@@ -429,18 +366,25 @@ ipt_do_table(struct sk_buff *skb,
                        /* Verdict */
                        break;
        } while (!acpar.hotdrop);
-       pr_debug("Exiting %s; sp at %u\n", __func__, stackidx);
 
        xt_write_recseq_end(addend);
        local_bh_enable();
 
-#ifdef DEBUG_ALLOW_ALL
-       return NF_ACCEPT;
-#else
        if (acpar.hotdrop)
                return NF_DROP;
        else return verdict;
-#endif
+}
+
+static bool find_jump_target(const struct xt_table_info *t,
+                            const struct ipt_entry *target)
+{
+       struct ipt_entry *iter;
+
+       xt_entry_foreach(iter, t->entries, t->size) {
+               if (iter == target)
+                       return true;
+       }
+       return false;
 }
 
 /* Figures out from what hook each rule can be called: returns 0 if
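
find_jump_target() above replaces the removed size-only sanity check on verdicts (visible further down, where the "newpos > newinfo->size - sizeof(struct ipt_entry)" test disappears): a jump is now accepted only if it lands exactly on an entry boundary. It leans on xt_entry_foreach(), which walks the blob by each entry's self-declared next_offset; the iterator is roughly

    #define xt_entry_foreach(pos, ehead, esize) \
            for ((pos) = (typeof(pos))(ehead); \
                 (pos) < (typeof(pos))((char *)(ehead) + (esize)); \
                 (pos) = (typeof(pos))((char *)(pos) + (pos)->next_offset))

so a verdict offset pointing into the middle of an entry can never compare equal to any iterated position.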
@@ -468,11 +412,9 @@ mark_source_chains(const struct xt_table_info *newinfo,
                                = (void *)ipt_get_target_c(e);
                        int visited = e->comefrom & (1 << hook);
 
-                       if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
-                               pr_err("iptables: loop hook %u pos %u %08X.\n",
-                                      hook, pos, e->comefrom);
+                       if (e->comefrom & (1 << NF_INET_NUMHOOKS))
                                return 0;
-                       }
+
                        e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
 
                        /* Unconditional return/END. */
@@ -484,26 +426,13 @@ mark_source_chains(const struct xt_table_info *newinfo,
 
                                if ((strcmp(t->target.u.user.name,
                                            XT_STANDARD_TARGET) == 0) &&
-                                   t->verdict < -NF_MAX_VERDICT - 1) {
-                                       duprintf("mark_source_chains: bad "
-                                               "negative verdict (%i)\n",
-                                                               t->verdict);
+                                   t->verdict < -NF_MAX_VERDICT - 1)
                                        return 0;
-                               }
 
                                /* Return: backtrack through the last
                                   big jump. */
                                do {
                                        e->comefrom ^= (1<<NF_INET_NUMHOOKS);
-#ifdef DEBUG_IP_FIREWALL_USER
-                                       if (e->comefrom
-                                           & (1 << NF_INET_NUMHOOKS)) {
-                                               duprintf("Back unset "
-                                                        "on hook %u "
-                                                        "rule %u\n",
-                                                        hook, pos);
-                                       }
-#endif
                                        oldpos = pos;
                                        pos = e->counters.pcnt;
                                        e->counters.pcnt = 0;
@@ -520,6 +449,8 @@ mark_source_chains(const struct xt_table_info *newinfo,
                                size = e->next_offset;
                                e = (struct ipt_entry *)
                                        (entry0 + pos + size);
+                               if (pos + size >= newinfo->size)
+                                       return 0;
                                e->counters.pcnt = pos;
                                pos += size;
                        } else {
@@ -528,19 +459,16 @@ mark_source_chains(const struct xt_table_info *newinfo,
                                if (strcmp(t->target.u.user.name,
                                           XT_STANDARD_TARGET) == 0 &&
                                    newpos >= 0) {
-                                       if (newpos > newinfo->size -
-                                               sizeof(struct ipt_entry)) {
-                                               duprintf("mark_source_chains: "
-                                                       "bad verdict (%i)\n",
-                                                               newpos);
-                                               return 0;
-                                       }
                                        /* This a jump; chase it. */
-                                       duprintf("Jump rule %u -> %u\n",
-                                                pos, newpos);
+                                       e = (struct ipt_entry *)
+                                               (entry0 + newpos);
+                                       if (!find_jump_target(newinfo, e))
+                                               return 0;
                                } else {
                                        /* ... this is a fallthru */
                                        newpos = pos + e->next_offset;
+                                       if (newpos >= newinfo->size)
+                                               return 0;
                                }
                                e = (struct ipt_entry *)
                                        (entry0 + newpos);
@@ -548,8 +476,7 @@ mark_source_chains(const struct xt_table_info *newinfo,
                                pos = newpos;
                        }
                }
-next:
-               duprintf("Finished chain %u\n", hook);
+next:          ;
        }
        return 1;
 }
@@ -567,41 +494,16 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net)
        module_put(par.match->me);
 }
 
-static int
-check_entry(const struct ipt_entry *e)
-{
-       const struct xt_entry_target *t;
-
-       if (!ip_checkentry(&e->ip))
-               return -EINVAL;
-
-       if (e->target_offset + sizeof(struct xt_entry_target) >
-           e->next_offset)
-               return -EINVAL;
-
-       t = ipt_get_target_c(e);
-       if (e->target_offset + t->u.target_size > e->next_offset)
-               return -EINVAL;
-
-       return 0;
-}
-
 static int
 check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
 {
        const struct ipt_ip *ip = par->entryinfo;
-       int ret;
 
        par->match     = m->u.kernel.match;
        par->matchinfo = m->data;
 
-       ret = xt_check_match(par, m->u.match_size - sizeof(*m),
-             ip->proto, ip->invflags & IPT_INV_PROTO);
-       if (ret < 0) {
-               duprintf("check failed for `%s'.\n", par->match->name);
-               return ret;
-       }
-       return 0;
+       return xt_check_match(par, m->u.match_size - sizeof(*m),
+                             ip->proto, ip->invflags & IPT_INV_PROTO);
 }
 
 static int
@@ -612,10 +514,8 @@ find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
 
        match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
                                      m->u.user.revision);
-       if (IS_ERR(match)) {
-               duprintf("find_check_match: `%s' not found\n", m->u.user.name);
+       if (IS_ERR(match))
                return PTR_ERR(match);
-       }
        m->u.kernel.match = match;
 
        ret = check_match(m, par);
@@ -640,16 +540,9 @@ static int check_target(struct ipt_entry *e, struct net *net, const char *name)
                .hook_mask = e->comefrom,
                .family    = NFPROTO_IPV4,
        };
-       int ret;
 
-       ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
-             e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
-       if (ret < 0) {
-               duprintf("check failed for `%s'.\n",
-                        t->u.kernel.target->name);
-               return ret;
-       }
-       return 0;
+       return xt_check_target(&par, t->u.target_size - sizeof(*t),
+                              e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
 }
 
 static int
@@ -662,10 +555,12 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
        unsigned int j;
        struct xt_mtchk_param mtpar;
        struct xt_entry_match *ematch;
+       unsigned long pcnt;
 
-       e->counters.pcnt = xt_percpu_counter_alloc();
-       if (IS_ERR_VALUE(e->counters.pcnt))
+       pcnt = xt_percpu_counter_alloc();
+       if (IS_ERR_VALUE(pcnt))
                return -ENOMEM;
+       e->counters.pcnt = pcnt;
 
        j = 0;
        mtpar.net       = net;
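
The temporary above is deliberate: counters.pcnt is a u64, while IS_ERR_VALUE() is only well-defined for unsigned long. At the time of this merge the macro is approximately

    #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)

and feeding it an integer of a different width is at best bad form and at worst a check that can never trigger (an unsigned int, zero-extended on a 64-bit build, can never land in the top-4095 error window). Capturing xt_percpu_counter_alloc()'s unsigned long result first keeps the error test well-typed.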
@@ -684,7 +579,6 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
        target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
                                        t->u.user.revision);
        if (IS_ERR(target)) {
-               duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
                ret = PTR_ERR(target);
                goto cleanup_matches;
        }
@@ -738,19 +632,18 @@ check_entry_size_and_hooks(struct ipt_entry *e,
 
        if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
            (unsigned char *)e + sizeof(struct ipt_entry) >= limit ||
-           (unsigned char *)e + e->next_offset > limit) {
-               duprintf("Bad offset %p\n", e);
+           (unsigned char *)e + e->next_offset > limit)
                return -EINVAL;
-       }
 
        if (e->next_offset
-           < sizeof(struct ipt_entry) + sizeof(struct xt_entry_target)) {
-               duprintf("checking: element %p size %u\n",
-                        e, e->next_offset);
+           < sizeof(struct ipt_entry) + sizeof(struct xt_entry_target))
+               return -EINVAL;
+
+       if (!ip_checkentry(&e->ip))
                return -EINVAL;
-       }
 
-       err = check_entry(e);
+       err = xt_check_entry_offsets(e, e->elems, e->target_offset,
+                                    e->next_offset);
        if (err)
                return err;
 
@@ -761,12 +654,9 @@ check_entry_size_and_hooks(struct ipt_entry *e,
                if ((unsigned char *)e - base == hook_entries[h])
                        newinfo->hook_entry[h] = hook_entries[h];
                if ((unsigned char *)e - base == underflows[h]) {
-                       if (!check_underflow(e)) {
-                               pr_debug("Underflows must be unconditional and "
-                                        "use the STANDARD target with "
-                                        "ACCEPT/DROP\n");
+                       if (!check_underflow(e))
                                return -EINVAL;
-                       }
+
                        newinfo->underflow[h] = underflows[h];
                }
        }
@@ -818,7 +708,6 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
                newinfo->underflow[i] = 0xFFFFFFFF;
        }
 
-       duprintf("translate_table: size %u\n", newinfo->size);
        i = 0;
        /* Walk through entries, checking offsets. */
        xt_entry_foreach(iter, entry0, newinfo->size) {
@@ -835,27 +724,18 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
                        ++newinfo->stacksize;
        }
 
-       if (i != repl->num_entries) {
-               duprintf("translate_table: %u not %u entries\n",
-                        i, repl->num_entries);
+       if (i != repl->num_entries)
                return -EINVAL;
-       }
 
        /* Check hooks all assigned */
        for (i = 0; i < NF_INET_NUMHOOKS; i++) {
                /* Only hooks which are valid */
                if (!(repl->valid_hooks & (1 << i)))
                        continue;
-               if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
-                       duprintf("Invalid hook entry %u %u\n",
-                                i, repl->hook_entry[i]);
+               if (newinfo->hook_entry[i] == 0xFFFFFFFF)
                        return -EINVAL;
-               }
-               if (newinfo->underflow[i] == 0xFFFFFFFF) {
-                       duprintf("Invalid underflow %u %u\n",
-                                i, repl->underflow[i]);
+               if (newinfo->underflow[i] == 0xFFFFFFFF)
                        return -EINVAL;
-               }
        }
 
        if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
@@ -1083,11 +963,8 @@ static int get_info(struct net *net, void __user *user,
        struct xt_table *t;
        int ret;
 
-       if (*len != sizeof(struct ipt_getinfo)) {
-               duprintf("length %u != %zu\n", *len,
-                        sizeof(struct ipt_getinfo));
+       if (*len != sizeof(struct ipt_getinfo))
                return -EINVAL;
-       }
 
        if (copy_from_user(name, user, sizeof(name)) != 0)
                return -EFAULT;
@@ -1145,31 +1022,23 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr,
        struct ipt_get_entries get;
        struct xt_table *t;
 
-       if (*len < sizeof(get)) {
-               duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
+       if (*len < sizeof(get))
                return -EINVAL;
-       }
        if (copy_from_user(&get, uptr, sizeof(get)) != 0)
                return -EFAULT;
-       if (*len != sizeof(struct ipt_get_entries) + get.size) {
-               duprintf("get_entries: %u != %zu\n",
-                        *len, sizeof(get) + get.size);
+       if (*len != sizeof(struct ipt_get_entries) + get.size)
                return -EINVAL;
-       }
        get.name[sizeof(get.name) - 1] = '\0';
 
        t = xt_find_table_lock(net, AF_INET, get.name);
        if (!IS_ERR_OR_NULL(t)) {
                const struct xt_table_info *private = t->private;
-               duprintf("t->private->number = %u\n", private->number);
                if (get.size == private->size)
                        ret = copy_entries_to_user(private->size,
                                                   t, uptr->entrytable);
-               else {
-                       duprintf("get_entries: I've got %u not %u!\n",
-                                private->size, get.size);
+               else
                        ret = -EAGAIN;
-               }
+
                module_put(t->me);
                xt_table_unlock(t);
        } else
@@ -1205,8 +1074,6 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
 
        /* You lied! */
        if (valid_hooks != t->valid_hooks) {
-               duprintf("Valid hook crap: %08X vs %08X\n",
-                        valid_hooks, t->valid_hooks);
                ret = -EINVAL;
                goto put_module;
        }
@@ -1216,8 +1083,6 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
                goto put_module;
 
        /* Update module usage count based on number of rules */
-       duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
-               oldinfo->number, oldinfo->initial_entries, newinfo->number);
        if ((oldinfo->number > oldinfo->initial_entries) ||
            (newinfo->number <= oldinfo->initial_entries))
                module_put(t->me);
@@ -1286,8 +1151,6 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
        if (ret != 0)
                goto free_newinfo;
 
-       duprintf("Translated table\n");
-
        ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
                           tmp.num_counters, tmp.counters);
        if (ret)
@@ -1309,55 +1172,17 @@ do_add_counters(struct net *net, const void __user *user,
        unsigned int i;
        struct xt_counters_info tmp;
        struct xt_counters *paddc;
-       unsigned int num_counters;
-       const char *name;
-       int size;
-       void *ptmp;
        struct xt_table *t;
        const struct xt_table_info *private;
        int ret = 0;
        struct ipt_entry *iter;
        unsigned int addend;
-#ifdef CONFIG_COMPAT
-       struct compat_xt_counters_info compat_tmp;
 
-       if (compat) {
-               ptmp = &compat_tmp;
-               size = sizeof(struct compat_xt_counters_info);
-       } else
-#endif
-       {
-               ptmp = &tmp;
-               size = sizeof(struct xt_counters_info);
-       }
+       paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
+       if (IS_ERR(paddc))
+               return PTR_ERR(paddc);
 
-       if (copy_from_user(ptmp, user, size) != 0)
-               return -EFAULT;
-
-#ifdef CONFIG_COMPAT
-       if (compat) {
-               num_counters = compat_tmp.num_counters;
-               name = compat_tmp.name;
-       } else
-#endif
-       {
-               num_counters = tmp.num_counters;
-               name = tmp.name;
-       }
-
-       if (len != size + num_counters * sizeof(struct xt_counters))
-               return -EINVAL;
-
-       paddc = vmalloc(len - size);
-       if (!paddc)
-               return -ENOMEM;
-
-       if (copy_from_user(paddc, user + size, len - size) != 0) {
-               ret = -EFAULT;
-               goto free;
-       }
-
-       t = xt_find_table_lock(net, AF_INET, name);
+       t = xt_find_table_lock(net, AF_INET, tmp.name);
        if (IS_ERR_OR_NULL(t)) {
                ret = t ? PTR_ERR(t) : -ENOENT;
                goto free;
@@ -1365,7 +1190,7 @@ do_add_counters(struct net *net, const void __user *user,
 
        local_bh_disable();
        private = t->private;
-       if (private->number != num_counters) {
+       if (private->number != tmp.num_counters) {
                ret = -EINVAL;
                goto unlock_up_free;
        }
@@ -1444,7 +1269,6 @@ compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
 
 static int
 compat_find_calc_match(struct xt_entry_match *m,
-                      const char *name,
                       const struct ipt_ip *ip,
                       int *size)
 {
@@ -1452,11 +1276,9 @@ compat_find_calc_match(struct xt_entry_match *m,
 
        match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
                                      m->u.user.revision);
-       if (IS_ERR(match)) {
-               duprintf("compat_check_calc_match: `%s' not found\n",
-                        m->u.user.name);
+       if (IS_ERR(match))
                return PTR_ERR(match);
-       }
+
        m->u.kernel.match = match;
        *size += xt_compat_match_offset(match);
        return 0;
@@ -1479,35 +1301,29 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
                                  struct xt_table_info *newinfo,
                                  unsigned int *size,
                                  const unsigned char *base,
-                                 const unsigned char *limit,
-                                 const unsigned int *hook_entries,
-                                 const unsigned int *underflows,
-                                 const char *name)
+                                 const unsigned char *limit)
 {
        struct xt_entry_match *ematch;
        struct xt_entry_target *t;
        struct xt_target *target;
        unsigned int entry_offset;
        unsigned int j;
-       int ret, off, h;
+       int ret, off;
 
-       duprintf("check_compat_entry_size_and_hooks %p\n", e);
        if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
            (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit ||
-           (unsigned char *)e + e->next_offset > limit) {
-               duprintf("Bad offset %p, limit = %p\n", e, limit);
+           (unsigned char *)e + e->next_offset > limit)
                return -EINVAL;
-       }
 
        if (e->next_offset < sizeof(struct compat_ipt_entry) +
-                            sizeof(struct compat_xt_entry_target)) {
-               duprintf("checking: element %p size %u\n",
-                        e, e->next_offset);
+                            sizeof(struct compat_xt_entry_target))
                return -EINVAL;
-       }
 
-       /* For purposes of check_entry casting the compat entry is fine */
-       ret = check_entry((struct ipt_entry *)e);
+       if (!ip_checkentry(&e->ip))
+               return -EINVAL;
+
+       ret = xt_compat_check_entry_offsets(e, e->elems,
+                                           e->target_offset, e->next_offset);
        if (ret)
                return ret;
 
@@ -1515,7 +1331,7 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
        entry_offset = (void *)e - (void *)base;
        j = 0;
        xt_ematch_foreach(ematch, e) {
-               ret = compat_find_calc_match(ematch, name, &e->ip, &off);
+               ret = compat_find_calc_match(ematch, &e->ip, &off);
                if (ret != 0)
                        goto release_matches;
                ++j;
@@ -1525,8 +1341,6 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
        target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
                                        t->u.user.revision);
        if (IS_ERR(target)) {
-               duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
-                        t->u.user.name);
                ret = PTR_ERR(target);
                goto release_matches;
        }
@@ -1538,17 +1352,6 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
        if (ret)
                goto out;
 
-       /* Check hooks & underflows */
-       for (h = 0; h < NF_INET_NUMHOOKS; h++) {
-               if ((unsigned char *)e - base == hook_entries[h])
-                       newinfo->hook_entry[h] = hook_entries[h];
-               if ((unsigned char *)e - base == underflows[h])
-                       newinfo->underflow[h] = underflows[h];
-       }
-
-       /* Clear counters and comefrom */
-       memset(&e->counters, 0, sizeof(e->counters));
-       e->comefrom = 0;
        return 0;
 
 out:
@@ -1562,19 +1365,18 @@ release_matches:
        return ret;
 }
 
-static int
+static void
 compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
-                           unsigned int *size, const char *name,
+                           unsigned int *size,
                            struct xt_table_info *newinfo, unsigned char *base)
 {
        struct xt_entry_target *t;
        struct xt_target *target;
        struct ipt_entry *de;
        unsigned int origsize;
-       int ret, h;
+       int h;
        struct xt_entry_match *ematch;
 
-       ret = 0;
        origsize = *size;
        de = (struct ipt_entry *)*dstptr;
        memcpy(de, e, sizeof(struct ipt_entry));
@@ -1583,201 +1385,101 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
        *dstptr += sizeof(struct ipt_entry);
        *size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
 
-       xt_ematch_foreach(ematch, e) {
-               ret = xt_compat_match_from_user(ematch, dstptr, size);
-               if (ret != 0)
-                       return ret;
-       }
+       xt_ematch_foreach(ematch, e)
+               xt_compat_match_from_user(ematch, dstptr, size);
+
        de->target_offset = e->target_offset - (origsize - *size);
        t = compat_ipt_get_target(e);
        target = t->u.kernel.target;
        xt_compat_target_from_user(t, dstptr, size);
 
        de->next_offset = e->next_offset - (origsize - *size);
+
        for (h = 0; h < NF_INET_NUMHOOKS; h++) {
                if ((unsigned char *)de - base < newinfo->hook_entry[h])
                        newinfo->hook_entry[h] -= origsize - *size;
                if ((unsigned char *)de - base < newinfo->underflow[h])
                        newinfo->underflow[h] -= origsize - *size;
        }
-       return ret;
-}
-
-static int
-compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
-{
-       struct xt_entry_match *ematch;
-       struct xt_mtchk_param mtpar;
-       unsigned int j;
-       int ret = 0;
-
-       e->counters.pcnt = xt_percpu_counter_alloc();
-       if (IS_ERR_VALUE(e->counters.pcnt))
-               return -ENOMEM;
-
-       j = 0;
-       mtpar.net       = net;
-       mtpar.table     = name;
-       mtpar.entryinfo = &e->ip;
-       mtpar.hook_mask = e->comefrom;
-       mtpar.family    = NFPROTO_IPV4;
-       xt_ematch_foreach(ematch, e) {
-               ret = check_match(ematch, &mtpar);
-               if (ret != 0)
-                       goto cleanup_matches;
-               ++j;
-       }
-
-       ret = check_target(e, net, name);
-       if (ret)
-               goto cleanup_matches;
-       return 0;
-
- cleanup_matches:
-       xt_ematch_foreach(ematch, e) {
-               if (j-- == 0)
-                       break;
-               cleanup_match(ematch, net);
-       }
-
-       xt_percpu_counter_free(e->counters.pcnt);
-
-       return ret;
 }
 
 static int
 translate_compat_table(struct net *net,
-                      const char *name,
-                      unsigned int valid_hooks,
                       struct xt_table_info **pinfo,
                       void **pentry0,
-                      unsigned int total_size,
-                      unsigned int number,
-                      unsigned int *hook_entries,
-                      unsigned int *underflows)
+                      const struct compat_ipt_replace *compatr)
 {
        unsigned int i, j;
        struct xt_table_info *newinfo, *info;
        void *pos, *entry0, *entry1;
        struct compat_ipt_entry *iter0;
-       struct ipt_entry *iter1;
+       struct ipt_replace repl;
        unsigned int size;
        int ret;
 
        info = *pinfo;
        entry0 = *pentry0;
-       size = total_size;
-       info->number = number;
-
-       /* Init all hooks to impossible value. */
-       for (i = 0; i < NF_INET_NUMHOOKS; i++) {
-               info->hook_entry[i] = 0xFFFFFFFF;
-               info->underflow[i] = 0xFFFFFFFF;
-       }
+       size = compatr->size;
+       info->number = compatr->num_entries;
 
-       duprintf("translate_compat_table: size %u\n", info->size);
        j = 0;
        xt_compat_lock(AF_INET);
-       xt_compat_init_offsets(AF_INET, number);
+       xt_compat_init_offsets(AF_INET, compatr->num_entries);
        /* Walk through entries, checking offsets. */
-       xt_entry_foreach(iter0, entry0, total_size) {
+       xt_entry_foreach(iter0, entry0, compatr->size) {
                ret = check_compat_entry_size_and_hooks(iter0, info, &size,
                                                        entry0,
-                                                       entry0 + total_size,
-                                                       hook_entries,
-                                                       underflows,
-                                                       name);
+                                                       entry0 + compatr->size);
                if (ret != 0)
                        goto out_unlock;
                ++j;
        }
 
        ret = -EINVAL;
-       if (j != number) {
-               duprintf("translate_compat_table: %u not %u entries\n",
-                        j, number);
+       if (j != compatr->num_entries)
                goto out_unlock;
-       }
-
-       /* Check hooks all assigned */
-       for (i = 0; i < NF_INET_NUMHOOKS; i++) {
-               /* Only hooks which are valid */
-               if (!(valid_hooks & (1 << i)))
-                       continue;
-               if (info->hook_entry[i] == 0xFFFFFFFF) {
-                       duprintf("Invalid hook entry %u %u\n",
-                                i, hook_entries[i]);
-                       goto out_unlock;
-               }
-               if (info->underflow[i] == 0xFFFFFFFF) {
-                       duprintf("Invalid underflow %u %u\n",
-                                i, underflows[i]);
-                       goto out_unlock;
-               }
-       }
 
        ret = -ENOMEM;
        newinfo = xt_alloc_table_info(size);
        if (!newinfo)
                goto out_unlock;
 
-       newinfo->number = number;
+       newinfo->number = compatr->num_entries;
        for (i = 0; i < NF_INET_NUMHOOKS; i++) {
-               newinfo->hook_entry[i] = info->hook_entry[i];
-               newinfo->underflow[i] = info->underflow[i];
+               newinfo->hook_entry[i] = compatr->hook_entry[i];
+               newinfo->underflow[i] = compatr->underflow[i];
        }
        entry1 = newinfo->entries;
        pos = entry1;
-       size = total_size;
-       xt_entry_foreach(iter0, entry0, total_size) {
-               ret = compat_copy_entry_from_user(iter0, &pos, &size,
-                                                 name, newinfo, entry1);
-               if (ret != 0)
-                       break;
-       }
+       size = compatr->size;
+       xt_entry_foreach(iter0, entry0, compatr->size)
+               compat_copy_entry_from_user(iter0, &pos, &size,
+                                           newinfo, entry1);
+
+       /* All module references in entry0 are now gone.
+        * entry1/newinfo contains a 64bit ruleset that looks exactly as
+        * if it had been generated by 64bit userspace.
+        *
+        * Call the standard translate_table() to validate all the
+        * hook_entry and underflow offsets and to check for loops.
+        */
        xt_compat_flush_offsets(AF_INET);
        xt_compat_unlock(AF_INET);
-       if (ret)
-               goto free_newinfo;
 
-       ret = -ELOOP;
-       if (!mark_source_chains(newinfo, valid_hooks, entry1))
-               goto free_newinfo;
+       memcpy(&repl, compatr, sizeof(*compatr));
 
-       i = 0;
-       xt_entry_foreach(iter1, entry1, newinfo->size) {
-               ret = compat_check_entry(iter1, net, name);
-               if (ret != 0)
-                       break;
-               ++i;
-               if (strcmp(ipt_get_target(iter1)->u.user.name,
-                   XT_ERROR_TARGET) == 0)
-                       ++newinfo->stacksize;
-       }
-       if (ret) {
-               /*
-                * The first i matches need cleanup_entry (calls ->destroy)
-                * because they had called ->check already. The other j-i
-                * entries need only release.
-                */
-               int skip = i;
-               j -= i;
-               xt_entry_foreach(iter0, entry0, newinfo->size) {
-                       if (skip-- > 0)
-                               continue;
-                       if (j-- == 0)
-                               break;
-                       compat_release_entry(iter0);
-               }
-               xt_entry_foreach(iter1, entry1, newinfo->size) {
-                       if (i-- == 0)
-                               break;
-                       cleanup_entry(iter1, net);
-               }
-               xt_free_table_info(newinfo);
-               return ret;
+       for (i = 0; i < NF_INET_NUMHOOKS; i++) {
+               repl.hook_entry[i] = newinfo->hook_entry[i];
+               repl.underflow[i] = newinfo->underflow[i];
        }
 
+       repl.num_counters = 0;
+       repl.counters = NULL;
+       repl.size = newinfo->size;
+       ret = translate_table(net, newinfo, entry1, &repl);
+       if (ret)
+               goto free_newinfo;
+
        *pinfo = newinfo;
        *pentry0 = entry1;
        xt_free_table_info(info);
@@ -1785,17 +1487,16 @@ translate_compat_table(struct net *net,
 
 free_newinfo:
        xt_free_table_info(newinfo);
-out:
-       xt_entry_foreach(iter0, entry0, total_size) {
+       return ret;
+out_unlock:
+       xt_compat_flush_offsets(AF_INET);
+       xt_compat_unlock(AF_INET);
+       xt_entry_foreach(iter0, entry0, compatr->size) {
                if (j-- == 0)
                        break;
                compat_release_entry(iter0);
        }
        return ret;
-out_unlock:
-       xt_compat_flush_offsets(AF_INET);
-       xt_compat_unlock(AF_INET);
-       goto out;
 }
 
 static int
@@ -1811,8 +1512,6 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
                return -EFAULT;
 
        /* overflow check */
-       if (tmp.size >= INT_MAX / num_possible_cpus())
-               return -ENOMEM;
        if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
                return -ENOMEM;
        if (tmp.num_counters == 0)
@@ -1831,15 +1530,10 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
                goto free_newinfo;
        }
 
-       ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
-                                    &newinfo, &loc_cpu_entry, tmp.size,
-                                    tmp.num_entries, tmp.hook_entry,
-                                    tmp.underflow);
+       ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
        if (ret != 0)
                goto free_newinfo;
 
-       duprintf("compat_do_replace: Translated table\n");
-
        ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
                           tmp.num_counters, compat_ptr(tmp.counters));
        if (ret)
@@ -1873,7 +1567,6 @@ compat_do_ipt_set_ctl(struct sock *sk,    int cmd, void __user *user,
                break;
 
        default:
-               duprintf("do_ipt_set_ctl:  unknown request %i\n", cmd);
                ret = -EINVAL;
        }
 
@@ -1923,19 +1616,15 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
        struct compat_ipt_get_entries get;
        struct xt_table *t;
 
-       if (*len < sizeof(get)) {
-               duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
+       if (*len < sizeof(get))
                return -EINVAL;
-       }
 
        if (copy_from_user(&get, uptr, sizeof(get)) != 0)
                return -EFAULT;
 
-       if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
-               duprintf("compat_get_entries: %u != %zu\n",
-                        *len, sizeof(get) + get.size);
+       if (*len != sizeof(struct compat_ipt_get_entries) + get.size)
                return -EINVAL;
-       }
+
        get.name[sizeof(get.name) - 1] = '\0';
 
        xt_compat_lock(AF_INET);
@@ -1943,16 +1632,13 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
        if (!IS_ERR_OR_NULL(t)) {
                const struct xt_table_info *private = t->private;
                struct xt_table_info info;
-               duprintf("t->private->number = %u\n", private->number);
                ret = compat_table_info(private, &info);
-               if (!ret && get.size == info.size) {
+               if (!ret && get.size == info.size)
                        ret = compat_copy_entries_to_user(private->size,
                                                          t, uptr->entrytable);
-               } else if (!ret) {
-                       duprintf("compat_get_entries: I've got %u not %u!\n",
-                                private->size, get.size);
+               else if (!ret)
                        ret = -EAGAIN;
-               }
+
                xt_compat_flush_offsets(AF_INET);
                module_put(t->me);
                xt_table_unlock(t);
@@ -2005,7 +1691,6 @@ do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
                break;
 
        default:
-               duprintf("do_ipt_set_ctl:  unknown request %i\n", cmd);
                ret = -EINVAL;
        }
 
@@ -2057,7 +1742,6 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
        }
 
        default:
-               duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
                ret = -EINVAL;
        }
 
@@ -2159,7 +1843,6 @@ icmp_match(const struct sk_buff *skb, struct xt_action_param *par)
                /* We've been asked to examine this packet, and we
                 * can't.  Hence, no choice but to drop.
                 */
-               duprintf("Dropping evil ICMP tinygram.\n");
                par->hotdrop = true;
                return false;
        }
index e3c46e8e276267b74b0482e85a208a70fa7f3d85..ae1a71a97132bd5a5a49996e95c81e7c08b8c60e 100644 (file)
@@ -360,7 +360,7 @@ static int ipv4_init_net(struct net *net)
 
        in->ctl_table[0].data = &nf_conntrack_max;
        in->ctl_table[1].data = &net->ct.count;
-       in->ctl_table[2].data = &net->ct.htable_size;
+       in->ctl_table[2].data = &nf_conntrack_htable_size;
        in->ctl_table[3].data = &net->ct.sysctl_checksum;
        in->ctl_table[4].data = &net->ct.sysctl_log_invalid;
 #endif
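
Both the sysctl pointer swap above and the seq-file changes below fall out of the conntrack hash table becoming a single, global table in this cycle: the per-namespace net->ct.htable_size and net->ct.hash go away, replaced by globals along the lines of

    extern struct hlist_nulls_head *nf_conntrack_hash;  /* shared by all netns */
    extern unsigned int nf_conntrack_htable_size;

which is also why the dump paths below grow explicit net_eq() checks to keep the /proc output namespace-correct.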
index f0dfe92a00d66a6a58301a94b238baa5cae32fb7..c6f3c406f707a06a041035f27776d3966e0f27a0 100644 (file)
@@ -31,15 +31,14 @@ struct ct_iter_state {
 
 static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
 {
-       struct net *net = seq_file_net(seq);
        struct ct_iter_state *st = seq->private;
        struct hlist_nulls_node *n;
 
        for (st->bucket = 0;
-            st->bucket < net->ct.htable_size;
+            st->bucket < nf_conntrack_htable_size;
             st->bucket++) {
                n = rcu_dereference(
-                       hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
+                       hlist_nulls_first_rcu(&nf_conntrack_hash[st->bucket]));
                if (!is_a_nulls(n))
                        return n;
        }
@@ -49,17 +48,16 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
 static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
                                      struct hlist_nulls_node *head)
 {
-       struct net *net = seq_file_net(seq);
        struct ct_iter_state *st = seq->private;
 
        head = rcu_dereference(hlist_nulls_next_rcu(head));
        while (is_a_nulls(head)) {
                if (likely(get_nulls_value(head) == st->bucket)) {
-                       if (++st->bucket >= net->ct.htable_size)
+                       if (++st->bucket >= nf_conntrack_htable_size)
                                return NULL;
                }
                head = rcu_dereference(
-                       hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
+                       hlist_nulls_first_rcu(&nf_conntrack_hash[st->bucket]));
        }
        return head;
 }
@@ -114,6 +112,23 @@ static inline void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
 }
 #endif
 
+static bool ct_seq_should_skip(const struct nf_conn *ct,
+                              const struct net *net,
+                              const struct nf_conntrack_tuple_hash *hash)
+{
+       /* we only want to print DIR_ORIGINAL */
+       if (NF_CT_DIRECTION(hash))
+               return true;
+
+       if (nf_ct_l3num(ct) != AF_INET)
+               return true;
+
+       if (!net_eq(nf_ct_net(ct), net))
+               return true;
+
+       return false;
+}
+
 static int ct_seq_show(struct seq_file *s, void *v)
 {
        struct nf_conntrack_tuple_hash *hash = v;
@@ -123,14 +138,15 @@ static int ct_seq_show(struct seq_file *s, void *v)
        int ret = 0;
 
        NF_CT_ASSERT(ct);
-       if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
+       if (ct_seq_should_skip(ct, seq_file_net(s), hash))
                return 0;
 
+       if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
+               return 0;
 
-       /* we only want to print DIR_ORIGINAL */
-       if (NF_CT_DIRECTION(hash))
-               goto release;
-       if (nf_ct_l3num(ct) != AF_INET)
+       /* check if we raced w. object reuse */
+       if (!nf_ct_is_confirmed(ct) ||
+           ct_seq_should_skip(ct, seq_file_net(s), hash))
                goto release;
 
        l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
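
The reordering above is the standard discipline for objects allocated from a SLAB_DESTROY_BY_RCU cache, which conntrack entries are: under rcu_read_lock() a freed entry may be recycled as a different conntrack at any moment, so a reader must (1) pre-filter cheaply, (2) take a reference with atomic_inc_not_zero(), and (3) re-validate while holding the reference. As a sketch, with lookup() and still_matches() standing in for the real hash walk and tuple comparison:

    rcu_read_lock();
    ct = lookup(hash, key);
    if (ct && atomic_inc_not_zero(&ct->ct_general.use)) {
            /* the slot may have been recycled between the lookup and
             * the refcount acquisition; re-check identity now that
             * the object can no longer go away under us */
            if (!nf_ct_is_confirmed(ct) || !still_matches(ct, key)) {
                    nf_ct_put(ct);
                    ct = NULL;
            }
    }
    rcu_read_unlock();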
@@ -220,13 +236,12 @@ struct ct_expect_iter_state {
 
 static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
 {
-       struct net *net = seq_file_net(seq);
        struct ct_expect_iter_state *st = seq->private;
        struct hlist_node *n;
 
        for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
                n = rcu_dereference(
-                       hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
+                       hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
                if (n)
                        return n;
        }
@@ -236,7 +251,6 @@ static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
 static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
                                             struct hlist_node *head)
 {
-       struct net *net = seq_file_net(seq);
        struct ct_expect_iter_state *st = seq->private;
 
        head = rcu_dereference(hlist_next_rcu(head));
@@ -244,7 +258,7 @@ static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
                if (++st->bucket >= nf_ct_expect_hsize)
                        return NULL;
                head = rcu_dereference(
-                       hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
+                       hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
        }
        return head;
 }
@@ -285,6 +299,9 @@ static int exp_seq_show(struct seq_file *s, void *v)
 
        exp = hlist_entry(n, struct nf_conntrack_expect, hnode);
 
+       if (!net_eq(nf_ct_net(exp->master), seq_file_net(s)))
+               return 0;
+
        if (exp->tuple.src.l3num != AF_INET)
                return 0;
 
index 02c62299d717b9f6c38a5227e3b3ae376e0015b6..a1f2830d811025e561c9739c79a5c218a3801977 100644 (file)
@@ -915,11 +915,11 @@ static int ip_error(struct sk_buff *skb)
        if (!IN_DEV_FORWARD(in_dev)) {
                switch (rt->dst.error) {
                case EHOSTUNREACH:
-                       IP_INC_STATS_BH(net, IPSTATS_MIB_INADDRERRORS);
+                       __IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
                        break;
 
                case ENETUNREACH:
-                       IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
+                       __IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
                        break;
                }
                goto out;
@@ -934,7 +934,7 @@ static int ip_error(struct sk_buff *skb)
                break;
        case ENETUNREACH:
                code = ICMP_NET_UNREACH;
-               IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
+               __IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
                break;
        case EACCES:
                code = ICMP_PKT_FILTERED;
@@ -1438,9 +1438,9 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
 #endif
 }
 
-static struct rtable *rt_dst_alloc(struct net_device *dev,
-                                  unsigned int flags, u16 type,
-                                  bool nopolicy, bool noxfrm, bool will_cache)
+struct rtable *rt_dst_alloc(struct net_device *dev,
+                           unsigned int flags, u16 type,
+                           bool nopolicy, bool noxfrm, bool will_cache)
 {
        struct rtable *rt;
 
@@ -1468,6 +1468,7 @@ static struct rtable *rt_dst_alloc(struct net_device *dev,
 
        return rt;
 }
+EXPORT_SYMBOL(rt_dst_alloc);
 
 /* called in rcu_read_lock() section */
 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
@@ -2045,6 +2046,18 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
                 */
                if (fi && res->prefixlen < 4)
                        fi = NULL;
+       } else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
+                  (orig_oif != dev_out->ifindex)) {
+               /* For local routes that require a particular output interface
+                * we do not want to cache the result.  Caching the result
+                * causes incorrect behaviour when there are multiple source
+                * addresses on the interface, the end result being that if the
+                * intended recipient is waiting on that interface for the
+                * packet, it won't be received, because it will be delivered on
+                * the loopback interface and the IP_PKTINFO ipi_ifindex will
+                * be set to the loopback interface as well.
+                */
+               fi = NULL;
        }
 
        fnhe = NULL;
@@ -2133,6 +2146,7 @@ struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
        unsigned int flags = 0;
        struct fib_result res;
        struct rtable *rth;
+       int master_idx;
        int orig_oif;
        int err = -ENETUNREACH;
 
@@ -2142,6 +2156,9 @@ struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
 
        orig_oif = fl4->flowi4_oif;
 
+       master_idx = l3mdev_master_ifindex_by_index(net, fl4->flowi4_oif);
+       if (master_idx)
+               fl4->flowi4_oif = master_idx;
        fl4->flowi4_iif = LOOPBACK_IFINDEX;
        fl4->flowi4_tos = tos & IPTOS_RT_MASK;
        fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
index 4c04f09338e3410dd75a085674c0fcf355f43164..e3c4043c27de289b7761cef4adbcd6c8f731d534 100644 (file)
@@ -312,11 +312,11 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
 
        mss = __cookie_v4_check(ip_hdr(skb), th, cookie);
        if (mss == 0) {
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
+               __NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
                goto out;
        }
 
-       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
+       __NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
 
        /* check for timestamp cookie support */
        memset(&tcp_opt, 0, sizeof(tcp_opt));
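
The IP_INC_STATS_BH/NET_INC_STATS_BH rewrites in the last few files are part of a tree-wide SNMP helper rename in this merge: the _BH suffix becomes a leading double underscore, and the _USER variants drop their suffix (see the NET_INC_STATS_USER change near the end of this page). The intent, roughly:

    /* __NET_INC_STATS(net, field): caller already runs with BHs
     *                              disabled (softirq context)
     * NET_INC_STATS(net, field):   usable from any context */

so the underscores follow the usual kernel convention of marking the "caller has already taken care of the execution context" variant.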
index 1e1fe6086dd912e5071524053581afa79578061b..bb0419582b8dfed626f04752307ddaf2671ed01c 100644 (file)
@@ -960,6 +960,17 @@ static struct ctl_table ipv4_net_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+       {
+               .procname       = "fib_multipath_use_neigh",
+               .data           = &init_net.ipv4.sysctl_fib_multipath_use_neigh,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &zero,
+               .extra2         = &one,
+       },
+#endif
        { }
 };
 
index 4d73858991afd00431cda231bf162d0970ea0a02..5c7ed147449c1b7ba029b12e033ad779a631460a 100644 (file)
@@ -430,14 +430,15 @@ EXPORT_SYMBOL(tcp_init_sock);
 
 static void tcp_tx_timestamp(struct sock *sk, u16 tsflags, struct sk_buff *skb)
 {
-       if (sk->sk_tsflags || tsflags) {
+       if (tsflags) {
                struct skb_shared_info *shinfo = skb_shinfo(skb);
                struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
 
                sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags);
-               if (shinfo->tx_flags & SKBTX_ANY_TSTAMP)
+               if (tsflags & SOF_TIMESTAMPING_TX_ACK)
+                       tcb->txstamp_ack = 1;
+               if (tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
                        shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;
-               tcb->txstamp_ack = !!(shinfo->tx_flags & SKBTX_ACK_TSTAMP);
        }
 }
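
tcp_tx_timestamp() now keys entirely off the per-call tsflags carried in the
sockcm cookie, which defaults to sk->sk_tsflags and can be overridden for a
single write via a control message. A hypothetical sender using that path;
reporting flags such as SOF_TIMESTAMPING_SOFTWARE are assumed to have been
set once with setsockopt(SO_TIMESTAMPING):

#include <sys/socket.h>
#include <linux/net_tstamp.h>

static ssize_t send_with_ack_tstamp(int fd, const void *buf, size_t len)
{
        char ctrl[CMSG_SPACE(sizeof(__u32))] = { 0 };
        struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
        struct msghdr msg = {
                .msg_iov = &iov, .msg_iovlen = 1,
                .msg_control = ctrl, .msg_controllen = sizeof(ctrl),
        };
        struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);

        cm->cmsg_level = SOL_SOCKET;
        cm->cmsg_type = SO_TIMESTAMPING;
        cm->cmsg_len = CMSG_LEN(sizeof(__u32));
        /* generation flag only; this is what lands in sockc.tsflags */
        *(__u32 *)CMSG_DATA(cm) = SOF_TIMESTAMPING_TX_ACK;
        return sendmsg(fd, &msg, 0);
}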
 
@@ -908,7 +909,8 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
                int copy, i;
                bool can_coalesce;
 
-               if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
+               if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0 ||
+                   !tcp_skb_can_collapse_to(skb)) {
 new_segment:
                        if (!sk_stream_memory_free(sk))
                                goto wait_for_sndbuf;
@@ -1082,6 +1084,7 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
        struct sockcm_cookie sockc;
        int flags, err, copied = 0;
        int mss_now = 0, size_goal, copied_syn = 0;
+       bool process_backlog = false;
        bool sg;
        long timeo;
 
@@ -1134,11 +1137,12 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
        /* This should be in poll */
        sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
-       mss_now = tcp_send_mss(sk, &size_goal, flags);
-
        /* Ok commence sending. */
        copied = 0;
 
+restart:
+       mss_now = tcp_send_mss(sk, &size_goal, flags);
+
        err = -EPIPE;
        if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
                goto out_err;
@@ -1156,7 +1160,7 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
                        copy = max - skb->len;
                }
 
-               if (copy <= 0) {
+               if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) {
 new_segment:
                        /* Allocate new segment. If the interface is SG,
                         * allocate an skb that fits into a single page.
@@ -1164,6 +1168,10 @@ new_segment:
                        if (!sk_stream_memory_free(sk))
                                goto wait_for_sndbuf;
 
+                       if (process_backlog && sk_flush_backlog(sk)) {
+                               process_backlog = false;
+                               goto restart;
+                       }
                        skb = sk_stream_alloc_skb(sk,
                                                  select_size(sk, sg),
                                                  sk->sk_allocation,
@@ -1171,6 +1179,7 @@ new_segment:
                        if (!skb)
                                goto wait_for_memory;
 
+                       process_backlog = true;
                        /*
                         * Check whether we can use HW checksum.
                         */
@@ -1250,6 +1259,8 @@ new_segment:
                copied += copy;
                if (!msg_data_left(msg)) {
                        tcp_tx_timestamp(sk, sockc.tsflags, skb);
+                       if (unlikely(flags & MSG_EOR))
+                               TCP_SKB_CB(skb)->eor = 1;
                        goto out;
                }
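
MSG_EOR on TCP is new here: it marks the tail skb of a write as an end of
record, so the collapse and coalesce paths (guarded by the
tcp_skb_can_collapse_to() checks added above) never merge later writes into
it. A minimal use:

#include <sys/socket.h>

static ssize_t send_record(int fd, const void *buf, size_t len)
{
        /* the final skb of this write gets TCP_SKB_CB(skb)->eor = 1 */
        return send(fd, buf, len, MSG_EOR);
}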
 
@@ -1443,14 +1454,10 @@ static void tcp_prequeue_process(struct sock *sk)
        struct sk_buff *skb;
        struct tcp_sock *tp = tcp_sk(sk);
 
-       NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED);
+       NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUED);
 
-       /* RX process wants to run with disabled BHs, though it is not
-        * necessary */
-       local_bh_disable();
        while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
                sk_backlog_rcv(sk, skb);
-       local_bh_enable();
 
        /* Clear memory counter. */
        tp->ucopy.memory = 0;
@@ -1777,7 +1784,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
 
                        chunk = len - tp->ucopy.len;
                        if (chunk != 0) {
-                               NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
+                               NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
                                len -= chunk;
                                copied += chunk;
                        }
@@ -1789,7 +1796,7 @@ do_prequeue:
 
                                chunk = len - tp->ucopy.len;
                                if (chunk != 0) {
-                                       NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
+                                       NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
                                        len -= chunk;
                                        copied += chunk;
                                }
@@ -1875,7 +1882,7 @@ skip_copy:
                        tcp_prequeue_process(sk);
 
                        if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
-                               NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
+                               NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
                                len -= chunk;
                                copied += chunk;
                        }
@@ -2065,13 +2072,13 @@ void tcp_close(struct sock *sk, long timeout)
                sk->sk_prot->disconnect(sk, 0);
        } else if (data_was_unread) {
                /* Unread data was tossed, zap the connection. */
-               NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
                tcp_set_state(sk, TCP_CLOSE);
                tcp_send_active_reset(sk, sk->sk_allocation);
        } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
                /* Check zero linger _after_ checking for unread data. */
                sk->sk_prot->disconnect(sk, 0);
-               NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
        } else if (tcp_close_state(sk)) {
                /* We FIN if the application ate all the data before
                 * zapping the connection.
@@ -2148,7 +2155,7 @@ adjudge_to_death:
                if (tp->linger2 < 0) {
                        tcp_set_state(sk, TCP_CLOSE);
                        tcp_send_active_reset(sk, GFP_ATOMIC);
-                       NET_INC_STATS_BH(sock_net(sk),
+                       __NET_INC_STATS(sock_net(sk),
                                        LINUX_MIB_TCPABORTONLINGER);
                } else {
                        const int tmo = tcp_fin_time(sk);
@@ -2167,7 +2174,7 @@ adjudge_to_death:
                if (tcp_check_oom(sk, 0)) {
                        tcp_set_state(sk, TCP_CLOSE);
                        tcp_send_active_reset(sk, GFP_ATOMIC);
-                       NET_INC_STATS_BH(sock_net(sk),
+                       __NET_INC_STATS(sock_net(sk),
                                        LINUX_MIB_TCPABORTONMEMORY);
                }
        }
@@ -3091,7 +3098,7 @@ void tcp_done(struct sock *sk)
        struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
 
        if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
-               TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
+               TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
 
        tcp_set_state(sk, TCP_CLOSE);
        tcp_clear_xmit_timers(sk);
index fd1405d37c149309882742fb12b07331e7282a95..36087bca9f489646c2ca5aae3111449a956dd33b 100644 (file)
@@ -197,15 +197,15 @@ static void bictcp_state(struct sock *sk, u8 new_state)
 /* Track delayed acknowledgment ratio using sliding window
  * ratio = (15*ratio + sample) / 16
  */
-static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt)
+static void bictcp_acked(struct sock *sk, const struct ack_sample *sample)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
 
        if (icsk->icsk_ca_state == TCP_CA_Open) {
                struct bictcp *ca = inet_csk_ca(sk);
 
-               cnt -= ca->delayed_ack >> ACK_RATIO_SHIFT;
-               ca->delayed_ack += cnt;
+               ca->delayed_ack += sample->pkts_acked -
+                       (ca->delayed_ack >> ACK_RATIO_SHIFT);
        }
 }
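
Every congestion-control pkts_acked() callback in this series moves from the
(pkts_acked, rtt) argument pair to a single sample struct. Its fields are
confirmed by the initializer in the tcp_clean_rtx_queue() hunk further down;
the definition is assumed to be simply:

struct ack_sample {
        u32 pkts_acked;
        s32 rtt_us;
};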
 
index 167b6a3e1b9868c88e5553b114556ae312dfb99f..03725b2942866e2e9f91d9f9e01ab12ae9ff36dd 100644 (file)
@@ -155,11 +155,11 @@ static void tcp_cdg_hystart_update(struct sock *sk)
 
                        ca->last_ack = now_us;
                        if (after(now_us, ca->round_start + base_owd)) {
-                               NET_INC_STATS_BH(sock_net(sk),
-                                                LINUX_MIB_TCPHYSTARTTRAINDETECT);
-                               NET_ADD_STATS_BH(sock_net(sk),
-                                                LINUX_MIB_TCPHYSTARTTRAINCWND,
-                                                tp->snd_cwnd);
+                               NET_INC_STATS(sock_net(sk),
+                                             LINUX_MIB_TCPHYSTARTTRAINDETECT);
+                               NET_ADD_STATS(sock_net(sk),
+                                             LINUX_MIB_TCPHYSTARTTRAINCWND,
+                                             tp->snd_cwnd);
                                tp->snd_ssthresh = tp->snd_cwnd;
                                return;
                        }
@@ -174,11 +174,11 @@ static void tcp_cdg_hystart_update(struct sock *sk)
                                         125U);
 
                        if (ca->rtt.min > thresh) {
-                               NET_INC_STATS_BH(sock_net(sk),
-                                                LINUX_MIB_TCPHYSTARTDELAYDETECT);
-                               NET_ADD_STATS_BH(sock_net(sk),
-                                                LINUX_MIB_TCPHYSTARTDELAYCWND,
-                                                tp->snd_cwnd);
+                               NET_INC_STATS(sock_net(sk),
+                                             LINUX_MIB_TCPHYSTARTDELAYDETECT);
+                               NET_ADD_STATS(sock_net(sk),
+                                             LINUX_MIB_TCPHYSTARTDELAYCWND,
+                                             tp->snd_cwnd);
                                tp->snd_ssthresh = tp->snd_cwnd;
                        }
                }
@@ -294,12 +294,12 @@ static void tcp_cdg_cong_avoid(struct sock *sk, u32 ack, u32 acked)
        ca->shadow_wnd = max(ca->shadow_wnd, ca->shadow_wnd + incr);
 }
 
-static void tcp_cdg_acked(struct sock *sk, u32 num_acked, s32 rtt_us)
+static void tcp_cdg_acked(struct sock *sk, const struct ack_sample *sample)
 {
        struct cdg *ca = inet_csk_ca(sk);
        struct tcp_sock *tp = tcp_sk(sk);
 
-       if (rtt_us <= 0)
+       if (sample->rtt_us <= 0)
                return;
 
        /* A heuristic for filtering delayed ACKs, adapted from:
@@ -307,20 +307,20 @@ static void tcp_cdg_acked(struct sock *sk, u32 num_acked, s32 rtt_us)
         * delay and rate based TCP mechanisms." TR 100219A. CAIA, 2010.
         */
        if (tp->sacked_out == 0) {
-               if (num_acked == 1 && ca->delack) {
+               if (sample->pkts_acked == 1 && ca->delack) {
                        /* A delayed ACK is only used for the minimum if it is
                         * provably lower than an existing non-zero minimum.
                         */
-                       ca->rtt.min = min(ca->rtt.min, rtt_us);
+                       ca->rtt.min = min(ca->rtt.min, sample->rtt_us);
                        ca->delack--;
                        return;
-               } else if (num_acked > 1 && ca->delack < 5) {
+               } else if (sample->pkts_acked > 1 && ca->delack < 5) {
                        ca->delack++;
                }
        }
 
-       ca->rtt.min = min_not_zero(ca->rtt.min, rtt_us);
-       ca->rtt.max = max(ca->rtt.max, rtt_us);
+       ca->rtt.min = min_not_zero(ca->rtt.min, sample->rtt_us);
+       ca->rtt.max = max(ca->rtt.max, sample->rtt_us);
 }
 
 static u32 tcp_cdg_ssthresh(struct sock *sk)
index 448c2615fece946d7b5cc3136941efff92a2edd9..c99230efcd52d2233ba22fbc117101c8e8cee113 100644 (file)
@@ -402,11 +402,11 @@ static void hystart_update(struct sock *sk, u32 delay)
                        ca->last_ack = now;
                        if ((s32)(now - ca->round_start) > ca->delay_min >> 4) {
                                ca->found |= HYSTART_ACK_TRAIN;
-                               NET_INC_STATS_BH(sock_net(sk),
-                                                LINUX_MIB_TCPHYSTARTTRAINDETECT);
-                               NET_ADD_STATS_BH(sock_net(sk),
-                                                LINUX_MIB_TCPHYSTARTTRAINCWND,
-                                                tp->snd_cwnd);
+                               NET_INC_STATS(sock_net(sk),
+                                             LINUX_MIB_TCPHYSTARTTRAINDETECT);
+                               NET_ADD_STATS(sock_net(sk),
+                                             LINUX_MIB_TCPHYSTARTTRAINCWND,
+                                             tp->snd_cwnd);
                                tp->snd_ssthresh = tp->snd_cwnd;
                        }
                }
@@ -423,11 +423,11 @@ static void hystart_update(struct sock *sk, u32 delay)
                        if (ca->curr_rtt > ca->delay_min +
                            HYSTART_DELAY_THRESH(ca->delay_min >> 3)) {
                                ca->found |= HYSTART_DELAY;
-                               NET_INC_STATS_BH(sock_net(sk),
-                                                LINUX_MIB_TCPHYSTARTDELAYDETECT);
-                               NET_ADD_STATS_BH(sock_net(sk),
-                                                LINUX_MIB_TCPHYSTARTDELAYCWND,
-                                                tp->snd_cwnd);
+                               NET_INC_STATS(sock_net(sk),
+                                             LINUX_MIB_TCPHYSTARTDELAYDETECT);
+                               NET_ADD_STATS(sock_net(sk),
+                                             LINUX_MIB_TCPHYSTARTDELAYCWND,
+                                             tp->snd_cwnd);
                                tp->snd_ssthresh = tp->snd_cwnd;
                        }
                }
@@ -437,21 +437,21 @@ static void hystart_update(struct sock *sk, u32 delay)
 /* Track delayed acknowledgment ratio using sliding window
  * ratio = (15*ratio + sample) / 16
  */
-static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
+static void bictcp_acked(struct sock *sk, const struct ack_sample *sample)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
        struct bictcp *ca = inet_csk_ca(sk);
        u32 delay;
 
        /* Some calls are for duplicates without timestamps */
-       if (rtt_us < 0)
+       if (sample->rtt_us < 0)
                return;
 
        /* Discard delay samples right after fast recovery */
        if (ca->epoch_start && (s32)(tcp_time_stamp - ca->epoch_start) < HZ)
                return;
 
-       delay = (rtt_us << 3) / USEC_PER_MSEC;
+       delay = (sample->rtt_us << 3) / USEC_PER_MSEC;
        if (delay == 0)
                delay = 1;
 
index cffd8f9ed1a953031e0a08c090c17c0bcd62effa..54d9f9b0120fe26828f6cc522711f8df8b2bcfb3 100644 (file)
@@ -255,9 +255,9 @@ static bool tcp_fastopen_queue_check(struct sock *sk)
                spin_lock(&fastopenq->lock);
                req1 = fastopenq->rskq_rst_head;
                if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
+                       __NET_INC_STATS(sock_net(sk),
+                                       LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
                        spin_unlock(&fastopenq->lock);
-                       NET_INC_STATS_BH(sock_net(sk),
-                                        LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
                        return false;
                }
                fastopenq->rskq_rst_head = req1->dl_next;
@@ -282,7 +282,7 @@ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
        struct sock *child;
 
        if (foc->len == 0) /* Client requests a cookie */
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);
 
        if (!((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) &&
              (syn_data || foc->len >= 0) &&
@@ -311,13 +311,13 @@ fastopen:
                child = tcp_fastopen_create_child(sk, skb, dst, req);
                if (child) {
                        foc->len = -1;
-                       NET_INC_STATS_BH(sock_net(sk),
-                                        LINUX_MIB_TCPFASTOPENPASSIVE);
+                       NET_INC_STATS(sock_net(sk),
+                                     LINUX_MIB_TCPFASTOPENPASSIVE);
                        return child;
                }
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
        } else if (foc->len > 0) /* Client presents an invalid cookie */
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
 
        valid_foc.exp = foc->exp;
        *foc = valid_foc;
index 82f0d9ed60f50f27854fdb62a95281beed9df819..4a4d8e76738fa2831dcc3ecec5924dd3dfb7bf58 100644 (file)
@@ -99,7 +99,7 @@ static inline void measure_rtt(struct sock *sk, u32 srtt)
 }
 
 static void measure_achieved_throughput(struct sock *sk,
-                                       u32 pkts_acked, s32 rtt)
+                                       const struct ack_sample *sample)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
        const struct tcp_sock *tp = tcp_sk(sk);
@@ -107,10 +107,10 @@ static void measure_achieved_throughput(struct sock *sk,
        u32 now = tcp_time_stamp;
 
        if (icsk->icsk_ca_state == TCP_CA_Open)
-               ca->pkts_acked = pkts_acked;
+               ca->pkts_acked = sample->pkts_acked;
 
-       if (rtt > 0)
-               measure_rtt(sk, usecs_to_jiffies(rtt));
+       if (sample->rtt_us > 0)
+               measure_rtt(sk, usecs_to_jiffies(sample->rtt_us));
 
        if (!use_bandwidth_switch)
                return;
@@ -122,7 +122,7 @@ static void measure_achieved_throughput(struct sock *sk,
                return;
        }
 
-       ca->packetcount += pkts_acked;
+       ca->packetcount += sample->pkts_acked;
 
        if (ca->packetcount >= tp->snd_cwnd - (ca->alpha >> 7 ? : 1) &&
            now - ca->lasttime >= ca->minRTT &&
index 2ab9bbb6faffb799560df98b093d4cbc1207d816..c8e6d86be11421664f8832666812887a7a6bd07b 100644 (file)
@@ -82,30 +82,31 @@ static void tcp_illinois_init(struct sock *sk)
 }
 
 /* Measure RTT for each ack. */
-static void tcp_illinois_acked(struct sock *sk, u32 pkts_acked, s32 rtt)
+static void tcp_illinois_acked(struct sock *sk, const struct ack_sample *sample)
 {
        struct illinois *ca = inet_csk_ca(sk);
+       s32 rtt_us = sample->rtt_us;
 
-       ca->acked = pkts_acked;
+       ca->acked = sample->pkts_acked;
 
        /* dup ack, no rtt sample */
-       if (rtt < 0)
+       if (rtt_us < 0)
                return;
 
        /* ignore bogus values, this prevents wraparound in alpha math */
-       if (rtt > RTT_MAX)
-               rtt = RTT_MAX;
+       if (rtt_us > RTT_MAX)
+               rtt_us = RTT_MAX;
 
        /* keep track of minimum RTT seen so far */
-       if (ca->base_rtt > rtt)
-               ca->base_rtt = rtt;
+       if (ca->base_rtt > rtt_us)
+               ca->base_rtt = rtt_us;
 
        /* and max */
-       if (ca->max_rtt < rtt)
-               ca->max_rtt = rtt;
+       if (ca->max_rtt < rtt_us)
+               ca->max_rtt = rtt_us;
 
        ++ca->cnt_rtt;
-       ca->sum_rtt += rtt;
+       ca->sum_rtt += rtt_us;
 }
 
 /* Maximum queuing delay */
index 983f04c1117786ae7527d98d0d4d7c178de41a0b..d6c8f4cd080001a527f7c137021cc6a3f3604344 100644 (file)
@@ -869,7 +869,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
                else
                        mib_idx = LINUX_MIB_TCPSACKREORDER;
 
-               NET_INC_STATS_BH(sock_net(sk), mib_idx);
+               NET_INC_STATS(sock_net(sk), mib_idx);
 #if FASTRETRANS_DEBUG > 1
                pr_debug("Disorder%d %d %u f%u s%u rr%d\n",
                         tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
@@ -1062,7 +1062,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
        if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
                dup_sack = true;
                tcp_dsack_seen(tp);
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
        } else if (num_sacks > 1) {
                u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
                u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);
@@ -1071,7 +1071,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
                    !before(start_seq_0, start_seq_1)) {
                        dup_sack = true;
                        tcp_dsack_seen(tp);
-                       NET_INC_STATS_BH(sock_net(sk),
+                       NET_INC_STATS(sock_net(sk),
                                        LINUX_MIB_TCPDSACKOFORECV);
                }
        }
@@ -1289,7 +1289,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 
        if (skb->len > 0) {
                BUG_ON(!tcp_skb_pcount(skb));
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTED);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTED);
                return false;
        }
 
@@ -1303,16 +1303,18 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
        }
 
        TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
+       TCP_SKB_CB(prev)->eor = TCP_SKB_CB(skb)->eor;
        if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
                TCP_SKB_CB(prev)->end_seq++;
 
        if (skb == tcp_highest_sack(sk))
                tcp_advance_highest_sack(sk, skb);
 
+       tcp_skb_collapse_tstamp(prev, skb);
        tcp_unlink_write_queue(skb, sk);
        sk_wmem_free_skb(sk, skb);
 
-       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKMERGED);
+       NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKMERGED);
 
        return true;
 }
@@ -1367,6 +1369,9 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
        if ((TCP_SKB_CB(prev)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED)
                goto fallback;
 
+       if (!tcp_skb_can_collapse_to(prev))
+               goto fallback;
+
        in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
                  !before(end_seq, TCP_SKB_CB(skb)->end_seq);
 
@@ -1468,7 +1473,7 @@ noop:
        return skb;
 
 fallback:
-       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK);
+       NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK);
        return NULL;
 }
 
@@ -1656,7 +1661,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
                                mib_idx = LINUX_MIB_TCPSACKDISCARD;
                        }
 
-                       NET_INC_STATS_BH(sock_net(sk), mib_idx);
+                       NET_INC_STATS(sock_net(sk), mib_idx);
                        if (i == 0)
                                first_sack_index = -1;
                        continue;
@@ -1908,7 +1913,7 @@ void tcp_enter_loss(struct sock *sk)
        skb = tcp_write_queue_head(sk);
        is_reneg = skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED);
        if (is_reneg) {
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
                tp->sacked_out = 0;
                tp->fackets_out = 0;
        }
@@ -2394,7 +2399,7 @@ static bool tcp_try_undo_recovery(struct sock *sk)
                else
                        mib_idx = LINUX_MIB_TCPFULLUNDO;
 
-               NET_INC_STATS_BH(sock_net(sk), mib_idx);
+               NET_INC_STATS(sock_net(sk), mib_idx);
        }
        if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
                /* Hold old state until something *above* high_seq
@@ -2416,7 +2421,7 @@ static bool tcp_try_undo_dsack(struct sock *sk)
        if (tp->undo_marker && !tp->undo_retrans) {
                DBGUNDO(sk, "D-SACK");
                tcp_undo_cwnd_reduction(sk, false);
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
                return true;
        }
        return false;
@@ -2431,10 +2436,10 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
                tcp_undo_cwnd_reduction(sk, true);
 
                DBGUNDO(sk, "partial loss");
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
                if (frto_undo)
-                       NET_INC_STATS_BH(sock_net(sk),
-                                        LINUX_MIB_TCPSPURIOUSRTOS);
+                       NET_INC_STATS(sock_net(sk),
+                                       LINUX_MIB_TCPSPURIOUSRTOS);
                inet_csk(sk)->icsk_retransmits = 0;
                if (frto_undo || tcp_is_sack(tp))
                        tcp_set_ca_state(sk, TCP_CA_Open);
@@ -2558,7 +2563,7 @@ static void tcp_mtup_probe_failed(struct sock *sk)
 
        icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1;
        icsk->icsk_mtup.probe_size = 0;
-       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMTUPFAIL);
+       NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPFAIL);
 }
 
 static void tcp_mtup_probe_success(struct sock *sk)
@@ -2578,7 +2583,7 @@ static void tcp_mtup_probe_success(struct sock *sk)
        icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size;
        icsk->icsk_mtup.probe_size = 0;
        tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
-       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS);
+       NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS);
 }
 
 /* Do a simple retransmit without using the backoff mechanisms in
@@ -2642,7 +2647,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
        else
                mib_idx = LINUX_MIB_TCPSACKRECOVERY;
 
-       NET_INC_STATS_BH(sock_net(sk), mib_idx);
+       NET_INC_STATS(sock_net(sk), mib_idx);
 
        tp->prior_ssthresh = 0;
        tcp_init_undo(tp);
@@ -2735,7 +2740,7 @@ static bool tcp_try_undo_partial(struct sock *sk, const int acked)
 
                DBGUNDO(sk, "partial recovery");
                tcp_undo_cwnd_reduction(sk, true);
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
                tcp_try_keep_open(sk);
                return true;
        }
@@ -3086,8 +3091,8 @@ static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
                return;
 
        shinfo = skb_shinfo(skb);
-       if ((shinfo->tx_flags & SKBTX_ACK_TSTAMP) &&
-           between(shinfo->tskey, prior_snd_una, tcp_sk(sk)->snd_una - 1))
+       if (!before(shinfo->tskey, prior_snd_una) &&
+           before(shinfo->tskey, tcp_sk(sk)->snd_una))
                __skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK);
 }
 
@@ -3243,8 +3248,12 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
                tcp_rearm_rto(sk);
        }
 
-       if (icsk->icsk_ca_ops->pkts_acked)
-               icsk->icsk_ca_ops->pkts_acked(sk, pkts_acked, ca_rtt_us);
+       if (icsk->icsk_ca_ops->pkts_acked) {
+               struct ack_sample sample = { .pkts_acked = pkts_acked,
+                                            .rtt_us = ca_rtt_us };
+
+               icsk->icsk_ca_ops->pkts_acked(sk, &sample);
+       }
 
 #if FASTRETRANS_DEBUG > 0
        WARN_ON((int)tp->sacked_out < 0);
@@ -3350,9 +3359,10 @@ static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack)
 {
        u32 delta = ack - tp->snd_una;
 
-       u64_stats_update_begin(&tp->syncp);
+       sock_owned_by_me((struct sock *)tp);
+       u64_stats_update_begin_raw(&tp->syncp);
        tp->bytes_acked += delta;
-       u64_stats_update_end(&tp->syncp);
+       u64_stats_update_end_raw(&tp->syncp);
        tp->snd_una = ack;
 }
 
@@ -3361,9 +3371,10 @@ static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq)
 {
        u32 delta = seq - tp->rcv_nxt;
 
-       u64_stats_update_begin(&tp->syncp);
+       sock_owned_by_me((struct sock *)tp);
+       u64_stats_update_begin_raw(&tp->syncp);
        tp->bytes_received += delta;
-       u64_stats_update_end(&tp->syncp);
+       u64_stats_update_end_raw(&tp->syncp);
        tp->rcv_nxt = seq;
 }
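
With every updater of bytes_acked and bytes_received now running under the
socket lock, the _raw seqcount helpers (which skip preemption protection)
are sufficient, and sock_owned_by_me() documents and lockdep-checks that
precondition. A sketch of the assumed helper:

static inline void sock_owned_by_me(const struct sock *sk)
{
#ifdef CONFIG_LOCKDEP
        WARN_ON_ONCE(!lockdep_sock_is_held(sk) && debug_locks);
#endif
}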
 
@@ -3429,7 +3440,7 @@ bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
                s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
 
                if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
-                       NET_INC_STATS_BH(net, mib_idx);
+                       NET_INC_STATS(net, mib_idx);
                        return true;    /* rate-limited: don't send yet! */
                }
        }
@@ -3462,7 +3473,7 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
                challenge_count = 0;
        }
        if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
                tcp_send_ack(sk);
        }
 }
@@ -3511,8 +3522,8 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
                tcp_set_ca_state(sk, TCP_CA_CWR);
                tcp_end_cwnd_reduction(sk);
                tcp_try_keep_open(sk);
-               NET_INC_STATS_BH(sock_net(sk),
-                                LINUX_MIB_TCPLOSSPROBERECOVERY);
+               NET_INC_STATS(sock_net(sk),
+                               LINUX_MIB_TCPLOSSPROBERECOVERY);
        } else if (!(flag & (FLAG_SND_UNA_ADVANCED |
                             FLAG_NOT_DUP | FLAG_DATA_SACKED))) {
                /* Pure dupack: original and TLP probe arrived; no loss */
@@ -3616,14 +3627,14 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
                tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE);
 
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPACKS);
        } else {
                u32 ack_ev_flags = CA_ACK_SLOWPATH;
 
                if (ack_seq != TCP_SKB_CB(skb)->end_seq)
                        flag |= FLAG_DATA;
                else
-                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPUREACKS);
+                       NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPUREACKS);
 
                flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
 
@@ -4126,7 +4137,7 @@ static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
                else
                        mib_idx = LINUX_MIB_TCPDSACKOFOSENT;
 
-               NET_INC_STATS_BH(sock_net(sk), mib_idx);
+               NET_INC_STATS(sock_net(sk), mib_idx);
 
                tp->rx_opt.dsack = 1;
                tp->duplicate_sack[0].start_seq = seq;
@@ -4150,7 +4161,7 @@ static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
 
        if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
            before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
                tcp_enter_quickack_mode(sk);
 
                if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
@@ -4300,7 +4311,7 @@ static bool tcp_try_coalesce(struct sock *sk,
 
        atomic_add(delta, &sk->sk_rmem_alloc);
        sk_mem_charge(sk, delta);
-       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
+       NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
        TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq;
        TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq;
        TCP_SKB_CB(to)->tcp_flags |= TCP_SKB_CB(from)->tcp_flags;
@@ -4388,7 +4399,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
        tcp_ecn_check_ce(tp, skb);
 
        if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP);
                tcp_drop(sk, skb);
                return;
        }
@@ -4397,7 +4408,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
        tp->pred_flags = 0;
        inet_csk_schedule_ack(sk);
 
-       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
+       NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
        SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
                   tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
 
@@ -4452,7 +4463,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
        if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
                if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
                        /* All the bits are present. Drop. */
-                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
+                       NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
                        tcp_drop(sk, skb);
                        skb = NULL;
                        tcp_dsack_set(sk, seq, end_seq);
@@ -4491,7 +4502,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
                __skb_unlink(skb1, &tp->out_of_order_queue);
                tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
                                 TCP_SKB_CB(skb1)->end_seq);
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
                tcp_drop(sk, skb1);
        }
 
@@ -4606,14 +4617,12 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 
                        __set_current_state(TASK_RUNNING);
 
-                       local_bh_enable();
                        if (!skb_copy_datagram_msg(skb, 0, tp->ucopy.msg, chunk)) {
                                tp->ucopy.len -= chunk;
                                tp->copied_seq += chunk;
                                eaten = (chunk == skb->len);
                                tcp_rcv_space_adjust(sk);
                        }
-                       local_bh_disable();
                }
 
                if (eaten <= 0) {
@@ -4656,7 +4665,7 @@ queue_and_out:
 
        if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
                /* A retransmit, 2nd most common case.  Force an immediate ack. */
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
                tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
 
 out_of_window:
@@ -4702,7 +4711,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
 
        __skb_unlink(skb, list);
        __kfree_skb(skb);
-       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
+       NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
 
        return next;
 }
@@ -4861,7 +4870,7 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
        bool res = false;
 
        if (!skb_queue_empty(&tp->out_of_order_queue)) {
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
                __skb_queue_purge(&tp->out_of_order_queue);
 
                /* Reset SACK state.  A conforming SACK implementation will
@@ -4890,7 +4899,7 @@ static int tcp_prune_queue(struct sock *sk)
 
        SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);
 
-       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PRUNECALLED);
+       NET_INC_STATS(sock_net(sk), LINUX_MIB_PRUNECALLED);
 
        if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
                tcp_clamp_window(sk);
@@ -4920,7 +4929,7 @@ static int tcp_prune_queue(struct sock *sk)
         * drop receive data on the floor.  It will get retransmitted
         * and hopefully then we'll have sufficient space.
         */
-       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_RCVPRUNED);
+       NET_INC_STATS(sock_net(sk), LINUX_MIB_RCVPRUNED);
 
        /* Massive buffer overcommit. */
        tp->pred_flags = 0;
@@ -5129,7 +5138,6 @@ static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
        int chunk = skb->len - hlen;
        int err;
 
-       local_bh_enable();
        if (skb_csum_unnecessary(skb))
                err = skb_copy_datagram_msg(skb, hlen, tp->ucopy.msg, chunk);
        else
@@ -5141,32 +5149,9 @@ static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
                tcp_rcv_space_adjust(sk);
        }
 
-       local_bh_disable();
        return err;
 }
 
-static __sum16 __tcp_checksum_complete_user(struct sock *sk,
-                                           struct sk_buff *skb)
-{
-       __sum16 result;
-
-       if (sock_owned_by_user(sk)) {
-               local_bh_enable();
-               result = __tcp_checksum_complete(skb);
-               local_bh_disable();
-       } else {
-               result = __tcp_checksum_complete(skb);
-       }
-       return result;
-}
-
-static inline bool tcp_checksum_complete_user(struct sock *sk,
-                                            struct sk_buff *skb)
-{
-       return !skb_csum_unnecessary(skb) &&
-              __tcp_checksum_complete_user(sk, skb);
-}
-
 /* Does PAWS and seqno based validation of an incoming segment; flags will
  * play a significant role here.
  */
@@ -5179,7 +5164,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
        if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
            tcp_paws_discard(sk, skb)) {
                if (!th->rst) {
-                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
+                       NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
                        if (!tcp_oow_rate_limited(sock_net(sk), skb,
                                                  LINUX_MIB_TCPACKSKIPPEDPAWS,
                                                  &tp->last_oow_ack_time))
@@ -5231,8 +5216,8 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
        if (th->syn) {
 syn_challenge:
                if (syn_inerr)
-                       TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
+                       TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
                tcp_send_challenge_ack(sk, skb);
                goto discard;
        }
@@ -5347,7 +5332,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                                tcp_data_snd_check(sk);
                                return;
                        } else { /* Header too small */
-                               TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
+                               TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
                                goto discard;
                        }
                } else {
@@ -5375,12 +5360,13 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 
                                        __skb_pull(skb, tcp_header_len);
                                        tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
-                                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER);
+                                       NET_INC_STATS(sock_net(sk),
+                                                       LINUX_MIB_TCPHPHITSTOUSER);
                                        eaten = 1;
                                }
                        }
                        if (!eaten) {
-                               if (tcp_checksum_complete_user(sk, skb))
+                               if (tcp_checksum_complete(skb))
                                        goto csum_error;
 
                                if ((int)skb->truesize > sk->sk_forward_alloc)
@@ -5397,7 +5383,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 
                                tcp_rcv_rtt_measure_ts(sk, skb);
 
-                               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
+                               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS);
 
                                /* Bulk data transfer: receiver */
                                eaten = tcp_queue_rcv(sk, skb, tcp_header_len,
@@ -5424,7 +5410,7 @@ no_ack:
        }
 
 slow_path:
-       if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
+       if (len < (th->doff << 2) || tcp_checksum_complete(skb))
                goto csum_error;
 
        if (!th->ack && !th->rst && !th->syn)
@@ -5454,8 +5440,8 @@ step5:
        return;
 
 csum_error:
-       TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
-       TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
+       TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
+       TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
 
 discard:
        tcp_drop(sk, skb);
@@ -5543,16 +5529,18 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
        if (data) { /* Retransmit unacked data in SYN */
                tcp_for_write_queue_from(data, sk) {
                        if (data == tcp_send_head(sk) ||
-                           __tcp_retransmit_skb(sk, data))
+                           __tcp_retransmit_skb(sk, data, 1))
                                break;
                }
                tcp_rearm_rto(sk);
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
+               NET_INC_STATS(sock_net(sk),
+                               LINUX_MIB_TCPFASTOPENACTIVEFAIL);
                return true;
        }
        tp->syn_data_acked = tp->syn_data;
        if (tp->syn_data_acked)
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE);
+               NET_INC_STATS(sock_net(sk),
+                               LINUX_MIB_TCPFASTOPENACTIVE);
 
        tcp_fastopen_add_skb(sk, synack);
 
@@ -5587,7 +5575,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
                    !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
                             tcp_time_stamp)) {
-                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED);
+                       NET_INC_STATS(sock_net(sk),
+                                       LINUX_MIB_PAWSACTIVEREJECTED);
                        goto reset_and_undo;
                }
 
@@ -5796,8 +5785,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
        int queued = 0;
        bool acceptable;
 
-       tp->rx_opt.saw_tstamp = 0;
-
        switch (sk->sk_state) {
        case TCP_CLOSE:
                goto discard;
@@ -5815,29 +5802,13 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
                        if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
                                return 1;
 
-                       /* Now we have several options: In theory there is
-                        * nothing else in the frame. KA9Q has an option to
-                        * send data with the syn, BSD accepts data with the
-                        * syn up to the [to be] advertised window and
-                        * Solaris 2.1 gives you a protocol error. For now
-                        * we just ignore it, that fits the spec precisely
-                        * and avoids incompatibilities. It would be nice in
-                        * future to drop through and process the data.
-                        *
-                        * Now that TTCP is starting to be used we ought to
-                        * queue this data.
-                        * But, this leaves one open to an easy denial of
-                        * service attack, and SYN cookies can't defend
-                        * against this problem. So, we drop the data
-                        * in the interest of security over speed unless
-                        * it's still in use.
-                        */
-                       kfree_skb(skb);
+                       consume_skb(skb);
                        return 0;
                }
                goto discard;
 
        case TCP_SYN_SENT:
+               tp->rx_opt.saw_tstamp = 0;
                queued = tcp_rcv_synsent_state_process(sk, skb, th);
                if (queued >= 0)
                        return queued;
@@ -5849,6 +5820,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
                return 0;
        }
 
+       tp->rx_opt.saw_tstamp = 0;
        req = tp->fastopen_rsk;
        if (req) {
                WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
@@ -5973,7 +5945,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
                    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
                     after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
                        tcp_done(sk);
-                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
+                       NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
                        return 1;
                }
 
@@ -6030,7 +6002,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
                if (sk->sk_shutdown & RCV_SHUTDOWN) {
                        if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
                            after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
-                               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
+                               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
                                tcp_reset(sk);
                                return 1;
                        }
@@ -6168,10 +6140,10 @@ static bool tcp_syn_flood_action(const struct sock *sk,
        if (net->ipv4.sysctl_tcp_syncookies) {
                msg = "Sending cookies";
                want_cookie = true;
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
+               __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
        } else
 #endif
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
+               __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
 
        if (!queue->synflood_warned &&
            net->ipv4.sysctl_tcp_syncookies != 2 &&
@@ -6232,7 +6204,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
         * timeout.
         */
        if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
                goto drop;
        }
 
@@ -6279,7 +6251,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
                        if (dst && strict &&
                            !tcp_peer_is_proven(req, dst, true,
                                                tmp_opt.saw_tstamp)) {
-                               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
+                               NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
                                goto drop_and_release;
                        }
                }
@@ -6327,7 +6299,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
        }
        if (fastopen_sk) {
                af_ops->send_synack(fastopen_sk, dst, &fl, req,
-                                   &foc, false);
+                                   &foc, TCP_SYNACK_FASTOPEN);
                /* Add the child socket directly into the accept queue */
                inet_csk_reqsk_queue_add(sk, req, fastopen_sk);
                sk->sk_data_ready(sk);
@@ -6337,8 +6309,9 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
                tcp_rsk(req)->tfo_listener = false;
                if (!want_cookie)
                        inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
-               af_ops->send_synack(sk, dst, &fl, req,
-                                   &foc, !want_cookie);
+               af_ops->send_synack(sk, dst, &fl, req, &foc,
+                                   !want_cookie ? TCP_SYNACK_NORMAL :
+                                                  TCP_SYNACK_COOKIE);
                if (want_cookie) {
                        reqsk_free(req);
                        return 0;
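
The attach_req bool that send_synack() used to take is replaced by a
three-valued type matching the three call sites above; the definition is
assumed to be along the lines of:

enum tcp_synack_type {
        TCP_SYNACK_NORMAL,
        TCP_SYNACK_FASTOPEN,
        TCP_SYNACK_COOKIE,
};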
index f4f2a0a3849d3dd1b8820f2d1b84edb6d3c05235..8219d0d8dc8370d0d3e6fc4cd17b4925617968ab 100644 (file)
@@ -320,7 +320,7 @@ void tcp_req_err(struct sock *sk, u32 seq, bool abort)
         * an established socket here.
         */
        if (seq != tcp_rsk(req)->snt_isn) {
-               NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+               __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
        } else if (abort) {
                /*
                 * Still in SYN_RECV, just remove it silently.
@@ -372,7 +372,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
                                       th->dest, iph->saddr, ntohs(th->source),
                                       inet_iif(icmp_skb));
        if (!sk) {
-               ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
+               __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
                return;
        }
        if (sk->sk_state == TCP_TIME_WAIT) {
@@ -396,13 +396,13 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
         */
        if (sock_owned_by_user(sk)) {
                if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
-                       NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
+                       __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
        }
        if (sk->sk_state == TCP_CLOSE)
                goto out;
 
        if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
-               NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+               __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
                goto out;
        }
 
@@ -413,7 +413,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
        snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
        if (sk->sk_state != TCP_LISTEN &&
            !between(seq, snd_una, tp->snd_nxt)) {
-               NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+               __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
                goto out;
        }
 
@@ -692,13 +692,15 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
                     offsetof(struct inet_timewait_sock, tw_bound_dev_if));
 
        arg.tos = ip_hdr(skb)->tos;
+       local_bh_disable();
        ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
                              skb, &TCP_SKB_CB(skb)->header.h4.opt,
                              ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
                              &arg, arg.iov[0].iov_len);
 
-       TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
-       TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
+       __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
+       __TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
+       local_bh_enable();
 
 #ifdef CONFIG_TCP_MD5SIG
 out:
@@ -774,12 +776,14 @@ static void tcp_v4_send_ack(struct net *net,
        if (oif)
                arg.bound_dev_if = oif;
        arg.tos = tos;
+       local_bh_disable();
        ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
                              skb, &TCP_SKB_CB(skb)->header.h4.opt,
                              ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
                              &arg, arg.iov[0].iov_len);
 
-       TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
+       __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
+       local_bh_enable();
 }
 
 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
@@ -830,7 +834,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
                              struct flowi *fl,
                              struct request_sock *req,
                              struct tcp_fastopen_cookie *foc,
-                                 bool attach_req)
+                             enum tcp_synack_type synack_type)
 {
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct flowi4 fl4;
@@ -841,7 +845,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
        if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
                return -1;
 
-       skb = tcp_make_synack(sk, dst, req, foc, attach_req);
+       skb = tcp_make_synack(sk, dst, req, foc, synack_type);
 
        if (skb) {
                __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
@@ -1151,12 +1155,12 @@ static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
                return false;
 
        if (hash_expected && !hash_location) {
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
                return true;
        }
 
        if (!hash_expected && hash_location) {
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
                return true;
        }
 
@@ -1342,7 +1346,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
        return newsk;
 
 exit_overflow:
-       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+       NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 exit_nonewsk:
        dst_release(dst);
 exit:
@@ -1432,8 +1436,8 @@ discard:
        return 0;
 
 csum_err:
-       TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
-       TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
+       TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
+       TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
        goto discard;
 }
 EXPORT_SYMBOL(tcp_v4_do_rcv);
@@ -1506,16 +1510,16 @@ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 
        __skb_queue_tail(&tp->ucopy.prequeue, skb);
        tp->ucopy.memory += skb->truesize;
-       if (tp->ucopy.memory > sk->sk_rcvbuf) {
+       if (skb_queue_len(&tp->ucopy.prequeue) >= 32 ||
+           tp->ucopy.memory + atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) {
                struct sk_buff *skb1;
 
                BUG_ON(sock_owned_by_user(sk));
+               __NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED,
+                               skb_queue_len(&tp->ucopy.prequeue));
 
-               while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
+               while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
                        sk_backlog_rcv(sk, skb1);
-                       NET_INC_STATS_BH(sock_net(sk),
-                                        LINUX_MIB_TCPPREQUEUEDROPPED);
-               }
 
                tp->ucopy.memory = 0;
        } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
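
The flush condition for the prequeue is tightened here: instead of waiting
for prequeue memory alone to exceed sk_rcvbuf, the queue is drained once it
holds 32 skbs or once prequeue memory plus already-charged receive memory
crosses sk_rcvbuf, presumably to bound both memory usage and the time spent
flushing under the socket lock. The TCPPrequeueDropped counter is likewise
updated in one __NET_ADD_STATS call covering the whole batch rather than
once per dequeued skb.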
@@ -1547,7 +1551,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
                goto discard_it;
 
        /* Count it even if it's bad */
-       TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
+       __TCP_INC_STATS(net, TCP_MIB_INSEGS);
 
        if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
                goto discard_it;
@@ -1629,7 +1633,7 @@ process:
                }
        }
        if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
-               NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+               __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
                goto discard_and_relse;
        }
 
@@ -1662,7 +1666,7 @@ process:
        } else if (unlikely(sk_add_backlog(sk, skb,
                                           sk->sk_rcvbuf + sk->sk_sndbuf))) {
                bh_unlock_sock(sk);
-               NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
+               __NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP);
                goto discard_and_relse;
        }
        bh_unlock_sock(sk);
@@ -1679,9 +1683,9 @@ no_tcp_socket:
 
        if (tcp_checksum_complete(skb)) {
 csum_error:
-               TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
+               __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
 bad_packet:
-               TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
+               __TCP_INC_STATS(net, TCP_MIB_INERRS);
        } else {
                tcp_v4_send_reset(NULL, skb);
        }
@@ -1835,7 +1839,9 @@ void tcp_v4_destroy_sock(struct sock *sk)
        tcp_free_fastopen_req(tp);
        tcp_saved_syn_free(tp);
 
+       local_bh_disable();
        sk_sockets_allocated_dec(sk);
+       local_bh_enable();
 
        if (mem_cgroup_sockets_enabled && sk->sk_memcg)
                sock_release_memcg(sk);
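
The NET_INC_STATS_BH()/TCP_INC_STATS_BH() conversions in this file are part of a tree-wide rename: the _BH suffix is dropped, and a double-underscore prefix now marks the variant that assumes BH (or at least preemption) is already disabled, while the plain name is safe from any context. The local_bh_disable() pair added around sk_sockets_allocated_dec() is the flip side of the same audit, since tcp_v4_destroy_sock() runs in process context. A condensed sketch of the convention (simplified from the kernel's SNMP helpers, not the verbatim definitions):

    /* __ prefix: caller has preemption/BH disabled, so the cheaper
     * non-atomic per-cpu increment is safe.
     */
    #define __NET_INC_STATS(net, field) \
            __this_cpu_inc((net)->mib.net_statistics->mibs[field])

    /* no prefix: usable from process context as well */
    #define NET_INC_STATS(net, field) \
            this_cpu_inc((net)->mib.net_statistics->mibs[field])
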
index 1e70fa8fa793fdbca3ca782231f8b775ed96f205..c67ece1390c253304454cd41eed59cae26dd10a8 100644 (file)
@@ -260,13 +260,13 @@ static void tcp_lp_rtt_sample(struct sock *sk, u32 rtt)
  * newReno in increase case.
  * We work it out by following the idea from TCP-LP's paper directly
  */
-static void tcp_lp_pkts_acked(struct sock *sk, u32 num_acked, s32 rtt_us)
+static void tcp_lp_pkts_acked(struct sock *sk, const struct ack_sample *sample)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct lp *lp = inet_csk_ca(sk);
 
-       if (rtt_us > 0)
-               tcp_lp_rtt_sample(sk, rtt_us);
+       if (sample->rtt_us > 0)
+               tcp_lp_rtt_sample(sk, sample->rtt_us);
 
        /* calc inference */
        if (tcp_time_stamp > tp->rx_opt.rcv_tsecr)
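
tcp_lp is one of several congestion-control modules (vegas, veno, westwood and yeah follow below) converted to the new pkts_acked() signature, which bundles the per-ACK information into a single struct so new fields can later be added without touching every module. The struct these hunks rely on, as added to include/net/tcp.h by this series:

    struct ack_sample {
            u32 pkts_acked;
            s32 rtt_us;     /* negative when no valid RTT sample */
    };

A module now reads sample->pkts_acked and sample->rtt_us instead of taking them as separate arguments.
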
index 7b7eec4399069249ef949ef4b287e8cd0ef4df91..b617826e24770a09ba07202b3d546ae248b68a50 100644 (file)
@@ -800,7 +800,8 @@ static int tcp_metrics_fill_info(struct sk_buff *msg,
        }
 
        if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
-                         jiffies - tm->tcpm_stamp) < 0)
+                         jiffies - tm->tcpm_stamp,
+                         TCP_METRICS_ATTR_PAD) < 0)
                goto nla_put_failure;
        if (tm->tcpm_ts_stamp) {
                if (nla_put_s32(msg, TCP_METRICS_ATTR_TW_TS_STAMP,
@@ -864,7 +865,8 @@ static int tcp_metrics_fill_info(struct sk_buff *msg,
                    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
                                tfom->syn_loss) < 0 ||
                     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
-                               jiffies - tfom->last_syn_loss) < 0))
+                               jiffies - tfom->last_syn_loss,
+                               TCP_METRICS_ATTR_PAD) < 0))
                        goto nla_put_failure;
                if (tfom->cookie.len > 0 &&
                    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
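
The new TCP_METRICS_ATTR_PAD argument comes from the netlink 64-bit alignment work in this window: nla_put_msecs() now takes a pad attribute type and inserts a zero-length attribute of that type whenever the 8-byte payload would otherwise land misaligned. The updated helper, roughly as defined in include/net/netlink.h:

    static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
                                    unsigned long njiffies, int padattr)
    {
            u64 tmp = jiffies_to_msecs(njiffies);

            return nla_put_64bit(skb, attrtype, sizeof(u64), &tmp, padattr);
    }
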
index 4c53e7c865864df29f0980c4c756e6daee966c7f..4b95ec4ed2c8f7cc50f290c2d354a5a4931d7116 100644 (file)
@@ -235,7 +235,7 @@ kill:
        }
 
        if (paws_reject)
-               NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);
+               __NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);
 
        if (!th->rst) {
                /* In this case we must reset the TIMEWAIT timer.
@@ -337,7 +337,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
                 * socket up.  We've got bigger problems than
                 * non-graceful socket closings.
                 */
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
        }
 
        tcp_update_metrics(sk);
@@ -545,7 +545,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
                newtp->rack.mstamp.v64 = 0;
                newtp->rack.advanced = 0;
 
-               TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
+               __TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
        }
        return newsk;
 }
@@ -710,7 +710,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
                                          &tcp_rsk(req)->last_oow_ack_time))
                        req->rsk_ops->send_ack(sk, skb, req);
                if (paws_reject)
-                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
+                       __NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
                return NULL;
        }
 
@@ -729,7 +729,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
         *         "fourth, check the SYN bit"
         */
        if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
-               TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
+               __TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
                goto embryonic_reset;
        }
 
@@ -752,7 +752,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
        if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
            TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
                inet_rsk(req)->acked = 1;
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
+               __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
                return NULL;
        }
 
@@ -791,7 +791,7 @@ embryonic_reset:
        }
        if (!fastopen) {
                inet_csk_reqsk_queue_drop(sk, req);
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
+               __NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
        }
        return NULL;
 }
index 773083b7f1e98906f5cdc93f770829f293fea205..02737b607aa716d2249aa507793abfb500f7bfeb 100644 (file)
@@ -89,6 +89,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
                             ~(SKB_GSO_TCPV4 |
                               SKB_GSO_DODGY |
                               SKB_GSO_TCP_ECN |
+                              SKB_GSO_TCP_FIXEDID |
                               SKB_GSO_TCPV6 |
                               SKB_GSO_GRE |
                               SKB_GSO_GRE_CSUM |
@@ -98,7 +99,8 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
                               SKB_GSO_UDP_TUNNEL_CSUM |
                               SKB_GSO_TUNNEL_REMCSUM |
                               0) ||
-                            !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
+                            !(type & (SKB_GSO_TCPV4 |
+                                      SKB_GSO_TCPV6))))
                        goto out;
 
                skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
@@ -107,6 +109,12 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
                goto out;
        }
 
+       /* GSO partial only requires splitting the frame into an MSS
+        * multiple and possibly a remainder.  So update the mss now.
+        */
+       if (features & NETIF_F_GSO_PARTIAL)
+               mss = skb->len - (skb->len % mss);
+
        copy_destructor = gso_skb->destructor == tcp_wfree;
        ooo_okay = gso_skb->ooo_okay;
        /* All segments but the first should have ooo_okay cleared */
@@ -131,7 +139,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
        newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
                                               (__force u32)delta));
 
-       do {
+       while (skb->next) {
                th->fin = th->psh = 0;
                th->check = newcheck;
 
@@ -151,7 +159,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
 
                th->seq = htonl(seq);
                th->cwr = 0;
-       } while (skb->next);
+       }
 
        /* Following permits TCP Small Queues to work well with GSO :
         * The callback to TCP stack will be called at the time last frag
@@ -237,7 +245,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 
 found:
        /* Include the IP ID check below from the inner most IP hdr */
-       flush = NAPI_GRO_CB(p)->flush | NAPI_GRO_CB(p)->flush_id;
+       flush = NAPI_GRO_CB(p)->flush;
        flush |= (__force int)(flags & TCP_FLAG_CWR);
        flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
                  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
@@ -246,6 +254,17 @@ found:
                flush |= *(u32 *)((u8 *)th + i) ^
                         *(u32 *)((u8 *)th2 + i);
 
+       /* When we receive our second frame we can make a decision on whether
+        * we continue this flow as an atomic flow with a fixed ID or use
+        * an incrementing ID.
+        */
+       if (NAPI_GRO_CB(p)->flush_id != 1 ||
+           NAPI_GRO_CB(p)->count != 1 ||
+           !NAPI_GRO_CB(p)->is_atomic)
+               flush |= NAPI_GRO_CB(p)->flush_id;
+       else
+               NAPI_GRO_CB(p)->is_atomic = false;
+
        mss = skb_shinfo(p)->gso_size;
 
        flush |= (len - 1) >= mss;
@@ -314,6 +333,9 @@ static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
                                  iph->daddr, 0);
        skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
 
+       if (NAPI_GRO_CB(skb)->is_atomic)
+               skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
+
        return tcp_gro_complete(skb);
 }
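
The NETIF_F_GSO_PARTIAL branch above relies on the device doing the per-MSS splitting itself, so the software path only has to peel off a possible tail remainder; mss is therefore rounded down to the largest MSS multiple that fits the frame. A worked example of the rounding (made-up sizes):

    /* skb->len = 5840, mss = 1400:
     *   5840 % 1400 = 240
     *   mss becomes 5840 - 240 = 5600
     * skb_segment() then emits one 5600-byte partial segment (4 x MSS,
     * left for the hardware to split) plus a 240-byte tail segment.
     */
    unsigned int len = 5840, mss = 1400;

    mss = len - (len % mss);        /* 5600 */
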
 
index 7d2dc015cd19a64505c968df43c544adabb65e0e..8daefd8b1b495f23e45b6c9e6b70525715498c08 100644 (file)
@@ -949,7 +949,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 
        skb_orphan(skb);
        skb->sk = sk;
-       skb->destructor = skb_is_tcp_pure_ack(skb) ? sock_wfree : tcp_wfree;
+       skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
        skb_set_hash_from_sk(skb, sk);
        atomic_add(skb->truesize, &sk->sk_wmem_alloc);
 
@@ -1111,11 +1111,17 @@ static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int de
        tcp_verify_left_out(tp);
 }
 
+static bool tcp_has_tx_tstamp(const struct sk_buff *skb)
+{
+       return TCP_SKB_CB(skb)->txstamp_ack ||
+               (skb_shinfo(skb)->tx_flags & SKBTX_ANY_TSTAMP);
+}
+
 static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2)
 {
        struct skb_shared_info *shinfo = skb_shinfo(skb);
 
-       if (unlikely(shinfo->tx_flags & SKBTX_ANY_TSTAMP) &&
+       if (unlikely(tcp_has_tx_tstamp(skb)) &&
            !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) {
                struct skb_shared_info *shinfo2 = skb_shinfo(skb2);
                u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP;
@@ -1123,9 +1129,17 @@ static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2)
                shinfo->tx_flags &= ~tsflags;
                shinfo2->tx_flags |= tsflags;
                swap(shinfo->tskey, shinfo2->tskey);
+               TCP_SKB_CB(skb2)->txstamp_ack = TCP_SKB_CB(skb)->txstamp_ack;
+               TCP_SKB_CB(skb)->txstamp_ack = 0;
        }
 }
 
+static void tcp_skb_fragment_eor(struct sk_buff *skb, struct sk_buff *skb2)
+{
+       TCP_SKB_CB(skb2)->eor = TCP_SKB_CB(skb)->eor;
+       TCP_SKB_CB(skb)->eor = 0;
+}
+
 /* Function to create two new TCP segments.  Shrinks the given segment
  * to the specified size and appends a new segment with the rest of the
  * packet to the list.  This won't be called frequently, I hope.
@@ -1171,6 +1185,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
        TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
        TCP_SKB_CB(buff)->tcp_flags = flags;
        TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
+       tcp_skb_fragment_eor(skb, buff);
 
        if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
                /* Copy and checksum data tail into the new buffer. */
@@ -1731,6 +1746,8 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
        /* This packet was never sent out yet, so no SACK bits. */
        TCP_SKB_CB(buff)->sacked = 0;
 
+       tcp_skb_fragment_eor(skb, buff);
+
        buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
        skb_split(skb, buff, len);
        tcp_fragment_tstamp(skb, buff);
@@ -2204,14 +2221,13 @@ bool tcp_schedule_loss_probe(struct sock *sk)
 /* Thanks to skb fast clones, we can detect if a prior transmit of
  * a packet is still in a qdisc or driver queue.
  * In this case, there is very little point doing a retransmit!
- * Note: This is called from BH context only.
  */
 static bool skb_still_in_host_queue(const struct sock *sk,
                                    const struct sk_buff *skb)
 {
        if (unlikely(skb_fclone_busy(sk, skb))) {
-               NET_INC_STATS_BH(sock_net(sk),
-                                LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
+               NET_INC_STATS(sock_net(sk),
+                             LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
                return true;
        }
        return false;
@@ -2266,14 +2282,14 @@ void tcp_send_loss_probe(struct sock *sk)
        if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
                goto rearm_timer;
 
-       if (__tcp_retransmit_skb(sk, skb))
+       if (__tcp_retransmit_skb(sk, skb, 1))
                goto rearm_timer;
 
        /* Record snd_nxt for loss detection. */
        tp->tlp_high_seq = tp->snd_nxt;
 
 probe_sent:
-       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
+       NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
        /* Reset s.t. tcp_rearm_rto will restart timer from now */
        inet_csk(sk)->icsk_pending = 0;
 rearm_timer:
@@ -2441,6 +2457,21 @@ u32 __tcp_select_window(struct sock *sk)
        return window;
 }
 
+void tcp_skb_collapse_tstamp(struct sk_buff *skb,
+                            const struct sk_buff *next_skb)
+{
+       if (unlikely(tcp_has_tx_tstamp(next_skb))) {
+               const struct skb_shared_info *next_shinfo =
+                       skb_shinfo(next_skb);
+               struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+               shinfo->tx_flags |= next_shinfo->tx_flags & SKBTX_ANY_TSTAMP;
+               shinfo->tskey = next_shinfo->tskey;
+               TCP_SKB_CB(skb)->txstamp_ack |=
+                       TCP_SKB_CB(next_skb)->txstamp_ack;
+       }
+}
+
 /* Collapses two adjacent SKB's during retransmission. */
 static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
 {
@@ -2476,6 +2507,7 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
         * packet counting does not break.
         */
        TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
+       TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor;
 
        /* changed transmit queue under us so clear hints */
        tcp_clear_retrans_hints_partial(tp);
@@ -2484,6 +2516,8 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
 
        tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
 
+       tcp_skb_collapse_tstamp(skb, next_skb);
+
        sk_wmem_free_skb(sk, next_skb);
 }
 
@@ -2525,6 +2559,9 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
                if (!tcp_can_collapse(sk, skb))
                        break;
 
+               if (!tcp_skb_can_collapse_to(to))
+                       break;
+
                space -= skb->len;
 
                if (first) {
@@ -2551,17 +2588,17 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
  * state updates are done by the caller.  Returns non-zero if an
  * error occurred which prevented the send.
  */
-int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
+int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
 {
-       struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
+       struct tcp_sock *tp = tcp_sk(sk);
        unsigned int cur_mss;
-       int err;
+       int diff, len, err;
 
-       /* Inconslusive MTU probe */
-       if (icsk->icsk_mtup.probe_size) {
+
+       /* Inconclusive MTU probe */
+       if (icsk->icsk_mtup.probe_size)
                icsk->icsk_mtup.probe_size = 0;
-       }
 
        /* Do not send more than we queued. 1/4 is reserved for possible
         * copying overhead: fragmentation, tunneling, mangling etc.
@@ -2594,30 +2631,27 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
            TCP_SKB_CB(skb)->seq != tp->snd_una)
                return -EAGAIN;
 
-       if (skb->len > cur_mss) {
-               if (tcp_fragment(sk, skb, cur_mss, cur_mss, GFP_ATOMIC))
+       len = cur_mss * segs;
+       if (skb->len > len) {
+               if (tcp_fragment(sk, skb, len, cur_mss, GFP_ATOMIC))
                        return -ENOMEM; /* We'll try again later. */
        } else {
-               int oldpcount = tcp_skb_pcount(skb);
+               if (skb_unclone(skb, GFP_ATOMIC))
+                       return -ENOMEM;
 
-               if (unlikely(oldpcount > 1)) {
-                       if (skb_unclone(skb, GFP_ATOMIC))
-                               return -ENOMEM;
-                       tcp_init_tso_segs(skb, cur_mss);
-                       tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
-               }
+               diff = tcp_skb_pcount(skb);
+               tcp_set_skb_tso_segs(skb, cur_mss);
+               diff -= tcp_skb_pcount(skb);
+               if (diff)
+                       tcp_adjust_pcount(sk, skb, diff);
+               if (skb->len < cur_mss)
+                       tcp_retrans_try_collapse(sk, skb, cur_mss);
        }
 
        /* RFC3168, section 6.1.1.1. ECN fallback */
        if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN)
                tcp_ecn_clear_syn(sk, skb);
 
-       tcp_retrans_try_collapse(sk, skb, cur_mss);
-
-       /* Make a copy, if the first transmission SKB clone we made
-        * is still in somebody's hands, else make a clone.
-        */
-
        /* make sure skb->data is aligned on arches that require it
         * and check if ack-trimming & collapsing extended the headroom
         * beyond what csum_start can cover.
@@ -2633,20 +2667,22 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
        }
 
        if (likely(!err)) {
+               segs = tcp_skb_pcount(skb);
+
                TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
                /* Update global TCP statistics. */
-               TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
+               TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs);
                if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
-                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
-               tp->total_retrans++;
+                       __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
+               tp->total_retrans += segs;
        }
        return err;
 }
 
-int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
+int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       int err = __tcp_retransmit_skb(sk, skb);
+       int err = __tcp_retransmit_skb(sk, skb, segs);
 
        if (err == 0) {
 #if FASTRETRANS_DEBUG > 0
@@ -2662,7 +2698,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
                        tp->retrans_stamp = tcp_skb_timestamp(skb);
 
        } else if (err != -EBUSY) {
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
        }
 
        if (tp->undo_retrans < 0)
@@ -2737,6 +2773,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 
        tcp_for_write_queue_from(skb, sk) {
                __u8 sacked = TCP_SKB_CB(skb)->sacked;
+               int segs;
 
                if (skb == tcp_send_head(sk))
                        break;
@@ -2744,14 +2781,8 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
                if (!hole)
                        tp->retransmit_skb_hint = skb;
 
-               /* Assume this retransmit will generate
-                * only one packet for congestion window
-                * calculation purposes.  This works because
-                * tcp_retransmit_skb() will chop up the
-                * packet to be MSS sized and all the
-                * packet counting works out.
-                */
-               if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
+               segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
+               if (segs <= 0)
                        return;
 
                if (fwd_rexmitting) {
@@ -2788,10 +2819,10 @@ begin_fwd:
                if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
                        continue;
 
-               if (tcp_retransmit_skb(sk, skb))
+               if (tcp_retransmit_skb(sk, skb, segs))
                        return;
 
-               NET_INC_STATS_BH(sock_net(sk), mib_idx);
+               NET_INC_STATS(sock_net(sk), mib_idx);
 
                if (tcp_in_cwnd_reduction(sk))
                        tp->prr_out += tcp_skb_pcount(skb);
@@ -2944,7 +2975,7 @@ int tcp_send_synack(struct sock *sk)
 struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
                                struct request_sock *req,
                                struct tcp_fastopen_cookie *foc,
-                               bool attach_req)
+                               enum tcp_synack_type synack_type)
 {
        struct inet_request_sock *ireq = inet_rsk(req);
        const struct tcp_sock *tp = tcp_sk(sk);
@@ -2964,14 +2995,22 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
        /* Reserve space for headers. */
        skb_reserve(skb, MAX_TCP_HEADER);
 
-       if (attach_req) {
+       switch (synack_type) {
+       case TCP_SYNACK_NORMAL:
                skb_set_owner_w(skb, req_to_sk(req));
-       } else {
+               break;
+       case TCP_SYNACK_COOKIE:
+               /* Under synflood, we do not attach skb to a socket,
+                * to avoid false sharing.
+                */
+               break;
+       case TCP_SYNACK_FASTOPEN:
                /* sk is a const pointer, because we want to express that
                 * multiple cpus might call us concurrently.
                 * sk->sk_wmem_alloc is an atomic, so we can promote to rw.
                 */
                skb_set_owner_w(skb, (struct sock *)sk);
+               break;
        }
        skb_dst_set(skb, dst);
 
@@ -3020,7 +3059,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
        th->window = htons(min(req->rsk_rcv_wnd, 65535U));
        tcp_options_write((__be32 *)(th + 1), NULL, &opts);
        th->doff = (tcp_header_size >> 2);
-       TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_OUTSEGS);
+       __TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
 
 #ifdef CONFIG_TCP_MD5SIG
        /* Okay, we have all we need - do the md5 hash if needed */
@@ -3516,10 +3555,10 @@ int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
        int res;
 
        tcp_rsk(req)->txhash = net_tx_rndhash();
-       res = af_ops->send_synack(sk, NULL, &fl, req, NULL, true);
+       res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL);
        if (!res) {
-               TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
+               __TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
+               __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
        }
        return res;
 }
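
Two themes run through the tcp_output.c hunks. First, the old attach_req bool on tcp_make_synack()/send_synack() becomes a three-way enum, so the syncookie case (send a SYNACK without attaching the skb to any socket, avoiding false sharing under synflood) is spelled out explicitly; the enum added by this series to include/net/tcp.h:

    enum tcp_synack_type {
            TCP_SYNACK_NORMAL,      /* attach skb to the request socket */
            TCP_SYNACK_FASTOPEN,    /* attach to the (const) listener   */
            TCP_SYNACK_COOKIE,      /* synflood: attach to no socket    */
    };

Second, __tcp_retransmit_skb() gains a segs budget, computed by the caller as tp->snd_cwnd - tcp_packets_in_flight(tp), so a retransmit may stay a multi-segment TSO packet instead of always being chopped down to a single MSS.
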
index 5353085fd0b2fa431bb7c7a958b7e4062ffd875a..e36df4fcfeba3042f3d84337a18311427f68a418 100644 (file)
@@ -65,8 +65,8 @@ int tcp_rack_mark_lost(struct sock *sk)
                        if (scb->sacked & TCPCB_SACKED_RETRANS) {
                                scb->sacked &= ~TCPCB_SACKED_RETRANS;
                                tp->retrans_out -= tcp_skb_pcount(skb);
-                               NET_INC_STATS_BH(sock_net(sk),
-                                                LINUX_MIB_TCPLOSTRETRANSMIT);
+                               NET_INC_STATS(sock_net(sk),
+                                             LINUX_MIB_TCPLOSTRETRANSMIT);
                        }
                } else if (!(scb->sacked & TCPCB_RETRANS)) {
                        /* Original data are sent sequentially so stop early
index 49bc474f8e35ee50407622ab02867df698bc5117..debdd8b33e69478f7fa108c72e3787b55af6af34 100644 (file)
@@ -30,7 +30,7 @@ static void tcp_write_err(struct sock *sk)
        sk->sk_error_report(sk);
 
        tcp_done(sk);
-       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
+       __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
 }
 
 /* Do not allow orphaned sockets to eat all our resources.
@@ -68,7 +68,7 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
                if (do_reset)
                        tcp_send_active_reset(sk, GFP_ATOMIC);
                tcp_done(sk);
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
+               __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
                return 1;
        }
        return 0;
@@ -162,8 +162,8 @@ static int tcp_write_timeout(struct sock *sk)
                        if (tp->syn_fastopen || tp->syn_data)
                                tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
                        if (tp->syn_data && icsk->icsk_retransmits == 1)
-                               NET_INC_STATS_BH(sock_net(sk),
-                                                LINUX_MIB_TCPFASTOPENACTIVEFAIL);
+                               NET_INC_STATS(sock_net(sk),
+                                             LINUX_MIB_TCPFASTOPENACTIVEFAIL);
                }
                retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
                syn_set = true;
@@ -178,8 +178,8 @@ static int tcp_write_timeout(struct sock *sk)
                            tp->bytes_acked <= tp->rx_opt.mss_clamp) {
                                tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
                                if (icsk->icsk_retransmits == net->ipv4.sysctl_tcp_retries1)
-                                       NET_INC_STATS_BH(sock_net(sk),
-                                                        LINUX_MIB_TCPFASTOPENACTIVEFAIL);
+                                       NET_INC_STATS(sock_net(sk),
+                                                     LINUX_MIB_TCPFASTOPENACTIVEFAIL);
                        }
                        /* Black hole detection */
                        tcp_mtu_probing(icsk, sk);
@@ -209,6 +209,7 @@ static int tcp_write_timeout(struct sock *sk)
        return 0;
 }
 
+/* Called with BH disabled */
 void tcp_delack_timer_handler(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
@@ -228,7 +229,7 @@ void tcp_delack_timer_handler(struct sock *sk)
        if (!skb_queue_empty(&tp->ucopy.prequeue)) {
                struct sk_buff *skb;
 
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);
+               __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);
 
                while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
                        sk_backlog_rcv(sk, skb);
@@ -248,7 +249,7 @@ void tcp_delack_timer_handler(struct sock *sk)
                        icsk->icsk_ack.ato      = TCP_ATO_MIN;
                }
                tcp_send_ack(sk);
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
+               __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
        }
 
 out:
@@ -265,7 +266,7 @@ static void tcp_delack_timer(unsigned long data)
                tcp_delack_timer_handler(sk);
        } else {
                inet_csk(sk)->icsk_ack.blocked = 1;
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
+               __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
                /* delegate our work to tcp_release_cb() */
                if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
                        sock_hold(sk);
@@ -404,7 +405,7 @@ void tcp_retransmit_timer(struct sock *sk)
                        goto out;
                }
                tcp_enter_loss(sk);
-               tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
+               tcp_retransmit_skb(sk, tcp_write_queue_head(sk), 1);
                __sk_dst_reset(sk);
                goto out_reset_timer;
        }
@@ -431,12 +432,12 @@ void tcp_retransmit_timer(struct sock *sk)
                } else {
                        mib_idx = LINUX_MIB_TCPTIMEOUTS;
                }
-               NET_INC_STATS_BH(sock_net(sk), mib_idx);
+               __NET_INC_STATS(sock_net(sk), mib_idx);
        }
 
        tcp_enter_loss(sk);
 
-       if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk)) > 0) {
+       if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk), 1) > 0) {
                /* Retransmission failed because of local congestion,
                 * do not backoff.
                 */
@@ -493,6 +494,7 @@ out_reset_timer:
 out:;
 }
 
+/* Called with BH disabled */
 void tcp_write_timer_handler(struct sock *sk)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
@@ -549,7 +551,7 @@ void tcp_syn_ack_timeout(const struct request_sock *req)
 {
        struct net *net = read_pnet(&inet_rsk(req)->ireq_net);
 
-       NET_INC_STATS_BH(net, LINUX_MIB_TCPTIMEOUTS);
+       __NET_INC_STATS(net, LINUX_MIB_TCPTIMEOUTS);
 }
 EXPORT_SYMBOL(tcp_syn_ack_timeout);
 
index 13951c4087d407b72cb5bc2ee75822203244e3f3..4c4bac1b5eab221928c569592c833e1bfcba748d 100644 (file)
@@ -107,16 +107,16 @@ EXPORT_SYMBOL_GPL(tcp_vegas_init);
  *   o min-filter RTT samples from a much longer window (forever for now)
  *     to find the propagation delay (baseRTT)
  */
-void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us)
+void tcp_vegas_pkts_acked(struct sock *sk, const struct ack_sample *sample)
 {
        struct vegas *vegas = inet_csk_ca(sk);
        u32 vrtt;
 
-       if (rtt_us < 0)
+       if (sample->rtt_us < 0)
                return;
 
        /* Never allow zero rtt or baseRTT */
-       vrtt = rtt_us + 1;
+       vrtt = sample->rtt_us + 1;
 
        /* Filter to find propagation delay: */
        if (vrtt < vegas->baseRTT)
index ef9da5306c685b269cc1efe64ee40196faf11e66..248cfc0ff9aeb78b146c5c77496031b9014fce67 100644 (file)
@@ -17,7 +17,7 @@ struct vegas {
 
 void tcp_vegas_init(struct sock *sk);
 void tcp_vegas_state(struct sock *sk, u8 ca_state);
-void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us);
+void tcp_vegas_pkts_acked(struct sock *sk, const struct ack_sample *sample);
 void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event);
 size_t tcp_vegas_get_info(struct sock *sk, u32 ext, int *attr,
                          union tcp_cc_info *info);
index 0d094b995cd96f8c5150daf586cdde0f495843f5..40171e163cffa723f3eb1539740a8a7c3e963f53 100644 (file)
@@ -69,16 +69,17 @@ static void tcp_veno_init(struct sock *sk)
 }
 
 /* Do rtt sampling needed for Veno. */
-static void tcp_veno_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us)
+static void tcp_veno_pkts_acked(struct sock *sk,
+                               const struct ack_sample *sample)
 {
        struct veno *veno = inet_csk_ca(sk);
        u32 vrtt;
 
-       if (rtt_us < 0)
+       if (sample->rtt_us < 0)
                return;
 
        /* Never allow zero rtt or baseRTT */
-       vrtt = rtt_us + 1;
+       vrtt = sample->rtt_us + 1;
 
        /* Filter to find propagation delay: */
        if (vrtt < veno->basertt)
index c10732e39837872c724b801700f627a7fb1c9390..4b03a2e2a0504617813838746c13691cf86557f6 100644 (file)
@@ -99,12 +99,13 @@ static void westwood_filter(struct westwood *w, u32 delta)
  * Called after processing a group of packets,
  * but all Westwood needs is the last sample of srtt.
  */
-static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt, s32 rtt)
+static void tcp_westwood_pkts_acked(struct sock *sk,
+                                   const struct ack_sample *sample)
 {
        struct westwood *w = inet_csk_ca(sk);
 
-       if (rtt > 0)
-               w->rtt = usecs_to_jiffies(rtt);
+       if (sample->rtt_us > 0)
+               w->rtt = usecs_to_jiffies(sample->rtt_us);
 }
 
 /*
index 3e6a472e6b8831439d3f745b8efd43096612c39d..028eb046ea40d510908040023b46b46469477505 100644 (file)
@@ -56,15 +56,16 @@ static void tcp_yeah_init(struct sock *sk)
        tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128);
 }
 
-static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, s32 rtt_us)
+static void tcp_yeah_pkts_acked(struct sock *sk,
+                               const struct ack_sample *sample)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct yeah *yeah = inet_csk_ca(sk);
 
        if (icsk->icsk_ca_state == TCP_CA_Open)
-               yeah->pkts_acked = pkts_acked;
+               yeah->pkts_acked = sample->pkts_acked;
 
-       tcp_vegas_pkts_acked(sk, pkts_acked, rtt_us);
+       tcp_vegas_pkts_acked(sk, sample);
 }
 
 static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
index 3563788d064fb2dd1a34398ae1dd90766d7f4f1d..f67f52ba480938d913193e11f1da3102913b9687 100644 (file)
@@ -336,8 +336,13 @@ found:
 
                hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
                spin_lock(&hslot2->lock);
-               hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
-                                        &hslot2->head);
+               if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
+                   sk->sk_family == AF_INET6)
+                       hlist_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
+                                          &hslot2->head);
+               else
+                       hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
+                                          &hslot2->head);
                hslot2->count++;
                spin_unlock(&hslot2->lock);
        }
@@ -683,7 +688,7 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
                        iph->saddr, uh->source, skb->dev->ifindex, udptable,
                        NULL);
        if (!sk) {
-               ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
+               __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
                return; /* No socket for error */
        }
 
@@ -877,13 +882,13 @@ send:
        err = ip_send_skb(sock_net(sk), skb);
        if (err) {
                if (err == -ENOBUFS && !inet->recverr) {
-                       UDP_INC_STATS_USER(sock_net(sk),
-                                          UDP_MIB_SNDBUFERRORS, is_udplite);
+                       UDP_INC_STATS(sock_net(sk),
+                                     UDP_MIB_SNDBUFERRORS, is_udplite);
                        err = 0;
                }
        } else
-               UDP_INC_STATS_USER(sock_net(sk),
-                                  UDP_MIB_OUTDATAGRAMS, is_udplite);
+               UDP_INC_STATS(sock_net(sk),
+                             UDP_MIB_OUTDATAGRAMS, is_udplite);
        return err;
 }
 
@@ -1152,8 +1157,8 @@ out:
         * seems like overkill.
         */
        if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
-               UDP_INC_STATS_USER(sock_net(sk),
-                               UDP_MIB_SNDBUFERRORS, is_udplite);
+               UDP_INC_STATS(sock_net(sk),
+                             UDP_MIB_SNDBUFERRORS, is_udplite);
        }
        return err;
 
@@ -1237,10 +1242,10 @@ static unsigned int first_packet_length(struct sock *sk)
        spin_lock_bh(&rcvq->lock);
        while ((skb = skb_peek(rcvq)) != NULL &&
                udp_lib_checksum_complete(skb)) {
-               UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS,
-                                IS_UDPLITE(sk));
-               UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
-                                IS_UDPLITE(sk));
+               __UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
+                               IS_UDPLITE(sk));
+               __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
+                               IS_UDPLITE(sk));
                atomic_inc(&sk->sk_drops);
                __skb_unlink(skb, rcvq);
                __skb_queue_tail(&list_kill, skb);
@@ -1276,14 +1281,6 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
        {
                unsigned int amount = first_packet_length(sk);
 
-               if (amount)
-                       /*
-                        * We will only return the amount
-                        * of this packet since that is all
-                        * that will be read.
-                        */
-                       amount -= sizeof(struct udphdr);
-
                return put_user(amount, (int __user *)arg);
        }
 
@@ -1355,16 +1352,16 @@ try_again:
                trace_kfree_skb(skb, udp_recvmsg);
                if (!peeked) {
                        atomic_inc(&sk->sk_drops);
-                       UDP_INC_STATS_USER(sock_net(sk),
-                                          UDP_MIB_INERRORS, is_udplite);
+                       UDP_INC_STATS(sock_net(sk),
+                                     UDP_MIB_INERRORS, is_udplite);
                }
                skb_free_datagram_locked(sk, skb);
                return err;
        }
 
        if (!peeked)
-               UDP_INC_STATS_USER(sock_net(sk),
-                               UDP_MIB_INDATAGRAMS, is_udplite);
+               UDP_INC_STATS(sock_net(sk),
+                             UDP_MIB_INDATAGRAMS, is_udplite);
 
        sock_recv_ts_and_drops(msg, sk, skb);
 
@@ -1377,7 +1374,7 @@ try_again:
                *addr_len = sizeof(*sin);
        }
        if (inet->cmsg_flags)
-               ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr));
+               ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr) + off);
 
        err = copied;
        if (flags & MSG_TRUNC)
@@ -1389,8 +1386,8 @@ try_again:
 csum_copy_err:
        slow = lock_sock_fast(sk);
        if (!skb_kill_datagram(sk, skb, flags)) {
-               UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
-               UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+               UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
+               UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
        }
        unlock_sock_fast(sk, slow);
 
@@ -1517,9 +1514,9 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
                /* Note that an ENOMEM error is charged twice */
                if (rc == -ENOMEM)
-                       UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
-                                        is_udplite);
-               UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+                       UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
+                                       is_udplite);
+               UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
                kfree_skb(skb);
                trace_udp_fail_queue_rcv_skb(rc, sk);
                return -1;
@@ -1583,9 +1580,9 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
                        ret = encap_rcv(sk, skb);
                        if (ret <= 0) {
-                               UDP_INC_STATS_BH(sock_net(sk),
-                                                UDP_MIB_INDATAGRAMS,
-                                                is_udplite);
+                               __UDP_INC_STATS(sock_net(sk),
+                                               UDP_MIB_INDATAGRAMS,
+                                               is_udplite);
                                return -ret;
                        }
                }
@@ -1636,8 +1633,8 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
        udp_csum_pull_header(skb);
        if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
-               UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
-                                is_udplite);
+               __UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
+                               is_udplite);
                goto drop;
        }
 
@@ -1656,9 +1653,9 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
        return rc;
 
 csum_error:
-       UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
+       __UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
 drop:
-       UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+       __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
        atomic_inc(&sk->sk_drops);
        kfree_skb(skb);
        return -1;
@@ -1718,10 +1715,10 @@ start_lookup:
 
                if (unlikely(!nskb)) {
                        atomic_inc(&sk->sk_drops);
-                       UDP_INC_STATS_BH(net, UDP_MIB_RCVBUFERRORS,
-                                        IS_UDPLITE(sk));
-                       UDP_INC_STATS_BH(net, UDP_MIB_INERRORS,
-                                        IS_UDPLITE(sk));
+                       __UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
+                                       IS_UDPLITE(sk));
+                       __UDP_INC_STATS(net, UDP_MIB_INERRORS,
+                                       IS_UDPLITE(sk));
                        continue;
                }
                if (udp_queue_rcv_skb(sk, nskb) > 0)
@@ -1739,8 +1736,8 @@ start_lookup:
                        consume_skb(skb);
        } else {
                kfree_skb(skb);
-               UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
-                                proto == IPPROTO_UDPLITE);
+               __UDP_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
+                               proto == IPPROTO_UDPLITE);
        }
        return 0;
 }
@@ -1854,7 +1851,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
        if (udp_lib_checksum_complete(skb))
                goto csum_error;
 
-       UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
+       __UDP_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 
        /*
@@ -1881,9 +1878,9 @@ csum_error:
                            proto == IPPROTO_UDPLITE ? "Lite" : "",
                            &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
                            ulen);
-       UDP_INC_STATS_BH(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
+       __UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
 drop:
-       UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
+       __UDP_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
        kfree_skb(skb);
        return 0;
 }
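
The udp_ioctl() hunk stops subtracting sizeof(struct udphdr): together with the udp_csum_pull_header() call visible further down, the UDP header is now pulled before the skb is queued, so first_packet_length() already reports payload bytes. Userspace semantics of SIOCINQ/FIONREAD are unchanged; a minimal probe (hypothetical helper, plain C):

    #include <sys/ioctl.h>

    /* Return the payload size of the next datagram queued on UDP socket
     * fd, or -1 on error (FIONREAD is SIOCINQ for UDP sockets).
     */
    static int udp_next_payload_len(int fd)
    {
            int amount;

            return ioctl(fd, FIONREAD, &amount) < 0 ? -1 : amount;
    }
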
index 6230cf4b0d2daecb300b97bee3599d20ddd3e319..6b7459c92bb2a89c9372b9dafa4621031b624123 100644 (file)
@@ -39,8 +39,11 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
         * 16 bit length field due to the header being added outside of an
         * IP or IPv6 frame that was already limited to 64K - 1.
         */
-       partial = csum_sub(csum_unfold(uh->check),
-                          (__force __wsum)htonl(skb->len));
+       if (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)
+               partial = (__force __wsum)uh->len;
+       else
+               partial = (__force __wsum)htonl(skb->len);
+       partial = csum_sub(csum_unfold(uh->check), partial);
 
        /* setup inner skb. */
        skb->encapsulation = 0;
@@ -89,7 +92,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
        udp_offset = outer_hlen - tnl_hlen;
        skb = segs;
        do {
-               __be16 len;
+               unsigned int len;
 
                if (remcsum)
                        skb->ip_summed = CHECKSUM_NONE;
@@ -107,14 +110,26 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
                skb_reset_mac_header(skb);
                skb_set_network_header(skb, mac_len);
                skb_set_transport_header(skb, udp_offset);
-               len = htons(skb->len - udp_offset);
+               len = skb->len - udp_offset;
                uh = udp_hdr(skb);
-               uh->len = len;
+
+               /* If we are only performing partial GSO, the inner header
+                * will be using a length value equal to only one MSS-sized
+                * segment instead of the entire frame.
+                */
+               if (skb_is_gso(skb)) {
+                       uh->len = htons(skb_shinfo(skb)->gso_size +
+                                       SKB_GSO_CB(skb)->data_offset +
+                                       skb->head - (unsigned char *)uh);
+               } else {
+                       uh->len = htons(len);
+               }
 
                if (!need_csum)
                        continue;
 
-               uh->check = ~csum_fold(csum_add(partial, (__force __wsum)len));
+               uh->check = ~csum_fold(csum_add(partial,
+                                      (__force __wsum)htonl(len)));
 
                if (skb->encapsulation || !offload_csum) {
                        uh->check = gso_make_checksum(skb, ~uh->check);
@@ -335,6 +350,11 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff,
 
        uh->len = newlen;
 
+       /* Set encapsulation before calling into inner gro_complete() functions
+        * to make them set up the inner offsets.
+        */
+       skb->encapsulation = 1;
+
        rcu_read_lock();
        sk = (*lookup)(skb, uh->source, uh->dest);
        if (sk && udp_sk(sk)->gro_complete)
@@ -345,9 +365,6 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff,
        if (skb->remcsum_offload)
                skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM;
 
-       skb->encapsulation = 1;
-       skb_set_inner_mac_header(skb, nhoff + sizeof(struct udphdr));
-
        return err;
 }
 EXPORT_SYMBOL(udp_gro_complete);
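
__skb_udp_tunnel_segment() precomputes a "partial" checksum by removing the superframe length from the unfolded uh->check once, then folding each segment's own length back in; for SKB_GSO_PARTIAL the 16-bit uh->len is used instead of htonl(skb->len), since the header was built outside an IP frame already capped at 64K - 1. The same rebasing in isolation, as self-contained one's-complement arithmetic (RFC 1624 style, made-up values; the kernel's csum_sub()/csum_add()/csum_fold() do this on 32-bit __wsum):

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t fold(uint32_t sum)
    {
            while (sum >> 16)
                    sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)sum;
    }

    int main(void)
    {
            uint16_t check = 0x1234;          /* folded csum covering old_len */
            uint16_t old_len = 1472, new_len = 512;
            uint32_t partial;

            /* csum_sub(csum_unfold(check), old_len): drop the old length */
            partial = (uint32_t)(uint16_t)~check + (uint16_t)~old_len;

            /* per segment: ~csum_fold(csum_add(partial, new_len)) */
            check = ~fold(partial + new_len);

            printf("rebased check: 0x%04x\n", (unsigned)check);
            return 0;
    }
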
index 11e875ffd7ace7360f3f03759cc0b81bc8a11abf..3f8411328de54d2101433f89fea20bef62cefa9a 100644 (file)
@@ -218,6 +218,7 @@ config IPV6_GRE
        tristate "IPv6: GRE tunnel"
        select IPV6_TUNNEL
        select NET_IP_TUNNEL
+       depends on NET_IPGRE_DEMUX
        ---help---
          Tunneling means encapsulating data of one protocol type within
          another protocol and sending it over a channel that understands the
index 27aed1afcf81c0a516bb6768e20b27a6794fb1cc..47f837a58e0ad83f152272af0c15c6361e535cf7 100644 (file)
@@ -359,7 +359,6 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
                ndev->addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64;
 
        ndev->cnf.mtu6 = dev->mtu;
-       ndev->cnf.sysctl = NULL;
        ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
        if (!ndev->nd_parms) {
                kfree(ndev);
@@ -3176,35 +3175,9 @@ static void addrconf_gre_config(struct net_device *dev)
 }
 #endif
 
-#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
-/* If the host route is cached on the addr struct make sure it is associated
- * with the proper table. e.g., enslavement can change and if so the cached
- * host route needs to move to the new table.
- */
-static void l3mdev_check_host_rt(struct inet6_dev *idev,
-                                 struct inet6_ifaddr *ifp)
-{
-       if (ifp->rt) {
-               u32 tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL;
-
-               if (tb_id != ifp->rt->rt6i_table->tb6_id) {
-                       ip6_del_rt(ifp->rt);
-                       ifp->rt = NULL;
-               }
-       }
-}
-#else
-static void l3mdev_check_host_rt(struct inet6_dev *idev,
-                                 struct inet6_ifaddr *ifp)
-{
-}
-#endif
-
 static int fixup_permanent_addr(struct inet6_dev *idev,
                                struct inet6_ifaddr *ifp)
 {
-       l3mdev_check_host_rt(idev, ifp);
-
        if (!ifp->rt) {
                struct rt6_info *rt;
 
@@ -3255,6 +3228,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
                           void *ptr)
 {
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+       struct netdev_notifier_changeupper_info *info;
        struct inet6_dev *idev = __in6_dev_get(dev);
        int run_pending = 0;
        int err;
@@ -3303,6 +3277,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
                        break;
 
                if (event == NETDEV_UP) {
+                       /* restore routes for permanent addresses */
+                       addrconf_permanent_addr(dev);
+
                        if (!addrconf_qdisc_ok(dev)) {
                                /* device is not ready yet. */
                                pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
@@ -3336,9 +3313,6 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
                        run_pending = 1;
                }
 
-               /* restore routes for permanent addresses */
-               addrconf_permanent_addr(dev);
-
                switch (dev->type) {
 #if IS_ENABLED(CONFIG_IPV6_SIT)
                case ARPHRD_SIT:
@@ -3413,6 +3387,15 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
                if (idev)
                        addrconf_type_change(dev, event);
                break;
+
+       case NETDEV_CHANGEUPPER:
+               info = ptr;
+
+               /* flush all routes if dev is linked to or unlinked from
+                * an L3 master device (e.g., VRF)
+                */
+               if (info->upper_dev && netif_is_l3_master(info->upper_dev))
+                       addrconf_ifdown(dev, 0);
        }
 
        return NOTIFY_OK;
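
The new NETDEV_CHANGEUPPER case inspects the notifier payload for the upper device: when an interface is enslaved to or released from an L3 master device (a VRF), its IPv6 state is flushed via addrconf_ifdown(dev, 0). The payload struct consumed here, simplified from include/linux/netdevice.h:

    struct netdev_notifier_changeupper_info {
            struct netdev_notifier_info info;   /* must come first */
            struct net_device *upper_dev;       /* new or former upper dev */
            bool master;                        /* upper dev is a master? */
            bool linking;                       /* true = link, false = unlink */
    };
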
@@ -3438,6 +3421,12 @@ static void addrconf_type_change(struct net_device *dev, unsigned long event)
                ipv6_mc_unmap(idev);
 }
 
+static bool addr_is_local(const struct in6_addr *addr)
+{
+       return ipv6_addr_type(addr) &
+               (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
+}
+
 static int addrconf_ifdown(struct net_device *dev, int how)
 {
        struct net *net = dev_net(dev);
@@ -3495,7 +3484,8 @@ restart:
                                 * address is retained on a down event
                                 */
                                if (!keep_addr ||
-                                   !(ifa->flags & IFA_F_PERMANENT)) {
+                                   !(ifa->flags & IFA_F_PERMANENT) ||
+                                   addr_is_local(&ifa->addr)) {
                                        hlist_del_init_rcu(&ifa->addr_lst);
                                        goto restart;
                                }
@@ -3539,17 +3529,23 @@ restart:
 
        INIT_LIST_HEAD(&del_list);
        list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
+               struct rt6_info *rt = NULL;
+
                addrconf_del_dad_work(ifa);
 
                write_unlock_bh(&idev->lock);
                spin_lock_bh(&ifa->lock);
 
-               if (keep_addr && (ifa->flags & IFA_F_PERMANENT)) {
+               if (keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
+                   !addr_is_local(&ifa->addr)) {
                        /* set state to skip the notifier below */
                        state = INET6_IFADDR_STATE_DEAD;
                        ifa->state = 0;
                        if (!(ifa->flags & IFA_F_NODAD))
                                ifa->flags |= IFA_F_TENTATIVE;
+
+                       rt = ifa->rt;
+                       ifa->rt = NULL;
                } else {
                        state = ifa->state;
                        ifa->state = INET6_IFADDR_STATE_DEAD;
@@ -3560,6 +3556,9 @@ restart:
 
                spin_unlock_bh(&ifa->lock);
 
+               if (rt)
+                       ip6_del_rt(rt);
+
                if (state != INET6_IFADDR_STATE_DEAD) {
                        __ipv6_ifa_notify(RTM_DELADDR, ifa);
                        inet6addr_notifier_call_chain(NETDEV_DOWN, ifa);
@@ -4995,15 +4994,13 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
 {
        struct inet6_ifaddr *ifp;
        struct net_device *dev = idev->dev;
-       bool update_rs = false;
+       bool clear_token, update_rs = false;
        struct in6_addr ll_addr;
 
        ASSERT_RTNL();
 
        if (!token)
                return -EINVAL;
-       if (ipv6_addr_any(token))
-               return -EINVAL;
        if (dev->flags & (IFF_LOOPBACK | IFF_NOARP))
                return -EINVAL;
        if (!ipv6_accept_ra(idev))
@@ -5018,10 +5015,13 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
 
        write_unlock_bh(&idev->lock);
 
+       clear_token = ipv6_addr_any(token);
+       if (clear_token)
+               goto update_lft;
+
        if (!idev->dead && (idev->if_flags & IF_READY) &&
            !ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE |
                             IFA_F_OPTIMISTIC)) {
-
                /* If we're not ready, then normal ifup will take care
                 * of this. Otherwise, we need to request our rs here.
                 */
@@ -5029,6 +5029,7 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
                update_rs = true;
        }
 
+update_lft:
        write_lock_bh(&idev->lock);
 
        if (update_rs) {
@@ -5325,10 +5326,10 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
                        if (rt)
                                ip6_del_rt(rt);
                }
-               dst_hold(&ifp->rt->dst);
-
-               ip6_del_rt(ifp->rt);
-
+               if (ifp->rt) {
+                       dst_hold(&ifp->rt->dst);
+                       ip6_del_rt(ifp->rt);
+               }
                rt_genid_bump_ipv6(net);
                break;
        }
@@ -5618,376 +5619,366 @@ int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl,
        return ret;
 }
 
-static struct addrconf_sysctl_table
-{
-       struct ctl_table_header *sysctl_header;
-       struct ctl_table addrconf_vars[DEVCONF_MAX+1];
-} addrconf_sysctl __read_mostly = {
-       .sysctl_header = NULL,
-       .addrconf_vars = {
-               {
-                       .procname       = "forwarding",
-                       .data           = &ipv6_devconf.forwarding,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = addrconf_sysctl_forward,
-               },
-               {
-                       .procname       = "hop_limit",
-                       .data           = &ipv6_devconf.hop_limit,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = addrconf_sysctl_hop_limit,
-               },
-               {
-                       .procname       = "mtu",
-                       .data           = &ipv6_devconf.mtu6,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = addrconf_sysctl_mtu,
-               },
-               {
-                       .procname       = "accept_ra",
-                       .data           = &ipv6_devconf.accept_ra,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
-               },
-               {
-                       .procname       = "accept_redirects",
-                       .data           = &ipv6_devconf.accept_redirects,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
-               },
-               {
-                       .procname       = "autoconf",
-                       .data           = &ipv6_devconf.autoconf,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
-               },
-               {
-                       .procname       = "dad_transmits",
-                       .data           = &ipv6_devconf.dad_transmits,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
-               },
-               {
-                       .procname       = "router_solicitations",
-                       .data           = &ipv6_devconf.rtr_solicits,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
-               },
-               {
-                       .procname       = "router_solicitation_interval",
-                       .data           = &ipv6_devconf.rtr_solicit_interval,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = proc_dointvec_jiffies,
-               },
-               {
-                       .procname       = "router_solicitation_delay",
-                       .data           = &ipv6_devconf.rtr_solicit_delay,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = proc_dointvec_jiffies,
-               },
-               {
-                       .procname       = "force_mld_version",
-                       .data           = &ipv6_devconf.force_mld_version,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
-               },
-               {
-                       .procname       = "mldv1_unsolicited_report_interval",
-                       .data           =
-                               &ipv6_devconf.mldv1_unsolicited_report_interval,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = proc_dointvec_ms_jiffies,
-               },
-               {
-                       .procname       = "mldv2_unsolicited_report_interval",
-                       .data           =
-                               &ipv6_devconf.mldv2_unsolicited_report_interval,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = proc_dointvec_ms_jiffies,
-               },
-               {
-                       .procname       = "use_tempaddr",
-                       .data           = &ipv6_devconf.use_tempaddr,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
-               },
-               {
-                       .procname       = "temp_valid_lft",
-                       .data           = &ipv6_devconf.temp_valid_lft,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
-               },
-               {
-                       .procname       = "temp_prefered_lft",
-                       .data           = &ipv6_devconf.temp_prefered_lft,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
-               },
-               {
-                       .procname       = "regen_max_retry",
-                       .data           = &ipv6_devconf.regen_max_retry,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
-               },
-               {
-                       .procname       = "max_desync_factor",
-                       .data           = &ipv6_devconf.max_desync_factor,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
-               },
-               {
-                       .procname       = "max_addresses",
-                       .data           = &ipv6_devconf.max_addresses,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
-               },
-               {
-                       .procname       = "accept_ra_defrtr",
-                       .data           = &ipv6_devconf.accept_ra_defrtr,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
-               },
-               {
-                       .procname       = "accept_ra_min_hop_limit",
-                       .data           = &ipv6_devconf.accept_ra_min_hop_limit,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
-               },
-               {
-                       .procname       = "accept_ra_pinfo",
-                       .data           = &ipv6_devconf.accept_ra_pinfo,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
-               },
+static const struct ctl_table addrconf_sysctl[] = {
+       {
+               .procname       = "forwarding",
+               .data           = &ipv6_devconf.forwarding,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = addrconf_sysctl_forward,
+       },
+       {
+               .procname       = "hop_limit",
+               .data           = &ipv6_devconf.hop_limit,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = addrconf_sysctl_hop_limit,
+       },
+       {
+               .procname       = "mtu",
+               .data           = &ipv6_devconf.mtu6,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = addrconf_sysctl_mtu,
+       },
+       {
+               .procname       = "accept_ra",
+               .data           = &ipv6_devconf.accept_ra,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "accept_redirects",
+               .data           = &ipv6_devconf.accept_redirects,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "autoconf",
+               .data           = &ipv6_devconf.autoconf,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "dad_transmits",
+               .data           = &ipv6_devconf.dad_transmits,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "router_solicitations",
+               .data           = &ipv6_devconf.rtr_solicits,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "router_solicitation_interval",
+               .data           = &ipv6_devconf.rtr_solicit_interval,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_jiffies,
+       },
+       {
+               .procname       = "router_solicitation_delay",
+               .data           = &ipv6_devconf.rtr_solicit_delay,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_jiffies,
+       },
+       {
+               .procname       = "force_mld_version",
+               .data           = &ipv6_devconf.force_mld_version,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "mldv1_unsolicited_report_interval",
+               .data           =
+                       &ipv6_devconf.mldv1_unsolicited_report_interval,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_ms_jiffies,
+       },
+       {
+               .procname       = "mldv2_unsolicited_report_interval",
+               .data           =
+                       &ipv6_devconf.mldv2_unsolicited_report_interval,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_ms_jiffies,
+       },
+       {
+               .procname       = "use_tempaddr",
+               .data           = &ipv6_devconf.use_tempaddr,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "temp_valid_lft",
+               .data           = &ipv6_devconf.temp_valid_lft,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "temp_prefered_lft",
+               .data           = &ipv6_devconf.temp_prefered_lft,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "regen_max_retry",
+               .data           = &ipv6_devconf.regen_max_retry,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "max_desync_factor",
+               .data           = &ipv6_devconf.max_desync_factor,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "max_addresses",
+               .data           = &ipv6_devconf.max_addresses,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "accept_ra_defrtr",
+               .data           = &ipv6_devconf.accept_ra_defrtr,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "accept_ra_min_hop_limit",
+               .data           = &ipv6_devconf.accept_ra_min_hop_limit,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "accept_ra_pinfo",
+               .data           = &ipv6_devconf.accept_ra_pinfo,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
 #ifdef CONFIG_IPV6_ROUTER_PREF
-               {
-                       .procname       = "accept_ra_rtr_pref",
-                       .data           = &ipv6_devconf.accept_ra_rtr_pref,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
-               },
-               {
-                       .procname       = "router_probe_interval",
-                       .data           = &ipv6_devconf.rtr_probe_interval,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = proc_dointvec_jiffies,
-               },
+       {
+               .procname       = "accept_ra_rtr_pref",
+               .data           = &ipv6_devconf.accept_ra_rtr_pref,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "router_probe_interval",
+               .data           = &ipv6_devconf.rtr_probe_interval,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_jiffies,
+       },
 #ifdef CONFIG_IPV6_ROUTE_INFO
-               {
-                       .procname       = "accept_ra_rt_info_max_plen",
-                       .data           = &ipv6_devconf.accept_ra_rt_info_max_plen,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
-               },
+       {
+               .procname       = "accept_ra_rt_info_max_plen",
+               .data           = &ipv6_devconf.accept_ra_rt_info_max_plen,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
 #endif
 #endif
-               {
-                       .procname       = "proxy_ndp",
-                       .data           = &ipv6_devconf.proxy_ndp,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = addrconf_sysctl_proxy_ndp,
-               },
-               {
-                       .procname       = "accept_source_route",
-                       .data           = &ipv6_devconf.accept_source_route,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
-               },
+       {
+               .procname       = "proxy_ndp",
+               .data           = &ipv6_devconf.proxy_ndp,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = addrconf_sysctl_proxy_ndp,
+       },
+       {
+               .procname       = "accept_source_route",
+               .data           = &ipv6_devconf.accept_source_route,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
-               {
-                       .procname       = "optimistic_dad",
-                       .data           = &ipv6_devconf.optimistic_dad,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
-
-               },
-               {
-                       .procname       = "use_optimistic",
-                       .data           = &ipv6_devconf.use_optimistic,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
-
-               },
+       {
+               .procname       = "optimistic_dad",
+               .data           = &ipv6_devconf.optimistic_dad,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "use_optimistic",
+               .data           = &ipv6_devconf.use_optimistic,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
 #endif
 #ifdef CONFIG_IPV6_MROUTE
-               {
-                       .procname       = "mc_forwarding",
-                       .data           = &ipv6_devconf.mc_forwarding,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0444,
-                       .proc_handler   = proc_dointvec,
-               },
+       {
+               .procname       = "mc_forwarding",
+               .data           = &ipv6_devconf.mc_forwarding,
+               .maxlen         = sizeof(int),
+               .mode           = 0444,
+               .proc_handler   = proc_dointvec,
+       },
 #endif
-               {
-                       .procname       = "disable_ipv6",
-                       .data           = &ipv6_devconf.disable_ipv6,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = addrconf_sysctl_disable,
-               },
-               {
-                       .procname       = "accept_dad",
-                       .data           = &ipv6_devconf.accept_dad,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
-               },
-               {
-                       .procname       = "force_tllao",
-                       .data           = &ipv6_devconf.force_tllao,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = proc_dointvec
-               },
-               {
-                       .procname       = "ndisc_notify",
-                       .data           = &ipv6_devconf.ndisc_notify,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = proc_dointvec
-               },
-               {
-                       .procname       = "suppress_frag_ndisc",
-                       .data           = &ipv6_devconf.suppress_frag_ndisc,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = proc_dointvec
-               },
-               {
-                       .procname       = "accept_ra_from_local",
-                       .data           = &ipv6_devconf.accept_ra_from_local,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
-               },
-               {
-                       .procname       = "accept_ra_mtu",
-                       .data           = &ipv6_devconf.accept_ra_mtu,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
-               },
-               {
-                       .procname       = "stable_secret",
-                       .data           = &ipv6_devconf.stable_secret,
-                       .maxlen         = IPV6_MAX_STRLEN,
-                       .mode           = 0600,
-                       .proc_handler   = addrconf_sysctl_stable_secret,
-               },
-               {
-                       .procname       = "use_oif_addrs_only",
-                       .data           = &ipv6_devconf.use_oif_addrs_only,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
-               },
-               {
-                       .procname       = "ignore_routes_with_linkdown",
-                       .data           = &ipv6_devconf.ignore_routes_with_linkdown,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = addrconf_sysctl_ignore_routes_with_linkdown,
-               },
-               {
-                       .procname       = "drop_unicast_in_l2_multicast",
-                       .data           = &ipv6_devconf.drop_unicast_in_l2_multicast,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
-               },
-               {
-                       .procname       = "drop_unsolicited_na",
-                       .data           = &ipv6_devconf.drop_unsolicited_na,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
-               },
-               {
-                       .procname       = "keep_addr_on_down",
-                       .data           = &ipv6_devconf.keep_addr_on_down,
-                       .maxlen         = sizeof(int),
-                       .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
-
-               },
-               {
-                       /* sentinel */
-               }
+       {
+               .procname       = "disable_ipv6",
+               .data           = &ipv6_devconf.disable_ipv6,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = addrconf_sysctl_disable,
+       },
+       {
+               .procname       = "accept_dad",
+               .data           = &ipv6_devconf.accept_dad,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "force_tllao",
+               .data           = &ipv6_devconf.force_tllao,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
+       {
+               .procname       = "ndisc_notify",
+               .data           = &ipv6_devconf.ndisc_notify,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
+       {
+               .procname       = "suppress_frag_ndisc",
+               .data           = &ipv6_devconf.suppress_frag_ndisc,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
+       {
+               .procname       = "accept_ra_from_local",
+               .data           = &ipv6_devconf.accept_ra_from_local,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "accept_ra_mtu",
+               .data           = &ipv6_devconf.accept_ra_mtu,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
        },
+       {
+               .procname       = "stable_secret",
+               .data           = &ipv6_devconf.stable_secret,
+               .maxlen         = IPV6_MAX_STRLEN,
+               .mode           = 0600,
+               .proc_handler   = addrconf_sysctl_stable_secret,
+       },
+       {
+               .procname       = "use_oif_addrs_only",
+               .data           = &ipv6_devconf.use_oif_addrs_only,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "ignore_routes_with_linkdown",
+               .data           = &ipv6_devconf.ignore_routes_with_linkdown,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = addrconf_sysctl_ignore_routes_with_linkdown,
+       },
+       {
+               .procname       = "drop_unicast_in_l2_multicast",
+               .data           = &ipv6_devconf.drop_unicast_in_l2_multicast,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "drop_unsolicited_na",
+               .data           = &ipv6_devconf.drop_unsolicited_na,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "keep_addr_on_down",
+               .data           = &ipv6_devconf.keep_addr_on_down,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               /* sentinel */
+       }
 };
 
 static int __addrconf_sysctl_register(struct net *net, char *dev_name,
                struct inet6_dev *idev, struct ipv6_devconf *p)
 {
        int i;
-       struct addrconf_sysctl_table *t;
+       struct ctl_table *table;
        char path[sizeof("net/ipv6/conf/") + IFNAMSIZ];
 
-       t = kmemdup(&addrconf_sysctl, sizeof(*t), GFP_KERNEL);
-       if (!t)
+       table = kmemdup(addrconf_sysctl, sizeof(addrconf_sysctl), GFP_KERNEL);
+       if (!table)
                goto out;
 
-       for (i = 0; t->addrconf_vars[i].data; i++) {
-               t->addrconf_vars[i].data += (char *)p - (char *)&ipv6_devconf;
-               t->addrconf_vars[i].extra1 = idev; /* embedded; no ref */
-               t->addrconf_vars[i].extra2 = net;
+       for (i = 0; table[i].data; i++) {
+               table[i].data += (char *)p - (char *)&ipv6_devconf;
+               table[i].extra1 = idev; /* embedded; no ref */
+               table[i].extra2 = net;
        }
 
        snprintf(path, sizeof(path), "net/ipv6/conf/%s", dev_name);
 
-       t->sysctl_header = register_net_sysctl(net, path, t->addrconf_vars);
-       if (!t->sysctl_header)
+       p->sysctl_header = register_net_sysctl(net, path, table);
+       if (!p->sysctl_header)
                goto free;
 
-       p->sysctl = t;
        return 0;
 
 free:
-       kfree(t);
+       kfree(table);
 out:
        return -ENOBUFS;
 }
 
 static void __addrconf_sysctl_unregister(struct ipv6_devconf *p)
 {
-       struct addrconf_sysctl_table *t;
+       struct ctl_table *table;
 
-       if (!p->sysctl)
+       if (!p->sysctl_header)
                return;
 
-       t = p->sysctl;
-       p->sysctl = NULL;
-       unregister_net_sysctl_table(t->sysctl_header);
-       kfree(t);
+       table = p->sysctl_header->ctl_table_arg;
+       unregister_net_sysctl_table(p->sysctl_header);
+       p->sysctl_header = NULL;
+       kfree(table);
 }
 
 static int addrconf_sysctl_register(struct inet6_dev *idev)
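
The conversion above replaces the wrapper struct with a flat ctl_table
template whose .data pointers reference the global ipv6_devconf defaults;
__addrconf_sysctl_register() then kmemdup()s the template and rebases each
.data pointer into the per-device ipv6_devconf by adding the byte offset
(char *)p - (char *)&ipv6_devconf. A self-contained userspace sketch of that
rebase trick (illustrative types only, not the kernel API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct devconf { int forwarding; int hop_limit; };
struct ctl_entry { const char *name; void *data; };

static struct devconf defaults = { .forwarding = 0, .hop_limit = 64 };
static struct ctl_entry tmpl[] = {
        { "forwarding", &defaults.forwarding },
        { "hop_limit",  &defaults.hop_limit  },
        { NULL, NULL }                  /* sentinel, as in the kernel table */
};

int main(void)
{
        struct devconf *p = malloc(sizeof(*p));
        struct ctl_entry *t = malloc(sizeof(tmpl));

        *p = defaults;
        memcpy(t, tmpl, sizeof(tmpl));
        for (int i = 0; t[i].data; i++) /* rebase template into *p */
                t[i].data = (char *)t[i].data + ((char *)p - (char *)&defaults);
        *(int *)t[1].data = 255;        /* writes p->hop_limit, not defaults */
        printf("%d %d\n", p->hop_limit, defaults.hop_limit);    /* 255 64 */
        free(t);
        free(p);
        return 0;
}
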
index a73d70119fcd7748aef5d906dad09cb59c99fb75..00d0c2903173a96571983216f2839a93059cad22 100644 (file)
@@ -40,18 +40,114 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a)
        return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0);
 }
 
+static void ip6_datagram_flow_key_init(struct flowi6 *fl6, struct sock *sk)
+{
+       struct inet_sock *inet = inet_sk(sk);
+       struct ipv6_pinfo *np = inet6_sk(sk);
+
+       memset(fl6, 0, sizeof(*fl6));
+       fl6->flowi6_proto = sk->sk_protocol;
+       fl6->daddr = sk->sk_v6_daddr;
+       fl6->saddr = np->saddr;
+       fl6->flowi6_oif = sk->sk_bound_dev_if;
+       fl6->flowi6_mark = sk->sk_mark;
+       fl6->fl6_dport = inet->inet_dport;
+       fl6->fl6_sport = inet->inet_sport;
+       fl6->flowlabel = np->flow_label;
+
+       if (!fl6->flowi6_oif)
+               fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
+
+       if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr))
+               fl6->flowi6_oif = np->mcast_oif;
+
+       security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
+}
+
+int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr)
+{
+       struct ip6_flowlabel *flowlabel = NULL;
+       struct in6_addr *final_p, final;
+       struct ipv6_txoptions *opt;
+       struct dst_entry *dst;
+       struct inet_sock *inet = inet_sk(sk);
+       struct ipv6_pinfo *np = inet6_sk(sk);
+       struct flowi6 fl6;
+       int err = 0;
+
+       if (np->sndflow && (np->flow_label & IPV6_FLOWLABEL_MASK)) {
+               flowlabel = fl6_sock_lookup(sk, np->flow_label);
+               if (!flowlabel)
+                       return -EINVAL;
+       }
+       ip6_datagram_flow_key_init(&fl6, sk);
+
+       rcu_read_lock();
+       opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
+       final_p = fl6_update_dst(&fl6, opt, &final);
+       rcu_read_unlock();
+
+       dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
+       if (IS_ERR(dst)) {
+               err = PTR_ERR(dst);
+               goto out;
+       }
+
+       if (fix_sk_saddr) {
+               if (ipv6_addr_any(&np->saddr))
+                       np->saddr = fl6.saddr;
+
+               if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
+                       sk->sk_v6_rcv_saddr = fl6.saddr;
+                       inet->inet_rcv_saddr = LOOPBACK4_IPV6;
+                       if (sk->sk_prot->rehash)
+                               sk->sk_prot->rehash(sk);
+               }
+       }
+
+       ip6_dst_store(sk, dst,
+                     ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
+                     &sk->sk_v6_daddr : NULL,
+#ifdef CONFIG_IPV6_SUBTREES
+                     ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
+                     &np->saddr :
+#endif
+                     NULL);
+
+out:
+       fl6_sock_release(flowlabel);
+       return err;
+}
+
+void ip6_datagram_release_cb(struct sock *sk)
+{
+       struct dst_entry *dst;
+
+       if (ipv6_addr_v4mapped(&sk->sk_v6_daddr))
+               return;
+
+       rcu_read_lock();
+       dst = __sk_dst_get(sk);
+       if (!dst || !dst->obsolete ||
+           dst->ops->check(dst, inet6_sk(sk)->dst_cookie)) {
+               rcu_read_unlock();
+               return;
+       }
+       rcu_read_unlock();
+
+       ip6_datagram_dst_update(sk, false);
+}
+EXPORT_SYMBOL_GPL(ip6_datagram_release_cb);
+
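
ip6_datagram_release_cb() only re-resolves the route when the socket's cached
dst exists, has been marked obsolete, and its check() hook no longer validates
it against the socket's cookie. A toy model of that lazy-revalidation decision
(hypothetical stand-in types, not the kernel dst API):

#include <stdbool.h>
#include <stdio.h>

struct route { bool obsolete; int cookie; };

static bool route_check(const struct route *r, int cookie)
{
        return r->cookie == cookie;     /* still valid for this socket? */
}

static void maybe_reroute(const struct route *r, int sk_cookie)
{
        if (r->obsolete && !route_check(r, sk_cookie))
                printf("re-resolving route\n"); /* ip6_datagram_dst_update() */
        else
                printf("cached route still good\n");
}

int main(void)
{
        struct route r = { .obsolete = true, .cookie = 1 };

        maybe_reroute(&r, 2);           /* re-resolving route */
        maybe_reroute(&r, 1);           /* cached route still good */
        return 0;
}
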
 static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
        struct sockaddr_in6     *usin = (struct sockaddr_in6 *) uaddr;
        struct inet_sock        *inet = inet_sk(sk);
        struct ipv6_pinfo       *np = inet6_sk(sk);
-       struct in6_addr *daddr, *final_p, final;
-       struct dst_entry        *dst;
-       struct flowi6           fl6;
-       struct ip6_flowlabel    *flowlabel = NULL;
-       struct ipv6_txoptions   *opt;
+       struct in6_addr         *daddr;
        int                     addr_type;
        int                     err;
+       __be32                  fl6_flowlabel = 0;
 
        if (usin->sin6_family == AF_INET) {
                if (__ipv6_only_sock(sk))
@@ -66,15 +162,8 @@ static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int a
        if (usin->sin6_family != AF_INET6)
                return -EAFNOSUPPORT;
 
-       memset(&fl6, 0, sizeof(fl6));
-       if (np->sndflow) {
-               fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
-               if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
-                       flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
-                       if (!flowlabel)
-                               return -EINVAL;
-               }
-       }
+       if (np->sndflow)
+               fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
 
        addr_type = ipv6_addr_type(&usin->sin6_addr);
 
@@ -145,7 +234,7 @@ ipv4_connected:
        }
 
        sk->sk_v6_daddr = *daddr;
-       np->flow_label = fl6.flowlabel;
+       np->flow_label = fl6_flowlabel;
 
        inet->inet_dport = usin->sin6_port;
 
@@ -154,59 +243,13 @@ ipv4_connected:
         *      destination cache for it.
         */
 
-       fl6.flowi6_proto = sk->sk_protocol;
-       fl6.daddr = sk->sk_v6_daddr;
-       fl6.saddr = np->saddr;
-       fl6.flowi6_oif = sk->sk_bound_dev_if;
-       fl6.flowi6_mark = sk->sk_mark;
-       fl6.fl6_dport = inet->inet_dport;
-       fl6.fl6_sport = inet->inet_sport;
-
-       if (!fl6.flowi6_oif)
-               fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
-
-       if (!fl6.flowi6_oif && (addr_type&IPV6_ADDR_MULTICAST))
-               fl6.flowi6_oif = np->mcast_oif;
-
-       security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
-
-       rcu_read_lock();
-       opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
-       final_p = fl6_update_dst(&fl6, opt, &final);
-       rcu_read_unlock();
-
-       dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
-       err = 0;
-       if (IS_ERR(dst)) {
-               err = PTR_ERR(dst);
+       err = ip6_datagram_dst_update(sk, true);
+       if (err)
                goto out;
-       }
-
-       /* source address lookup done in ip6_dst_lookup */
-
-       if (ipv6_addr_any(&np->saddr))
-               np->saddr = fl6.saddr;
-
-       if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
-               sk->sk_v6_rcv_saddr = fl6.saddr;
-               inet->inet_rcv_saddr = LOOPBACK4_IPV6;
-               if (sk->sk_prot->rehash)
-                       sk->sk_prot->rehash(sk);
-       }
-
-       ip6_dst_store(sk, dst,
-                     ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
-                     &sk->sk_v6_daddr : NULL,
-#ifdef CONFIG_IPV6_SUBTREES
-                     ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
-                     &np->saddr :
-#endif
-                     NULL);
 
        sk->sk_state = TCP_ESTABLISHED;
        sk_set_txhash(sk);
 out:
-       fl6_sock_release(flowlabel);
        return err;
 }
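
With the dst setup factored out, connect() now only masks the user-supplied
sin6_flowinfo down to its flow-information bits and stores the label; the
flow-label lease lookup is deferred to ip6_datagram_dst_update(). How the two
masks relate (the constants mirror IPV6_FLOWINFO_MASK/IPV6_FLOWLABEL_MASK from
the kernel's <net/ipv6.h>; userspace sketch):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define FLOWINFO_MASK  htonl(0x0FFFFFFF)        /* tclass + label */
#define FLOWLABEL_MASK htonl(0x000FFFFF)        /* label only */

int main(void)
{
        uint32_t sin6_flowinfo = htonl(0x0ABCDE12);
        uint32_t flowinfo = sin6_flowinfo & FLOWINFO_MASK;
        uint32_t label = flowinfo & FLOWLABEL_MASK;

        printf("flowinfo=%08x label=%08x\n",
               (unsigned)ntohl(flowinfo), (unsigned)ntohl(label));
        return 0;
}
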
 
@@ -407,9 +450,10 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
                copied = len;
        }
        err = skb_copy_datagram_msg(skb, 0, msg, copied);
-       if (err)
-               goto out_free_skb;
-
+       if (unlikely(err)) {
+               kfree_skb(skb);
+               return err;
+       }
        sock_recv_timestamp(msg, sk, skb);
 
        serr = SKB_EXT_ERR(skb);
@@ -466,8 +510,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
        msg->msg_flags |= MSG_ERRQUEUE;
        err = copied;
 
-out_free_skb:
-       kfree_skb(skb);
+       consume_skb(skb);
 out:
        return err;
 }
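
The ipv6_recv_error() change splits the two skb release paths: a failed copy
now frees with kfree_skb() and returns immediately, while the success path
falls through to consume_skb(), so drop-monitor tooling only counts genuine
drops. A sketch of that control-flow shape (hypothetical helpers, not the
kernel skb API):

#include <stdio.h>
#include <stdlib.h>

struct buf { char *data; };

static void drop_buf(struct buf *b)     /* error path, like kfree_skb() */
{
        printf("dropped\n");
        free(b->data);
        free(b);
}

static void consume_buf(struct buf *b)  /* success path, like consume_skb() */
{
        printf("consumed\n");
        free(b->data);
        free(b);
}

static int copy_out(struct buf *b, int fail)
{
        (void)b;
        return fail ? -1 : 0;
}

static int recv_one(struct buf *b, int fail)
{
        if (copy_out(b, fail)) {        /* fail fast, as in the patch */
                drop_buf(b);
                return -1;
        }
        /* ... remaining steps cannot fail ... */
        consume_buf(b);
        return 0;
}

int main(void)
{
        struct buf *a = calloc(1, sizeof(*a));
        struct buf *b = calloc(1, sizeof(*b));

        a->data = malloc(8);
        b->data = malloc(8);
        recv_one(a, 1);                 /* dropped */
        recv_one(b, 0);                 /* consumed */
        return 0;
}
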
@@ -684,14 +727,13 @@ EXPORT_SYMBOL_GPL(ip6_datagram_recv_ctl);
 
 int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
                          struct msghdr *msg, struct flowi6 *fl6,
-                         struct ipv6_txoptions *opt,
-                         int *hlimit, int *tclass, int *dontfrag,
-                         struct sockcm_cookie *sockc)
+                         struct ipcm6_cookie *ipc6, struct sockcm_cookie *sockc)
 {
        struct in6_pktinfo *src_info;
        struct cmsghdr *cmsg;
        struct ipv6_rt_hdr *rthdr;
        struct ipv6_opt_hdr *hdr;
+       struct ipv6_txoptions *opt = ipc6->opt;
        int len;
        int err = 0;
 
@@ -910,8 +952,8 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
                                goto exit_f;
                        }
 
-                       *hlimit = *(int *)CMSG_DATA(cmsg);
-                       if (*hlimit < -1 || *hlimit > 0xff) {
+                       ipc6->hlimit = *(int *)CMSG_DATA(cmsg);
+                       if (ipc6->hlimit < -1 || ipc6->hlimit > 0xff) {
                                err = -EINVAL;
                                goto exit_f;
                        }
@@ -931,7 +973,7 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
                                goto exit_f;
 
                        err = 0;
-                       *tclass = tc;
+                       ipc6->tclass = tc;
 
                        break;
                    }
@@ -949,7 +991,7 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
                                goto exit_f;
 
                        err = 0;
-                       *dontfrag = df;
+                       ipc6->dontfrag = df;
 
                        break;
                    }
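
The signature change above threads a single struct ipcm6_cookie through
ip6_datagram_send_ctl() in place of the separate hlimit/tclass/dontfrag
out-parameters, with opt riding along in the cookie as well. A userspace model
of the consolidation, reusing the patch's hop-limit range check (illustrative
struct layout, not the kernel definition):

#include <stdio.h>

struct ipcm6_cookie { int hlimit; int tclass; int dontfrag; };

static int parse_hlimit_cmsg(struct ipcm6_cookie *ipc6, int hl)
{
        if (hl < -1 || hl > 0xff)       /* mirrors the -EINVAL check */
                return -1;
        ipc6->hlimit = hl;
        return 0;
}

int main(void)
{
        /* one initializer instead of three scattered locals */
        struct ipcm6_cookie ipc6 = { .hlimit = -1, .tclass = -1, .dontfrag = -1 };

        if (parse_hlimit_cmsg(&ipc6, 64) == 0)
                printf("hlimit=%d tclass=%d\n", ipc6.hlimit, ipc6.tclass);
        return 0;
}
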
index ea7c4d64a00adad60a634afb2c6efca4ab029799..8de5dd7aaa05cf50aede2fb7255215df527349d7 100644 (file)
@@ -258,8 +258,8 @@ static int ipv6_destopt_rcv(struct sk_buff *skb)
        if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
            !pskb_may_pull(skb, (skb_transport_offset(skb) +
                                 ((skb_transport_header(skb)[1] + 1) << 3)))) {
-               IP6_INC_STATS_BH(dev_net(dst->dev), ip6_dst_idev(dst),
-                                IPSTATS_MIB_INHDRERRORS);
+               __IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
+                               IPSTATS_MIB_INHDRERRORS);
                kfree_skb(skb);
                return -1;
        }
@@ -280,8 +280,8 @@ static int ipv6_destopt_rcv(struct sk_buff *skb)
                return 1;
        }
 
-       IP6_INC_STATS_BH(dev_net(dst->dev),
-                        ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
+       __IP6_INC_STATS(dev_net(dst->dev),
+                       ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
        return -1;
 }
 
@@ -309,8 +309,8 @@ static int ipv6_rthdr_rcv(struct sk_buff *skb)
        if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
            !pskb_may_pull(skb, (skb_transport_offset(skb) +
                                 ((skb_transport_header(skb)[1] + 1) << 3)))) {
-               IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
-                                IPSTATS_MIB_INHDRERRORS);
+               __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+                               IPSTATS_MIB_INHDRERRORS);
                kfree_skb(skb);
                return -1;
        }
@@ -319,8 +319,8 @@ static int ipv6_rthdr_rcv(struct sk_buff *skb)
 
        if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ||
            skb->pkt_type != PACKET_HOST) {
-               IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
-                                IPSTATS_MIB_INADDRERRORS);
+               __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+                               IPSTATS_MIB_INADDRERRORS);
                kfree_skb(skb);
                return -1;
        }
@@ -334,8 +334,8 @@ looped_back:
                         * processed by own
                         */
                        if (!addr) {
-                               IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
-                                                IPSTATS_MIB_INADDRERRORS);
+                               __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+                                               IPSTATS_MIB_INADDRERRORS);
                                kfree_skb(skb);
                                return -1;
                        }
@@ -360,8 +360,8 @@ looped_back:
                        goto unknown_rh;
                /* Silently discard invalid RTH type 2 */
                if (hdr->hdrlen != 2 || hdr->segments_left != 1) {
-                       IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
-                                        IPSTATS_MIB_INHDRERRORS);
+                       __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+                                       IPSTATS_MIB_INHDRERRORS);
                        kfree_skb(skb);
                        return -1;
                }
@@ -379,8 +379,8 @@ looped_back:
        n = hdr->hdrlen >> 1;
 
        if (hdr->segments_left > n) {
-               IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
-                                IPSTATS_MIB_INHDRERRORS);
+               __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+                               IPSTATS_MIB_INHDRERRORS);
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                  ((&hdr->segments_left) -
                                   skb_network_header(skb)));
@@ -393,8 +393,8 @@ looped_back:
        if (skb_cloned(skb)) {
                /* the copy is a forwarded packet */
                if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
-                       IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
-                                        IPSTATS_MIB_OUTDISCARDS);
+                       __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+                                       IPSTATS_MIB_OUTDISCARDS);
                        kfree_skb(skb);
                        return -1;
                }
@@ -416,14 +416,14 @@ looped_back:
                if (xfrm6_input_addr(skb, (xfrm_address_t *)addr,
                                     (xfrm_address_t *)&ipv6_hdr(skb)->saddr,
                                     IPPROTO_ROUTING) < 0) {
-                       IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
-                                        IPSTATS_MIB_INADDRERRORS);
+                       __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+                                       IPSTATS_MIB_INADDRERRORS);
                        kfree_skb(skb);
                        return -1;
                }
                if (!ipv6_chk_home_addr(dev_net(skb_dst(skb)->dev), addr)) {
-                       IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
-                                        IPSTATS_MIB_INADDRERRORS);
+                       __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+                                       IPSTATS_MIB_INADDRERRORS);
                        kfree_skb(skb);
                        return -1;
                }
@@ -434,8 +434,8 @@ looped_back:
        }
 
        if (ipv6_addr_is_multicast(addr)) {
-               IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
-                                IPSTATS_MIB_INADDRERRORS);
+               __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+                               IPSTATS_MIB_INADDRERRORS);
                kfree_skb(skb);
                return -1;
        }
@@ -454,8 +454,8 @@ looped_back:
 
        if (skb_dst(skb)->dev->flags&IFF_LOOPBACK) {
                if (ipv6_hdr(skb)->hop_limit <= 1) {
-                       IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
-                                        IPSTATS_MIB_INHDRERRORS);
+                       __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+                                       IPSTATS_MIB_INHDRERRORS);
                        icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
                                    0);
                        kfree_skb(skb);
@@ -470,7 +470,7 @@ looped_back:
        return -1;
 
 unknown_rh:
-       IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
+       __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
        icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                          (&hdr->type) - skb_network_header(skb));
        return -1;
@@ -568,28 +568,28 @@ static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
        if (nh[optoff + 1] != 4 || (optoff & 3) != 2) {
                net_dbg_ratelimited("ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
                                    nh[optoff+1]);
-               IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
-                                IPSTATS_MIB_INHDRERRORS);
+               __IP6_INC_STATS(net, ipv6_skb_idev(skb),
+                               IPSTATS_MIB_INHDRERRORS);
                goto drop;
        }
 
        pkt_len = ntohl(*(__be32 *)(nh + optoff + 2));
        if (pkt_len <= IPV6_MAXPLEN) {
-               IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
-                                IPSTATS_MIB_INHDRERRORS);
+               __IP6_INC_STATS(net, ipv6_skb_idev(skb),
+                               IPSTATS_MIB_INHDRERRORS);
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2);
                return false;
        }
        if (ipv6_hdr(skb)->payload_len) {
-               IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
-                                IPSTATS_MIB_INHDRERRORS);
+               __IP6_INC_STATS(net, ipv6_skb_idev(skb),
+                               IPSTATS_MIB_INHDRERRORS);
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
                return false;
        }
 
        if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
-               IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
-                                IPSTATS_MIB_INTRUNCATEDPKTS);
+               __IP6_INC_STATS(net, ipv6_skb_idev(skb),
+                               IPSTATS_MIB_INTRUNCATEDPKTS);
                goto drop;
        }
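
The IP6_INC_STATS_BH -> __IP6_INC_STATS renames in this file follow the SNMP
counter convention adopted by this merge: the double-underscore form assumes
the caller is already in a context where a plain per-cpu increment is safe
(e.g. softirq), while the unprefixed form is callable from anywhere. A
userspace caricature of that split (not the kernel macros):

#include <stdio.h>

static unsigned long mib[4];
#define MIB_INHDRERRORS 0

/* caller guarantees exclusion, so a plain increment suffices */
#define __STATS_INC(f)  do { mib[f]++; } while (0)
/* safe from any context: modeled here with an atomic add */
#define STATS_INC(f)    __atomic_fetch_add(&mib[f], 1, __ATOMIC_RELAXED)

int main(void)
{
        __STATS_INC(MIB_INHDRERRORS);
        STATS_INC(MIB_INHDRERRORS);
        printf("inhdrerrors=%lu\n", mib[MIB_INHDRERRORS]);      /* 2 */
        return 0;
}
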
 
index 6b573ebe49deffc00c145d5aa750da90e06eb844..4527285fcaa2c2c8134c089b88e0cfeeeddad292 100644 (file)
@@ -401,10 +401,10 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
        struct flowi6 fl6;
        struct icmpv6_msg msg;
        struct sockcm_cookie sockc_unused = {0};
+       struct ipcm6_cookie ipc6;
        int iif = 0;
        int addr_type = 0;
        int len;
-       int hlimit;
        int err = 0;
        u32 mark = IP6_REPLY_MARK(net, skb->mark);
 
@@ -446,6 +446,8 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
 
        if (__ipv6_addr_needs_scope_id(addr_type))
                iif = skb->dev->ifindex;
+       else
+               iif = l3mdev_master_ifindex(skb->dev);
 
        /*
         *      Must not send error if the source does not uniquely
@@ -500,14 +502,14 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
        else if (!fl6.flowi6_oif)
                fl6.flowi6_oif = np->ucast_oif;
 
-       if (!fl6.flowi6_oif)
-               fl6.flowi6_oif = l3mdev_master_ifindex(skb->dev);
-
        dst = icmpv6_route_lookup(net, skb, sk, &fl6);
        if (IS_ERR(dst))
                goto out;
 
-       hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
+       ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
+       ipc6.tclass = np->tclass;
+       ipc6.dontfrag = np->dontfrag;
+       ipc6.opt = NULL;
 
        msg.skb = skb;
        msg.offset = skb_network_offset(skb);
@@ -526,9 +528,9 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
 
        err = ip6_append_data(sk, icmpv6_getfrag, &msg,
                              len + sizeof(struct icmp6hdr),
-                             sizeof(struct icmp6hdr), hlimit,
-                             np->tclass, NULL, &fl6, (struct rt6_info *)dst,
-                             MSG_DONTWAIT, np->dontfrag, &sockc_unused);
+                             sizeof(struct icmp6hdr),
+                             &ipc6, &fl6, (struct rt6_info *)dst,
+                             MSG_DONTWAIT, &sockc_unused);
        if (err) {
                ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
                ip6_flush_pending_frames(sk);
@@ -563,9 +565,8 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
        struct flowi6 fl6;
        struct icmpv6_msg msg;
        struct dst_entry *dst;
+       struct ipcm6_cookie ipc6;
        int err = 0;
-       int hlimit;
-       u8 tclass;
        u32 mark = IP6_REPLY_MARK(net, skb->mark);
        struct sockcm_cookie sockc_unused = {0};
 
@@ -607,22 +608,24 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
        if (IS_ERR(dst))
                goto out;
 
-       hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
-
        idev = __in6_dev_get(skb->dev);
 
        msg.skb = skb;
        msg.offset = 0;
        msg.type = ICMPV6_ECHO_REPLY;
 
-       tclass = ipv6_get_dsfield(ipv6_hdr(skb));
+       ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
+       ipc6.tclass = ipv6_get_dsfield(ipv6_hdr(skb));
+       ipc6.dontfrag = np->dontfrag;
+       ipc6.opt = NULL;
+
        err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
-                               sizeof(struct icmp6hdr), hlimit, tclass, NULL, &fl6,
+                               sizeof(struct icmp6hdr), &ipc6, &fl6,
                                (struct rt6_info *)dst, MSG_DONTWAIT,
-                               np->dontfrag, &sockc_unused);
+                               &sockc_unused);
 
        if (err) {
-               ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
+               __ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
                ip6_flush_pending_frames(sk);
        } else {
                err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
@@ -674,7 +677,7 @@ void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
        return;
 
 out:
-       ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
+       __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
 }
 
 /*
@@ -710,7 +713,7 @@ static int icmpv6_rcv(struct sk_buff *skb)
                skb_set_network_header(skb, nh);
        }
 
-       ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_INMSGS);
+       __ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_INMSGS);
 
        saddr = &ipv6_hdr(skb)->saddr;
        daddr = &ipv6_hdr(skb)->daddr;
@@ -728,7 +731,7 @@ static int icmpv6_rcv(struct sk_buff *skb)
 
        type = hdr->icmp6_type;
 
-       ICMP6MSGIN_INC_STATS_BH(dev_net(dev), idev, type);
+       ICMP6MSGIN_INC_STATS(dev_net(dev), idev, type);
 
        switch (type) {
        case ICMPV6_ECHO_REQUEST:
@@ -812,9 +815,9 @@ static int icmpv6_rcv(struct sk_buff *skb)
        return 0;
 
 csum_error:
-       ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_CSUMERRORS);
+       __ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_CSUMERRORS);
 discard_it:
-       ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_INERRORS);
+       __ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_INERRORS);
 drop_no_count:
        kfree_skb(skb);
        return 0;
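
The iif selection in icmp6_send() moves the VRF fallback up front: scoped
source addresses still take the arrival device's ifindex, everything else now
uses l3mdev_master_ifindex() directly instead of patching fl6.flowi6_oif
later. A rough userspace test for "needs a scope id" (simplified to
link-local unicast and interface-local multicast; the kernel's
__ipv6_addr_needs_scope_id() is the real check):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

static int needs_scope_id(const struct in6_addr *a)
{
        /* fe80::/10 link-local unicast */
        if (a->s6_addr[0] == 0xfe && (a->s6_addr[1] & 0xc0) == 0x80)
                return 1;
        /* ff01::/16 interface-local multicast */
        if (a->s6_addr[0] == 0xff && (a->s6_addr[1] & 0x0f) == 0x01)
                return 1;
        return 0;
}

int main(void)
{
        struct in6_addr ll, global;

        inet_pton(AF_INET6, "fe80::1", &ll);
        inet_pton(AF_INET6, "2001:db8::1", &global);
        printf("fe80::1 -> %d\n", needs_scope_id(&ll));         /* 1 */
        printf("2001:db8::1 -> %d\n", needs_scope_id(&global)); /* 0 */
        return 0;
}
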
index 28542cb2b38712c3fa28b637271c9836fccebb10..d08fd2d48a7861dcfa1d3bff79cd8867c3ec9669 100644 (file)
 #include <net/protocol.h>
 #include <uapi/linux/ila.h>
 
+struct ila_locator {
+       union {
+               __u8            v8[8];
+               __be16          v16[4];
+               __be32          v32[2];
+               __be64          v64;
+       };
+};
+
+struct ila_identifier {
+       union {
+               struct {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+                       u8 __space:4;
+                       u8 csum_neutral:1;
+                       u8 type:3;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+                       u8 type:3;
+                       u8 csum_neutral:1;
+                       u8 __space:4;
+#else
+#error  "Adjust your <asm/byteorder.h> defines"
+#endif
+                       u8 __space2[7];
+               };
+               __u8            v8[8];
+               __be16          v16[4];
+               __be32          v32[2];
+               __be64          v64;
+       };
+};
+
+enum {
+       ILA_ATYPE_IID = 0,
+       ILA_ATYPE_LUID,
+       ILA_ATYPE_VIRT_V4,
+       ILA_ATYPE_VIRT_UNI_V6,
+       ILA_ATYPE_VIRT_MULTI_V6,
+       ILA_ATYPE_RSVD_1,
+       ILA_ATYPE_RSVD_2,
+       ILA_ATYPE_RSVD_3,
+};
+
+#define CSUM_NEUTRAL_FLAG      htonl(0x10000000)
+
+struct ila_addr {
+       union {
+               struct in6_addr addr;
+               struct {
+                       struct ila_locator loc;
+                       struct ila_identifier ident;
+               };
+       };
+};
+
+static inline struct ila_addr *ila_a2i(struct in6_addr *addr)
+{
+       return (struct ila_addr *)addr;
+}
+
+static inline bool ila_addr_is_ila(struct ila_addr *iaddr)
+{
+       return (iaddr->ident.type != ILA_ATYPE_IID);
+}
+
 struct ila_params {
-       __be64 locator;
-       __be64 locator_match;
+       struct ila_locator locator;
+       struct ila_locator locator_match;
        __wsum csum_diff;
+       u8 csum_mode;
 };
 
 static inline __wsum compute_csum_diff8(const __be32 *from, const __be32 *to)
@@ -38,7 +104,14 @@ static inline __wsum compute_csum_diff8(const __be32 *from, const __be32 *to)
        return csum_partial(diff, sizeof(diff), 0);
 }
 
-void update_ipv6_locator(struct sk_buff *skb, struct ila_params *p);
+static inline bool ila_csum_neutral_set(struct ila_identifier ident)
+{
+       return !!(ident.csum_neutral);
+}
+
+void ila_update_ipv6_locator(struct sk_buff *skb, struct ila_params *p);
+
+void ila_init_saved_csum(struct ila_params *p);
 
 int ila_lwt_init(void);
 void ila_lwt_fini(void);
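
The new ila_addr union reads a 128-bit IPv6 address as a 64-bit locator
followed by a 64-bit identifier, with the identifier's leading byte holding
the 3-bit type and the checksum-neutral C-bit. A runnable sketch of that
carving (plain byte arrays instead of the kernel's bitfields; C11 anonymous
structs):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ila_view {
        union {
                struct in6_addr addr;
                struct {
                        uint8_t loc[8];         /* high 64 bits */
                        uint8_t ident[8];       /* low 64 bits */
                };
        };
};

int main(void)
{
        struct ila_view v;

        memset(&v, 0, sizeof(v));
        inet_pton(AF_INET6, "2001:db8:1:2:2000::5", &v.addr);
        /* type: top 3 bits of the identifier; C-bit: the next bit down */
        printf("type=%u csum_neutral=%u\n",
               v.ident[0] >> 5, (v.ident[0] >> 4) & 1); /* type=1 (LUID), 0 */
        return 0;
}
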
index 30613050e4cab01bf26d090bcc4765e96f2a097a..0e94042d12891ad4388d5924315d4a8403525b3c 100644 (file)
 
 static __wsum get_csum_diff(struct ipv6hdr *ip6h, struct ila_params *p)
 {
-       if (*(__be64 *)&ip6h->daddr == p->locator_match)
+       struct ila_addr *iaddr = ila_a2i(&ip6h->daddr);
+
+       if (p->locator_match.v64)
                return p->csum_diff;
        else
-               return compute_csum_diff8((__be32 *)&ip6h->daddr,
+               return compute_csum_diff8((__be32 *)&iaddr->loc,
+                                         (__be32 *)&p->locator);
+}
+
+static void ila_csum_do_neutral(struct ila_addr *iaddr,
+                               struct ila_params *p)
+{
+       __sum16 *adjust = (__force __sum16 *)&iaddr->ident.v16[3];
+       __wsum diff, fval;
+
+       /* Check if checksum adjust value has been cached */
+       if (p->locator_match.v64) {
+               diff = p->csum_diff;
+       } else {
+               diff = compute_csum_diff8((__be32 *)iaddr,
                                          (__be32 *)&p->locator);
+       }
+
+       fval = (__force __wsum)(ila_csum_neutral_set(iaddr->ident) ?
+                       ~CSUM_NEUTRAL_FLAG : CSUM_NEUTRAL_FLAG);
+
+       diff = csum_add(diff, fval);
+
+       *adjust = ~csum_fold(csum_add(diff, csum_unfold(*adjust)));
+
+       /* Flip the csum-neutral bit. Either we are doing a SIR->ILA
+        * translation with ILA_CSUM_NEUTRAL_MAP as the csum_mode
+        * and the C-bit is not set, or we are doing an ILA->SIR
+        * translation and the C-bit is set.
+        */
+       iaddr->ident.csum_neutral ^= 1;
 }
 
-void update_ipv6_locator(struct sk_buff *skb, struct ila_params *p)
+static void ila_csum_adjust_transport(struct sk_buff *skb,
+                                     struct ila_params *p)
 {
        __wsum diff;
        struct ipv6hdr *ip6h = ipv6_hdr(skb);
+       struct ila_addr *iaddr = ila_a2i(&ip6h->daddr);
        size_t nhoff = sizeof(struct ipv6hdr);
 
-       /* First update checksum */
        switch (ip6h->nexthdr) {
        case NEXTHDR_TCP:
                if (likely(pskb_may_pull(skb, nhoff + sizeof(struct tcphdr)))) {
@@ -68,7 +100,46 @@ void update_ipv6_locator(struct sk_buff *skb, struct ila_params *p)
        }
 
        /* Now change destination address */
-       *(__be64 *)&ip6h->daddr = p->locator;
+       iaddr->loc = p->locator;
+}
+
+void ila_update_ipv6_locator(struct sk_buff *skb, struct ila_params *p)
+{
+       struct ipv6hdr *ip6h = ipv6_hdr(skb);
+       struct ila_addr *iaddr = ila_a2i(&ip6h->daddr);
+
+       /* First deal with the transport checksum */
+       if (ila_csum_neutral_set(iaddr->ident)) {
+               /* C-bit is set in the identifier, indicating that this
+                * is an ILA address being translated back to a SIR address.
+                * Perform (receiver) checksum-neutral translation.
+                */
+               ila_csum_do_neutral(iaddr, p);
+       } else {
+               switch (p->csum_mode) {
+               case ILA_CSUM_ADJUST_TRANSPORT:
+                       ila_csum_adjust_transport(skb, p);
+                       break;
+               case ILA_CSUM_NEUTRAL_MAP:
+                       ila_csum_do_neutral(iaddr, p);
+                       break;
+               case ILA_CSUM_NO_ACTION:
+                       break;
+               }
+       }
+
+       /* Now change destination address */
+       iaddr->loc = p->locator;
+}
+
+void ila_init_saved_csum(struct ila_params *p)
+{
+       if (!p->locator_match.v64)
+               return;
+
+       p->csum_diff = compute_csum_diff8(
+                               (__be32 *)&p->locator_match,
+                               (__be32 *)&p->locator);
 }
 
 static int __init ila_init(void)
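
The checksum-neutral scheme above keeps the transport checksum valid without
rewriting it: flipping the C-bit perturbs the one's-complement sum over the
address, so the same delta (plus the locator difference, when translating) is
folded into the identifier's final 16 bits. A tiny demonstration that the sum
is preserved when only the C-bit flips (a simplified 16-bit model of
csum_add/csum_fold, not the kernel helpers):

#include <stdint.h>
#include <stdio.h>

static uint16_t csum16(const uint16_t *w, int n)
{
        uint32_t s = 0;

        while (n--)
                s += *w++;
        while (s >> 16)
                s = (s & 0xffff) + (s >> 16);
        return (uint16_t)s;
}

int main(void)
{
        uint16_t ident[4] = { 0x2000, 0x0000, 0x0000, 0x0005 };
        uint16_t before = csum16(ident, 4);
        uint32_t s;

        ident[0] ^= 0x1000;              /* set the C-bit (CSUM_NEUTRAL_FLAG) */
        s = ident[3] + 0xffffu - 0x1000; /* one's-complement subtract 0x1000 */
        while (s >> 16)
                s = (s & 0xffff) + (s >> 16);
        ident[3] = (uint16_t)s;          /* the "adjust" word */

        printf("before=%04x after=%04x\n", before, csum16(ident, 4));
        return 0;
}
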
index 2ae3c4fd8aabc65a7206a73480b3dca4b975f7e3..1dfb64166d7d71e8ac7d673b580baa00e6f73748 100644 (file)
@@ -26,7 +26,7 @@ static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb)
        if (skb->protocol != htons(ETH_P_IPV6))
                goto drop;
 
-       update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate));
+       ila_update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate));
 
        return dst->lwtstate->orig_output(net, sk, skb);
 
@@ -42,7 +42,7 @@ static int ila_input(struct sk_buff *skb)
        if (skb->protocol != htons(ETH_P_IPV6))
                goto drop;
 
-       update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate));
+       ila_update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate));
 
        return dst->lwtstate->orig_input(skb);
 
@@ -53,6 +53,7 @@ drop:
 
 static struct nla_policy ila_nl_policy[ILA_ATTR_MAX + 1] = {
        [ILA_ATTR_LOCATOR] = { .type = NLA_U64, },
+       [ILA_ATTR_CSUM_MODE] = { .type = NLA_U8, },
 };
 
 static int ila_build_state(struct net_device *dev, struct nlattr *nla,
@@ -64,11 +65,28 @@ static int ila_build_state(struct net_device *dev, struct nlattr *nla,
        size_t encap_len = sizeof(*p);
        struct lwtunnel_state *newts;
        const struct fib6_config *cfg6 = cfg;
+       struct ila_addr *iaddr;
        int ret;
 
        if (family != AF_INET6)
                return -EINVAL;
 
+       if (cfg6->fc_dst_len < sizeof(struct ila_locator) + 1) {
+               /* Need to have the full locator and at least the type
+                * field included in the destination prefix.
+                */
+               return -EINVAL;
+       }
+
+       iaddr = (struct ila_addr *)&cfg6->fc_dst;
+
+       if (!ila_addr_is_ila(iaddr) || ila_csum_neutral_set(iaddr->ident)) {
+               /* Don't allow translation for a non-ILA address, or for
+                * an address whose checksum-neutral flag is already set.
+                */
+               return -EINVAL;
+       }
+
        ret = nla_parse_nested(tb, ILA_ATTR_MAX, nla,
                               ila_nl_policy);
        if (ret < 0)
@@ -84,16 +102,19 @@ static int ila_build_state(struct net_device *dev, struct nlattr *nla,
        newts->len = encap_len;
        p = ila_params_lwtunnel(newts);
 
-       p->locator = (__force __be64)nla_get_u64(tb[ILA_ATTR_LOCATOR]);
+       p->locator.v64 = (__force __be64)nla_get_u64(tb[ILA_ATTR_LOCATOR]);
 
-       if (cfg6->fc_dst_len > sizeof(__be64)) {
-               /* Precompute checksum difference for translation since we
-                * know both the old locator and the new one.
-                */
-               p->locator_match = *(__be64 *)&cfg6->fc_dst;
-               p->csum_diff = compute_csum_diff8(
-                       (__be32 *)&p->locator_match, (__be32 *)&p->locator);
-       }
+       /* Precompute checksum difference for translation since we
+        * know both the old locator and the new one.
+        */
+       p->locator_match = iaddr->loc;
+       p->csum_diff = compute_csum_diff8(
+               (__be32 *)&p->locator_match, (__be32 *)&p->locator);
+
+       if (tb[ILA_ATTR_CSUM_MODE])
+               p->csum_mode = nla_get_u8(tb[ILA_ATTR_CSUM_MODE]);
+
+       ila_init_saved_csum(p);
 
        newts->type = LWTUNNEL_ENCAP_ILA;
        newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT |
@@ -109,7 +130,10 @@ static int ila_fill_encap_info(struct sk_buff *skb,
 {
        struct ila_params *p = ila_params_lwtunnel(lwtstate);
 
-       if (nla_put_u64(skb, ILA_ATTR_LOCATOR, (__force u64)p->locator))
+       if (nla_put_u64_64bit(skb, ILA_ATTR_LOCATOR, (__force u64)p->locator.v64,
+                             ILA_ATTR_PAD))
+               goto nla_put_failure;
+       if (nla_put_u8(skb, ILA_ATTR_CSUM_MODE, (__force u8)p->csum_mode))
                goto nla_put_failure;
 
        return 0;
@@ -120,8 +144,9 @@ nla_put_failure:
 
 static int ila_encap_nlsize(struct lwtunnel_state *lwtstate)
 {
-       /* No encapsulation overhead */
-       return 0;
+       return nla_total_size_64bit(sizeof(u64)) + /* ILA_ATTR_LOCATOR */
+              nla_total_size(sizeof(u8)) +        /* ILA_ATTR_CSUM_MODE */
+              0;
 }
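
The corrected nlsize hook has to account for netlink framing, not just
payloads: every attribute carries a 4-byte header and is padded to a 4-byte
boundary, and nla_put_u64_64bit() may additionally emit a pad attribute
(ILA_ATTR_PAD here) so the u64 payload lands 64-bit aligned, which
nla_total_size_64bit() reserves space for. A small sketch of the alignment
arithmetic, assuming the standard netlink attribute layout:

    #include <stdio.h>

    struct nlattr_hdr {     /* mirrors struct nlattr: 16-bit len + type */
            unsigned short nla_len;
            unsigned short nla_type;
    };

    #define NLA_ALIGNTO     4
    #define NLA_ALIGN(len)  (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
    #define NLA_HDRLEN      ((int)NLA_ALIGN(sizeof(struct nlattr_hdr)))

    static int nla_total(int payload)
    {
            return NLA_ALIGN(NLA_HDRLEN + payload);
    }

    int main(void)
    {
            /* ILA_ATTR_LOCATOR (u64) and ILA_ATTR_CSUM_MODE (u8); the
             * possible zero-payload pad attribute adds nla_total(0)
             * more, i.e. another 4 bytes.
             */
            printf("u64: %d, u8: %d, pad: %d\n",
                   nla_total(8), nla_total(1), nla_total(0));
            return 0;       /* prints: u64: 12, u8: 8, pad: 4 */
    }
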
 
 static int ila_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
@@ -129,7 +154,7 @@ static int ila_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
        struct ila_params *a_p = ila_params_lwtunnel(a);
        struct ila_params *b_p = ila_params_lwtunnel(b);
 
-       return (a_p->locator != b_p->locator);
+       return (a_p->locator.v64 != b_p->locator.v64);
 }
 
 static const struct lwtunnel_encap_ops ila_encap_ops = {
index 0b03533453e4e9fdd3f9208ac0fe8913f169f9be..a90e57229c6c9004462e42281175b6cfdc9dc965 100644 (file)
 
 struct ila_xlat_params {
        struct ila_params ip;
-       __be64 identifier;
        int ifindex;
-       unsigned int dir;
 };
 
 struct ila_map {
-       struct ila_xlat_params p;
+       struct ila_xlat_params xp;
        struct rhash_head node;
        struct ila_map __rcu *next;
        struct rcu_head rcu;
@@ -66,31 +64,29 @@ static __always_inline void __ila_hash_secret_init(void)
        net_get_random_once(&hashrnd, sizeof(hashrnd));
 }
 
-static inline u32 ila_identifier_hash(__be64 identifier)
+static inline u32 ila_locator_hash(struct ila_locator loc)
 {
-       u32 *v = (u32 *)&identifier;
+       u32 *v = (u32 *)loc.v32;
 
        return jhash_2words(v[0], v[1], hashrnd);
 }
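
With identifiers gone from the lookup key, the table hashes the 64-bit
locator as two 32-bit words mixed with a once-per-boot random seed (hashrnd),
keeping bucket placement unpredictable to remote senders. A stand-alone
sketch of the same shape, with a stand-in mixer where the kernel uses
jhash_2words():

    #include <stdint.h>
    #include <string.h>

    static uint32_t hashrnd;        /* seeded once in the kernel,
                                     * cf. net_get_random_once() */

    /* Stand-in for jhash_2words(); any well-mixing 2-word hash works
     * for the illustration.
     */
    static uint32_t mix2(uint32_t a, uint32_t b, uint32_t seed)
    {
            uint64_t x = ((uint64_t)a << 32) | b;

            x ^= seed;
            x *= 0x9e3779b97f4a7c15ull;     /* golden-ratio constant */
            return (uint32_t)(x ^ (x >> 32));
    }

    static uint32_t locator_hash(uint64_t loc)
    {
            uint32_t v[2];

            memcpy(v, &loc, sizeof(v));     /* same view as loc.v32[] */
            return mix2(v[0], v[1], hashrnd);
    }
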
 
-static inline spinlock_t *ila_get_lock(struct ila_net *ilan, __be64 identifier)
+static inline spinlock_t *ila_get_lock(struct ila_net *ilan,
+                                      struct ila_locator loc)
 {
-       return &ilan->locks[ila_identifier_hash(identifier) & ilan->locks_mask];
+       return &ilan->locks[ila_locator_hash(loc) & ilan->locks_mask];
 }
 
-static inline int ila_cmp_wildcards(struct ila_map *ila, __be64 loc,
-                                   int ifindex, unsigned int dir)
+static inline int ila_cmp_wildcards(struct ila_map *ila,
+                                   struct ila_addr *iaddr, int ifindex)
 {
-       return (ila->p.ip.locator_match && ila->p.ip.locator_match != loc) ||
-              (ila->p.ifindex && ila->p.ifindex != ifindex) ||
-              !(ila->p.dir & dir);
+       return (ila->xp.ifindex && ila->xp.ifindex != ifindex);
 }
 
-static inline int ila_cmp_params(struct ila_map *ila, struct ila_xlat_params *p)
+static inline int ila_cmp_params(struct ila_map *ila,
+                                struct ila_xlat_params *xp)
 {
-       return (ila->p.ip.locator_match != p->ip.locator_match) ||
-              (ila->p.ifindex != p->ifindex) ||
-              (ila->p.dir != p->dir);
+       return (ila->xp.ifindex != xp->ifindex);
 }
 
 static int ila_cmpfn(struct rhashtable_compare_arg *arg,
@@ -98,17 +94,14 @@ static int ila_cmpfn(struct rhashtable_compare_arg *arg,
 {
        const struct ila_map *ila = obj;
 
-       return (ila->p.identifier != *(__be64 *)arg->key);
+       return (ila->xp.ip.locator_match.v64 != *(__be64 *)arg->key);
 }
 
 static inline int ila_order(struct ila_map *ila)
 {
        int score = 0;
 
-       if (ila->p.ip.locator_match)
-               score += 1 << 0;
-
-       if (ila->p.ifindex)
+       if (ila->xp.ifindex)
                score += 1 << 1;
 
        return score;
@@ -117,7 +110,7 @@ static inline int ila_order(struct ila_map *ila)
 static const struct rhashtable_params rht_params = {
        .nelem_hint = 1024,
        .head_offset = offsetof(struct ila_map, node),
-       .key_offset = offsetof(struct ila_map, p.identifier),
+       .key_offset = offsetof(struct ila_map, xp.ip.locator_match),
        .key_len = sizeof(u64), /* locator */
        .max_size = 1048576,
        .min_size = 256,
@@ -136,50 +129,45 @@ static struct genl_family ila_nl_family = {
 };
 
 static struct nla_policy ila_nl_policy[ILA_ATTR_MAX + 1] = {
-       [ILA_ATTR_IDENTIFIER] = { .type = NLA_U64, },
        [ILA_ATTR_LOCATOR] = { .type = NLA_U64, },
        [ILA_ATTR_LOCATOR_MATCH] = { .type = NLA_U64, },
        [ILA_ATTR_IFINDEX] = { .type = NLA_U32, },
-       [ILA_ATTR_DIR] = { .type = NLA_U32, },
+       [ILA_ATTR_CSUM_MODE] = { .type = NLA_U8, },
 };
 
 static int parse_nl_config(struct genl_info *info,
-                          struct ila_xlat_params *p)
+                          struct ila_xlat_params *xp)
 {
-       memset(p, 0, sizeof(*p));
-
-       if (info->attrs[ILA_ATTR_IDENTIFIER])
-               p->identifier = (__force __be64)nla_get_u64(
-                       info->attrs[ILA_ATTR_IDENTIFIER]);
+       memset(xp, 0, sizeof(*xp));
 
        if (info->attrs[ILA_ATTR_LOCATOR])
-               p->ip.locator = (__force __be64)nla_get_u64(
+               xp->ip.locator.v64 = (__force __be64)nla_get_u64(
                        info->attrs[ILA_ATTR_LOCATOR]);
 
        if (info->attrs[ILA_ATTR_LOCATOR_MATCH])
-               p->ip.locator_match = (__force __be64)nla_get_u64(
+               xp->ip.locator_match.v64 = (__force __be64)nla_get_u64(
                        info->attrs[ILA_ATTR_LOCATOR_MATCH]);
 
-       if (info->attrs[ILA_ATTR_IFINDEX])
-               p->ifindex = nla_get_s32(info->attrs[ILA_ATTR_IFINDEX]);
+       if (info->attrs[ILA_ATTR_CSUM_MODE])
+               xp->ip.csum_mode = nla_get_u8(info->attrs[ILA_ATTR_CSUM_MODE]);
 
-       if (info->attrs[ILA_ATTR_DIR])
-               p->dir = nla_get_u32(info->attrs[ILA_ATTR_DIR]);
+       if (info->attrs[ILA_ATTR_IFINDEX])
+               xp->ifindex = nla_get_s32(info->attrs[ILA_ATTR_IFINDEX]);
 
        return 0;
 }
 
 /* Must be called with rcu readlock */
-static inline struct ila_map *ila_lookup_wildcards(__be64 id, __be64 loc,
+static inline struct ila_map *ila_lookup_wildcards(struct ila_addr *iaddr,
                                                   int ifindex,
-                                                  unsigned int dir,
                                                   struct ila_net *ilan)
 {
        struct ila_map *ila;
 
-       ila = rhashtable_lookup_fast(&ilan->rhash_table, &id, rht_params);
+       ila = rhashtable_lookup_fast(&ilan->rhash_table, &iaddr->loc,
+                                    rht_params);
        while (ila) {
-               if (!ila_cmp_wildcards(ila, loc, ifindex, dir))
+               if (!ila_cmp_wildcards(ila, iaddr, ifindex))
                        return ila;
                ila = rcu_access_pointer(ila->next);
        }
@@ -188,15 +176,16 @@ static inline struct ila_map *ila_lookup_wildcards(__be64 id, __be64 loc,
 }
 
 /* Must be called with rcu readlock */
-static inline struct ila_map *ila_lookup_by_params(struct ila_xlat_params *p,
+static inline struct ila_map *ila_lookup_by_params(struct ila_xlat_params *xp,
                                                   struct ila_net *ilan)
 {
        struct ila_map *ila;
 
-       ila = rhashtable_lookup_fast(&ilan->rhash_table, &p->identifier,
+       ila = rhashtable_lookup_fast(&ilan->rhash_table,
+                                    &xp->ip.locator_match,
                                     rht_params);
        while (ila) {
-               if (!ila_cmp_params(ila, p))
+               if (!ila_cmp_params(ila, xp))
                        return ila;
                ila = rcu_access_pointer(ila->next);
        }
@@ -221,14 +210,14 @@ static void ila_free_cb(void *ptr, void *arg)
        }
 }
 
-static int ila_xlat_addr(struct sk_buff *skb, int dir);
+static int ila_xlat_addr(struct sk_buff *skb);
 
 static unsigned int
 ila_nf_input(void *priv,
             struct sk_buff *skb,
             const struct nf_hook_state *state)
 {
-       ila_xlat_addr(skb, ILA_DIR_IN);
+       ila_xlat_addr(skb);
        return NF_ACCEPT;
 }
 
@@ -241,11 +230,11 @@ static struct nf_hook_ops ila_nf_hook_ops[] __read_mostly = {
        },
 };
 
-static int ila_add_mapping(struct net *net, struct ila_xlat_params *p)
+static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp)
 {
        struct ila_net *ilan = net_generic(net, ila_net_id);
        struct ila_map *ila, *head;
-       spinlock_t *lock = ila_get_lock(ilan, p->identifier);
+       spinlock_t *lock = ila_get_lock(ilan, xp->ip.locator_match);
        int err = 0, order;
 
        if (!ilan->hooks_registered) {
@@ -264,22 +253,16 @@ static int ila_add_mapping(struct net *net, struct ila_xlat_params *p)
        if (!ila)
                return -ENOMEM;
 
-       ila->p = *p;
+       ila_init_saved_csum(&xp->ip);
 
-       if (p->ip.locator_match) {
-               /* Precompute checksum difference for translation since we
-                * know both the old identifier and the new one.
-                */
-               ila->p.ip.csum_diff = compute_csum_diff8(
-                       (__be32 *)&p->ip.locator_match,
-                       (__be32 *)&p->ip.locator);
-       }
+       ila->xp = *xp;
 
        order = ila_order(ila);
 
        spin_lock(lock);
 
-       head = rhashtable_lookup_fast(&ilan->rhash_table, &p->identifier,
+       head = rhashtable_lookup_fast(&ilan->rhash_table,
+                                     &xp->ip.locator_match,
                                      rht_params);
        if (!head) {
                /* New entry for the rhash_table */
@@ -289,7 +272,7 @@ static int ila_add_mapping(struct net *net, struct ila_xlat_params *p)
                struct ila_map *tila = head, *prev = NULL;
 
                do {
-                       if (!ila_cmp_params(tila, p)) {
+                       if (!ila_cmp_params(tila, xp)) {
                                err = -EEXIST;
                                goto out;
                        }
@@ -326,23 +309,23 @@ out:
        return err;
 }
 
-static int ila_del_mapping(struct net *net, struct ila_xlat_params *p)
+static int ila_del_mapping(struct net *net, struct ila_xlat_params *xp)
 {
        struct ila_net *ilan = net_generic(net, ila_net_id);
        struct ila_map *ila, *head, *prev;
-       spinlock_t *lock = ila_get_lock(ilan, p->identifier);
+       spinlock_t *lock = ila_get_lock(ilan, xp->ip.locator_match);
        int err = -ENOENT;
 
        spin_lock(lock);
 
        head = rhashtable_lookup_fast(&ilan->rhash_table,
-                                     &p->identifier, rht_params);
+                                     &xp->ip.locator_match, rht_params);
        ila = head;
 
        prev = NULL;
 
        while (ila) {
-               if (ila_cmp_params(ila, p)) {
+               if (ila_cmp_params(ila, xp)) {
                        prev = ila;
                        ila = rcu_dereference_protected(ila->next,
                                                        lockdep_is_held(lock));
@@ -404,28 +387,28 @@ static int ila_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info)
 static int ila_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info)
 {
        struct net *net = genl_info_net(info);
-       struct ila_xlat_params p;
+       struct ila_xlat_params xp;
        int err;
 
-       err = parse_nl_config(info, &p);
+       err = parse_nl_config(info, &xp);
        if (err)
                return err;
 
-       ila_del_mapping(net, &p);
+       ila_del_mapping(net, &xp);
 
        return 0;
 }
 
 static int ila_fill_info(struct ila_map *ila, struct sk_buff *msg)
 {
-       if (nla_put_u64(msg, ILA_ATTR_IDENTIFIER,
-                       (__force u64)ila->p.identifier) ||
-           nla_put_u64(msg, ILA_ATTR_LOCATOR,
-                       (__force u64)ila->p.ip.locator) ||
-           nla_put_u64(msg, ILA_ATTR_LOCATOR_MATCH,
-                       (__force u64)ila->p.ip.locator_match) ||
-           nla_put_s32(msg, ILA_ATTR_IFINDEX, ila->p.ifindex) ||
-           nla_put_u32(msg, ILA_ATTR_DIR, ila->p.dir))
+       if (nla_put_u64_64bit(msg, ILA_ATTR_LOCATOR,
+                             (__force u64)ila->xp.ip.locator.v64,
+                             ILA_ATTR_PAD) ||
+           nla_put_u64_64bit(msg, ILA_ATTR_LOCATOR_MATCH,
+                             (__force u64)ila->xp.ip.locator_match.v64,
+                             ILA_ATTR_PAD) ||
+           nla_put_s32(msg, ILA_ATTR_IFINDEX, ila->xp.ifindex) ||
+           nla_put_u32(msg, ILA_ATTR_CSUM_MODE, ila->xp.ip.csum_mode))
                return -1;
 
        return 0;
@@ -457,11 +440,11 @@ static int ila_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info)
        struct net *net = genl_info_net(info);
        struct ila_net *ilan = net_generic(net, ila_net_id);
        struct sk_buff *msg;
-       struct ila_xlat_params p;
+       struct ila_xlat_params xp;
        struct ila_map *ila;
        int ret;
 
-       ret = parse_nl_config(info, &p);
+       ret = parse_nl_config(info, &xp);
        if (ret)
                return ret;
 
@@ -471,7 +454,7 @@ static int ila_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info)
 
        rcu_read_lock();
 
-       ila = ila_lookup_by_params(&p, ilan);
+       ila = ila_lookup_by_params(&xp, ilan);
        if (ila) {
                ret = ila_dump_info(ila,
                                    info->snd_portid,
@@ -614,45 +597,32 @@ static struct pernet_operations ila_net_ops = {
        .size = sizeof(struct ila_net),
 };
 
-static int ila_xlat_addr(struct sk_buff *skb, int dir)
+static int ila_xlat_addr(struct sk_buff *skb)
 {
        struct ila_map *ila;
        struct ipv6hdr *ip6h = ipv6_hdr(skb);
        struct net *net = dev_net(skb->dev);
        struct ila_net *ilan = net_generic(net, ila_net_id);
-       __be64 identifier, locator_match;
-       size_t nhoff;
+       struct ila_addr *iaddr = ila_a2i(&ip6h->daddr);
 
        /* Assumes skb contains a valid IPv6 header that is pulled */
 
-       identifier = *(__be64 *)&ip6h->daddr.in6_u.u6_addr8[8];
-       locator_match = *(__be64 *)&ip6h->daddr.in6_u.u6_addr8[0];
-       nhoff = sizeof(struct ipv6hdr);
+       if (!ila_addr_is_ila(iaddr)) {
+               /* Type indicates this is not an ILA address */
+               return 0;
+       }
 
        rcu_read_lock();
 
-       ila = ila_lookup_wildcards(identifier, locator_match,
-                                  skb->dev->ifindex, dir, ilan);
+       ila = ila_lookup_wildcards(iaddr, skb->dev->ifindex, ilan);
        if (ila)
-               update_ipv6_locator(skb, &ila->p.ip);
+               ila_update_ipv6_locator(skb, &ila->xp.ip);
 
        rcu_read_unlock();
 
        return 0;
 }
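
The rewritten datapath is: view the destination as locator|identifier, bail
out unless the identifier's type bits mark an ILA address, look the locator
up under the RCU read lock, then rewrite. A sketch of the assumed address
view; the bit positions below are hypothetical, the real layout lives in the
kernel's struct ila_addr/ila_identifier:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative 64/64 view of an IPv6 destination: upper half
     * locator, lower half identifier. The identifier's first byte is
     * assumed to carry a 3-bit type plus the checksum-neutral (C) bit;
     * the masks below are hypothetical placements.
     */
    struct ila_view {
            uint64_t loc;
            uint8_t ident[8];
    };

    #define IDENT_TYPE_MASK         0xe0    /* assumed: top 3 bits */
    #define IDENT_TYPE_LUID         0x20    /* assumed type value */
    #define IDENT_CSUM_NEUTRAL      0x10    /* assumed C-bit */

    static bool addr_is_ila(const struct ila_view *a)
    {
            return (a->ident[0] & IDENT_TYPE_MASK) == IDENT_TYPE_LUID;
    }

    static void xlat_addr(struct ila_view *a, uint64_t new_loc)
    {
            if (!addr_is_ila(a))
                    return;         /* type says not ILA: pass through */
            /* mapping lookup and checksum handling elided */
            a->loc = new_loc;
    }
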
 
-int ila_xlat_incoming(struct sk_buff *skb)
-{
-       return ila_xlat_addr(skb, ILA_DIR_IN);
-}
-EXPORT_SYMBOL(ila_xlat_incoming);
-
-int ila_xlat_outgoing(struct sk_buff *skb)
-{
-       return ila_xlat_addr(skb, ILA_DIR_OUT);
-}
-EXPORT_SYMBOL(ila_xlat_outgoing);
-
 int ila_xlat_init(void)
 {
        int ret;
index f1678388fb0da410f19df59925b41f504d925af6..00cf28ad45650c801c90c37fb571acb7d1615183 100644 (file)
@@ -222,7 +222,7 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
        __sk_nulls_add_node_rcu(sk, &head->chain);
        if (tw) {
                sk_nulls_del_node_init_rcu((struct sock *)tw);
-               NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
+               __NET_INC_STATS(net, LINUX_MIB_TIMEWAITRECYCLED);
        }
        spin_unlock(lock);
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
index ea071fad67a03e9d884f99ddb64ea71dc8c31116..1bcef2369d64e6f1325dcab50c14601e6ca5a40a 100644 (file)
@@ -240,6 +240,7 @@ struct fib6_table *fib6_new_table(struct net *net, u32 id)
 
        return tb;
 }
+EXPORT_SYMBOL_GPL(fib6_new_table);
 
 struct fib6_table *fib6_get_table(struct net *net, u32 id)
 {
index 35d3ddc328f807a2d25edd99510f313d153eca62..b912f0dbaf724f63d60f2d06dae57eba334c96fa 100644 (file)
@@ -373,7 +373,7 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
                struct msghdr msg;
                struct flowi6 flowi6;
                struct sockcm_cookie sockc_junk;
-               int junk;
+               struct ipcm6_cookie ipc6;
 
                err = -ENOMEM;
                fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL);
@@ -390,8 +390,8 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
                msg.msg_control = (void *)(fl->opt+1);
                memset(&flowi6, 0, sizeof(flowi6));
 
-               err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt,
-                                           &junk, &junk, &junk, &sockc_junk);
+               ipc6.opt = fl->opt;
+               err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, &ipc6, &sockc_junk);
                if (err)
                        goto done;
                err = -EINVAL;
index 4e636e60a360d5c22dc856c2c80d361127ba41b6..ee62ec469ab3e6ad97d18656afe58cf440148071 100644 (file)
@@ -54,6 +54,7 @@
 #include <net/ip6_fib.h>
 #include <net/ip6_route.h>
 #include <net/ip6_tunnel.h>
+#include <net/gre.h>
 
 
 static bool log_ecn_error = true;
@@ -342,7 +343,7 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
                goto failed_free;
 
        /* Can use a lockless transmit, unless we generate output sequences */
-       if (!(nt->parms.o_flags & GRE_SEQ))
+       if (!(nt->parms.o_flags & TUNNEL_SEQ))
                dev->features |= NETIF_F_LLTX;
 
        dev_hold(dev);
@@ -443,137 +444,41 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        t->err_time = jiffies;
 }
 
-static int ip6gre_rcv(struct sk_buff *skb)
+static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
 {
        const struct ipv6hdr *ipv6h;
-       u8     *h;
-       __be16    flags;
-       __sum16   csum = 0;
-       __be32 key = 0;
-       u32    seqno = 0;
        struct ip6_tnl *tunnel;
-       int    offset = 4;
-       __be16 gre_proto;
-       int err;
-
-       if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
-               goto drop;
 
        ipv6h = ipv6_hdr(skb);
-       h = skb->data;
-       flags = *(__be16 *)h;
-
-       if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
-               /* - Version must be 0.
-                  - We do not support routing headers.
-                */
-               if (flags&(GRE_VERSION|GRE_ROUTING))
-                       goto drop;
-
-               if (flags&GRE_CSUM) {
-                       csum = skb_checksum_simple_validate(skb);
-                       offset += 4;
-               }
-               if (flags&GRE_KEY) {
-                       key = *(__be32 *)(h + offset);
-                       offset += 4;
-               }
-               if (flags&GRE_SEQ) {
-                       seqno = ntohl(*(__be32 *)(h + offset));
-                       offset += 4;
-               }
-       }
-
-       gre_proto = *(__be16 *)(h + 2);
-
        tunnel = ip6gre_tunnel_lookup(skb->dev,
-                                         &ipv6h->saddr, &ipv6h->daddr, key,
-                                         gre_proto);
+                                     &ipv6h->saddr, &ipv6h->daddr, tpi->key,
+                                     tpi->proto);
        if (tunnel) {
-               struct pcpu_sw_netstats *tstats;
-
-               if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
-                       goto drop;
-
-               if (!ip6_tnl_rcv_ctl(tunnel, &ipv6h->daddr, &ipv6h->saddr)) {
-                       tunnel->dev->stats.rx_dropped++;
-                       goto drop;
-               }
-
-               skb->protocol = gre_proto;
-               /* WCCP version 1 and 2 protocol decoding.
-                * - Change protocol to IPv6
-                * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
-                */
-               if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
-                       skb->protocol = htons(ETH_P_IPV6);
-                       if ((*(h + offset) & 0xF0) != 0x40)
-                               offset += 4;
-               }
-
-               skb->mac_header = skb->network_header;
-               __pskb_pull(skb, offset);
-               skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
-
-               if (((flags&GRE_CSUM) && csum) ||
-                   (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
-                       tunnel->dev->stats.rx_crc_errors++;
-                       tunnel->dev->stats.rx_errors++;
-                       goto drop;
-               }
-               if (tunnel->parms.i_flags&GRE_SEQ) {
-                       if (!(flags&GRE_SEQ) ||
-                           (tunnel->i_seqno &&
-                                       (s32)(seqno - tunnel->i_seqno) < 0)) {
-                               tunnel->dev->stats.rx_fifo_errors++;
-                               tunnel->dev->stats.rx_errors++;
-                               goto drop;
-                       }
-                       tunnel->i_seqno = seqno + 1;
-               }
-
-               /* Warning: All skb pointers will be invalidated! */
-               if (tunnel->dev->type == ARPHRD_ETHER) {
-                       if (!pskb_may_pull(skb, ETH_HLEN)) {
-                               tunnel->dev->stats.rx_length_errors++;
-                               tunnel->dev->stats.rx_errors++;
-                               goto drop;
-                       }
-
-                       ipv6h = ipv6_hdr(skb);
-                       skb->protocol = eth_type_trans(skb, tunnel->dev);
-                       skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
-               }
+               ip6_tnl_rcv(tunnel, skb, tpi, NULL, false);
 
-               __skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
+               return PACKET_RCVD;
+       }
 
-               skb_reset_network_header(skb);
+       return PACKET_REJECT;
+}
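
All of the deleted open-coded parsing now lives in the shared
gre_parse_header(), which validates version and flag bits, verifies the
checksum when C is set, extracts key and sequence number, and hands back the
header length for the caller to pull. A stand-alone sketch of a version-0
GRE base-header parser along RFC 2784/2890 lines (local names and
simplifications, not the kernel helper):

    #include <stdint.h>
    #include <string.h>

    #define GRE_F_CSUM      0x8000  /* C: checksum + reserved1 present */
    #define GRE_F_KEY       0x2000  /* K: key present */
    #define GRE_F_SEQ       0x1000  /* S: sequence number present */
    #define GRE_F_VERSION   0x0007  /* version field, must be 0 here */

    struct gre_info {
            uint16_t flags;
            uint16_t proto;         /* EtherType of the payload */
            uint32_t key;           /* kept in network order */
            uint32_t seq;           /* host order */
    };

    /* Returns the header length in bytes, or -1 on truncated or
     * unsupported input (what gre_parse_header() reports via hdr_len).
     */
    static int parse_gre(const uint8_t *p, int len, struct gre_info *gi)
    {
            int off = 4;

            if (len < off)
                    return -1;
            gi->flags = (uint16_t)((p[0] << 8) | p[1]);
            gi->proto = (uint16_t)((p[2] << 8) | p[3]);
            if (gi->flags & GRE_F_VERSION)
                    return -1;      /* only version 0 handled */
            if (gi->flags & GRE_F_CSUM)
                    off += 4;       /* checksum verification elided */
            if (gi->flags & GRE_F_KEY) {
                    if (len < off + 4)
                            return -1;
                    memcpy(&gi->key, p + off, 4);
                    off += 4;
            }
            if (gi->flags & GRE_F_SEQ) {
                    if (len < off + 4)
                            return -1;
                    gi->seq = (uint32_t)p[off] << 24 | p[off + 1] << 16 |
                              (uint32_t)p[off + 2] << 8 | p[off + 3];
                    off += 4;
            }
            return len < off ? -1 : off;
    }
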
 
-               err = IP6_ECN_decapsulate(ipv6h, skb);
-               if (unlikely(err)) {
-                       if (log_ecn_error)
-                               net_info_ratelimited("non-ECT from %pI6 with dsfield=%#x\n",
-                                                    &ipv6h->saddr,
-                                                    ipv6_get_dsfield(ipv6h));
-                       if (err > 1) {
-                               ++tunnel->dev->stats.rx_frame_errors;
-                               ++tunnel->dev->stats.rx_errors;
-                               goto drop;
-                       }
-               }
+static int gre_rcv(struct sk_buff *skb)
+{
+       struct tnl_ptk_info tpi;
+       bool csum_err = false;
+       int hdr_len;
 
-               tstats = this_cpu_ptr(tunnel->dev->tstats);
-               u64_stats_update_begin(&tstats->syncp);
-               tstats->rx_packets++;
-               tstats->rx_bytes += skb->len;
-               u64_stats_update_end(&tstats->syncp);
+       hdr_len = gre_parse_header(skb, &tpi, &csum_err);
+       if (hdr_len < 0)
+               goto drop;
 
-               netif_rx(skb);
+       if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false))
+               goto drop;
 
+       if (ip6gre_rcv(skb, &tpi) == PACKET_RCVD)
                return 0;
-       }
-       icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
 
+       icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
 drop:
        kfree_skb(skb);
        return 0;
@@ -584,187 +489,40 @@ struct ipv6_tel_txoption {
        __u8 dst_opt[8];
 };
 
-static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
+static int gre_handle_offloads(struct sk_buff *skb, bool csum)
 {
-       memset(opt, 0, sizeof(struct ipv6_tel_txoption));
-
-       opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
-       opt->dst_opt[3] = 1;
-       opt->dst_opt[4] = encap_limit;
-       opt->dst_opt[5] = IPV6_TLV_PADN;
-       opt->dst_opt[6] = 1;
-
-       opt->ops.dst0opt = (struct ipv6_opt_hdr *) opt->dst_opt;
-       opt->ops.opt_nflen = 8;
+       return iptunnel_handle_offloads(skb,
+                                       csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
 }
 
-static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
-                        struct net_device *dev,
-                        __u8 dsfield,
-                        struct flowi6 *fl6,
-                        int encap_limit,
-                        __u32 *pmtu)
+static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
+                              struct net_device *dev, __u8 dsfield,
+                              struct flowi6 *fl6, int encap_limit,
+                              __u32 *pmtu, __be16 proto)
 {
        struct ip6_tnl *tunnel = netdev_priv(dev);
-       struct net *net = tunnel->net;
-       struct net_device *tdev;    /* Device to other host */
-       struct ipv6hdr  *ipv6h;     /* Our new IP header */
-       unsigned int max_headroom = 0; /* The extra header space needed */
-       int    gre_hlen;
-       struct ipv6_tel_txoption opt;
-       int    mtu;
-       struct dst_entry *dst = NULL, *ndst = NULL;
-       struct net_device_stats *stats = &tunnel->dev->stats;
-       int err = -1;
-       u8 proto;
-       struct sk_buff *new_skb;
-       __be16 protocol;
+       __be16 protocol = (dev->type == ARPHRD_ETHER) ?
+                         htons(ETH_P_TEB) : proto;
 
        if (dev->type == ARPHRD_ETHER)
                IPCB(skb)->flags = 0;
 
-       if (dev->header_ops && dev->type == ARPHRD_IP6GRE) {
-               gre_hlen = 0;
-               ipv6h = (struct ipv6hdr *)skb->data;
-               fl6->daddr = ipv6h->daddr;
-       } else {
-               gre_hlen = tunnel->hlen;
+       if (dev->header_ops && dev->type == ARPHRD_IP6GRE)
+               fl6->daddr = ((struct ipv6hdr *)skb->data)->daddr;
+       else
                fl6->daddr = tunnel->parms.raddr;
-       }
-
-       if (!fl6->flowi6_mark)
-               dst = dst_cache_get(&tunnel->dst_cache);
-
-       if (!dst) {
-               dst = ip6_route_output(net, NULL, fl6);
-
-               if (dst->error)
-                       goto tx_err_link_failure;
-               dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
-               if (IS_ERR(dst)) {
-                       err = PTR_ERR(dst);
-                       dst = NULL;
-                       goto tx_err_link_failure;
-               }
-               ndst = dst;
-       }
-
-       tdev = dst->dev;
-
-       if (tdev == dev) {
-               stats->collisions++;
-               net_warn_ratelimited("%s: Local routing loop detected!\n",
-                                    tunnel->parms.name);
-               goto tx_err_dst_release;
-       }
-
-       mtu = dst_mtu(dst) - sizeof(*ipv6h);
-       if (encap_limit >= 0) {
-               max_headroom += 8;
-               mtu -= 8;
-       }
-       if (mtu < IPV6_MIN_MTU)
-               mtu = IPV6_MIN_MTU;
-       if (skb_dst(skb))
-               skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
-       if (skb->len > mtu) {
-               *pmtu = mtu;
-               err = -EMSGSIZE;
-               goto tx_err_dst_release;
-       }
-
-       if (tunnel->err_count > 0) {
-               if (time_before(jiffies,
-                               tunnel->err_time + IP6TUNNEL_ERR_TIMEO)) {
-                       tunnel->err_count--;
 
-                       dst_link_failure(skb);
-               } else
-                       tunnel->err_count = 0;
-       }
-
-       skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
-
-       max_headroom += LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len;
-
-       if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
-           (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
-               new_skb = skb_realloc_headroom(skb, max_headroom);
-               if (max_headroom > dev->needed_headroom)
-                       dev->needed_headroom = max_headroom;
-               if (!new_skb)
-                       goto tx_err_dst_release;
-
-               if (skb->sk)
-                       skb_set_owner_w(new_skb, skb->sk);
-               consume_skb(skb);
-               skb = new_skb;
-       }
+       if (tunnel->parms.o_flags & TUNNEL_SEQ)
+               tunnel->o_seqno++;
 
-       if (!fl6->flowi6_mark && ndst)
-               dst_cache_set_ip6(&tunnel->dst_cache, ndst, &fl6->saddr);
-       skb_dst_set(skb, dst);
-
-       proto = NEXTHDR_GRE;
-       if (encap_limit >= 0) {
-               init_tel_txopt(&opt, encap_limit);
-               ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
-       }
-
-       if (likely(!skb->encapsulation)) {
-               skb_reset_inner_headers(skb);
-               skb->encapsulation = 1;
-       }
-
-       skb_push(skb, gre_hlen);
-       skb_reset_network_header(skb);
-       skb_set_transport_header(skb, sizeof(*ipv6h));
-
-       /*
-        *      Push down and install the IP header.
-        */
-       ipv6h = ipv6_hdr(skb);
-       ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
-                    ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
-       ipv6h->hop_limit = tunnel->parms.hop_limit;
-       ipv6h->nexthdr = proto;
-       ipv6h->saddr = fl6->saddr;
-       ipv6h->daddr = fl6->daddr;
-
-       ((__be16 *)(ipv6h + 1))[0] = tunnel->parms.o_flags;
-       protocol = (dev->type == ARPHRD_ETHER) ?
-                   htons(ETH_P_TEB) : skb->protocol;
-       ((__be16 *)(ipv6h + 1))[1] = protocol;
-
-       if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
-               __be32 *ptr = (__be32 *)(((u8 *)ipv6h) + tunnel->hlen - 4);
-
-               if (tunnel->parms.o_flags&GRE_SEQ) {
-                       ++tunnel->o_seqno;
-                       *ptr = htonl(tunnel->o_seqno);
-                       ptr--;
-               }
-               if (tunnel->parms.o_flags&GRE_KEY) {
-                       *ptr = tunnel->parms.o_key;
-                       ptr--;
-               }
-               if (tunnel->parms.o_flags&GRE_CSUM) {
-                       *ptr = 0;
-                       *(__sum16 *)ptr = ip_compute_csum((void *)(ipv6h+1),
-                               skb->len - sizeof(struct ipv6hdr));
-               }
-       }
+       /* Push GRE header. */
+       gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
+                        protocol, tunnel->parms.o_key, htonl(tunnel->o_seqno));
 
        skb_set_inner_protocol(skb, protocol);
 
-       ip6tunnel_xmit(NULL, skb, dev);
-       return 0;
-tx_err_link_failure:
-       stats->tx_carrier_errors++;
-       dst_link_failure(skb);
-tx_err_dst_release:
-       dst_release(dst);
-       return err;
+       return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
+                           NEXTHDR_GRE);
 }
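
Transmit is the mirror image: gre_build_header() pushes the base header plus
the optional fields in checksum, key, sequence order, and ip6_tnl_xmit()
absorbs the routing, PMTU and encapsulation-limit handling that
ip6gre_xmit2() used to open-code. A companion sketch to parse_gre() above,
reusing its GRE_F_* masks and includes; the checksum word is left zero, as
the kernel either fills it or defers it via SKB_GSO_GRE_CSUM:

    /* Build a version-0 GRE header into buf (assumed large enough);
     * returns the number of bytes written. Companion to parse_gre().
     */
    static int build_gre(uint8_t *buf, uint16_t flags, uint16_t proto,
                         uint32_t key, uint32_t seq)
    {
            int off = 4;

            buf[0] = flags >> 8;
            buf[1] = flags & 0xff;
            buf[2] = proto >> 8;
            buf[3] = proto & 0xff;
            if (flags & GRE_F_CSUM) {
                    memset(buf + off, 0, 4);        /* filled later */
                    off += 4;
            }
            if (flags & GRE_F_KEY) {
                    memcpy(buf + off, &key, 4);     /* network order */
                    off += 4;
            }
            if (flags & GRE_F_SEQ) {
                    buf[off + 0] = seq >> 24;
                    buf[off + 1] = seq >> 16;
                    buf[off + 2] = seq >> 8;
                    buf[off + 3] = seq;
                    off += 4;
            }
            return off;
    }
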
 
 static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
@@ -783,7 +541,6 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
                encap_limit = t->parms.encap_limit;
 
        memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
-       fl6.flowi6_proto = IPPROTO_GRE;
 
        dsfield = ipv4_get_dsfield(iph);
 
@@ -793,7 +550,12 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
        if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
                fl6.flowi6_mark = skb->mark;
 
-       err = ip6gre_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
+       err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
+       if (err)
+               return -1;
+
+       err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
+                         skb->protocol);
        if (err != 0) {
                /* XXX: send ICMP error even if DF is not set. */
                if (err == -EMSGSIZE)
@@ -833,7 +595,6 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
                encap_limit = t->parms.encap_limit;
 
        memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
-       fl6.flowi6_proto = IPPROTO_GRE;
 
        dsfield = ipv6_get_dsfield(ipv6h);
        if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
@@ -843,7 +604,11 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
        if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
                fl6.flowi6_mark = skb->mark;
 
-       err = ip6gre_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
+       if (gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM)))
+               return -1;
+
+       err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit,
+                         &mtu, skb->protocol);
        if (err != 0) {
                if (err == -EMSGSIZE)
                        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
@@ -887,7 +652,11 @@ static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
        memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
        fl6.flowi6_proto = skb->protocol;
 
-       err = ip6gre_xmit2(skb, dev, 0, &fl6, encap_limit, &mtu);
+       err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
+       if (err)
+               return err;
+
+       err = __gre6_xmit(skb, dev, 0, &fl6, encap_limit, &mtu, skb->protocol);
 
        return err;
 }
@@ -931,7 +700,7 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
        struct net_device *dev = t->dev;
        struct __ip6_tnl_parm *p = &t->parms;
        struct flowi6 *fl6 = &t->fl.u.ip6;
-       int addend = sizeof(struct ipv6hdr) + 4;
+       int t_hlen;
 
        if (dev->type != ARPHRD_ETHER) {
                memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
@@ -958,16 +727,11 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
        else
                dev->flags &= ~IFF_POINTOPOINT;
 
-       /* Precalculate GRE options length */
-       if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
-               if (t->parms.o_flags&GRE_CSUM)
-                       addend += 4;
-               if (t->parms.o_flags&GRE_KEY)
-                       addend += 4;
-               if (t->parms.o_flags&GRE_SEQ)
-                       addend += 4;
-       }
-       t->hlen = addend;
+       t->tun_hlen = gre_calc_hlen(t->parms.o_flags);
+
+       t->hlen = t->tun_hlen;
+
+       t_hlen = t->hlen + sizeof(struct ipv6hdr);
 
        if (p->flags & IP6_TNL_F_CAP_XMIT) {
                int strict = (ipv6_addr_type(&p->raddr) &
@@ -981,12 +745,15 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
                        return;
 
                if (rt->dst.dev) {
-                       dev->hard_header_len = rt->dst.dev->hard_header_len + addend;
+                       dev->hard_header_len = rt->dst.dev->hard_header_len +
+                                              t_hlen;
 
                        if (set_mtu) {
-                               dev->mtu = rt->dst.dev->mtu - addend;
+                               dev->mtu = rt->dst.dev->mtu - t_hlen;
                                if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
                                        dev->mtu -= 8;
+                               if (dev->type == ARPHRD_ETHER)
+                                       dev->mtu -= ETH_HLEN;
 
                                if (dev->mtu < IPV6_MIN_MTU)
                                        dev->mtu = IPV6_MIN_MTU;
@@ -1028,8 +795,8 @@ static void ip6gre_tnl_parm_from_user(struct __ip6_tnl_parm *p,
        p->link = u->link;
        p->i_key = u->i_key;
        p->o_key = u->o_key;
-       p->i_flags = u->i_flags;
-       p->o_flags = u->o_flags;
+       p->i_flags = gre_flags_to_tnl_flags(u->i_flags);
+       p->o_flags = gre_flags_to_tnl_flags(u->o_flags);
        memcpy(p->name, u->name, sizeof(u->name));
 }
 
@@ -1046,8 +813,8 @@ static void ip6gre_tnl_parm_to_user(struct ip6_tnl_parm2 *u,
        u->link = p->link;
        u->i_key = p->i_key;
        u->o_key = p->o_key;
-       u->i_flags = p->i_flags;
-       u->o_flags = p->o_flags;
+       u->i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
+       u->o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
        memcpy(u->name, p->name, sizeof(u->name));
 }
 
@@ -1061,6 +828,8 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
        struct net *net = t->net;
        struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
 
+       memset(&p1, 0, sizeof(p1));
+
        switch (cmd) {
        case SIOCGETTUNNEL:
                if (dev == ign->fb_tunnel_dev) {
@@ -1160,15 +929,6 @@ done:
        return err;
 }
 
-static int ip6gre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
-{
-       if (new_mtu < 68 ||
-           new_mtu > 0xFFF8 - dev->hard_header_len)
-               return -EINVAL;
-       dev->mtu = new_mtu;
-       return 0;
-}
-
 static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
                        unsigned short type,
                        const void *daddr, const void *saddr, unsigned int len)
@@ -1212,7 +972,7 @@ static const struct net_device_ops ip6gre_netdev_ops = {
        .ndo_uninit             = ip6gre_tunnel_uninit,
        .ndo_start_xmit         = ip6gre_tunnel_xmit,
        .ndo_do_ioctl           = ip6gre_tunnel_ioctl,
-       .ndo_change_mtu         = ip6gre_tunnel_change_mtu,
+       .ndo_change_mtu         = ip6_tnl_change_mtu,
        .ndo_get_stats64        = ip_tunnel_get_stats64,
        .ndo_get_iflink         = ip6_tnl_get_iflink,
 };
@@ -1228,17 +988,11 @@ static void ip6gre_dev_free(struct net_device *dev)
 
 static void ip6gre_tunnel_setup(struct net_device *dev)
 {
-       struct ip6_tnl *t;
-
        dev->netdev_ops = &ip6gre_netdev_ops;
        dev->destructor = ip6gre_dev_free;
 
        dev->type = ARPHRD_IP6GRE;
-       dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr) + 4;
-       dev->mtu = ETH_DATA_LEN - sizeof(struct ipv6hdr) - 4;
-       t = netdev_priv(dev);
-       if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
-               dev->mtu -= 8;
+
        dev->flags |= IFF_NOARP;
        dev->addr_len = sizeof(struct in6_addr);
        netif_keep_dst(dev);
@@ -1248,6 +1002,7 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
 {
        struct ip6_tnl *tunnel;
        int ret;
+       int t_hlen;
 
        tunnel = netdev_priv(dev);
 
@@ -1266,6 +1021,17 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
                return ret;
        }
 
+       tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
+
+       tunnel->hlen = tunnel->tun_hlen;
+
+       t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
+
+       dev->hard_header_len = LL_MAX_HEADER + t_hlen;
+       dev->mtu = ETH_DATA_LEN - t_hlen;
+       if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+               dev->mtu -= 8;
+
        return 0;
 }
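
The header-length bookkeeping above is plain arithmetic over the option
flags: 4 bytes of base GRE header, 4 more for each of checksum, key and
sequence number, plus the 40-byte IPv6 header; the encapsulation-limit
option costs another 8, and an Ethernet-type tap device another ETH_HLEN.
A worked example for a tunnel with all three options enabled:

    #include <stdio.h>

    int main(void)
    {
            int base = 4, per_opt = 4;              /* GRE base, option */
            int tun_hlen = base + 3 * per_opt;      /* csum+key+seq = 16 */
            int t_hlen = tun_hlen + 40;             /* + IPv6 header = 56 */
            int mtu = 1500 - t_hlen;                /* ETH_DATA_LEN - 56 */

            mtu -= 8;       /* encap limit, unless IGN_ENCAP_LIMIT */
            printf("tun_hlen=%d t_hlen=%d mtu=%d\n",
                   tun_hlen, t_hlen, mtu);
            return 0;       /* tun_hlen=16 t_hlen=56 mtu=1436 */
    }
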
 
@@ -1304,7 +1070,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
 
 
 static struct inet6_protocol ip6gre_protocol __read_mostly = {
-       .handler     = ip6gre_rcv,
+       .handler     = gre_rcv,
        .err_handler = ip6gre_err,
        .flags       = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
 };
@@ -1448,10 +1214,12 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
                parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
 
        if (data[IFLA_GRE_IFLAGS])
-               parms->i_flags = nla_get_be16(data[IFLA_GRE_IFLAGS]);
+               parms->i_flags = gre_flags_to_tnl_flags(
+                               nla_get_be16(data[IFLA_GRE_IFLAGS]));
 
        if (data[IFLA_GRE_OFLAGS])
-               parms->o_flags = nla_get_be16(data[IFLA_GRE_OFLAGS]);
+               parms->o_flags = gre_flags_to_tnl_flags(
+                               nla_get_be16(data[IFLA_GRE_OFLAGS]));
 
        if (data[IFLA_GRE_IKEY])
                parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
@@ -1500,11 +1268,16 @@ static const struct net_device_ops ip6gre_tap_netdev_ops = {
        .ndo_start_xmit = ip6gre_tunnel_xmit,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
-       .ndo_change_mtu = ip6gre_tunnel_change_mtu,
+       .ndo_change_mtu = ip6_tnl_change_mtu,
        .ndo_get_stats64 = ip_tunnel_get_stats64,
        .ndo_get_iflink = ip6_tnl_get_iflink,
 };
 
+#define GRE6_FEATURES (NETIF_F_SG |            \
+                      NETIF_F_FRAGLIST |       \
+                      NETIF_F_HIGHDMA |                \
+                      NETIF_F_HW_CSUM)
+
 static void ip6gre_tap_setup(struct net_device *dev)
 {
 
@@ -1538,9 +1311,21 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
        nt->net = dev_net(dev);
        ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
 
-       /* Can use a lockless transmit, unless we generate output sequences */
-       if (!(nt->parms.o_flags & GRE_SEQ))
+       dev->features           |= GRE6_FEATURES;
+       dev->hw_features        |= GRE6_FEATURES;
+
+       if (!(nt->parms.o_flags & TUNNEL_SEQ)) {
+               /* TCP segmentation offload is not supported when we
+                * generate output sequences.
+                */
+               dev->features    |= NETIF_F_GSO_SOFTWARE;
+               dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+
+               /* Can use a lockless transmit, unless we generate
+                * output sequences
+                */
                dev->features |= NETIF_F_LLTX;
+       }
 
        err = register_netdevice(dev);
        if (err)
@@ -1626,8 +1411,10 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
        struct __ip6_tnl_parm *p = &t->parms;
 
        if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
-           nla_put_be16(skb, IFLA_GRE_IFLAGS, p->i_flags) ||
-           nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) ||
+           nla_put_be16(skb, IFLA_GRE_IFLAGS,
+                        gre_tnl_flags_to_gre_flags(p->i_flags)) ||
+           nla_put_be16(skb, IFLA_GRE_OFLAGS,
+                        gre_tnl_flags_to_gre_flags(p->o_flags)) ||
            nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
            nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
            nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) ||
index c05c425c23890b4b5ce077d5ca835a7e644ab881..f185cbcda1144f02f81657b052f39f2fba8e3e20 100644 (file)
 
 int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
+       /* If the ingress device is enslaved to an L3 master device,
+        * pass the skb to its handler for processing.
+        */
+       skb = l3mdev_ip6_rcv(skb);
+       if (!skb)
+               return NET_RX_SUCCESS;
+
        if (net->ipv4.sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
                const struct inet6_protocol *ipprot;
 
@@ -78,11 +85,11 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
 
        idev = __in6_dev_get(skb->dev);
 
-       IP6_UPD_PO_STATS_BH(net, idev, IPSTATS_MIB_IN, skb->len);
+       __IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_IN, skb->len);
 
        if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
            !idev || unlikely(idev->cnf.disable_ipv6)) {
-               IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDISCARDS);
+               __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
                goto drop;
        }
 
@@ -109,10 +116,10 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
        if (hdr->version != 6)
                goto err;
 
-       IP6_ADD_STATS_BH(net, idev,
-                        IPSTATS_MIB_NOECTPKTS +
+       __IP6_ADD_STATS(net, idev,
+                       IPSTATS_MIB_NOECTPKTS +
                                (ipv6_get_dsfield(hdr) & INET_ECN_MASK),
-                        max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
+                       max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
        /*
         * RFC4291 2.5.3
         * A packet received on an interface with a destination address
@@ -169,12 +176,12 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
        /* pkt_len may be zero if Jumbo payload option is present */
        if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
                if (pkt_len + sizeof(struct ipv6hdr) > skb->len) {
-                       IP6_INC_STATS_BH(net,
-                                        idev, IPSTATS_MIB_INTRUNCATEDPKTS);
+                       __IP6_INC_STATS(net,
+                                       idev, IPSTATS_MIB_INTRUNCATEDPKTS);
                        goto drop;
                }
                if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) {
-                       IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS);
+                       __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
                        goto drop;
                }
                hdr = ipv6_hdr(skb);
@@ -182,7 +189,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
 
        if (hdr->nexthdr == NEXTHDR_HOP) {
                if (ipv6_parse_hopopts(skb) < 0) {
-                       IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS);
+                       __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
                        rcu_read_unlock();
                        return NET_RX_DROP;
                }
@@ -197,7 +204,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
                       net, NULL, skb, dev, NULL,
                       ip6_rcv_finish);
 err:
-       IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS);
+       __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
 drop:
        rcu_read_unlock();
        kfree_skb(skb);
@@ -259,18 +266,18 @@ resubmit:
                if (ret > 0)
                        goto resubmit;
                else if (ret == 0)
-                       IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS);
+                       __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDELIVERS);
        } else {
                if (!raw) {
                        if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
-                               IP6_INC_STATS_BH(net, idev,
-                                                IPSTATS_MIB_INUNKNOWNPROTOS);
+                               __IP6_INC_STATS(net, idev,
+                                               IPSTATS_MIB_INUNKNOWNPROTOS);
                                icmpv6_send(skb, ICMPV6_PARAMPROB,
                                            ICMPV6_UNK_NEXTHDR, nhoff);
                        }
                        kfree_skb(skb);
                } else {
-                       IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS);
+                       __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDELIVERS);
                        consume_skb(skb);
                }
        }
@@ -278,7 +285,7 @@ resubmit:
        return 0;
 
 discard:
-       IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDISCARDS);
+       __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
        rcu_read_unlock();
        kfree_skb(skb);
        return 0;
@@ -297,7 +304,7 @@ int ip6_mc_input(struct sk_buff *skb)
        const struct ipv6hdr *hdr;
        bool deliver;
 
-       IP6_UPD_PO_STATS_BH(dev_net(skb_dst(skb)->dev),
+       __IP6_UPD_PO_STATS(dev_net(skb_dst(skb)->dev),
                         ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INMCAST,
                         skb->len);
 
index 204af221947139c0c308062921a73484df306031..f5eb184e109307b355949d02103f83c2d1a7d737 100644 (file)
@@ -63,6 +63,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
        int proto;
        struct frag_hdr *fptr;
        unsigned int unfrag_ip6hlen;
+       unsigned int payload_len;
        u8 *prevhdr;
        int offset = 0;
        bool encap, udpfrag;
@@ -73,6 +74,8 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
                       SKB_GSO_UDP |
                       SKB_GSO_DODGY |
                       SKB_GSO_TCP_ECN |
+                      SKB_GSO_TCP_FIXEDID |
+                      SKB_GSO_TCPV6 |
                       SKB_GSO_GRE |
                       SKB_GSO_GRE_CSUM |
                       SKB_GSO_IPIP |
@@ -80,7 +83,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
                       SKB_GSO_UDP_TUNNEL |
                       SKB_GSO_UDP_TUNNEL_CSUM |
                       SKB_GSO_TUNNEL_REMCSUM |
-                      SKB_GSO_TCPV6 |
+                      SKB_GSO_PARTIAL |
                       0)))
                goto out;
 
@@ -117,7 +120,13 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
 
        for (skb = segs; skb; skb = skb->next) {
                ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
-               ipv6h->payload_len = htons(skb->len - nhoff - sizeof(*ipv6h));
+               if (skb_is_gso(skb))
+                       payload_len = skb_shinfo(skb)->gso_size +
+                                     SKB_GSO_CB(skb)->data_offset +
+                                     skb->head - (unsigned char *)(ipv6h + 1);
+               else
+                       payload_len = skb->len - nhoff - sizeof(*ipv6h);
+               ipv6h->payload_len = htons(payload_len);
                skb->network_header = (u8 *)ipv6h - skb->head;
 
                if (udpfrag) {
@@ -239,10 +248,14 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
                NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000));
                NAPI_GRO_CB(p)->flush |= flush;
 
-               /* Clear flush_id, there's really no concept of ID in IPv6. */
-               NAPI_GRO_CB(p)->flush_id = 0;
+               /* If the previous IP ID value was based on an atomic
+                * datagram, we can overwrite the value and ignore it.
+                */
+               if (NAPI_GRO_CB(skb)->is_atomic)
+                       NAPI_GRO_CB(p)->flush_id = 0;
        }
 
+       NAPI_GRO_CB(skb)->is_atomic = true;
        NAPI_GRO_CB(skb)->flush |= flush;
 
        skb_gro_postpull_rcsum(skb, iph, nlen);
index 612f3d138bf05e782100a6ae2aa4d80f9dbd7209..cbf127ae7c676650cc626cbf12cd61b6b570ea43 100644 (file)
@@ -395,8 +395,8 @@ int ip6_forward(struct sk_buff *skb)
                goto drop;
 
        if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
-               IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
-                                IPSTATS_MIB_INDISCARDS);
+               __IP6_INC_STATS(net, ip6_dst_idev(dst),
+                               IPSTATS_MIB_INDISCARDS);
                goto drop;
        }
 
@@ -427,8 +427,8 @@ int ip6_forward(struct sk_buff *skb)
                /* Force OUTPUT device used as source address */
                skb->dev = dst->dev;
                icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
-               IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
-                                IPSTATS_MIB_INHDRERRORS);
+               __IP6_INC_STATS(net, ip6_dst_idev(dst),
+                               IPSTATS_MIB_INHDRERRORS);
 
                kfree_skb(skb);
                return -ETIMEDOUT;
@@ -441,15 +441,15 @@ int ip6_forward(struct sk_buff *skb)
                if (proxied > 0)
                        return ip6_input(skb);
                else if (proxied < 0) {
-                       IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
-                                        IPSTATS_MIB_INDISCARDS);
+                       __IP6_INC_STATS(net, ip6_dst_idev(dst),
+                                       IPSTATS_MIB_INDISCARDS);
                        goto drop;
                }
        }
 
        if (!xfrm6_route_forward(skb)) {
-               IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
-                                IPSTATS_MIB_INDISCARDS);
+               __IP6_INC_STATS(net, ip6_dst_idev(dst),
+                               IPSTATS_MIB_INDISCARDS);
                goto drop;
        }
        dst = skb_dst(skb);
@@ -505,17 +505,17 @@ int ip6_forward(struct sk_buff *skb)
                /* Again, force OUTPUT device used as source address */
                skb->dev = dst->dev;
                icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
-               IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
-                                IPSTATS_MIB_INTOOBIGERRORS);
-               IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
-                                IPSTATS_MIB_FRAGFAILS);
+               __IP6_INC_STATS(net, ip6_dst_idev(dst),
+                               IPSTATS_MIB_INTOOBIGERRORS);
+               __IP6_INC_STATS(net, ip6_dst_idev(dst),
+                               IPSTATS_MIB_FRAGFAILS);
                kfree_skb(skb);
                return -EMSGSIZE;
        }
 
        if (skb_cow(skb, dst->dev->hard_header_len)) {
-               IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
-                                IPSTATS_MIB_OUTDISCARDS);
+               __IP6_INC_STATS(net, ip6_dst_idev(dst),
+                               IPSTATS_MIB_OUTDISCARDS);
                goto drop;
        }
 
@@ -525,14 +525,14 @@ int ip6_forward(struct sk_buff *skb)
 
        hdr->hop_limit--;
 
-       IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
-       IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
+       __IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
+       __IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
        return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
                       net, NULL, skb, skb->dev, dst->dev,
                       ip6_forward_finish);
 
 error:
-       IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
+       __IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
 drop:
        kfree_skb(skb);
        return -EINVAL;
@@ -1090,8 +1090,8 @@ static inline int ip6_ufo_append_data(struct sock *sk,
                        int getfrag(void *from, char *to, int offset, int len,
                        int odd, struct sk_buff *skb),
                        void *from, int length, int hh_len, int fragheaderlen,
-                       int transhdrlen, int mtu, unsigned int flags,
-                       const struct flowi6 *fl6)
+                       int exthdrlen, int transhdrlen, int mtu,
+                       unsigned int flags, const struct flowi6 *fl6)
 
 {
        struct sk_buff *skb;
@@ -1116,7 +1116,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
                skb_put(skb, fragheaderlen + transhdrlen);
 
                /* initialize network header pointer */
-               skb_reset_network_header(skb);
+               skb_set_network_header(skb, exthdrlen);
 
                /* initialize protocol header pointer */
                skb->transport_header = skb->network_header + fragheaderlen;
@@ -1182,12 +1182,12 @@ static void ip6_append_data_mtu(unsigned int *mtu,
 }
 
 static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
-                         struct inet6_cork *v6_cork,
-                         int hlimit, int tclass, struct ipv6_txoptions *opt,
+                         struct inet6_cork *v6_cork, struct ipcm6_cookie *ipc6,
                          struct rt6_info *rt, struct flowi6 *fl6)
 {
        struct ipv6_pinfo *np = inet6_sk(sk);
        unsigned int mtu;
+       struct ipv6_txoptions *opt = ipc6->opt;
 
        /*
         * setup for corking
@@ -1229,8 +1229,8 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
        dst_hold(&rt->dst);
        cork->base.dst = &rt->dst;
        cork->fl.u.ip6 = *fl6;
-       v6_cork->hop_limit = hlimit;
-       v6_cork->tclass = tclass;
+       v6_cork->hop_limit = ipc6->hlimit;
+       v6_cork->tclass = ipc6->tclass;
        if (rt->dst.flags & DST_XFRM_TUNNEL)
                mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
                      rt->dst.dev->mtu : dst_mtu(&rt->dst);
@@ -1258,7 +1258,7 @@ static int __ip6_append_data(struct sock *sk,
                             int getfrag(void *from, char *to, int offset,
                                         int len, int odd, struct sk_buff *skb),
                             void *from, int length, int transhdrlen,
-                            unsigned int flags, int dontfrag,
+                            unsigned int flags, struct ipcm6_cookie *ipc6,
                             const struct sockcm_cookie *sockc)
 {
        struct sk_buff *skb, *skb_prev = NULL;
@@ -1298,7 +1298,7 @@ static int __ip6_append_data(struct sock *sk,
                      sizeof(struct frag_hdr) : 0) +
                     rt->rt6i_nfheader_len;
 
-       if (cork->length + length > mtu - headersize && dontfrag &&
+       if (cork->length + length > mtu - headersize && ipc6->dontfrag &&
            (sk->sk_protocol == IPPROTO_UDP ||
             sk->sk_protocol == IPPROTO_RAW)) {
                ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
@@ -1359,7 +1359,7 @@ emsgsize:
            (rt->dst.dev->features & NETIF_F_UFO) &&
            (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
                err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
-                                         hh_len, fragheaderlen,
+                                         hh_len, fragheaderlen, exthdrlen,
                                          transhdrlen, mtu, flags, fl6);
                if (err)
                        goto error;
@@ -1564,9 +1564,9 @@ error:
 int ip6_append_data(struct sock *sk,
                    int getfrag(void *from, char *to, int offset, int len,
                                int odd, struct sk_buff *skb),
-                   void *from, int length, int transhdrlen, int hlimit,
-                   int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
-                   struct rt6_info *rt, unsigned int flags, int dontfrag,
+                   void *from, int length, int transhdrlen,
+                   struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
+                   struct rt6_info *rt, unsigned int flags,
                    const struct sockcm_cookie *sockc)
 {
        struct inet_sock *inet = inet_sk(sk);
@@ -1580,12 +1580,12 @@ int ip6_append_data(struct sock *sk,
                /*
                 * setup for corking
                 */
-               err = ip6_setup_cork(sk, &inet->cork, &np->cork, hlimit,
-                                    tclass, opt, rt, fl6);
+               err = ip6_setup_cork(sk, &inet->cork, &np->cork,
+                                    ipc6, rt, fl6);
                if (err)
                        return err;
 
-               exthdrlen = (opt ? opt->opt_flen : 0);
+               exthdrlen = (ipc6->opt ? ipc6->opt->opt_flen : 0);
                length += exthdrlen;
                transhdrlen += exthdrlen;
        } else {
@@ -1595,8 +1595,7 @@ int ip6_append_data(struct sock *sk,
 
        return __ip6_append_data(sk, fl6, &sk->sk_write_queue, &inet->cork.base,
                                 &np->cork, sk_page_frag(sk), getfrag,
-                                from, length, transhdrlen, flags, dontfrag,
-                                sockc);
+                                from, length, transhdrlen, flags, ipc6, sockc);
 }
 EXPORT_SYMBOL_GPL(ip6_append_data);
 
@@ -1752,15 +1751,14 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
                             int getfrag(void *from, char *to, int offset,
                                         int len, int odd, struct sk_buff *skb),
                             void *from, int length, int transhdrlen,
-                            int hlimit, int tclass,
-                            struct ipv6_txoptions *opt, struct flowi6 *fl6,
+                            struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
                             struct rt6_info *rt, unsigned int flags,
-                            int dontfrag, const struct sockcm_cookie *sockc)
+                            const struct sockcm_cookie *sockc)
 {
        struct inet_cork_full cork;
        struct inet6_cork v6_cork;
        struct sk_buff_head queue;
-       int exthdrlen = (opt ? opt->opt_flen : 0);
+       int exthdrlen = (ipc6->opt ? ipc6->opt->opt_flen : 0);
        int err;
 
        if (flags & MSG_PROBE)
@@ -1772,17 +1770,17 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
        cork.base.addr = 0;
        cork.base.opt = NULL;
        v6_cork.opt = NULL;
-       err = ip6_setup_cork(sk, &cork, &v6_cork, hlimit, tclass, opt, rt, fl6);
+       err = ip6_setup_cork(sk, &cork, &v6_cork, ipc6, rt, fl6);
        if (err)
                return ERR_PTR(err);
 
-       if (dontfrag < 0)
-               dontfrag = inet6_sk(sk)->dontfrag;
+       if (ipc6->dontfrag < 0)
+               ipc6->dontfrag = inet6_sk(sk)->dontfrag;
 
        err = __ip6_append_data(sk, fl6, &queue, &cork.base, &v6_cork,
                                &current->task_frag, getfrag, from,
                                length + exthdrlen, transhdrlen + exthdrlen,
-                               flags, dontfrag, sockc);
+                               flags, ipc6, sockc);
        if (err) {
                __ip6_flush_pending_frames(sk, &queue, &cork, &v6_cork);
                return ERR_PTR(err);
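
Two independent changes run through the ip6_output.c hunks above. First, the _BH-suffixed SNMP helpers (IP6_INC_STATS_BH() and friends) become double-underscore variants (__IP6_INC_STATS()); this is a tree-wide rename, and the meaning, that the caller already runs with softirqs disabled, is unchanged. Second, the hlimit, tclass, dontfrag and opt parameters of ip6_append_data(), ip6_make_skb() and ip6_setup_cork() are folded into a single struct ipcm6_cookie. A standalone sketch of that consolidation pattern; the field types are an assumption (the real definition lives in include/net/ipv6.h), and ipcm6_init() is invented for illustration:

/* Sketch, not the kernel definition: per-sendmsg IPv6 options travel
 * in one cookie instead of four positional arguments, so adding a
 * field no longer means touching every prototype on the path.
 */
#include <stdio.h>

struct ipv6_txoptions;          /* opaque here */

struct ipcm6_cookie {
        int hlimit;             /* -1: use socket default */
        int tclass;             /* -1: use socket default */
        int dontfrag;           /* -1: use socket default */
        struct ipv6_txoptions *opt;
};

static void ipcm6_init(struct ipcm6_cookie *ipc6)
{
        ipc6->hlimit = -1;
        ipc6->tclass = -1;
        ipc6->dontfrag = -1;
        ipc6->opt = NULL;
}

int main(void)
{
        struct ipcm6_cookie ipc6;

        ipcm6_init(&ipc6);
        /* cmsg parsing would overwrite individual fields here */
        printf("hlimit=%d tclass=%d dontfrag=%d\n",
               ipc6.hlimit, ipc6.tclass, ipc6.dontfrag);
        return 0;
}

The "-1 means use the socket default" convention is visible in the diff itself: ip6_make_skb() falls back to inet6_sk(sk)->dontfrag when ipc6->dontfrag is negative.
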
index eb2ac4bb09ce0fb0f9afb3fcc142aad80270a6cb..e79330f214bd56bd061fd994ac5834ac1403583d 100644 (file)
@@ -238,6 +238,7 @@ static void ip6_dev_free(struct net_device *dev)
 {
        struct ip6_tnl *t = netdev_priv(dev);
 
+       gro_cells_destroy(&t->gro_cells);
        dst_cache_destroy(&t->dst_cache);
        free_percpu(dev->tstats);
        free_netdev(dev);
@@ -252,12 +253,12 @@ static int ip6_tnl_create2(struct net_device *dev)
 
        t = netdev_priv(dev);
 
+       dev->rtnl_link_ops = &ip6_link_ops;
        err = register_netdevice(dev);
        if (err < 0)
                goto out;
 
        strcpy(t->parms.name, dev->name);
-       dev->rtnl_link_ops = &ip6_link_ops;
 
        dev_hold(dev);
        ip6_tnl_link(ip6n, t);
@@ -753,97 +754,157 @@ int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
 }
 EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);
 
-/**
- * ip6_tnl_rcv - decapsulate IPv6 packet and retransmit it locally
- *   @skb: received socket buffer
- *   @protocol: ethernet protocol ID
- *   @dscp_ecn_decapsulate: the function to decapsulate DSCP code and ECN
- *
- * Return: 0
- **/
-
-static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
-                      __u8 ipproto,
-                      int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
-                                                  const struct ipv6hdr *ipv6h,
-                                                  struct sk_buff *skb))
+static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
+                        const struct tnl_ptk_info *tpi,
+                        struct metadata_dst *tun_dst,
+                        int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
+                                               const struct ipv6hdr *ipv6h,
+                                               struct sk_buff *skb),
+                        bool log_ecn_err)
 {
-       struct ip6_tnl *t;
+       struct pcpu_sw_netstats *tstats;
        const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
-       u8 tproto;
        int err;
 
-       rcu_read_lock();
-       t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);
-       if (t) {
-               struct pcpu_sw_netstats *tstats;
+       if ((!(tpi->flags & TUNNEL_CSUM) &&
+            (tunnel->parms.i_flags & TUNNEL_CSUM)) ||
+           ((tpi->flags & TUNNEL_CSUM) &&
+            !(tunnel->parms.i_flags & TUNNEL_CSUM))) {
+               tunnel->dev->stats.rx_crc_errors++;
+               tunnel->dev->stats.rx_errors++;
+               goto drop;
+       }
 
-               tproto = ACCESS_ONCE(t->parms.proto);
-               if (tproto != ipproto && tproto != 0) {
-                       rcu_read_unlock();
-                       goto discard;
+       if (tunnel->parms.i_flags & TUNNEL_SEQ) {
+               if (!(tpi->flags & TUNNEL_SEQ) ||
+                   (tunnel->i_seqno &&
+                    (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
+                       tunnel->dev->stats.rx_fifo_errors++;
+                       tunnel->dev->stats.rx_errors++;
+                       goto drop;
                }
+               tunnel->i_seqno = ntohl(tpi->seq) + 1;
+       }
 
-               if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
-                       rcu_read_unlock();
-                       goto discard;
-               }
+       skb->protocol = tpi->proto;
 
-               if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr)) {
-                       t->dev->stats.rx_dropped++;
-                       rcu_read_unlock();
-                       goto discard;
+       /* Warning: All skb pointers will be invalidated! */
+       if (tunnel->dev->type == ARPHRD_ETHER) {
+               if (!pskb_may_pull(skb, ETH_HLEN)) {
+                       tunnel->dev->stats.rx_length_errors++;
+                       tunnel->dev->stats.rx_errors++;
+                       goto drop;
                }
-               skb->mac_header = skb->network_header;
-               skb_reset_network_header(skb);
-               skb->protocol = htons(protocol);
-               memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
-
-               __skb_tunnel_rx(skb, t->dev, t->net);
-
-               err = dscp_ecn_decapsulate(t, ipv6h, skb);
-               if (unlikely(err)) {
-                       if (log_ecn_error)
-                               net_info_ratelimited("non-ECT from %pI6 with dsfield=%#x\n",
-                                                    &ipv6h->saddr,
-                                                    ipv6_get_dsfield(ipv6h));
-                       if (err > 1) {
-                               ++t->dev->stats.rx_frame_errors;
-                               ++t->dev->stats.rx_errors;
-                               rcu_read_unlock();
-                               goto discard;
-                       }
+
+               ipv6h = ipv6_hdr(skb);
+               skb->protocol = eth_type_trans(skb, tunnel->dev);
+               skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+       } else {
+               skb->dev = tunnel->dev;
+       }
+
+       skb_reset_network_header(skb);
+       memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
+
+       __skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
+
+       err = dscp_ecn_decapsulate(tunnel, ipv6h, skb);
+       if (unlikely(err)) {
+               if (log_ecn_err)
+                       net_info_ratelimited("non-ECT from %pI6 with DS=%#x\n",
+                                            &ipv6h->saddr,
+                                            ipv6_get_dsfield(ipv6h));
+               if (err > 1) {
+                       ++tunnel->dev->stats.rx_frame_errors;
+                       ++tunnel->dev->stats.rx_errors;
+                       goto drop;
                }
+       }
 
-               tstats = this_cpu_ptr(t->dev->tstats);
-               u64_stats_update_begin(&tstats->syncp);
-               tstats->rx_packets++;
-               tstats->rx_bytes += skb->len;
-               u64_stats_update_end(&tstats->syncp);
+       tstats = this_cpu_ptr(tunnel->dev->tstats);
+       u64_stats_update_begin(&tstats->syncp);
+       tstats->rx_packets++;
+       tstats->rx_bytes += skb->len;
+       u64_stats_update_end(&tstats->syncp);
 
-               netif_rx(skb);
+       skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));
 
-               rcu_read_unlock();
-               return 0;
+       gro_cells_receive(&tunnel->gro_cells, skb);
+       return 0;
+
+drop:
+       kfree_skb(skb);
+       return 0;
+}
+
+int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
+               const struct tnl_ptk_info *tpi,
+               struct metadata_dst *tun_dst,
+               bool log_ecn_err)
+{
+       return __ip6_tnl_rcv(t, skb, tpi, NULL, ip6ip6_dscp_ecn_decapsulate,
+                            log_ecn_err);
+}
+EXPORT_SYMBOL(ip6_tnl_rcv);
+
+static const struct tnl_ptk_info tpi_v6 = {
+       /* no tunnel info required for ipxip6. */
+       .proto = htons(ETH_P_IPV6),
+};
+
+static const struct tnl_ptk_info tpi_v4 = {
+       /* no tunnel info required for ipxip6. */
+       .proto = htons(ETH_P_IP),
+};
+
+static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
+                     const struct tnl_ptk_info *tpi,
+                     int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
+                                                 const struct ipv6hdr *ipv6h,
+                                                 struct sk_buff *skb))
+{
+       struct ip6_tnl *t;
+       const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+       int ret = -1;
+
+       rcu_read_lock();
+       t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);
+
+       if (t) {
+               u8 tproto = ACCESS_ONCE(t->parms.proto);
+
+               if (tproto != ipproto && tproto != 0)
+                       goto drop;
+               if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
+                       goto drop;
+               if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
+                       goto drop;
+               if (iptunnel_pull_header(skb, 0, tpi->proto, false))
+                       goto drop;
+               ret = __ip6_tnl_rcv(t, skb, tpi, NULL, dscp_ecn_decapsulate,
+                                   log_ecn_error);
        }
+
        rcu_read_unlock();
-       return 1;
 
-discard:
+       return ret;
+
+drop:
+       rcu_read_unlock();
        kfree_skb(skb);
        return 0;
 }
 
 static int ip4ip6_rcv(struct sk_buff *skb)
 {
-       return ip6_tnl_rcv(skb, ETH_P_IP, IPPROTO_IPIP,
-                          ip4ip6_dscp_ecn_decapsulate);
+       return ipxip6_rcv(skb, IPPROTO_IPIP, &tpi_v4,
+                         ip4ip6_dscp_ecn_decapsulate);
 }
 
 static int ip6ip6_rcv(struct sk_buff *skb)
 {
-       return ip6_tnl_rcv(skb, ETH_P_IPV6, IPPROTO_IPV6,
-                          ip6ip6_dscp_ecn_decapsulate);
+       return ipxip6_rcv(skb, IPPROTO_IPV6, &tpi_v6,
+                         ip6ip6_dscp_ecn_decapsulate);
 }
 
 struct ipv6_tel_txoption {
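
One detail worth calling out in the receive-path hunk above: the TUNNEL_CSUM test near the top of the new __ip6_tnl_rcv() is an expanded exclusive-or, accepting the packet only when the checksum flag on the wire (tpi->flags) and the flag configured on the tunnel (parms.i_flags) agree. A standalone equivalent, with invented names:

/* Standalone sketch: the TUNNEL_CSUM agreement test is an expanded
 * XOR; both the spelled-out form in the diff and the form below
 * reject exactly the mismatched cases.
 */
#include <stdbool.h>
#include <stdio.h>

static bool csum_flags_disagree(unsigned int pkt_flags,
                                unsigned int cfg_flags,
                                unsigned int mask)
{
        return !(pkt_flags & mask) != !(cfg_flags & mask);
}

int main(void)
{
        enum { TUNNEL_CSUM = 0x01 };

        printf("%d\n", csum_flags_disagree(TUNNEL_CSUM, 0, TUNNEL_CSUM)); /* 1 */
        printf("%d\n", csum_flags_disagree(TUNNEL_CSUM, TUNNEL_CSUM,
                                           TUNNEL_CSUM));                 /* 0 */
        return 0;
}
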
@@ -918,13 +979,14 @@ int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
 EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);
 
 /**
- * ip6_tnl_xmit2 - encapsulate packet and send
+ * ip6_tnl_xmit - encapsulate packet and send
  *   @skb: the outgoing socket buffer
  *   @dev: the outgoing tunnel device
  *   @dsfield: dscp code for outer header
- *   @fl: flow of tunneled packet
+ *   @fl6: flow of tunneled packet
  *   @encap_limit: encapsulation limit
  *   @pmtu: Path MTU is stored if packet is too big
+ *   @proto: next header value
  *
  * Description:
  *   Build new header and do some sanity checks on the packet before sending
@@ -936,12 +998,9 @@ EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);
  *   %-EMSGSIZE message too big. return mtu in this case.
  **/
 
-static int ip6_tnl_xmit2(struct sk_buff *skb,
-                        struct net_device *dev,
-                        __u8 dsfield,
-                        struct flowi6 *fl6,
-                        int encap_limit,
-                        __u32 *pmtu)
+int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
+                struct flowi6 *fl6, int encap_limit, __u32 *pmtu,
+                __u8 proto)
 {
        struct ip6_tnl *t = netdev_priv(dev);
        struct net *net = t->net;
@@ -952,7 +1011,6 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
        struct net_device *tdev;
        int mtu;
        unsigned int max_headroom = sizeof(struct ipv6hdr);
-       u8 proto;
        int err = -1;
 
        /* NBMA tunnel */
@@ -1014,12 +1072,23 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
                mtu = IPV6_MIN_MTU;
        if (skb_dst(skb))
                skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
-       if (skb->len > mtu) {
+       if (skb->len > mtu && !skb_is_gso(skb)) {
                *pmtu = mtu;
                err = -EMSGSIZE;
                goto tx_err_dst_release;
        }
 
+       if (t->err_count > 0) {
+               if (time_before(jiffies,
+                               t->err_time + IP6TUNNEL_ERR_TIMEO)) {
+                       t->err_count--;
+
+                       dst_link_failure(skb);
+               } else {
+                       t->err_count = 0;
+               }
+       }
+
        skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
 
        /*
@@ -1045,9 +1114,6 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
                dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
        skb_dst_set(skb, dst);
 
-       skb->transport_header = skb->network_header;
-
-       proto = fl6->flowi6_proto;
        if (encap_limit >= 0) {
                init_tel_txopt(&opt, encap_limit);
                ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
@@ -1058,6 +1124,11 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
                skb->encapsulation = 1;
        }
 
+       max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
+                       + dst->header_len;
+       if (max_headroom > dev->needed_headroom)
+               dev->needed_headroom = max_headroom;
+
        skb_push(skb, sizeof(struct ipv6hdr));
        skb_reset_network_header(skb);
        ipv6h = ipv6_hdr(skb);
@@ -1076,6 +1147,7 @@ tx_err_dst_release:
        dst_release(dst);
        return err;
 }
+EXPORT_SYMBOL(ip6_tnl_xmit);
 
 static inline int
 ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -1099,7 +1171,6 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
                encap_limit = t->parms.encap_limit;
 
        memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
-       fl6.flowi6_proto = IPPROTO_IPIP;
 
        dsfield = ipv4_get_dsfield(iph);
 
@@ -1109,7 +1180,8 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
        if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
                fl6.flowi6_mark = skb->mark;
 
-       err = ip6_tnl_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
+       err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
+                          IPPROTO_IPIP);
        if (err != 0) {
                /* XXX: send ICMP error even if DF is not set. */
                if (err == -EMSGSIZE)
@@ -1153,7 +1225,6 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
                encap_limit = t->parms.encap_limit;
 
        memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
-       fl6.flowi6_proto = IPPROTO_IPV6;
 
        dsfield = ipv6_get_dsfield(ipv6h);
        if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
@@ -1163,7 +1234,8 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
        if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
                fl6.flowi6_mark = skb->mark;
 
-       err = ip6_tnl_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
+       err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
+                          IPPROTO_IPV6);
        if (err != 0) {
                if (err == -EMSGSIZE)
                        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
@@ -1174,7 +1246,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 }
 
 static netdev_tx_t
-ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct ip6_tnl *t = netdev_priv(dev);
        struct net_device_stats *stats = &t->dev->stats;
@@ -1370,6 +1442,8 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        struct net *net = t->net;
        struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
 
+       memset(&p1, 0, sizeof(p1));
+
        switch (cmd) {
        case SIOCGETTUNNEL:
                if (dev == ip6n->fb_tnl_dev) {
@@ -1464,8 +1538,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
  *   %-EINVAL if mtu too small
  **/
 
-static int
-ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
+int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
 {
        struct ip6_tnl *tnl = netdev_priv(dev);
 
@@ -1481,6 +1554,7 @@ ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
        dev->mtu = new_mtu;
        return 0;
 }
+EXPORT_SYMBOL(ip6_tnl_change_mtu);
 
 int ip6_tnl_get_iflink(const struct net_device *dev)
 {
@@ -1493,7 +1567,7 @@ EXPORT_SYMBOL(ip6_tnl_get_iflink);
 static const struct net_device_ops ip6_tnl_netdev_ops = {
        .ndo_init       = ip6_tnl_dev_init,
        .ndo_uninit     = ip6_tnl_dev_uninit,
-       .ndo_start_xmit = ip6_tnl_xmit,
+       .ndo_start_xmit = ip6_tnl_start_xmit,
        .ndo_do_ioctl   = ip6_tnl_ioctl,
        .ndo_change_mtu = ip6_tnl_change_mtu,
        .ndo_get_stats  = ip6_get_stats,
@@ -1549,13 +1623,25 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
                return -ENOMEM;
 
        ret = dst_cache_init(&t->dst_cache, GFP_KERNEL);
-       if (ret) {
-               free_percpu(dev->tstats);
-               dev->tstats = NULL;
-               return ret;
-       }
+       if (ret)
+               goto free_stats;
+
+       ret = gro_cells_init(&t->gro_cells, dev);
+       if (ret)
+               goto destroy_dst;
+
+       t->hlen = 0;
+       t->tun_hlen = 0;
 
        return 0;
+
+destroy_dst:
+       dst_cache_destroy(&t->dst_cache);
+free_stats:
+       free_percpu(dev->tstats);
+       dev->tstats = NULL;
+
+       return ret;
 }
 
 /**
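
The TUNNEL_SEQ branch in __ip6_tnl_rcv() above drops a packet when (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0. The signed cast is what makes the comparison survive 32-bit wraparound (serial-number arithmetic); a plain unsigned '<' would misorder values once the counter wraps past UINT32_MAX. A standalone sketch:

/* Standalone sketch: signed-difference comparison as used by the
 * TUNNEL_SEQ replay check above.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool seq_is_stale(uint32_t seq, uint32_t expected)
{
        return (int32_t)(seq - expected) < 0;   /* negative: behind */
}

int main(void)
{
        printf("%d\n", seq_is_stale(5, 10));            /* 1: replay */
        printf("%d\n", seq_is_stale(10, 5));            /* 0: in order */
        printf("%d\n", seq_is_stale(2, 0xFFFFFFF0u));   /* 0: wrapped, newer */
        return 0;
}
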
index a10e77103c88dfc952f80c645a7b87c57b8f6dbf..f2e2013f834621fe2598d9dd9b452b83cd4f9018 100644 (file)
@@ -1984,10 +1984,10 @@ int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
 
 static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-       IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
-                        IPSTATS_MIB_OUTFORWDATAGRAMS);
-       IP6_ADD_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
-                        IPSTATS_MIB_OUTOCTETS, skb->len);
+       __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+                       IPSTATS_MIB_OUTFORWDATAGRAMS);
+       __IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
+                       IPSTATS_MIB_OUTOCTETS, skb->len);
        return dst_output(net, sk, skb);
 }
 
@@ -2268,7 +2268,7 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
        mfcs.mfcs_packets = c->mfc_un.res.pkt;
        mfcs.mfcs_bytes = c->mfc_un.res.bytes;
        mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
-       if (nla_put(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs) < 0)
+       if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) < 0)
                return -EMSGSIZE;
 
        rtm->rtm_type = RTN_MULTICAST;
@@ -2411,7 +2411,7 @@ static int mr6_msgsize(bool unresolved, int maxvif)
                      + nla_total_size(0)       /* RTA_MULTIPATH */
                      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
                                                /* RTA_MFC_STATS */
-                     + nla_total_size(sizeof(struct rta_mfc_stats))
+                     + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
                ;
 
        return len;
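
The ip6mr.c hunks above switch RTA_MFC_STATS to nla_put_64bit()/nla_total_size_64bit() with RTA_PAD. Netlink attributes are only guaranteed 4-byte alignment, so a payload of u64s such as struct rta_mfc_stats can land on a 4-byte boundary and fault on strict-alignment architectures; the *_64bit helpers emit a zero-length pad attribute first whenever that would happen, and the sizing must reserve room for it. A simplified standalone model of the sizing rule (the constants mirror netlink's, but this is not the kernel implementation):

/* Simplified model: room for the payload attribute plus one possible
 * zero-length pad attribute (header only) that realigns the payload
 * to 8 bytes.
 */
#include <stdio.h>

#define NLA_HDRLEN   4                  /* struct nlattr header */
#define NLA_ALIGN(x) (((x) + 3U) & ~3U)

static unsigned int total_size_64bit(unsigned int payload)
{
        return NLA_ALIGN(NLA_HDRLEN + payload) + NLA_HDRLEN;
}

int main(void)
{
        /* struct rta_mfc_stats is 3 x u64 = 24 bytes */
        printf("%u\n", total_size_64bit(24));   /* 32: 28 + 4-byte pad attr */
        return 0;
}
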
index 4ff4b29894ebfec583c9846e01b599743afcd70f..a9895e15ee9c270f13842121b7dc66ed97a71135 100644 (file)
@@ -473,7 +473,7 @@ sticky_done:
                struct msghdr msg;
                struct flowi6 fl6;
                struct sockcm_cookie sockc_junk;
-               int junk;
+               struct ipcm6_cookie ipc6;
 
                memset(&fl6, 0, sizeof(fl6));
                fl6.flowi6_oif = sk->sk_bound_dev_if;
@@ -503,9 +503,9 @@ sticky_done:
 
                msg.msg_controllen = optlen;
                msg.msg_control = (void *)(opt+1);
+               ipc6.opt = opt;
 
-               retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk,
-                                            &junk, &junk, &sockc_junk);
+               retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, &ipc6, &sockc_junk);
                if (retv)
                        goto done;
 update:
index 86b67b70b62677ac014577f58d0bdafdd18ecf9e..63e06c3dd3191dfdad5a422310536a7d76c7032d 100644 (file)
@@ -39,34 +39,12 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
 MODULE_DESCRIPTION("IPv6 packet filter");
 
-/*#define DEBUG_IP_FIREWALL*/
-/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
-/*#define DEBUG_IP_FIREWALL_USER*/
-
-#ifdef DEBUG_IP_FIREWALL
-#define dprintf(format, args...) pr_info(format , ## args)
-#else
-#define dprintf(format, args...)
-#endif
-
-#ifdef DEBUG_IP_FIREWALL_USER
-#define duprintf(format, args...) pr_info(format , ## args)
-#else
-#define duprintf(format, args...)
-#endif
-
 #ifdef CONFIG_NETFILTER_DEBUG
 #define IP_NF_ASSERT(x)        WARN_ON(!(x))
 #else
 #define IP_NF_ASSERT(x)
 #endif
 
-#if 0
-/* All the better to debug you with... */
-#define static
-#define inline
-#endif
-
 void *ip6t_alloc_initial_table(const struct xt_table *info)
 {
        return xt_alloc_initial_table(ip6t, IP6T);
@@ -100,35 +78,18 @@ ip6_packet_match(const struct sk_buff *skb,
        if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
                                       &ip6info->src), IP6T_INV_SRCIP) ||
            FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
-                                      &ip6info->dst), IP6T_INV_DSTIP)) {
-               dprintf("Source or dest mismatch.\n");
-/*
-               dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
-                       ipinfo->smsk.s_addr, ipinfo->src.s_addr,
-                       ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
-               dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
-                       ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
-                       ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
+                                      &ip6info->dst), IP6T_INV_DSTIP))
                return false;
-       }
 
        ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
 
-       if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
-               dprintf("VIA in mismatch (%s vs %s).%s\n",
-                       indev, ip6info->iniface,
-                       ip6info->invflags & IP6T_INV_VIA_IN ? " (INV)" : "");
+       if (FWINV(ret != 0, IP6T_INV_VIA_IN))
                return false;
-       }
 
        ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
 
-       if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
-               dprintf("VIA out mismatch (%s vs %s).%s\n",
-                       outdev, ip6info->outiface,
-                       ip6info->invflags & IP6T_INV_VIA_OUT ? " (INV)" : "");
+       if (FWINV(ret != 0, IP6T_INV_VIA_OUT))
                return false;
-       }
 
 /* ... might want to do something with class and flowlabel here ... */
 
@@ -145,11 +106,6 @@ ip6_packet_match(const struct sk_buff *skb,
                }
                *fragoff = _frag_off;
 
-               dprintf("Packet protocol %hi ?= %s%hi.\n",
-                               protohdr,
-                               ip6info->invflags & IP6T_INV_PROTO ? "!":"",
-                               ip6info->proto);
-
                if (ip6info->proto == protohdr) {
                        if (ip6info->invflags & IP6T_INV_PROTO)
                                return false;
@@ -169,16 +125,11 @@ ip6_packet_match(const struct sk_buff *skb,
 static bool
 ip6_checkentry(const struct ip6t_ip6 *ipv6)
 {
-       if (ipv6->flags & ~IP6T_F_MASK) {
-               duprintf("Unknown flag bits set: %08X\n",
-                        ipv6->flags & ~IP6T_F_MASK);
+       if (ipv6->flags & ~IP6T_F_MASK)
                return false;
-       }
-       if (ipv6->invflags & ~IP6T_INV_MASK) {
-               duprintf("Unknown invflag bits set: %08X\n",
-                        ipv6->invflags & ~IP6T_INV_MASK);
+       if (ipv6->invflags & ~IP6T_INV_MASK)
                return false;
-       }
+
        return true;
 }
 
@@ -446,13 +397,21 @@ ip6t_do_table(struct sk_buff *skb,
        xt_write_recseq_end(addend);
        local_bh_enable();
 
-#ifdef DEBUG_ALLOW_ALL
-       return NF_ACCEPT;
-#else
        if (acpar.hotdrop)
                return NF_DROP;
        else return verdict;
-#endif
+}
+
+static bool find_jump_target(const struct xt_table_info *t,
+                            const struct ip6t_entry *target)
+{
+       struct ip6t_entry *iter;
+
+       xt_entry_foreach(iter, t->entries, t->size) {
+               if (iter == target)
+                       return true;
+       }
+       return false;
 }
 
 /* Figures out from what hook each rule can be called: returns 0 if
@@ -480,11 +439,9 @@ mark_source_chains(const struct xt_table_info *newinfo,
                                = (void *)ip6t_get_target_c(e);
                        int visited = e->comefrom & (1 << hook);
 
-                       if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
-                               pr_err("iptables: loop hook %u pos %u %08X.\n",
-                                      hook, pos, e->comefrom);
+                       if (e->comefrom & (1 << NF_INET_NUMHOOKS))
                                return 0;
-                       }
+
                        e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
 
                        /* Unconditional return/END. */
@@ -496,26 +453,13 @@ mark_source_chains(const struct xt_table_info *newinfo,
 
                                if ((strcmp(t->target.u.user.name,
                                            XT_STANDARD_TARGET) == 0) &&
-                                   t->verdict < -NF_MAX_VERDICT - 1) {
-                                       duprintf("mark_source_chains: bad "
-                                               "negative verdict (%i)\n",
-                                                               t->verdict);
+                                   t->verdict < -NF_MAX_VERDICT - 1)
                                        return 0;
-                               }
 
                                /* Return: backtrack through the last
                                   big jump. */
                                do {
                                        e->comefrom ^= (1<<NF_INET_NUMHOOKS);
-#ifdef DEBUG_IP_FIREWALL_USER
-                                       if (e->comefrom
-                                           & (1 << NF_INET_NUMHOOKS)) {
-                                               duprintf("Back unset "
-                                                        "on hook %u "
-                                                        "rule %u\n",
-                                                        hook, pos);
-                                       }
-#endif
                                        oldpos = pos;
                                        pos = e->counters.pcnt;
                                        e->counters.pcnt = 0;
@@ -532,6 +476,8 @@ mark_source_chains(const struct xt_table_info *newinfo,
                                size = e->next_offset;
                                e = (struct ip6t_entry *)
                                        (entry0 + pos + size);
+                               if (pos + size >= newinfo->size)
+                                       return 0;
                                e->counters.pcnt = pos;
                                pos += size;
                        } else {
@@ -540,19 +486,16 @@ mark_source_chains(const struct xt_table_info *newinfo,
                                if (strcmp(t->target.u.user.name,
                                           XT_STANDARD_TARGET) == 0 &&
                                    newpos >= 0) {
-                                       if (newpos > newinfo->size -
-                                               sizeof(struct ip6t_entry)) {
-                                               duprintf("mark_source_chains: "
-                                                       "bad verdict (%i)\n",
-                                                               newpos);
-                                               return 0;
-                                       }
                                        /* This a jump; chase it. */
-                                       duprintf("Jump rule %u -> %u\n",
-                                                pos, newpos);
+                                       e = (struct ip6t_entry *)
+                                               (entry0 + newpos);
+                                       if (!find_jump_target(newinfo, e))
+                                               return 0;
                                } else {
                                        /* ... this is a fallthru */
                                        newpos = pos + e->next_offset;
+                                       if (newpos >= newinfo->size)
+                                               return 0;
                                }
                                e = (struct ip6t_entry *)
                                        (entry0 + newpos);
@@ -560,8 +503,7 @@ mark_source_chains(const struct xt_table_info *newinfo,
                                pos = newpos;
                        }
                }
-next:
-               duprintf("Finished chain %u\n", hook);
+next:          ;
        }
        return 1;
 }
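
find_jump_target(), added above, tightens verdict handling in mark_source_chains(): a user-supplied jump offset used to pass with only an upper-bound check, so it could land in the middle of an entry; now the offset is accepted only if a linear walk over the rule blob lands on it exactly, and the fallthrough paths gain matching bounds checks against newinfo->size. A standalone sketch of the positional check; struct entry is an invented stand-in for struct ip6t_entry, reduced to the one field the walk needs:

/* Standalone sketch: accept a jump offset only if walking the rule
 * blob entry by entry lands on it exactly.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct entry {
        unsigned int next_offset;       /* total size of this entry */
};

static bool offset_is_entry_start(const unsigned char *blob,
                                  unsigned int size, unsigned int target)
{
        unsigned int off = 0;

        while (off < size) {
                struct entry e;

                if (off == target)
                        return true;
                memcpy(&e, blob + off, sizeof(e));      /* alignment-safe read */
                if (e.next_offset == 0)
                        break;                          /* malformed blob */
                off += e.next_offset;
        }
        return false;
}

int main(void)
{
        unsigned char blob[24] = { 0 };
        unsigned int sizes[3] = { 8, 8, 8 };

        for (int i = 0, off = 0; i < 3; off += sizes[i], i++)
                memcpy(blob + off, &sizes[i], sizeof(unsigned int));

        printf("%d %d\n", offset_is_entry_start(blob, 24, 8),   /* 1 */
               offset_is_entry_start(blob, 24, 10));            /* 0 */
        return 0;
}
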
@@ -579,41 +521,15 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net)
        module_put(par.match->me);
 }
 
-static int
-check_entry(const struct ip6t_entry *e)
-{
-       const struct xt_entry_target *t;
-
-       if (!ip6_checkentry(&e->ipv6))
-               return -EINVAL;
-
-       if (e->target_offset + sizeof(struct xt_entry_target) >
-           e->next_offset)
-               return -EINVAL;
-
-       t = ip6t_get_target_c(e);
-       if (e->target_offset + t->u.target_size > e->next_offset)
-               return -EINVAL;
-
-       return 0;
-}
-
 static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
 {
        const struct ip6t_ip6 *ipv6 = par->entryinfo;
-       int ret;
 
        par->match     = m->u.kernel.match;
        par->matchinfo = m->data;
 
-       ret = xt_check_match(par, m->u.match_size - sizeof(*m),
-                            ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
-       if (ret < 0) {
-               duprintf("ip_tables: check failed for `%s'.\n",
-                        par.match->name);
-               return ret;
-       }
-       return 0;
+       return xt_check_match(par, m->u.match_size - sizeof(*m),
+                             ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
 }
 
 static int
@@ -624,10 +540,9 @@ find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
 
        match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
                                      m->u.user.revision);
-       if (IS_ERR(match)) {
-               duprintf("find_check_match: `%s' not found\n", m->u.user.name);
+       if (IS_ERR(match))
                return PTR_ERR(match);
-       }
+
        m->u.kernel.match = match;
 
        ret = check_match(m, par);
@@ -652,17 +567,11 @@ static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
                .hook_mask = e->comefrom,
                .family    = NFPROTO_IPV6,
        };
-       int ret;
 
        t = ip6t_get_target(e);
-       ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
-             e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
-       if (ret < 0) {
-               duprintf("ip_tables: check failed for `%s'.\n",
-                        t->u.kernel.target->name);
-               return ret;
-       }
-       return 0;
+       return xt_check_target(&par, t->u.target_size - sizeof(*t),
+                              e->ipv6.proto,
+                              e->ipv6.invflags & IP6T_INV_PROTO);
 }
 
 static int
@@ -675,10 +584,12 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
        unsigned int j;
        struct xt_mtchk_param mtpar;
        struct xt_entry_match *ematch;
+       unsigned long pcnt;
 
-       e->counters.pcnt = xt_percpu_counter_alloc();
-       if (IS_ERR_VALUE(e->counters.pcnt))
+       pcnt = xt_percpu_counter_alloc();
+       if (IS_ERR_VALUE(pcnt))
                return -ENOMEM;
+       e->counters.pcnt = pcnt;
 
        j = 0;
        mtpar.net       = net;
@@ -697,7 +608,6 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
        target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
                                        t->u.user.revision);
        if (IS_ERR(target)) {
-               duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
                ret = PTR_ERR(target);
                goto cleanup_matches;
        }
@@ -750,19 +660,18 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
 
        if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
            (unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||
-           (unsigned char *)e + e->next_offset > limit) {
-               duprintf("Bad offset %p\n", e);
+           (unsigned char *)e + e->next_offset > limit)
                return -EINVAL;
-       }
 
        if (e->next_offset
-           < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) {
-               duprintf("checking: element %p size %u\n",
-                        e, e->next_offset);
+           < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target))
                return -EINVAL;
-       }
 
-       err = check_entry(e);
+       if (!ip6_checkentry(&e->ipv6))
+               return -EINVAL;
+
+       err = xt_check_entry_offsets(e, e->elems, e->target_offset,
+                                    e->next_offset);
        if (err)
                return err;
 
@@ -773,12 +682,9 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
                if ((unsigned char *)e - base == hook_entries[h])
                        newinfo->hook_entry[h] = hook_entries[h];
                if ((unsigned char *)e - base == underflows[h]) {
-                       if (!check_underflow(e)) {
-                               pr_debug("Underflows must be unconditional and "
-                                        "use the STANDARD target with "
-                                        "ACCEPT/DROP\n");
+                       if (!check_underflow(e))
                                return -EINVAL;
-                       }
+
                        newinfo->underflow[h] = underflows[h];
                }
        }
@@ -830,7 +736,6 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
                newinfo->underflow[i] = 0xFFFFFFFF;
        }
 
-       duprintf("translate_table: size %u\n", newinfo->size);
        i = 0;
        /* Walk through entries, checking offsets. */
        xt_entry_foreach(iter, entry0, newinfo->size) {
@@ -847,27 +752,18 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
                        ++newinfo->stacksize;
        }
 
-       if (i != repl->num_entries) {
-               duprintf("translate_table: %u not %u entries\n",
-                        i, repl->num_entries);
+       if (i != repl->num_entries)
                return -EINVAL;
-       }
 
        /* Check hooks all assigned */
        for (i = 0; i < NF_INET_NUMHOOKS; i++) {
                /* Only hooks which are valid */
                if (!(repl->valid_hooks & (1 << i)))
                        continue;
-               if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
-                       duprintf("Invalid hook entry %u %u\n",
-                                i, repl->hook_entry[i]);
+               if (newinfo->hook_entry[i] == 0xFFFFFFFF)
                        return -EINVAL;
-               }
-               if (newinfo->underflow[i] == 0xFFFFFFFF) {
-                       duprintf("Invalid underflow %u %u\n",
-                                i, repl->underflow[i]);
+               if (newinfo->underflow[i] == 0xFFFFFFFF)
                        return -EINVAL;
-               }
        }
 
        if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
@@ -1095,11 +991,8 @@ static int get_info(struct net *net, void __user *user,
        struct xt_table *t;
        int ret;
 
-       if (*len != sizeof(struct ip6t_getinfo)) {
-               duprintf("length %u != %zu\n", *len,
-                        sizeof(struct ip6t_getinfo));
+       if (*len != sizeof(struct ip6t_getinfo))
                return -EINVAL;
-       }
 
        if (copy_from_user(name, user, sizeof(name)) != 0)
                return -EFAULT;
@@ -1157,31 +1050,24 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
        struct ip6t_get_entries get;
        struct xt_table *t;
 
-       if (*len < sizeof(get)) {
-               duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
+       if (*len < sizeof(get))
                return -EINVAL;
-       }
        if (copy_from_user(&get, uptr, sizeof(get)) != 0)
                return -EFAULT;
-       if (*len != sizeof(struct ip6t_get_entries) + get.size) {
-               duprintf("get_entries: %u != %zu\n",
-                        *len, sizeof(get) + get.size);
+       if (*len != sizeof(struct ip6t_get_entries) + get.size)
                return -EINVAL;
-       }
+
        get.name[sizeof(get.name) - 1] = '\0';
 
        t = xt_find_table_lock(net, AF_INET6, get.name);
        if (!IS_ERR_OR_NULL(t)) {
                struct xt_table_info *private = t->private;
-               duprintf("t->private->number = %u\n", private->number);
                if (get.size == private->size)
                        ret = copy_entries_to_user(private->size,
                                                   t, uptr->entrytable);
-               else {
-                       duprintf("get_entries: I've got %u not %u!\n",
-                                private->size, get.size);
+               else
                        ret = -EAGAIN;
-               }
+
                module_put(t->me);
                xt_table_unlock(t);
        } else
@@ -1217,8 +1103,6 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
 
        /* You lied! */
        if (valid_hooks != t->valid_hooks) {
-               duprintf("Valid hook crap: %08X vs %08X\n",
-                        valid_hooks, t->valid_hooks);
                ret = -EINVAL;
                goto put_module;
        }
@@ -1228,8 +1112,6 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
                goto put_module;
 
        /* Update module usage count based on number of rules */
-       duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
-               oldinfo->number, oldinfo->initial_entries, newinfo->number);
        if ((oldinfo->number > oldinfo->initial_entries) ||
            (newinfo->number <= oldinfo->initial_entries))
                module_put(t->me);
@@ -1298,8 +1180,6 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
        if (ret != 0)
                goto free_newinfo;
 
-       duprintf("ip_tables: Translated table\n");
-
        ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
                           tmp.num_counters, tmp.counters);
        if (ret)
@@ -1321,55 +1201,16 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
        unsigned int i;
        struct xt_counters_info tmp;
        struct xt_counters *paddc;
-       unsigned int num_counters;
-       char *name;
-       int size;
-       void *ptmp;
        struct xt_table *t;
        const struct xt_table_info *private;
        int ret = 0;
        struct ip6t_entry *iter;
        unsigned int addend;
-#ifdef CONFIG_COMPAT
-       struct compat_xt_counters_info compat_tmp;
 
-       if (compat) {
-               ptmp = &compat_tmp;
-               size = sizeof(struct compat_xt_counters_info);
-       } else
-#endif
-       {
-               ptmp = &tmp;
-               size = sizeof(struct xt_counters_info);
-       }
-
-       if (copy_from_user(ptmp, user, size) != 0)
-               return -EFAULT;
-
-#ifdef CONFIG_COMPAT
-       if (compat) {
-               num_counters = compat_tmp.num_counters;
-               name = compat_tmp.name;
-       } else
-#endif
-       {
-               num_counters = tmp.num_counters;
-               name = tmp.name;
-       }
-
-       if (len != size + num_counters * sizeof(struct xt_counters))
-               return -EINVAL;
-
-       paddc = vmalloc(len - size);
-       if (!paddc)
-               return -ENOMEM;
-
-       if (copy_from_user(paddc, user + size, len - size) != 0) {
-               ret = -EFAULT;
-               goto free;
-       }
-
-       t = xt_find_table_lock(net, AF_INET6, name);
+       paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
+       if (IS_ERR(paddc))
+               return PTR_ERR(paddc);
+       t = xt_find_table_lock(net, AF_INET6, tmp.name);
        if (IS_ERR_OR_NULL(t)) {
                ret = t ? PTR_ERR(t) : -ENOENT;
                goto free;
@@ -1377,7 +1218,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
 
        local_bh_disable();
        private = t->private;
-       if (private->number != num_counters) {
+       if (private->number != tmp.num_counters) {
                ret = -EINVAL;
                goto unlock_up_free;
        }
@@ -1456,7 +1297,6 @@ compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
 
 static int
 compat_find_calc_match(struct xt_entry_match *m,
-                      const char *name,
                       const struct ip6t_ip6 *ipv6,
                       int *size)
 {
@@ -1464,11 +1304,9 @@ compat_find_calc_match(struct xt_entry_match *m,
 
        match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
                                      m->u.user.revision);
-       if (IS_ERR(match)) {
-               duprintf("compat_check_calc_match: `%s' not found\n",
-                        m->u.user.name);
+       if (IS_ERR(match))
                return PTR_ERR(match);
-       }
+
        m->u.kernel.match = match;
        *size += xt_compat_match_offset(match);
        return 0;
@@ -1491,35 +1329,29 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
                                  struct xt_table_info *newinfo,
                                  unsigned int *size,
                                  const unsigned char *base,
-                                 const unsigned char *limit,
-                                 const unsigned int *hook_entries,
-                                 const unsigned int *underflows,
-                                 const char *name)
+                                 const unsigned char *limit)
 {
        struct xt_entry_match *ematch;
        struct xt_entry_target *t;
        struct xt_target *target;
        unsigned int entry_offset;
        unsigned int j;
-       int ret, off, h;
+       int ret, off;
 
-       duprintf("check_compat_entry_size_and_hooks %p\n", e);
        if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
            (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit ||
-           (unsigned char *)e + e->next_offset > limit) {
-               duprintf("Bad offset %p, limit = %p\n", e, limit);
+           (unsigned char *)e + e->next_offset > limit)
                return -EINVAL;
-       }
 
        if (e->next_offset < sizeof(struct compat_ip6t_entry) +
-                            sizeof(struct compat_xt_entry_target)) {
-               duprintf("checking: element %p size %u\n",
-                        e, e->next_offset);
+                            sizeof(struct compat_xt_entry_target))
                return -EINVAL;
-       }
 
-       /* For purposes of check_entry casting the compat entry is fine */
-       ret = check_entry((struct ip6t_entry *)e);
+       if (!ip6_checkentry(&e->ipv6))
+               return -EINVAL;
+
+       ret = xt_compat_check_entry_offsets(e, e->elems,
+                                           e->target_offset, e->next_offset);
        if (ret)
                return ret;
 
@@ -1527,7 +1359,7 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
        entry_offset = (void *)e - (void *)base;
        j = 0;
        xt_ematch_foreach(ematch, e) {
-               ret = compat_find_calc_match(ematch, name, &e->ipv6, &off);
+               ret = compat_find_calc_match(ematch, &e->ipv6, &off);
                if (ret != 0)
                        goto release_matches;
                ++j;
@@ -1537,8 +1369,6 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
        target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
                                        t->u.user.revision);
        if (IS_ERR(target)) {
-               duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
-                        t->u.user.name);
                ret = PTR_ERR(target);
                goto release_matches;
        }
@@ -1550,17 +1380,6 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
        if (ret)
                goto out;
 
-       /* Check hooks & underflows */
-       for (h = 0; h < NF_INET_NUMHOOKS; h++) {
-               if ((unsigned char *)e - base == hook_entries[h])
-                       newinfo->hook_entry[h] = hook_entries[h];
-               if ((unsigned char *)e - base == underflows[h])
-                       newinfo->underflow[h] = underflows[h];
-       }
-
-       /* Clear counters and comefrom */
-       memset(&e->counters, 0, sizeof(e->counters));
-       e->comefrom = 0;
        return 0;
 
 out:
@@ -1574,18 +1393,17 @@ release_matches:
        return ret;
 }
 
-static int
+static void
 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
-                           unsigned int *size, const char *name,
+                           unsigned int *size,
                            struct xt_table_info *newinfo, unsigned char *base)
 {
        struct xt_entry_target *t;
        struct ip6t_entry *de;
        unsigned int origsize;
-       int ret, h;
+       int h;
        struct xt_entry_match *ematch;
 
-       ret = 0;
        origsize = *size;
        de = (struct ip6t_entry *)*dstptr;
        memcpy(de, e, sizeof(struct ip6t_entry));
@@ -1594,11 +1412,9 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
        *dstptr += sizeof(struct ip6t_entry);
        *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
 
-       xt_ematch_foreach(ematch, e) {
-               ret = xt_compat_match_from_user(ematch, dstptr, size);
-               if (ret != 0)
-                       return ret;
-       }
+       xt_ematch_foreach(ematch, e)
+               xt_compat_match_from_user(ematch, dstptr, size);
+
        de->target_offset = e->target_offset - (origsize - *size);
        t = compat_ip6t_get_target(e);
        xt_compat_target_from_user(t, dstptr, size);
@@ -1610,183 +1426,79 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
                if ((unsigned char *)de - base < newinfo->underflow[h])
                        newinfo->underflow[h] -= origsize - *size;
        }
-       return ret;
-}
-
-static int compat_check_entry(struct ip6t_entry *e, struct net *net,
-                             const char *name)
-{
-       unsigned int j;
-       int ret = 0;
-       struct xt_mtchk_param mtpar;
-       struct xt_entry_match *ematch;
-
-       e->counters.pcnt = xt_percpu_counter_alloc();
-       if (IS_ERR_VALUE(e->counters.pcnt))
-               return -ENOMEM;
-       j = 0;
-       mtpar.net       = net;
-       mtpar.table     = name;
-       mtpar.entryinfo = &e->ipv6;
-       mtpar.hook_mask = e->comefrom;
-       mtpar.family    = NFPROTO_IPV6;
-       xt_ematch_foreach(ematch, e) {
-               ret = check_match(ematch, &mtpar);
-               if (ret != 0)
-                       goto cleanup_matches;
-               ++j;
-       }
-
-       ret = check_target(e, net, name);
-       if (ret)
-               goto cleanup_matches;
-       return 0;
-
- cleanup_matches:
-       xt_ematch_foreach(ematch, e) {
-               if (j-- == 0)
-                       break;
-               cleanup_match(ematch, net);
-       }
-
-       xt_percpu_counter_free(e->counters.pcnt);
-
-       return ret;
 }
 
 static int
 translate_compat_table(struct net *net,
-                      const char *name,
-                      unsigned int valid_hooks,
                       struct xt_table_info **pinfo,
                       void **pentry0,
-                      unsigned int total_size,
-                      unsigned int number,
-                      unsigned int *hook_entries,
-                      unsigned int *underflows)
+                      const struct compat_ip6t_replace *compatr)
 {
        unsigned int i, j;
        struct xt_table_info *newinfo, *info;
        void *pos, *entry0, *entry1;
        struct compat_ip6t_entry *iter0;
-       struct ip6t_entry *iter1;
+       struct ip6t_replace repl;
        unsigned int size;
        int ret = 0;
 
        info = *pinfo;
        entry0 = *pentry0;
-       size = total_size;
-       info->number = number;
-
-       /* Init all hooks to impossible value. */
-       for (i = 0; i < NF_INET_NUMHOOKS; i++) {
-               info->hook_entry[i] = 0xFFFFFFFF;
-               info->underflow[i] = 0xFFFFFFFF;
-       }
+       size = compatr->size;
+       info->number = compatr->num_entries;
 
-       duprintf("translate_compat_table: size %u\n", info->size);
        j = 0;
        xt_compat_lock(AF_INET6);
-       xt_compat_init_offsets(AF_INET6, number);
+       xt_compat_init_offsets(AF_INET6, compatr->num_entries);
        /* Walk through entries, checking offsets. */
-       xt_entry_foreach(iter0, entry0, total_size) {
+       xt_entry_foreach(iter0, entry0, compatr->size) {
                ret = check_compat_entry_size_and_hooks(iter0, info, &size,
                                                        entry0,
-                                                       entry0 + total_size,
-                                                       hook_entries,
-                                                       underflows,
-                                                       name);
+                                                       entry0 + compatr->size);
                if (ret != 0)
                        goto out_unlock;
                ++j;
        }
 
        ret = -EINVAL;
-       if (j != number) {
-               duprintf("translate_compat_table: %u not %u entries\n",
-                        j, number);
+       if (j != compatr->num_entries)
                goto out_unlock;
-       }
-
-       /* Check hooks all assigned */
-       for (i = 0; i < NF_INET_NUMHOOKS; i++) {
-               /* Only hooks which are valid */
-               if (!(valid_hooks & (1 << i)))
-                       continue;
-               if (info->hook_entry[i] == 0xFFFFFFFF) {
-                       duprintf("Invalid hook entry %u %u\n",
-                                i, hook_entries[i]);
-                       goto out_unlock;
-               }
-               if (info->underflow[i] == 0xFFFFFFFF) {
-                       duprintf("Invalid underflow %u %u\n",
-                                i, underflows[i]);
-                       goto out_unlock;
-               }
-       }
 
        ret = -ENOMEM;
        newinfo = xt_alloc_table_info(size);
        if (!newinfo)
                goto out_unlock;
 
-       newinfo->number = number;
+       newinfo->number = compatr->num_entries;
        for (i = 0; i < NF_INET_NUMHOOKS; i++) {
-               newinfo->hook_entry[i] = info->hook_entry[i];
-               newinfo->underflow[i] = info->underflow[i];
+               newinfo->hook_entry[i] = compatr->hook_entry[i];
+               newinfo->underflow[i] = compatr->underflow[i];
        }
        entry1 = newinfo->entries;
        pos = entry1;
-       size = total_size;
-       xt_entry_foreach(iter0, entry0, total_size) {
-               ret = compat_copy_entry_from_user(iter0, &pos, &size,
-                                                 name, newinfo, entry1);
-               if (ret != 0)
-                       break;
-       }
+       size = compatr->size;
+       xt_entry_foreach(iter0, entry0, compatr->size)
+               compat_copy_entry_from_user(iter0, &pos, &size,
+                                           newinfo, entry1);
+
+       /* all module references in entry0 are now gone. */
        xt_compat_flush_offsets(AF_INET6);
        xt_compat_unlock(AF_INET6);
-       if (ret)
-               goto free_newinfo;
 
-       ret = -ELOOP;
-       if (!mark_source_chains(newinfo, valid_hooks, entry1))
-               goto free_newinfo;
+       memcpy(&repl, compatr, sizeof(*compatr));
 
-       i = 0;
-       xt_entry_foreach(iter1, entry1, newinfo->size) {
-               ret = compat_check_entry(iter1, net, name);
-               if (ret != 0)
-                       break;
-               ++i;
-               if (strcmp(ip6t_get_target(iter1)->u.user.name,
-                   XT_ERROR_TARGET) == 0)
-                       ++newinfo->stacksize;
-       }
-       if (ret) {
-               /*
-                * The first i matches need cleanup_entry (calls ->destroy)
-                * because they had called ->check already. The other j-i
-                * entries need only release.
-                */
-               int skip = i;
-               j -= i;
-               xt_entry_foreach(iter0, entry0, newinfo->size) {
-                       if (skip-- > 0)
-                               continue;
-                       if (j-- == 0)
-                               break;
-                       compat_release_entry(iter0);
-               }
-               xt_entry_foreach(iter1, entry1, newinfo->size) {
-                       if (i-- == 0)
-                               break;
-                       cleanup_entry(iter1, net);
-               }
-               xt_free_table_info(newinfo);
-               return ret;
+       for (i = 0; i < NF_INET_NUMHOOKS; i++) {
+               repl.hook_entry[i] = newinfo->hook_entry[i];
+               repl.underflow[i] = newinfo->underflow[i];
        }
 
+       repl.num_counters = 0;
+       repl.counters = NULL;
+       repl.size = newinfo->size;
+       ret = translate_table(net, newinfo, entry1, &repl);
+       if (ret)
+               goto free_newinfo;
+
        *pinfo = newinfo;
        *pentry0 = entry1;
        xt_free_table_info(info);
@@ -1794,17 +1506,16 @@ translate_compat_table(struct net *net,
 
 free_newinfo:
        xt_free_table_info(newinfo);
-out:
-       xt_entry_foreach(iter0, entry0, total_size) {
+       return ret;
+out_unlock:
+       xt_compat_flush_offsets(AF_INET6);
+       xt_compat_unlock(AF_INET6);
+       xt_entry_foreach(iter0, entry0, compatr->size) {
                if (j-- == 0)
                        break;
                compat_release_entry(iter0);
        }
        return ret;
-out_unlock:
-       xt_compat_flush_offsets(AF_INET6);
-       xt_compat_unlock(AF_INET6);
-       goto out;
 }
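
The rewritten translate_compat_table() stops duplicating the native path: hook validation, mark_source_chains(), the per-entry ->check() calls and the intricate partial-cleanup logic are all gone from the compat side. It now only verifies compat entry sizes and offsets, copies everything into native layout, then builds a native ip6t_replace and lets translate_table() perform the semantic validation and error unwinding once, for both paths. Condensed, with all identifiers as in the hunks above:

    /* Condensed flow sketch; names as in the diff. */
    memcpy(&repl, compatr, sizeof(*compatr));
    for (i = 0; i < NF_INET_NUMHOOKS; i++) {
            repl.hook_entry[i] = newinfo->hook_entry[i]; /* already rebased */
            repl.underflow[i]  = newinfo->underflow[i];
    }
    repl.num_counters = 0;     /* counters are handled later, in __do_replace() */
    repl.counters = NULL;
    repl.size = newinfo->size; /* native size, not the compat one */
    ret = translate_table(net, newinfo, entry1, &repl);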
 
 static int
@@ -1820,8 +1531,6 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
                return -EFAULT;
 
        /* overflow check */
-       if (tmp.size >= INT_MAX / num_possible_cpus())
-               return -ENOMEM;
        if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
                return -ENOMEM;
        if (tmp.num_counters == 0)
@@ -1840,15 +1549,10 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
                goto free_newinfo;
        }
 
-       ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
-                                    &newinfo, &loc_cpu_entry, tmp.size,
-                                    tmp.num_entries, tmp.hook_entry,
-                                    tmp.underflow);
+       ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
        if (ret != 0)
                goto free_newinfo;
 
-       duprintf("compat_do_replace: Translated table\n");
-
        ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
                           tmp.num_counters, compat_ptr(tmp.counters));
        if (ret)
@@ -1882,7 +1586,6 @@ compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
                break;
 
        default:
-               duprintf("do_ip6t_set_ctl:  unknown request %i\n", cmd);
                ret = -EINVAL;
        }
 
@@ -1932,19 +1635,15 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
        struct compat_ip6t_get_entries get;
        struct xt_table *t;
 
-       if (*len < sizeof(get)) {
-               duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
+       if (*len < sizeof(get))
                return -EINVAL;
-       }
 
        if (copy_from_user(&get, uptr, sizeof(get)) != 0)
                return -EFAULT;
 
-       if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
-               duprintf("compat_get_entries: %u != %zu\n",
-                        *len, sizeof(get) + get.size);
+       if (*len != sizeof(struct compat_ip6t_get_entries) + get.size)
                return -EINVAL;
-       }
+
        get.name[sizeof(get.name) - 1] = '\0';
 
        xt_compat_lock(AF_INET6);
@@ -1952,16 +1651,13 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
        if (!IS_ERR_OR_NULL(t)) {
                const struct xt_table_info *private = t->private;
                struct xt_table_info info;
-               duprintf("t->private->number = %u\n", private->number);
                ret = compat_table_info(private, &info);
-               if (!ret && get.size == info.size) {
+               if (!ret && get.size == info.size)
                        ret = compat_copy_entries_to_user(private->size,
                                                          t, uptr->entrytable);
-               } else if (!ret) {
-                       duprintf("compat_get_entries: I've got %u not %u!\n",
-                                private->size, get.size);
+               else if (!ret)
                        ret = -EAGAIN;
-               }
+
                xt_compat_flush_offsets(AF_INET6);
                module_put(t->me);
                xt_table_unlock(t);
@@ -2014,7 +1710,6 @@ do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
                break;
 
        default:
-               duprintf("do_ip6t_set_ctl:  unknown request %i\n", cmd);
                ret = -EINVAL;
        }
 
@@ -2066,7 +1761,6 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
        }
 
        default:
-               duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
                ret = -EINVAL;
        }
 
@@ -2168,7 +1862,6 @@ icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
                /* We've been asked to examine this packet, and we
                 * can't.  Hence, no choice but to drop.
                 */
-               duprintf("Dropping evil ICMP tinygram.\n");
                par->hotdrop = true;
                return false;
        }
index 3deed5860a42510078a2377260819aa269d0bc47..06bed74cf5ee0667a86e284289ed23cafeb015cb 100644 (file)
 #include <net/netfilter/nf_conntrack_synproxy.h>
 
 static struct ipv6hdr *
-synproxy_build_ip(struct sk_buff *skb, const struct in6_addr *saddr,
-                                      const struct in6_addr *daddr)
+synproxy_build_ip(struct net *net, struct sk_buff *skb,
+                 const struct in6_addr *saddr,
+                 const struct in6_addr *daddr)
 {
        struct ipv6hdr *iph;
 
        skb_reset_network_header(skb);
        iph = (struct ipv6hdr *)skb_put(skb, sizeof(*iph));
        ip6_flow_hdr(iph, 0, 0);
-       iph->hop_limit  = 64;   //XXX
+       iph->hop_limit  = net->ipv6.devconf_all->hop_limit;
        iph->nexthdr    = IPPROTO_TCP;
        iph->saddr      = *saddr;
        iph->daddr      = *daddr;
@@ -37,13 +38,12 @@ synproxy_build_ip(struct sk_buff *skb, const struct in6_addr *saddr,
 }
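
The hop limit of locally generated SYN proxy packets was hardcoded to 64 (complete with an //XXX marker); it now follows the namespace-wide default, i.e. whatever net.ipv6.conf.all.hop_limit is set to. A one-line sketch of the lookup, with the field path taken from the hunk:

    /* Sketch; field path as in the diff. */
    static inline u8 ns_default_hop_limit(const struct net *net)
    {
            return net->ipv6.devconf_all->hop_limit;
    }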
 
 static void
-synproxy_send_tcp(const struct synproxy_net *snet,
+synproxy_send_tcp(struct net *net,
                  const struct sk_buff *skb, struct sk_buff *nskb,
                  struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo,
                  struct ipv6hdr *niph, struct tcphdr *nth,
                  unsigned int tcp_hdr_size)
 {
-       struct net *net = nf_ct_net(snet->tmpl);
        struct dst_entry *dst;
        struct flowi6 fl6;
 
@@ -60,7 +60,7 @@ synproxy_send_tcp(const struct synproxy_net *snet,
        fl6.fl6_dport = nth->dest;
        security_skb_classify_flow((struct sk_buff *)skb, flowi6_to_flowi(&fl6));
        dst = ip6_route_output(net, NULL, &fl6);
-       if (dst == NULL || dst->error) {
+       if (dst->error) {
                dst_release(dst);
                goto free_nskb;
        }
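
Unlike its IPv4 counterpart, ip6_route_output() never returns NULL: on failure it hands back a reference-counted error dst with dst->error set, so the NULL half of the old check was dead code. The same simplification is applied to nf_send_reset6() further down. As a wrapper-shaped sketch (my code, not the kernel's):

    static struct dst_entry *route_output_checked(struct net *net,
                                                  struct flowi6 *fl6)
    {
            struct dst_entry *dst = ip6_route_output(net, NULL, fl6);

            if (dst->error) {          /* never NULL; errors ride in dst->error */
                    dst_release(dst);  /* the error dst is refcounted too */
                    return NULL;
            }
            return dst;
    }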
@@ -84,7 +84,7 @@ free_nskb:
 }
 
 static void
-synproxy_send_client_synack(const struct synproxy_net *snet,
+synproxy_send_client_synack(struct net *net,
                            const struct sk_buff *skb, const struct tcphdr *th,
                            const struct synproxy_options *opts)
 {
@@ -103,7 +103,7 @@ synproxy_send_client_synack(const struct synproxy_net *snet,
                return;
        skb_reserve(nskb, MAX_TCP_HEADER);
 
-       niph = synproxy_build_ip(nskb, &iph->daddr, &iph->saddr);
+       niph = synproxy_build_ip(net, nskb, &iph->daddr, &iph->saddr);
 
        skb_reset_transport_header(nskb);
        nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
@@ -121,15 +121,16 @@ synproxy_send_client_synack(const struct synproxy_net *snet,
 
        synproxy_build_options(nth, opts);
 
-       synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+       synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
                          niph, nth, tcp_hdr_size);
 }
 
 static void
-synproxy_send_server_syn(const struct synproxy_net *snet,
+synproxy_send_server_syn(struct net *net,
                         const struct sk_buff *skb, const struct tcphdr *th,
                         const struct synproxy_options *opts, u32 recv_seq)
 {
+       struct synproxy_net *snet = synproxy_pernet(net);
        struct sk_buff *nskb;
        struct ipv6hdr *iph, *niph;
        struct tcphdr *nth;
@@ -144,7 +145,7 @@ synproxy_send_server_syn(const struct synproxy_net *snet,
                return;
        skb_reserve(nskb, MAX_TCP_HEADER);
 
-       niph = synproxy_build_ip(nskb, &iph->saddr, &iph->daddr);
+       niph = synproxy_build_ip(net, nskb, &iph->saddr, &iph->daddr);
 
        skb_reset_transport_header(nskb);
        nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
@@ -165,12 +166,12 @@ synproxy_send_server_syn(const struct synproxy_net *snet,
 
        synproxy_build_options(nth, opts);
 
-       synproxy_send_tcp(snet, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
+       synproxy_send_tcp(net, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
                          niph, nth, tcp_hdr_size);
 }
 
 static void
-synproxy_send_server_ack(const struct synproxy_net *snet,
+synproxy_send_server_ack(struct net *net,
                         const struct ip_ct_tcp *state,
                         const struct sk_buff *skb, const struct tcphdr *th,
                         const struct synproxy_options *opts)
@@ -189,7 +190,7 @@ synproxy_send_server_ack(const struct synproxy_net *snet,
                return;
        skb_reserve(nskb, MAX_TCP_HEADER);
 
-       niph = synproxy_build_ip(nskb, &iph->daddr, &iph->saddr);
+       niph = synproxy_build_ip(net, nskb, &iph->daddr, &iph->saddr);
 
        skb_reset_transport_header(nskb);
        nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
@@ -205,11 +206,11 @@ synproxy_send_server_ack(const struct synproxy_net *snet,
 
        synproxy_build_options(nth, opts);
 
-       synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+       synproxy_send_tcp(net, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
 }
 
 static void
-synproxy_send_client_ack(const struct synproxy_net *snet,
+synproxy_send_client_ack(struct net *net,
                         const struct sk_buff *skb, const struct tcphdr *th,
                         const struct synproxy_options *opts)
 {
@@ -227,7 +228,7 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
                return;
        skb_reserve(nskb, MAX_TCP_HEADER);
 
-       niph = synproxy_build_ip(nskb, &iph->saddr, &iph->daddr);
+       niph = synproxy_build_ip(net, nskb, &iph->saddr, &iph->daddr);
 
        skb_reset_transport_header(nskb);
        nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
@@ -243,15 +244,16 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
 
        synproxy_build_options(nth, opts);
 
-       synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+       synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
                          niph, nth, tcp_hdr_size);
 }
 
 static bool
-synproxy_recv_client_ack(const struct synproxy_net *snet,
+synproxy_recv_client_ack(struct net *net,
                         const struct sk_buff *skb, const struct tcphdr *th,
                         struct synproxy_options *opts, u32 recv_seq)
 {
+       struct synproxy_net *snet = synproxy_pernet(net);
        int mss;
 
        mss = __cookie_v6_check(ipv6_hdr(skb), th, ntohl(th->ack_seq) - 1);
@@ -267,7 +269,7 @@ synproxy_recv_client_ack(const struct synproxy_net *snet,
        if (opts->options & XT_SYNPROXY_OPT_TIMESTAMP)
                synproxy_check_timestamp_cookie(opts);
 
-       synproxy_send_server_syn(snet, skb, th, opts, recv_seq);
+       synproxy_send_server_syn(net, skb, th, opts, recv_seq);
        return true;
 }
 
@@ -275,7 +277,8 @@ static unsigned int
 synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 {
        const struct xt_synproxy_info *info = par->targinfo;
-       struct synproxy_net *snet = synproxy_pernet(par->net);
+       struct net *net = par->net;
+       struct synproxy_net *snet = synproxy_pernet(net);
        struct synproxy_options opts = {};
        struct tcphdr *th, _th;
 
@@ -304,12 +307,12 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
                                          XT_SYNPROXY_OPT_SACK_PERM |
                                          XT_SYNPROXY_OPT_ECN);
 
-               synproxy_send_client_synack(snet, skb, th, &opts);
+               synproxy_send_client_synack(net, skb, th, &opts);
                return NF_DROP;
 
        } else if (th->ack && !(th->fin || th->rst || th->syn)) {
                /* ACK from client */
-               synproxy_recv_client_ack(snet, skb, th, &opts, ntohl(th->seq));
+               synproxy_recv_client_ack(net, skb, th, &opts, ntohl(th->seq));
                return NF_DROP;
        }
 
@@ -320,7 +323,8 @@ static unsigned int ipv6_synproxy_hook(void *priv,
                                       struct sk_buff *skb,
                                       const struct nf_hook_state *nhs)
 {
-       struct synproxy_net *snet = synproxy_pernet(nhs->net);
+       struct net *net = nhs->net;
+       struct synproxy_net *snet = synproxy_pernet(net);
        enum ip_conntrack_info ctinfo;
        struct nf_conn *ct;
        struct nf_conn_synproxy *synproxy;
@@ -384,7 +388,7 @@ static unsigned int ipv6_synproxy_hook(void *priv,
                         * therefore we need to add 1 to make the SYN sequence
                         * number match the one of first SYN.
                         */
-                       if (synproxy_recv_client_ack(snet, skb, th, &opts,
+                       if (synproxy_recv_client_ack(net, skb, th, &opts,
                                                     ntohl(th->seq) + 1))
                                this_cpu_inc(snet->stats->cookie_retrans);
 
@@ -410,12 +414,12 @@ static unsigned int ipv6_synproxy_hook(void *priv,
                                  XT_SYNPROXY_OPT_SACK_PERM);
 
                swap(opts.tsval, opts.tsecr);
-               synproxy_send_server_ack(snet, state, skb, th, &opts);
+               synproxy_send_server_ack(net, state, skb, th, &opts);
 
                nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - ntohl(th->seq));
 
                swap(opts.tsval, opts.tsecr);
-               synproxy_send_client_ack(snet, skb, th, &opts);
+               synproxy_send_client_ack(net, skb, th, &opts);
 
                consume_skb(skb);
                return NF_STOLEN;
index 4709f657b7b6be1943bab3f1bf07c5c47e386df5..a5400223fd743ff012ab7318c7ad6845c1f6b36c 100644 (file)
@@ -158,7 +158,7 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
        fl6.fl6_dport = otcph->source;
        security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
        dst = ip6_route_output(net, NULL, &fl6);
-       if (dst == NULL || dst->error) {
+       if (dst->error) {
                dst_release(dst);
                return;
        }
index da1cff79e447fd7928bb63552651f94f084a105a..3ee3e444a66befb01e1a2f124417264361d4a8f6 100644 (file)
@@ -58,11 +58,11 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        int iif = 0;
        struct flowi6 fl6;
        int err;
-       int hlimit;
        struct dst_entry *dst;
        struct rt6_info *rt;
        struct pingfakehdr pfh;
        struct sockcm_cookie junk = {0};
+       struct ipcm6_cookie ipc6;
 
        pr_debug("ping_v6_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num);
 
@@ -139,13 +139,15 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        pfh.wcheck = 0;
        pfh.family = AF_INET6;
 
-       hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
+       ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
+       ipc6.tclass = np->tclass;
+       ipc6.dontfrag = np->dontfrag;
+       ipc6.opt = NULL;
 
        lock_sock(sk);
        err = ip6_append_data(sk, ping_getfrag, &pfh, len,
-                             0, hlimit,
-                             np->tclass, NULL, &fl6, rt,
-                             MSG_DONTWAIT, np->dontfrag, &junk);
+                             0, &ipc6, &fl6, rt,
+                             MSG_DONTWAIT, &junk);
 
        if (err) {
                ICMP6_INC_STATS(sock_net(sk), rt->rt6i_idev,
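
Here, and in the raw, UDP and L2TP hunks that follow, the loose hlimit/tclass/dontfrag/opt parameters threaded through ip6_append_data() and friends are folded into a single struct ipcm6_cookie, with -1 meaning "not set, fall back to the socket or route default". A minimal sketch of the pattern, assuming only the fields this diff actually touches:

    /* Sketch; the real struct may carry more than these four fields. */
    struct ipcm6_cookie_sketch {
            int hlimit;                 /* -1: use ip6_sk_dst_hoplimit() */
            int tclass;                 /* -1: use np->tclass */
            int dontfrag;               /* -1: use np->dontfrag */
            struct ipv6_txoptions *opt; /* NULL, or cmsg-built tx options */
    };

    static void ipcm6_sketch_init(struct ipcm6_cookie_sketch *ipc6)
    {
            ipc6->hlimit = -1;
            ipc6->tclass = -1;
            ipc6->dontfrag = -1;
            ipc6->opt = NULL;
    }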
index b07ce21983aaf1f602b315a697520ac15a924639..896350df642361650234748965414df43adf0958 100644 (file)
@@ -746,10 +746,8 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        struct raw6_frag_vec rfv;
        struct flowi6 fl6;
        struct sockcm_cookie sockc;
+       struct ipcm6_cookie ipc6;
        int addr_len = msg->msg_namelen;
-       int hlimit = -1;
-       int tclass = -1;
-       int dontfrag = -1;
        u16 proto;
        int err;
 
@@ -770,6 +768,11 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 
        fl6.flowi6_mark = sk->sk_mark;
 
+       ipc6.hlimit = -1;
+       ipc6.tclass = -1;
+       ipc6.dontfrag = -1;
+       ipc6.opt = NULL;
+
        if (sin6) {
                if (addr_len < SIN6_LEN_RFC2133)
                        return -EINVAL;
@@ -827,10 +830,9 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                opt = &opt_space;
                memset(opt, 0, sizeof(struct ipv6_txoptions));
                opt->tot_len = sizeof(struct ipv6_txoptions);
+               ipc6.opt = opt;
 
-               err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
-                                           &hlimit, &tclass, &dontfrag,
-                                           &sockc);
+               err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6, &sockc);
                if (err < 0) {
                        fl6_sock_release(flowlabel);
                        return err;
@@ -846,7 +848,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        if (!opt) {
                opt = txopt_get(np);
                opt_to_free = opt;
-               }
+       }
        if (flowlabel)
                opt = fl6_merge_options(&opt_space, flowlabel, opt);
        opt = ipv6_fixup_options(&opt_space, opt);
@@ -881,14 +883,14 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                err = PTR_ERR(dst);
                goto out;
        }
-       if (hlimit < 0)
-               hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
+       if (ipc6.hlimit < 0)
+               ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
 
-       if (tclass < 0)
-               tclass = np->tclass;
+       if (ipc6.tclass < 0)
+               ipc6.tclass = np->tclass;
 
-       if (dontfrag < 0)
-               dontfrag = np->dontfrag;
+       if (ipc6.dontfrag < 0)
+               ipc6.dontfrag = np->dontfrag;
 
        if (msg->msg_flags&MSG_CONFIRM)
                goto do_confirm;
@@ -897,10 +899,11 @@ back_from_confirm:
        if (inet->hdrincl)
                err = rawv6_send_hdrinc(sk, msg, len, &fl6, &dst, msg->msg_flags);
        else {
+               ipc6.opt = opt;
                lock_sock(sk);
                err = ip6_append_data(sk, raw6_getfrag, &rfv,
-                       len, 0, hlimit, tclass, opt, &fl6, (struct rt6_info *)dst,
-                       msg->msg_flags, dontfrag, &sockc);
+                       len, 0, &ipc6, &fl6, (struct rt6_info *)dst,
+                       msg->msg_flags, &sockc);
 
                if (err)
                        ip6_flush_pending_frames(sk);
index e2ea31175ef938af102a3dae8199fb0583399baf..2160d5d009cb6e97f36f3b4319322fb52a354866 100644 (file)
@@ -145,12 +145,12 @@ void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
        if (!dev)
                goto out_rcu_unlock;
 
-       IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
+       __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
 
        if (inet_frag_evicting(&fq->q))
                goto out_rcu_unlock;
 
-       IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
+       __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
 
        /* Don't send error if the first segment did not arrive. */
        if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !fq->q.fragments)
@@ -223,8 +223,8 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
                        ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
 
        if ((unsigned int)end > IPV6_MAXPLEN) {
-               IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
-                                IPSTATS_MIB_INHDRERRORS);
+               __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+                               IPSTATS_MIB_INHDRERRORS);
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                  ((u8 *)&fhdr->frag_off -
                                   skb_network_header(skb)));
@@ -258,8 +258,8 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
                        /* RFC2460 says always send parameter problem in
                         * this case. -DaveM
                         */
-                       IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
-                                        IPSTATS_MIB_INHDRERRORS);
+                       __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+                                       IPSTATS_MIB_INHDRERRORS);
                        icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                          offsetof(struct ipv6hdr, payload_len));
                        return -1;
@@ -361,8 +361,8 @@ found:
 discard_fq:
        inet_frag_kill(&fq->q, &ip6_frags);
 err:
-       IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
-                        IPSTATS_MIB_REASMFAILS);
+       __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+                       IPSTATS_MIB_REASMFAILS);
        kfree_skb(skb);
        return -1;
 }
@@ -500,7 +500,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
                           skb_network_header_len(head));
 
        rcu_read_lock();
-       IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
+       __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
        rcu_read_unlock();
        fq->q.fragments = NULL;
        fq->q.fragments_tail = NULL;
@@ -513,7 +513,7 @@ out_oom:
        net_dbg_ratelimited("ip6_frag_reasm: no memory for reassembly\n");
 out_fail:
        rcu_read_lock();
-       IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
+       __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
        rcu_read_unlock();
        return -1;
 }
@@ -528,7 +528,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
        if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
                goto fail_hdr;
 
-       IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
+       __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
 
        /* Jumbo payload inhibits frag. header */
        if (hdr->payload_len == 0)
@@ -544,8 +544,8 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
        if (!(fhdr->frag_off & htons(0xFFF9))) {
                /* It is not a fragmented frame */
                skb->transport_header += sizeof(struct frag_hdr);
-               IP6_INC_STATS_BH(net,
-                                ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);
+               __IP6_INC_STATS(net,
+                               ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);
 
                IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
                IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
@@ -566,13 +566,13 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
                return ret;
        }
 
-       IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
+       __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
        kfree_skb(skb);
        return -1;
 
 fail_hdr:
-       IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
-                        IPSTATS_MIB_INHDRERRORS);
+       __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+                       IPSTATS_MIB_INHDRERRORS);
        icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
        return -1;
 }
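
From here on the diff also renames the SNMP counter helpers: the _BH suffix becomes a leading double underscore and the _USER suffix is dropped, so the short name is the general-purpose form and the __ prefix marks the variant whose caller guarantees a non-preemptible context where the cheaper non-atomic per-cpu update is safe. A few call sites deliberately switch variants in the process (e.g. tcp_v6_inbound_md5_hash() below moves from the BH form to the plain one, presumably because that path can also run in process context).

    /* Renaming convention, as applied throughout this diff:
     *   IP6_INC_STATS_BH()    ->  __IP6_INC_STATS()
     *   UDP6_INC_STATS_BH()   ->  __UDP6_INC_STATS()
     *   UDP6_INC_STATS_USER() ->  UDP6_INC_STATS()
     *   NET_INC_STATS_BH()    ->  __NET_INC_STATS()  (or NET_INC_STATS()
     *                             where the caller may be preemptible)
     */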
index ed446639219c3ae8832a19a1f944e4dd6ddc6302..c42fa1deb152c6c3292d705b3d6157c32bd3f837 100644 (file)
@@ -338,9 +338,9 @@ static struct rt6_info *__ip6_dst_alloc(struct net *net,
        return rt;
 }
 
-static struct rt6_info *ip6_dst_alloc(struct net *net,
-                                     struct net_device *dev,
-                                     int flags)
+struct rt6_info *ip6_dst_alloc(struct net *net,
+                              struct net_device *dev,
+                              int flags)
 {
        struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);
 
@@ -364,6 +364,7 @@ static struct rt6_info *ip6_dst_alloc(struct net *net,
 
        return rt;
 }
+EXPORT_SYMBOL(ip6_dst_alloc);
 
 static void ip6_dst_destroy(struct dst_entry *dst)
 {
@@ -1189,7 +1190,7 @@ struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
        struct dst_entry *dst;
        bool any_src;
 
-       dst = l3mdev_rt6_dst_by_oif(net, fl6);
+       dst = l3mdev_get_rt6_dst(net, fl6);
        if (dst)
                return dst;
 
@@ -1417,8 +1418,20 @@ EXPORT_SYMBOL_GPL(ip6_update_pmtu);
 
 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
 {
+       struct dst_entry *dst;
+
        ip6_update_pmtu(skb, sock_net(sk), mtu,
                        sk->sk_bound_dev_if, sk->sk_mark);
+
+       dst = __sk_dst_get(sk);
+       if (!dst || !dst->obsolete ||
+           dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
+               return;
+
+       bh_lock_sock(sk);
+       if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
+               ip6_datagram_dst_update(sk, false);
+       bh_unlock_sock(sk);
 }
 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
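
ip6_sk_update_pmtu() now also refreshes the socket's own cached dst when the PMTU event has made it obsolete. If the socket is currently owned by a user context the update cannot be done here; the companion hunk near the end of this diff wires .release_cb = ip6_datagram_release_cb into udpv6_prot so the refresh runs once the owner releases the socket lock. Condensed, with names as in the hunk:

    /* Condensed from the hunk above; edge cases trimmed. */
    static void refresh_cached_dst(struct sock *sk)
    {
            struct dst_entry *dst = __sk_dst_get(sk);

            /* ->check() returns the dst itself while it is still usable */
            if (!dst || !dst->obsolete ||
                dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
                    return;

            bh_lock_sock(sk);
            if (!sock_owned_by_user(sk) &&
                !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
                    ip6_datagram_dst_update(sk, false);
            bh_unlock_sock(sk);
    }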
 
@@ -1756,6 +1769,37 @@ static int ip6_convert_metrics(struct mx6_config *mxc,
        return -EINVAL;
 }
 
+static struct rt6_info *ip6_nh_lookup_table(struct net *net,
+                                           struct fib6_config *cfg,
+                                           const struct in6_addr *gw_addr)
+{
+       struct flowi6 fl6 = {
+               .flowi6_oif = cfg->fc_ifindex,
+               .daddr = *gw_addr,
+               .saddr = cfg->fc_prefsrc,
+       };
+       struct fib6_table *table;
+       struct rt6_info *rt;
+       int flags = 0;
+
+       table = fib6_get_table(net, cfg->fc_table);
+       if (!table)
+               return NULL;
+
+       if (!ipv6_addr_any(&cfg->fc_prefsrc))
+               flags |= RT6_LOOKUP_F_HAS_SADDR;
+
+       rt = ip6_pol_route(net, table, cfg->fc_ifindex, &fl6, flags);
+
+       /* if table lookup failed, fall back to full lookup */
+       if (rt == net->ipv6.ip6_null_entry) {
+               ip6_rt_put(rt);
+               rt = NULL;
+       }
+
+       return rt;
+}
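
ip6_nh_lookup_table() lets route insertion resolve a gateway inside the table the new route targets, which matters once tables are used for VRF-style separation, instead of always going through the global lookup. The creation hunk below tries the table-scoped lookup first and only then falls back to rt6_lookup(); roughly:

    /* Rough shape of the fallback, condensed from the later hunk. */
    static struct rt6_info *resolve_gateway(struct net *net,
                                            struct fib6_config *cfg,
                                            const struct in6_addr *gw_addr)
    {
            struct rt6_info *grt = NULL;

            if (cfg->fc_table)      /* prefer the route's own table */
                    grt = ip6_nh_lookup_table(net, cfg, gw_addr);
            if (!grt)               /* fall back to a global lookup */
                    grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1);
            return grt;
    }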
+
 static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
 {
        struct net *net = cfg->fc_nlinfo.nl_net;
@@ -1927,7 +1971,7 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
                rt->rt6i_gateway = *gw_addr;
 
                if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
-                       struct rt6_info *grt;
+                       struct rt6_info *grt = NULL;
 
                        /* IPv6 strictly inhibits using not link-local
                           addresses as nexthop address.
@@ -1939,7 +1983,12 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
                        if (!(gwa_type & IPV6_ADDR_UNICAST))
                                goto out;
 
-                       grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1);
+                       if (cfg->fc_table)
+                               grt = ip6_nh_lookup_table(net, cfg, gw_addr);
+
+                       if (!grt)
+                               grt = rt6_lookup(net, gw_addr, NULL,
+                                                cfg->fc_ifindex, 1);
 
                        err = -EHOSTUNREACH;
                        if (!grt)
index 83384308d032492fff85c04d4dca196c1bb690fc..a13d8c114ccb1391ee1426a7c674451b9323b68a 100644 (file)
@@ -913,10 +913,9 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
                goto tx_error;
        }
 
-       skb = iptunnel_handle_offloads(skb, SKB_GSO_SIT);
-       if (IS_ERR(skb)) {
+       if (iptunnel_handle_offloads(skb, SKB_GSO_SIT)) {
                ip_rt_put(rt);
-               goto out;
+               goto tx_error;
        }
 
        if (df) {
@@ -992,7 +991,6 @@ tx_error_icmp:
        dst_link_failure(skb);
 tx_error:
        kfree_skb(skb);
-out:
        dev->stats.tx_errors++;
        return NETDEV_TX_OK;
 }
@@ -1002,15 +1000,15 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
        struct ip_tunnel *tunnel = netdev_priv(dev);
        const struct iphdr  *tiph = &tunnel->parms.iph;
 
-       skb = iptunnel_handle_offloads(skb, SKB_GSO_IPIP);
-       if (IS_ERR(skb))
-               goto out;
+       if (iptunnel_handle_offloads(skb, SKB_GSO_IPIP))
+               goto tx_error;
 
        skb_set_inner_ipproto(skb, IPPROTO_IPIP);
 
        ip_tunnel_xmit(skb, dev, tiph, IPPROTO_IPIP);
        return NETDEV_TX_OK;
-out:
+tx_error:
+       kfree_skb(skb);
        dev->stats.tx_errors++;
        return NETDEV_TX_OK;
 }
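
Both sit transmit paths adapt to a changed iptunnel_handle_offloads() contract: judging purely from these call sites, it now returns 0 or a negative errno instead of a (possibly reallocated) skb, and it no longer frees the skb on failure, so the caller keeps ownership and must free it itself. An error-path sketch under that assumption:

    /* Sketch of the failure path only; the real transmit is omitted. */
    static netdev_tx_t xmit_offload_error_path(struct sk_buff *skb,
                                               struct net_device *dev)
    {
            if (iptunnel_handle_offloads(skb, SKB_GSO_IPIP)) {
                    kfree_skb(skb);          /* caller still owns the skb */
                    dev->stats.tx_errors++;  /* account, don't propagate */
            }
            return NETDEV_TX_OK;
    }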
index aab91fa86c5e71aaaf2f96308e7b7d8918e959b5..59c483937aec1c626687c658efe10c7d25f47c10 100644 (file)
@@ -155,11 +155,11 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 
        mss = __cookie_v6_check(ipv6_hdr(skb), th, cookie);
        if (mss == 0) {
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
+               __NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
                goto out;
        }
 
-       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
+       __NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
 
        /* check for timestamp cookie support */
        memset(&tcp_opt, 0, sizeof(tcp_opt));
index 0e621bc1ae11c83e6bc2f2bbf512a9edf12317cc..c4efaa97280c20918866e3311bb756abf0a325c4 100644 (file)
@@ -336,8 +336,8 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                                        skb->dev->ifindex);
 
        if (!sk) {
-               ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
-                                  ICMP6_MIB_INERRORS);
+               __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
+                                 ICMP6_MIB_INERRORS);
                return;
        }
 
@@ -352,13 +352,13 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
        bh_lock_sock(sk);
        if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
-               NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
+               __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 
        if (sk->sk_state == TCP_CLOSE)
                goto out;
 
        if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
-               NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+               __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
                goto out;
        }
 
@@ -368,7 +368,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
        if (sk->sk_state != TCP_LISTEN &&
            !between(seq, snd_una, tp->snd_nxt)) {
-               NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+               __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
                goto out;
        }
 
@@ -439,7 +439,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
                              struct flowi *fl,
                              struct request_sock *req,
                              struct tcp_fastopen_cookie *foc,
-                             bool attach_req)
+                             enum tcp_synack_type synack_type)
 {
        struct inet_request_sock *ireq = inet_rsk(req);
        struct ipv6_pinfo *np = inet6_sk(sk);
@@ -452,7 +452,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
                                               IPPROTO_TCP)) == NULL)
                goto done;
 
-       skb = tcp_make_synack(sk, dst, req, foc, attach_req);
+       skb = tcp_make_synack(sk, dst, req, foc, synack_type);
 
        if (skb) {
                __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
@@ -649,12 +649,12 @@ static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
                return false;
 
        if (hash_expected && !hash_location) {
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
                return true;
        }
 
        if (!hash_expected && hash_location) {
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
                return true;
        }
 
@@ -810,8 +810,13 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
        fl6.flowi6_proto = IPPROTO_TCP;
        if (rt6_need_strict(&fl6.daddr) && !oif)
                fl6.flowi6_oif = tcp_v6_iif(skb);
-       else
+       else {
+               if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
+                       oif = skb->skb_iif;
+
                fl6.flowi6_oif = oif;
+       }
+
        fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
        fl6.fl6_dport = t1->dest;
        fl6.fl6_sport = t1->source;
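
When a RST or ACK is generated in reply to a packet that arrived through an interface enslaved to an L3 master (VRF) device and no explicit output interface was given, the reply's flow now falls back to the ingress interface, keeping the reply inside the VRF it belongs to. A small helper-shaped sketch of just that decision (the helper itself is my framing, not the kernel's):

    static int reply_oif(struct net *net, const struct sk_buff *skb, int oif)
    {
            /* route the reply via the ingress VRF if no oif was forced */
            if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
                    oif = skb->skb_iif;
            return oif;
    }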
@@ -825,9 +830,9 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
        if (!IS_ERR(dst)) {
                skb_dst_set(buff, dst);
                ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
-               TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
+               TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
                if (rst)
-                       TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
+                       TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
                return;
        }
 
@@ -1165,7 +1170,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
        return newsk;
 
 out_overflow:
-       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+       __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 out_nonewsk:
        dst_release(dst);
 out:
@@ -1276,8 +1281,8 @@ discard:
        kfree_skb(skb);
        return 0;
 csum_err:
-       TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
-       TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
+       TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
+       TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
        goto discard;
 
 
@@ -1359,7 +1364,7 @@ static int tcp_v6_rcv(struct sk_buff *skb)
        /*
         *      Count it even if it's bad.
         */
-       TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
+       __TCP_INC_STATS(net, TCP_MIB_INSEGS);
 
        if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
                goto discard_it;
@@ -1421,7 +1426,7 @@ process:
                }
        }
        if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
-               NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+               __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
                goto discard_and_relse;
        }
 
@@ -1454,7 +1459,7 @@ process:
        } else if (unlikely(sk_add_backlog(sk, skb,
                                           sk->sk_rcvbuf + sk->sk_sndbuf))) {
                bh_unlock_sock(sk);
-               NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
+               __NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP);
                goto discard_and_relse;
        }
        bh_unlock_sock(sk);
@@ -1472,9 +1477,9 @@ no_tcp_socket:
 
        if (tcp_checksum_complete(skb)) {
 csum_error:
-               TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
+               __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
 bad_packet:
-               TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
+               __TCP_INC_STATS(net, TCP_MIB_INERRS);
        } else {
                tcp_v6_send_reset(NULL, skb);
        }
index a050b70b91011b541c06d64f5d36f8e306afd403..aca06094110f9def54c90d113fd78912bd6000a1 100644 (file)
@@ -423,24 +423,22 @@ try_again:
                if (!peeked) {
                        atomic_inc(&sk->sk_drops);
                        if (is_udp4)
-                               UDP_INC_STATS_USER(sock_net(sk),
-                                                  UDP_MIB_INERRORS,
-                                                  is_udplite);
+                               UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
+                                             is_udplite);
                        else
-                               UDP6_INC_STATS_USER(sock_net(sk),
-                                                   UDP_MIB_INERRORS,
-                                                   is_udplite);
+                               UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
+                                              is_udplite);
                }
                skb_free_datagram_locked(sk, skb);
                return err;
        }
        if (!peeked) {
                if (is_udp4)
-                       UDP_INC_STATS_USER(sock_net(sk),
-                                       UDP_MIB_INDATAGRAMS, is_udplite);
+                       UDP_INC_STATS(sock_net(sk), UDP_MIB_INDATAGRAMS,
+                                     is_udplite);
                else
-                       UDP6_INC_STATS_USER(sock_net(sk),
-                                       UDP_MIB_INDATAGRAMS, is_udplite);
+                       UDP6_INC_STATS(sock_net(sk), UDP_MIB_INDATAGRAMS,
+                                      is_udplite);
        }
 
        sock_recv_ts_and_drops(msg, sk, skb);
@@ -487,15 +485,15 @@ csum_copy_err:
        slow = lock_sock_fast(sk);
        if (!skb_kill_datagram(sk, skb, flags)) {
                if (is_udp4) {
-                       UDP_INC_STATS_USER(sock_net(sk),
-                                       UDP_MIB_CSUMERRORS, is_udplite);
-                       UDP_INC_STATS_USER(sock_net(sk),
-                                       UDP_MIB_INERRORS, is_udplite);
+                       UDP_INC_STATS(sock_net(sk),
+                                     UDP_MIB_CSUMERRORS, is_udplite);
+                       UDP_INC_STATS(sock_net(sk),
+                                     UDP_MIB_INERRORS, is_udplite);
                } else {
-                       UDP6_INC_STATS_USER(sock_net(sk),
-                                       UDP_MIB_CSUMERRORS, is_udplite);
-                       UDP6_INC_STATS_USER(sock_net(sk),
-                                       UDP_MIB_INERRORS, is_udplite);
+                       UDP6_INC_STATS(sock_net(sk),
+                                      UDP_MIB_CSUMERRORS, is_udplite);
+                       UDP6_INC_STATS(sock_net(sk),
+                                      UDP_MIB_INERRORS, is_udplite);
                }
        }
        unlock_sock_fast(sk, slow);
@@ -523,8 +521,8 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
                               inet6_iif(skb), udptable, skb);
        if (!sk) {
-               ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
-                                  ICMP6_MIB_INERRORS);
+               __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
+                                 ICMP6_MIB_INERRORS);
                return;
        }
 
@@ -572,9 +570,9 @@ static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
                /* Note that an ENOMEM error is charged twice */
                if (rc == -ENOMEM)
-                       UDP6_INC_STATS_BH(sock_net(sk),
-                                       UDP_MIB_RCVBUFERRORS, is_udplite);
-               UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+                       UDP6_INC_STATS(sock_net(sk),
+                                        UDP_MIB_RCVBUFERRORS, is_udplite);
+               UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
                kfree_skb(skb);
                return -1;
        }
@@ -630,9 +628,9 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
                        ret = encap_rcv(sk, skb);
                        if (ret <= 0) {
-                               UDP_INC_STATS_BH(sock_net(sk),
-                                                UDP_MIB_INDATAGRAMS,
-                                                is_udplite);
+                               __UDP_INC_STATS(sock_net(sk),
+                                               UDP_MIB_INDATAGRAMS,
+                                               is_udplite);
                                return -ret;
                        }
                }
@@ -666,8 +664,8 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
        udp_csum_pull_header(skb);
        if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
-               UDP6_INC_STATS_BH(sock_net(sk),
-                                 UDP_MIB_RCVBUFERRORS, is_udplite);
+               __UDP6_INC_STATS(sock_net(sk),
+                                UDP_MIB_RCVBUFERRORS, is_udplite);
                goto drop;
        }
 
@@ -686,9 +684,9 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
        return rc;
 
 csum_error:
-       UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
+       __UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
 drop:
-       UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+       __UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
        atomic_inc(&sk->sk_drops);
        kfree_skb(skb);
        return -1;
@@ -771,10 +769,10 @@ start_lookup:
                nskb = skb_clone(skb, GFP_ATOMIC);
                if (unlikely(!nskb)) {
                        atomic_inc(&sk->sk_drops);
-                       UDP6_INC_STATS_BH(net, UDP_MIB_RCVBUFERRORS,
-                                         IS_UDPLITE(sk));
-                       UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS,
-                                         IS_UDPLITE(sk));
+                       __UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
+                                        IS_UDPLITE(sk));
+                       __UDP6_INC_STATS(net, UDP_MIB_INERRORS,
+                                        IS_UDPLITE(sk));
                        continue;
                }
 
@@ -793,8 +791,8 @@ start_lookup:
                        consume_skb(skb);
        } else {
                kfree_skb(skb);
-               UDP6_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
-                                 proto == IPPROTO_UDPLITE);
+               __UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
+                                proto == IPPROTO_UDPLITE);
        }
        return 0;
 }
@@ -887,7 +885,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
        if (udp_lib_checksum_complete(skb))
                goto csum_error;
 
-       UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
+       __UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
        icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
 
        kfree_skb(skb);
@@ -901,9 +899,9 @@ short_packet:
                            daddr, ntohs(uh->dest));
        goto discard;
 csum_error:
-       UDP6_INC_STATS_BH(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
+       __UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
 discard:
-       UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
+       __UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
        kfree_skb(skb);
        return 0;
 }
@@ -1015,13 +1013,14 @@ send:
        err = ip6_send_skb(skb);
        if (err) {
                if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
-                       UDP6_INC_STATS_USER(sock_net(sk),
-                                           UDP_MIB_SNDBUFERRORS, is_udplite);
+                       UDP6_INC_STATS(sock_net(sk),
+                                      UDP_MIB_SNDBUFERRORS, is_udplite);
                        err = 0;
                }
-       } else
-               UDP6_INC_STATS_USER(sock_net(sk),
-                                   UDP_MIB_OUTDATAGRAMS, is_udplite);
+       } else {
+               UDP6_INC_STATS(sock_net(sk),
+                              UDP_MIB_OUTDATAGRAMS, is_udplite);
+       }
        return err;
 }
 
@@ -1065,11 +1064,9 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        struct ip6_flowlabel *flowlabel = NULL;
        struct flowi6 fl6;
        struct dst_entry *dst;
+       struct ipcm6_cookie ipc6;
        int addr_len = msg->msg_namelen;
        int ulen = len;
-       int hlimit = -1;
-       int tclass = -1;
-       int dontfrag = -1;
        int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
        int err;
        int connected = 0;
@@ -1077,6 +1074,10 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
        struct sockcm_cookie sockc;
 
+       ipc6.hlimit = -1;
+       ipc6.tclass = -1;
+       ipc6.dontfrag = -1;
+
        /* destination address check */
        if (sin6) {
                if (addr_len < offsetof(struct sockaddr, sa_data))
@@ -1201,10 +1202,9 @@ do_udp_sendmsg:
                opt = &opt_space;
                memset(opt, 0, sizeof(struct ipv6_txoptions));
                opt->tot_len = sizeof(*opt);
+               ipc6.opt = opt;
 
-               err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
-                                           &hlimit, &tclass, &dontfrag,
-                                           &sockc);
+               err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6, &sockc);
                if (err < 0) {
                        fl6_sock_release(flowlabel);
                        return err;
@@ -1225,6 +1225,7 @@ do_udp_sendmsg:
        if (flowlabel)
                opt = fl6_merge_options(&opt_space, flowlabel, opt);
        opt = ipv6_fixup_options(&opt_space, opt);
+       ipc6.opt = opt;
 
        fl6.flowi6_proto = sk->sk_protocol;
        if (!ipv6_addr_any(daddr))
@@ -1254,11 +1255,11 @@ do_udp_sendmsg:
                goto out;
        }
 
-       if (hlimit < 0)
-               hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
+       if (ipc6.hlimit < 0)
+               ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
 
-       if (tclass < 0)
-               tclass = np->tclass;
+       if (ipc6.tclass < 0)
+               ipc6.tclass = np->tclass;
 
        if (msg->msg_flags&MSG_CONFIRM)
                goto do_confirm;
@@ -1269,9 +1270,9 @@ back_from_confirm:
                struct sk_buff *skb;
 
                skb = ip6_make_skb(sk, getfrag, msg, ulen,
-                                  sizeof(struct udphdr), hlimit, tclass, opt,
+                                  sizeof(struct udphdr), &ipc6,
                                   &fl6, (struct rt6_info *)dst,
-                                  msg->msg_flags, dontfrag, &sockc);
+                                  msg->msg_flags, &sockc);
                err = PTR_ERR(skb);
                if (!IS_ERR_OR_NULL(skb))
                        err = udp_v6_send_skb(skb, &fl6);
@@ -1292,14 +1293,12 @@ back_from_confirm:
        up->pending = AF_INET6;
 
 do_append_data:
-       if (dontfrag < 0)
-               dontfrag = np->dontfrag;
+       if (ipc6.dontfrag < 0)
+               ipc6.dontfrag = np->dontfrag;
        up->len += ulen;
-       err = ip6_append_data(sk, getfrag, msg, ulen,
-               sizeof(struct udphdr), hlimit, tclass, opt, &fl6,
-               (struct rt6_info *)dst,
-               corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, dontfrag,
-               &sockc);
+       err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
+                             &ipc6, &fl6, (struct rt6_info *)dst,
+                             corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, &sockc);
        if (err)
                udp_v6_flush_pending_frames(sk);
        else if (!corkreq)
@@ -1342,8 +1341,8 @@ out:
         * seems like overkill.
         */
        if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
-               UDP6_INC_STATS_USER(sock_net(sk),
-                               UDP_MIB_SNDBUFERRORS, is_udplite);
+               UDP6_INC_STATS(sock_net(sk),
+                              UDP_MIB_SNDBUFERRORS, is_udplite);
        }
        return err;
 
@@ -1490,6 +1489,7 @@ struct proto udpv6_prot = {
        .sendmsg           = udpv6_sendmsg,
        .recvmsg           = udpv6_recvmsg,
        .backlog_rcv       = __udpv6_queue_rcv_skb,
+       .release_cb        = ip6_datagram_release_cb,
        .hash              = udp_lib_hash,
        .unhash            = udp_lib_unhash,
        .rehash            = udp_v6_rehash,
index fcfbe579434ae8d4ffb989f0ac82b250512c5637..d8b7267280c38fdea5930764d5ca8d2b11746fd7 100644 (file)
@@ -181,7 +181,7 @@ static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
                skb = new_skb;
        }
 
-       dev->trans_start = jiffies;
+       netif_trans_update(dev);
 
        len = skb->len;
        /* Now queue the packet in the transport layer */
index afca2eb4dfa777c75288dfb6fce9636b309a2ebc..6edfa99803148815e383eb13ce6a9c1eb098058d 100644 (file)
@@ -1376,9 +1376,9 @@ static int l2tp_tunnel_sock_create(struct net *net,
                        memcpy(&udp_conf.peer_ip6, cfg->peer_ip6,
                               sizeof(udp_conf.peer_ip6));
                        udp_conf.use_udp6_tx_checksums =
-                           cfg->udp6_zero_tx_checksums;
+                         ! cfg->udp6_zero_tx_checksums;
                        udp_conf.use_udp6_rx_checksums =
-                           cfg->udp6_zero_rx_checksums;
+                         ! cfg->udp6_zero_rx_checksums;
                } else
 #endif
                {
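
This hunk fixes inverted checksum configuration: the L2TP flags express "use zero checksums" while the udp_conf flags express "use checksums", so translating one into the other requires a logical negation; before the fix the values were copied through as-is, silently inverting the administrator's intent. The corrected mapping, names as in the diff:

    udp_conf.use_udp6_tx_checksums = !cfg->udp6_zero_tx_checksums;
    udp_conf.use_udp6_rx_checksums = !cfg->udp6_zero_rx_checksums;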
index ec22078b0914ff7ce65c3b11801504e252102dc2..42de4ccd159f6f6853930afd44cea239e2011a54 100644 (file)
@@ -123,12 +123,11 @@ static int l2tp_ip_recv(struct sk_buff *skb)
        struct l2tp_tunnel *tunnel = NULL;
        int length;
 
-       /* Point to L2TP header */
-       optr = ptr = skb->data;
-
        if (!pskb_may_pull(skb, 4))
                goto discard;
 
+       /* Point to L2TP header */
+       optr = ptr = skb->data;
        session_id = ntohl(*((__be32 *) ptr));
        ptr += 4;
 
@@ -156,6 +155,9 @@ static int l2tp_ip_recv(struct sk_buff *skb)
                if (!pskb_may_pull(skb, length))
                        goto discard;
 
+               /* Point to L2TP header */
+               optr = ptr = skb->data;
+               ptr += 4;
                pr_debug("%s: ip recv\n", tunnel->name);
                print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
        }
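
The reordering here (and the identical change in l2tp_ip6_recv() in the next file) closes a subtle class of bug: pskb_may_pull() may reallocate the skb head, so a pointer cached from skb->data before the call can dangle afterwards. Pointers are therefore taken, and re-taken, only after each pull succeeds. A minimal sketch of the safe ordering:

    /* Sketch only: fetch skb->data after, never before, the pull. */
    static int read_session_id(struct sk_buff *skb, u32 *session_id)
    {
            if (!pskb_may_pull(skb, 4))     /* may reallocate skb->head */
                    return -EINVAL;

            /* safe: read through skb->data only after the pull */
            *session_id = ntohl(*(__be32 *)skb->data);
            return 0;
    }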
index 1a38f20b1ca620e19c7b1826158e062862b51acc..c6f5df1bed121e2b701c50542bddb6e403b1e1d8 100644 (file)
@@ -136,12 +136,11 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
        struct l2tp_tunnel *tunnel = NULL;
        int length;
 
-       /* Point to L2TP header */
-       optr = ptr = skb->data;
-
        if (!pskb_may_pull(skb, 4))
                goto discard;
 
+       /* Point to L2TP header */
+       optr = ptr = skb->data;
        session_id = ntohl(*((__be32 *) ptr));
        ptr += 4;
 
@@ -169,6 +168,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
                if (!pskb_may_pull(skb, length))
                        goto discard;
 
+               /* Point to L2TP header */
+               optr = ptr = skb->data;
+               ptr += 4;
                pr_debug("%s: ip recv\n", tunnel->name);
                print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
        }
@@ -493,10 +495,8 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        struct dst_entry *dst = NULL;
        struct flowi6 fl6;
        struct sockcm_cookie sockc_unused = {0};
+       struct ipcm6_cookie ipc6;
        int addr_len = msg->msg_namelen;
-       int hlimit = -1;
-       int tclass = -1;
-       int dontfrag = -1;
        int transhdrlen = 4; /* zero session-id */
        int ulen = len + transhdrlen;
        int err;
@@ -518,6 +518,10 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 
        fl6.flowi6_mark = sk->sk_mark;
 
+       ipc6.hlimit = -1;
+       ipc6.tclass = -1;
+       ipc6.dontfrag = -1;
+
        if (lsa) {
                if (addr_len < SIN6_LEN_RFC2133)
                        return -EINVAL;
@@ -562,11 +566,11 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                opt = &opt_space;
                memset(opt, 0, sizeof(struct ipv6_txoptions));
                opt->tot_len = sizeof(struct ipv6_txoptions);
+               ipc6.opt = opt;
 
-                err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
-                                            &hlimit, &tclass, &dontfrag,
-                                            &sockc_unused);
-                if (err < 0) {
+               err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6,
+                                           &sockc_unused);
+               if (err < 0) {
                        fl6_sock_release(flowlabel);
                        return err;
                }
@@ -586,6 +590,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        if (flowlabel)
                opt = fl6_merge_options(&opt_space, flowlabel, opt);
        opt = ipv6_fixup_options(&opt_space, opt);
+       ipc6.opt = opt;
 
        fl6.flowi6_proto = sk->sk_protocol;
        if (!ipv6_addr_any(daddr))
@@ -610,14 +615,14 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                goto out;
        }
 
-       if (hlimit < 0)
-               hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
+       if (ipc6.hlimit < 0)
+               ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
 
-       if (tclass < 0)
-               tclass = np->tclass;
+       if (ipc6.tclass < 0)
+               ipc6.tclass = np->tclass;
 
-       if (dontfrag < 0)
-               dontfrag = np->dontfrag;
+       if (ipc6.dontfrag < 0)
+               ipc6.dontfrag = np->dontfrag;
 
        if (msg->msg_flags & MSG_CONFIRM)
                goto do_confirm;
@@ -625,9 +630,9 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 back_from_confirm:
        lock_sock(sk);
        err = ip6_append_data(sk, ip_generic_getfrag, msg,
-                             ulen, transhdrlen, hlimit, tclass, opt,
+                             ulen, transhdrlen, &ipc6,
                              &fl6, (struct rt6_info *)dst,
-                             msg->msg_flags, dontfrag, &sockc_unused);
+                             msg->msg_flags, &sockc_unused);
        if (err)
                ip6_flush_pending_frames(sk);
        else if (!(msg->msg_flags & MSG_MORE))
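
As in the udp.c hunks earlier, the -1 sentinels in ipc6 distinguish "explicitly set through ancillary data" from "fall back to the socket default": ip6_datagram_send_ctl() overwrites a field only when the corresponding cmsg is present. Illustrative mapping (cmsg types are the standard IPv6 ones):

    /* IPV6_HOPLIMIT -> ipc6.hlimit                                   */
    /* IPV6_TCLASS   -> ipc6.tclass                                   */
    /* IPV6_DONTFRAG -> ipc6.dontfrag                                 */
    /* IPV6_HOPOPTS / IPV6_RTHDR / IPV6_DSTOPTS -> built into ipc6.opt */
    if (ipc6.hlimit < 0)            /* still the sentinel: no cmsg seen */
            ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);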
index 2caaa84ce92dac811c7813ebbb233c9596d03ca0..1d02e8d20e561f91a395d4e330822138bf182f51 100644 (file)
@@ -346,22 +346,30 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla
        if (nest == NULL)
                goto nla_put_failure;
 
-       if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS,
-                   atomic_long_read(&tunnel->stats.tx_packets)) ||
-           nla_put_u64(skb, L2TP_ATTR_TX_BYTES,
-                   atomic_long_read(&tunnel->stats.tx_bytes)) ||
-           nla_put_u64(skb, L2TP_ATTR_TX_ERRORS,
-                   atomic_long_read(&tunnel->stats.tx_errors)) ||
-           nla_put_u64(skb, L2TP_ATTR_RX_PACKETS,
-                   atomic_long_read(&tunnel->stats.rx_packets)) ||
-           nla_put_u64(skb, L2TP_ATTR_RX_BYTES,
-                   atomic_long_read(&tunnel->stats.rx_bytes)) ||
-           nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
-                   atomic_long_read(&tunnel->stats.rx_seq_discards)) ||
-           nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
-                   atomic_long_read(&tunnel->stats.rx_oos_packets)) ||
-           nla_put_u64(skb, L2TP_ATTR_RX_ERRORS,
-                   atomic_long_read(&tunnel->stats.rx_errors)))
+       if (nla_put_u64_64bit(skb, L2TP_ATTR_TX_PACKETS,
+                             atomic_long_read(&tunnel->stats.tx_packets),
+                             L2TP_ATTR_STATS_PAD) ||
+           nla_put_u64_64bit(skb, L2TP_ATTR_TX_BYTES,
+                             atomic_long_read(&tunnel->stats.tx_bytes),
+                             L2TP_ATTR_STATS_PAD) ||
+           nla_put_u64_64bit(skb, L2TP_ATTR_TX_ERRORS,
+                             atomic_long_read(&tunnel->stats.tx_errors),
+                             L2TP_ATTR_STATS_PAD) ||
+           nla_put_u64_64bit(skb, L2TP_ATTR_RX_PACKETS,
+                             atomic_long_read(&tunnel->stats.rx_packets),
+                             L2TP_ATTR_STATS_PAD) ||
+           nla_put_u64_64bit(skb, L2TP_ATTR_RX_BYTES,
+                             atomic_long_read(&tunnel->stats.rx_bytes),
+                             L2TP_ATTR_STATS_PAD) ||
+           nla_put_u64_64bit(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
+                             atomic_long_read(&tunnel->stats.rx_seq_discards),
+                             L2TP_ATTR_STATS_PAD) ||
+           nla_put_u64_64bit(skb, L2TP_ATTR_RX_OOS_PACKETS,
+                             atomic_long_read(&tunnel->stats.rx_oos_packets),
+                             L2TP_ATTR_STATS_PAD) ||
+           nla_put_u64_64bit(skb, L2TP_ATTR_RX_ERRORS,
+                             atomic_long_read(&tunnel->stats.rx_errors),
+                             L2TP_ATTR_STATS_PAD))
                goto nla_put_failure;
        nla_nest_end(skb, nest);
 
@@ -746,29 +754,38 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
             nla_put_u8(skb, L2TP_ATTR_USING_IPSEC, 1)) ||
 #endif
            (session->reorder_timeout &&
-            nla_put_msecs(skb, L2TP_ATTR_RECV_TIMEOUT, session->reorder_timeout)))
+            nla_put_msecs(skb, L2TP_ATTR_RECV_TIMEOUT,
+                          session->reorder_timeout, L2TP_ATTR_PAD)))
                goto nla_put_failure;
 
        nest = nla_nest_start(skb, L2TP_ATTR_STATS);
        if (nest == NULL)
                goto nla_put_failure;
 
-       if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS,
-               atomic_long_read(&session->stats.tx_packets)) ||
-           nla_put_u64(skb, L2TP_ATTR_TX_BYTES,
-               atomic_long_read(&session->stats.tx_bytes)) ||
-           nla_put_u64(skb, L2TP_ATTR_TX_ERRORS,
-               atomic_long_read(&session->stats.tx_errors)) ||
-           nla_put_u64(skb, L2TP_ATTR_RX_PACKETS,
-               atomic_long_read(&session->stats.rx_packets)) ||
-           nla_put_u64(skb, L2TP_ATTR_RX_BYTES,
-               atomic_long_read(&session->stats.rx_bytes)) ||
-           nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
-               atomic_long_read(&session->stats.rx_seq_discards)) ||
-           nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
-               atomic_long_read(&session->stats.rx_oos_packets)) ||
-           nla_put_u64(skb, L2TP_ATTR_RX_ERRORS,
-               atomic_long_read(&session->stats.rx_errors)))
+       if (nla_put_u64_64bit(skb, L2TP_ATTR_TX_PACKETS,
+                             atomic_long_read(&session->stats.tx_packets),
+                             L2TP_ATTR_STATS_PAD) ||
+           nla_put_u64_64bit(skb, L2TP_ATTR_TX_BYTES,
+                             atomic_long_read(&session->stats.tx_bytes),
+                             L2TP_ATTR_STATS_PAD) ||
+           nla_put_u64_64bit(skb, L2TP_ATTR_TX_ERRORS,
+                             atomic_long_read(&session->stats.tx_errors),
+                             L2TP_ATTR_STATS_PAD) ||
+           nla_put_u64_64bit(skb, L2TP_ATTR_RX_PACKETS,
+                             atomic_long_read(&session->stats.rx_packets),
+                             L2TP_ATTR_STATS_PAD) ||
+           nla_put_u64_64bit(skb, L2TP_ATTR_RX_BYTES,
+                             atomic_long_read(&session->stats.rx_bytes),
+                             L2TP_ATTR_STATS_PAD) ||
+           nla_put_u64_64bit(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
+                             atomic_long_read(&session->stats.rx_seq_discards),
+                             L2TP_ATTR_STATS_PAD) ||
+           nla_put_u64_64bit(skb, L2TP_ATTR_RX_OOS_PACKETS,
+                             atomic_long_read(&session->stats.rx_oos_packets),
+                             L2TP_ATTR_STATS_PAD) ||
+           nla_put_u64_64bit(skb, L2TP_ATTR_RX_ERRORS,
+                             atomic_long_read(&session->stats.rx_errors),
+                             L2TP_ATTR_STATS_PAD))
                goto nla_put_failure;
        nla_nest_end(skb, nest);
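
Netlink attributes are only 4-byte aligned, so a bare u64 payload can end up misaligned on architectures that care. nla_put_u64_64bit() fixes that by emitting an explicit pad attribute (L2TP_ATTR_STATS_PAD here, or L2TP_ATTR_PAD for the msecs value above) whenever the 8-byte payload would otherwise start on a non-8-byte boundary. The helper itself is a thin wrapper, roughly:

    static inline int nla_put_u64_64bit(struct sk_buff *skb, int attrtype,
                                        u64 value, int padattr)
    {
            /* nla_put_64bit() inserts 'padattr' first when needed so
             * that the u64 payload lands 8-byte aligned */
            return nla_put_64bit(skb, attrtype, sizeof(u64), &value, padattr);
    }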
 
index e925037fa0df06b51b4ea8163707c039bfcdc03e..6651a78e100cdf987a5963fc84be5484221053ef 100644 (file)
@@ -97,3 +97,66 @@ u32 l3mdev_fib_table_by_index(struct net *net, int ifindex)
        return tb_id;
 }
 EXPORT_SYMBOL_GPL(l3mdev_fib_table_by_index);
+
+/**
+ *     l3mdev_get_rt6_dst - IPv6 route lookup based on flow. Returns
+ *                          cached route for L3 master device if relevant
+ *                          to flow
+ *     @net: network namespace for device index lookup
+ *     @fl6: IPv6 flow struct for lookup
+ */
+
+struct dst_entry *l3mdev_get_rt6_dst(struct net *net,
+                                    const struct flowi6 *fl6)
+{
+       struct dst_entry *dst = NULL;
+       struct net_device *dev;
+
+       if (fl6->flowi6_oif) {
+               rcu_read_lock();
+
+               dev = dev_get_by_index_rcu(net, fl6->flowi6_oif);
+               if (dev && netif_is_l3_slave(dev))
+                       dev = netdev_master_upper_dev_get_rcu(dev);
+
+               if (dev && netif_is_l3_master(dev) &&
+                   dev->l3mdev_ops->l3mdev_get_rt6_dst)
+                       dst = dev->l3mdev_ops->l3mdev_get_rt6_dst(dev, fl6);
+
+               rcu_read_unlock();
+       }
+
+       return dst;
+}
+EXPORT_SYMBOL_GPL(l3mdev_get_rt6_dst);
+
+/**
+ *     l3mdev_get_saddr - get source address for a flow based on an interface
+ *                        enslaved to an L3 master device
+ *     @net: network namespace for device index lookup
+ *     @ifindex: Interface index
+ *     @fl4: IPv4 flow struct
+ */
+
+int l3mdev_get_saddr(struct net *net, int ifindex, struct flowi4 *fl4)
+{
+       struct net_device *dev;
+       int rc = 0;
+
+       if (ifindex) {
+               rcu_read_lock();
+
+               dev = dev_get_by_index_rcu(net, ifindex);
+               if (dev && netif_is_l3_slave(dev))
+                       dev = netdev_master_upper_dev_get_rcu(dev);
+
+               if (dev && netif_is_l3_master(dev) &&
+                   dev->l3mdev_ops->l3mdev_get_saddr)
+                       rc = dev->l3mdev_ops->l3mdev_get_saddr(dev, fl4);
+
+               rcu_read_unlock();
+       }
+
+       return rc;
+}
+EXPORT_SYMBOL_GPL(l3mdev_get_saddr);
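
Both new helpers share one shape: resolve the ifindex to a device under rcu_read_lock(), hop from an L3 slave to its master (a VRF device, in practice), and delegate to the master's l3mdev_ops. A hypothetical caller of l3mdev_get_saddr(), with the surrounding names invented for illustration:

    struct flowi4 fl4 = {
            .flowi4_oif = ifindex,          /* possibly a VRF slave */
            .daddr      = dst_addr,
    };
    int err;

    /* leaves fl4 untouched and returns 0 unless ifindex resolves to
     * an L3 master implementing ->l3mdev_get_saddr */
    err = l3mdev_get_saddr(net, ifindex, &fl4);
    if (!err)
            ; /* fl4.saddr may now hold the master's source selection */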
index b3c52e3f689ad16468eb603f1a5d11c55f3d7ae5..8ae3ed97d95cb4a5f05b69b8c2922d57072a6fd1 100644 (file)
@@ -626,6 +626,7 @@ static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb)
        if (llc->cmsg_flags & LLC_CMSG_PKTINFO) {
                struct llc_pktinfo info;
 
+               memset(&info, 0, sizeof(info));
                info.lpi_ifindex = llc_sk(skb->sk)->dev->ifindex;
                llc_pdu_decode_dsap(skb, &info.lpi_sap);
                llc_pdu_decode_da(skb, info.lpi_mac);
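
The added memset() is an infoleak fix: llc_pktinfo is copied out to userspace via put_cmsg(), and without the clear, structure padding and any field left unwritten would expose kernel stack bytes. The general pattern for any struct headed to userspace:

    struct llc_pktinfo info;

    memset(&info, 0, sizeof(info)); /* zero padding and unset fields */
    info.lpi_ifindex = ifindex;     /* then fill only what is meant  */
    put_cmsg(msg, SOL_LLC, LLC_OPT_PKTINFO, sizeof(info), &info);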
index 1a3c7e0f5d0de3c1d35759e1b2ae1bfa7849af84..29c509c54bb22d040aa40af214a58858429dd369 100644 (file)
@@ -195,7 +195,7 @@ static int llc_seq_core_show(struct seq_file *seq, void *v)
                   timer_pending(&llc->pf_cycle_timer.timer),
                   timer_pending(&llc->rej_sent_timer.timer),
                   timer_pending(&llc->busy_state_timer.timer),
-                  !!sk->sk_backlog.tail, !!sock_owned_by_user(sk));
+                  !!sk->sk_backlog.tail, !!sk->sk_lock.owned);
 out:
        return 0;
 }
index fc4730b938d07cbff8b795d3e5ffd015515b801c..0c12e4001f192a329ca62e4260e21598c284dad1 100644 (file)
@@ -1049,7 +1049,7 @@ static int sta_apply_parameters(struct ieee80211_local *local,
        int ret = 0;
        struct ieee80211_supported_band *sband;
        struct ieee80211_sub_if_data *sdata = sta->sdata;
-       enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
+       enum nl80211_band band = ieee80211_get_sdata_band(sdata);
        u32 mask, set;
 
        sband = local->hw.wiphy->bands[band];
@@ -1848,7 +1848,7 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
                                struct bss_parameters *params)
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-       enum ieee80211_band band;
+       enum nl80211_band band;
        u32 changed = 0;
 
        if (!sdata_dereference(sdata->u.ap.beacon, sdata))
@@ -1867,7 +1867,7 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
        }
 
        if (!sdata->vif.bss_conf.use_short_slot &&
-           band == IEEE80211_BAND_5GHZ) {
+           band == NL80211_BAND_5GHZ) {
                sdata->vif.bss_conf.use_short_slot = true;
                changed |= BSS_CHANGED_ERP_SLOT;
        }
@@ -2097,12 +2097,12 @@ static int ieee80211_leave_ocb(struct wiphy *wiphy, struct net_device *dev)
 }
 
 static int ieee80211_set_mcast_rate(struct wiphy *wiphy, struct net_device *dev,
-                                   int rate[IEEE80211_NUM_BANDS])
+                                   int rate[NUM_NL80211_BANDS])
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
        memcpy(sdata->vif.bss_conf.mcast_rate, rate,
-              sizeof(int) * IEEE80211_NUM_BANDS);
+              sizeof(int) * NUM_NL80211_BANDS);
 
        return 0;
 }
@@ -2507,7 +2507,7 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
                        return ret;
        }
 
-       for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
+       for (i = 0; i < NUM_NL80211_BANDS; i++) {
                struct ieee80211_supported_band *sband = wiphy->bands[i];
                int j;
 
@@ -3135,7 +3135,7 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
        struct ieee80211_tx_info *info;
        struct sta_info *sta;
        struct ieee80211_chanctx_conf *chanctx_conf;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        int ret;
 
        /* the lock is needed to assign the cookie later */
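
From here on, most mac80211 hunks are one mechanical rename: enum ieee80211_band duplicated enum nl80211_band value for value, so the in-kernel copy is dropped in favour of the UAPI one (IEEE80211_BAND_* becomes NL80211_BAND_*, IEEE80211_NUM_BANDS becomes NUM_NL80211_BANDS). The surviving enum, for reference:

    enum nl80211_band {
            NL80211_BAND_2GHZ,
            NL80211_BAND_5GHZ,
            NL80211_BAND_60GHZ,

            NUM_NL80211_BANDS,      /* keep last */
    };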
index 283981108ca80cb5bb7182b28e626a1a14c09fb1..74142d07ad31072f8308681a3ff3cfd55da74738 100644 (file)
@@ -343,8 +343,10 @@ static void ieee80211_change_chanctx(struct ieee80211_local *local,
                                     struct ieee80211_chanctx *ctx,
                                     const struct cfg80211_chan_def *chandef)
 {
-       if (cfg80211_chandef_identical(&ctx->conf.def, chandef))
+       if (cfg80211_chandef_identical(&ctx->conf.def, chandef)) {
+               ieee80211_recalc_chanctx_min_def(local, ctx);
                return;
+       }
 
        WARN_ON(!cfg80211_chandef_compatible(&ctx->conf.def, chandef));
 
index 37ea30e0754c2b3d67b93f53fc9a582e2757dc3a..a5ba739cd2a7480587520873c88b5836bfe382cf 100644 (file)
@@ -169,21 +169,21 @@ static ssize_t ieee80211_if_write_##name(struct file *file,               \
        IEEE80211_IF_FILE_R(name)
 
 /* common attributes */
-IEEE80211_IF_FILE(rc_rateidx_mask_2ghz, rc_rateidx_mask[IEEE80211_BAND_2GHZ],
+IEEE80211_IF_FILE(rc_rateidx_mask_2ghz, rc_rateidx_mask[NL80211_BAND_2GHZ],
                  HEX);
-IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[IEEE80211_BAND_5GHZ],
+IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[NL80211_BAND_5GHZ],
                  HEX);
 IEEE80211_IF_FILE(rc_rateidx_mcs_mask_2ghz,
-                 rc_rateidx_mcs_mask[IEEE80211_BAND_2GHZ], HEXARRAY);
+                 rc_rateidx_mcs_mask[NL80211_BAND_2GHZ], HEXARRAY);
 IEEE80211_IF_FILE(rc_rateidx_mcs_mask_5ghz,
-                 rc_rateidx_mcs_mask[IEEE80211_BAND_5GHZ], HEXARRAY);
+                 rc_rateidx_mcs_mask[NL80211_BAND_5GHZ], HEXARRAY);
 
 static ssize_t ieee80211_if_fmt_rc_rateidx_vht_mcs_mask_2ghz(
                                const struct ieee80211_sub_if_data *sdata,
                                char *buf, int buflen)
 {
        int i, len = 0;
-       const u16 *mask = sdata->rc_rateidx_vht_mcs_mask[IEEE80211_BAND_2GHZ];
+       const u16 *mask = sdata->rc_rateidx_vht_mcs_mask[NL80211_BAND_2GHZ];
 
        for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
                len += scnprintf(buf + len, buflen - len, "%04x ", mask[i]);
@@ -199,7 +199,7 @@ static ssize_t ieee80211_if_fmt_rc_rateidx_vht_mcs_mask_5ghz(
                                char *buf, int buflen)
 {
        int i, len = 0;
-       const u16 *mask = sdata->rc_rateidx_vht_mcs_mask[IEEE80211_BAND_5GHZ];
+       const u16 *mask = sdata->rc_rateidx_vht_mcs_mask[NL80211_BAND_5GHZ];
 
        for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
                len += scnprintf(buf + len, buflen - len, "%04x ", mask[i]);
index c6d4b75eb60b34fed9585e9016dd0e97087bf856..a31d30713d0897e7b41f71b444443abdd8c94645 100644 (file)
@@ -126,7 +126,7 @@ ieee80211_ibss_build_presp(struct ieee80211_sub_if_data *sdata,
                }
        }
 
-       if (sband->band == IEEE80211_BAND_2GHZ) {
+       if (sband->band == NL80211_BAND_2GHZ) {
                *pos++ = WLAN_EID_DS_PARAMS;
                *pos++ = 1;
                *pos++ = ieee80211_frequency_to_channel(
@@ -348,11 +348,11 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
         *
         * HT follows these specifications (IEEE 802.11-2012 20.3.18)
         */
-       sdata->vif.bss_conf.use_short_slot = chan->band == IEEE80211_BAND_5GHZ;
+       sdata->vif.bss_conf.use_short_slot = chan->band == NL80211_BAND_5GHZ;
        bss_change |= BSS_CHANGED_ERP_SLOT;
 
        /* cf. IEEE 802.11 9.2.12 */
-       if (chan->band == IEEE80211_BAND_2GHZ && have_higher_than_11mbit)
+       if (chan->band == NL80211_BAND_2GHZ && have_higher_than_11mbit)
                sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
        else
                sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
@@ -989,7 +989,7 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata,
                                      struct ieee80211_channel *channel)
 {
        struct sta_info *sta;
-       enum ieee80211_band band = rx_status->band;
+       enum nl80211_band band = rx_status->band;
        enum nl80211_bss_scan_width scan_width;
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
@@ -1109,7 +1109,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_channel *channel;
        u64 beacon_timestamp, rx_timestamp;
        u32 supp_rates = 0;
-       enum ieee80211_band band = rx_status->band;
+       enum nl80211_band band = rx_status->band;
 
        channel = ieee80211_get_channel(local->hw.wiphy, rx_status->freq);
        if (!channel)
index 40c1d343992cfa0c0afc02404acf8fbdf5297aa9..9438c940668719cfea2e1f4b6e03ab5c88c2211d 100644 (file)
@@ -896,13 +896,13 @@ struct ieee80211_sub_if_data {
        struct ieee80211_if_ap *bss;
 
        /* bitmap of allowed (non-MCS) rate indexes for rate control */
-       u32 rc_rateidx_mask[IEEE80211_NUM_BANDS];
+       u32 rc_rateidx_mask[NUM_NL80211_BANDS];
 
-       bool rc_has_mcs_mask[IEEE80211_NUM_BANDS];
-       u8  rc_rateidx_mcs_mask[IEEE80211_NUM_BANDS][IEEE80211_HT_MCS_MASK_LEN];
+       bool rc_has_mcs_mask[NUM_NL80211_BANDS];
+       u8  rc_rateidx_mcs_mask[NUM_NL80211_BANDS][IEEE80211_HT_MCS_MASK_LEN];
 
-       bool rc_has_vht_mcs_mask[IEEE80211_NUM_BANDS];
-       u16 rc_rateidx_vht_mcs_mask[IEEE80211_NUM_BANDS][NL80211_VHT_NSS_MAX];
+       bool rc_has_vht_mcs_mask[NUM_NL80211_BANDS];
+       u16 rc_rateidx_vht_mcs_mask[NUM_NL80211_BANDS][NL80211_VHT_NSS_MAX];
 
        union {
                struct ieee80211_if_ap ap;
@@ -957,10 +957,10 @@ sdata_assert_lock(struct ieee80211_sub_if_data *sdata)
        lockdep_assert_held(&sdata->wdev.mtx);
 }
 
-static inline enum ieee80211_band
+static inline enum nl80211_band
 ieee80211_get_sdata_band(struct ieee80211_sub_if_data *sdata)
 {
-       enum ieee80211_band band = IEEE80211_BAND_2GHZ;
+       enum nl80211_band band = NL80211_BAND_2GHZ;
        struct ieee80211_chanctx_conf *chanctx_conf;
 
        rcu_read_lock();
@@ -1231,7 +1231,7 @@ struct ieee80211_local {
        struct cfg80211_scan_request __rcu *scan_req;
        struct ieee80211_scan_request *hw_scan_req;
        struct cfg80211_chan_def scan_chandef;
-       enum ieee80211_band hw_scan_band;
+       enum nl80211_band hw_scan_band;
        int scan_channel_idx;
        int scan_ies_len;
        int hw_scan_ies_bufsize;
@@ -1730,14 +1730,18 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
 enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta);
 enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta);
 void ieee80211_sta_set_rx_nss(struct sta_info *sta);
+enum ieee80211_sta_rx_bandwidth
+ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width);
+enum nl80211_chan_width ieee80211_sta_cap_chan_bw(struct sta_info *sta);
+void ieee80211_sta_set_rx_nss(struct sta_info *sta);
 void ieee80211_process_mu_groups(struct ieee80211_sub_if_data *sdata,
                                 struct ieee80211_mgmt *mgmt);
 u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
                                   struct sta_info *sta, u8 opmode,
-                                 enum ieee80211_band band);
+                                 enum nl80211_band band);
 void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
                                 struct sta_info *sta, u8 opmode,
-                                enum ieee80211_band band);
+                                enum nl80211_band band);
 void ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata,
                                      struct ieee80211_sta_vht_cap *vht_cap);
 void ieee80211_get_vht_mask_from_cap(__le16 vht_cap,
@@ -1765,7 +1769,7 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
  */
 int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
                                 struct ieee802_11_elems *elems,
-                                enum ieee80211_band current_band,
+                                enum nl80211_band current_band,
                                 u32 sta_flags, u8 *bssid,
                                 struct ieee80211_csa_ie *csa_ie);
 
@@ -1790,7 +1794,7 @@ static inline int __ieee80211_resume(struct ieee80211_hw *hw)
 
 /* utility functions/constants */
 extern const void *const mac80211_wiphy_privid; /* for wiphy privid */
-int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
+int ieee80211_frame_duration(enum nl80211_band band, size_t len,
                             int rate, int erp, int short_preamble,
                             int shift);
 void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
@@ -1800,12 +1804,12 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
 
 void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
                                 struct sk_buff *skb, int tid,
-                                enum ieee80211_band band);
+                                enum nl80211_band band);
 
 static inline void
 ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
                          struct sk_buff *skb, int tid,
-                         enum ieee80211_band band)
+                         enum nl80211_band band)
 {
        rcu_read_lock();
        __ieee80211_tx_skb_tid_band(sdata, skb, tid, band);
@@ -1960,7 +1964,7 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata,
 
 u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
                            struct ieee802_11_elems *elems,
-                           enum ieee80211_band band, u32 *basic_rates);
+                           enum nl80211_band band, u32 *basic_rates);
 int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata,
                                 enum ieee80211_smps_mode smps_mode);
 int __ieee80211_request_smps_ap(struct ieee80211_sub_if_data *sdata,
@@ -1983,10 +1987,10 @@ int ieee80211_parse_bitrates(struct cfg80211_chan_def *chandef,
                             const u8 *srates, int srates_len, u32 *rates);
 int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
                            struct sk_buff *skb, bool need_basic,
-                           enum ieee80211_band band);
+                           enum nl80211_band band);
 int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
                                struct sk_buff *skb, bool need_basic,
-                               enum ieee80211_band band);
+                               enum nl80211_band band);
 u8 *ieee80211_add_wmm_info_ie(u8 *buf, u8 qosinfo);
 
 /* channel management */
index 097ece8b5c021b00a0484fa74b05b84feef8a35c..c59af3eb9fa46c0cff03ead65e6b5353e3bf508d 100644 (file)
@@ -1761,7 +1761,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
 
                ret = dev_alloc_name(ndev, ndev->name);
                if (ret < 0) {
-                       free_netdev(ndev);
+                       ieee80211_if_free(ndev);
                        return ret;
                }
 
@@ -1800,7 +1800,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
        INIT_DELAYED_WORK(&sdata->dec_tailroom_needed_wk,
                          ieee80211_delayed_tailroom_dec);
 
-       for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
+       for (i = 0; i < NUM_NL80211_BANDS; i++) {
                struct ieee80211_supported_band *sband;
                sband = local->hw.wiphy->bands[i];
                sdata->rc_rateidx_mask[i] =
@@ -1847,7 +1847,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
 
                ret = register_netdevice(ndev);
                if (ret) {
-                       free_netdev(ndev);
+                       ieee80211_if_free(ndev);
                        return ret;
                }
        }
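
register_netdevice() has not succeeded on these error paths, so the netdev destructor never runs and teardown must be done by hand; the switch from free_netdev() to ieee80211_if_free() presumably exists because the latter also releases state allocated alongside the netdev (in this era, the per-CPU tstats counters), which a bare free_netdev() would leak. A sketch of what such a helper pairs together, assuming the 4.7-era layout:

    static void ieee80211_if_free(struct net_device *ndev)
    {
            free_percpu(ndev->tstats);      /* missed by bare free_netdev() */
            free_netdev(ndev);
    }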
index 33c80de61eca37a41937f2afc18a8a35715d0791..7ee91d6151d1dbd549cae779be8125fd228285c1 100644 (file)
@@ -801,7 +801,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 {
        struct ieee80211_local *local = hw_to_local(hw);
        int result, i;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        int channels, max_bitrates;
        bool supp_ht, supp_vht;
        netdev_features_t feature_whitelist;
@@ -874,7 +874,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
        max_bitrates = 0;
        supp_ht = false;
        supp_vht = false;
-       for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+       for (band = 0; band < NUM_NL80211_BANDS; band++) {
                struct ieee80211_supported_band *sband;
 
                sband = local->hw.wiphy->bands[band];
@@ -936,7 +936,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
        if (!local->int_scan_req)
                return -ENOMEM;
 
-       for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+       for (band = 0; band < NUM_NL80211_BANDS; band++) {
                if (!local->hw.wiphy->bands[band])
                        continue;
                local->int_scan_req->rates[band] = (u32) -1;
index dcc1facc807ca11930dd1afb078593bb3bbf236e..4c6404e1ad6e683da649f507829c19b232fcd417 100644 (file)
@@ -415,7 +415,7 @@ int mesh_add_ht_cap_ie(struct ieee80211_sub_if_data *sdata,
                       struct sk_buff *skb)
 {
        struct ieee80211_local *local = sdata->local;
-       enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
+       enum nl80211_band band = ieee80211_get_sdata_band(sdata);
        struct ieee80211_supported_band *sband;
        u8 *pos;
 
@@ -478,7 +478,7 @@ int mesh_add_vht_cap_ie(struct ieee80211_sub_if_data *sdata,
                        struct sk_buff *skb)
 {
        struct ieee80211_local *local = sdata->local;
-       enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
+       enum nl80211_band band = ieee80211_get_sdata_band(sdata);
        struct ieee80211_supported_band *sband;
        u8 *pos;
 
@@ -680,7 +680,7 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh)
        struct ieee80211_mgmt *mgmt;
        struct ieee80211_chanctx_conf *chanctx_conf;
        struct mesh_csa_settings *csa;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        u8 *pos;
        struct ieee80211_sub_if_data *sdata;
        int hdr_len = offsetof(struct ieee80211_mgmt, u.beacon) +
@@ -930,7 +930,7 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
        struct cfg80211_csa_settings params;
        struct ieee80211_csa_ie csa_ie;
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
-       enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
+       enum nl80211_band band = ieee80211_get_sdata_band(sdata);
        int err;
        u32 sta_flags;
 
@@ -1084,7 +1084,7 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_channel *channel;
        size_t baselen;
        int freq;
-       enum ieee80211_band band = rx_status->band;
+       enum nl80211_band band = rx_status->band;
 
        /* ignore ProbeResp to foreign address */
        if (stype == IEEE80211_STYPE_PROBE_RESP &&
index 2748cf627ee31afaa679ac76c31b8d4644b01b0e..8f9c3bde835f4a3a8f1c777c0b2af0fea81acf2d 100644 (file)
@@ -530,7 +530,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
        const u8 *target_addr, *orig_addr;
        const u8 *da;
        u8 target_flags, ttl, flags;
-       u32 orig_sn, target_sn, lifetime, target_metric;
+       u32 orig_sn, target_sn, lifetime, target_metric = 0;
        bool reply = false;
        bool forward = true;
        bool root_is_gate;
index 563bea050383892a752a9c36d8bf008a39afb2ae..79f2a0a13db8ee6572a152374da196e17ef8d94a 100644 (file)
@@ -93,18 +93,18 @@ static inline void mesh_plink_fsm_restart(struct sta_info *sta)
 static u32 mesh_set_short_slot_time(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_local *local = sdata->local;
-       enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
+       enum nl80211_band band = ieee80211_get_sdata_band(sdata);
        struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
        struct sta_info *sta;
        u32 erp_rates = 0, changed = 0;
        int i;
        bool short_slot = false;
 
-       if (band == IEEE80211_BAND_5GHZ) {
+       if (band == NL80211_BAND_5GHZ) {
                /* (IEEE 802.11-2012 19.4.5) */
                short_slot = true;
                goto out;
-       } else if (band != IEEE80211_BAND_2GHZ)
+       } else if (band != NL80211_BAND_2GHZ)
                goto out;
 
        for (i = 0; i < sband->n_bitrates; i++)
@@ -247,7 +247,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
        mgmt->u.action.u.self_prot.action_code = action;
 
        if (action != WLAN_SP_MESH_PEERING_CLOSE) {
-               enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
+               enum nl80211_band band = ieee80211_get_sdata_band(sdata);
 
                /* capability info */
                pos = skb_put(skb, 2);
@@ -385,7 +385,7 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
                               struct ieee802_11_elems *elems, bool insert)
 {
        struct ieee80211_local *local = sdata->local;
-       enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
+       enum nl80211_band band = ieee80211_get_sdata_band(sdata);
        struct ieee80211_supported_band *sband;
        u32 rates, basic_rates = 0, changed = 0;
        enum ieee80211_sta_rx_bandwidth bw = sta->sta.bandwidth;
index d3c75ac8a029aabdb0be0b61035a1643c5b8a587..885f4ca0888d510f255c8ff9920808e66f39df53 100644 (file)
@@ -661,7 +661,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
 
        capab = WLAN_CAPABILITY_ESS;
 
-       if (sband->band == IEEE80211_BAND_2GHZ) {
+       if (sband->band == NL80211_BAND_2GHZ) {
                capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
                capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
        }
@@ -1100,7 +1100,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
        struct cfg80211_bss *cbss = ifmgd->associated;
        struct ieee80211_chanctx_conf *conf;
        struct ieee80211_chanctx *chanctx;
-       enum ieee80211_band current_band;
+       enum nl80211_band current_band;
        struct ieee80211_csa_ie csa_ie;
        struct ieee80211_channel_switch ch_switch;
        int res;
@@ -1257,11 +1257,11 @@ ieee80211_find_80211h_pwr_constr(struct ieee80211_sub_if_data *sdata,
        default:
                WARN_ON_ONCE(1);
                /* fall through */
-       case IEEE80211_BAND_2GHZ:
-       case IEEE80211_BAND_60GHZ:
+       case NL80211_BAND_2GHZ:
+       case NL80211_BAND_60GHZ:
                chan_increment = 1;
                break;
-       case IEEE80211_BAND_5GHZ:
+       case NL80211_BAND_5GHZ:
                chan_increment = 4;
                break;
        }
@@ -1861,7 +1861,7 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
        }
 
        use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME);
-       if (ieee80211_get_sdata_band(sdata) == IEEE80211_BAND_5GHZ)
+       if (ieee80211_get_sdata_band(sdata) == NL80211_BAND_5GHZ)
                use_short_slot = true;
 
        if (use_protection != bss_conf->use_cts_prot) {
@@ -4375,7 +4375,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
                sdata->vif.bss_conf.basic_rates = basic_rates;
 
                /* cf. IEEE 802.11 9.2.12 */
-               if (cbss->channel->band == IEEE80211_BAND_2GHZ &&
+               if (cbss->channel->band == NL80211_BAND_2GHZ &&
                    have_higher_than_11mbit)
                        sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
                else
index a4e2f4e67f941084714fedab7b7e087df4fbb47f..206698bc93f406939bb5d883b6ab2f04bc1a3bed 100644 (file)
@@ -287,7 +287,7 @@ static void __rate_control_send_low(struct ieee80211_hw *hw,
        u32 rate_flags =
                ieee80211_chandef_rate_flags(&hw->conf.chandef);
 
-       if ((sband->band == IEEE80211_BAND_2GHZ) &&
+       if ((sband->band == NL80211_BAND_2GHZ) &&
            (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE))
                rate_flags |= IEEE80211_RATE_ERP_G;
 
index b54f398cda5d0e2d561f2383f1d049a5410fcf68..14c5ba3a1b1c6c3b139729f8921ec01ef1b03d3d 100644 (file)
@@ -436,7 +436,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
 
 
 static void
-calc_rate_durations(enum ieee80211_band band,
+calc_rate_durations(enum nl80211_band band,
                    struct minstrel_rate *d,
                    struct ieee80211_rate *rate,
                    struct cfg80211_chan_def *chandef)
@@ -579,7 +579,7 @@ minstrel_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
        if (!mi)
                return NULL;
 
-       for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
+       for (i = 0; i < NUM_NL80211_BANDS; i++) {
                sband = hw->wiphy->bands[i];
                if (sband && sband->n_bitrates > max_rates)
                        max_rates = sband->n_bitrates;
@@ -621,7 +621,7 @@ minstrel_init_cck_rates(struct minstrel_priv *mp)
        u32 rate_flags = ieee80211_chandef_rate_flags(&mp->hw->conf.chandef);
        int i, j;
 
-       sband = mp->hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+       sband = mp->hw->wiphy->bands[NL80211_BAND_2GHZ];
        if (!sband)
                return;
 
index d77a9a8423387937e47e1b1e53f50d10ef1679f9..30fbabf4bcbc16aeb93e9c673d190cd3b615988b 100644 (file)
@@ -1137,7 +1137,7 @@ minstrel_ht_update_cck(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
 {
        int i;
 
-       if (sband->band != IEEE80211_BAND_2GHZ)
+       if (sband->band != NL80211_BAND_2GHZ)
                return;
 
        if (!ieee80211_hw_check(mp->hw, SUPPORTS_HT_CCK_RATES))
@@ -1335,7 +1335,7 @@ minstrel_ht_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
        int max_rates = 0;
        int i;
 
-       for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
+       for (i = 0; i < NUM_NL80211_BANDS; i++) {
                sband = hw->wiphy->bands[i];
                if (sband && sband->n_bitrates > max_rates)
                        max_rates = sband->n_bitrates;
index c2b659e9a9f96097e197bc419083c12acfa1bc81..c5678703921e0318e285c7c3b014827820d8b7a8 100644 (file)
@@ -322,7 +322,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
        else if (status->flag & RX_FLAG_5MHZ)
                channel_flags |= IEEE80211_CHAN_QUARTER;
 
-       if (status->band == IEEE80211_BAND_5GHZ)
+       if (status->band == NL80211_BAND_5GHZ)
                channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ;
        else if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT))
                channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
@@ -2823,7 +2823,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
 
                switch (mgmt->u.action.u.measurement.action_code) {
                case WLAN_ACTION_SPCT_MSR_REQ:
-                       if (status->band != IEEE80211_BAND_5GHZ)
+                       if (status->band != NL80211_BAND_5GHZ)
                                break;
 
                        if (len < (IEEE80211_MIN_ACTION_SIZE +
@@ -4043,7 +4043,7 @@ void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
 
        WARN_ON_ONCE(softirq_count() == 0);
 
-       if (WARN_ON(status->band >= IEEE80211_NUM_BANDS))
+       if (WARN_ON(status->band >= NUM_NL80211_BANDS))
                goto drop;
 
        sband = local->hw.wiphy->bands[status->band];
index 41aa728e54686518039f9fd78b9bd177baf10a71..f9648ef9e31fecddca35460834f3728a37e04afb 100644 (file)
@@ -272,7 +272,7 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
                n_chans = req->n_channels;
        } else {
                do {
-                       if (local->hw_scan_band == IEEE80211_NUM_BANDS)
+                       if (local->hw_scan_band == NUM_NL80211_BANDS)
                                return false;
 
                        n_chans = 0;
@@ -485,7 +485,7 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
        int i;
        struct ieee80211_sub_if_data *sdata;
        struct cfg80211_scan_request *scan_req;
-       enum ieee80211_band band = local->hw.conf.chandef.chan->band;
+       enum nl80211_band band = local->hw.conf.chandef.chan->band;
        u32 tx_flags;
 
        scan_req = rcu_dereference_protected(local->scan_req,
@@ -953,7 +953,7 @@ int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
 {
        struct ieee80211_local *local = sdata->local;
        int ret = -EBUSY, i, n_ch = 0;
-       enum ieee80211_band band;
+       enum nl80211_band band;
 
        mutex_lock(&local->mtx);
 
@@ -965,7 +965,7 @@ int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
        if (!channels) {
                int max_n;
 
-               for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+               for (band = 0; band < NUM_NL80211_BANDS; band++) {
                        if (!local->hw.wiphy->bands[band])
                                continue;
 
@@ -1085,7 +1085,7 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_scan_ies sched_scan_ies = {};
        struct cfg80211_chan_def chandef;
        int ret, i, iebufsz, num_bands = 0;
-       u32 rate_masks[IEEE80211_NUM_BANDS] = {};
+       u32 rate_masks[NUM_NL80211_BANDS] = {};
        u8 bands_used = 0;
        u8 *ie;
        size_t len;
@@ -1097,7 +1097,7 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
        if (!local->ops->sched_scan_start)
                return -ENOTSUPP;
 
-       for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
+       for (i = 0; i < NUM_NL80211_BANDS; i++) {
                if (local->hw.wiphy->bands[i]) {
                        bands_used |= BIT(i);
                        rate_masks[i] = (u32) -1;
index 06e6ac8cc693dcff9420f859e5cc8584b2592570..2ddc661f098862a29ee0f516ed01563a73c36aeb 100644 (file)
 
 int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
                                 struct ieee802_11_elems *elems,
-                                enum ieee80211_band current_band,
+                                enum nl80211_band current_band,
                                 u32 sta_flags, u8 *bssid,
                                 struct ieee80211_csa_ie *csa_ie)
 {
-       enum ieee80211_band new_band;
+       enum nl80211_band new_band;
        int new_freq;
        u8 new_chan_no;
        struct ieee80211_channel *new_chan;
index 960e13d8ed304570efd7bdeb2299be620498bff3..5ccfdbd406bdfe4b1613432eae2871fae01b0070 100644 (file)
@@ -67,6 +67,7 @@
 
 static const struct rhashtable_params sta_rht_params = {
        .nelem_hint = 3, /* start small */
+       .insecure_elasticity = true, /* Disable chain-length checks. */
        .automatic_shrinking = true,
        .head_offset = offsetof(struct sta_info, hash_node),
        .key_offset = offsetof(struct sta_info, addr),
@@ -259,11 +260,11 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
 }
 
 /* Caller must hold local->sta_mtx */
-static void sta_info_hash_add(struct ieee80211_local *local,
-                             struct sta_info *sta)
+static int sta_info_hash_add(struct ieee80211_local *local,
+                            struct sta_info *sta)
 {
-       rhashtable_insert_fast(&local->sta_hash, &sta->hash_node,
-                              sta_rht_params);
+       return rhashtable_insert_fast(&local->sta_hash, &sta->hash_node,
+                                     sta_rht_params);
 }
 
 static void sta_deliver_ps_frames(struct work_struct *wk)
@@ -536,7 +537,9 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
        set_sta_flag(sta, WLAN_STA_BLOCK_BA);
 
        /* make the station visible */
-       sta_info_hash_add(local, sta);
+       err = sta_info_hash_add(local, sta);
+       if (err)
+               goto out_drop_sta;
 
        list_add_tail_rcu(&sta->list, &local->sta_list);
 
@@ -569,6 +572,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
  out_remove:
        sta_info_hash_del(local, sta);
        list_del_rcu(&sta->list);
+ out_drop_sta:
        local->num_sta--;
        synchronize_net();
        __cleanup_single_sta(sta);
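
Two related robustness fixes here: rhashtable_insert_fast() can fail (for instance -ENOMEM while the table grows), so sta_info_hash_add() now returns the error and sta_info_insert_finish() unwinds through the new out_drop_sta label instead of ignoring the failure; and insecure_elasticity disables the chain-length limit, which could otherwise abort inserts when many entries hash alike (several vifs may legitimately hold stations with the same MAC address). The call-site shape after the change:

    err = sta_info_hash_add(local, sta);    /* rhashtable_insert_fast() */
    if (err)
            goto out_drop_sta;              /* undo accounting, free sta */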
index dd6c6d400208a536d07115147952fcb26c2a206f..c8b8ccc370eb70c75ab43a8e013e60c1f280debd 100644 (file)
@@ -431,7 +431,6 @@ struct ieee80211_sta_rx_stats {
  * @uploaded: set to true when sta is uploaded to the driver
  * @sta: station information we share with the driver
  * @sta_state: duplicates information about station state (for debug)
- * @beacon_loss_count: number of times beacon loss has triggered
  * @rcu_head: RCU head used for freeing this station struct
  * @cur_max_bandwidth: maximum bandwidth to use for TX to the station,
  *     taken from HT/VHT capabilities or VHT operating mode notification
index c9eeb3f1280852b6ec1c44eb82755390cf959629..1c7d45a6d93ec3be6bf266410f327cc64c4ce358 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2014, Intel Corporation
  * Copyright 2014  Intel Mobile Communications GmbH
- * Copyright 2015  Intel Deutschland GmbH
+ * Copyright 2015 - 2016 Intel Deutschland GmbH
  *
  * This file is GPLv2 as found in COPYING.
  */
@@ -15,6 +15,7 @@
 #include <linux/rtnetlink.h>
 #include "ieee80211_i.h"
 #include "driver-ops.h"
+#include "rate.h"
 
 /* give usermode some time for retries in setting up the TDLS session */
 #define TDLS_PEER_SETUP_TIMEOUT        (15 * HZ)
@@ -46,7 +47,7 @@ static void ieee80211_tdls_add_ext_capab(struct ieee80211_sub_if_data *sdata,
                           NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
        bool wider_band = ieee80211_hw_check(&local->hw, TDLS_WIDER_BW) &&
                          !ifmgd->tdls_wider_bw_prohibited;
-       enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
+       enum nl80211_band band = ieee80211_get_sdata_band(sdata);
        struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
        bool vht = sband && sband->vht_cap.vht_supported;
        u8 *pos = (void *)skb_put(skb, 10);
@@ -183,7 +184,7 @@ static u16 ieee80211_get_tdls_sta_capab(struct ieee80211_sub_if_data *sdata,
        if (status_code != 0)
                return 0;
 
-       if (ieee80211_get_sdata_band(sdata) == IEEE80211_BAND_2GHZ) {
+       if (ieee80211_get_sdata_band(sdata) == NL80211_BAND_2GHZ) {
                return WLAN_CAPABILITY_SHORT_SLOT_TIME |
                       WLAN_CAPABILITY_SHORT_PREAMBLE;
        }
@@ -302,7 +303,7 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
        /* IEEE802.11ac-2013 Table E-4 */
        u16 centers_80mhz[] = { 5210, 5290, 5530, 5610, 5690, 5775 };
        struct cfg80211_chan_def uc = sta->tdls_chandef;
-       enum nl80211_chan_width max_width = ieee80211_get_sta_bw(&sta->sta);
+       enum nl80211_chan_width max_width = ieee80211_sta_cap_chan_bw(sta);
        int i;
 
        /* only support upgrading non-narrow channels up to 80Mhz */
@@ -313,7 +314,7 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
        if (max_width > NL80211_CHAN_WIDTH_80)
                max_width = NL80211_CHAN_WIDTH_80;
 
-       if (uc.width == max_width)
+       if (uc.width >= max_width)
                return;
        /*
         * Channel usage constrains in the IEEE802.11ac-2013 specification only
@@ -324,6 +325,7 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
        for (i = 0; i < ARRAY_SIZE(centers_80mhz); i++)
                if (abs(uc.chan->center_freq - centers_80mhz[i]) <= 30) {
                        uc.center_freq1 = centers_80mhz[i];
+                       uc.center_freq2 = 0;
                        uc.width = NL80211_CHAN_WIDTH_80;
                        break;
                }
@@ -332,7 +334,7 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
                return;
 
        /* proceed to downgrade the chandef until usable or the same */
-       while (uc.width > max_width &&
+       while (uc.width > max_width ||
               !cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &uc,
                                              sdata->wdev.iftype))
                ieee80211_chandef_downgrade(&uc);
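
Note the operator change in the downgrade loop: with the old &&, the loop stopped as soon as either condition was satisfied, so the chandef could escape still wider than max_width, or still failing the regulatory check. With ||, downgrading continues until both constraints hold at once:

    /* keep halving while (too wide) OR (cannot beacon here); i.e.
     * terminate only when width <= max_width AND the chandef is usable */
    while (uc.width > max_width ||
           !cfg80211_reg_can_beacon_relax(wiphy, &uc, iftype))
            ieee80211_chandef_downgrade(&uc);

(wiphy and iftype abbreviate the sdata expressions in the hunk above.)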
@@ -355,7 +357,7 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
                                   u8 action_code, bool initiator,
                                   const u8 *extra_ies, size_t extra_ies_len)
 {
-       enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
+       enum nl80211_band band = ieee80211_get_sdata_band(sdata);
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_supported_band *sband;
        struct ieee80211_sta_ht_cap ht_cap;
@@ -542,7 +544,7 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        size_t offset = 0, noffset;
        struct sta_info *sta, *ap_sta;
-       enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
+       enum nl80211_band band = ieee80211_get_sdata_band(sdata);
        u8 *pos;
 
        mutex_lock(&local->sta_mtx);
@@ -609,7 +611,7 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata,
        ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
 
        /* only include VHT-operation if not on the 2.4GHz band */
-       if (band != IEEE80211_BAND_2GHZ && sta->sta.vht_cap.vht_supported) {
+       if (band != NL80211_BAND_2GHZ && sta->sta.vht_cap.vht_supported) {
                /*
                 * if both peers support WIDER_BW, we can expand the chandef to
                 * a wider compatible one, up to 80MHz
@@ -1242,18 +1244,44 @@ int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
        return ret;
 }
 
-static void iee80211_tdls_recalc_chanctx(struct ieee80211_sub_if_data *sdata)
+static void iee80211_tdls_recalc_chanctx(struct ieee80211_sub_if_data *sdata,
+                                        struct sta_info *sta)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_chanctx_conf *conf;
        struct ieee80211_chanctx *ctx;
+       enum nl80211_chan_width width;
+       struct ieee80211_supported_band *sband;
 
        mutex_lock(&local->chanctx_mtx);
        conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
                                         lockdep_is_held(&local->chanctx_mtx));
        if (conf) {
+               width = conf->def.width;
+               sband = local->hw.wiphy->bands[conf->def.chan->band];
                ctx = container_of(conf, struct ieee80211_chanctx, conf);
                ieee80211_recalc_chanctx_chantype(local, ctx);
+
+               /* if width changed and a peer is given, update its BW */
+               if (width != conf->def.width && sta &&
+                   test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW)) {
+                       enum ieee80211_sta_rx_bandwidth bw;
+
+                       bw = ieee80211_chan_width_to_rx_bw(conf->def.width);
+                       bw = min(bw, ieee80211_sta_cap_rx_bw(sta));
+                       if (bw != sta->sta.bandwidth) {
+                               sta->sta.bandwidth = bw;
+                               rate_control_rate_update(local, sband, sta,
+                                                        IEEE80211_RC_BW_CHANGED);
+                               /*
+                                * if a TDLS peer BW was updated, we need to
+                                * recalc the chandef width again, to get the
+                                * correct chanctx min_def
+                                */
+                               ieee80211_recalc_chanctx_chantype(local, ctx);
+                       }
+               }
+
        }
        mutex_unlock(&local->chanctx_mtx);
 }
@@ -1350,8 +1378,6 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
                        break;
                }
 
-               iee80211_tdls_recalc_chanctx(sdata);
-
                mutex_lock(&local->sta_mtx);
                sta = sta_info_get(sdata, peer);
                if (!sta) {
@@ -1360,6 +1386,7 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
                        break;
                }
 
+               iee80211_tdls_recalc_chanctx(sdata, sta);
                iee80211_tdls_recalc_ht_protection(sdata, sta);
 
                set_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH);
@@ -1390,7 +1417,7 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
                iee80211_tdls_recalc_ht_protection(sdata, NULL);
                mutex_unlock(&local->sta_mtx);
 
-               iee80211_tdls_recalc_chanctx(sdata);
+               iee80211_tdls_recalc_chanctx(sdata, NULL);
                break;
        default:
                ret = -ENOTSUPP;
@@ -1746,7 +1773,7 @@ ieee80211_process_tdls_channel_switch_req(struct ieee80211_sub_if_data *sdata,
        u8 target_channel, oper_class;
        bool local_initiator;
        struct sta_info *sta;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        struct ieee80211_tdls_data *tf = (void *)skb->data;
        struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
        int baselen = offsetof(typeof(*tf), u.chan_switch_req.variable);
@@ -1778,10 +1805,10 @@ ieee80211_process_tdls_channel_switch_req(struct ieee80211_sub_if_data *sdata,
        if ((oper_class == 112 || oper_class == 2 || oper_class == 3 ||
             oper_class == 4 || oper_class == 5 || oper_class == 6) &&
             target_channel < 14)
-               band = IEEE80211_BAND_5GHZ;
+               band = NL80211_BAND_5GHZ;
        else
-               band = target_channel < 14 ? IEEE80211_BAND_2GHZ :
-                                            IEEE80211_BAND_5GHZ;
+               band = target_channel < 14 ? NL80211_BAND_2GHZ :
+                                            NL80211_BAND_5GHZ;
 
        freq = ieee80211_channel_to_frequency(target_channel, band);
        if (freq == 0) {
index 8c3b7ae103bc99955d6ea2195a092c8738b653cd..77e4c53baefb198ddf5129706d3e95a7ba8f612a 100644 (file)
@@ -401,7 +401,7 @@ TRACE_EVENT(drv_bss_info_changed,
                __field(u32, sync_device_ts)
                __field(u8, sync_dtim_count)
                __field(u32, basic_rates)
-               __array(int, mcast_rate, IEEE80211_NUM_BANDS)
+               __array(int, mcast_rate, NUM_NL80211_BANDS)
                __field(u16, ht_operation_mode)
                __field(s32, cqm_rssi_thold);
                __field(s32, cqm_rssi_hyst);
@@ -1265,8 +1265,8 @@ TRACE_EVENT(drv_set_bitrate_mask,
        TP_fast_assign(
                LOCAL_ASSIGN;
                VIF_ASSIGN;
-               __entry->legacy_2g = mask->control[IEEE80211_BAND_2GHZ].legacy;
-               __entry->legacy_5g = mask->control[IEEE80211_BAND_5GHZ].legacy;
+               __entry->legacy_2g = mask->control[NL80211_BAND_2GHZ].legacy;
+               __entry->legacy_5g = mask->control[NL80211_BAND_5GHZ].legacy;
        ),
 
        TP_printk(
index 4fa2842ddb25599eddc36ba23e3d0b6ed20b1c41..203044379ce0c2f00d52e68b9a4255c6ad346641 100644 (file)
@@ -150,7 +150,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
                        rate = DIV_ROUND_UP(r->bitrate, 1 << shift);
 
                switch (sband->band) {
-               case IEEE80211_BAND_2GHZ: {
+               case NL80211_BAND_2GHZ: {
                        u32 flag;
                        if (tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
                                flag = IEEE80211_RATE_MANDATORY_G;
@@ -160,13 +160,13 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
                                mrate = r->bitrate;
                        break;
                }
-               case IEEE80211_BAND_5GHZ:
+               case NL80211_BAND_5GHZ:
                        if (r->flags & IEEE80211_RATE_MANDATORY_A)
                                mrate = r->bitrate;
                        break;
-               case IEEE80211_BAND_60GHZ:
+               case NL80211_BAND_60GHZ:
                        /* TODO, for now fall through */
-               case IEEE80211_NUM_BANDS:
+               case NUM_NL80211_BANDS:
                        WARN_ON(1);
                        break;
                }
@@ -1116,11 +1116,15 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
                        reset_agg_timer = true;
                } else {
                        queued = true;
+                       if (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER) {
+                               clear_sta_flag(tx->sta, WLAN_STA_SP);
+                               ps_dbg(tx->sta->sdata,
+                                      "STA %pM aid %d: SP frame queued, close the SP w/o telling the peer\n",
+                                      tx->sta->sta.addr, tx->sta->sta.aid);
+                       }
                        info->control.vif = &tx->sdata->vif;
                        info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
-                       info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS |
-                                       IEEE80211_TX_CTL_NO_PS_BUFFER |
-                                       IEEE80211_TX_STATUS_EOSP;
+                       info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
                        __skb_queue_tail(&tid_tx->pending, skb);
                        if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER)
                                purge_skb = __skb_dequeue(&tid_tx->pending);
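
Annotation: a subtlety in the replaced mask: `~` binds only to the first constant, so the old expression cleared the temporary flags while carrying NO_PS_BUFFER and EOSP (themselves part of the temporary set) across the re-queue, leaving the service period to be ended later. The new code ends the SP on the spot and then clears all temporary flags uniformly. Spelled out (reading of the old semantics is mine, hedged):

        /* Old mask, made explicit:
         *   flags &= (~TEMPORARY_FLAGS) | NO_PS_BUFFER | EOSP;
         * i.e. clear the temporary flags except NO_PS_BUFFER and EOSP.
         * New behaviour: close the SP here (clear WLAN_STA_SP) and
         * clear every temporary flag, the two above included.
         */
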
@@ -1247,7 +1251,8 @@ static void ieee80211_drv_tx(struct ieee80211_local *local,
        struct txq_info *txqi;
        u8 ac;
 
-       if (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE)
+       if ((info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) ||
+           (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
                goto tx_normal;
 
        if (!ieee80211_is_data(hdr->frame_control))
@@ -2133,7 +2138,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
        u16 info_id = 0;
        struct ieee80211_chanctx_conf *chanctx_conf;
        struct ieee80211_sub_if_data *ap_sdata;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        int ret;
 
        if (IS_ERR(sta))
@@ -3592,7 +3597,7 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
        struct sk_buff *skb = NULL;
        struct ieee80211_tx_info *info;
        struct ieee80211_sub_if_data *sdata = NULL;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        struct ieee80211_tx_rate_control txrc;
        struct ieee80211_chanctx_conf *chanctx_conf;
        int csa_off_base = 0;
@@ -4160,7 +4165,7 @@ EXPORT_SYMBOL(ieee80211_unreserve_tid);
 
 void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
                                 struct sk_buff *skb, int tid,
-                                enum ieee80211_band band)
+                                enum nl80211_band band)
 {
        int ac = ieee802_1d_to_ac[tid & 7];
 
index 0319d6d4f86341e60749e17255e9210075e03835..905003f75c4d6434676d0a74da5ecc801b3b1554 100644 (file)
@@ -59,7 +59,7 @@ void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx)
        }
 }
 
-int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
+int ieee80211_frame_duration(enum nl80211_band band, size_t len,
                             int rate, int erp, int short_preamble,
                             int shift)
 {
@@ -77,7 +77,7 @@ int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
         * is assumed to be 0 otherwise.
         */
 
-       if (band == IEEE80211_BAND_5GHZ || erp) {
+       if (band == NL80211_BAND_5GHZ || erp) {
                /*
                 * OFDM:
                 *
@@ -129,7 +129,7 @@ int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
 /* Exported duration function for driver use */
 __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
                                        struct ieee80211_vif *vif,
-                                       enum ieee80211_band band,
+                                       enum nl80211_band band,
                                        size_t frame_len,
                                        struct ieee80211_rate *rate)
 {
@@ -1129,7 +1129,7 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
        rcu_read_lock();
        chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
        use_11b = (chanctx_conf &&
-                  chanctx_conf->def.chan->band == IEEE80211_BAND_2GHZ) &&
+                  chanctx_conf->def.chan->band == NL80211_BAND_2GHZ) &&
                 !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE);
        rcu_read_unlock();
 
@@ -1301,7 +1301,7 @@ void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
 static int ieee80211_build_preq_ies_band(struct ieee80211_local *local,
                                         u8 *buffer, size_t buffer_len,
                                         const u8 *ie, size_t ie_len,
-                                        enum ieee80211_band band,
+                                        enum nl80211_band band,
                                         u32 rate_mask,
                                         struct cfg80211_chan_def *chandef,
                                         size_t *offset)
@@ -1375,7 +1375,7 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_local *local,
                pos += ext_rates_len;
        }
 
-       if (chandef->chan && sband->band == IEEE80211_BAND_2GHZ) {
+       if (chandef->chan && sband->band == NL80211_BAND_2GHZ) {
                if (end - pos < 3)
                        goto out_err;
                *pos++ = WLAN_EID_DS_PARAMS;
@@ -1479,7 +1479,7 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
 
        memset(ie_desc, 0, sizeof(*ie_desc));
 
-       for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
+       for (i = 0; i < NUM_NL80211_BANDS; i++) {
                if (bands_used & BIT(i)) {
                        pos += ieee80211_build_preq_ies_band(local,
                                                             buffer + pos,
@@ -1522,7 +1522,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
        struct sk_buff *skb;
        struct ieee80211_mgmt *mgmt;
        int ies_len;
-       u32 rate_masks[IEEE80211_NUM_BANDS] = {};
+       u32 rate_masks[NUM_NL80211_BANDS] = {};
        struct ieee80211_scan_ies dummy_ie_desc;
 
        /*
@@ -1582,7 +1582,7 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata,
 
 u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
                            struct ieee802_11_elems *elems,
-                           enum ieee80211_band band, u32 *basic_rates)
+                           enum nl80211_band band, u32 *basic_rates)
 {
        struct ieee80211_supported_band *sband;
        size_t num_rates;
@@ -2520,7 +2520,7 @@ int ieee80211_parse_bitrates(struct cfg80211_chan_def *chandef,
 
 int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
                            struct sk_buff *skb, bool need_basic,
-                           enum ieee80211_band band)
+                           enum nl80211_band band)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_supported_band *sband;
@@ -2565,7 +2565,7 @@ int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
 
 int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
                                struct sk_buff *skb, bool need_basic,
-                               enum ieee80211_band band)
+                               enum nl80211_band band)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_supported_band *sband;
@@ -2711,7 +2711,7 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
 
                if (status->flag & RX_FLAG_MACTIME_PLCP_START) {
                        /* TODO: handle HT/VHT preambles */
-                       if (status->band == IEEE80211_BAND_5GHZ) {
+                       if (status->band == NL80211_BAND_5GHZ) {
                                ts += 20 << shift;
                                mpdu_offset += 2;
                        } else if (status->flag & RX_FLAG_SHORTPRE) {
index 89e04d55aa1832e522f8e8f9619bfabc8c81ee93..ee715764a828954e4c8d90e3bfbcbc697c100ec0 100644 (file)
@@ -319,7 +319,30 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta)
        return IEEE80211_STA_RX_BW_80;
 }
 
-static enum ieee80211_sta_rx_bandwidth
+enum nl80211_chan_width ieee80211_sta_cap_chan_bw(struct sta_info *sta)
+{
+       struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap;
+       u32 cap_width;
+
+       if (!vht_cap->vht_supported) {
+               if (!sta->sta.ht_cap.ht_supported)
+                       return NL80211_CHAN_WIDTH_20_NOHT;
+
+               return sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
+                               NL80211_CHAN_WIDTH_40 : NL80211_CHAN_WIDTH_20;
+       }
+
+       cap_width = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
+
+       if (cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ)
+               return NL80211_CHAN_WIDTH_160;
+       else if (cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
+               return NL80211_CHAN_WIDTH_80P80;
+
+       return NL80211_CHAN_WIDTH_80;
+}
+
+enum ieee80211_sta_rx_bandwidth
 ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width)
 {
        switch (width) {
@@ -347,10 +370,7 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta)
 
        bw = ieee80211_sta_cap_rx_bw(sta);
        bw = min(bw, sta->cur_max_bandwidth);
-
-       /* do not cap the BW of TDLS WIDER_BW peers by the bss */
-       if (!test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW))
-               bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width));
+       bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width));
 
        return bw;
 }
@@ -398,7 +418,7 @@ void ieee80211_sta_set_rx_nss(struct sta_info *sta)
 
 u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
                                  struct sta_info *sta, u8 opmode,
-                                 enum ieee80211_band band)
+                                 enum nl80211_band band)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_supported_band *sband;
@@ -484,7 +504,7 @@ EXPORT_SYMBOL_GPL(ieee80211_update_mu_groups);
 
 void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
                                 struct sta_info *sta, u8 opmode,
-                                enum ieee80211_band band)
+                                enum nl80211_band band)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
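
Annotation: ieee80211_sta_cap_chan_bw(), added above, reduces a peer's HT/VHT capability bits to the widest nl80211 channel width it can use, and ieee80211_chan_width_to_rx_bw() loses its `static` so callers outside this file can combine the two. The VHT part decodes the two-bit supported-channel-width field; a condensed restatement of that decode (field meaning per the VHT spec, stated here as an assumption: 0 = 80 MHz only, 1 = 160 MHz, 2 = 160 and 80+80 MHz):

        static enum nl80211_chan_width vht_cap_to_width(u32 cap)
        {
                switch (cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
                case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
                        return NL80211_CHAN_WIDTH_160;
                case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
                        return NL80211_CHAN_WIDTH_80P80;
                default:
                        return NL80211_CHAN_WIDTH_80;  /* field == 0 */
                }
        }
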
index b18c5ed42d956bd7d9261caa7dd05278f72e2785..0b80a7140cc494d8c39bd3efba2423272d1b8844 100644 (file)
@@ -543,6 +543,9 @@ static struct net_device *find_outdev(struct net *net,
        if (!dev)
                return ERR_PTR(-ENODEV);
 
+       if (IS_ERR(dev))
+               return dev;
+
        /* The caller is holding rtnl anyways, so release the dev reference */
        dev_put(dev);
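
Annotation: the one-line fix above matters because helpers on this path can hand back either NULL or an ERR_PTR()-encoded errno; the old code only caught NULL, so an error pointer would fall through to dev_put() and be dereferenced. A minimal sketch of the NULL-vs-error-pointer discipline (find_dev() is a hypothetical stand-in):

        struct net_device *dev = find_dev(net, cfg);    /* hypothetical */

        if (!dev)                 /* not found: choose our own errno */
                return ERR_PTR(-ENODEV);
        if (IS_ERR(dev))          /* lookup already failed: pass it on */
                return dev;
        dev_put(dev);             /* only a real pointer may be put */
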
 
index 0183b32da9427d39aa761e00e080afeff7e4b6d5..bbcf60465e5c439a51b7ef72dcd0d4ddca244ea7 100644 (file)
@@ -31,6 +31,7 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
                                  SKB_GSO_TCPV6 |
                                  SKB_GSO_UDP |
                                  SKB_GSO_DODGY |
+                                 SKB_GSO_TCP_FIXEDID |
                                  SKB_GSO_TCP_ECN)))
                goto out;
 
index 85ca189bdc3d2b01ee4f202348fdf606f2585cf1..2cb3c626cd4307a51e06b9f0bf5c7b92cdb961a9 100644 (file)
@@ -104,6 +104,7 @@ static inline void ct_write_unlock_bh(unsigned int key)
        spin_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
 }
 
+static void ip_vs_conn_expire(unsigned long data);
 
 /*
  *     Returns hash value for IPVS connection entry
@@ -453,10 +454,16 @@ ip_vs_conn_out_get_proto(struct netns_ipvs *ipvs, int af,
 }
 EXPORT_SYMBOL_GPL(ip_vs_conn_out_get_proto);
 
+static void __ip_vs_conn_put_notimer(struct ip_vs_conn *cp)
+{
+       __ip_vs_conn_put(cp);
+       ip_vs_conn_expire((unsigned long)cp);
+}
+
 /*
  *      Put back the conn and restart its timer with its timeout
  */
-void ip_vs_conn_put(struct ip_vs_conn *cp)
+static void __ip_vs_conn_put_timer(struct ip_vs_conn *cp)
 {
        unsigned long t = (cp->flags & IP_VS_CONN_F_ONE_PACKET) ?
                0 : cp->timeout;
@@ -465,6 +472,16 @@ void ip_vs_conn_put(struct ip_vs_conn *cp)
        __ip_vs_conn_put(cp);
 }
 
+void ip_vs_conn_put(struct ip_vs_conn *cp)
+{
+       if ((cp->flags & IP_VS_CONN_F_ONE_PACKET) &&
+           (atomic_read(&cp->refcnt) == 1) &&
+           !timer_pending(&cp->timer))
+               /* expire connection immediately */
+               __ip_vs_conn_put_notimer(cp);
+       else
+               __ip_vs_conn_put_timer(cp);
+}
 
 /*
  *     Fill a no_client_port connection with a client port number
@@ -819,7 +836,8 @@ static void ip_vs_conn_expire(unsigned long data)
                if (cp->control)
                        ip_vs_control_del(cp);
 
-               if (cp->flags & IP_VS_CONN_F_NFCT) {
+               if ((cp->flags & IP_VS_CONN_F_NFCT) &&
+                   !(cp->flags & IP_VS_CONN_F_ONE_PACKET)) {
                        /* Do not access conntracks during subsys cleanup
                         * because nf_conntrack_find_get can not be used after
                         * conntrack cleanup for the net.
@@ -834,7 +852,10 @@ static void ip_vs_conn_expire(unsigned long data)
                ip_vs_unbind_dest(cp);
                if (cp->flags & IP_VS_CONN_F_NO_CPORT)
                        atomic_dec(&ip_vs_conn_no_cport_cnt);
-               call_rcu(&cp->rcu_head, ip_vs_conn_rcu_free);
+               if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
+                       ip_vs_conn_rcu_free(&cp->rcu_head);
+               else
+                       call_rcu(&cp->rcu_head, ip_vs_conn_rcu_free);
                atomic_dec(&ipvs->conn_count);
                return;
        }
@@ -850,7 +871,7 @@ static void ip_vs_conn_expire(unsigned long data)
        if (ipvs->sync_state & IP_VS_STATE_MASTER)
                ip_vs_sync_conn(ipvs, cp, sysctl_sync_threshold(ipvs));
 
-       ip_vs_conn_put(cp);
+       __ip_vs_conn_put_timer(cp);
 }
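
Annotation: the split above separates the two ways a reference drop can end. __ip_vs_conn_put_timer() re-arms the expiry timer (zero timeout for one-packet connections), while the new __ip_vs_conn_put_notimer() expires the entry synchronously; ip_vs_conn_expire() itself now ends with the timer variant so it cannot recurse into the fast path. Schematically:

        /* Reference-drop outcomes after this change (schematic):
         *
         *   ip_vs_conn_put(cp)
         *     - OPS conn, refcnt == 1, no timer pending:
         *         __ip_vs_conn_put_notimer()  -> expire right now
         *     - otherwise:
         *         __ip_vs_conn_put_timer()    -> re-arm timer (0 for OPS)
         */
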
 
 /* Modify timer, so that it expires as soon as possible.
@@ -1240,6 +1261,16 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
        return 1;
 }
 
+static inline bool ip_vs_conn_ops_mode(struct ip_vs_conn *cp)
+{
+       struct ip_vs_service *svc;
+
+       if (!cp->dest)
+               return false;
+       svc = rcu_dereference(cp->dest->svc);
+       return svc && (svc->flags & IP_VS_SVC_F_ONEPACKET);
+}
+
 /* Called from keventd and must protect itself from softirqs */
 void ip_vs_random_dropentry(struct netns_ipvs *ipvs)
 {
@@ -1254,11 +1285,16 @@ void ip_vs_random_dropentry(struct netns_ipvs *ipvs)
                unsigned int hash = prandom_u32() & ip_vs_conn_tab_mask;
 
                hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
-                       if (cp->flags & IP_VS_CONN_F_TEMPLATE)
-                               /* connection template */
-                               continue;
                        if (cp->ipvs != ipvs)
                                continue;
+                       if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
+                               if (atomic_read(&cp->n_control) ||
+                                   !ip_vs_conn_ops_mode(cp))
+                                       continue;
+                               else
+                                       /* connection template of OPS */
+                                       goto try_drop;
+                       }
                        if (cp->protocol == IPPROTO_TCP) {
                                switch(cp->state) {
                                case IP_VS_TCP_S_SYN_RECV:
@@ -1286,6 +1322,7 @@ void ip_vs_random_dropentry(struct netns_ipvs *ipvs)
                                        continue;
                                }
                        } else {
+try_drop:
                                if (!todrop_entry(cp))
                                        continue;
                        }
index b9a4082afa3abb7f2fcbf931ac6594baf474c9f0..1207f20d24e4a25050363e2d6b9e30cc6aaf03b5 100644 (file)
@@ -68,6 +68,7 @@ EXPORT_SYMBOL(ip_vs_conn_put);
 #ifdef CONFIG_IP_VS_DEBUG
 EXPORT_SYMBOL(ip_vs_get_debug_level);
 #endif
+EXPORT_SYMBOL(ip_vs_new_conn_out);
 
 static int ip_vs_net_id __read_mostly;
 /* netns cnt used for uniqueness */
@@ -611,7 +612,10 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
                ret = cp->packet_xmit(skb, cp, pd->pp, iph);
                /* do not touch skb anymore */
 
-               atomic_inc(&cp->in_pkts);
+               if ((cp->flags & IP_VS_CONN_F_ONE_PACKET) && cp->control)
+                       atomic_inc(&cp->control->in_pkts);
+               else
+                       atomic_inc(&cp->in_pkts);
                ip_vs_conn_put(cp);
                return ret;
        }
@@ -1100,6 +1104,143 @@ static inline bool is_new_conn_expected(const struct ip_vs_conn *cp,
        }
 }
 
+/* Generic function to create new connections for outgoing RS packets
+ *
+ * Pre-requisites for successful connection creation:
+ * 1) Virtual Service is NOT fwmark based:
+ *    In fwmark-VS actual vaddr and vport are unknown to IPVS
+ * 2) Real Server and Virtual Service were NOT configured without port:
+ *    This is to allow match of different VS to the same RS ip-addr
+ */
+struct ip_vs_conn *ip_vs_new_conn_out(struct ip_vs_service *svc,
+                                     struct ip_vs_dest *dest,
+                                     struct sk_buff *skb,
+                                     const struct ip_vs_iphdr *iph,
+                                     __be16 dport,
+                                     __be16 cport)
+{
+       struct ip_vs_conn_param param;
+       struct ip_vs_conn *ct = NULL, *cp = NULL;
+       const union nf_inet_addr *vaddr, *daddr, *caddr;
+       union nf_inet_addr snet;
+       __be16 vport;
+       unsigned int flags;
+
+       EnterFunction(12);
+       vaddr = &svc->addr;
+       vport = svc->port;
+       daddr = &iph->saddr;
+       caddr = &iph->daddr;
+
+       /* check pre-requisites are satisfied */
+       if (svc->fwmark)
+               return NULL;
+       if (!vport || !dport)
+               return NULL;
+
+       /* for persistent service first create connection template */
+       if (svc->flags & IP_VS_SVC_F_PERSISTENT) {
+               /* apply netmask the same way ingress-side does */
+#ifdef CONFIG_IP_VS_IPV6
+               if (svc->af == AF_INET6)
+                       ipv6_addr_prefix(&snet.in6, &caddr->in6,
+                                        (__force __u32)svc->netmask);
+               else
+#endif
+                       snet.ip = caddr->ip & svc->netmask;
+               /* fill params and create template if not existent */
+               if (ip_vs_conn_fill_param_persist(svc, skb, iph->protocol,
+                                                 &snet, 0, vaddr,
+                                                 vport, &param) < 0)
+                       return NULL;
+               ct = ip_vs_ct_in_get(&param);
+               if (!ct) {
+                       ct = ip_vs_conn_new(&param, dest->af, daddr, dport,
+                                           IP_VS_CONN_F_TEMPLATE, dest, 0);
+                       if (!ct) {
+                               kfree(param.pe_data);
+                               return NULL;
+                       }
+                       ct->timeout = svc->timeout;
+               } else {
+                       kfree(param.pe_data);
+               }
+       }
+
+       /* connection flags */
+       flags = ((svc->flags & IP_VS_SVC_F_ONEPACKET) &&
+                iph->protocol == IPPROTO_UDP) ? IP_VS_CONN_F_ONE_PACKET : 0;
+       /* create connection */
+       ip_vs_conn_fill_param(svc->ipvs, svc->af, iph->protocol,
+                             caddr, cport, vaddr, vport, &param);
+       cp = ip_vs_conn_new(&param, dest->af, daddr, dport, flags, dest, 0);
+       if (!cp) {
+               if (ct)
+                       ip_vs_conn_put(ct);
+               return NULL;
+       }
+       if (ct) {
+               ip_vs_control_add(cp, ct);
+               ip_vs_conn_put(ct);
+       }
+       ip_vs_conn_stats(cp, svc);
+
+       /* return connection (will be used to handle outgoing packet) */
+       IP_VS_DBG_BUF(6, "New connection RS-initiated:%c c:%s:%u v:%s:%u "
+                     "d:%s:%u conn->flags:%X conn->refcnt:%d\n",
+                     ip_vs_fwd_tag(cp),
+                     IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
+                     IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
+                     IP_VS_DBG_ADDR(cp->af, &cp->daddr), ntohs(cp->dport),
+                     cp->flags, atomic_read(&cp->refcnt));
+       LeaveFunction(12);
+       return cp;
+}
+
+/* Handle outgoing packets which are considered requests initiated by
+ * real servers, so that subsequent responses from external client can be
+ * routed to the right real server.
+ * Used also for outgoing responses in OPS mode.
+ *
+ * Connection management is handled by persistent-engine specific callback.
+ */
+static struct ip_vs_conn *__ip_vs_rs_conn_out(unsigned int hooknum,
+                                             struct netns_ipvs *ipvs,
+                                             int af, struct sk_buff *skb,
+                                             const struct ip_vs_iphdr *iph)
+{
+       struct ip_vs_dest *dest;
+       struct ip_vs_conn *cp = NULL;
+       __be16 _ports[2], *pptr;
+
+       if (hooknum == NF_INET_LOCAL_IN)
+               return NULL;
+
+       pptr = frag_safe_skb_hp(skb, iph->len,
+                               sizeof(_ports), _ports, iph);
+       if (!pptr)
+               return NULL;
+
+       rcu_read_lock();
+       dest = ip_vs_find_real_service(ipvs, af, iph->protocol,
+                                      &iph->saddr, pptr[0]);
+       if (dest) {
+               struct ip_vs_service *svc;
+               struct ip_vs_pe *pe;
+
+               svc = rcu_dereference(dest->svc);
+               if (svc) {
+                       pe = rcu_dereference(svc->pe);
+                       if (pe && pe->conn_out)
+                               cp = pe->conn_out(svc, dest, skb, iph,
+                                                 pptr[0], pptr[1]);
+               }
+       }
+       rcu_read_unlock();
+
+       return cp;
+}
+
 /* Handle response packets: rewrite addresses and send away...
  */
 static unsigned int
@@ -1245,6 +1386,22 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in
 
        if (likely(cp))
                return handle_response(af, skb, pd, cp, &iph, hooknum);
+
+       /* Check for real-server-started requests */
+       if (atomic_read(&ipvs->conn_out_counter)) {
+               /* Currently only for UDP:
+                * connection oriented protocols typically use
+                * ephemeral ports for outgoing connections, so
+                * related incoming responses would not match any VS
+                */
+               if (pp->protocol == IPPROTO_UDP) {
+                       cp = __ip_vs_rs_conn_out(hooknum, ipvs, af, skb, &iph);
+                       if (likely(cp))
+                               return handle_response(af, skb, pd, cp, &iph,
+                                                      hooknum);
+               }
+       }
+
        if (sysctl_nat_icmp_send(ipvs) &&
            (pp->protocol == IPPROTO_TCP ||
             pp->protocol == IPPROTO_UDP ||
@@ -1837,6 +1994,9 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
 
        if (ipvs->sync_state & IP_VS_STATE_MASTER)
                ip_vs_sync_conn(ipvs, cp, pkts);
+       else if ((cp->flags & IP_VS_CONN_F_ONE_PACKET) && cp->control)
+               /* increment is done inside ip_vs_sync_conn too */
+               atomic_inc(&cp->control->in_pkts);
 
        ip_vs_conn_put(cp);
        return ret;
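
Annotation: taken together, the core changes wire up the new persistence-engine ->conn_out hook. ip_vs_out() consults ipvs->conn_out_counter first, so a netns with no conn_out-capable service pays only one atomic_read() on the response path, and only UDP is considered since connection-oriented protocols use ephemeral source ports that would not match any VS. A generic sketch of that gating pattern, with hypothetical names:

        static atomic_t feature_users = ATOMIC_INIT(0); /* like conn_out_counter */

        /* bumped when a service gains a PE with ->conn_out ... */
        static void feature_get(void) { atomic_inc(&feature_users); }
        /* ... and dropped when it loses one */
        static void feature_put(void) { atomic_dec(&feature_users); }

        static bool maybe_handle(struct sk_buff *skb)
        {
                if (!atomic_read(&feature_users))
                        return false;           /* common case: one read, no work */
                return handle_slow_path(skb);   /* only with at least one user */
        }
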
index 404b2a4f4b5be90f630a20ff592e030f1ed4d671..c3c809b2e7122daabac5b45ffed67979e0fecb28 100644 (file)
@@ -567,6 +567,36 @@ bool ip_vs_has_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol,
        return false;
 }
 
+/* Find real service record by <proto,addr,port>.
+ * In case of multiple records with the same <proto,addr,port>, only
+ * the first found record is returned.
+ *
+ * To be called under RCU lock.
+ */
+struct ip_vs_dest *ip_vs_find_real_service(struct netns_ipvs *ipvs, int af,
+                                          __u16 protocol,
+                                          const union nf_inet_addr *daddr,
+                                          __be16 dport)
+{
+       unsigned int hash;
+       struct ip_vs_dest *dest;
+
+       /* Check for "full" addressed entries */
+       hash = ip_vs_rs_hashkey(af, daddr, dport);
+
+       hlist_for_each_entry_rcu(dest, &ipvs->rs_table[hash], d_list) {
+               if (dest->port == dport &&
+                   dest->af == af &&
+                   ip_vs_addr_equal(af, &dest->addr, daddr) &&
+                       (dest->protocol == protocol || dest->vfwmark)) {
+                       /* HIT */
+                       return dest;
+               }
+       }
+
+       return NULL;
+}
+
 /* Lookup destination by {addr,port} in the given service
  * Called under RCU lock.
  */
@@ -1253,6 +1283,8 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
                atomic_inc(&ipvs->ftpsvc_counter);
        else if (svc->port == 0)
                atomic_inc(&ipvs->nullsvc_counter);
+       if (svc->pe && svc->pe->conn_out)
+               atomic_inc(&ipvs->conn_out_counter);
 
        ip_vs_start_estimator(ipvs, &svc->stats);
 
@@ -1293,6 +1325,7 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
        struct ip_vs_scheduler *sched = NULL, *old_sched;
        struct ip_vs_pe *pe = NULL, *old_pe = NULL;
        int ret = 0;
+       bool new_pe_conn_out, old_pe_conn_out;
 
        /*
         * Lookup the scheduler, by 'u->sched_name'
@@ -1355,8 +1388,16 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
        svc->netmask = u->netmask;
 
        old_pe = rcu_dereference_protected(svc->pe, 1);
-       if (pe != old_pe)
+       if (pe != old_pe) {
                rcu_assign_pointer(svc->pe, pe);
+               /* check for optional methods in new pe */
+               new_pe_conn_out = (pe && pe->conn_out) ? true : false;
+               old_pe_conn_out = (old_pe && old_pe->conn_out) ? true : false;
+               if (new_pe_conn_out && !old_pe_conn_out)
+                       atomic_inc(&svc->ipvs->conn_out_counter);
+               if (old_pe_conn_out && !new_pe_conn_out)
+                       atomic_dec(&svc->ipvs->conn_out_counter);
+       }
 
 out:
        ip_vs_scheduler_put(old_sched);
@@ -1389,6 +1430,8 @@ static void __ip_vs_del_service(struct ip_vs_service *svc, bool cleanup)
 
        /* Unbind persistence engine, keep svc->pe */
        old_pe = rcu_dereference_protected(svc->pe, 1);
+       if (old_pe && old_pe->conn_out)
+               atomic_dec(&ipvs->conn_out_counter);
        ip_vs_pe_put(old_pe);
 
        /*
@@ -2875,8 +2918,10 @@ static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type,
        if (nla_put_u32(skb, IPVS_STATS_ATTR_CONNS, (u32)kstats->conns) ||
            nla_put_u32(skb, IPVS_STATS_ATTR_INPKTS, (u32)kstats->inpkts) ||
            nla_put_u32(skb, IPVS_STATS_ATTR_OUTPKTS, (u32)kstats->outpkts) ||
-           nla_put_u64(skb, IPVS_STATS_ATTR_INBYTES, kstats->inbytes) ||
-           nla_put_u64(skb, IPVS_STATS_ATTR_OUTBYTES, kstats->outbytes) ||
+           nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INBYTES, kstats->inbytes,
+                             IPVS_STATS_ATTR_PAD) ||
+           nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTBYTES, kstats->outbytes,
+                             IPVS_STATS_ATTR_PAD) ||
            nla_put_u32(skb, IPVS_STATS_ATTR_CPS, (u32)kstats->cps) ||
            nla_put_u32(skb, IPVS_STATS_ATTR_INPPS, (u32)kstats->inpps) ||
            nla_put_u32(skb, IPVS_STATS_ATTR_OUTPPS, (u32)kstats->outpps) ||
@@ -2900,16 +2945,26 @@ static int ip_vs_genl_fill_stats64(struct sk_buff *skb, int container_type,
        if (!nl_stats)
                return -EMSGSIZE;
 
-       if (nla_put_u64(skb, IPVS_STATS_ATTR_CONNS, kstats->conns) ||
-           nla_put_u64(skb, IPVS_STATS_ATTR_INPKTS, kstats->inpkts) ||
-           nla_put_u64(skb, IPVS_STATS_ATTR_OUTPKTS, kstats->outpkts) ||
-           nla_put_u64(skb, IPVS_STATS_ATTR_INBYTES, kstats->inbytes) ||
-           nla_put_u64(skb, IPVS_STATS_ATTR_OUTBYTES, kstats->outbytes) ||
-           nla_put_u64(skb, IPVS_STATS_ATTR_CPS, kstats->cps) ||
-           nla_put_u64(skb, IPVS_STATS_ATTR_INPPS, kstats->inpps) ||
-           nla_put_u64(skb, IPVS_STATS_ATTR_OUTPPS, kstats->outpps) ||
-           nla_put_u64(skb, IPVS_STATS_ATTR_INBPS, kstats->inbps) ||
-           nla_put_u64(skb, IPVS_STATS_ATTR_OUTBPS, kstats->outbps))
+       if (nla_put_u64_64bit(skb, IPVS_STATS_ATTR_CONNS, kstats->conns,
+                             IPVS_STATS_ATTR_PAD) ||
+           nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INPKTS, kstats->inpkts,
+                             IPVS_STATS_ATTR_PAD) ||
+           nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTPKTS, kstats->outpkts,
+                             IPVS_STATS_ATTR_PAD) ||
+           nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INBYTES, kstats->inbytes,
+                             IPVS_STATS_ATTR_PAD) ||
+           nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTBYTES, kstats->outbytes,
+                             IPVS_STATS_ATTR_PAD) ||
+           nla_put_u64_64bit(skb, IPVS_STATS_ATTR_CPS, kstats->cps,
+                             IPVS_STATS_ATTR_PAD) ||
+           nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INPPS, kstats->inpps,
+                             IPVS_STATS_ATTR_PAD) ||
+           nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTPPS, kstats->outpps,
+                             IPVS_STATS_ATTR_PAD) ||
+           nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INBPS, kstats->inbps,
+                             IPVS_STATS_ATTR_PAD) ||
+           nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTBPS, kstats->outbps,
+                             IPVS_STATS_ATTR_PAD))
                goto nla_put_failure;
        nla_nest_end(skb, nl_stats);
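
Annotation: the nla_put_u64() to nla_put_u64_64bit() conversion is about alignment, not width. On architectures where unaligned 64-bit loads are costly or trap, a u64 attribute payload must start on an 8-byte boundary, so the kernel may emit a filler attribute first; the extra argument names that filler type (IPVS_STATS_ATTR_PAD), which receivers must know to skip. A sketch of one such put, with the assumed wire layout in the comment:

        /* Wire layout when padding is needed (sketch, my reading):
         *
         *   | nlattr hdr: IPVS_STATS_ATTR_PAD (header only)  |
         *   | nlattr hdr: IPVS_STATS_ATTR_INBYTES            |
         *   | u64 payload, now 8-byte aligned                |
         */
        if (nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INBYTES,
                              kstats->inbytes, IPVS_STATS_ATTR_PAD))
                goto nla_put_failure;
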
 
@@ -3957,6 +4012,7 @@ int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs)
                    (unsigned long) ipvs);
        atomic_set(&ipvs->ftpsvc_counter, 0);
        atomic_set(&ipvs->nullsvc_counter, 0);
+       atomic_set(&ipvs->conn_out_counter, 0);
 
        /* procfs stats */
        ipvs->tot_stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
index 30434fb133df6b7522818de4cffa7e5ab2c8acc6..f04fd8df210b43ae551585316ac865959a0eae1d 100644 (file)
@@ -93,6 +93,10 @@ ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin)
        if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
                return;
 
+       /* Never alter conntrack for OPS conns (no reply is expected) */
+       if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
+               return;
+
        /* Alter reply only in original direction */
        if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
                return;
index 0a6eb5c0d9e9c0c067ef23b57684506831932e89..d07ef9e31c12d824afc9981be2cbcadfca571b6a 100644 (file)
@@ -143,6 +143,20 @@ static int ip_vs_sip_show_pe_data(const struct ip_vs_conn *cp, char *buf)
        return cp->pe_data_len;
 }
 
+static struct ip_vs_conn *
+ip_vs_sip_conn_out(struct ip_vs_service *svc,
+                  struct ip_vs_dest *dest,
+                  struct sk_buff *skb,
+                  const struct ip_vs_iphdr *iph,
+                  __be16 dport,
+                  __be16 cport)
+{
+       if (likely(iph->protocol == IPPROTO_UDP))
+               return ip_vs_new_conn_out(svc, dest, skb, iph, dport, cport);
+       /* currently no need to handle other than UDP */
+       return NULL;
+}
+
 static struct ip_vs_pe ip_vs_sip_pe =
 {
        .name =                 "sip",
@@ -153,6 +167,7 @@ static struct ip_vs_pe ip_vs_sip_pe =
        .ct_match =             ip_vs_sip_ct_match,
        .hashkey_raw =          ip_vs_sip_hashkey_raw,
        .show_pe_data =         ip_vs_sip_show_pe_data,
+       .conn_out =             ip_vs_sip_conn_out,
 };
 
 static int __init ip_vs_sip_init(void)
index dc196a0f501def30c16ee0d965b1b1938028f94a..6d19d2eeaa60dc3770e41edf23c33e4e5737d647 100644 (file)
@@ -1013,8 +1013,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
        if (IS_ERR(skb))
                goto tx_error;
 
-       skb = iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET, cp->af));
-       if (IS_ERR(skb))
+       if (iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET, cp->af)))
                goto tx_error;
 
        skb->transport_header = skb->network_header;
@@ -1105,8 +1104,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
        if (IS_ERR(skb))
                goto tx_error;
 
-       skb = iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET6, cp->af));
-       if (IS_ERR(skb))
+       if (iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET6, cp->af)))
                goto tx_error;
 
        skb->transport_header = skb->network_header;
index afde5f5e728a320773be246ecbcfcca7f5a16617..566c64e3ec50be4a1340a45bc2b9ea756197912b 100644 (file)
@@ -12,6 +12,8 @@
  * published by the Free Software Foundation.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/types.h>
 #include <linux/netfilter.h>
 #include <linux/module.h>
@@ -52,6 +54,7 @@
 #include <net/netfilter/nf_nat.h>
 #include <net/netfilter/nf_nat_core.h>
 #include <net/netfilter/nf_nat_helper.h>
+#include <net/netns/hash.h>
 
 #define NF_CONNTRACK_VERSION   "0.5.0"
 
@@ -66,7 +69,12 @@ EXPORT_SYMBOL_GPL(nf_conntrack_locks);
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
 EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);
 
+struct hlist_nulls_head *nf_conntrack_hash __read_mostly;
+EXPORT_SYMBOL_GPL(nf_conntrack_hash);
+
+static __read_mostly struct kmem_cache *nf_conntrack_cachep;
 static __read_mostly spinlock_t nf_conntrack_locks_all_lock;
+static __read_mostly seqcount_t nf_conntrack_generation;
 static __read_mostly bool nf_conntrack_locks_all;
 
 void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
@@ -105,7 +113,7 @@ static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
                spin_lock_nested(&nf_conntrack_locks[h1],
                                 SINGLE_DEPTH_NESTING);
        }
-       if (read_seqcount_retry(&net->ct.generation, sequence)) {
+       if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
                nf_conntrack_double_unlock(h1, h2);
                return true;
        }
@@ -139,43 +147,43 @@ EXPORT_SYMBOL_GPL(nf_conntrack_max);
 DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
 EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
 
-unsigned int nf_conntrack_hash_rnd __read_mostly;
-EXPORT_SYMBOL_GPL(nf_conntrack_hash_rnd);
+static unsigned int nf_conntrack_hash_rnd __read_mostly;
 
-static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple)
+static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
+                             const struct net *net)
 {
        unsigned int n;
+       u32 seed;
+
+       get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd));
 
        /* The direction must be ignored, so we hash everything up to the
         * destination ports (which is a multiple of 4) and treat the last
         * three bytes manually.
         */
+       seed = nf_conntrack_hash_rnd ^ net_hash_mix(net);
        n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
-       return jhash2((u32 *)tuple, n, nf_conntrack_hash_rnd ^
+       return jhash2((u32 *)tuple, n, seed ^
                      (((__force __u16)tuple->dst.u.all << 16) |
                      tuple->dst.protonum));
 }
 
-static u32 __hash_bucket(u32 hash, unsigned int size)
-{
-       return reciprocal_scale(hash, size);
-}
-
-static u32 hash_bucket(u32 hash, const struct net *net)
+static u32 scale_hash(u32 hash)
 {
-       return __hash_bucket(hash, net->ct.htable_size);
+       return reciprocal_scale(hash, nf_conntrack_htable_size);
 }
 
-static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
-                                 unsigned int size)
+static u32 __hash_conntrack(const struct net *net,
+                           const struct nf_conntrack_tuple *tuple,
+                           unsigned int size)
 {
-       return __hash_bucket(hash_conntrack_raw(tuple), size);
+       return reciprocal_scale(hash_conntrack_raw(tuple, net), size);
 }
 
-static inline u_int32_t hash_conntrack(const struct net *net,
-                                      const struct nf_conntrack_tuple *tuple)
+static u32 hash_conntrack(const struct net *net,
+                         const struct nf_conntrack_tuple *tuple)
 {
-       return __hash_conntrack(tuple, net->ct.htable_size);
+       return scale_hash(hash_conntrack_raw(tuple, net));
 }
 
 bool
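
Annotation: two related moves here: the hash seed becomes lazily initialized via get_random_once() instead of the old retry-until-nonzero dance, and net_hash_mix(net) is XORed in so the same tuple lands in different buckets in different network namespaces. That, together with the net_eq() check added to nf_ct_key_equal(), is what lets the per-netns tables collapse into the single global nf_conntrack_hash later in this file. Condensed, the seeding scheme is:

        static u32 tuple_hash(const u32 *words, unsigned int n,
                              const struct net *net)
        {
                static u32 rnd __read_mostly;

                get_random_once(&rnd, sizeof(rnd));
                /* same tuple, different netns => different bucket */
                return jhash2(words, n, rnd ^ net_hash_mix(net));
        }
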
@@ -356,7 +364,7 @@ destroy_conntrack(struct nf_conntrack *nfct)
        }
        rcu_read_lock();
        l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
-       if (l4proto && l4proto->destroy)
+       if (l4proto->destroy)
                l4proto->destroy(ct);
 
        rcu_read_unlock();
@@ -391,7 +399,7 @@ static void nf_ct_delete_from_lists(struct nf_conn *ct)
 
        local_bh_disable();
        do {
-               sequence = read_seqcount_begin(&net->ct.generation);
+               sequence = read_seqcount_begin(&nf_conntrack_generation);
                hash = hash_conntrack(net,
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                reply_hash = hash_conntrack(net,
@@ -443,7 +451,8 @@ static void death_by_timeout(unsigned long ul_conntrack)
 static inline bool
 nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
                const struct nf_conntrack_tuple *tuple,
-               const struct nf_conntrack_zone *zone)
+               const struct nf_conntrack_zone *zone,
+               const struct net *net)
 {
        struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
 
@@ -452,7 +461,8 @@ nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
         */
        return nf_ct_tuple_equal(tuple, &h->tuple) &&
               nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
-              nf_ct_is_confirmed(ct);
+              nf_ct_is_confirmed(ct) &&
+              net_eq(net, nf_ct_net(ct));
 }
 
 /*
@@ -465,21 +475,23 @@ ____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple, u32 hash)
 {
        struct nf_conntrack_tuple_hash *h;
+       struct hlist_nulls_head *ct_hash;
        struct hlist_nulls_node *n;
-       unsigned int bucket = hash_bucket(hash, net);
+       unsigned int bucket, sequence;
 
-       /* Disable BHs the entire time since we normally need to disable them
-        * at least once for the stats anyway.
-        */
-       local_bh_disable();
 begin:
-       hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) {
-               if (nf_ct_key_equal(h, tuple, zone)) {
-                       NF_CT_STAT_INC(net, found);
-                       local_bh_enable();
+       do {
+               sequence = read_seqcount_begin(&nf_conntrack_generation);
+               bucket = scale_hash(hash);
+               ct_hash = nf_conntrack_hash;
+       } while (read_seqcount_retry(&nf_conntrack_generation, sequence));
+
+       hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {
+               if (nf_ct_key_equal(h, tuple, zone, net)) {
+                       NF_CT_STAT_INC_ATOMIC(net, found);
                        return h;
                }
-               NF_CT_STAT_INC(net, searched);
+               NF_CT_STAT_INC_ATOMIC(net, searched);
        }
        /*
         * if the nulls value we got at the end of this lookup is
@@ -487,10 +499,9 @@ begin:
         * We probably met an item that was moved to another chain.
         */
        if (get_nulls_value(n) != bucket) {
-               NF_CT_STAT_INC(net, search_restart);
+               NF_CT_STAT_INC_ATOMIC(net, search_restart);
                goto begin;
        }
-       local_bh_enable();
 
        return NULL;
 }
@@ -512,7 +523,7 @@ begin:
                             !atomic_inc_not_zero(&ct->ct_general.use)))
                        h = NULL;
                else {
-                       if (unlikely(!nf_ct_key_equal(h, tuple, zone))) {
+                       if (unlikely(!nf_ct_key_equal(h, tuple, zone, net))) {
                                nf_ct_put(ct);
                                goto begin;
                        }
@@ -528,7 +539,7 @@ nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple)
 {
        return __nf_conntrack_find_get(net, zone, tuple,
-                                      hash_conntrack_raw(tuple));
+                                      hash_conntrack_raw(tuple, net));
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
 
@@ -536,12 +547,10 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct,
                                       unsigned int hash,
                                       unsigned int reply_hash)
 {
-       struct net *net = nf_ct_net(ct);
-
        hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
-                          &net->ct.hash[hash]);
+                          &nf_conntrack_hash[hash]);
        hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
-                          &net->ct.hash[reply_hash]);
+                          &nf_conntrack_hash[reply_hash]);
 }
 
 int
@@ -558,7 +567,7 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
 
        local_bh_disable();
        do {
-               sequence = read_seqcount_begin(&net->ct.generation);
+               sequence = read_seqcount_begin(&nf_conntrack_generation);
                hash = hash_conntrack(net,
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                reply_hash = hash_conntrack(net,
@@ -566,17 +575,14 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
 
        /* See if there's one in the list already, including reverse */
-       hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
-               if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
-                                     &h->tuple) &&
-                   nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
-                                    NF_CT_DIRECTION(h)))
+       hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
+               if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+                                   zone, net))
                        goto out;
-       hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
-               if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
-                                     &h->tuple) &&
-                   nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
-                                    NF_CT_DIRECTION(h)))
+
+       hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
+               if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+                                   zone, net))
                        goto out;
 
        add_timer(&ct->timeout);
@@ -597,6 +603,62 @@ out:
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
 
+static inline void nf_ct_acct_update(struct nf_conn *ct,
+                                    enum ip_conntrack_info ctinfo,
+                                    unsigned int len)
+{
+       struct nf_conn_acct *acct;
+
+       acct = nf_conn_acct_find(ct);
+       if (acct) {
+               struct nf_conn_counter *counter = acct->counter;
+
+               atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
+               atomic64_add(len, &counter[CTINFO2DIR(ctinfo)].bytes);
+       }
+}
+
+static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+                            const struct nf_conn *loser_ct)
+{
+       struct nf_conn_acct *acct;
+
+       acct = nf_conn_acct_find(loser_ct);
+       if (acct) {
+               struct nf_conn_counter *counter = acct->counter;
+               unsigned int bytes;
+
+               /* u32 should be fine since we must have seen one packet. */
+               bytes = atomic64_read(&counter[CTINFO2DIR(ctinfo)].bytes);
+               nf_ct_acct_update(ct, ctinfo, bytes);
+       }
+}
+
+/* Resolve race on insertion if this protocol allows this. */
+static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
+                              enum ip_conntrack_info ctinfo,
+                              struct nf_conntrack_tuple_hash *h)
+{
+       /* This is the conntrack entry already in hashes that won race. */
+       struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
+       struct nf_conntrack_l4proto *l4proto;
+
+       l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
+       if (l4proto->allow_clash &&
+           !nf_ct_is_dying(ct) &&
+           atomic_inc_not_zero(&ct->ct_general.use)) {
+               nf_ct_acct_merge(ct, ctinfo, (struct nf_conn *)skb->nfct);
+               nf_conntrack_put(skb->nfct);
+               /* Assign conntrack already in hashes to this skbuff. Don't
+                * modify skb->nfctinfo to ensure consistent stateful filtering.
+                */
+               skb->nfct = &ct->ct_general;
+               return NF_ACCEPT;
+       }
+       NF_CT_STAT_INC(net, drop);
+       return NF_DROP;
+}
+
 /* Confirm a connection given skb; places it in hash table */
 int
 __nf_conntrack_confirm(struct sk_buff *skb)
@@ -611,6 +673,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
        enum ip_conntrack_info ctinfo;
        struct net *net;
        unsigned int sequence;
+       int ret = NF_DROP;
 
        ct = nf_ct_get(skb, &ctinfo);
        net = nf_ct_net(ct);
@@ -626,10 +689,10 @@ __nf_conntrack_confirm(struct sk_buff *skb)
        local_bh_disable();
 
        do {
-               sequence = read_seqcount_begin(&net->ct.generation);
+               sequence = read_seqcount_begin(&nf_conntrack_generation);
                /* reuse the hash saved before */
                hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
-               hash = hash_bucket(hash, net);
+               hash = scale_hash(hash);
                reply_hash = hash_conntrack(net,
                                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 
@@ -653,23 +716,22 @@ __nf_conntrack_confirm(struct sk_buff *skb)
         */
        nf_ct_del_from_dying_or_unconfirmed_list(ct);
 
-       if (unlikely(nf_ct_is_dying(ct)))
-               goto out;
+       if (unlikely(nf_ct_is_dying(ct))) {
+               nf_ct_add_to_dying_list(ct);
+               goto dying;
+       }
 
        /* See if there's one in the list already, including reverse:
           NAT could have grabbed it without realizing, since we're
           not in the hash.  If there is, we lost race. */
-       hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
-               if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
-                                     &h->tuple) &&
-                   nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
-                                    NF_CT_DIRECTION(h)))
+       hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
+               if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+                                   zone, net))
                        goto out;
-       hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
-               if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
-                                     &h->tuple) &&
-                   nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
-                                    NF_CT_DIRECTION(h)))
+
+       hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
+               if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+                                   zone, net))
                        goto out;
 
        /* Timer relative to confirmation time, not original
@@ -708,10 +770,12 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 
 out:
        nf_ct_add_to_dying_list(ct);
+       ret = nf_ct_resolve_clash(net, skb, ctinfo, h);
+dying:
        nf_conntrack_double_unlock(hash, reply_hash);
        NF_CT_STAT_INC(net, insert_failed);
        local_bh_enable();
-       return NF_DROP;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
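
Annotation: the confirm path no longer treats a lost insertion race as an unconditional NF_DROP. For protocols that opt in via ->allow_clash (sensible for connectionless ones, where both racing entries describe the same flow equally well), nf_ct_resolve_clash() re-points skb->nfct at the entry that won the race and folds the loser's byte count into the winner through nf_ct_acct_merge(). How a tracker would opt in (shown as an assumption; the flag is set outside this hunk):

        /* Sketch: an L4 tracker opting in to clash resolution. */
        struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 = {
                /* ... */
                .allow_clash    = true, /* racing UDP entries are equivalent */
        };
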
 
@@ -724,29 +788,31 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
        struct net *net = nf_ct_net(ignored_conntrack);
        const struct nf_conntrack_zone *zone;
        struct nf_conntrack_tuple_hash *h;
+       struct hlist_nulls_head *ct_hash;
+       unsigned int hash, sequence;
        struct hlist_nulls_node *n;
        struct nf_conn *ct;
-       unsigned int hash;
 
        zone = nf_ct_zone(ignored_conntrack);
-       hash = hash_conntrack(net, tuple);
 
-       /* Disable BHs the entire time since we need to disable them at
-        * least once for the stats anyway.
-        */
-       rcu_read_lock_bh();
-       hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
+       rcu_read_lock();
+       do {
+               sequence = read_seqcount_begin(&nf_conntrack_generation);
+               hash = hash_conntrack(net, tuple);
+               ct_hash = nf_conntrack_hash;
+       } while (read_seqcount_retry(&nf_conntrack_generation, sequence));
+
+       hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {
                ct = nf_ct_tuplehash_to_ctrack(h);
                if (ct != ignored_conntrack &&
-                   nf_ct_tuple_equal(tuple, &h->tuple) &&
-                   nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h))) {
-                       NF_CT_STAT_INC(net, found);
-                       rcu_read_unlock_bh();
+                   nf_ct_key_equal(h, tuple, zone, net)) {
+                       NF_CT_STAT_INC_ATOMIC(net, found);
+                       rcu_read_unlock();
                        return 1;
                }
-               NF_CT_STAT_INC(net, searched);
+               NF_CT_STAT_INC_ATOMIC(net, searched);
        }
-       rcu_read_unlock_bh();
+       rcu_read_unlock();
 
        return 0;
 }
@@ -760,71 +826,63 @@ static noinline int early_drop(struct net *net, unsigned int _hash)
 {
        /* Use oldest entry, which is roughly LRU */
        struct nf_conntrack_tuple_hash *h;
-       struct nf_conn *ct = NULL, *tmp;
+       struct nf_conn *tmp;
        struct hlist_nulls_node *n;
-       unsigned int i = 0, cnt = 0;
-       int dropped = 0;
-       unsigned int hash, sequence;
+       unsigned int i, hash, sequence;
+       struct nf_conn *ct = NULL;
        spinlock_t *lockp;
+       bool ret = false;
+
+       i = 0;
 
        local_bh_disable();
 restart:
-       sequence = read_seqcount_begin(&net->ct.generation);
-       hash = hash_bucket(_hash, net);
-       for (; i < net->ct.htable_size; i++) {
+       sequence = read_seqcount_begin(&nf_conntrack_generation);
+       for (; i < NF_CT_EVICTION_RANGE; i++) {
+               hash = scale_hash(_hash++);
                lockp = &nf_conntrack_locks[hash % CONNTRACK_LOCKS];
                nf_conntrack_lock(lockp);
-               if (read_seqcount_retry(&net->ct.generation, sequence)) {
+               if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
                        spin_unlock(lockp);
                        goto restart;
                }
-               hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
-                                        hnnode) {
+               hlist_nulls_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash],
+                                              hnnode) {
                        tmp = nf_ct_tuplehash_to_ctrack(h);
-                       if (!test_bit(IPS_ASSURED_BIT, &tmp->status) &&
-                           !nf_ct_is_dying(tmp) &&
-                           atomic_inc_not_zero(&tmp->ct_general.use)) {
+
+                       if (test_bit(IPS_ASSURED_BIT, &tmp->status) ||
+                           !net_eq(nf_ct_net(tmp), net) ||
+                           nf_ct_is_dying(tmp))
+                               continue;
+
+                       if (atomic_inc_not_zero(&tmp->ct_general.use)) {
                                ct = tmp;
                                break;
                        }
-                       cnt++;
                }
 
-               hash = (hash + 1) % net->ct.htable_size;
                spin_unlock(lockp);
-
-               if (ct || cnt >= NF_CT_EVICTION_RANGE)
+               if (ct)
                        break;
-
        }
+
        local_bh_enable();
 
        if (!ct)
-               return dropped;
+               return false;
 
-       if (del_timer(&ct->timeout)) {
+       /* kill only if in same netns -- might have moved due to
+        * SLAB_DESTROY_BY_RCU rules
+        */
+       if (net_eq(nf_ct_net(ct), net) && del_timer(&ct->timeout)) {
                if (nf_ct_delete(ct, 0, 0)) {
-                       dropped = 1;
                        NF_CT_STAT_INC_ATOMIC(net, early_drop);
+                       ret = true;
                }
        }
-       nf_ct_put(ct);
-       return dropped;
-}
-
-void init_nf_conntrack_hash_rnd(void)
-{
-       unsigned int rand;
 
-       /*
-        * Why not initialize nf_conntrack_rnd in a "init()" function ?
-        * Because there isn't enough entropy when system initializing,
-        * and we initialize it as late as possible.
-        */
-       do {
-               get_random_bytes(&rand, sizeof(rand));
-       } while (!rand);
-       cmpxchg(&nf_conntrack_hash_rnd, 0, rand);
+       nf_ct_put(ct);
+       return ret;
 }
 
 static struct nf_conn *
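
Annotation: two things changed shape in early_drop(). The scan is now bounded to NF_CT_EVICTION_RANGE buckets starting at the packet's own hash rather than potentially walking the whole table, and, because conntrack objects sit in a SLAB_DESTROY_BY_RCU cache, an entry spotted under RCU may have been freed and reused for another flow (even another namespace) before the refcount grab succeeds, hence the net_eq() re-check before the kill. The re-validation idiom, condensed:

        /* SLAB_DESTROY_BY_RCU: a successful refcount grab proves the
         * memory is live, not that it is still the object we found. */
        if (atomic_inc_not_zero(&tmp->ct_general.use)) {
                ct = tmp;
                /* ... after leaving the RCU walk ... */
                if (net_eq(nf_ct_net(ct), net) && del_timer(&ct->timeout))
                        nf_ct_delete(ct, 0, 0);  /* kill only our own netns's entry */
                nf_ct_put(ct);
        }
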
@@ -836,12 +894,6 @@ __nf_conntrack_alloc(struct net *net,
 {
        struct nf_conn *ct;
 
-       if (unlikely(!nf_conntrack_hash_rnd)) {
-               init_nf_conntrack_hash_rnd();
-               /* recompute the hash as nf_conntrack_hash_rnd is initialized */
-               hash = hash_conntrack_raw(orig);
-       }
-
        /* We don't want any race condition at early drop stage */
        atomic_inc(&net->ct.count);
 
@@ -858,7 +910,7 @@ __nf_conntrack_alloc(struct net *net,
         * Do not use kmem_cache_zalloc(), as this cache uses
         * SLAB_DESTROY_BY_RCU.
         */
-       ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
+       ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
        if (ct == NULL)
                goto out;
 
@@ -885,7 +937,7 @@ __nf_conntrack_alloc(struct net *net,
        atomic_set(&ct->ct_general.use, 0);
        return ct;
 out_free:
-       kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
+       kmem_cache_free(nf_conntrack_cachep, ct);
 out:
        atomic_dec(&net->ct.count);
        return ERR_PTR(-ENOMEM);
@@ -912,7 +964,7 @@ void nf_conntrack_free(struct nf_conn *ct)
 
        nf_ct_ext_destroy(ct);
        nf_ct_ext_free(ct);
-       kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
+       kmem_cache_free(nf_conntrack_cachep, ct);
        smp_mb__before_atomic();
        atomic_dec(&net->ct.count);
 }
@@ -966,7 +1018,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
 
        if (!l4proto->new(ct, skb, dataoff, timeouts)) {
                nf_conntrack_free(ct);
-               pr_debug("init conntrack: can't track with proto module\n");
+               pr_debug("can't track with proto module\n");
                return NULL;
        }
 
@@ -988,7 +1040,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
                spin_lock(&nf_conntrack_expect_lock);
                exp = nf_ct_find_expectation(net, zone, tuple);
                if (exp) {
-                       pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
+                       pr_debug("expectation arrives ct=%p exp=%p\n",
                                 ct, exp);
                        /* Welcome, Mr. Bond.  We've been expecting you... */
                        __set_bit(IPS_EXPECTED_BIT, &ct->status);
@@ -1053,13 +1105,13 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
        if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
                             dataoff, l3num, protonum, net, &tuple, l3proto,
                             l4proto)) {
-               pr_debug("resolve_normal_ct: Can't get tuple\n");
+               pr_debug("Can't get tuple\n");
                return NULL;
        }
 
        /* look for tuple match */
        zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
-       hash = hash_conntrack_raw(&tuple);
+       hash = hash_conntrack_raw(&tuple, net);
        h = __nf_conntrack_find_get(net, zone, &tuple, hash);
        if (!h) {
                h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
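
hash_conntrack_raw() gaining a net argument is the other half of the single-table scheme: the hash seed mixes in a per-namespace value so identical tuples in different namespaces land in different buckets. A compact sketch of the bucket computation, where tuple_hash stands for the jhash of the tuple and net_mix for net_hash_mix() (both placeholders):

#include <stdint.h>

/* the kernel's reciprocal_scale(): map val onto [0, ep_ro) without '%' */
static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
        return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

static uint32_t bucket_of(uint32_t tuple_hash, uint32_t net_mix,
                          uint32_t htable_size)
{
        return reciprocal_scale(tuple_hash ^ net_mix, htable_size);
}
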
@@ -1079,14 +1131,13 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
        } else {
                /* Once we've had two way comms, always ESTABLISHED. */
                if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
-                       pr_debug("nf_conntrack_in: normal packet for %p\n", ct);
+                       pr_debug("normal packet for %p\n", ct);
                        *ctinfo = IP_CT_ESTABLISHED;
                } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
-                       pr_debug("nf_conntrack_in: related packet for %p\n",
-                                ct);
+                       pr_debug("related packet for %p\n", ct);
                        *ctinfo = IP_CT_RELATED;
                } else {
-                       pr_debug("nf_conntrack_in: new packet for %p\n", ct);
+                       pr_debug("new packet for %p\n", ct);
                        *ctinfo = IP_CT_NEW;
                }
                *set_reply = 0;
@@ -1269,17 +1320,8 @@ void __nf_ct_refresh_acct(struct nf_conn *ct,
        }
 
 acct:
-       if (do_acct) {
-               struct nf_conn_acct *acct;
-
-               acct = nf_conn_acct_find(ct);
-               if (acct) {
-                       struct nf_conn_counter *counter = acct->counter;
-
-                       atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
-                       atomic64_add(skb->len, &counter[CTINFO2DIR(ctinfo)].bytes);
-               }
-       }
+       if (do_acct)
+               nf_ct_acct_update(ct, ctinfo, skb->len);
 }
 EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
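
The open-coded accounting here and in __nf_ct_kill_acct() below collapse into nf_ct_acct_update(). Its body is not part of the visible hunks; reconstructed from the removed lines it plausibly looks as follows (treat this as a sketch, not the literal helper). Note the behavior change visible in the second hunk: the kill path now accounts skb->len where it previously used skb->len - skb_network_offset(skb).

static void nf_ct_acct_update(struct nf_conn *ct,
                              enum ip_conntrack_info ctinfo,
                              unsigned int len)
{
        struct nf_conn_acct *acct;

        acct = nf_conn_acct_find(ct);
        if (acct) {
                struct nf_conn_counter *counter = acct->counter;

                atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
                atomic64_add(len, &counter[CTINFO2DIR(ctinfo)].bytes);
        }
}
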
 
@@ -1288,18 +1330,8 @@ bool __nf_ct_kill_acct(struct nf_conn *ct,
                       const struct sk_buff *skb,
                       int do_acct)
 {
-       if (do_acct) {
-               struct nf_conn_acct *acct;
-
-               acct = nf_conn_acct_find(ct);
-               if (acct) {
-                       struct nf_conn_counter *counter = acct->counter;
-
-                       atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
-                       atomic64_add(skb->len - skb_network_offset(skb),
-                                    &counter[CTINFO2DIR(ctinfo)].bytes);
-               }
-       }
+       if (do_acct)
+               nf_ct_acct_update(ct, ctinfo, skb->len);
 
        if (del_timer(&ct->timeout)) {
                ct->timeout.function((unsigned long)ct);
@@ -1395,16 +1427,17 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
        int cpu;
        spinlock_t *lockp;
 
-       for (; *bucket < net->ct.htable_size; (*bucket)++) {
+       for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
                lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
                local_bh_disable();
                nf_conntrack_lock(lockp);
-               if (*bucket < net->ct.htable_size) {
-                       hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
+               if (*bucket < nf_conntrack_htable_size) {
+                       hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnnode) {
                                if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
                                        continue;
                                ct = nf_ct_tuplehash_to_ctrack(h);
-                               if (iter(ct, data))
+                               if (net_eq(nf_ct_net(ct), net) &&
+                                   iter(ct, data))
                                        goto found;
                        }
                }
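
get_next_corpse() now walks the global nf_conntrack_hash, so it must skip entries owned by other namespaces, hence the added net_eq() filter. The same scan-and-filter shape, reduced to a standalone sketch (types illustrative):

struct entry {
        const void   *net;      /* owning namespace */
        struct entry *next;
};

static struct entry *next_in_net(struct entry *e, const void *net)
{
        while (e && e->net != net)
                e = e->next;
        return e;
}
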
@@ -1442,6 +1475,9 @@ void nf_ct_iterate_cleanup(struct net *net,
 
        might_sleep();
 
+       if (atomic_read(&net->ct.count) == 0)
+               return;
+
        while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
                /* Time to push up daisies... */
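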
                if (del_timer(&ct->timeout))
@@ -1493,6 +1529,8 @@ void nf_conntrack_cleanup_end(void)
        while (untrack_refs() > 0)
                schedule();
 
+       nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_htable_size);
+
 #ifdef CONFIG_NF_CONNTRACK_ZONES
        nf_ct_extend_unregister(&nf_ct_zone_extend);
 #endif
@@ -1543,15 +1581,12 @@ i_see_dead_people:
        }
 
        list_for_each_entry(net, net_exit_list, exit_list) {
-               nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
                nf_conntrack_proto_pernet_fini(net);
                nf_conntrack_helper_pernet_fini(net);
                nf_conntrack_ecache_pernet_fini(net);
                nf_conntrack_tstamp_pernet_fini(net);
                nf_conntrack_acct_pernet_fini(net);
                nf_conntrack_expect_pernet_fini(net);
-               kmem_cache_destroy(net->ct.nf_conntrack_cachep);
-               kfree(net->ct.slabname);
                free_percpu(net->ct.stat);
                free_percpu(net->ct.pcpu_lists);
        }
@@ -1606,7 +1641,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
 
        local_bh_disable();
        nf_conntrack_all_lock();
-       write_seqcount_begin(&init_net.ct.generation);
+       write_seqcount_begin(&nf_conntrack_generation);
 
        /* Lookups in the old hash might happen in parallel, which means we
         * might get false negatives during connection lookup. New connections
@@ -1614,26 +1649,28 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
         * though since that required taking the locks.
         */
 
-       for (i = 0; i < init_net.ct.htable_size; i++) {
-               while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
-                       h = hlist_nulls_entry(init_net.ct.hash[i].first,
-                                       struct nf_conntrack_tuple_hash, hnnode);
+       for (i = 0; i < nf_conntrack_htable_size; i++) {
+               while (!hlist_nulls_empty(&nf_conntrack_hash[i])) {
+                       h = hlist_nulls_entry(nf_conntrack_hash[i].first,
+                                             struct nf_conntrack_tuple_hash, hnnode);
                        ct = nf_ct_tuplehash_to_ctrack(h);
                        hlist_nulls_del_rcu(&h->hnnode);
-                       bucket = __hash_conntrack(&h->tuple, hashsize);
+                       bucket = __hash_conntrack(nf_ct_net(ct),
+                                                 &h->tuple, hashsize);
                        hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
                }
        }
-       old_size = init_net.ct.htable_size;
-       old_hash = init_net.ct.hash;
+       old_size = nf_conntrack_htable_size;
+       old_hash = nf_conntrack_hash;
 
-       init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
-       init_net.ct.hash = hash;
+       nf_conntrack_hash = hash;
+       nf_conntrack_htable_size = hashsize;
 
-       write_seqcount_end(&init_net.ct.generation);
+       write_seqcount_end(&nf_conntrack_generation);
        nf_conntrack_all_unlock();
        local_bh_enable();
 
+       synchronize_net();
        nf_ct_free_hashtable(old_hash, old_size);
        return 0;
 }
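
The resize path is guarded by the now-global nf_conntrack_generation seqcount: lookups snapshot the table pointer and size and retry if the generation changed underneath them, while the writer bumps the counter around the swap. The added synchronize_net() lets RCU readers still traversing the old buckets drain before nf_ct_free_hashtable() reuses the memory. A userspace sketch of the seqcount protocol, assuming C11 atomics (names illustrative):

#include <stdatomic.h>

static _Atomic unsigned int generation;         /* odd = writer active */
static unsigned int table_size;

static unsigned int read_begin(void)
{
        unsigned int seq;

        while ((seq = atomic_load(&generation)) & 1)
                ;                               /* writer in progress */
        atomic_thread_fence(memory_order_acquire);
        return seq;
}

static int read_retry(unsigned int seq)
{
        atomic_thread_fence(memory_order_acquire);
        return atomic_load(&generation) != seq;
}

static unsigned int snapshot_size(void)
{
        unsigned int seq, size;

        do {
                seq  = read_begin();
                size = table_size;              /* consistent snapshot */
        } while (read_retry(seq));

        return size;
}

static void write_begin(void) { atomic_fetch_add(&generation, 1); }
static void write_end(void)   { atomic_fetch_add(&generation, 1); }
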
@@ -1654,7 +1691,10 @@ EXPORT_SYMBOL_GPL(nf_ct_untracked_status_or);
 int nf_conntrack_init_start(void)
 {
        int max_factor = 8;
-       int i, ret, cpu;
+       int ret = -ENOMEM;
+       int i, cpu;
+
+       seqcount_init(&nf_conntrack_generation);
 
        for (i = 0; i < CONNTRACK_LOCKS; i++)
                spin_lock_init(&nf_conntrack_locks[i]);
@@ -1681,8 +1721,19 @@ int nf_conntrack_init_start(void)
                 * entries. */
                max_factor = 4;
        }
+
+       nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, 1);
+       if (!nf_conntrack_hash)
+               return -ENOMEM;
+
        nf_conntrack_max = max_factor * nf_conntrack_htable_size;
 
+       nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
+                                               sizeof(struct nf_conn), 0,
+                                               SLAB_DESTROY_BY_RCU, NULL);
+       if (!nf_conntrack_cachep)
+               goto err_cachep;
+
        printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n",
               NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
               nf_conntrack_max);
@@ -1759,6 +1810,9 @@ err_tstamp:
 err_acct:
        nf_conntrack_expect_fini();
 err_expect:
+       kmem_cache_destroy(nf_conntrack_cachep);
+err_cachep:
+       nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_htable_size);
        return ret;
 }
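
The new err_cachep label slots into the usual goto-unwind ladder: each failure point jumps to the label that tears down exactly what was set up before it, in reverse order, so the slab cache is destroyed before the hash table is freed. The shape, reduced to a runnable minimum (mallocs stand in for the real allocations):

#include <errno.h>
#include <stdlib.h>

static void *table, *cache;

static int register_things(void) { return 0; }  /* placeholder setup step */

static int init(void)
{
        table = malloc(64);             /* stands in for the hashtable */
        if (!table)
                return -ENOMEM;

        cache = malloc(64);             /* stands in for the slab cache */
        if (!cache)
                goto err_cache;

        if (register_things() < 0)      /* later setup may still fail */
                goto err_register;

        return 0;

err_register:
        free(cache);
err_cache:
        free(table);
        return -ENOMEM;
}
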
 
@@ -1782,7 +1836,6 @@ int nf_conntrack_init_net(struct net *net)
        int cpu;
 
        atomic_set(&net->ct.count, 0);
-       seqcount_init(&net->ct.generation);
 
        net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
        if (!net->ct.pcpu_lists)
@@ -1800,24 +1853,6 @@ int nf_conntrack_init_net(struct net *net)
        if (!net->ct.stat)
                goto err_pcpu_lists;
 
-       net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
-       if (!net->ct.slabname)
-               goto err_slabname;
-
-       net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname,
-                                                       sizeof(struct nf_conn), 0,
-                                                       SLAB_DESTROY_BY_RCU, NULL);
-       if (!net->ct.nf_conntrack_cachep) {
-               printk(KERN_ERR "Unable to create nf_conn slab cache\n");
-               goto err_cache;
-       }
-
-       net->ct.htable_size = nf_conntrack_htable_size;
-       net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, 1);
-       if (!net->ct.hash) {
-               printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
-               goto err_hash;
-       }
        ret = nf_conntrack_expect_pernet_init(net);
        if (ret < 0)
                goto err_expect;
@@ -1849,12 +1884,6 @@ err_tstamp:
 err_acct:
        nf_conntrack_expect_pernet_fini(net);
 err_expect:
-       nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
-err_hash:
-       kmem_cache_destroy(net->ct.nf_conntrack_cachep);
-err_cache:
-       kfree(net->ct.slabname);
-err_slabname:
        free_percpu(net->ct.stat);
 err_pcpu_lists:
        free_percpu(net->ct.pcpu_lists);
index 4e78c57b818f7d2cc387c793fe5a100e03fe067e..d28011b428455d83d1230210982699cb8774c55b 100644 (file)
@@ -113,6 +113,60 @@ static void ecache_work(struct work_struct *work)
                schedule_delayed_work(&ctnet->ecache_dwork, delay);
 }
 
+int nf_conntrack_eventmask_report(unsigned int eventmask, struct nf_conn *ct,
+                                 u32 portid, int report)
+{
+       int ret = 0;
+       struct net *net = nf_ct_net(ct);
+       struct nf_ct_event_notifier *notify;
+       struct nf_conntrack_ecache *e;
+
+       rcu_read_lock();
+       notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
+       if (!notify)
+               goto out_unlock;
+
+       e = nf_ct_ecache_find(ct);
+       if (!e)
+               goto out_unlock;
+
+       if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct)) {
+               struct nf_ct_event item = {
+                       .ct     = ct,
+                       .portid = e->portid ? e->portid : portid,
+                       .report = report
+               };
+               /* Is this a resend of a destroy event? If so, skip missed */
+               unsigned long missed = e->portid ? 0 : e->missed;
+
+               if (!((eventmask | missed) & e->ctmask))
+                       goto out_unlock;
+
+               ret = notify->fcn(eventmask | missed, &item);
+               if (unlikely(ret < 0 || missed)) {
+                       spin_lock_bh(&ct->lock);
+                       if (ret < 0) {
+                               /* This is a destroy event that has been
+                                * triggered by a process, we store the PORTID
+                                * to include it in the retransmission.
+                                */
+                               if (eventmask & (1 << IPCT_DESTROY) &&
+                                   e->portid == 0 && portid != 0)
+                                       e->portid = portid;
+                               else
+                                       e->missed |= eventmask;
+                       } else {
+                               e->missed &= ~missed;
+                       }
+                       spin_unlock_bh(&ct->lock);
+               }
+       }
+out_unlock:
+       rcu_read_unlock();
+       return ret;
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_eventmask_report);
+
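
nf_conntrack_eventmask_report() keeps per-conntrack backlog bookkeeping: a failed delivery ORs the events into e->missed so they ride along with the next notification, a successful one clears whatever backlog it carried, and for destroy events the requesting portid is stored for the retransmission. The core accounting, reduced to a self-contained sketch (the kernel does this under ct->lock; deliver() is a stand-in consumer):

struct ecache { unsigned long missed; };

/* stand-in consumer: pretend masks with bit 0 set fail to deliver */
static int deliver(unsigned long events)
{
        return (events & 1) ? -1 : 0;
}

static void report(struct ecache *e, unsigned long events)
{
        unsigned long missed = e->missed;

        if (deliver(events | missed) < 0)
                e->missed |= events;    /* retry with the next event */
        else
                e->missed &= ~missed;   /* backlog flushed */
}
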
 /* deliver cached events and clear cache entry - must be called with locally
  * disabled softirqs */
 void nf_ct_deliver_cached_events(struct nf_conn *ct)
@@ -167,6 +221,36 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
 
+void nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
+                              struct nf_conntrack_expect *exp,
+                              u32 portid, int report)
+{
+       struct net *net = nf_ct_exp_net(exp);
+       struct nf_exp_event_notifier *notify;
+       struct nf_conntrack_ecache *e;
+
+       rcu_read_lock();
+       notify = rcu_dereference(net->ct.nf_expect_event_cb);
+       if (!notify)
+               goto out_unlock;
+
+       e = nf_ct_ecache_find(exp->master);
+       if (!e)
+               goto out_unlock;
+
+       if (e->expmask & (1 << event)) {
+               struct nf_exp_event item = {
+                       .exp    = exp,
+                       .portid = portid,
+                       .report = report
+               };
+               notify->fcn(1 << event, &item);
+       }
+out_unlock:
+       rcu_read_unlock();
+}
+
 int nf_conntrack_register_notifier(struct net *net,
                                   struct nf_ct_event_notifier *new)
 {
index 278927ab09483adbd84611e1d5b7b21fbd3cb048..9e3693128313ffd57845fcabc28916f4b86515c2 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/moduleparam.h>
 #include <linux/export.h>
 #include <net/net_namespace.h>
+#include <net/netns/hash.h>
 
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_core.h>
 unsigned int nf_ct_expect_hsize __read_mostly;
 EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);
 
+struct hlist_head *nf_ct_expect_hash __read_mostly;
+EXPORT_SYMBOL_GPL(nf_ct_expect_hash);
+
 unsigned int nf_ct_expect_max __read_mostly;
 
 static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
+static unsigned int nf_ct_expect_hashrnd __read_mostly;
 
 /* nf_conntrack_expect helper functions */
 void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
@@ -72,21 +77,32 @@ static void nf_ct_expectation_timed_out(unsigned long ul_expect)
        nf_ct_expect_put(exp);
 }
 
-static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple)
+static unsigned int nf_ct_expect_dst_hash(const struct net *n, const struct nf_conntrack_tuple *tuple)
 {
-       unsigned int hash;
+       unsigned int hash, seed;
 
-       if (unlikely(!nf_conntrack_hash_rnd)) {
-               init_nf_conntrack_hash_rnd();
-       }
+       get_random_once(&nf_ct_expect_hashrnd, sizeof(nf_ct_expect_hashrnd));
+
+       seed = nf_ct_expect_hashrnd ^ net_hash_mix(n);
 
        hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
                      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
-                      (__force __u16)tuple->dst.u.all) ^ nf_conntrack_hash_rnd);
+                      (__force __u16)tuple->dst.u.all) ^ seed);
 
        return reciprocal_scale(hash, nf_ct_expect_hsize);
 }
 
+static bool
+nf_ct_exp_equal(const struct nf_conntrack_tuple *tuple,
+               const struct nf_conntrack_expect *i,
+               const struct nf_conntrack_zone *zone,
+               const struct net *net)
+{
+       return nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
+              net_eq(net, nf_ct_net(i->master)) &&
+              nf_ct_zone_equal_any(i->master, zone);
+}
+
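
The expectation hash is now seeded via get_random_once() instead of the old init_nf_conntrack_hash_rnd() dance: the seed is drawn on first use, when the entropy pool is in better shape than at boot, and net_hash_mix() keeps the shared table namespace-aware. A C11 analogue of the lazy seeding (random() is only a placeholder for real entropy):

#include <stdint.h>
#include <stdlib.h>
#include <threads.h>

static once_flag seed_once = ONCE_FLAG_INIT;
static uint32_t hash_seed;

static void seed_init(void)
{
        hash_seed = (uint32_t)random();         /* placeholder entropy */
}

static uint32_t seeded_hash(uint32_t key, uint32_t net_mix)
{
        call_once(&seed_once, seed_init);
        return key ^ hash_seed ^ net_mix;
}
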
 struct nf_conntrack_expect *
 __nf_ct_expect_find(struct net *net,
                    const struct nf_conntrack_zone *zone,
@@ -98,10 +114,9 @@ __nf_ct_expect_find(struct net *net,
        if (!net->ct.expect_count)
                return NULL;
 
-       h = nf_ct_expect_dst_hash(tuple);
-       hlist_for_each_entry_rcu(i, &net->ct.expect_hash[h], hnode) {
-               if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
-                   nf_ct_zone_equal_any(i->master, zone))
+       h = nf_ct_expect_dst_hash(net, tuple);
+       hlist_for_each_entry_rcu(i, &nf_ct_expect_hash[h], hnode) {
+               if (nf_ct_exp_equal(tuple, i, zone, net))
                        return i;
        }
        return NULL;
@@ -139,11 +154,10 @@ nf_ct_find_expectation(struct net *net,
        if (!net->ct.expect_count)
                return NULL;
 
-       h = nf_ct_expect_dst_hash(tuple);
-       hlist_for_each_entry(i, &net->ct.expect_hash[h], hnode) {
+       h = nf_ct_expect_dst_hash(net, tuple);
+       hlist_for_each_entry(i, &nf_ct_expect_hash[h], hnode) {
                if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
-                   nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
-                   nf_ct_zone_equal_any(i->master, zone)) {
+                   nf_ct_exp_equal(tuple, i, zone, net)) {
                        exp = i;
                        break;
                }
@@ -223,6 +237,7 @@ static inline int expect_clash(const struct nf_conntrack_expect *a,
        }
 
        return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
+              net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
               nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
 }
 
@@ -232,6 +247,7 @@ static inline int expect_matches(const struct nf_conntrack_expect *a,
        return a->master == b->master && a->class == b->class &&
               nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
               nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
+              net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
               nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
 }
 
@@ -342,7 +358,7 @@ static int nf_ct_expect_insert(struct nf_conntrack_expect *exp)
        struct nf_conn_help *master_help = nfct_help(exp->master);
        struct nf_conntrack_helper *helper;
        struct net *net = nf_ct_exp_net(exp);
-       unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);
+       unsigned int h = nf_ct_expect_dst_hash(net, &exp->tuple);
 
        /* two references : one for hash insert, one for the timer */
        atomic_add(2, &exp->use);
@@ -350,7 +366,7 @@ static int nf_ct_expect_insert(struct nf_conntrack_expect *exp)
        hlist_add_head(&exp->lnode, &master_help->expectations);
        master_help->expecting[exp->class]++;
 
-       hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]);
+       hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]);
        net->ct.expect_count++;
 
        setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
@@ -401,8 +417,8 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
                ret = -ESHUTDOWN;
                goto out;
        }
-       h = nf_ct_expect_dst_hash(&expect->tuple);
-       hlist_for_each_entry_safe(i, next, &net->ct.expect_hash[h], hnode) {
+       h = nf_ct_expect_dst_hash(net, &expect->tuple);
+       hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) {
                if (expect_matches(i, expect)) {
                        if (del_timer(&i->timeout)) {
                                nf_ct_unlink_expect(i);
@@ -468,12 +484,11 @@ struct ct_expect_iter_state {
 
 static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
 {
-       struct net *net = seq_file_net(seq);
        struct ct_expect_iter_state *st = seq->private;
        struct hlist_node *n;
 
        for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
-               n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
+               n = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
                if (n)
                        return n;
        }
@@ -483,14 +498,13 @@ static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
 static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
                                             struct hlist_node *head)
 {
-       struct net *net = seq_file_net(seq);
        struct ct_expect_iter_state *st = seq->private;
 
        head = rcu_dereference(hlist_next_rcu(head));
        while (head == NULL) {
                if (++st->bucket >= nf_ct_expect_hsize)
                        return NULL;
-               head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
+               head = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
        }
        return head;
 }
@@ -623,28 +637,13 @@ module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);
 
 int nf_conntrack_expect_pernet_init(struct net *net)
 {
-       int err = -ENOMEM;
-
        net->ct.expect_count = 0;
-       net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
-       if (net->ct.expect_hash == NULL)
-               goto err1;
-
-       err = exp_proc_init(net);
-       if (err < 0)
-               goto err2;
-
-       return 0;
-err2:
-       nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
-err1:
-       return err;
+       return exp_proc_init(net);
 }
 
 void nf_conntrack_expect_pernet_fini(struct net *net)
 {
        exp_proc_remove(net);
-       nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
 }
 
 int nf_conntrack_expect_init(void)
@@ -660,6 +659,13 @@ int nf_conntrack_expect_init(void)
                                0, 0, NULL);
        if (!nf_ct_expect_cachep)
                return -ENOMEM;
+
+       nf_ct_expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
+       if (!nf_ct_expect_hash) {
+               kmem_cache_destroy(nf_ct_expect_cachep);
+               return -ENOMEM;
+       }
+
        return 0;
 }
 
@@ -667,4 +673,5 @@ void nf_conntrack_expect_fini(void)
 {
        rcu_barrier(); /* Wait for call_rcu() before destroy */
        kmem_cache_destroy(nf_ct_expect_cachep);
+       nf_ct_free_hashtable(nf_ct_expect_hash, nf_ct_expect_hsize);
 }
index 3b40ec575cd56a7b73c0b25d9c8decdda979d842..f703adb7e5f7166ca47380aedd8ad0074e361ee4 100644 (file)
@@ -38,10 +38,10 @@ unsigned int nf_ct_helper_hsize __read_mostly;
 EXPORT_SYMBOL_GPL(nf_ct_helper_hsize);
 static unsigned int nf_ct_helper_count __read_mostly;
 
-static bool nf_ct_auto_assign_helper __read_mostly = true;
+static bool nf_ct_auto_assign_helper __read_mostly = false;
 module_param_named(nf_conntrack_helper, nf_ct_auto_assign_helper, bool, 0644);
 MODULE_PARM_DESC(nf_conntrack_helper,
-                "Enable automatic conntrack helper assignment (default 1)");
+                "Enable automatic conntrack helper assignment (default 0)");
 
 #ifdef CONFIG_SYSCTL
 static struct ctl_table helper_sysctl_table[] = {
@@ -400,7 +400,7 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
        spin_lock_bh(&nf_conntrack_expect_lock);
        for (i = 0; i < nf_ct_expect_hsize; i++) {
                hlist_for_each_entry_safe(exp, next,
-                                         &net->ct.expect_hash[i], hnode) {
+                                         &nf_ct_expect_hash[i], hnode) {
                        struct nf_conn_help *help = nfct_help(exp->master);
                        if ((rcu_dereference_protected(
                                        help->helper,
@@ -424,10 +424,10 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
                spin_unlock_bh(&pcpu->lock);
        }
        local_bh_disable();
-       for (i = 0; i < net->ct.htable_size; i++) {
+       for (i = 0; i < nf_conntrack_htable_size; i++) {
                nf_conntrack_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
-               if (i < net->ct.htable_size) {
-                       hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
+               if (i < nf_conntrack_htable_size) {
+                       hlist_nulls_for_each_entry(h, nn, &nf_conntrack_hash[i], hnnode)
                                unhelp(h, me);
                }
                spin_unlock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
index 3ce5c314ea4bc63ebe5cfc294f6f335726ed85c0..252e6a7cd2f15f1d687069079835d84131579a4d 100644 (file)
 
 static spinlock_t nf_connlabels_lock;
 
-static unsigned int label_bits(const struct nf_conn_labels *l)
-{
-       unsigned int longs = l->words;
-       return longs * BITS_PER_LONG;
-}
-
-bool nf_connlabel_match(const struct nf_conn *ct, u16 bit)
-{
-       struct nf_conn_labels *labels = nf_ct_labels_find(ct);
-
-       if (!labels)
-               return false;
-
-       return bit < label_bits(labels) && test_bit(bit, labels->bits);
-}
-EXPORT_SYMBOL_GPL(nf_connlabel_match);
-
 int nf_connlabel_set(struct nf_conn *ct, u16 bit)
 {
        struct nf_conn_labels *labels = nf_ct_labels_find(ct);
 
-       if (!labels || bit >= label_bits(labels))
+       if (!labels || BIT_WORD(bit) >= labels->words)
                return -ENOSPC;
 
        if (test_bit(bit, labels->bits))
@@ -50,14 +33,18 @@ int nf_connlabel_set(struct nf_conn *ct, u16 bit)
 }
 EXPORT_SYMBOL_GPL(nf_connlabel_set);
 
-static void replace_u32(u32 *address, u32 mask, u32 new)
+static int replace_u32(u32 *address, u32 mask, u32 new)
 {
        u32 old, tmp;
 
        do {
                old = *address;
                tmp = (old & mask) ^ new;
+               if (old == tmp)
+                       return 0;
        } while (cmpxchg(address, old, tmp) != old);
+
+       return 1;
 }
 
 int nf_connlabels_replace(struct nf_conn *ct,
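
replace_u32() now reports whether the store changed anything, so nf_connlabels_replace() can skip firing an IPCT_LABEL event for no-op updates. The same lock-free masked replace in portable C11, with the semantics of the hunk above:

#include <stdatomic.h>
#include <stdint.h>

static int replace_u32(_Atomic uint32_t *addr, uint32_t mask, uint32_t new)
{
        uint32_t old = atomic_load(addr), tmp;

        do {
                tmp = (old & mask) ^ new;
                if (old == tmp)
                        return 0;       /* already the desired value */
        } while (!atomic_compare_exchange_weak(addr, &old, tmp));

        return 1;                       /* word actually changed */
}
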
@@ -66,6 +53,7 @@ int nf_connlabels_replace(struct nf_conn *ct,
 {
        struct nf_conn_labels *labels;
        unsigned int size, i;
+       int changed = 0;
        u32 *dst;
 
        labels = nf_ct_labels_find(ct);
@@ -77,29 +65,27 @@ int nf_connlabels_replace(struct nf_conn *ct,
                words32 = size / sizeof(u32);
 
        dst = (u32 *) labels->bits;
-       if (words32) {
-               for (i = 0; i < words32; i++)
-                       replace_u32(&dst[i], mask ? ~mask[i] : 0, data[i]);
-       }
+       for (i = 0; i < words32; i++)
+               changed |= replace_u32(&dst[i], mask ? ~mask[i] : 0, data[i]);
 
        size /= sizeof(u32);
        for (i = words32; i < size; i++) /* pad */
                replace_u32(&dst[i], 0, 0);
 
-       nf_conntrack_event_cache(IPCT_LABEL, ct);
+       if (changed)
+               nf_conntrack_event_cache(IPCT_LABEL, ct);
        return 0;
 }
 EXPORT_SYMBOL_GPL(nf_connlabels_replace);
 
-int nf_connlabels_get(struct net *net, unsigned int n_bits)
+int nf_connlabels_get(struct net *net, unsigned int bits)
 {
        size_t words;
 
-       if (n_bits > (NF_CT_LABELS_MAX_SIZE * BITS_PER_BYTE))
+       words = BIT_WORD(bits) + 1;
+       if (words > NF_CT_LABELS_MAX_SIZE / sizeof(long))
                return -ERANGE;
 
-       words = BITS_TO_LONGS(n_bits);
-
        spin_lock(&nf_connlabels_lock);
        net->ct.labels_used++;
        if (words > net->ct.label_words)
@@ -128,6 +114,8 @@ static struct nf_ct_ext_type labels_extend __read_mostly = {
 
 int nf_conntrack_labels_init(void)
 {
+       BUILD_BUG_ON(NF_CT_LABELS_MAX_SIZE / sizeof(long) >= U8_MAX);
+
        spin_lock_init(&nf_connlabels_lock);
        return nf_ct_extend_register(&labels_extend);
 }
index 355e8552fd5b77682295493a22dc0a03d9338255..a18d1ceabad52368a38df3760b03cf841b2b8937 100644 (file)
@@ -58,10 +58,9 @@ MODULE_LICENSE("GPL");
 
 static char __initdata version[] = "0.93";
 
-static inline int
-ctnetlink_dump_tuples_proto(struct sk_buff *skb,
-                           const struct nf_conntrack_tuple *tuple,
-                           struct nf_conntrack_l4proto *l4proto)
+static int ctnetlink_dump_tuples_proto(struct sk_buff *skb,
+                                      const struct nf_conntrack_tuple *tuple,
+                                      struct nf_conntrack_l4proto *l4proto)
 {
        int ret = 0;
        struct nlattr *nest_parms;
@@ -83,10 +82,9 @@ nla_put_failure:
        return -1;
 }
 
-static inline int
-ctnetlink_dump_tuples_ip(struct sk_buff *skb,
-                        const struct nf_conntrack_tuple *tuple,
-                        struct nf_conntrack_l3proto *l3proto)
+static int ctnetlink_dump_tuples_ip(struct sk_buff *skb,
+                                   const struct nf_conntrack_tuple *tuple,
+                                   struct nf_conntrack_l3proto *l3proto)
 {
        int ret = 0;
        struct nlattr *nest_parms;
@@ -106,9 +104,8 @@ nla_put_failure:
        return -1;
 }
 
-static int
-ctnetlink_dump_tuples(struct sk_buff *skb,
-                     const struct nf_conntrack_tuple *tuple)
+static int ctnetlink_dump_tuples(struct sk_buff *skb,
+                                const struct nf_conntrack_tuple *tuple)
 {
        int ret;
        struct nf_conntrack_l3proto *l3proto;
@@ -127,9 +124,8 @@ ctnetlink_dump_tuples(struct sk_buff *skb,
        return ret;
 }
 
-static inline int
-ctnetlink_dump_zone_id(struct sk_buff *skb, int attrtype,
-                      const struct nf_conntrack_zone *zone, int dir)
+static int ctnetlink_dump_zone_id(struct sk_buff *skb, int attrtype,
+                                 const struct nf_conntrack_zone *zone, int dir)
 {
        if (zone->id == NF_CT_DEFAULT_ZONE_ID || zone->dir != dir)
                return 0;
@@ -141,8 +137,7 @@ nla_put_failure:
        return -1;
 }
 
-static inline int
-ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
+static int ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
 {
        if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status)))
                goto nla_put_failure;
@@ -152,8 +147,7 @@ nla_put_failure:
        return -1;
 }
 
-static inline int
-ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
+static int ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
 {
        long timeout = ((long)ct->timeout.expires - (long)jiffies) / HZ;
 
@@ -168,8 +162,7 @@ nla_put_failure:
        return -1;
 }
 
-static inline int
-ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct)
+static int ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct)
 {
        struct nf_conntrack_l4proto *l4proto;
        struct nlattr *nest_proto;
@@ -193,8 +186,8 @@ nla_put_failure:
        return -1;
 }
 
-static inline int
-ctnetlink_dump_helpinfo(struct sk_buff *skb, const struct nf_conn *ct)
+static int ctnetlink_dump_helpinfo(struct sk_buff *skb,
+                                  const struct nf_conn *ct)
 {
        struct nlattr *nest_helper;
        const struct nf_conn_help *help = nfct_help(ct);
@@ -245,8 +238,10 @@ dump_counters(struct sk_buff *skb, struct nf_conn_acct *acct,
        if (!nest_count)
                goto nla_put_failure;
 
-       if (nla_put_be64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts)) ||
-           nla_put_be64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes)))
+       if (nla_put_be64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts),
+                        CTA_COUNTERS_PAD) ||
+           nla_put_be64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes),
+                        CTA_COUNTERS_PAD))
                goto nla_put_failure;
 
        nla_nest_end(skb, nest_count);
@@ -287,9 +282,11 @@ ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
        if (!nest_count)
                goto nla_put_failure;
 
-       if (nla_put_be64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start)) ||
+       if (nla_put_be64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start),
+                        CTA_TIMESTAMP_PAD) ||
            (tstamp->stop != 0 && nla_put_be64(skb, CTA_TIMESTAMP_STOP,
-                                              cpu_to_be64(tstamp->stop))))
+                                              cpu_to_be64(tstamp->stop),
+                                              CTA_TIMESTAMP_PAD)))
                goto nla_put_failure;
        nla_nest_end(skb, nest_count);
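
The extra CTA_*_PAD argument to nla_put_be64() exists because a netlink attribute header is 4 bytes, so a u64 payload may land on a 4-byte boundary; on architectures without efficient unaligned access the kernel first emits a zero-payload pad attribute to push the value to an 8-byte boundary (which is also why the size estimates below switch to nla_total_size_64bit()). The alignment test, roughly, given that message offsets are always 4-aligned:

#include <stdint.h>

#define NLA_HDRLEN 4    /* struct nlattr: u16 len + u16 type */

/* nonzero if a 4-byte pad attribute must precede a u64 attribute
 * whose header would start at 'tail' bytes into the message */
static int needs_pad(uint32_t tail)
{
        return (tail + NLA_HDRLEN) % 8 != 0;
}
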
 
@@ -300,8 +297,7 @@ nla_put_failure:
 }
 
 #ifdef CONFIG_NF_CONNTRACK_MARK
-static inline int
-ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
+static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
 {
        if (nla_put_be32(skb, CTA_MARK, htonl(ct->mark)))
                goto nla_put_failure;
@@ -315,8 +311,7 @@ nla_put_failure:
 #endif
 
 #ifdef CONFIG_NF_CONNTRACK_SECMARK
-static inline int
-ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
+static int ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
 {
        struct nlattr *nest_secctx;
        int len, ret;
@@ -345,7 +340,7 @@ nla_put_failure:
 #endif
 
 #ifdef CONFIG_NF_CONNTRACK_LABELS
-static int ctnetlink_label_size(const struct nf_conn *ct)
+static inline int ctnetlink_label_size(const struct nf_conn *ct)
 {
        struct nf_conn_labels *labels = nf_ct_labels_find(ct);
 
@@ -380,8 +375,7 @@ ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct)
 
 #define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
 
-static inline int
-ctnetlink_dump_master(struct sk_buff *skb, const struct nf_conn *ct)
+static int ctnetlink_dump_master(struct sk_buff *skb, const struct nf_conn *ct)
 {
        struct nlattr *nest_parms;
 
@@ -426,8 +420,8 @@ nla_put_failure:
        return -1;
 }
 
-static inline int
-ctnetlink_dump_ct_seq_adj(struct sk_buff *skb, const struct nf_conn *ct)
+static int ctnetlink_dump_ct_seq_adj(struct sk_buff *skb,
+                                    const struct nf_conn *ct)
 {
        struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
        struct nf_ct_seqadj *seq;
@@ -446,8 +440,7 @@ ctnetlink_dump_ct_seq_adj(struct sk_buff *skb, const struct nf_conn *ct)
        return 0;
 }
 
-static inline int
-ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
+static int ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
 {
        if (nla_put_be32(skb, CTA_ID, htonl((unsigned long)ct)))
                goto nla_put_failure;
@@ -457,8 +450,7 @@ nla_put_failure:
        return -1;
 }
 
-static inline int
-ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
+static int ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
 {
        if (nla_put_be32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use))))
                goto nla_put_failure;
@@ -538,8 +530,7 @@ nla_put_failure:
        return -1;
 }
 
-static inline size_t
-ctnetlink_proto_size(const struct nf_conn *ct)
+static inline size_t ctnetlink_proto_size(const struct nf_conn *ct)
 {
        struct nf_conntrack_l3proto *l3proto;
        struct nf_conntrack_l4proto *l4proto;
@@ -556,19 +547,17 @@ ctnetlink_proto_size(const struct nf_conn *ct)
        return len;
 }
 
-static inline size_t
-ctnetlink_acct_size(const struct nf_conn *ct)
+static inline size_t ctnetlink_acct_size(const struct nf_conn *ct)
 {
        if (!nf_ct_ext_exist(ct, NF_CT_EXT_ACCT))
                return 0;
        return 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
-              + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
-              + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
+              + 2 * nla_total_size_64bit(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
+              + 2 * nla_total_size_64bit(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
               ;
 }
 
-static inline int
-ctnetlink_secctx_size(const struct nf_conn *ct)
+static inline int ctnetlink_secctx_size(const struct nf_conn *ct)
 {
 #ifdef CONFIG_NF_CONNTRACK_SECMARK
        int len, ret;
@@ -584,20 +573,19 @@ ctnetlink_secctx_size(const struct nf_conn *ct)
 #endif
 }
 
-static inline size_t
-ctnetlink_timestamp_size(const struct nf_conn *ct)
+static inline size_t ctnetlink_timestamp_size(const struct nf_conn *ct)
 {
 #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
        if (!nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP))
                return 0;
-       return nla_total_size(0) + 2 * nla_total_size(sizeof(uint64_t));
+       return nla_total_size(0) + 2 * nla_total_size_64bit(sizeof(uint64_t));
 #else
        return 0;
 #endif
 }
 
-static inline size_t
-ctnetlink_nlmsg_size(const struct nf_conn *ct)
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+static size_t ctnetlink_nlmsg_size(const struct nf_conn *ct)
 {
        return NLMSG_ALIGN(sizeof(struct nfgenmsg))
               + 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
@@ -628,7 +616,6 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
               ;
 }
 
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
 static int
 ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
 {
@@ -837,19 +824,22 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
        last = (struct nf_conn *)cb->args[1];
 
        local_bh_disable();
-       for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
+       for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
 restart:
                lockp = &nf_conntrack_locks[cb->args[0] % CONNTRACK_LOCKS];
                nf_conntrack_lock(lockp);
-               if (cb->args[0] >= net->ct.htable_size) {
+               if (cb->args[0] >= nf_conntrack_htable_size) {
                        spin_unlock(lockp);
                        goto out;
                }
-               hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]],
-                                        hnnode) {
+               hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[cb->args[0]],
+                                          hnnode) {
                        if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
                                continue;
                        ct = nf_ct_tuplehash_to_ctrack(h);
+                       if (!net_eq(net, nf_ct_net(ct)))
+                               continue;
+
                        /* Dump entries of a given L3 protocol number.
                         * If it is not specified, ie. l3proto == 0,
                         * then dump everything. */
@@ -891,8 +881,8 @@ out:
        return skb->len;
 }
 
-static inline int
-ctnetlink_parse_tuple_ip(struct nlattr *attr, struct nf_conntrack_tuple *tuple)
+static int ctnetlink_parse_tuple_ip(struct nlattr *attr,
+                                   struct nf_conntrack_tuple *tuple)
 {
        struct nlattr *tb[CTA_IP_MAX+1];
        struct nf_conntrack_l3proto *l3proto;
@@ -921,9 +911,8 @@ static const struct nla_policy proto_nla_policy[CTA_PROTO_MAX+1] = {
        [CTA_PROTO_NUM] = { .type = NLA_U8 },
 };
 
-static inline int
-ctnetlink_parse_tuple_proto(struct nlattr *attr,
-                           struct nf_conntrack_tuple *tuple)
+static int ctnetlink_parse_tuple_proto(struct nlattr *attr,
+                                      struct nf_conntrack_tuple *tuple)
 {
        struct nlattr *tb[CTA_PROTO_MAX+1];
        struct nf_conntrack_l4proto *l4proto;
@@ -1050,9 +1039,8 @@ static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
                                    .len = NF_CT_HELPER_NAME_LEN - 1 },
 };
 
-static inline int
-ctnetlink_parse_help(const struct nlattr *attr, char **helper_name,
-                    struct nlattr **helpinfo)
+static int ctnetlink_parse_help(const struct nlattr *attr, char **helper_name,
+                               struct nlattr **helpinfo)
 {
        int err;
        struct nlattr *tb[CTA_HELP_MAX+1];
@@ -1463,8 +1451,8 @@ ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[])
 #endif
 }
 
-static inline int
-ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
+static int ctnetlink_change_helper(struct nf_conn *ct,
+                                  const struct nlattr * const cda[])
 {
        struct nf_conntrack_helper *helper;
        struct nf_conn_help *help = nfct_help(ct);
@@ -1524,8 +1512,8 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
        return -EOPNOTSUPP;
 }
 
-static inline int
-ctnetlink_change_timeout(struct nf_conn *ct, const struct nlattr * const cda[])
+static int ctnetlink_change_timeout(struct nf_conn *ct,
+                                   const struct nlattr * const cda[])
 {
        u_int32_t timeout = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));
 
@@ -1544,8 +1532,8 @@ static const struct nla_policy protoinfo_policy[CTA_PROTOINFO_MAX+1] = {
        [CTA_PROTOINFO_SCTP]    = { .type = NLA_NESTED },
 };
 
-static inline int
-ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[])
+static int ctnetlink_change_protoinfo(struct nf_conn *ct,
+                                     const struct nlattr * const cda[])
 {
        const struct nlattr *attr = cda[CTA_PROTOINFO];
        struct nlattr *tb[CTA_PROTOINFO_MAX+1];
@@ -1571,8 +1559,8 @@ static const struct nla_policy seqadj_policy[CTA_SEQADJ_MAX+1] = {
        [CTA_SEQADJ_OFFSET_AFTER]       = { .type = NLA_U32 },
 };
 
-static inline int
-change_seq_adj(struct nf_ct_seqadj *seq, const struct nlattr * const attr)
+static int change_seq_adj(struct nf_ct_seqadj *seq,
+                         const struct nlattr * const attr)
 {
        int err;
        struct nlattr *cda[CTA_SEQADJ_MAX+1];
@@ -2405,10 +2393,9 @@ static struct nfnl_ct_hook ctnetlink_glue_hook = {
  * EXPECT
  ***********************************************************************/
 
-static inline int
-ctnetlink_exp_dump_tuple(struct sk_buff *skb,
-                        const struct nf_conntrack_tuple *tuple,
-                        enum ctattr_expect type)
+static int ctnetlink_exp_dump_tuple(struct sk_buff *skb,
+                                   const struct nf_conntrack_tuple *tuple,
+                                   enum ctattr_expect type)
 {
        struct nlattr *nest_parms;
 
@@ -2425,10 +2412,9 @@ nla_put_failure:
        return -1;
 }
 
-static inline int
-ctnetlink_exp_dump_mask(struct sk_buff *skb,
-                       const struct nf_conntrack_tuple *tuple,
-                       const struct nf_conntrack_tuple_mask *mask)
+static int ctnetlink_exp_dump_mask(struct sk_buff *skb,
+                                  const struct nf_conntrack_tuple *tuple,
+                                  const struct nf_conntrack_tuple_mask *mask)
 {
        int ret;
        struct nf_conntrack_l3proto *l3proto;
@@ -2646,10 +2632,14 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
        last = (struct nf_conntrack_expect *)cb->args[1];
        for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
 restart:
-               hlist_for_each_entry(exp, &net->ct.expect_hash[cb->args[0]],
+               hlist_for_each_entry(exp, &nf_ct_expect_hash[cb->args[0]],
                                     hnode) {
                        if (l3proto && exp->tuple.src.l3num != l3proto)
                                continue;
+
+                       if (!net_eq(nf_ct_net(exp->master), net))
+                               continue;
+
                        if (cb->args[1]) {
                                if (exp != last)
                                        continue;
@@ -2900,8 +2890,12 @@ static int ctnetlink_del_expect(struct net *net, struct sock *ctnl,
                spin_lock_bh(&nf_conntrack_expect_lock);
                for (i = 0; i < nf_ct_expect_hsize; i++) {
                        hlist_for_each_entry_safe(exp, next,
-                                                 &net->ct.expect_hash[i],
+                                                 &nf_ct_expect_hash[i],
                                                  hnode) {
+
+                               if (!net_eq(nf_ct_exp_net(exp), net))
+                                       continue;
+
                                m_help = nfct_help(exp->master);
                                if (!strcmp(m_help->helper->name, name) &&
                                    del_timer(&exp->timeout)) {
@@ -2918,8 +2912,12 @@ static int ctnetlink_del_expect(struct net *net, struct sock *ctnl,
                spin_lock_bh(&nf_conntrack_expect_lock);
                for (i = 0; i < nf_ct_expect_hsize; i++) {
                        hlist_for_each_entry_safe(exp, next,
-                                                 &net->ct.expect_hash[i],
+                                                 &nf_ct_expect_hash[i],
                                                  hnode) {
+
+                               if (!net_eq(nf_ct_exp_net(exp), net))
+                                       continue;
+
                                if (del_timer(&exp->timeout)) {
                                        nf_ct_unlink_expect_report(exp,
                                                        NETLINK_CB(skb).portid,
index fce1b1cca32d65c6241f0ee513db2fe5774a00b9..399a38fd685a545b8fb4bab4255b00e575d33c67 100644 (file)
@@ -645,7 +645,8 @@ static int dccp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
            nla_put_u8(skb, CTA_PROTOINFO_DCCP_ROLE,
                       ct->proto.dccp.role[IP_CT_DIR_ORIGINAL]) ||
            nla_put_be64(skb, CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ,
-                        cpu_to_be64(ct->proto.dccp.handshake_seq)))
+                        cpu_to_be64(ct->proto.dccp.handshake_seq),
+                        CTA_PROTOINFO_DCCP_PAD))
                goto nla_put_failure;
        nla_nest_end(skb, nest_parms);
        spin_unlock_bh(&ct->lock);
@@ -660,6 +661,7 @@ static const struct nla_policy dccp_nla_policy[CTA_PROTOINFO_DCCP_MAX + 1] = {
        [CTA_PROTOINFO_DCCP_STATE]      = { .type = NLA_U8 },
        [CTA_PROTOINFO_DCCP_ROLE]       = { .type = NLA_U8 },
        [CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ] = { .type = NLA_U64 },
+       [CTA_PROTOINFO_DCCP_PAD]        = { .type = NLA_UNSPEC },
 };
 
 static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct)
index 9578a7c371ef2ce04f0a7b10c694b2533e29bd3a..1d7ab960a9e64c018d83ba428a8fbd9b00656985 100644 (file)
@@ -191,13 +191,7 @@ static void sctp_print_tuple(struct seq_file *s,
 /* Print out the private part of the conntrack. */
 static void sctp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
 {
-       enum sctp_conntrack state;
-
-       spin_lock_bh(&ct->lock);
-       state = ct->proto.sctp.state;
-       spin_unlock_bh(&ct->lock);
-
-       seq_printf(s, "%s ", sctp_conntrack_names[state]);
+       seq_printf(s, "%s ", sctp_conntrack_names[ct->proto.sctp.state]);
 }
 
 #define for_each_sctp_chunk(skb, sch, _sch, offset, dataoff, count)    \
index 278f3b9356efdcd37ca3d1aacb62f03e57db7e3c..70c8381641a79ee7fa186ae241f987dd4629636c 100644 (file)
@@ -313,13 +313,7 @@ static void tcp_print_tuple(struct seq_file *s,
 /* Print out the private part of the conntrack. */
 static void tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
 {
-       enum tcp_conntrack state;
-
-       spin_lock_bh(&ct->lock);
-       state = ct->proto.tcp.state;
-       spin_unlock_bh(&ct->lock);
-
-       seq_printf(s, "%s ", tcp_conntrack_names[state]);
+       seq_printf(s, "%s ", tcp_conntrack_names[ct->proto.tcp.state]);
 }
 
 static unsigned int get_conntrack_index(const struct tcphdr *tcph)
@@ -410,6 +404,8 @@ static void tcp_options(const struct sk_buff *skb,
                        length--;
                        continue;
                default:
+                       if (length < 2)
+                               return;
                        opsize=*ptr++;
                        if (opsize < 2) /* "silly options" */
                                return;
@@ -470,6 +466,8 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
                        length--;
                        continue;
                default:
+                       if (length < 2)
+                               return;
                        opsize = *ptr++;
                        if (opsize < 2) /* "silly options" */
                                return;
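
Both option walkers gain the same guard: once a non-NOP opcode is consumed there must still be a byte left for the option length, otherwise the parser reads past the end of the options area. A self-contained walker with the full set of checks:

#include <stddef.h>
#include <stdint.h>

#define TCPOPT_EOL 0
#define TCPOPT_NOP 1

static void walk_tcp_options(const uint8_t *ptr, size_t length)
{
        while (length > 0) {
                uint8_t opcode = *ptr++;
                uint8_t opsize;

                if (opcode == TCPOPT_EOL)
                        return;
                if (opcode == TCPOPT_NOP) {
                        length--;
                        continue;
                }
                if (length < 2)         /* no room for the size byte */
                        return;
                opsize = ptr[0];
                if (opsize < 2 || opsize > length)
                        return;         /* malformed or truncated */
                /* option body: ptr[1] .. ptr[opsize - 2] */
                ptr    += opsize - 1;
                length -= opsize;
        }
}
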
index 478f92f834b61e7e2007779dc9dd8cace98ffcd2..4fd040575ffe4ed9bd887d9356dc9c6d84b73966 100644 (file)
@@ -309,6 +309,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly =
        .l3proto                = PF_INET,
        .l4proto                = IPPROTO_UDP,
        .name                   = "udp",
+       .allow_clash            = true,
        .pkt_to_tuple           = udp_pkt_to_tuple,
        .invert_tuple           = udp_invert_tuple,
        .print_tuple            = udp_print_tuple,
@@ -341,6 +342,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly =
        .l3proto                = PF_INET6,
        .l4proto                = IPPROTO_UDP,
        .name                   = "udp",
+       .allow_clash            = true,
        .pkt_to_tuple           = udp_pkt_to_tuple,
        .invert_tuple           = udp_invert_tuple,
        .print_tuple            = udp_print_tuple,
index 1ac8ee13a873ea60dffb6d455c48fc11bf7e45c4..9d692f5adb941b311ac0ab5eba76d9ec8f126e70 100644 (file)
@@ -274,6 +274,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly =
        .l3proto                = PF_INET,
        .l4proto                = IPPROTO_UDPLITE,
        .name                   = "udplite",
+       .allow_clash            = true,
        .pkt_to_tuple           = udplite_pkt_to_tuple,
        .invert_tuple           = udplite_invert_tuple,
        .print_tuple            = udplite_print_tuple,
@@ -306,6 +307,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly =
        .l3proto                = PF_INET6,
        .l4proto                = IPPROTO_UDPLITE,
        .name                   = "udplite",
+       .allow_clash            = true,
        .pkt_to_tuple           = udplite_pkt_to_tuple,
        .invert_tuple           = udplite_invert_tuple,
        .print_tuple            = udplite_print_tuple,
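
Setting .allow_clash on UDP and UDPLITE lets conntrack resolve insertion races for connectionless protocols: when two packets of the same flow are processed on different CPUs and both create an entry, the loser can adopt the already-confirmed entry instead of having its packet dropped. A very loose sketch of that decision; the real logic lives in the confirm path and checks more state:

#include <string.h>

struct tuple { unsigned char key[13]; };

struct conn {
        struct tuple reply;     /* reverse-direction flow key */
        int          confirmed;
};

static struct conn *confirm(struct conn *mine, struct conn *found,
                            int allow_clash)
{
        if (found && allow_clash && found->confirmed &&
            memcmp(&found->reply, &mine->reply, sizeof(mine->reply)) == 0)
                return found;   /* lose the insert race gracefully */
        return mine;
}
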
index 0f1a45bcacb2414a2292022d4180584481f3f698..f87e84ebcec3ac78cc4b941ec8828fab8c623f55 100644 (file)
@@ -54,14 +54,13 @@ struct ct_iter_state {
 
 static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
 {
-       struct net *net = seq_file_net(seq);
        struct ct_iter_state *st = seq->private;
        struct hlist_nulls_node *n;
 
        for (st->bucket = 0;
-            st->bucket < net->ct.htable_size;
+            st->bucket < nf_conntrack_htable_size;
             st->bucket++) {
-               n = rcu_dereference(hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
+               n = rcu_dereference(hlist_nulls_first_rcu(&nf_conntrack_hash[st->bucket]));
                if (!is_a_nulls(n))
                        return n;
        }
@@ -71,18 +70,17 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
 static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
                                      struct hlist_nulls_node *head)
 {
-       struct net *net = seq_file_net(seq);
        struct ct_iter_state *st = seq->private;
 
        head = rcu_dereference(hlist_nulls_next_rcu(head));
        while (is_a_nulls(head)) {
                if (likely(get_nulls_value(head) == st->bucket)) {
-                       if (++st->bucket >= net->ct.htable_size)
+                       if (++st->bucket >= nf_conntrack_htable_size)
                                return NULL;
                }
                head = rcu_dereference(
                                hlist_nulls_first_rcu(
-                                       &net->ct.hash[st->bucket]));
+                                       &nf_conntrack_hash[st->bucket]));
        }
        return head;
 }
@@ -458,7 +456,7 @@ static struct ctl_table nf_ct_sysctl_table[] = {
        },
        {
                .procname       = "nf_conntrack_buckets",
-               .data           = &init_net.ct.htable_size,
+               .data           = &nf_conntrack_htable_size,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0444,
                .proc_handler   = proc_dointvec,
@@ -512,7 +510,6 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
                goto out_kmemdup;
 
        table[1].data = &net->ct.count;
-       table[2].data = &net->ct.htable_size;
        table[3].data = &net->ct.sysctl_checksum;
        table[4].data = &net->ct.sysctl_log_invalid;
 
index 06a9f45771ab613bdd529195f64583e6e4316a7c..6877a396f8fce12f47a6e4703e3725f7ac78bef9 100644 (file)
@@ -38,6 +38,9 @@ static const struct nf_nat_l3proto __rcu *nf_nat_l3protos[NFPROTO_NUMPROTO]
 static const struct nf_nat_l4proto __rcu **nf_nat_l4protos[NFPROTO_NUMPROTO]
                                                __read_mostly;
 
+static struct hlist_head *nf_nat_bysource __read_mostly;
+static unsigned int nf_nat_htable_size __read_mostly;
+static unsigned int nf_nat_hash_rnd __read_mostly;
 
 inline const struct nf_nat_l3proto *
 __nf_nat_l3proto_find(u8 family)
@@ -118,15 +121,17 @@ EXPORT_SYMBOL(nf_xfrm_me_harder);
 
 /* We keep an extra hash for each conntrack, for fast searching. */
 static inline unsigned int
-hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple)
+hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)
 {
        unsigned int hash;
 
+       get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd));
+
        /* Original src, to ensure we map it consistently if poss. */
        hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32),
-                     tuple->dst.protonum ^ nf_conntrack_hash_rnd);
+                     tuple->dst.protonum ^ nf_nat_hash_rnd ^ net_hash_mix(n));
 
-       return reciprocal_scale(hash, net->ct.nat_htable_size);
+       return reciprocal_scale(hash, nf_nat_htable_size);
 }
 
 /* Is this tuple already taken? (not by us) */
@@ -196,9 +201,10 @@ find_appropriate_src(struct net *net,
        const struct nf_conn_nat *nat;
        const struct nf_conn *ct;
 
-       hlist_for_each_entry_rcu(nat, &net->ct.nat_bysource[h], bysource) {
+       hlist_for_each_entry_rcu(nat, &nf_nat_bysource[h], bysource) {
                ct = nat->ct;
                if (same_src(ct, tuple) &&
+                   net_eq(net, nf_ct_net(ct)) &&
                    nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
                        /* Copy source part from reply tuple. */
                        nf_ct_invert_tuplepr(result,
@@ -431,7 +437,7 @@ nf_nat_setup_info(struct nf_conn *ct,
                nat = nfct_nat(ct);
                nat->ct = ct;
                hlist_add_head_rcu(&nat->bysource,
-                                  &net->ct.nat_bysource[srchash]);
+                                  &nf_nat_bysource[srchash]);
                spin_unlock_bh(&nf_nat_lock);
        }
 
@@ -819,27 +825,14 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct,
 }
 #endif
 
-static int __net_init nf_nat_net_init(struct net *net)
-{
-       /* Leave them the same for the moment. */
-       net->ct.nat_htable_size = net->ct.htable_size;
-       net->ct.nat_bysource = nf_ct_alloc_hashtable(&net->ct.nat_htable_size, 0);
-       if (!net->ct.nat_bysource)
-               return -ENOMEM;
-       return 0;
-}
-
 static void __net_exit nf_nat_net_exit(struct net *net)
 {
        struct nf_nat_proto_clean clean = {};
 
        nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean, 0, 0);
-       synchronize_rcu();
-       nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
 }
 
 static struct pernet_operations nf_nat_net_ops = {
-       .init = nf_nat_net_init,
        .exit = nf_nat_net_exit,
 };
 
@@ -852,8 +845,16 @@ static int __init nf_nat_init(void)
 {
        int ret;
 
+       /* Leave them the same for the moment. */
+       nf_nat_htable_size = nf_conntrack_htable_size;
+
+       nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0);
+       if (!nf_nat_bysource)
+               return -ENOMEM;
+
        ret = nf_ct_extend_register(&nat_extend);
        if (ret < 0) {
+               nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size);
                printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
                return ret;
        }
@@ -877,6 +878,7 @@ static int __init nf_nat_init(void)
        return 0;
 
  cleanup_extend:
+       nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size);
        nf_ct_extend_unregister(&nat_extend);
        return ret;
 }
@@ -895,6 +897,7 @@ static void __exit nf_nat_cleanup(void)
        for (i = 0; i < NFPROTO_NUMPROTO; i++)
                kfree(nf_nat_l4protos[i]);
        synchronize_net();
+       nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size);
 }
 
 MODULE_LICENSE("GPL");
index 2011977cd79d9481cd16404aa769c70524a15af4..4d292b933b5c5c83d205f844f1a876ebefca708c 100644 (file)
@@ -944,8 +944,10 @@ static int nft_dump_stats(struct sk_buff *skb, struct nft_stats __percpu *stats)
        if (nest == NULL)
                goto nla_put_failure;
 
-       if (nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.pkts)) ||
-           nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes)))
+       if (nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.pkts),
+                        NFTA_COUNTER_PAD) ||
+           nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes),
+                        NFTA_COUNTER_PAD))
                goto nla_put_failure;
 
        nla_nest_end(skb, nest);
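The change repeated throughout these hunks is mechanical: nla_put_be64() in this tree now takes a fourth argument naming a padding attribute, so the netlink core can emit a pad attribute and keep 64-bit payloads 8-byte aligned on architectures that need it; size estimates correspondingly move from nla_total_size(sizeof(__be64)) to nla_total_size_64bit(). A sketch of the calling pattern, using the attribute names from the hunk above:

    static int dump_counters(struct sk_buff *skb, u64 pkts, u64 bytes)
    {
            /* NFTA_COUNTER_PAD tells the core which attribute type to
             * use if alignment padding has to be inserted. */
            if (nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(pkts),
                             NFTA_COUNTER_PAD) ||
                nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(bytes),
                             NFTA_COUNTER_PAD))
                    return -EMSGSIZE;
            return 0;
    }
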
@@ -975,7 +977,8 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
 
        if (nla_put_string(skb, NFTA_CHAIN_TABLE, table->name))
                goto nla_put_failure;
-       if (nla_put_be64(skb, NFTA_CHAIN_HANDLE, cpu_to_be64(chain->handle)))
+       if (nla_put_be64(skb, NFTA_CHAIN_HANDLE, cpu_to_be64(chain->handle),
+                        NFTA_CHAIN_PAD))
                goto nla_put_failure;
        if (nla_put_string(skb, NFTA_CHAIN_NAME, chain->name))
                goto nla_put_failure;
@@ -1803,13 +1806,15 @@ static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net,
                goto nla_put_failure;
        if (nla_put_string(skb, NFTA_RULE_CHAIN, chain->name))
                goto nla_put_failure;
-       if (nla_put_be64(skb, NFTA_RULE_HANDLE, cpu_to_be64(rule->handle)))
+       if (nla_put_be64(skb, NFTA_RULE_HANDLE, cpu_to_be64(rule->handle),
+                        NFTA_RULE_PAD))
                goto nla_put_failure;
 
        if ((event != NFT_MSG_DELRULE) && (rule->list.prev != &chain->rules)) {
                prule = list_entry(rule->list.prev, struct nft_rule, list);
                if (nla_put_be64(skb, NFTA_RULE_POSITION,
-                                cpu_to_be64(prule->handle)))
+                                cpu_to_be64(prule->handle),
+                                NFTA_RULE_PAD))
                        goto nla_put_failure;
        }
 
@@ -2312,7 +2317,7 @@ nft_select_set_ops(const struct nlattr * const nla[],
 static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = {
        [NFTA_SET_TABLE]                = { .type = NLA_STRING },
        [NFTA_SET_NAME]                 = { .type = NLA_STRING,
-                                           .len = IFNAMSIZ - 1 },
+                                           .len = NFT_SET_MAXNAMELEN - 1 },
        [NFTA_SET_FLAGS]                = { .type = NLA_U32 },
        [NFTA_SET_KEY_TYPE]             = { .type = NLA_U32 },
        [NFTA_SET_KEY_LEN]              = { .type = NLA_U32 },
@@ -2396,7 +2401,7 @@ static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
        unsigned long *inuse;
        unsigned int n = 0, min = 0;
 
-       p = strnchr(name, IFNAMSIZ, '%');
+       p = strnchr(name, NFT_SET_MAXNAMELEN, '%');
        if (p != NULL) {
                if (p[1] != 'd' || strchr(p + 2, '%'))
                        return -EINVAL;
@@ -2473,7 +2478,8 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
        }
 
        if (set->timeout &&
-           nla_put_be64(skb, NFTA_SET_TIMEOUT, cpu_to_be64(set->timeout)))
+           nla_put_be64(skb, NFTA_SET_TIMEOUT, cpu_to_be64(set->timeout),
+                        NFTA_SET_PAD))
                goto nla_put_failure;
        if (set->gc_int &&
            nla_put_be32(skb, NFTA_SET_GC_INTERVAL, htonl(set->gc_int)))
@@ -2690,7 +2696,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
        struct nft_table *table;
        struct nft_set *set;
        struct nft_ctx ctx;
-       char name[IFNAMSIZ];
+       char name[NFT_SET_MAXNAMELEN];
        unsigned int size;
        bool create;
        u64 timeout;
@@ -3076,7 +3082,8 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
 
        if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT) &&
            nla_put_be64(skb, NFTA_SET_ELEM_TIMEOUT,
-                        cpu_to_be64(*nft_set_ext_timeout(ext))))
+                        cpu_to_be64(*nft_set_ext_timeout(ext)),
+                        NFTA_SET_ELEM_PAD))
                goto nla_put_failure;
 
        if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) {
@@ -3089,7 +3096,8 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
                        expires = 0;
 
                if (nla_put_be64(skb, NFTA_SET_ELEM_EXPIRATION,
-                                cpu_to_be64(jiffies_to_msecs(expires))))
+                                cpu_to_be64(jiffies_to_msecs(expires)),
+                                NFTA_SET_ELEM_PAD))
                        goto nla_put_failure;
        }
 
@@ -3367,6 +3375,22 @@ void nft_set_elem_destroy(const struct nft_set *set, void *elem)
 }
 EXPORT_SYMBOL_GPL(nft_set_elem_destroy);
 
+static int nft_setelem_parse_flags(const struct nft_set *set,
+                                  const struct nlattr *attr, u32 *flags)
+{
+       if (attr == NULL)
+               return 0;
+
+       *flags = ntohl(nla_get_be32(attr));
+       if (*flags & ~NFT_SET_ELEM_INTERVAL_END)
+               return -EINVAL;
+       if (!(set->flags & NFT_SET_INTERVAL) &&
+           *flags & NFT_SET_ELEM_INTERVAL_END)
+               return -EINVAL;
+
+       return 0;
+}
+
 static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                            const struct nlattr *attr)
 {
@@ -3380,8 +3404,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
        struct nft_data data;
        enum nft_registers dreg;
        struct nft_trans *trans;
+       u32 flags = 0;
        u64 timeout;
-       u32 flags;
        u8 ulen;
        int err;
 
@@ -3395,17 +3419,11 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
 
        nft_set_ext_prepare(&tmpl);
 
-       flags = 0;
-       if (nla[NFTA_SET_ELEM_FLAGS] != NULL) {
-               flags = ntohl(nla_get_be32(nla[NFTA_SET_ELEM_FLAGS]));
-               if (flags & ~NFT_SET_ELEM_INTERVAL_END)
-                       return -EINVAL;
-               if (!(set->flags & NFT_SET_INTERVAL) &&
-                   flags & NFT_SET_ELEM_INTERVAL_END)
-                       return -EINVAL;
-               if (flags != 0)
-                       nft_set_ext_add(&tmpl, NFT_SET_EXT_FLAGS);
-       }
+       err = nft_setelem_parse_flags(set, nla[NFTA_SET_ELEM_FLAGS], &flags);
+       if (err < 0)
+               return err;
+       if (flags != 0)
+               nft_set_ext_add(&tmpl, NFT_SET_EXT_FLAGS);
 
        if (set->flags & NFT_SET_MAP) {
                if (nla[NFTA_SET_ELEM_DATA] == NULL &&
@@ -3574,9 +3592,13 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
                           const struct nlattr *attr)
 {
        struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
+       struct nft_set_ext_tmpl tmpl;
        struct nft_data_desc desc;
        struct nft_set_elem elem;
+       struct nft_set_ext *ext;
        struct nft_trans *trans;
+       u32 flags = 0;
+       void *priv;
        int err;
 
        err = nla_parse_nested(nla, NFTA_SET_ELEM_MAX, attr,
@@ -3588,6 +3610,14 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
        if (nla[NFTA_SET_ELEM_KEY] == NULL)
                goto err1;
 
+       nft_set_ext_prepare(&tmpl);
+
+       err = nft_setelem_parse_flags(set, nla[NFTA_SET_ELEM_FLAGS], &flags);
+       if (err < 0)
+               return err;
+       if (flags != 0)
+               nft_set_ext_add(&tmpl, NFT_SET_EXT_FLAGS);
+
        err = nft_data_init(ctx, &elem.key.val, sizeof(elem.key), &desc,
                            nla[NFTA_SET_ELEM_KEY]);
        if (err < 0)
@@ -3597,24 +3627,40 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
        if (desc.type != NFT_DATA_VALUE || desc.len != set->klen)
                goto err2;
 
+       nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, desc.len);
+
+       err = -ENOMEM;
+       elem.priv = nft_set_elem_init(set, &tmpl, elem.key.val.data, NULL, 0,
+                                     GFP_KERNEL);
+       if (elem.priv == NULL)
+               goto err2;
+
+       ext = nft_set_elem_ext(set, elem.priv);
+       if (flags)
+               *nft_set_ext_flags(ext) = flags;
+
        trans = nft_trans_elem_alloc(ctx, NFT_MSG_DELSETELEM, set);
        if (trans == NULL) {
                err = -ENOMEM;
-               goto err2;
+               goto err3;
        }
 
-       elem.priv = set->ops->deactivate(set, &elem);
-       if (elem.priv == NULL) {
+       priv = set->ops->deactivate(set, &elem);
+       if (priv == NULL) {
                err = -ENOENT;
-               goto err3;
+               goto err4;
        }
+       kfree(elem.priv);
+       elem.priv = priv;
 
        nft_trans_elem(trans) = elem;
        list_add_tail(&trans->list, &ctx->net->nft.commit_list);
        return 0;
 
-err3:
+err4:
        kfree(trans);
+err3:
+       kfree(elem.priv);
 err2:
        nft_data_uninit(&elem.key.val, desc.type);
 err1:
index e9e959f65d917ce018e7490fd08c7a308f7809a9..39eb1cc62e91fdda9608f96eb678f061f7b91a0c 100644 (file)
@@ -156,7 +156,8 @@ static int nf_trace_fill_rule_info(struct sk_buff *nlskb,
                return 0;
 
        return nla_put_be64(nlskb, NFTA_TRACE_RULE_HANDLE,
-                           cpu_to_be64(info->rule->handle));
+                           cpu_to_be64(info->rule->handle),
+                           NFTA_TRACE_PAD);
 }
 
 void nft_trace_notify(struct nft_traceinfo *info)
@@ -174,7 +175,7 @@ void nft_trace_notify(struct nft_traceinfo *info)
        size = nlmsg_total_size(sizeof(struct nfgenmsg)) +
                nla_total_size(NFT_TABLE_MAXNAMELEN) +
                nla_total_size(NFT_CHAIN_MAXNAMELEN) +
-               nla_total_size(sizeof(__be64)) +        /* rule handle */
+               nla_total_size_64bit(sizeof(__be64)) +  /* rule handle */
                nla_total_size(sizeof(__be32)) +        /* trace type */
                nla_total_size(0) +                     /* VERDICT, nested */
                        nla_total_size(sizeof(u32)) +   /* verdict code */
index 4c2b4c0c4d5fa4ac209ab85020e97e14ed716ab9..d016066a25e37a44d5d6bb58204058e87f6b974f 100644 (file)
@@ -160,15 +160,18 @@ nfnl_acct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
                pkts = atomic64_read(&acct->pkts);
                bytes = atomic64_read(&acct->bytes);
        }
-       if (nla_put_be64(skb, NFACCT_PKTS, cpu_to_be64(pkts)) ||
-           nla_put_be64(skb, NFACCT_BYTES, cpu_to_be64(bytes)) ||
+       if (nla_put_be64(skb, NFACCT_PKTS, cpu_to_be64(pkts),
+                        NFACCT_PAD) ||
+           nla_put_be64(skb, NFACCT_BYTES, cpu_to_be64(bytes),
+                        NFACCT_PAD) ||
            nla_put_be32(skb, NFACCT_USE, htonl(atomic_read(&acct->refcnt))))
                goto nla_put_failure;
        if (acct->flags & NFACCT_F_QUOTA) {
                u64 *quota = (u64 *)acct->data;
 
                if (nla_put_be32(skb, NFACCT_FLAGS, htonl(old_flags)) ||
-                   nla_put_be64(skb, NFACCT_QUOTA, cpu_to_be64(*quota)))
+                   nla_put_be64(skb, NFACCT_QUOTA, cpu_to_be64(*quota),
+                                NFACCT_PAD))
                        goto nla_put_failure;
        }
        nlmsg_end(skb, nlh);
index 2671b9deb103735ff203999286a8cfbdde434f12..3c84f14326f56da253fc1dd55f58af1ebaba79e8 100644 (file)
@@ -306,10 +306,10 @@ static void ctnl_untimeout(struct net *net, struct ctnl_timeout *timeout)
        int i;
 
        local_bh_disable();
-       for (i = 0; i < net->ct.htable_size; i++) {
+       for (i = 0; i < nf_conntrack_htable_size; i++) {
                nf_conntrack_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
-               if (i < net->ct.htable_size) {
-                       hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
+               if (i < nf_conntrack_htable_size) {
+                       hlist_nulls_for_each_entry(h, nn, &nf_conntrack_hash[i], hnnode)
                                untimeout(h, timeout);
                }
                spin_unlock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
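The loop above relies on lock striping: nf_conntrack_locks is a fixed array of CONNTRACK_LOCKS spinlocks shared by all buckets, and the table size is re-checked after taking the lock because the conntrack hash can be resized at runtime. A generic sketch of the pattern (table_size, locks and NLOCKS are placeholders, not names from this file):

    /* Bucket i is protected by lock i % NLOCKS; re-check the size under
     * the lock in case the table shrank while we were walking it. */
    static void walk_all_buckets(void (*fn)(unsigned int bucket))
    {
            unsigned int i;

            for (i = 0; i < table_size; i++) {
                    spin_lock(&locks[i % NLOCKS]);
                    if (i < table_size)
                            fn(i);
                    spin_unlock(&locks[i % NLOCKS]);
            }
    }
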
index cb5b630a645b5357c9173e81cf73e076c701706b..aa93877ab6e2629bc31791135ace6ef474abd35e 100644 (file)
@@ -295,6 +295,59 @@ static u32 nfqnl_get_sk_secctx(struct sk_buff *skb, char **secdata)
        return seclen;
 }
 
+static u32 nfqnl_get_bridge_size(struct nf_queue_entry *entry)
+{
+       struct sk_buff *entskb = entry->skb;
+       u32 nlalen = 0;
+
+       if (entry->state.pf != PF_BRIDGE || !skb_mac_header_was_set(entskb))
+               return 0;
+
+       if (skb_vlan_tag_present(entskb))
+               nlalen += nla_total_size(nla_total_size(sizeof(__be16)) +
+                                        nla_total_size(sizeof(__be16)));
+
+       if (entskb->network_header > entskb->mac_header)
+               nlalen += nla_total_size((entskb->network_header -
+                                         entskb->mac_header));
+
+       return nlalen;
+}
+
+static int nfqnl_put_bridge(struct nf_queue_entry *entry, struct sk_buff *skb)
+{
+       struct sk_buff *entskb = entry->skb;
+
+       if (entry->state.pf != PF_BRIDGE || !skb_mac_header_was_set(entskb))
+               return 0;
+
+       if (skb_vlan_tag_present(entskb)) {
+               struct nlattr *nest;
+
+               nest = nla_nest_start(skb, NFQA_VLAN | NLA_F_NESTED);
+               if (!nest)
+                       goto nla_put_failure;
+
+               if (nla_put_be16(skb, NFQA_VLAN_TCI, htons(entskb->vlan_tci)) ||
+                   nla_put_be16(skb, NFQA_VLAN_PROTO, entskb->vlan_proto))
+                       goto nla_put_failure;
+
+               nla_nest_end(skb, nest);
+       }
+
+       if (entskb->mac_header < entskb->network_header) {
+               int len = (int)(entskb->network_header - entskb->mac_header);
+
+               if (nla_put(skb, NFQA_L2HDR, len, skb_mac_header(entskb)))
+                       goto nla_put_failure;
+       }
+
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
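
The nested size computation in nfqnl_get_bridge_size() follows netlink's TLV accounting: each attribute costs NLA_HDRLEN plus its payload, rounded up to a 4-byte boundary, and a nest is just an attribute whose payload is the sum of its children. A standalone check of the NFQA_VLAN nest size (plain C, userspace; not kernel code):

    #include <stdio.h>

    #define NLA_ALIGNTO  4
    #define NLA_ALIGN(n) (((n) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
    #define NLA_HDRLEN   4   /* struct nlattr: u16 len + u16 type */

    static int nla_total(int payload)
    {
            return NLA_ALIGN(NLA_HDRLEN + payload);
    }

    int main(void)
    {
            int tci   = nla_total(2);  /* NFQA_VLAN_TCI, __be16 */
            int proto = nla_total(2);  /* NFQA_VLAN_PROTO, __be16 */

            printf("NFQA_VLAN nest: %d bytes\n", nla_total(tci + proto));
            return 0;
    }
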
+
 static struct sk_buff *
 nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
                           struct nf_queue_entry *entry,
@@ -334,6 +387,8 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
        if (entskb->tstamp.tv64)
                size += nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));
 
+       size += nfqnl_get_bridge_size(entry);
+
        if (entry->state.hook <= NF_INET_FORWARD ||
           (entry->state.hook == NF_INET_POST_ROUTING && entskb->sk == NULL))
                csum_verify = !skb_csum_unnecessary(entskb);
@@ -497,6 +552,9 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
                }
        }
 
+       if (nfqnl_put_bridge(entry, skb) < 0)
+               goto nla_put_failure;
+
        if (entskb->tstamp.tv64) {
                struct nfqnl_msg_packet_timestamp ts;
                struct timespec64 kts = ktime_to_timespec64(skb->tstamp);
@@ -911,12 +969,18 @@ static struct notifier_block nfqnl_rtnl_notifier = {
        .notifier_call  = nfqnl_rcv_nl_event,
 };
 
+static const struct nla_policy nfqa_vlan_policy[NFQA_VLAN_MAX + 1] = {
+       [NFQA_VLAN_TCI]         = { .type = NLA_U16},
+       [NFQA_VLAN_PROTO]       = { .type = NLA_U16},
+};
+
 static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
        [NFQA_VERDICT_HDR]      = { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
        [NFQA_MARK]             = { .type = NLA_U32 },
        [NFQA_PAYLOAD]          = { .type = NLA_UNSPEC },
        [NFQA_CT]               = { .type = NLA_UNSPEC },
        [NFQA_EXP]              = { .type = NLA_UNSPEC },
+       [NFQA_VLAN]             = { .type = NLA_NESTED },
 };
 
 static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
@@ -1030,6 +1094,40 @@ static struct nf_conn *nfqnl_ct_parse(struct nfnl_ct_hook *nfnl_ct,
        return ct;
 }
 
+static int nfqa_parse_bridge(struct nf_queue_entry *entry,
+                            const struct nlattr * const nfqa[])
+{
+       if (nfqa[NFQA_VLAN]) {
+               struct nlattr *tb[NFQA_VLAN_MAX + 1];
+               int err;
+
+               err = nla_parse_nested(tb, NFQA_VLAN_MAX, nfqa[NFQA_VLAN],
+                                      nfqa_vlan_policy);
+               if (err < 0)
+                       return err;
+
+               if (!tb[NFQA_VLAN_TCI] || !tb[NFQA_VLAN_PROTO])
+                       return -EINVAL;
+
+               entry->skb->vlan_tci = ntohs(nla_get_be16(tb[NFQA_VLAN_TCI]));
+               entry->skb->vlan_proto = nla_get_be16(tb[NFQA_VLAN_PROTO]);
+       }
+
+       if (nfqa[NFQA_L2HDR]) {
+               int mac_header_len = entry->skb->network_header -
+                       entry->skb->mac_header;
+
+               if (mac_header_len != nla_len(nfqa[NFQA_L2HDR]))
+                       return -EINVAL;
+               else if (mac_header_len > 0)
+                       memcpy(skb_mac_header(entry->skb),
+                              nla_data(nfqa[NFQA_L2HDR]),
+                              mac_header_len);
+       }
+
+       return 0;
+}
+
 static int nfqnl_recv_verdict(struct net *net, struct sock *ctnl,
                              struct sk_buff *skb,
                              const struct nlmsghdr *nlh,
@@ -1045,6 +1143,7 @@ static int nfqnl_recv_verdict(struct net *net, struct sock *ctnl,
        struct nfnl_ct_hook *nfnl_ct;
        struct nf_conn *ct = NULL;
        struct nfnl_queue_net *q = nfnl_queue_pernet(net);
+       int err;
 
        queue = instance_lookup(q, queue_num);
        if (!queue)
@@ -1071,6 +1170,12 @@ static int nfqnl_recv_verdict(struct net *net, struct sock *ctnl,
                        ct = nfqnl_ct_parse(nfnl_ct, nlh, nfqa, entry, &ctinfo);
        }
 
+       if (entry->state.pf == PF_BRIDGE) {
+               err = nfqa_parse_bridge(entry, nfqa);
+               if (err < 0)
+                       return err;
+       }
+
        if (nfqa[NFQA_PAYLOAD]) {
                u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]);
                int diff = payload_len - entry->skb->len;
index c9743f78f21999ae01ed735a63c4e856e79cdc37..77db8358ab14fa600601ad73ef91c26c843db605 100644 (file)
@@ -76,8 +76,10 @@ static int nft_counter_dump(struct sk_buff *skb, const struct nft_expr *expr)
 
        nft_counter_fetch(priv->counter, &total);
 
-       if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes)) ||
-           nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.packets)))
+       if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes),
+                        NFTA_COUNTER_PAD) ||
+           nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.packets),
+                        NFTA_COUNTER_PAD))
                goto nla_put_failure;
        return 0;
 
index d4a4619fcebc927acd1bf730e044d29813d096d3..137e308d5b24c0865336da92e97686d8fef0b9d1 100644 (file)
@@ -197,6 +197,14 @@ static void nft_ct_set_eval(const struct nft_expr *expr,
                        nf_conntrack_event_cache(IPCT_MARK, ct);
                }
                break;
+#endif
+#ifdef CONFIG_NF_CONNTRACK_LABELS
+       case NFT_CT_LABELS:
+               nf_connlabels_replace(ct,
+                                     &regs->data[priv->sreg],
+                                     &regs->data[priv->sreg],
+                                     NF_CT_LABELS_MAX_SIZE / sizeof(u32));
+               break;
 #endif
        default:
                break;
@@ -364,6 +372,16 @@ static int nft_ct_set_init(const struct nft_ctx *ctx,
        case NFT_CT_MARK:
                len = FIELD_SIZEOF(struct nf_conn, mark);
                break;
+#endif
+#ifdef CONFIG_NF_CONNTRACK_LABELS
+       case NFT_CT_LABELS:
+               if (tb[NFTA_CT_DIRECTION])
+                       return -EINVAL;
+               len = NF_CT_LABELS_MAX_SIZE;
+               err = nf_connlabels_get(ctx->net, (len * BITS_PER_BYTE) - 1);
+               if (err)
+                       return err;
+               break;
 #endif
        default:
                return -EOPNOTSUPP;
@@ -384,6 +402,18 @@ static int nft_ct_set_init(const struct nft_ctx *ctx,
 static void nft_ct_destroy(const struct nft_ctx *ctx,
                           const struct nft_expr *expr)
 {
+       struct nft_ct *priv = nft_expr_priv(expr);
+
+       switch (priv->key) {
+#ifdef CONFIG_NF_CONNTRACK_LABELS
+       case NFT_CT_LABELS:
+               nf_connlabels_put(ctx->net);
+               break;
+#endif
+       default:
+               break;
+       }
+
        nft_ct_l3proto_module_put(ctx->afi->family);
 }
 
@@ -484,6 +514,8 @@ static struct nft_expr_type nft_ct_type __read_mostly = {
 
 static int __init nft_ct_module_init(void)
 {
+       BUILD_BUG_ON(NF_CT_LABELS_MAX_SIZE > NFT_REG_SIZE);
+
        return nft_register_expr(&nft_ct_type);
 }
 
index 9dec3bd1b63cc01a784ec025f7cf97f42b38fd06..78d4914fb39c41bf4ceeec2febd22ae0a64a274e 100644 (file)
@@ -227,7 +227,8 @@ static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr)
                goto nla_put_failure;
        if (nla_put_string(skb, NFTA_DYNSET_SET_NAME, priv->set->name))
                goto nla_put_failure;
-       if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT, cpu_to_be64(priv->timeout)))
+       if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT, cpu_to_be64(priv->timeout),
+                        NFTA_DYNSET_PAD))
                goto nla_put_failure;
        if (priv->expr && nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr))
                goto nla_put_failure;
index 99d18578afc669eaea60d99a19844a7cb1f0296d..070b98938e02a6a5239318151cff3bff94fb32fb 100644 (file)
@@ -97,8 +97,10 @@ static int nft_limit_dump(struct sk_buff *skb, const struct nft_limit *limit,
        u64 secs = div_u64(limit->nsecs, NSEC_PER_SEC);
        u64 rate = limit->rate - limit->burst;
 
-       if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(rate)) ||
-           nla_put_be64(skb, NFTA_LIMIT_UNIT, cpu_to_be64(secs)) ||
+       if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(rate),
+                        NFTA_LIMIT_PAD) ||
+           nla_put_be64(skb, NFTA_LIMIT_UNIT, cpu_to_be64(secs),
+                        NFTA_LIMIT_PAD) ||
            nla_put_be32(skb, NFTA_LIMIT_BURST, htonl(limit->burst)) ||
            nla_put_be32(skb, NFTA_LIMIT_TYPE, htonl(type)) ||
            nla_put_be32(skb, NFTA_LIMIT_FLAGS, htonl(flags)))
index 1c30f41cff5b44c5059e159fe269ed33aea3b824..f762094af7c1ca7e7684448381e7e49ee430045e 100644 (file)
@@ -29,6 +29,17 @@ struct nft_rbtree_elem {
        struct nft_set_ext      ext;
 };
 
+static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe)
+{
+       return nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) &&
+              (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END);
+}
+
+static bool nft_rbtree_equal(const struct nft_set *set, const void *this,
+                            const struct nft_rbtree_elem *interval)
+{
+       return memcmp(this, nft_set_ext_key(&interval->ext), set->klen) == 0;
+}
 
 static bool nft_rbtree_lookup(const struct nft_set *set, const u32 *key,
                              const struct nft_set_ext **ext)
@@ -37,6 +48,7 @@ static bool nft_rbtree_lookup(const struct nft_set *set, const u32 *key,
        const struct nft_rbtree_elem *rbe, *interval = NULL;
        const struct rb_node *parent;
        u8 genmask = nft_genmask_cur(read_pnet(&set->pnet));
+       const void *this;
        int d;
 
        spin_lock_bh(&nft_rbtree_lock);
@@ -44,9 +56,16 @@ static bool nft_rbtree_lookup(const struct nft_set *set, const u32 *key,
        while (parent != NULL) {
                rbe = rb_entry(parent, struct nft_rbtree_elem, node);
 
-               d = memcmp(nft_set_ext_key(&rbe->ext), key, set->klen);
+               this = nft_set_ext_key(&rbe->ext);
+               d = memcmp(this, key, set->klen);
                if (d < 0) {
                        parent = parent->rb_left;
+                       /* In case of adjacent ranges, we always see the high
+                        * part of the range first, before the low one.
+                        * So don't update interval if the keys are equal.
+                        */
+                       if (interval && nft_rbtree_equal(set, this, interval))
+                               continue;
                        interval = rbe;
                } else if (d > 0)
                        parent = parent->rb_right;
@@ -56,9 +75,7 @@ found:
                                parent = parent->rb_left;
                                continue;
                        }
-                       if (nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) &&
-                           *nft_set_ext_flags(&rbe->ext) &
-                           NFT_SET_ELEM_INTERVAL_END)
+                       if (nft_rbtree_interval_end(rbe))
                                goto out;
                        spin_unlock_bh(&nft_rbtree_lock);
 
@@ -98,9 +115,16 @@ static int __nft_rbtree_insert(const struct nft_set *set,
                else if (d > 0)
                        p = &parent->rb_right;
                else {
-                       if (nft_set_elem_active(&rbe->ext, genmask))
-                               return -EEXIST;
-                       p = &parent->rb_left;
+                       if (nft_set_elem_active(&rbe->ext, genmask)) {
+                               if (nft_rbtree_interval_end(rbe) &&
+                                   !nft_rbtree_interval_end(new))
+                                       p = &parent->rb_left;
+                               else if (!nft_rbtree_interval_end(rbe) &&
+                                        nft_rbtree_interval_end(new))
+                                       p = &parent->rb_right;
+                               else
+                                       return -EEXIST;
+                       }
                }
        }
        rb_link_node(&new->node, parent, p);
@@ -145,7 +169,7 @@ static void *nft_rbtree_deactivate(const struct nft_set *set,
 {
        const struct nft_rbtree *priv = nft_set_priv(set);
        const struct rb_node *parent = priv->root.rb_node;
-       struct nft_rbtree_elem *rbe;
+       struct nft_rbtree_elem *rbe, *this = elem->priv;
        u8 genmask = nft_genmask_cur(read_pnet(&set->pnet));
        int d;
 
@@ -163,6 +187,15 @@ static void *nft_rbtree_deactivate(const struct nft_set *set,
                                parent = parent->rb_left;
                                continue;
                        }
+                       if (nft_rbtree_interval_end(rbe) &&
+                           !nft_rbtree_interval_end(this)) {
+                               parent = parent->rb_left;
+                               continue;
+                       } else if (!nft_rbtree_interval_end(rbe) &&
+                                  nft_rbtree_interval_end(this)) {
+                               parent = parent->rb_right;
+                               continue;
+                       }
                        nft_set_elem_change_active(set, &rbe->ext);
                        return rbe;
                }
index 582c9cfd6567ce4c7d5b3f86c15732b33a63e1b4..c69c892231d7b6b8699f20913847f64f1a758a42 100644 (file)
@@ -416,6 +416,47 @@ int xt_check_match(struct xt_mtchk_param *par,
 }
 EXPORT_SYMBOL_GPL(xt_check_match);
 
+/**
+ * xt_check_entry_match - check that matches end before start of target
+ *
+ * @match: beginning of xt_entry_match
+ * @target: beginning of this rule's target (alleged end of matches)
+ * @alignment: alignment requirement of match structures
+ *
+ * Validates that all matches add up to the beginning of the target,
+ * and that each match covers at least the base structure size.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int xt_check_entry_match(const char *match, const char *target,
+                               const size_t alignment)
+{
+       const struct xt_entry_match *pos;
+       int length = target - match;
+
+       if (length == 0) /* no matches */
+               return 0;
+
+       pos = (struct xt_entry_match *)match;
+       do {
+               if ((unsigned long)pos % alignment)
+                       return -EINVAL;
+
+               if (length < (int)sizeof(struct xt_entry_match))
+                       return -EINVAL;
+
+               if (pos->u.match_size < sizeof(struct xt_entry_match))
+                       return -EINVAL;
+
+               if (pos->u.match_size > length)
+                       return -EINVAL;
+
+               length -= pos->u.match_size;
+               pos = ((void *)((char *)(pos) + (pos)->u.match_size));
+       } while (length > 0);
+
+       return 0;
+}
+
 #ifdef CONFIG_COMPAT
 int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
 {
@@ -485,13 +526,14 @@ int xt_compat_match_offset(const struct xt_match *match)
 }
 EXPORT_SYMBOL_GPL(xt_compat_match_offset);
 
-int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
-                             unsigned int *size)
+void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
+                              unsigned int *size)
 {
        const struct xt_match *match = m->u.kernel.match;
        struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
        int pad, off = xt_compat_match_offset(match);
        u_int16_t msize = cm->u.user.match_size;
+       char name[sizeof(m->u.user.name)];
 
        m = *dstptr;
        memcpy(m, cm, sizeof(*cm));
@@ -505,10 +547,12 @@ int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
 
        msize += off;
        m->u.user.match_size = msize;
+       strlcpy(name, match->name, sizeof(name));
+       module_put(match->me);
+       strncpy(m->u.user.name, name, sizeof(m->u.user.name));
 
        *size += off;
        *dstptr += msize;
-       return 0;
 }
 EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
 
@@ -539,8 +583,125 @@ int xt_compat_match_to_user(const struct xt_entry_match *m,
        return 0;
 }
 EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
+
+/* non-compat version may have padding after verdict */
+struct compat_xt_standard_target {
+       struct compat_xt_entry_target t;
+       compat_uint_t verdict;
+};
+
+int xt_compat_check_entry_offsets(const void *base, const char *elems,
+                                 unsigned int target_offset,
+                                 unsigned int next_offset)
+{
+       long size_of_base_struct = elems - (const char *)base;
+       const struct compat_xt_entry_target *t;
+       const char *e = base;
+
+       if (target_offset < size_of_base_struct)
+               return -EINVAL;
+
+       if (target_offset + sizeof(*t) > next_offset)
+               return -EINVAL;
+
+       t = (void *)(e + target_offset);
+       if (t->u.target_size < sizeof(*t))
+               return -EINVAL;
+
+       if (target_offset + t->u.target_size > next_offset)
+               return -EINVAL;
+
+       if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
+           target_offset + sizeof(struct compat_xt_standard_target) != next_offset)
+               return -EINVAL;
+
+       /* compat_xt_entry match has less strict alignment requirements,
+        * otherwise they are identical.  In case of padding differences
+        * we need to add compat version of xt_check_entry_match.
+        */
+       BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));
+
+       return xt_check_entry_match(elems, base + target_offset,
+                                   __alignof__(struct compat_xt_entry_match));
+}
+EXPORT_SYMBOL(xt_compat_check_entry_offsets);
 #endif /* CONFIG_COMPAT */
 
+/**
+ * xt_check_entry_offsets - validate arp/ip/ip6t_entry
+ *
+ * @base: pointer to arp/ip/ip6t_entry
+ * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems
+ * @target_offset: the arp/ip/ip6_t->target_offset
+ * @next_offset: the arp/ip/ip6_t->next_offset
+ *
+ * validates that target_offset and next_offset are sane and that all
+ * match sizes (if any) align with the target offset.
+ *
+ * This function does not validate the targets or matches themselves, it
+ * only tests that all the offsets and sizes are correct, that all
+ * match structures are aligned, and that the last structure ends where
+ * the target structure begins.
+ *
+ * Also see xt_compat_check_entry_offsets for CONFIG_COMPAT version.
+ *
+ * The arp/ip/ip6t_entry structure @base must have passed following tests:
+ * - it must point to a valid memory location
+ * - base to base + next_offset must be accessible, i.e. not exceed allocated
+ *   length.
+ *
+ * A well-formed entry looks like this:
+ *
+ * ip(6)t_entry   match [mtdata]  match [mtdata] target [tgdata] ip(6)t_entry
+ * e->elems[]-----'                              |               |
+ *                matchsize                      |               |
+ *                                matchsize      |               |
+ *                                               |               |
+ * target_offset---------------------------------'               |
+ * next_offset---------------------------------------------------'
+ *
+ * elems[]: flexible array member at end of ip(6)/arpt_entry struct.
+ *          This is where matches (if any) and the target reside.
+ * target_offset: beginning of target.
+ * next_offset: start of the next rule; also: size of this rule.
+ * Since targets have a minimum size, target_offset + minlen <= next_offset.
+ *
+ * Every match stores its size, sum of sizes must not exceed target_offset.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int xt_check_entry_offsets(const void *base,
+                          const char *elems,
+                          unsigned int target_offset,
+                          unsigned int next_offset)
+{
+       long size_of_base_struct = elems - (const char *)base;
+       const struct xt_entry_target *t;
+       const char *e = base;
+
+       /* target start is within the ip/ip6/arpt_entry struct */
+       if (target_offset < size_of_base_struct)
+               return -EINVAL;
+
+       if (target_offset + sizeof(*t) > next_offset)
+               return -EINVAL;
+
+       t = (void *)(e + target_offset);
+       if (t->u.target_size < sizeof(*t))
+               return -EINVAL;
+
+       if (target_offset + t->u.target_size > next_offset)
+               return -EINVAL;
+
+       if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
+           target_offset + sizeof(struct xt_standard_target) != next_offset)
+               return -EINVAL;
+
+       return xt_check_entry_match(elems, base + target_offset,
+                                   __alignof__(struct xt_entry_match));
+}
+EXPORT_SYMBOL(xt_check_entry_offsets);
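
Concretely, for a rule carrying a single match the offsets relate as below; the struct names are the ipv4 variants, and mtdata_len/tgdata_len are hypothetical payload sizes:

    unsigned int msize         = XT_ALIGN(sizeof(struct xt_entry_match)
                                          + mtdata_len);
    unsigned int target_offset = sizeof(struct ipt_entry) + msize;
    unsigned int next_offset   = target_offset
                                 + XT_ALIGN(sizeof(struct xt_entry_target)
                                            + tgdata_len);

    /* For such an entry, xt_check_entry_offsets(e, e->elems,
     * target_offset, next_offset) should return 0. */
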
+
 int xt_check_target(struct xt_tgchk_param *par,
                    unsigned int size, u_int8_t proto, bool inv_proto)
 {
@@ -591,6 +752,80 @@ int xt_check_target(struct xt_tgchk_param *par,
 }
 EXPORT_SYMBOL_GPL(xt_check_target);
 
+/**
+ * xt_copy_counters_from_user - copy counters and metadata from userspace
+ *
+ * @user: src pointer to userspace memory
+ * @len: alleged size of userspace memory
+ * @info: where to store the xt_counters_info metadata
+ * @compat: true if the setsockopt call is done by a 32bit task on a 64bit kernel
+ *
+ * Copies counter metadata from @user and stores it in @info.
+ *
+ * vmallocs memory to hold the counters, then copies the counter data
+ * from @user to the new memory and returns a pointer to it.
+ *
+ * If @compat is true, @info gets converted automatically to the 64bit
+ * representation.
+ *
+ * Return: pointer that the caller has to test via IS_ERR().
+ * If IS_ERR() is false, the caller has to vfree() the pointer.
+ */
+void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
+                                struct xt_counters_info *info, bool compat)
+{
+       void *mem;
+       u64 size;
+
+#ifdef CONFIG_COMPAT
+       if (compat) {
+               /* structures only differ in size due to alignment */
+               struct compat_xt_counters_info compat_tmp;
+
+               if (len <= sizeof(compat_tmp))
+                       return ERR_PTR(-EINVAL);
+
+               len -= sizeof(compat_tmp);
+               if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
+                       return ERR_PTR(-EFAULT);
+
+               strlcpy(info->name, compat_tmp.name, sizeof(info->name));
+               info->num_counters = compat_tmp.num_counters;
+               user += sizeof(compat_tmp);
+       } else
+#endif
+       {
+               if (len <= sizeof(*info))
+                       return ERR_PTR(-EINVAL);
+
+               len -= sizeof(*info);
+               if (copy_from_user(info, user, sizeof(*info)) != 0)
+                       return ERR_PTR(-EFAULT);
+
+               info->name[sizeof(info->name) - 1] = '\0';
+               user += sizeof(*info);
+       }
+
+       size = sizeof(struct xt_counters);
+       size *= info->num_counters;
+
+       if (size != (u64)len)
+               return ERR_PTR(-EINVAL);
+
+       mem = vmalloc(len);
+       if (!mem)
+               return ERR_PTR(-ENOMEM);
+
+       if (copy_from_user(mem, user, len) == 0)
+               return mem;
+
+       vfree(mem);
+       return ERR_PTR(-EFAULT);
+}
+EXPORT_SYMBOL_GPL(xt_copy_counters_from_user);
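
Per the contract above, callers are expected to follow roughly this shape (do_replace_counters() is a hypothetical consumer):

    struct xt_counters_info info;
    void *counters;
    int ret;

    counters = xt_copy_counters_from_user(arg, len, &info, compat);
    if (IS_ERR(counters))
            return PTR_ERR(counters);

    ret = do_replace_counters(&info, counters);
    vfree(counters);
    return ret;
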
+
 #ifdef CONFIG_COMPAT
 int xt_compat_target_offset(const struct xt_target *target)
 {
@@ -606,6 +841,7 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
        struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
        int pad, off = xt_compat_target_offset(target);
        u_int16_t tsize = ct->u.user.target_size;
+       char name[sizeof(t->u.user.name)];
 
        t = *dstptr;
        memcpy(t, ct, sizeof(*ct));
@@ -619,6 +855,9 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
 
        tsize += off;
        t->u.user.target_size = tsize;
+       strlcpy(name, target->name, sizeof(name));
+       module_put(target->me);
+       strncpy(t->u.user.name, name, sizeof(t->u.user.name));
 
        *size += off;
        *dstptr += tsize;
index bb9cbeb188686f5f858ba8e35493457063d3f6ea..a79af255561aaa5692daa0d0b4642e817ee22227 100644 (file)
@@ -18,6 +18,16 @@ MODULE_DESCRIPTION("Xtables: add/match connection tracking labels");
 MODULE_ALIAS("ipt_connlabel");
 MODULE_ALIAS("ip6t_connlabel");
 
+static bool connlabel_match(const struct nf_conn *ct, u16 bit)
+{
+       struct nf_conn_labels *labels = nf_ct_labels_find(ct);
+
+       if (!labels)
+               return false;
+
+       return BIT_WORD(bit) < labels->words && test_bit(bit, labels->bits);
+}
+
 static bool
 connlabel_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
@@ -33,7 +43,7 @@ connlabel_mt(const struct sk_buff *skb, struct xt_action_param *par)
        if (info->options & XT_CONNLABEL_OP_SET)
                return (nf_connlabel_set(ct, info->bit) == 0) ^ invert;
 
-       return nf_connlabel_match(ct, info->bit) ^ invert;
+       return connlabel_match(ct, info->bit) ^ invert;
 }
 
 static int connlabel_mt_check(const struct xt_mtchk_param *par)
@@ -55,7 +65,7 @@ static int connlabel_mt_check(const struct xt_mtchk_param *par)
                return ret;
        }
 
-       ret = nf_connlabels_get(par->net, info->bit + 1);
+       ret = nf_connlabels_get(par->net, info->bit);
        if (ret < 0)
                nf_ct_l3proto_module_put(par->family);
        return ret;
index 0f16bf635480dcbc28341e12487cb1c63037706c..aeefe127691a8f14f132a5c6db8ad3e8a68d546a 100644 (file)
@@ -688,7 +688,7 @@ static int netlink_release(struct socket *sock)
 
        skb_queue_purge(&sk->sk_write_queue);
 
-       if (nlk->portid) {
+       if (nlk->portid && nlk->bound) {
                struct netlink_notify n = {
                                                .net = sock_net(sk),
                                                .protocol = sk->sk_protocol,
index e9dd47b2a85b9e7b65795335b5722e89ac5c3ed8..879185fe183fd0ffa2bf037725faf0a8eb5f166a 100644 (file)
@@ -461,7 +461,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
                mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);
 
                if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
-                       set_ipv6_addr(skb, key->ipv6_proto, saddr, masked,
+                       set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
                                      true);
                        memcpy(&flow_key->ipv6.addr.src, masked,
                               sizeof(flow_key->ipv6.addr.src));
@@ -483,7 +483,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
                                                             NULL, &flags)
                                               != NEXTHDR_ROUTING);
 
-                       set_ipv6_addr(skb, key->ipv6_proto, daddr, masked,
+                       set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
                                      recalc_csum);
                        memcpy(&flow_key->ipv6.addr.dst, masked,
                               sizeof(flow_key->ipv6.addr.dst));
index 1b9d286756be7ccf3a9b6f9eaad6fe07c385f8fb..9f0bc49fa96956c6360f5972411f5fdb3961b788 100644 (file)
@@ -367,6 +367,7 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key,
        } else if (key->eth.type == htons(ETH_P_IPV6)) {
                enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
 
+               skb_orphan(skb);
                memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
                err = nf_ct_frag6_gather(net, skb, user);
                if (err)
@@ -438,20 +439,12 @@ ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone,
        u8 protonum;
 
        l3proto = __nf_ct_l3proto_find(l3num);
-       if (!l3proto) {
-               pr_debug("ovs_ct_find_existing: Can't get l3proto\n");
-               return NULL;
-       }
        if (l3proto->get_l4proto(skb, skb_network_offset(skb), &dataoff,
                                 &protonum) <= 0) {
                pr_debug("ovs_ct_find_existing: Can't get protonum\n");
                return NULL;
        }
        l4proto = __nf_ct_l4proto_find(l3num, protonum);
-       if (!l4proto) {
-               pr_debug("ovs_ct_find_existing: Can't get l4proto\n");
-               return NULL;
-       }
        if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num,
                             protonum, net, &tuple, l3proto, l4proto)) {
                pr_debug("ovs_ct_find_existing: Can't get tuple\n");
@@ -1344,7 +1337,7 @@ void ovs_ct_init(struct net *net)
        unsigned int n_bits = sizeof(struct ovs_key_ct_labels) * BITS_PER_BYTE;
        struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
 
-       if (nf_connlabels_get(net, n_bits)) {
+       if (nf_connlabels_get(net, n_bits - 1)) {
                ovs_net->xt_label = false;
                OVS_NLERR(true, "Failed to set connlabel length");
        } else {
index 0cc66a4e492deb8484781434d211fef5fa84b25c..856bd8dba676418723c420b982af311c140f62fc 100644 (file)
@@ -738,9 +738,9 @@ static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts,
                len += nla_total_size(acts->orig_len);
 
        return len
-               + nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
+               + nla_total_size_64bit(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
                + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
-               + nla_total_size(8); /* OVS_FLOW_ATTR_USED */
+               + nla_total_size_64bit(8); /* OVS_FLOW_ATTR_USED */
 }
 
 /* Called with ovs_mutex or RCU read lock. */
@@ -754,11 +754,14 @@ static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow,
        ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);
 
        if (used &&
-           nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
+           nla_put_u64_64bit(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used),
+                             OVS_FLOW_ATTR_PAD))
                return -EMSGSIZE;
 
        if (stats.n_packets &&
-           nla_put(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats))
+           nla_put_64bit(skb, OVS_FLOW_ATTR_STATS,
+                         sizeof(struct ovs_flow_stats), &stats,
+                         OVS_FLOW_ATTR_PAD))
                return -EMSGSIZE;
 
        if ((u8)ntohs(tcp_flags) &&
@@ -1434,8 +1437,8 @@ static size_t ovs_dp_cmd_msg_size(void)
        size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));
 
        msgsize += nla_total_size(IFNAMSIZ);
-       msgsize += nla_total_size(sizeof(struct ovs_dp_stats));
-       msgsize += nla_total_size(sizeof(struct ovs_dp_megaflow_stats));
+       msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_stats));
+       msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_megaflow_stats));
        msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */
 
        return msgsize;
@@ -1462,13 +1465,13 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
                goto nla_put_failure;
 
        get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
-       if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
-                       &dp_stats))
+       if (nla_put_64bit(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
+                         &dp_stats, OVS_DP_ATTR_PAD))
                goto nla_put_failure;
 
-       if (nla_put(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
-                       sizeof(struct ovs_dp_megaflow_stats),
-                       &dp_megaflow_stats))
+       if (nla_put_64bit(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
+                         sizeof(struct ovs_dp_megaflow_stats),
+                         &dp_megaflow_stats, OVS_DP_ATTR_PAD))
                goto nla_put_failure;
 
        if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
@@ -1837,8 +1840,9 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
                goto nla_put_failure;
 
        ovs_vport_get_stats(vport, &vport_stats);
-       if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
-                   &vport_stats))
+       if (nla_put_64bit(skb, OVS_VPORT_ATTR_STATS,
+                         sizeof(struct ovs_vport_stats), &vport_stats,
+                         OVS_VPORT_ATTR_PAD))
                goto nla_put_failure;
 
        if (ovs_vport_get_upcall_portids(vport, skb))
index 689c1726422162abfbaa47c2b103c94971d5190e..0bb650f4f219933fc8f10d455714d7e9016334fe 100644 (file)
@@ -261,7 +261,7 @@ size_t ovs_tun_key_attr_size(void)
        /* Whenever adding new OVS_TUNNEL_KEY_ FIELDS, we should consider
         * updating this function.
         */
-       return    nla_total_size(8)    /* OVS_TUNNEL_KEY_ATTR_ID */
+       return    nla_total_size_64bit(8) /* OVS_TUNNEL_KEY_ATTR_ID */
                + nla_total_size(16)   /* OVS_TUNNEL_KEY_ATTR_IPV[46]_SRC */
                + nla_total_size(16)   /* OVS_TUNNEL_KEY_ATTR_IPV[46]_DST */
                + nla_total_size(1)    /* OVS_TUNNEL_KEY_ATTR_TOS */
@@ -720,7 +720,8 @@ static int __ip_tun_to_nlattr(struct sk_buff *skb,
                              unsigned short tun_proto)
 {
        if (output->tun_flags & TUNNEL_KEY &&
-           nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id))
+           nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id,
+                        OVS_TUNNEL_KEY_ATTR_PAD))
                return -EMSGSIZE;
        switch (tun_proto) {
        case AF_INET:
index 7c8b90bf0e540e971d69603987d218f44583bd1d..2ee48e447b724af8afe4c77fa64fe4512d1725fe 100644 (file)
@@ -165,11 +165,10 @@ static void do_setup(struct net_device *netdev)
 
        netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
        netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH |
-                             IFF_PHONY_HEADROOM;
+                             IFF_PHONY_HEADROOM | IFF_NO_QUEUE;
        netdev->destructor = internal_dev_destructor;
        netdev->ethtool_ops = &internal_dev_ethtool_ops;
        netdev->rtnl_link_ops = &internal_dev_link_ops;
-       netdev->tx_queue_len = 0;
 
        netdev->features = NETIF_F_LLTX | NETIF_F_SG | NETIF_F_FRAGLIST |
                           NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
index 0007e23202e4eb584f14adc8fae2b9e86c4c7988..4040eb92d9c9dc3b1ce0ba41227b6b1e9bd32eff 100644 (file)
@@ -2052,6 +2052,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
        u8 *skb_head = skb->data;
        int skb_len = skb->len;
        unsigned int snaplen, res;
+       bool is_drop_n_account = false;
 
        if (skb->pkt_type == PACKET_LOOPBACK)
                goto drop;
@@ -2140,6 +2141,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
        return 0;
 
 drop_n_acct:
+       is_drop_n_account = true;
        spin_lock(&sk->sk_receive_queue.lock);
        po->stats.stats1.tp_drops++;
        atomic_inc(&sk->sk_drops);
@@ -2151,7 +2153,10 @@ drop_n_restore:
                skb->len = skb_len;
        }
 drop:
-       consume_skb(skb);
+       if (!is_drop_n_account)
+               consume_skb(skb);
+       else
+               kfree_skb(skb);
        return 0;
 }
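
packet_rcv() here, and tpacket_rcv() in the next hunks, now make the same distinction on the way out: kfree_skb() fires the kfree_skb tracepoint that drop-monitoring tools count, while consume_skb() marks normal end-of-life, so only genuinely dropped packets register as drops. In sketch form:

    if (is_drop_n_account)
            kfree_skb(skb);     /* counted by drop monitors */
    else
            consume_skb(skb);   /* delivered or harmlessly ignored */
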
 
@@ -2170,6 +2175,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
        struct sk_buff *copy_skb = NULL;
        struct timespec ts;
        __u32 ts_status;
+       bool is_drop_n_account = false;
 
        /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
         * We may add members to them until current aligned size without forcing
@@ -2377,10 +2383,14 @@ drop_n_restore:
                skb->len = skb_len;
        }
 drop:
-       kfree_skb(skb);
+       if (!is_drop_n_account)
+               consume_skb(skb);
+       else
+               kfree_skb(skb);
        return 0;
 
 drop_n_account:
+       is_drop_n_account = true;
        po->stats.stats1.tp_drops++;
        spin_unlock(&sk->sk_receive_queue.lock);
 
@@ -3541,6 +3551,7 @@ static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
        i->ifindex = mreq->mr_ifindex;
        i->alen = mreq->mr_alen;
        memcpy(i->addr, mreq->mr_address, i->alen);
+       memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
        i->count = 1;
        i->next = po->mclist;
        po->mclist = i;
@@ -4171,7 +4182,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 
        /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
        if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
-               WARN(1, "Tx-ring is not supported.\n");
+               net_warn_ratelimited("Tx-ring is not supported.\n");
                goto out;
        }
 
diff --git a/net/qrtr/Kconfig b/net/qrtr/Kconfig
new file mode 100644 (file)
index 0000000..673fd1f
--- /dev/null
@@ -0,0 +1,24 @@
+# Qualcomm IPC Router configuration
+#
+
+config QRTR
+       tristate "Qualcomm IPC Router support"
+       depends on ARCH_QCOM || COMPILE_TEST
+       ---help---
+         Say Y if you intend to use the Qualcomm IPC Router protocol.  The
+         protocol is used to communicate with services provided by other
+         hardware blocks in the system.
+
+         In order to do service lookups, a userspace daemon is required to
+         maintain a service listing.
+
+if QRTR
+
+config QRTR_SMD
+       tristate "SMD IPC Router channels"
+       depends on QCOM_SMD || COMPILE_TEST
+       ---help---
+         Say Y here to support SMD-based IPC Router channels.  SMD is the
+         most common transport for IPC Router.
+
+endif # QRTR
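
For reference, an illustrative .config fragment building both options as modules (assuming the ARCH_QCOM/QCOM_SMD dependencies are met):

    CONFIG_QRTR=m
    CONFIG_QRTR_SMD=m
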
diff --git a/net/qrtr/Makefile b/net/qrtr/Makefile
new file mode 100644 (file)
index 0000000..6c00dc6
--- /dev/null
@@ -0,0 +1,2 @@
+obj-$(CONFIG_QRTR) := qrtr.o
+obj-$(CONFIG_QRTR_SMD) += smd.o
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
new file mode 100644 (file)
index 0000000..c985ecb
--- /dev/null
@@ -0,0 +1,1007 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications Inc.
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/qrtr.h>
+#include <linux/termios.h>     /* For TIOCINQ/OUTQ */
+
+#include <net/sock.h>
+
+#include "qrtr.h"
+
+#define QRTR_PROTO_VER 1
+
+/* auto-bind range */
+#define QRTR_MIN_EPH_SOCKET 0x4000
+#define QRTR_MAX_EPH_SOCKET 0x7fff
+
+enum qrtr_pkt_type {
+       QRTR_TYPE_DATA          = 1,
+       QRTR_TYPE_HELLO         = 2,
+       QRTR_TYPE_BYE           = 3,
+       QRTR_TYPE_NEW_SERVER    = 4,
+       QRTR_TYPE_DEL_SERVER    = 5,
+       QRTR_TYPE_DEL_CLIENT    = 6,
+       QRTR_TYPE_RESUME_TX     = 7,
+       QRTR_TYPE_EXIT          = 8,
+       QRTR_TYPE_PING          = 9,
+};
+
+/**
+ * struct qrtr_hdr - (I|R)PCrouter packet header
+ * @version: protocol version
+ * @type: packet type; one of QRTR_TYPE_*
+ * @src_node_id: source node
+ * @src_port_id: source port
+ * @confirm_rx: boolean; whether a resume-tx packet should be sent in reply
+ * @size: length of packet, excluding this header
+ * @dst_node_id: destination node
+ * @dst_port_id: destination port
+ */
+struct qrtr_hdr {
+       __le32 version;
+       __le32 type;
+       __le32 src_node_id;
+       __le32 src_port_id;
+       __le32 confirm_rx;
+       __le32 size;
+       __le32 dst_node_id;
+       __le32 dst_port_id;
+} __packed;
+
+#define QRTR_HDR_SIZE sizeof(struct qrtr_hdr)
+#define QRTR_NODE_BCAST ((unsigned int)-1)
+#define QRTR_PORT_CTRL ((unsigned int)-2)
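
All header fields are little-endian and the header is 8 * 4 = 32 bytes. As qrtr_endpoint_post() below enforces, the payload is padded to a 4-byte boundary and the frame length must equal header plus padded payload; a standalone restatement of that length check (plain C, not kernel code):

    #include <stddef.h>
    #include <stdint.h>

    #define QRTR_HDR_SIZE 32u   /* sizeof(struct qrtr_hdr) above */

    static int qrtr_frame_len_ok(size_t len, uint32_t payload_size)
    {
            size_t psize = (payload_size + 3) & ~(size_t)3;  /* pad to 4 */

            return len >= QRTR_HDR_SIZE && len == QRTR_HDR_SIZE + psize;
    }
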
+
+struct qrtr_sock {
+       /* WARNING: sk must be the first member */
+       struct sock sk;
+       struct sockaddr_qrtr us;
+       struct sockaddr_qrtr peer;
+};
+
+static inline struct qrtr_sock *qrtr_sk(struct sock *sk)
+{
+       BUILD_BUG_ON(offsetof(struct qrtr_sock, sk) != 0);
+       return container_of(sk, struct qrtr_sock, sk);
+}
+
+static unsigned int qrtr_local_nid = -1;
+
+/* for node ids */
+static RADIX_TREE(qrtr_nodes, GFP_KERNEL);
+/* broadcast list */
+static LIST_HEAD(qrtr_all_nodes);
+/* lock for qrtr_nodes, qrtr_all_nodes and node reference */
+static DEFINE_MUTEX(qrtr_node_lock);
+
+/* local port allocation management */
+static DEFINE_IDR(qrtr_ports);
+static DEFINE_MUTEX(qrtr_port_lock);
+
+/**
+ * struct qrtr_node - endpoint node
+ * @ep_lock: lock for endpoint management and callbacks
+ * @ep: endpoint
+ * @ref: reference count for node
+ * @nid: node id
+ * @rx_queue: receive queue
+ * @work: scheduled work struct for recv work
+ * @item: list item for broadcast list
+ */
+struct qrtr_node {
+       struct mutex ep_lock;
+       struct qrtr_endpoint *ep;
+       struct kref ref;
+       unsigned int nid;
+
+       struct sk_buff_head rx_queue;
+       struct work_struct work;
+       struct list_head item;
+};
+
+/* Release node resources and free the node.
+ *
+ * Do not call directly, use qrtr_node_release.  To be used with
+ * kref_put_mutex.  As such, the node mutex is expected to be locked on call.
+ */
+static void __qrtr_node_release(struct kref *kref)
+{
+       struct qrtr_node *node = container_of(kref, struct qrtr_node, ref);
+
+       if (node->nid != QRTR_EP_NID_AUTO)
+               radix_tree_delete(&qrtr_nodes, node->nid);
+
+       list_del(&node->item);
+       mutex_unlock(&qrtr_node_lock);
+
+       skb_queue_purge(&node->rx_queue);
+       kfree(node);
+}
+
+/* Increment reference to node. */
+static struct qrtr_node *qrtr_node_acquire(struct qrtr_node *node)
+{
+       if (node)
+               kref_get(&node->ref);
+       return node;
+}
+
+/* Decrement reference to node and release as necessary. */
+static void qrtr_node_release(struct qrtr_node *node)
+{
+       if (!node)
+               return;
+       kref_put_mutex(&node->ref, __qrtr_node_release, &qrtr_node_lock);
+}
+
+/* Pass an outgoing packet socket buffer to the endpoint driver. */
+static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb)
+{
+       int rc = -ENODEV;
+
+       mutex_lock(&node->ep_lock);
+       if (node->ep)
+               rc = node->ep->xmit(node->ep, skb);
+       else
+               kfree_skb(skb);
+       mutex_unlock(&node->ep_lock);
+
+       return rc;
+}
+
+/* Look up node by id.
+ *
+ * Callers must release with qrtr_node_release()
+ */
+static struct qrtr_node *qrtr_node_lookup(unsigned int nid)
+{
+       struct qrtr_node *node;
+
+       mutex_lock(&qrtr_node_lock);
+       node = radix_tree_lookup(&qrtr_nodes, nid);
+       node = qrtr_node_acquire(node);
+       mutex_unlock(&qrtr_node_lock);
+
+       return node;
+}
+
+/* Assign node id to node.
+ *
+ * This is mostly useful for automatic node id assignment, based on
+ * the source id in the incoming packet.
+ */
+static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid)
+{
+       if (node->nid != QRTR_EP_NID_AUTO || nid == QRTR_EP_NID_AUTO)
+               return;
+
+       mutex_lock(&qrtr_node_lock);
+       radix_tree_insert(&qrtr_nodes, nid, node);
+       node->nid = nid;
+       mutex_unlock(&qrtr_node_lock);
+}
+
+/**
+ * qrtr_endpoint_post() - post incoming data
+ * @ep: endpoint handle
+ * @data: data pointer
+ * @len: size of data in bytes
+ *
+ * Return: 0 on success; negative error code on failure
+ */
+int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
+{
+       struct qrtr_node *node = ep->node;
+       const struct qrtr_hdr *phdr = data;
+       struct sk_buff *skb;
+       unsigned int psize;
+       unsigned int size;
+       unsigned int type;
+       unsigned int ver;
+       unsigned int dst;
+
+       if (len < QRTR_HDR_SIZE || len & 3)
+               return -EINVAL;
+
+       ver = le32_to_cpu(phdr->version);
+       size = le32_to_cpu(phdr->size);
+       type = le32_to_cpu(phdr->type);
+       dst = le32_to_cpu(phdr->dst_port_id);
+
+       psize = (size + 3) & ~3;
+
+       if (ver != QRTR_PROTO_VER)
+               return -EINVAL;
+
+       if (len != psize + QRTR_HDR_SIZE)
+               return -EINVAL;
+
+       if (dst != QRTR_PORT_CTRL && type != QRTR_TYPE_DATA)
+               return -EINVAL;
+
+       skb = netdev_alloc_skb(NULL, len);
+       if (!skb)
+               return -ENOMEM;
+
+       skb_reset_transport_header(skb);
+       memcpy(skb_put(skb, len), data, len);
+
+       skb_queue_tail(&node->rx_queue, skb);
+       schedule_work(&node->work);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(qrtr_endpoint_post);
+
+/* Allocate and construct a resume-tx packet. */
+static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node,
+                                           u32 dst_node, u32 port)
+{
+       const int pkt_len = 20;
+       struct qrtr_hdr *hdr;
+       struct sk_buff *skb;
+       u32 *buf;
+
+       skb = alloc_skb(QRTR_HDR_SIZE + pkt_len, GFP_KERNEL);
+       if (!skb)
+               return NULL;
+       skb_reset_transport_header(skb);
+
+       hdr = (struct qrtr_hdr *)skb_put(skb, QRTR_HDR_SIZE);
+       hdr->version = cpu_to_le32(QRTR_PROTO_VER);
+       hdr->type = cpu_to_le32(QRTR_TYPE_RESUME_TX);
+       hdr->src_node_id = cpu_to_le32(src_node);
+       hdr->src_port_id = cpu_to_le32(QRTR_PORT_CTRL);
+       hdr->confirm_rx = cpu_to_le32(0);
+       hdr->size = cpu_to_le32(pkt_len);
+       hdr->dst_node_id = cpu_to_le32(dst_node);
+       hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL);
+
+       buf = (u32 *)skb_put(skb, pkt_len);
+       memset(buf, 0, pkt_len);
+       buf[0] = cpu_to_le32(QRTR_TYPE_RESUME_TX);
+       buf[1] = cpu_to_le32(src_node);
+       buf[2] = cpu_to_le32(port);
+
+       return skb;
+}
+
+static struct qrtr_sock *qrtr_port_lookup(int port);
+static void qrtr_port_put(struct qrtr_sock *ipc);
+
+/* Handle and route a received packet.
+ *
+ * This will auto-reply with a resume-tx packet as necessary.
+ */
+static void qrtr_node_rx_work(struct work_struct *work)
+{
+       struct qrtr_node *node = container_of(work, struct qrtr_node, work);
+       struct sk_buff *skb;
+
+       while ((skb = skb_dequeue(&node->rx_queue)) != NULL) {
+               const struct qrtr_hdr *phdr;
+               u32 dst_node, dst_port;
+               struct qrtr_sock *ipc;
+               u32 src_node;
+               int confirm;
+
+               phdr = (const struct qrtr_hdr *)skb_transport_header(skb);
+               src_node = le32_to_cpu(phdr->src_node_id);
+               dst_node = le32_to_cpu(phdr->dst_node_id);
+               dst_port = le32_to_cpu(phdr->dst_port_id);
+               confirm = !!phdr->confirm_rx;
+
+               qrtr_node_assign(node, src_node);
+
+               ipc = qrtr_port_lookup(dst_port);
+               if (!ipc) {
+                       kfree_skb(skb);
+               } else {
+                       if (sock_queue_rcv_skb(&ipc->sk, skb))
+                               kfree_skb(skb);
+
+                       qrtr_port_put(ipc);
+               }
+
+               if (confirm) {
+                       skb = qrtr_alloc_resume_tx(dst_node, node->nid, dst_port);
+                       if (!skb)
+                               break;
+                       if (qrtr_node_enqueue(node, skb))
+                               break;
+               }
+       }
+}
+
+/**
+ * qrtr_endpoint_register() - register a new endpoint
+ * @ep: endpoint to register
+ * @nid: desired node id; may be QRTR_EP_NID_AUTO for auto-assignment
+ * Return: 0 on success; negative error code on failure
+ *
+ * The specified endpoint must have the xmit function pointer set on call.
+ */
+int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int nid)
+{
+       struct qrtr_node *node;
+
+       if (!ep || !ep->xmit)
+               return -EINVAL;
+
+       node = kzalloc(sizeof(*node), GFP_KERNEL);
+       if (!node)
+               return -ENOMEM;
+
+       INIT_WORK(&node->work, qrtr_node_rx_work);
+       kref_init(&node->ref);
+       mutex_init(&node->ep_lock);
+       skb_queue_head_init(&node->rx_queue);
+       node->nid = QRTR_EP_NID_AUTO;
+       node->ep = ep;
+
+       qrtr_node_assign(node, nid);
+
+       mutex_lock(&qrtr_node_lock);
+       list_add(&node->item, &qrtr_all_nodes);
+       mutex_unlock(&qrtr_node_lock);
+       ep->node = node;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(qrtr_endpoint_register);
+
+/**
+ * qrtr_endpoint_unregister - unregister endpoint
+ * @ep: endpoint to unregister
+ */
+void qrtr_endpoint_unregister(struct qrtr_endpoint *ep)
+{
+       struct qrtr_node *node = ep->node;
+
+       mutex_lock(&node->ep_lock);
+       node->ep = NULL;
+       mutex_unlock(&node->ep_lock);
+
+       qrtr_node_release(node);
+       ep->node = NULL;
+}
+EXPORT_SYMBOL_GPL(qrtr_endpoint_unregister);
+
+/* Look up socket by port.
+ *
+ * Callers must release with qrtr_port_put()
+ */
+static struct qrtr_sock *qrtr_port_lookup(int port)
+{
+       struct qrtr_sock *ipc;
+
+       if (port == QRTR_PORT_CTRL)
+               port = 0;
+
+       mutex_lock(&qrtr_port_lock);
+       ipc = idr_find(&qrtr_ports, port);
+       if (ipc)
+               sock_hold(&ipc->sk);
+       mutex_unlock(&qrtr_port_lock);
+
+       return ipc;
+}
+
+/* Release acquired socket. */
+static void qrtr_port_put(struct qrtr_sock *ipc)
+{
+       sock_put(&ipc->sk);
+}
+
+/* Remove port assignment. */
+static void qrtr_port_remove(struct qrtr_sock *ipc)
+{
+       int port = ipc->us.sq_port;
+
+       if (port == QRTR_PORT_CTRL)
+               port = 0;
+
+       __sock_put(&ipc->sk);
+
+       mutex_lock(&qrtr_port_lock);
+       idr_remove(&qrtr_ports, port);
+       mutex_unlock(&qrtr_port_lock);
+}
+
+/* Assign port number to socket.
+ *
+ * Specify port in the integer pointed to by port, and it will be adjusted
+ * on return as necessary.
+ *
+ * Port may be:
+ *   0: Assign ephemeral port in [QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET]
+ *   <QRTR_MIN_EPH_SOCKET: Specified; requires CAP_NET_ADMIN
+ *   >QRTR_MIN_EPH_SOCKET: Specified; available to all
+ */
+static int qrtr_port_assign(struct qrtr_sock *ipc, int *port)
+{
+       int rc;
+
+       mutex_lock(&qrtr_port_lock);
+       if (!*port) {
+               rc = idr_alloc(&qrtr_ports, ipc,
+                              QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET + 1,
+                              GFP_ATOMIC);
+               if (rc >= 0)
+                       *port = rc;
+       } else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) {
+               rc = -EACCES;
+       } else if (*port == QRTR_PORT_CTRL) {
+               rc = idr_alloc(&qrtr_ports, ipc, 0, 1, GFP_ATOMIC);
+       } else {
+               rc = idr_alloc(&qrtr_ports, ipc, *port, *port + 1, GFP_ATOMIC);
+               if (rc >= 0)
+                       *port = rc;
+       }
+       mutex_unlock(&qrtr_port_lock);
+
+       if (rc == -ENOSPC)
+               return -EADDRINUSE;
+       else if (rc < 0)
+               return rc;
+
+       sock_hold(&ipc->sk);
+
+       return 0;
+}
+
+/* Bind socket to address.
+ *
+ * Socket should be locked upon call.
+ */
+static int __qrtr_bind(struct socket *sock,
+                      const struct sockaddr_qrtr *addr, int zapped)
+{
+       struct qrtr_sock *ipc = qrtr_sk(sock->sk);
+       struct sock *sk = sock->sk;
+       int port;
+       int rc;
+
+       /* rebinding ok */
+       if (!zapped && addr->sq_port == ipc->us.sq_port)
+               return 0;
+
+       port = addr->sq_port;
+       rc = qrtr_port_assign(ipc, &port);
+       if (rc)
+               return rc;
+
+       /* unbind previous, if any */
+       if (!zapped)
+               qrtr_port_remove(ipc);
+       ipc->us.sq_port = port;
+
+       sock_reset_flag(sk, SOCK_ZAPPED);
+
+       return 0;
+}
+
+/* Auto bind to an ephemeral port. */
+static int qrtr_autobind(struct socket *sock)
+{
+       struct sock *sk = sock->sk;
+       struct sockaddr_qrtr addr;
+
+       if (!sock_flag(sk, SOCK_ZAPPED))
+               return 0;
+
+       addr.sq_family = AF_QIPCRTR;
+       addr.sq_node = qrtr_local_nid;
+       addr.sq_port = 0;
+
+       return __qrtr_bind(sock, &addr, 1);
+}
+
+/* Bind socket to specified sockaddr. */
+static int qrtr_bind(struct socket *sock, struct sockaddr *saddr, int len)
+{
+       DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
+       struct qrtr_sock *ipc = qrtr_sk(sock->sk);
+       struct sock *sk = sock->sk;
+       int rc;
+
+       if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
+               return -EINVAL;
+
+       if (addr->sq_node != ipc->us.sq_node)
+               return -EINVAL;
+
+       lock_sock(sk);
+       rc = __qrtr_bind(sock, addr, sock_flag(sk, SOCK_ZAPPED));
+       release_sock(sk);
+
+       return rc;
+}
+
+/* Queue packet to local peer socket. */
+static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb)
+{
+       const struct qrtr_hdr *phdr;
+       struct qrtr_sock *ipc;
+
+       phdr = (const struct qrtr_hdr *)skb_transport_header(skb);
+
+       ipc = qrtr_port_lookup(le32_to_cpu(phdr->dst_port_id));
+       if (!ipc || &ipc->sk == skb->sk) { /* do not send to self */
+               kfree_skb(skb);
+               return -ENODEV;
+       }
+
+       if (sock_queue_rcv_skb(&ipc->sk, skb)) {
+               qrtr_port_put(ipc);
+               kfree_skb(skb);
+               return -ENOSPC;
+       }
+
+       qrtr_port_put(ipc);
+
+       return 0;
+}
+
+/* Queue packet for broadcast. */
+static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb)
+{
+       struct sk_buff *skbn;
+
+       mutex_lock(&qrtr_node_lock);
+       list_for_each_entry(node, &qrtr_all_nodes, item) {
+               skbn = skb_clone(skb, GFP_KERNEL);
+               if (!skbn)
+                       break;
+               skb_set_owner_w(skbn, skb->sk);
+               qrtr_node_enqueue(node, skbn);
+       }
+       mutex_unlock(&qrtr_node_lock);
+
+       qrtr_local_enqueue(node, skb);
+
+       return 0;
+}
+
+static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+{
+       DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
+       int (*enqueue_fn)(struct qrtr_node *, struct sk_buff *);
+       struct qrtr_sock *ipc = qrtr_sk(sock->sk);
+       struct sock *sk = sock->sk;
+       struct qrtr_node *node;
+       struct qrtr_hdr *hdr;
+       struct sk_buff *skb;
+       size_t plen;
+       int rc;
+
+       if (msg->msg_flags & ~(MSG_DONTWAIT))
+               return -EINVAL;
+
+       if (len > 65535)
+               return -EMSGSIZE;
+
+       lock_sock(sk);
+
+       if (addr) {
+               if (msg->msg_namelen < sizeof(*addr)) {
+                       release_sock(sk);
+                       return -EINVAL;
+               }
+
+               if (addr->sq_family != AF_QIPCRTR) {
+                       release_sock(sk);
+                       return -EINVAL;
+               }
+
+               rc = qrtr_autobind(sock);
+               if (rc) {
+                       release_sock(sk);
+                       return rc;
+               }
+       } else if (sk->sk_state == TCP_ESTABLISHED) {
+               addr = &ipc->peer;
+       } else {
+               release_sock(sk);
+               return -ENOTCONN;
+       }
+
+       node = NULL;
+       if (addr->sq_node == QRTR_NODE_BCAST) {
+               enqueue_fn = qrtr_bcast_enqueue;
+       } else if (addr->sq_node == ipc->us.sq_node) {
+               enqueue_fn = qrtr_local_enqueue;
+       } else {
+               enqueue_fn = qrtr_node_enqueue;
+               node = qrtr_node_lookup(addr->sq_node);
+               if (!node) {
+                       release_sock(sk);
+                       return -ECONNRESET;
+               }
+       }
+
+       plen = (len + 3) & ~3;
+       skb = sock_alloc_send_skb(sk, plen + QRTR_HDR_SIZE,
+                                 msg->msg_flags & MSG_DONTWAIT, &rc);
+       if (!skb)
+               goto out_node;
+
+       skb_reset_transport_header(skb);
+       skb_put(skb, len + QRTR_HDR_SIZE);
+
+       hdr = (struct qrtr_hdr *)skb_transport_header(skb);
+       hdr->version = cpu_to_le32(QRTR_PROTO_VER);
+       hdr->src_node_id = cpu_to_le32(ipc->us.sq_node);
+       hdr->src_port_id = cpu_to_le32(ipc->us.sq_port);
+       hdr->confirm_rx = cpu_to_le32(0);
+       hdr->size = cpu_to_le32(len);
+       hdr->dst_node_id = cpu_to_le32(addr->sq_node);
+       hdr->dst_port_id = cpu_to_le32(addr->sq_port);
+
+       rc = skb_copy_datagram_from_iter(skb, QRTR_HDR_SIZE,
+                                        &msg->msg_iter, len);
+       if (rc) {
+               kfree_skb(skb);
+               goto out_node;
+       }
+
+       if (plen != len) {
+               skb_pad(skb, plen - len);
+               skb_put(skb, plen - len);
+       }
+
+       if (ipc->us.sq_port == QRTR_PORT_CTRL) {
+               if (len < 4) {
+                       rc = -EINVAL;
+                       kfree_skb(skb);
+                       goto out_node;
+               }
+
+               /* control messages already require the type as 'command' */
+               skb_copy_bits(skb, QRTR_HDR_SIZE, &hdr->type, 4);
+       } else {
+               hdr->type = cpu_to_le32(QRTR_TYPE_DATA);
+       }
+
+       rc = enqueue_fn(node, skb);
+       if (rc >= 0)
+               rc = len;
+
+out_node:
+       qrtr_node_release(node);
+       release_sock(sk);
+
+       return rc;
+}
+
+static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg,
+                       size_t size, int flags)
+{
+       DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
+       const struct qrtr_hdr *phdr;
+       struct sock *sk = sock->sk;
+       struct sk_buff *skb;
+       int copied, rc;
+
+       lock_sock(sk);
+
+       if (sock_flag(sk, SOCK_ZAPPED)) {
+               release_sock(sk);
+               return -EADDRNOTAVAIL;
+       }
+
+       skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
+                               flags & MSG_DONTWAIT, &rc);
+       if (!skb) {
+               release_sock(sk);
+               return rc;
+       }
+
+       phdr = (const struct qrtr_hdr *)skb_transport_header(skb);
+       copied = le32_to_cpu(phdr->size);
+       if (copied > size) {
+               copied = size;
+               msg->msg_flags |= MSG_TRUNC;
+       }
+
+       rc = skb_copy_datagram_msg(skb, QRTR_HDR_SIZE, msg, copied);
+       if (rc < 0)
+               goto out;
+       rc = copied;
+
+       if (addr) {
+               addr->sq_family = AF_QIPCRTR;
+               addr->sq_node = le32_to_cpu(phdr->src_node_id);
+               addr->sq_port = le32_to_cpu(phdr->src_port_id);
+               msg->msg_namelen = sizeof(*addr);
+       }
+
+out:
+       skb_free_datagram(sk, skb);
+       release_sock(sk);
+
+       return rc;
+}
+
+static int qrtr_connect(struct socket *sock, struct sockaddr *saddr,
+                       int len, int flags)
+{
+       DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
+       struct qrtr_sock *ipc = qrtr_sk(sock->sk);
+       struct sock *sk = sock->sk;
+       int rc;
+
+       if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
+               return -EINVAL;
+
+       lock_sock(sk);
+
+       sk->sk_state = TCP_CLOSE;
+       sock->state = SS_UNCONNECTED;
+
+       rc = qrtr_autobind(sock);
+       if (rc) {
+               release_sock(sk);
+               return rc;
+       }
+
+       ipc->peer = *addr;
+       sock->state = SS_CONNECTED;
+       sk->sk_state = TCP_ESTABLISHED;
+
+       release_sock(sk);
+
+       return 0;
+}
+
+static int qrtr_getname(struct socket *sock, struct sockaddr *saddr,
+                       int *len, int peer)
+{
+       struct qrtr_sock *ipc = qrtr_sk(sock->sk);
+       struct sockaddr_qrtr qaddr;
+       struct sock *sk = sock->sk;
+
+       lock_sock(sk);
+       if (peer) {
+               if (sk->sk_state != TCP_ESTABLISHED) {
+                       release_sock(sk);
+                       return -ENOTCONN;
+               }
+
+               qaddr = ipc->peer;
+       } else {
+               qaddr = ipc->us;
+       }
+       release_sock(sk);
+
+       *len = sizeof(qaddr);
+       qaddr.sq_family = AF_QIPCRTR;
+
+       memcpy(saddr, &qaddr, sizeof(qaddr));
+
+       return 0;
+}
+
+static int qrtr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+       void __user *argp = (void __user *)arg;
+       struct qrtr_sock *ipc = qrtr_sk(sock->sk);
+       struct sock *sk = sock->sk;
+       struct sockaddr_qrtr *sq;
+       struct sk_buff *skb;
+       struct ifreq ifr;
+       long len = 0;
+       int rc = 0;
+
+       lock_sock(sk);
+
+       switch (cmd) {
+       case TIOCOUTQ:
+               len = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
+               if (len < 0)
+                       len = 0;
+               rc = put_user(len, (int __user *)argp);
+               break;
+       case TIOCINQ:
+               skb = skb_peek(&sk->sk_receive_queue);
+               if (skb)
+                       len = skb->len - QRTR_HDR_SIZE;
+               rc = put_user(len, (int __user *)argp);
+               break;
+       case SIOCGIFADDR:
+               if (copy_from_user(&ifr, argp, sizeof(ifr))) {
+                       rc = -EFAULT;
+                       break;
+               }
+
+               sq = (struct sockaddr_qrtr *)&ifr.ifr_addr;
+               *sq = ipc->us;
+               if (copy_to_user(argp, &ifr, sizeof(ifr))) {
+                       rc = -EFAULT;
+                       break;
+               }
+               break;
+       case SIOCGSTAMP:
+               rc = sock_get_timestamp(sk, argp);
+               break;
+       case SIOCADDRT:
+       case SIOCDELRT:
+       case SIOCSIFADDR:
+       case SIOCGIFDSTADDR:
+       case SIOCSIFDSTADDR:
+       case SIOCGIFBRDADDR:
+       case SIOCSIFBRDADDR:
+       case SIOCGIFNETMASK:
+       case SIOCSIFNETMASK:
+               rc = -EINVAL;
+               break;
+       default:
+               rc = -ENOIOCTLCMD;
+               break;
+       }
+
+       release_sock(sk);
+
+       return rc;
+}
+
+static int qrtr_release(struct socket *sock)
+{
+       struct sock *sk = sock->sk;
+       struct qrtr_sock *ipc;
+
+       if (!sk)
+               return 0;
+
+       lock_sock(sk);
+
+       ipc = qrtr_sk(sk);
+       sk->sk_shutdown = SHUTDOWN_MASK;
+       if (!sock_flag(sk, SOCK_DEAD))
+               sk->sk_state_change(sk);
+
+       sock_set_flag(sk, SOCK_DEAD);
+       sock->sk = NULL;
+
+       if (!sock_flag(sk, SOCK_ZAPPED))
+               qrtr_port_remove(ipc);
+
+       skb_queue_purge(&sk->sk_receive_queue);
+
+       release_sock(sk);
+       sock_put(sk);
+
+       return 0;
+}
+
+static const struct proto_ops qrtr_proto_ops = {
+       .owner          = THIS_MODULE,
+       .family         = AF_QIPCRTR,
+       .bind           = qrtr_bind,
+       .connect        = qrtr_connect,
+       .socketpair     = sock_no_socketpair,
+       .accept         = sock_no_accept,
+       .listen         = sock_no_listen,
+       .sendmsg        = qrtr_sendmsg,
+       .recvmsg        = qrtr_recvmsg,
+       .getname        = qrtr_getname,
+       .ioctl          = qrtr_ioctl,
+       .poll           = datagram_poll,
+       .shutdown       = sock_no_shutdown,
+       .setsockopt     = sock_no_setsockopt,
+       .getsockopt     = sock_no_getsockopt,
+       .release        = qrtr_release,
+       .mmap           = sock_no_mmap,
+       .sendpage       = sock_no_sendpage,
+};
+
+static struct proto qrtr_proto = {
+       .name           = "QIPCRTR",
+       .owner          = THIS_MODULE,
+       .obj_size       = sizeof(struct qrtr_sock),
+};
+
+static int qrtr_create(struct net *net, struct socket *sock,
+                      int protocol, int kern)
+{
+       struct qrtr_sock *ipc;
+       struct sock *sk;
+
+       if (sock->type != SOCK_DGRAM)
+               return -EPROTOTYPE;
+
+       sk = sk_alloc(net, AF_QIPCRTR, GFP_KERNEL, &qrtr_proto, kern);
+       if (!sk)
+               return -ENOMEM;
+
+       sock_set_flag(sk, SOCK_ZAPPED);
+
+       sock_init_data(sock, sk);
+       sock->ops = &qrtr_proto_ops;
+
+       ipc = qrtr_sk(sk);
+       ipc->us.sq_family = AF_QIPCRTR;
+       ipc->us.sq_node = qrtr_local_nid;
+       ipc->us.sq_port = 0;
+
+       return 0;
+}
+
+static const struct nla_policy qrtr_policy[IFA_MAX + 1] = {
+       [IFA_LOCAL] = { .type = NLA_U32 },
+};
+
+static int qrtr_addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+       struct nlattr *tb[IFA_MAX + 1];
+       struct ifaddrmsg *ifm;
+       int rc;
+
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
+               return -EPERM;
+
+       if (!netlink_capable(skb, CAP_SYS_ADMIN))
+               return -EPERM;
+
+       ASSERT_RTNL();
+
+       rc = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, qrtr_policy);
+       if (rc < 0)
+               return rc;
+
+       ifm = nlmsg_data(nlh);
+       if (!tb[IFA_LOCAL])
+               return -EINVAL;
+
+       qrtr_local_nid = nla_get_u32(tb[IFA_LOCAL]);
+       return 0;
+}
+
+static const struct net_proto_family qrtr_family = {
+       .owner  = THIS_MODULE,
+       .family = AF_QIPCRTR,
+       .create = qrtr_create,
+};
+
+static int __init qrtr_proto_init(void)
+{
+       int rc;
+
+       rc = proto_register(&qrtr_proto, 1);
+       if (rc)
+               return rc;
+
+       rc = sock_register(&qrtr_family);
+       if (rc) {
+               proto_unregister(&qrtr_proto);
+               return rc;
+       }
+
+       rtnl_register(PF_QIPCRTR, RTM_NEWADDR, qrtr_addr_doit, NULL, NULL);
+
+       return 0;
+}
+module_init(qrtr_proto_init);
+
+static void __exit qrtr_proto_fini(void)
+{
+       rtnl_unregister(PF_QIPCRTR, RTM_NEWADDR);
+       sock_unregister(qrtr_family.family);
+       proto_unregister(&qrtr_proto);
+}
+module_exit(qrtr_proto_fini);
+
+MODULE_DESCRIPTION("Qualcomm IPC-router driver");
+MODULE_LICENSE("GPL v2");
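
For context, a minimal userspace sketch of the socket API that qrtr_create(), qrtr_bind() and qrtr_sendmsg() above implement.  This is an illustration, not part of the commit: it assumes struct sockaddr_qrtr is visible through the exported <linux/qrtr.h>, AF_QIPCRTR may need defining by hand on libcs that predate this series, and the destination node and port are placeholder values.

#include <stdio.h>
#include <sys/socket.h>
#include <linux/qrtr.h>

#ifndef AF_QIPCRTR
#define AF_QIPCRTR 42	/* value from include/linux/socket.h in this series */
#endif

int main(void)
{
	struct sockaddr_qrtr dst = {
		.sq_family = AF_QIPCRTR,
		.sq_node   = 1,		/* placeholder remote node id */
		.sq_port   = 0x4000,	/* placeholder remote port */
	};
	const char msg[] = "ping";
	int fd;

	/* qrtr_create() only accepts SOCK_DGRAM sockets */
	fd = socket(AF_QIPCRTR, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* an unbound sender is auto-bound by qrtr_autobind() to an
	 * ephemeral port in [QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET]
	 */
	if (sendto(fd, msg, sizeof(msg), 0,
		   (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("sendto");

	return 0;
}
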
diff --git a/net/qrtr/qrtr.h b/net/qrtr/qrtr.h
new file mode 100644
index 0000000..2b84871
--- /dev/null
@@ -0,0 +1,31 @@
+#ifndef __QRTR_H_
+#define __QRTR_H_
+
+#include <linux/types.h>
+
+struct sk_buff;
+
+/* endpoint node id auto assignment */
+#define QRTR_EP_NID_AUTO (-1)
+
+/**
+ * struct qrtr_endpoint - endpoint handle
+ * @xmit: Callback for outgoing packets
+ *
+ * The socket buffer passed to the xmit function becomes owned by the endpoint
+ * driver.  As such, when the driver is done with the buffer, it should
+ * call kfree_skb() on failure, or consume_skb() on success.
+ */
+struct qrtr_endpoint {
+       int (*xmit)(struct qrtr_endpoint *ep, struct sk_buff *skb);
+       /* private: not for endpoint use */
+       struct qrtr_node *node;
+};
+
+int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int nid);
+
+void qrtr_endpoint_unregister(struct qrtr_endpoint *ep);
+
+int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len);
+
+#endif
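
The smd.c driver added below is the first real transport behind this interface.  As a distilled illustration of the skb ownership rule documented above, a hypothetical loopback endpoint might look like the following sketch; the qrtr_loop_* names are invented here and exist nowhere in the tree.

#include <linux/skbuff.h>

#include "qrtr.h"

struct qrtr_loop_dev {
	struct qrtr_endpoint ep;
};

/* xmit callback: the endpoint owns the skb from this point on */
static int qrtr_loop_xmit(struct qrtr_endpoint *ep, struct sk_buff *skb)
{
	int rc = skb_linearize(skb);

	if (!rc)	/* bounce the packet straight back into the router */
		rc = qrtr_endpoint_post(ep, skb->data, skb->len);

	if (rc)
		kfree_skb(skb);		/* failure: drop */
	else
		consume_skb(skb);	/* success: consumed */
	return rc;
}

static int qrtr_loop_setup(struct qrtr_loop_dev *qdev)
{
	qdev->ep.xmit = qrtr_loop_xmit;
	/* let the core learn the node id from incoming packets */
	return qrtr_endpoint_register(&qdev->ep, QRTR_EP_NID_AUTO);
}
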
diff --git a/net/qrtr/smd.c b/net/qrtr/smd.c
new file mode 100644
index 0000000..84ebce7
--- /dev/null
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications Inc.
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/soc/qcom/smd.h>
+
+#include "qrtr.h"
+
+struct qrtr_smd_dev {
+       struct qrtr_endpoint ep;
+       struct qcom_smd_channel *channel;
+};
+
+/* from smd to qrtr */
+static int qcom_smd_qrtr_callback(struct qcom_smd_device *sdev,
+                                 const void *data, size_t len)
+{
+       struct qrtr_smd_dev *qdev = dev_get_drvdata(&sdev->dev);
+       int rc;
+
+       if (!qdev)
+               return -EAGAIN;
+
+       rc = qrtr_endpoint_post(&qdev->ep, data, len);
+       if (rc == -EINVAL) {
+               dev_err(&sdev->dev, "invalid ipcrouter packet\n");
+               /* return 0 to let smd drop the packet */
+               rc = 0;
+       }
+
+       return rc;
+}
+
+/* from qrtr to smd */
+static int qcom_smd_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
+{
+       struct qrtr_smd_dev *qdev = container_of(ep, struct qrtr_smd_dev, ep);
+       int rc;
+
+       rc = skb_linearize(skb);
+       if (rc)
+               goto out;
+
+       rc = qcom_smd_send(qdev->channel, skb->data, skb->len);
+
+out:
+       if (rc)
+               kfree_skb(skb);
+       else
+               consume_skb(skb);
+       return rc;
+}
+
+static int qcom_smd_qrtr_probe(struct qcom_smd_device *sdev)
+{
+       struct qrtr_smd_dev *qdev;
+       int rc;
+
+       qdev = devm_kzalloc(&sdev->dev, sizeof(*qdev), GFP_KERNEL);
+       if (!qdev)
+               return -ENOMEM;
+
+       qdev->channel = sdev->channel;
+       qdev->ep.xmit = qcom_smd_qrtr_send;
+
+       rc = qrtr_endpoint_register(&qdev->ep, QRTR_EP_NID_AUTO);
+       if (rc)
+               return rc;
+
+       dev_set_drvdata(&sdev->dev, qdev);
+
+       dev_dbg(&sdev->dev, "Qualcomm SMD QRTR driver probed\n");
+
+       return 0;
+}
+
+static void qcom_smd_qrtr_remove(struct qcom_smd_device *sdev)
+{
+       struct qrtr_smd_dev *qdev = dev_get_drvdata(&sdev->dev);
+
+       qrtr_endpoint_unregister(&qdev->ep);
+
+       dev_set_drvdata(&sdev->dev, NULL);
+}
+
+static const struct qcom_smd_id qcom_smd_qrtr_smd_match[] = {
+       { "IPCRTR" },
+       {}
+};
+
+static struct qcom_smd_driver qcom_smd_qrtr_driver = {
+       .probe = qcom_smd_qrtr_probe,
+       .remove = qcom_smd_qrtr_remove,
+       .callback = qcom_smd_qrtr_callback,
+       .smd_match_table = qcom_smd_qrtr_smd_match,
+       .driver = {
+               .name = "qcom_smd_qrtr",
+               .owner = THIS_MODULE,
+       },
+};
+
+module_qcom_smd_driver(qcom_smd_qrtr_driver);
+
+MODULE_DESCRIPTION("Qualcomm IPC-Router SMD interface driver");
+MODULE_LICENSE("GPL v2");
index e6144b8246fd27fe49bffd228b44a44c3e7cbd81..6641bcf7c18505f1d653a32c9c5dc0455926dfd8 100644
@@ -299,7 +299,7 @@ void rds_cong_set_bit(struct rds_cong_map *map, __be16 port)
        i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
        off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
 
-       __set_bit_le(off, (void *)map->m_page_addrs[i]);
+       set_bit_le(off, (void *)map->m_page_addrs[i]);
 }
 
 void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
@@ -313,7 +313,7 @@ void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
        i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
        off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
 
-       __clear_bit_le(off, (void *)map->m_page_addrs[i]);
+       clear_bit_le(off, (void *)map->m_page_addrs[i]);
 }
 
 static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port)
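
The two hunks above swap the double-underscored bit helpers for their atomic counterparts.  In the kernel bitops convention, __set_bit_le() and __clear_bit_le() expand to a plain load/modify/store, so two CPUs updating bits in the same longword of the congestion map can silently lose an update; set_bit_le() and clear_bit_le() perform an atomic read-modify-write.  A schematic fragment (the word variable is illustrative):

	unsigned long word = 0;

	__set_bit_le(3, &word);	/* non-atomic; caller must serialize */
	set_bit_le(3, &word);	/* atomic RMW; safe against concurrent setters */
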
index 8764970f0c24179b8470a27c1ab0d45567a0d90f..310cabce23111cfaa45af97adef28f4d41d1bbda 100644
@@ -194,7 +194,7 @@ static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
                dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version);
                dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version);
                dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
-               dp->dp_ack_seq = rds_ib_piggyb_ack(ic);
+               dp->dp_ack_seq = cpu_to_be64(rds_ib_piggyb_ack(ic));
 
                /* Advertise flow control */
                if (ic->i_flowctl) {
index 977fb86065b75dbef916bd0acb9b94876c0f5c04..abc8cc805e8d063813d496d984e95a0c078ca0d3 100644
@@ -796,7 +796,7 @@ static void rds_ib_cong_recv(struct rds_connection *conn,
 
                addr = kmap_atomic(sg_page(&frag->f_sg));
 
-               src = addr + frag_off;
+               src = addr + frag->f_sg.offset + frag_off;
                dst = (void *)map->m_page_addrs[map_page] + map_off;
                for (k = 0; k < to_copy; k += 8) {
                        /* Record ports that became uncongested, ie
index 616f21f4e7d7c9f9987c1a0206bab68e589040d7..e2b5a5832d3d52888f11e8730a27d937510e151d 100644
@@ -135,8 +135,8 @@ int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
                        if (rem->r_offset != 0)
                                rds_stats_inc(s_page_remainder_hit);
 
-                       rem->r_offset += bytes;
-                       if (rem->r_offset == PAGE_SIZE) {
+                       rem->r_offset += ALIGN(bytes, 8);
+                       if (rem->r_offset >= PAGE_SIZE) {
                                __free_page(rem->r_page);
                                rem->r_page = NULL;
                        }
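
This hunk advances the remainder cursor by the 8-byte-aligned size, since consumers such as the congestion-map copy loop above access the allocations in 8-byte words; because the aligned offset can now overshoot the page, the exhaustion test becomes ">=".  For reference, the kernel's ALIGN() rounds up to the next multiple of a power-of-two boundary:

	/* ALIGN(x, a) == ((x) + (a) - 1) & ~((a) - 1) for power-of-two a */
	ALIGN(5, 8);	/* ==  8 */
	ALIGN(8, 8);	/* ==  8 */
	ALIGN(13, 8);	/* == 16 */
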
index 61ed2a8764ba4c7a66788d7843cef18abb9e904a..86187dad14403100ff6199ee79beed5262de818f 100644
@@ -127,7 +127,7 @@ void rds_tcp_restore_callbacks(struct socket *sock,
 
 /*
  * This is the only path that sets tc->t_sock.  Send and receive trust that
- * it is set.  The RDS_CONN_CONNECTED bit protects those paths from being
+ * it is set.  The RDS_CONN_UP bit protects those paths from being
  * called while it isn't set.
  */
 void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn)
@@ -216,6 +216,7 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
        if (!tc)
                return -ENOMEM;
 
+       mutex_init(&tc->t_conn_lock);
        tc->t_sock = NULL;
        tc->t_tinc = NULL;
        tc->t_tinc_hdr_rem = sizeof(struct rds_header);
index 64f873c0c6b6d15574ec9771506d04e617a6aaac..41c228300525c029df02472b609ada1a71cea98e 100644
@@ -12,6 +12,10 @@ struct rds_tcp_connection {
 
        struct list_head        t_tcp_node;
        struct rds_connection   *conn;
+       /* t_conn_lock synchronizes the connection establishment between
+        * rds_tcp_accept_one and rds_tcp_conn_connect
+        */
+       struct mutex            t_conn_lock;
        struct socket           *t_sock;
        void                    *t_orig_write_space;
        void                    *t_orig_data_ready;
index 5cb16875c4603dba71c733de6600ada40e39cffc..49a3fcfed360edfb146416976c700b24e4520864 100644
@@ -78,7 +78,14 @@ int rds_tcp_conn_connect(struct rds_connection *conn)
        struct socket *sock = NULL;
        struct sockaddr_in src, dest;
        int ret;
+       struct rds_tcp_connection *tc = conn->c_transport_data;
+
+       mutex_lock(&tc->t_conn_lock);
 
+       if (rds_conn_up(conn)) {
+               mutex_unlock(&tc->t_conn_lock);
+               return 0;
+       }
        ret = sock_create_kern(rds_conn_net(conn), PF_INET,
                               SOCK_STREAM, IPPROTO_TCP, &sock);
        if (ret < 0)
@@ -120,6 +127,7 @@ int rds_tcp_conn_connect(struct rds_connection *conn)
        }
 
 out:
+       mutex_unlock(&tc->t_conn_lock);
        if (sock)
                sock_release(sock);
        return ret;
index 0936a4a32b476fdde5c7208fc465ec3324bbcf09..be263cdf268bae5b59a4746d4011d1042ede58cd 100644
@@ -76,7 +76,9 @@ int rds_tcp_accept_one(struct socket *sock)
        struct rds_connection *conn;
        int ret;
        struct inet_sock *inet;
-       struct rds_tcp_connection *rs_tcp;
+       struct rds_tcp_connection *rs_tcp = NULL;
+       int conn_state;
+       struct sock *nsk;
 
        ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family,
                               sock->sk->sk_type, sock->sk->sk_protocol,
@@ -115,28 +117,44 @@ int rds_tcp_accept_one(struct socket *sock)
         * rds_tcp_state_change() will do that cleanup
         */
        rs_tcp = (struct rds_tcp_connection *)conn->c_transport_data;
-       if (rs_tcp->t_sock &&
-           ntohl(inet->inet_saddr) < ntohl(inet->inet_daddr)) {
-               struct sock *nsk = new_sock->sk;
-
-               nsk->sk_user_data = NULL;
-               nsk->sk_prot->disconnect(nsk, 0);
-               tcp_done(nsk);
-               new_sock = NULL;
-               ret = 0;
-               goto out;
-       } else if (rs_tcp->t_sock) {
-               rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp);
-               conn->c_outgoing = 0;
-       }
-
        rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING);
+       mutex_lock(&rs_tcp->t_conn_lock);
+       conn_state = rds_conn_state(conn);
+       if (conn_state != RDS_CONN_CONNECTING && conn_state != RDS_CONN_UP)
+               goto rst_nsk;
+       if (rs_tcp->t_sock) {
+               /* Need to resolve a duelling SYN between peers.
+                * We have an outstanding SYN to this peer, which may
+                * potentially have transitioned to the RDS_CONN_UP state,
+                * so we must quiesce any send threads before resetting
+                * c_transport_data.
+                */
+               wait_event(conn->c_waitq,
+                          !test_bit(RDS_IN_XMIT, &conn->c_flags));
+               if (ntohl(inet->inet_saddr) < ntohl(inet->inet_daddr)) {
+                       goto rst_nsk;
+               } else if (rs_tcp->t_sock) {
+                       rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp);
+                       conn->c_outgoing = 0;
+               }
+       }
        rds_tcp_set_callbacks(new_sock, conn);
-       rds_connect_complete(conn);
+       rds_connect_complete(conn); /* marks RDS_CONN_UP */
+       new_sock = NULL;
+       ret = 0;
+       goto out;
+rst_nsk:
+       /* reset the newly returned accept sock and bail */
+       nsk = new_sock->sk;
+       rds_tcp_stats_inc(s_tcp_listen_closed_stale);
+       nsk->sk_user_data = NULL;
+       nsk->sk_prot->disconnect(nsk, 0);
+       tcp_done(nsk);
        new_sock = NULL;
        ret = 0;
-
 out:
+       if (rs_tcp)
+               mutex_unlock(&rs_tcp->t_conn_lock);
        if (new_sock)
                sock_release(new_sock);
        return ret;
index 27a992154804c685d983206b599ef7a5bf96e8af..d75d8b56a9e3aef607b2f5b7f3d29fbe9e9540e3 100644
@@ -207,22 +207,14 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
                }
 
                if (left && tc->t_tinc_data_rem) {
-                       clone = skb_clone(skb, arg->gfp);
+                       to_copy = min(tc->t_tinc_data_rem, left);
+
+                       clone = pskb_extract(skb, offset, to_copy, arg->gfp);
                        if (!clone) {
                                desc->error = -ENOMEM;
                                goto out;
                        }
 
-                       to_copy = min(tc->t_tinc_data_rem, left);
-                       if (!pskb_pull(clone, offset) ||
-                           pskb_trim(clone, to_copy)) {
-                               pr_warn("rds_tcp_data_recv: pull/trim failed "
-                                       "left %zu data_rem %zu skb_len %d\n",
-                                       left, tc->t_tinc_data_rem, skb->len);
-                               kfree_skb(clone);
-                               desc->error = -ENOMEM;
-                               goto out;
-                       }
                        skb_queue_tail(&tinc->ti_skb_list, clone);
 
                        rdsdebug("skb %p data %p len %d off %u to_copy %zu -> "
index 23dcef12b986dcd5774147b5ed215ff2fa62cb50..784c53163b7b045ac8c98c60da8e227886a40d85 100644
@@ -30,7 +30,7 @@ config AF_RXRPC_DEBUG
 
 
 config RXKAD
-       tristate "RxRPC Kerberos security"
+       bool "RxRPC Kerberos security"
        depends on AF_RXRPC
        select CRYPTO
        select CRYPTO_MANAGER
index ec126f91276b349c7fe8e181ad4ce7aec24f0c67..e05a06ef2254248519616b06dd9dac6c6bf2a2fb 100644
@@ -18,11 +18,12 @@ af-rxrpc-y := \
        ar-recvmsg.o \
        ar-security.o \
        ar-skbuff.o \
-       ar-transport.o
+       ar-transport.o \
+       insecure.o \
+       misc.o
 
 af-rxrpc-$(CONFIG_PROC_FS) += ar-proc.o
+af-rxrpc-$(CONFIG_RXKAD) += rxkad.o
 af-rxrpc-$(CONFIG_SYSCTL) += sysctl.o
 
 obj-$(CONFIG_AF_RXRPC) += af-rxrpc.o
-
-obj-$(CONFIG_RXKAD) += rxkad.o
index 9d935fa5a2a98884beca08329433cf66dbafebdc..e45e94ca030f3b1e3a6881d3178b021b4ce670ad 100644
@@ -806,6 +806,12 @@ static int __init af_rxrpc_init(void)
                goto error_work_queue;
        }
 
+       ret = rxrpc_init_security();
+       if (ret < 0) {
+               printk(KERN_CRIT "RxRPC: Cannot initialise security\n");
+               goto error_security;
+       }
+
        ret = proto_register(&rxrpc_proto, 1);
        if (ret < 0) {
                printk(KERN_CRIT "RxRPC: Cannot register protocol\n");
@@ -853,6 +859,8 @@ error_sock:
        proto_unregister(&rxrpc_proto);
 error_proto:
        destroy_workqueue(rxrpc_workqueue);
+error_security:
+       rxrpc_exit_security();
 error_work_queue:
        kmem_cache_destroy(rxrpc_call_jar);
 error_call_jar:
@@ -883,6 +891,7 @@ static void __exit af_rxrpc_exit(void)
        remove_proc_entry("rxrpc_conns", init_net.proc_net);
        remove_proc_entry("rxrpc_calls", init_net.proc_net);
        destroy_workqueue(rxrpc_workqueue);
+       rxrpc_exit_security();
        kmem_cache_destroy(rxrpc_call_jar);
        _leave("");
 }
index 277731a5e67a523988673d5f7dccfc8de6ac47a8..e7a7f05f13e2b0b39d921cf312e9a6aeb6599571 100644
@@ -108,7 +108,7 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
                goto error;
        }
 
-       conn = rxrpc_incoming_connection(trans, &sp->hdr, GFP_NOIO);
+       conn = rxrpc_incoming_connection(trans, &sp->hdr);
        rxrpc_put_transport(trans);
        if (IS_ERR(conn)) {
                _debug("no conn");
@@ -116,7 +116,7 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
                goto error;
        }
 
-       call = rxrpc_incoming_call(rx, conn, &sp->hdr, GFP_NOIO);
+       call = rxrpc_incoming_call(rx, conn, &sp->hdr);
        rxrpc_put_connection(conn);
        if (IS_ERR(call)) {
                _debug("no call");
index 16d967075eaf8f1c4cc15ea0ac926cd97277d2d8..374478e006e7c806a82b9dd907907f39e1d4cd5c 100644
 #include <net/af_rxrpc.h>
 #include "ar-internal.h"
 
-/*
- * How long to wait before scheduling ACK generation after seeing a
- * packet with RXRPC_REQUEST_ACK set (in jiffies).
- */
-unsigned int rxrpc_requested_ack_delay = 1;
-
-/*
- * How long to wait before scheduling an ACK with subtype DELAY (in jiffies).
- *
- * We use this when we've received new data packets.  If those packets aren't
- * all consumed within this time we will send a DELAY ACK if an ACK was not
- * requested to let the sender know it doesn't need to resend.
- */
-unsigned int rxrpc_soft_ack_delay = 1 * HZ;
-
-/*
- * How long to wait before scheduling an ACK with subtype IDLE (in jiffies).
- *
- * We use this when we've consumed some previously soft-ACK'd packets when
- * further packets aren't immediately received to decide when to send an IDLE
- * ACK let the other end know that it can free up its Tx buffer space.
- */
-unsigned int rxrpc_idle_ack_delay = 0.5 * HZ;
-
-/*
- * Receive window size in packets.  This indicates the maximum number of
- * unconsumed received packets we're willing to retain in memory.  Once this
- * limit is hit, we should generate an EXCEEDS_WINDOW ACK and discard further
- * packets.
- */
-unsigned int rxrpc_rx_window_size = 32;
-
-/*
- * Maximum Rx MTU size.  This indicates to the sender the size of jumbo packet
- * made by gluing normal packets together that we're willing to handle.
- */
-unsigned int rxrpc_rx_mtu = 5692;
-
-/*
- * The maximum number of fragments in a received jumbo packet that we tell the
- * sender that we're willing to handle.
- */
-unsigned int rxrpc_rx_jumbo_max = 4;
-
-static const char *rxrpc_acks(u8 reason)
-{
-       static const char *const str[] = {
-               "---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY",
-               "IDL", "-?-"
-       };
-
-       if (reason >= ARRAY_SIZE(str))
-               reason = ARRAY_SIZE(str) - 1;
-       return str[reason];
-}
-
-static const s8 rxrpc_ack_priority[] = {
-       [0]                             = 0,
-       [RXRPC_ACK_DELAY]               = 1,
-       [RXRPC_ACK_REQUESTED]           = 2,
-       [RXRPC_ACK_IDLE]                = 3,
-       [RXRPC_ACK_PING_RESPONSE]       = 4,
-       [RXRPC_ACK_DUPLICATE]           = 5,
-       [RXRPC_ACK_OUT_OF_SEQUENCE]     = 6,
-       [RXRPC_ACK_EXCEEDS_WINDOW]      = 7,
-       [RXRPC_ACK_NOSPACE]             = 8,
-};
-
 /*
  * propose an ACK be sent
  */
@@ -426,7 +358,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
        int tail = call->acks_tail, old_tail;
        int win = CIRC_CNT(call->acks_head, tail, call->acks_winsz);
 
-       kenter("{%u,%u},%u", call->acks_hard, win, hard);
+       _enter("{%u,%u},%u", call->acks_hard, win, hard);
 
        ASSERTCMP(hard - call->acks_hard, <=, win);
 
@@ -656,7 +588,8 @@ process_further:
                _proto("OOSQ DATA %%%u { #%u }", sp->hdr.serial, sp->hdr.seq);
 
                /* secured packets must be verified and possibly decrypted */
-               if (rxrpc_verify_packet(call, skb, _abort_code) < 0)
+               if (call->conn->security->verify_packet(call, skb,
+                                                       _abort_code) < 0)
                        goto protocol_error;
 
                rxrpc_insert_oos_packet(call, skb);
@@ -901,8 +834,8 @@ void rxrpc_process_call(struct work_struct *work)
 
        /* there's a good chance we're going to have to send a message, so set
         * one up in advance */
-       msg.msg_name    = &call->conn->trans->peer->srx.transport.sin;
-       msg.msg_namelen = sizeof(call->conn->trans->peer->srx.transport.sin);
+       msg.msg_name    = &call->conn->trans->peer->srx.transport;
+       msg.msg_namelen = call->conn->trans->peer->srx.transport_len;
        msg.msg_control = NULL;
        msg.msg_controllen = 0;
        msg.msg_flags   = 0;
@@ -973,7 +906,7 @@ void rxrpc_process_call(struct work_struct *work)
                                       ECONNABORTED, true) < 0)
                        goto no_mem;
                whdr.type = RXRPC_PACKET_TYPE_ABORT;
-               data = htonl(call->abort_code);
+               data = htonl(call->local_abort);
                iov[1].iov_base = &data;
                iov[1].iov_len = sizeof(data);
                genbit = RXRPC_CALL_EV_ABORT;
@@ -1036,7 +969,7 @@ void rxrpc_process_call(struct work_struct *work)
                write_lock_bh(&call->state_lock);
                if (call->state <= RXRPC_CALL_COMPLETE) {
                        call->state = RXRPC_CALL_LOCALLY_ABORTED;
-                       call->abort_code = RX_CALL_TIMEOUT;
+                       call->local_abort = RX_CALL_TIMEOUT;
                        set_bit(RXRPC_CALL_EV_ABORT, &call->events);
                }
                write_unlock_bh(&call->state_lock);
index 7c8d300ade9bb32079b716b4fced72dce51f585b..571a41fd5a324705a30e78304833b39c8c501380 100644
@@ -411,18 +411,17 @@ found_extant_second:
  */
 struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
                                       struct rxrpc_connection *conn,
-                                      struct rxrpc_host_header *hdr,
-                                      gfp_t gfp)
+                                      struct rxrpc_host_header *hdr)
 {
        struct rxrpc_call *call, *candidate;
        struct rb_node **p, *parent;
        u32 call_id;
 
-       _enter(",%d,,%x", conn->debug_id, gfp);
+       _enter(",%d", conn->debug_id);
 
        ASSERT(rx != NULL);
 
-       candidate = rxrpc_alloc_call(gfp);
+       candidate = rxrpc_alloc_call(GFP_NOIO);
        if (!candidate)
                return ERR_PTR(-EBUSY);
 
@@ -682,7 +681,7 @@ void rxrpc_release_call(struct rxrpc_call *call)
            call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
                _debug("+++ ABORTING STATE %d +++\n", call->state);
                call->state = RXRPC_CALL_LOCALLY_ABORTED;
-               call->abort_code = RX_CALL_DEAD;
+               call->local_abort = RX_CALL_DEAD;
                set_bit(RXRPC_CALL_EV_ABORT, &call->events);
                rxrpc_queue_call(call);
        }
@@ -758,7 +757,7 @@ static void rxrpc_mark_call_released(struct rxrpc_call *call)
                if (call->state < RXRPC_CALL_COMPLETE) {
                        _debug("abort call %p", call);
                        call->state = RXRPC_CALL_LOCALLY_ABORTED;
-                       call->abort_code = RX_CALL_DEAD;
+                       call->local_abort = RX_CALL_DEAD;
                        if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
                                sched = true;
                }
index 9942da1edbf6c00a4f5acee07de7c0c92069f121..97f4fae74bcab5c2305801d85dca51a3295e3663 100644
@@ -207,6 +207,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
                INIT_LIST_HEAD(&conn->bundle_link);
                conn->calls = RB_ROOT;
                skb_queue_head_init(&conn->rx_queue);
+               conn->security = &rxrpc_no_security;
                rwlock_init(&conn->lock);
                spin_lock_init(&conn->state_lock);
                atomic_set(&conn->usage, 1);
@@ -564,8 +565,7 @@ int rxrpc_connect_call(struct rxrpc_sock *rx,
                     candidate->debug_id, candidate->trans->debug_id);
 
                rxrpc_assign_connection_id(candidate);
-               if (candidate->security)
-                       candidate->security->prime_packet_security(candidate);
+               candidate->security->prime_packet_security(candidate);
 
                /* leave the candidate lurking in zombie mode attached to the
                 * bundle until we're ready for it */
@@ -619,8 +619,7 @@ interrupted:
  */
 struct rxrpc_connection *
 rxrpc_incoming_connection(struct rxrpc_transport *trans,
-                         struct rxrpc_host_header *hdr,
-                         gfp_t gfp)
+                         struct rxrpc_host_header *hdr)
 {
        struct rxrpc_connection *conn, *candidate = NULL;
        struct rb_node *p, **pp;
@@ -659,7 +658,7 @@ rxrpc_incoming_connection(struct rxrpc_transport *trans,
 
        /* not yet present - create a candidate for a new record and then
         * redo the search */
-       candidate = rxrpc_alloc_connection(gfp);
+       candidate = rxrpc_alloc_connection(GFP_NOIO);
        if (!candidate) {
                _leave(" = -ENOMEM");
                return ERR_PTR(-ENOMEM);
@@ -831,7 +830,10 @@ static void rxrpc_destroy_connection(struct rxrpc_connection *conn)
        ASSERT(RB_EMPTY_ROOT(&conn->calls));
        rxrpc_purge_queue(&conn->rx_queue);
 
-       rxrpc_clear_conn_security(conn);
+       conn->security->clear(conn);
+       key_put(conn->key);
+       key_put(conn->server_key);
+
        rxrpc_put_transport(conn->trans);
        kfree(conn);
        _leave("");
index 1bdaaed8cdc456c419a9da5ce95412d1853ef06c..5f9563968a5b498e837bef9baf2921867a8073da 100644
@@ -40,11 +40,13 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn, int state,
                write_lock(&call->state_lock);
                if (call->state <= RXRPC_CALL_COMPLETE) {
                        call->state = state;
-                       call->abort_code = abort_code;
-                       if (state == RXRPC_CALL_LOCALLY_ABORTED)
+                       if (state == RXRPC_CALL_LOCALLY_ABORTED) {
+                               call->local_abort = conn->local_abort;
                                set_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events);
-                       else
+                       } else {
+                               call->remote_abort = conn->remote_abort;
                                set_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events);
+                       }
                        rxrpc_queue_call(call);
                }
                write_unlock(&call->state_lock);
@@ -84,8 +86,8 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
 
        rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, abort_code);
 
-       msg.msg_name    = &conn->trans->peer->srx.transport.sin;
-       msg.msg_namelen = sizeof(conn->trans->peer->srx.transport.sin);
+       msg.msg_name    = &conn->trans->peer->srx.transport;
+       msg.msg_namelen = conn->trans->peer->srx.transport_len;
        msg.msg_control = NULL;
        msg.msg_controllen = 0;
        msg.msg_flags   = 0;
@@ -101,7 +103,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
        whdr._rsvd      = 0;
        whdr.serviceId  = htons(conn->service_id);
 
-       word = htonl(abort_code);
+       word            = htonl(conn->local_abort);
 
        iov[0].iov_base = &whdr;
        iov[0].iov_len  = sizeof(whdr);
@@ -112,7 +114,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
 
        serial = atomic_inc_return(&conn->serial);
        whdr.serial = htonl(serial);
-       _proto("Tx CONN ABORT %%%u { %d }", serial, abort_code);
+       _proto("Tx CONN ABORT %%%u { %d }", serial, conn->local_abort);
 
        ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
        if (ret < 0) {
@@ -172,15 +174,10 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
                return -ECONNABORTED;
 
        case RXRPC_PACKET_TYPE_CHALLENGE:
-               if (conn->security)
-                       return conn->security->respond_to_challenge(
-                               conn, skb, _abort_code);
-               return -EPROTO;
+               return conn->security->respond_to_challenge(conn, skb,
+                                                           _abort_code);
 
        case RXRPC_PACKET_TYPE_RESPONSE:
-               if (!conn->security)
-                       return -EPROTO;
-
                ret = conn->security->verify_response(conn, skb, _abort_code);
                if (ret < 0)
                        return ret;
@@ -236,8 +233,6 @@ static void rxrpc_secure_connection(struct rxrpc_connection *conn)
                }
        }
 
-       ASSERT(conn->security != NULL);
-
        if (conn->security->issue_challenge(conn) < 0) {
                abort_code = RX_CALL_DEAD;
                ret = -ENOMEM;
index 63ed75c40e29a1739470f90e21d5f60d1bab8fe1..6ff97412a0bb15450b42f412be02ab8d3d2edbd2 100644
 #include <net/net_namespace.h>
 #include "ar-internal.h"
 
-const char *rxrpc_pkts[] = {
-       "?00",
-       "DATA", "ACK", "BUSY", "ABORT", "ACKALL", "CHALL", "RESP", "DEBUG",
-       "?09", "?10", "?11", "?12", "VERSION", "?14", "?15"
-};
-
 /*
  * queue a packet for recvmsg to pass to userspace
  * - the caller must hold a lock on call->lock
@@ -199,7 +193,7 @@ static int rxrpc_fast_process_data(struct rxrpc_call *call,
 
        /* if the packet need security things doing to it, then it goes down
         * the slow path */
-       if (call->conn->security)
+       if (call->conn->security_ix)
                goto enqueue_packet;
 
        sp->call = call;
@@ -355,7 +349,7 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
                write_lock_bh(&call->state_lock);
                if (call->state < RXRPC_CALL_COMPLETE) {
                        call->state = RXRPC_CALL_REMOTELY_ABORTED;
-                       call->abort_code = abort_code;
+                       call->remote_abort = abort_code;
                        set_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events);
                        rxrpc_queue_call(call);
                }
@@ -428,7 +422,7 @@ protocol_error:
 protocol_error_locked:
        if (call->state <= RXRPC_CALL_COMPLETE) {
                call->state = RXRPC_CALL_LOCALLY_ABORTED;
-               call->abort_code = RX_PROTOCOL_ERROR;
+               call->local_abort = RX_PROTOCOL_ERROR;
                set_bit(RXRPC_CALL_EV_ABORT, &call->events);
                rxrpc_queue_call(call);
        }
@@ -500,7 +494,7 @@ protocol_error:
        write_lock_bh(&call->state_lock);
        if (call->state <= RXRPC_CALL_COMPLETE) {
                call->state = RXRPC_CALL_LOCALLY_ABORTED;
-               call->abort_code = RX_PROTOCOL_ERROR;
+               call->local_abort = RX_PROTOCOL_ERROR;
                set_bit(RXRPC_CALL_EV_ABORT, &call->events);
                rxrpc_queue_call(call);
        }
@@ -612,9 +606,9 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
        struct rxrpc_wire_header whdr;
 
        /* dig out the RxRPC connection details */
-       if (skb_copy_bits(skb, sizeof(struct udphdr), &whdr, sizeof(whdr)) < 0)
+       if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0)
                return -EBADMSG;
-       if (!pskb_pull(skb, sizeof(struct udphdr) + sizeof(whdr)))
+       if (!pskb_pull(skb, sizeof(whdr)))
                BUG();
 
        memset(sp, 0, sizeof(*sp));
@@ -704,12 +698,12 @@ void rxrpc_data_ready(struct sock *sk)
        if (skb_checksum_complete(skb)) {
                rxrpc_free_skb(skb);
                rxrpc_put_local(local);
-               UDP_INC_STATS_BH(&init_net, UDP_MIB_INERRORS, 0);
+               __UDP_INC_STATS(&init_net, UDP_MIB_INERRORS, 0);
                _leave(" [CSUM failed]");
                return;
        }
 
-       UDP_INC_STATS_BH(&init_net, UDP_MIB_INDATAGRAMS, 0);
+       __UDP_INC_STATS(&init_net, UDP_MIB_INDATAGRAMS, 0);
 
        /* The socket buffer we have is owned by UDP, with UDP's data all over
         * it, but we really want our own data there.
index cd6cdbe87125a827186b85cbd8286dea9d8aab3e..f0b807a163fa3d7d10cc4ddb7fd66c0129a7ca44 100644 (file)
@@ -9,6 +9,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#include <net/sock.h>
 #include <rxrpc/packet.h>
 
 #if 0
@@ -124,11 +125,15 @@ enum rxrpc_command {
  * RxRPC security module interface
  */
 struct rxrpc_security {
-       struct module           *owner;         /* providing module */
-       struct list_head        link;           /* link in master list */
        const char              *name;          /* name of this service */
        u8                      security_index; /* security type provided */
 
+       /* Initialise a security service */
+       int (*init)(void);
+
+       /* Clean up a security service */
+       void (*exit)(void);
+
        /* initialise a connection's security */
        int (*init_connection_security)(struct rxrpc_connection *);
 
@@ -268,7 +273,7 @@ struct rxrpc_connection {
        struct rb_root          calls;          /* calls on this connection */
        struct sk_buff_head     rx_queue;       /* received conn-level packets */
        struct rxrpc_call       *channels[RXRPC_MAXCALLS]; /* channels (active calls) */
-       struct rxrpc_security   *security;      /* applied security module */
+       const struct rxrpc_security *security;  /* applied security module */
        struct key              *key;           /* security for this connection (client) */
        struct key              *server_key;    /* security for this service */
        struct crypto_skcipher  *cipher;        /* encryption handle */
@@ -289,7 +294,9 @@ struct rxrpc_connection {
                RXRPC_CONN_LOCALLY_ABORTED,     /* - conn aborted locally */
                RXRPC_CONN_NETWORK_ERROR,       /* - conn terminated by network error */
        } state;
-       int                     error;          /* error code for local abort */
+       u32                     local_abort;    /* local abort code */
+       u32                     remote_abort;   /* remote abort code */
+       int                     error;          /* local error incurred */
        int                     debug_id;       /* debug ID for printks */
        unsigned int            call_counter;   /* call ID counter */
        atomic_t                serial;         /* packet serial number counter */
@@ -399,7 +406,9 @@ struct rxrpc_call {
        rwlock_t                state_lock;     /* lock for state transition */
        atomic_t                usage;
        atomic_t                sequence;       /* Tx data packet sequence counter */
-       u32                     abort_code;     /* local/remote abort code */
+       u32                     local_abort;    /* local abort code */
+       u32                     remote_abort;   /* remote abort code */
+       int                     error;          /* local error incurred */
        enum rxrpc_call_state   state : 8;      /* current state of call */
        int                     debug_id;       /* debug ID for printks */
        u8                      channel;        /* connection channel occupied by this call */
@@ -453,7 +462,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
 {
        write_lock_bh(&call->state_lock);
        if (call->state < RXRPC_CALL_COMPLETE) {
-               call->abort_code = abort_code;
+               call->local_abort = abort_code;
                call->state = RXRPC_CALL_LOCALLY_ABORTED;
                set_bit(RXRPC_CALL_EV_ABORT, &call->events);
        }
@@ -478,13 +487,6 @@ int rxrpc_reject_call(struct rxrpc_sock *);
 /*
  * ar-ack.c
  */
-extern unsigned int rxrpc_requested_ack_delay;
-extern unsigned int rxrpc_soft_ack_delay;
-extern unsigned int rxrpc_idle_ack_delay;
-extern unsigned int rxrpc_rx_window_size;
-extern unsigned int rxrpc_rx_mtu;
-extern unsigned int rxrpc_rx_jumbo_max;
-
 void __rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool);
 void rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool);
 void rxrpc_process_call(struct work_struct *);
@@ -506,7 +508,7 @@ struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *,
                                         unsigned long, int, gfp_t);
 struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *,
                                       struct rxrpc_connection *,
-                                      struct rxrpc_host_header *, gfp_t);
+                                      struct rxrpc_host_header *);
 struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *, unsigned long);
 void rxrpc_release_call(struct rxrpc_call *);
 void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
@@ -531,8 +533,7 @@ void __exit rxrpc_destroy_all_connections(void);
 struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *,
                                               struct rxrpc_host_header *);
 extern struct rxrpc_connection *
-rxrpc_incoming_connection(struct rxrpc_transport *, struct rxrpc_host_header *,
-                         gfp_t);
+rxrpc_incoming_connection(struct rxrpc_transport *, struct rxrpc_host_header *);
 
 /*
  * ar-connevent.c
@@ -550,8 +551,6 @@ void rxrpc_UDP_error_handler(struct work_struct *);
 /*
  * ar-input.c
  */
-extern const char *rxrpc_pkts[];
-
 void rxrpc_data_ready(struct sock *);
 int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool, bool);
 void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *);
@@ -610,14 +609,10 @@ int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);
 /*
  * ar-security.c
  */
-int rxrpc_register_security(struct rxrpc_security *);
-void rxrpc_unregister_security(struct rxrpc_security *);
+int __init rxrpc_init_security(void);
+void rxrpc_exit_security(void);
 int rxrpc_init_client_conn_security(struct rxrpc_connection *);
 int rxrpc_init_server_conn_security(struct rxrpc_connection *);
-int rxrpc_secure_packet(const struct rxrpc_call *, struct sk_buff *, size_t,
-                       void *);
-int rxrpc_verify_packet(const struct rxrpc_call *, struct sk_buff *, u32 *);
-void rxrpc_clear_conn_security(struct rxrpc_connection *);
 
 /*
  * ar-skbuff.c
@@ -636,6 +631,33 @@ void __exit rxrpc_destroy_all_transports(void);
 struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *,
                                             struct rxrpc_peer *);
 
+/*
+ * insecure.c
+ */
+extern const struct rxrpc_security rxrpc_no_security;
+
+/*
+ * misc.c
+ */
+extern unsigned int rxrpc_requested_ack_delay;
+extern unsigned int rxrpc_soft_ack_delay;
+extern unsigned int rxrpc_idle_ack_delay;
+extern unsigned int rxrpc_rx_window_size;
+extern unsigned int rxrpc_rx_mtu;
+extern unsigned int rxrpc_rx_jumbo_max;
+
+extern const char *const rxrpc_pkts[];
+extern const s8 rxrpc_ack_priority[];
+
+extern const char *rxrpc_acks(u8 reason);
+
+/*
+ * rxkad.c
+ */
+#ifdef CONFIG_RXKAD
+extern const struct rxrpc_security rxkad;
+#endif
+
 /*
  * sysctl.c
  */
index d36fb6e1a29ca64a7db3ecb1686738150bd8b50c..51cb10062a8dd2ad877e57a5ba49cc8ea1da0159 100644 (file)
@@ -110,7 +110,7 @@ static void rxrpc_send_abort(struct rxrpc_call *call, u32 abort_code)
 
        if (call->state <= RXRPC_CALL_COMPLETE) {
                call->state = RXRPC_CALL_LOCALLY_ABORTED;
-               call->abort_code = abort_code;
+               call->local_abort = abort_code;
                set_bit(RXRPC_CALL_EV_ABORT, &call->events);
                del_timer_sync(&call->resend_timer);
                del_timer_sync(&call->ack_timer);
@@ -663,7 +663,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
                        size_t pad;
 
                        /* pad out if we're using security */
-                       if (conn->security) {
+                       if (conn->security_ix) {
                                pad = conn->security_size + skb->mark;
                                pad = conn->size_align - pad;
                                pad &= conn->size_align - 1;
@@ -695,7 +695,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
                        if (more && seq & 1)
                                sp->hdr.flags |= RXRPC_REQUEST_ACK;
 
-                       ret = rxrpc_secure_packet(
+                       ret = conn->security->secure_packet(
                                call, skb, skb->mark,
                                skb->head + sizeof(struct rxrpc_wire_header));
                        if (ret < 0)
index 525b2ba5a8f4095105d27833dbb948cd26c75b7b..225163bc658d518d7fc8ca0276e84fc9ddd8b3d9 100644 (file)
@@ -80,7 +80,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
                   call->conn->in_clientflag ? "Svc" : "Clt",
                   atomic_read(&call->usage),
                   rxrpc_call_states[call->state],
-                  call->abort_code,
+                  call->remote_abort ?: call->local_abort,
                   call->user_call_ID);
 
        return 0;
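
The call->remote_abort ?: call->local_abort expression above uses the GNU C conditional with omitted middle operand: a ?: b yields a when a is non-zero and b otherwise, evaluating a only once. A minimal sketch with hypothetical values (GCC/Clang extension, not standard C):

    #include <stdio.h>

    int main(void)
    {
        unsigned int remote_abort = 0;   /* no remote abort recorded */
        unsigned int local_abort  = 17;  /* hypothetical local abort code */

        /* GNU extension: x ?: y is x ? x : y with x evaluated once. */
        unsigned int shown = remote_abort ?: local_abort;

        printf("%u\n", shown);           /* prints 17 */
        return 0;
    }
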
index 64facba24a4507b97f7612497a2f18aa6ff19466..160f0927aa3e8921ec030a0b2733cab71c2961be 100644 (file)
@@ -288,7 +288,11 @@ receive_non_data_message:
                ret = put_cmsg(msg, SOL_RXRPC, RXRPC_BUSY, 0, &abort_code);
                break;
        case RXRPC_SKB_MARK_REMOTE_ABORT:
-               abort_code = call->abort_code;
+               abort_code = call->remote_abort;
+               ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &abort_code);
+               break;
+       case RXRPC_SKB_MARK_LOCAL_ABORT:
+               abort_code = call->local_abort;
                ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &abort_code);
                break;
        case RXRPC_SKB_MARK_NET_ERROR:
@@ -303,6 +307,7 @@ receive_non_data_message:
                               &abort_code);
                break;
        default:
+               pr_err("RxRPC: Unknown packet mark %u\n", skb->mark);
                BUG();
                break;
        }
@@ -401,9 +406,14 @@ u32 rxrpc_kernel_get_abort_code(struct sk_buff *skb)
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
 
-       ASSERTCMP(skb->mark, ==, RXRPC_SKB_MARK_REMOTE_ABORT);
-
-       return sp->call->abort_code;
+       switch (skb->mark) {
+       case RXRPC_SKB_MARK_REMOTE_ABORT:
+               return sp->call->remote_abort;
+       case RXRPC_SKB_MARK_LOCAL_ABORT:
+               return sp->call->local_abort;
+       default:
+               BUG();
+       }
 }
 
 EXPORT_SYMBOL(rxrpc_kernel_get_abort_code);
index ceff6394a65f62e118f56aa599ccafb82dfa6cad..d223253b22fa05f5aa61e4b375b608e7bb6af3e4 100644 (file)
 static LIST_HEAD(rxrpc_security_methods);
 static DECLARE_RWSEM(rxrpc_security_sem);
 
-/*
- * get an RxRPC security module
- */
-static struct rxrpc_security *rxrpc_security_get(struct rxrpc_security *sec)
-{
-       return try_module_get(sec->owner) ? sec : NULL;
-}
-
-/*
- * release an RxRPC security module
- */
-static void rxrpc_security_put(struct rxrpc_security *sec)
+static const struct rxrpc_security *rxrpc_security_types[] = {
+       [RXRPC_SECURITY_NONE]   = &rxrpc_no_security,
+#ifdef CONFIG_RXKAD
+       [RXRPC_SECURITY_RXKAD]  = &rxkad,
+#endif
+};
+
+int __init rxrpc_init_security(void)
 {
-       module_put(sec->owner);
-}
-
-/*
- * look up an rxrpc security module
- */
-static struct rxrpc_security *rxrpc_security_lookup(u8 security_index)
-{
-       struct rxrpc_security *sec = NULL;
-
-       _enter("");
+       int i, ret;
 
-       down_read(&rxrpc_security_sem);
-
-       list_for_each_entry(sec, &rxrpc_security_methods, link) {
-               if (sec->security_index == security_index) {
-                       if (unlikely(!rxrpc_security_get(sec)))
-                               break;
-                       goto out;
+       for (i = 0; i < ARRAY_SIZE(rxrpc_security_types); i++) {
+               if (rxrpc_security_types[i]) {
+                       ret = rxrpc_security_types[i]->init();
+                       if (ret < 0)
+                               goto failed;
                }
        }
 
-       sec = NULL;
-out:
-       up_read(&rxrpc_security_sem);
-       _leave(" = %p [%s]", sec, sec ? sec->name : "");
-       return sec;
+       return 0;
+
+failed:
+       for (i--; i >= 0; i--)
+               if (rxrpc_security_types[i])
+                       rxrpc_security_types[i]->exit();
+       return ret;
 }
 
-/**
- * rxrpc_register_security - register an RxRPC security handler
- * @sec: security module
- *
- * register an RxRPC security handler for use by RxRPC
- */
-int rxrpc_register_security(struct rxrpc_security *sec)
+void rxrpc_exit_security(void)
 {
-       struct rxrpc_security *psec;
-       int ret;
+       int i;
 
-       _enter("");
-       down_write(&rxrpc_security_sem);
-
-       ret = -EEXIST;
-       list_for_each_entry(psec, &rxrpc_security_methods, link) {
-               if (psec->security_index == sec->security_index)
-                       goto out;
-       }
-
-       list_add(&sec->link, &rxrpc_security_methods);
-
-       printk(KERN_NOTICE "RxRPC: Registered security type %d '%s'\n",
-              sec->security_index, sec->name);
-       ret = 0;
-
-out:
-       up_write(&rxrpc_security_sem);
-       _leave(" = %d", ret);
-       return ret;
+       for (i = 0; i < ARRAY_SIZE(rxrpc_security_types); i++)
+               if (rxrpc_security_types[i])
+                       rxrpc_security_types[i]->exit();
 }
 
-EXPORT_SYMBOL_GPL(rxrpc_register_security);
-
-/**
- * rxrpc_unregister_security - unregister an RxRPC security handler
- * @sec: security module
- *
- * unregister an RxRPC security handler
+/*
+ * look up an rxrpc security module
  */
-void rxrpc_unregister_security(struct rxrpc_security *sec)
+static const struct rxrpc_security *rxrpc_security_lookup(u8 security_index)
 {
-
-       _enter("");
-       down_write(&rxrpc_security_sem);
-       list_del_init(&sec->link);
-       up_write(&rxrpc_security_sem);
-
-       printk(KERN_NOTICE "RxRPC: Unregistered security type %d '%s'\n",
-              sec->security_index, sec->name);
+       if (security_index >= ARRAY_SIZE(rxrpc_security_types))
+               return NULL;
+       return rxrpc_security_types[security_index];
 }
 
-EXPORT_SYMBOL_GPL(rxrpc_unregister_security);
-
 /*
  * initialise the security on a client connection
  */
 int rxrpc_init_client_conn_security(struct rxrpc_connection *conn)
 {
+       const struct rxrpc_security *sec;
        struct rxrpc_key_token *token;
-       struct rxrpc_security *sec;
        struct key *key = conn->key;
        int ret;
 
@@ -148,8 +99,7 @@ int rxrpc_init_client_conn_security(struct rxrpc_connection *conn)
 
        ret = conn->security->init_connection_security(conn);
        if (ret < 0) {
-               rxrpc_security_put(conn->security);
-               conn->security = NULL;
+               conn->security = &rxrpc_no_security;
                return ret;
        }
 
@@ -162,7 +112,7 @@ int rxrpc_init_client_conn_security(struct rxrpc_connection *conn)
  */
 int rxrpc_init_server_conn_security(struct rxrpc_connection *conn)
 {
-       struct rxrpc_security *sec;
+       const struct rxrpc_security *sec;
        struct rxrpc_local *local = conn->trans->local;
        struct rxrpc_sock *rx;
        struct key *key;
@@ -188,14 +138,12 @@ int rxrpc_init_server_conn_security(struct rxrpc_connection *conn)
 
        /* the service appears to have died */
        read_unlock_bh(&local->services_lock);
-       rxrpc_security_put(sec);
        _leave(" = -ENOENT");
        return -ENOENT;
 
 found_service:
        if (!rx->securities) {
                read_unlock_bh(&local->services_lock);
-               rxrpc_security_put(sec);
                _leave(" = -ENOKEY");
                return -ENOKEY;
        }
@@ -205,7 +153,6 @@ found_service:
                              &key_type_rxrpc_s, kdesc);
        if (IS_ERR(kref)) {
                read_unlock_bh(&local->services_lock);
-               rxrpc_security_put(sec);
                _leave(" = %ld [search]", PTR_ERR(kref));
                return PTR_ERR(kref);
        }
@@ -219,46 +166,3 @@ found_service:
        _leave(" = 0");
        return 0;
 }
-
-/*
- * secure a packet prior to transmission
- */
-int rxrpc_secure_packet(const struct rxrpc_call *call,
-                       struct sk_buff *skb,
-                       size_t data_size,
-                       void *sechdr)
-{
-       if (call->conn->security)
-               return call->conn->security->secure_packet(
-                       call, skb, data_size, sechdr);
-       return 0;
-}
-
-/*
- * secure a packet prior to transmission
- */
-int rxrpc_verify_packet(const struct rxrpc_call *call, struct sk_buff *skb,
-                       u32 *_abort_code)
-{
-       if (call->conn->security)
-               return call->conn->security->verify_packet(
-                       call, skb, _abort_code);
-       return 0;
-}
-
-/*
- * clear connection security
- */
-void rxrpc_clear_conn_security(struct rxrpc_connection *conn)
-{
-       _enter("{%d}", conn->debug_id);
-
-       if (conn->security) {
-               conn->security->clear(conn);
-               rxrpc_security_put(conn->security);
-               conn->security = NULL;
-       }
-
-       key_put(conn->key);
-       key_put(conn->server_key);
-}
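
Taken together, the ar-security.c changes replace runtime registration (a list under an rwsem plus module refcounting) with a fixed compile-time table indexed by the on-the-wire security index; a bounds check stands in for the old list walk. The same table-lookup pattern as a self-contained sketch, with toy types rather than the kernel's:

    #include <stdio.h>

    #define SECURITY_NONE  0
    #define SECURITY_RXKAD 2
    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct security_ops { const char *name; };

    static const struct security_ops no_security = { "none" };
    static const struct security_ops rxkad_like  = { "rxkad" };

    static const struct security_ops *security_types[] = {
        [SECURITY_NONE]  = &no_security,
        [SECURITY_RXKAD] = &rxkad_like,      /* index 1 stays NULL */
    };

    static const struct security_ops *security_lookup(unsigned char ix)
    {
        if (ix >= ARRAY_SIZE(security_types))
            return NULL;                     /* out-of-range wire value */
        return security_types[ix];           /* may be NULL for a gap */
    }

    int main(void)
    {
        const struct security_ops *sec = security_lookup(SECURITY_RXKAD);

        printf("%s\n", sec ? sec->name : "(unsupported)");
        return 0;
    }
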
diff --git a/net/rxrpc/insecure.c b/net/rxrpc/insecure.c
new file mode 100644 (file)
index 0000000..e571403
--- /dev/null
@@ -0,0 +1,83 @@
+/* Null security operations.
+ *
+ * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <net/af_rxrpc.h>
+#include "ar-internal.h"
+
+static int none_init_connection_security(struct rxrpc_connection *conn)
+{
+       return 0;
+}
+
+static void none_prime_packet_security(struct rxrpc_connection *conn)
+{
+}
+
+static int none_secure_packet(const struct rxrpc_call *call,
+                              struct sk_buff *skb,
+                              size_t data_size,
+                              void *sechdr)
+{
+       return 0;
+}
+
+static int none_verify_packet(const struct rxrpc_call *call,
+                              struct sk_buff *skb,
+                              u32 *_abort_code)
+{
+       return 0;
+}
+
+static int none_respond_to_challenge(struct rxrpc_connection *conn,
+                                     struct sk_buff *skb,
+                                     u32 *_abort_code)
+{
+       *_abort_code = RX_PROTOCOL_ERROR;
+       return -EPROTO;
+}
+
+static int none_verify_response(struct rxrpc_connection *conn,
+                                struct sk_buff *skb,
+                                u32 *_abort_code)
+{
+       *_abort_code = RX_PROTOCOL_ERROR;
+       return -EPROTO;
+}
+
+static void none_clear(struct rxrpc_connection *conn)
+{
+}
+
+static int none_init(void)
+{
+       return 0;
+}
+
+static void none_exit(void)
+{
+}
+
+/*
+ * RxRPC null security
+ */
+const struct rxrpc_security rxrpc_no_security = {
+       .name                           = "none",
+       .security_index                 = RXRPC_SECURITY_NONE,
+       .init                           = none_init,
+       .exit                           = none_exit,
+       .init_connection_security       = none_init_connection_security,
+       .prime_packet_security          = none_prime_packet_security,
+       .secure_packet                  = none_secure_packet,
+       .verify_packet                  = none_verify_packet,
+       .respond_to_challenge           = none_respond_to_challenge,
+       .verify_response                = none_verify_response,
+       .clear                          = none_clear,
+};
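
rxrpc_no_security is a null object: every connection now carries a usable ops table, which is why the NULL checks on conn->security and the rxrpc_secure_packet()/rxrpc_verify_packet() wrappers could be deleted above. A toy, compilable illustration of the idiom (stand-in types, not rxrpc's):

    #include <stdio.h>

    struct conn;

    struct security_ops {
        const char *name;
        int (*secure_packet)(struct conn *conn);
    };

    static int none_secure_packet(struct conn *conn)
    {
        return 0;                            /* nothing to do */
    }

    static const struct security_ops no_security = {
        .name          = "none",
        .secure_packet = none_secure_packet,
    };

    struct conn {
        const struct security_ops *security; /* never NULL by construction */
    };

    int main(void)
    {
        struct conn c = { .security = &no_security };

        /* The hot path can call through the ops unconditionally. */
        printf("%s -> %d\n", c.security->name, c.security->secure_packet(&c));
        return 0;
    }
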
diff --git a/net/rxrpc/misc.c b/net/rxrpc/misc.c
new file mode 100644 (file)
index 0000000..1afe987
--- /dev/null
@@ -0,0 +1,89 @@
+/* Miscellaneous bits
+ *
+ * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <net/sock.h>
+#include <net/af_rxrpc.h>
+#include "ar-internal.h"
+
+/*
+ * How long to wait before scheduling ACK generation after seeing a
+ * packet with RXRPC_REQUEST_ACK set (in jiffies).
+ */
+unsigned int rxrpc_requested_ack_delay = 1;
+
+/*
+ * How long to wait before scheduling an ACK with subtype DELAY (in jiffies).
+ *
+ * We use this when we've received new data packets.  If those packets aren't
+ * all consumed within this time we will send a DELAY ACK, if an ACK was not
+ * requested, to let the sender know it doesn't need to resend.
+ */
+unsigned int rxrpc_soft_ack_delay = 1 * HZ;
+
+/*
+ * How long to wait before scheduling an ACK with subtype IDLE (in jiffies).
+ *
+ * We use this when we've consumed some previously soft-ACK'd packets and no
+ * further packets are immediately arriving, to decide when to send an IDLE
+ * ACK to let the other end know that it can free up its Tx buffer space.
+ */
+unsigned int rxrpc_idle_ack_delay = 0.5 * HZ;
+
+/*
+ * Receive window size in packets.  This indicates the maximum number of
+ * unconsumed received packets we're willing to retain in memory.  Once this
+ * limit is hit, we should generate an EXCEEDS_WINDOW ACK and discard further
+ * packets.
+ */
+unsigned int rxrpc_rx_window_size = 32;
+
+/*
+ * Maximum Rx MTU size.  This indicates to the sender the size of a jumbo
+ * packet, made by gluing normal packets together, that we're willing to handle.
+ */
+unsigned int rxrpc_rx_mtu = 5692;
+
+/*
+ * The maximum number of fragments in a received jumbo packet that we tell the
+ * sender that we're willing to handle.
+ */
+unsigned int rxrpc_rx_jumbo_max = 4;
+
+const char *const rxrpc_pkts[] = {
+       "?00",
+       "DATA", "ACK", "BUSY", "ABORT", "ACKALL", "CHALL", "RESP", "DEBUG",
+       "?09", "?10", "?11", "?12", "VERSION", "?14", "?15"
+};
+
+const s8 rxrpc_ack_priority[] = {
+       [0]                             = 0,
+       [RXRPC_ACK_DELAY]               = 1,
+       [RXRPC_ACK_REQUESTED]           = 2,
+       [RXRPC_ACK_IDLE]                = 3,
+       [RXRPC_ACK_PING_RESPONSE]       = 4,
+       [RXRPC_ACK_DUPLICATE]           = 5,
+       [RXRPC_ACK_OUT_OF_SEQUENCE]     = 6,
+       [RXRPC_ACK_EXCEEDS_WINDOW]      = 7,
+       [RXRPC_ACK_NOSPACE]             = 8,
+};
+
+const char *rxrpc_acks(u8 reason)
+{
+       static const char *const str[] = {
+               "---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY",
+               "IDL", "-?-"
+       };
+
+       if (reason >= ARRAY_SIZE(str))
+               reason = ARRAY_SIZE(str) - 1;
+       return str[reason];
+}
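
rxrpc_acks() clamps any out-of-range reason byte to the trailing "-?-" sentinel instead of indexing past the table, so a value taken straight off the wire can be printed safely. The same defensive lookup in stand-alone form:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const char *ack_name(unsigned char reason)
    {
        static const char *const str[] = {
            "---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR",
            "DLY", "IDL", "-?-"
        };

        if (reason >= ARRAY_SIZE(str))
            reason = ARRAY_SIZE(str) - 1;    /* clamp to the sentinel */
        return str[reason];
    }

    int main(void)
    {
        printf("%s %s\n", ack_name(1), ack_name(200)); /* REQ -?- */
        return 0;
    }
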
index f0aeb8163688e6f4167874d9a6ac0f3bd270891d..6b726a046a7d47995ecc281f67979073fe02c102 100644 (file)
@@ -20,7 +20,6 @@
 #include <net/sock.h>
 #include <net/af_rxrpc.h>
 #include <keys/rxrpc-type.h>
-#define rxrpc_debug rxkad_debug
 #include "ar-internal.h"
 
 #define RXKAD_VERSION                  2
 #define REALM_SZ                       40      /* size of principal's auth domain */
 #define SNAME_SZ                       40      /* size of service name */
 
-unsigned int rxrpc_debug;
-module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(debug, "rxkad debugging mask");
-
 struct rxkad_level1_hdr {
        __be32  data_size;      /* true data size (excluding padding) */
 };
@@ -44,10 +39,6 @@ struct rxkad_level2_hdr {
        __be32  checksum;       /* decrypted data checksum */
 };
 
-MODULE_DESCRIPTION("RxRPC network protocol type-2 security (Kerberos 4)");
-MODULE_AUTHOR("Red Hat, Inc.");
-MODULE_LICENSE("GPL");
-
 /*
  * this holds a pinned cipher so that keventd doesn't get called by the cipher
  * alloc routine, but since we have it to hand, we use it to decrypt RESPONSE
@@ -1163,13 +1154,36 @@ static void rxkad_clear(struct rxrpc_connection *conn)
                crypto_free_skcipher(conn->cipher);
 }
 
+/*
+ * Initialise the rxkad security service.
+ */
+static int rxkad_init(void)
+{
+       /* pin the cipher we need so that the crypto layer doesn't invoke
+        * keventd to go get it */
+       rxkad_ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
+       if (IS_ERR(rxkad_ci))
+               return PTR_ERR(rxkad_ci);
+       return 0;
+}
+
+/*
+ * Clean up the rxkad security service.
+ */
+static void rxkad_exit(void)
+{
+       if (rxkad_ci)
+               crypto_free_skcipher(rxkad_ci);
+}
+
 /*
  * RxRPC Kerberos-based security
  */
-static struct rxrpc_security rxkad = {
-       .owner                          = THIS_MODULE,
+const struct rxrpc_security rxkad = {
        .name                           = "rxkad",
        .security_index                 = RXRPC_SECURITY_RXKAD,
+       .init                           = rxkad_init,
+       .exit                           = rxkad_exit,
        .init_connection_security       = rxkad_init_connection_security,
        .prime_packet_security          = rxkad_prime_packet_security,
        .secure_packet                  = rxkad_secure_packet,
@@ -1179,28 +1193,3 @@ static struct rxrpc_security rxkad = {
        .verify_response                = rxkad_verify_response,
        .clear                          = rxkad_clear,
 };
-
-static __init int rxkad_init(void)
-{
-       _enter("");
-
-       /* pin the cipher we need so that the crypto layer doesn't invoke
-        * keventd to go get it */
-       rxkad_ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
-       if (IS_ERR(rxkad_ci))
-               return PTR_ERR(rxkad_ci);
-
-       return rxrpc_register_security(&rxkad);
-}
-
-module_init(rxkad_init);
-
-static __exit void rxkad_exit(void)
-{
-       _enter("");
-
-       rxrpc_unregister_security(&rxkad);
-       crypto_free_skcipher(rxkad_ci);
-}
-
-module_exit(rxkad_exit);
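
rxkad_init() above follows the kernel's ERR_PTR convention: crypto_alloc_skcipher() returns either a valid object pointer or a negative errno encoded in the pointer value, distinguished with IS_ERR() and decoded with PTR_ERR(). A userspace imitation of the convention (the real macros live in include/linux/err.h):

    #include <stdio.h>

    #define MAX_ERRNO 4095

    static void *ERR_PTR(long error)      { return (void *)error; }
    static long  PTR_ERR(const void *ptr) { return (long)ptr; }

    static int IS_ERR(const void *ptr)
    {
        /* The top page of the address space is reserved for errnos. */
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *alloc_cipher(int fail)
    {
        static int dummy;

        return fail ? ERR_PTR(-12 /* ENOMEM */) : (void *)&dummy;
    }

    int main(void)
    {
        void *ci = alloc_cipher(1);

        if (IS_ERR(ci))
            printf("alloc failed: %ld\n", PTR_ERR(ci));
        return 0;
    }
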
index 96066665e3765d342fd1685ab32f0019c7455564..336774a535c3959f4f25b05d1732c014d0d1763c 100644 (file)
@@ -657,12 +657,15 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
        if (compat_mode) {
                if (a->type == TCA_OLD_COMPAT)
                        err = gnet_stats_start_copy_compat(skb, 0,
-                               TCA_STATS, TCA_XSTATS, &p->tcfc_lock, &d);
+                                                          TCA_STATS,
+                                                          TCA_XSTATS,
+                                                          &p->tcfc_lock, &d,
+                                                          TCA_PAD);
                else
                        return 0;
        } else
                err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
-                                           &p->tcfc_lock, &d);
+                                           &p->tcfc_lock, &d, TCA_ACT_PAD);
 
        if (err < 0)
                goto errout;
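
The nla_put_64bit()/TCA_PAD conversions here and in the files that follow address 64-bit attribute alignment: a netlink attribute header is 4 bytes, so a u64 payload placed right after it can land on a 4-byte boundary, and the new *_PAD attribute type lets the kernel emit a dummy attribute first to restore 8-byte alignment where needed. A toy offset calculation showing the effect (plain arithmetic, not the netlink API):

    #include <stdio.h>

    #define NLA_HDRLEN 4   /* struct nlattr: u16 len + u16 type */

    int main(void)
    {
        unsigned int off = 16;   /* hypothetical 8-aligned attribute start */

        /* Without padding, the u64 payload begins 4 bytes in: misaligned. */
        printf("payload offset %u (mod 8 = %u)\n",
               off + NLA_HDRLEN, (off + NLA_HDRLEN) % 8);

        /* A header-only pad attribute first shifts the real attribute by 4,
         * putting the u64 payload back on an 8-byte boundary. */
        off += NLA_HDRLEN;
        printf("padded payload offset %u (mod 8 = %u)\n",
               off + NLA_HDRLEN, (off + NLA_HDRLEN) % 8);
        return 0;
    }
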
index 8c9f1f0459ab773139112c453fd1540af3a1b94f..c7123e01c2cabc9c58bbe4d3be2c3836d7f5e2e2 100644 (file)
@@ -53,9 +53,11 @@ static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
        filter = rcu_dereference(prog->filter);
        if (at_ingress) {
                __skb_push(skb, skb->mac_len);
+               bpf_compute_data_end(skb);
                filter_res = BPF_PROG_RUN(filter, skb);
                __skb_pull(skb, skb->mac_len);
        } else {
+               bpf_compute_data_end(skb);
                filter_res = BPF_PROG_RUN(filter, skb);
        }
        rcu_read_unlock();
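
bpf_compute_data_end() is called before each BPF_PROG_RUN() (and after the MAC push on the ingress side) so that the cached data_end matches the skb's current layout; eBPF direct packet access bounds-checks loads against that pointer. A rough, compilable sketch of what the helper is assumed to cache, using toy stand-ins for the kernel types:

    #include <stdio.h>

    /* Toy stand-ins; not the real kernel layouts. */
    struct sk_buff {
        char           cb[48];  /* control buffer, kept first for alignment */
        unsigned char *data;
        unsigned int   len;     /* linear data length, simplified */
    };

    struct bpf_skb_data_end {
        void *data_end;         /* the bound BPF programs compare against */
    };

    static void compute_data_end(struct sk_buff *skb)
    {
        struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;

        cb->data_end = skb->data + skb->len;
    }

    int main(void)
    {
        unsigned char buf[64];
        struct sk_buff skb = { .data = buf + 14, .len = 50 };

        /* Ingress: push the MAC header back on, then refresh the bound. */
        skb.data -= 14;
        skb.len  += 14;
        compute_data_end(&skb);

        printf("data_end at offset %ld\n", (long)
               ((unsigned char *)((struct bpf_skb_data_end *)skb.cb)->data_end
                - buf));
        return 0;
    }
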
@@ -156,7 +158,8 @@ static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act,
        tm.lastuse = jiffies_to_clock_t(jiffies - prog->tcf_tm.lastuse);
        tm.expires = jiffies_to_clock_t(prog->tcf_tm.expires);
 
-       if (nla_put(skb, TCA_ACT_BPF_TM, sizeof(tm), &tm))
+       if (nla_put_64bit(skb, TCA_ACT_BPF_TM, sizeof(tm), &tm,
+                         TCA_ACT_BPF_PAD))
                goto nla_put_failure;
 
        return skb->len;
index c0ed93ce23910f8da64622f5f2df3eea867ee786..2ba700c765e047f3781708c8789ee63dfcc1a37e 100644 (file)
@@ -163,7 +163,8 @@ static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a,
        t.install = jiffies_to_clock_t(jiffies - ci->tcf_tm.install);
        t.lastuse = jiffies_to_clock_t(jiffies - ci->tcf_tm.lastuse);
        t.expires = jiffies_to_clock_t(ci->tcf_tm.expires);
-       if (nla_put(skb, TCA_CONNMARK_TM, sizeof(t), &t))
+       if (nla_put_64bit(skb, TCA_CONNMARK_TM, sizeof(t), &t,
+                         TCA_CONNMARK_PAD))
                goto nla_put_failure;
 
        return skb->len;
index d22426cdebc08b95357d3e7f38b237d5cd60915f..28e934ed038a6f652d9aaa1b7d82dffd642d73a7 100644 (file)
@@ -549,7 +549,7 @@ static int tcf_csum_dump(struct sk_buff *skb,
        t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
        t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
        t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
-       if (nla_put(skb, TCA_CSUM_TM, sizeof(t), &t))
+       if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD))
                goto nla_put_failure;
 
        return skb->len;
index 887fc1f209ff6355ed4c83254a0ea56666942157..1a6e09fbb2a590f99f08cf4a5ebecba5ec4f996f 100644 (file)
@@ -177,7 +177,7 @@ static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int
        t.install = jiffies_to_clock_t(jiffies - gact->tcf_tm.install);
        t.lastuse = jiffies_to_clock_t(jiffies - gact->tcf_tm.lastuse);
        t.expires = jiffies_to_clock_t(gact->tcf_tm.expires);
-       if (nla_put(skb, TCA_GACT_TM, sizeof(t), &t))
+       if (nla_put_64bit(skb, TCA_GACT_TM, sizeof(t), &t, TCA_GACT_PAD))
                goto nla_put_failure;
        return skb->len;
 
index c589a9ba506af8ba1376f48162ec8224281a48a0..556f44c9c454b2519971671943d6aa3540132e41 100644 (file)
@@ -550,7 +550,7 @@ static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
        t.install = jiffies_to_clock_t(jiffies - ife->tcf_tm.install);
        t.lastuse = jiffies_to_clock_t(jiffies - ife->tcf_tm.lastuse);
        t.expires = jiffies_to_clock_t(ife->tcf_tm.expires);
-       if (nla_put(skb, TCA_IFE_TM, sizeof(t), &t))
+       if (nla_put_64bit(skb, TCA_IFE_TM, sizeof(t), &t, TCA_IFE_PAD))
                goto nla_put_failure;
 
        if (!is_zero_ether_addr(ife->eth_dst)) {
index 350e134cffb32b04f3e4c2b4b3917051cd55b456..1464f6a09446bf0e36985d0921c9695b0e84b68d 100644 (file)
@@ -275,7 +275,7 @@ static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int
        tm.install = jiffies_to_clock_t(jiffies - ipt->tcf_tm.install);
        tm.lastuse = jiffies_to_clock_t(jiffies - ipt->tcf_tm.lastuse);
        tm.expires = jiffies_to_clock_t(ipt->tcf_tm.expires);
-       if (nla_put(skb, TCA_IPT_TM, sizeof (tm), &tm))
+       if (nla_put_64bit(skb, TCA_IPT_TM, sizeof(tm), &tm, TCA_IPT_PAD))
                goto nla_put_failure;
        kfree(t);
        return skb->len;
index e8a760cf7775ea1e3c9522376c3e3111a54a2ba6..dea57c1ec90c31f73d3edfde057a92e0693590ab 100644 (file)
@@ -214,7 +214,7 @@ static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, i
        t.install = jiffies_to_clock_t(jiffies - m->tcf_tm.install);
        t.lastuse = jiffies_to_clock_t(jiffies - m->tcf_tm.lastuse);
        t.expires = jiffies_to_clock_t(m->tcf_tm.expires);
-       if (nla_put(skb, TCA_MIRRED_TM, sizeof(t), &t))
+       if (nla_put_64bit(skb, TCA_MIRRED_TM, sizeof(t), &t, TCA_MIRRED_PAD))
                goto nla_put_failure;
        return skb->len;
 
index 0f65cdfbfb1d364529feaf701f1c188bb9570973..c0a879f940de1b8d7477936a50bb6beee91176aa 100644 (file)
@@ -267,7 +267,7 @@ static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
        t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
        t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
        t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
-       if (nla_put(skb, TCA_NAT_TM, sizeof(t), &t))
+       if (nla_put_64bit(skb, TCA_NAT_TM, sizeof(t), &t, TCA_NAT_PAD))
                goto nla_put_failure;
 
        return skb->len;
index 429c3ab65142671e1d6d12a92b991fa5aec55a04..c6e18f230af690879686d868ad560e091e993443 100644 (file)
@@ -203,7 +203,7 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
        t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
        t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
        t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
-       if (nla_put(skb, TCA_PEDIT_TM, sizeof(t), &t))
+       if (nla_put_64bit(skb, TCA_PEDIT_TM, sizeof(t), &t, TCA_PEDIT_PAD))
                goto nla_put_failure;
        kfree(opt);
        return skb->len;
index 75b2be13fbcc452da0a1379dd693fd9693b75e4d..2057fd56d74c401edc8a43257e5144c4375a5495 100644 (file)
@@ -155,7 +155,7 @@ static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
        t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
        t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse);
        t.expires = jiffies_to_clock_t(d->tcf_tm.expires);
-       if (nla_put(skb, TCA_DEF_TM, sizeof(t), &t))
+       if (nla_put_64bit(skb, TCA_DEF_TM, sizeof(t), &t, TCA_DEF_PAD))
                goto nla_put_failure;
        return skb->len;
 
index cfcdbdc00c9bfef4702ae09875080cc0b3df11ea..51b24998904f68764c5232ec8c6b7d5a0b053653 100644 (file)
@@ -167,7 +167,7 @@ static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
        t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
        t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse);
        t.expires = jiffies_to_clock_t(d->tcf_tm.expires);
-       if (nla_put(skb, TCA_SKBEDIT_TM, sizeof(t), &t))
+       if (nla_put_64bit(skb, TCA_SKBEDIT_TM, sizeof(t), &t, TCA_SKBEDIT_PAD))
                goto nla_put_failure;
        return skb->len;
 
index bab8ae0cefc08800678415b163921c67a0e50e4e..c1682ab9bc7ec342ea8a6322eca0595bdfaba5e4 100644 (file)
@@ -175,7 +175,7 @@ static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a,
        t.install = jiffies_to_clock_t(jiffies - v->tcf_tm.install);
        t.lastuse = jiffies_to_clock_t(jiffies - v->tcf_tm.lastuse);
        t.expires = jiffies_to_clock_t(v->tcf_tm.expires);
-       if (nla_put(skb, TCA_VLAN_TM, sizeof(t), &t))
+       if (nla_put_64bit(skb, TCA_VLAN_TM, sizeof(t), &t, TCA_VLAN_PAD))
                goto nla_put_failure;
        return skb->len;
 
index 425fe6a0eda33e47952802956c0c48aea67f80df..7b342c779da7b1f7572f50e6bb45d4b99a1238df 100644 (file)
@@ -96,9 +96,11 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                if (at_ingress) {
                        /* It is safe to push/pull even if skb_shared() */
                        __skb_push(skb, skb->mac_len);
+                       bpf_compute_data_end(skb);
                        filter_res = BPF_PROG_RUN(prog->filter, skb);
                        __skb_pull(skb, skb->mac_len);
                } else {
+                       bpf_compute_data_end(skb);
                        filter_res = BPF_PROG_RUN(prog->filter, skb);
                }
 
index 563cdad764485a2fbce96dcf27c4f777ea7d186e..e64877a3c084339de22c08270d4cdb3a496b5c6d 100644 (file)
@@ -1140,9 +1140,10 @@ static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
                                gpf->kcnts[i] += pf->kcnts[i];
                }
 
-               if (nla_put(skb, TCA_U32_PCNT,
-                           sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
-                           gpf)) {
+               if (nla_put_64bit(skb, TCA_U32_PCNT,
+                                 sizeof(struct tc_u32_pcnt) +
+                                 n->sel.nkeys * sizeof(u64),
+                                 gpf, TCA_U32_PAD)) {
                        kfree(gpf);
                        goto nla_put_failure;
                }
index f2aabc0089da203cd6b4d5b15eea40ad5299babe..a309a07ccb3535453ce3235d3e6a91933282fc3f 100644 (file)
@@ -796,7 +796,7 @@ struct meta_type_ops {
        int     (*dump)(struct sk_buff *, struct meta_value *, int);
 };
 
-static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX + 1] = {
+static const struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX + 1] = {
        [TCF_META_TYPE_VAR] = {
                .destroy = meta_var_destroy,
                .compare = meta_var_compare,
@@ -812,7 +812,7 @@ static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX + 1] = {
        }
 };
 
-static inline struct meta_type_ops *meta_type_ops(struct meta_value *v)
+static inline const struct meta_type_ops *meta_type_ops(struct meta_value *v)
 {
        return &__meta_type_ops[meta_type(v)];
 }
@@ -870,7 +870,7 @@ static int em_meta_match(struct sk_buff *skb, struct tcf_ematch *m,
 static void meta_delete(struct meta_match *meta)
 {
        if (meta) {
-               struct meta_type_ops *ops = meta_type_ops(&meta->lvalue);
+               const struct meta_type_ops *ops = meta_type_ops(&meta->lvalue);
 
                if (ops && ops->destroy) {
                        ops->destroy(&meta->lvalue);
@@ -964,7 +964,7 @@ static int em_meta_dump(struct sk_buff *skb, struct tcf_ematch *em)
 {
        struct meta_match *meta = (struct meta_match *) em->data;
        struct tcf_meta_hdr hdr;
-       struct meta_type_ops *ops;
+       const struct meta_type_ops *ops;
 
        memset(&hdr, 0, sizeof(hdr));
        memcpy(&hdr.left, &meta->lvalue.hdr, sizeof(hdr.left));
index 3b180ff72f79f838deac16ce9fc69e9ef28b2c0c..64f71a2155f3ae8199123c0ee635fc9fa420c35b 100644 (file)
@@ -1365,7 +1365,8 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
                goto nla_put_failure;
 
        if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
-                                        qdisc_root_sleeping_lock(q), &d) < 0)
+                                        qdisc_root_sleeping_lock(q), &d,
+                                        TCA_PAD) < 0)
                goto nla_put_failure;
 
        if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
@@ -1679,7 +1680,8 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
                goto nla_put_failure;
 
        if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
-                                        qdisc_root_sleeping_lock(q), &d) < 0)
+                                        qdisc_root_sleeping_lock(q), &d,
+                                        TCA_PAD) < 0)
                goto nla_put_failure;
 
        if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
index 9b7e2980ee5c64c18477d003957fedd10c1718d5..dddf3bb65a328a2de51a5eb9f6f2945b68cc9d11 100644 (file)
@@ -49,6 +49,8 @@
 #include <linux/prefetch.h>
 #include <net/pkt_sched.h>
 #include <net/codel.h>
+#include <net/codel_impl.h>
+#include <net/codel_qdisc.h>
 
 
 #define DEFAULT_CODEL_LIMIT 1000
@@ -64,20 +66,33 @@ struct codel_sched_data {
  * to dequeue a packet from the queue. Note: backlog is handled in
  * codel, we don't need to reduce it here.
  */
-static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
+static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
 {
+       struct Qdisc *sch = ctx;
        struct sk_buff *skb = __skb_dequeue(&sch->q);
 
+       if (skb)
+               sch->qstats.backlog -= qdisc_pkt_len(skb);
+
        prefetch(&skb->end); /* we'll need skb_shinfo() */
        return skb;
 }
 
+static void drop_func(struct sk_buff *skb, void *ctx)
+{
+       struct Qdisc *sch = ctx;
+
+       qdisc_drop(skb, sch);
+}
+
 static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
 {
        struct codel_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
 
-       skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
+       skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars,
+                           &q->stats, qdisc_pkt_len, codel_get_enqueue_time,
+                           drop_func, dequeue_func);
 
        /* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
         * or HTB crashes. Defer it for next round.
@@ -173,9 +188,10 @@ static int codel_init(struct Qdisc *sch, struct nlattr *opt)
 
        sch->limit = DEFAULT_CODEL_LIMIT;
 
-       codel_params_init(&q->params, sch);
+       codel_params_init(&q->params);
        codel_vars_init(&q->vars);
        codel_stats_init(&q->stats);
+       q->params.mtu = psched_mtu(qdisc_dev(sch));
 
        if (opt) {
                int err = codel_change(sch, opt);
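
The codel rework threads an opaque void *ctx through codel_dequeue() instead of hard-coding struct Qdisc *, letting sch_codel, sch_fq_codel, and other callers share one implementation while supplying their own dequeue and drop callbacks. The generic-algorithm-plus-context pattern reduced to a stand-alone toy:

    #include <stdio.h>

    /* The generic algorithm knows nothing about the caller's queue type. */
    typedef int (*dequeue_fn)(void *ctx);

    static int run_algorithm(void *ctx, dequeue_fn dequeue)
    {
        return dequeue(ctx);    /* e.g. pick the next packet to examine */
    }

    /* One concrete backend supplying its own state and callback. */
    struct toy_queue { int next; };

    static int toy_dequeue(void *ctx)
    {
        struct toy_queue *q = ctx;

        return q->next++;
    }

    int main(void)
    {
        struct toy_queue q = { .next = 42 };

        printf("%d\n", run_algorithm(&q, toy_dequeue));
        return 0;
    }
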
index d3fc8f9dd3d464a23f9ea924fc00154a861eda26..bb8bd93146295b47ea4faff93781cb9078b739a3 100644 (file)
@@ -24,6 +24,8 @@
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
 #include <net/codel.h>
+#include <net/codel_impl.h>
+#include <net/codel_qdisc.h>
 
 /*     Fair Queue CoDel.
  *
@@ -57,8 +59,12 @@ struct fq_codel_sched_data {
        u32             flows_cnt;      /* number of flows */
        u32             perturbation;   /* hash perturbation */
        u32             quantum;        /* psched_mtu(qdisc_dev(sch)); */
+       u32             drop_batch_size;
+       u32             memory_limit;
        struct codel_params cparams;
        struct codel_stats cstats;
+       u32             memory_usage;
+       u32             drop_overmemory;
        u32             drop_overlimit;
        u32             new_flow_count;
 
@@ -133,17 +139,21 @@ static inline void flow_queue_add(struct fq_codel_flow *flow,
        skb->next = NULL;
 }
 
-static unsigned int fq_codel_drop(struct Qdisc *sch)
+static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets)
 {
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
        unsigned int maxbacklog = 0, idx = 0, i, len;
        struct fq_codel_flow *flow;
+       unsigned int threshold;
+       unsigned int mem = 0;
 
-       /* Queue is full! Find the fat flow and drop packet from it.
+       /* Queue is full! Find the fat flow and drop packet(s) from it.
         * This might sound expensive, but with 1024 flows, we scan
         * 4KB of memory, and we dont need to handle a complex tree
         * in fast path (packet queue/enqueue) with many cache misses.
+        * In stress mode, we'll try to drop 64 packets from the flow,
+        * amortizing this linear lookup to one cache line per drop.
         */
        for (i = 0; i < q->flows_cnt; i++) {
                if (q->backlogs[i] > maxbacklog) {
@@ -151,15 +161,26 @@ static unsigned int fq_codel_drop(struct Qdisc *sch)
                        idx = i;
                }
        }
+
+       /* Our goal is to drop half of this fat flow backlog */
+       threshold = maxbacklog >> 1;
+
        flow = &q->flows[idx];
-       skb = dequeue_head(flow);
-       len = qdisc_pkt_len(skb);
+       len = 0;
+       i = 0;
+       do {
+               skb = dequeue_head(flow);
+               len += qdisc_pkt_len(skb);
+               mem += skb->truesize;
+               kfree_skb(skb);
+       } while (++i < max_packets && len < threshold);
+
+       flow->dropped += i;
        q->backlogs[idx] -= len;
-       sch->q.qlen--;
-       qdisc_qstats_drop(sch);
-       qdisc_qstats_backlog_dec(sch, skb);
-       kfree_skb(skb);
-       flow->dropped++;
+       q->memory_usage -= mem;
+       sch->qstats.drops += i;
+       sch->qstats.backlog -= len;
+       sch->q.qlen -= i;
        return idx;
 }
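
A worked example of the batch drop above: if the fat flow holds 100 packets of 1500 bytes each (maxbacklog = 150000), the threshold is 75000 bytes, so the do/while loop frees packets until 75 KB has been reclaimed (50 packets here) or the max_packets cap is hit (64 by default from the enqueue path), and qlen, backlog, and the drop counter are then adjusted once for the whole batch. A stand-alone simulation of that accounting under those assumed numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned int maxbacklog  = 100 * 1500;      /* fat flow's bytes */
        unsigned int threshold   = maxbacklog >> 1; /* aim: drop half */
        unsigned int max_packets = 64;
        unsigned int len = 0, i = 0;

        do {
            len += 1500;        /* one dequeued-and-freed packet */
        } while (++i < max_packets && len < threshold);

        printf("dropped %u packets, %u bytes\n", i, len); /* 50, 75000 */
        return 0;
    }
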
 
@@ -168,16 +189,17 @@ static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch)
        unsigned int prev_backlog;
 
        prev_backlog = sch->qstats.backlog;
-       fq_codel_drop(sch);
+       fq_codel_drop(sch, 1U);
        return prev_backlog - sch->qstats.backlog;
 }
 
 static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
        struct fq_codel_sched_data *q = qdisc_priv(sch);
-       unsigned int idx, prev_backlog;
+       unsigned int idx, prev_backlog, prev_qlen;
        struct fq_codel_flow *flow;
        int uninitialized_var(ret);
+       bool memory_limited;
 
        idx = fq_codel_classify(skb, sch, &ret);
        if (idx == 0) {
@@ -200,28 +222,38 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                flow->deficit = q->quantum;
                flow->dropped = 0;
        }
-       if (++sch->q.qlen <= sch->limit)
+       q->memory_usage += skb->truesize;
+       memory_limited = q->memory_usage > q->memory_limit;
+       if (++sch->q.qlen <= sch->limit && !memory_limited)
                return NET_XMIT_SUCCESS;
 
        prev_backlog = sch->qstats.backlog;
-       q->drop_overlimit++;
-       /* Return Congestion Notification only if we dropped a packet
-        * from this flow.
+       prev_qlen = sch->q.qlen;
+
+       /* fq_codel_drop() is quite expensive, as it performs a linear search
+        * in q->backlogs[] to find a fat flow.
+        * So instead of dropping a single packet, drop half of its backlog
+        * with a 64-packet limit so as not to add too big a cpu spike here.
         */
-       if (fq_codel_drop(sch) == idx)
-               return NET_XMIT_CN;
+       ret = fq_codel_drop(sch, q->drop_batch_size);
+
+       q->drop_overlimit += prev_qlen - sch->q.qlen;
+       if (memory_limited)
+               q->drop_overmemory += prev_qlen - sch->q.qlen;
+       /* As we dropped packet(s), better let the upper stack know this */
+       qdisc_tree_reduce_backlog(sch, prev_qlen - sch->q.qlen,
+                                 prev_backlog - sch->qstats.backlog);
 
-       /* As we dropped a packet, better let upper stack know this */
-       qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
-       return NET_XMIT_SUCCESS;
+       return ret == idx ? NET_XMIT_CN : NET_XMIT_SUCCESS;
 }
 
 /* This is the specific function called from codel_dequeue()
  * to dequeue a packet from the queue. Note: backlog is handled in
  * codel, we don't need to reduce it here.
  */
-static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
+static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
 {
+       struct Qdisc *sch = ctx;
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct fq_codel_flow *flow;
        struct sk_buff *skb = NULL;
@@ -231,10 +263,18 @@ static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
                skb = dequeue_head(flow);
                q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
                sch->q.qlen--;
+               sch->qstats.backlog -= qdisc_pkt_len(skb);
        }
        return skb;
 }
 
+static void drop_func(struct sk_buff *skb, void *ctx)
+{
+       struct Qdisc *sch = ctx;
+
+       qdisc_drop(skb, sch);
+}
+
 static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
 {
        struct fq_codel_sched_data *q = qdisc_priv(sch);
@@ -263,8 +303,9 @@ begin:
        prev_ecn_mark = q->cstats.ecn_mark;
        prev_backlog = sch->qstats.backlog;
 
-       skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
-                           dequeue);
+       skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
+                           &flow->cvars, &q->cstats, qdisc_pkt_len,
+                           codel_get_enqueue_time, drop_func, dequeue_func);
 
        flow->dropped += q->cstats.drop_count - prev_drop_count;
        flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;
@@ -277,6 +318,7 @@ begin:
                        list_del_init(&flow->flowchain);
                goto begin;
        }
+       q->memory_usage -= skb->truesize;
        qdisc_bstats_update(sch, skb);
        flow->deficit -= qdisc_pkt_len(skb);
        /* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
@@ -323,6 +365,8 @@ static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
        [TCA_FQ_CODEL_FLOWS]    = { .type = NLA_U32 },
        [TCA_FQ_CODEL_QUANTUM]  = { .type = NLA_U32 },
        [TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
+       [TCA_FQ_CODEL_DROP_BATCH_SIZE] = { .type = NLA_U32 },
+       [TCA_FQ_CODEL_MEMORY_LIMIT] = { .type = NLA_U32 },
 };
 
 static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
@@ -374,7 +418,14 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
        if (tb[TCA_FQ_CODEL_QUANTUM])
                q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
 
-       while (sch->q.qlen > sch->limit) {
+       if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
+               q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));
+
+       if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
+               q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));
+
+       while (sch->q.qlen > sch->limit ||
+              q->memory_usage > q->memory_limit) {
                struct sk_buff *skb = fq_codel_dequeue(sch);
 
                q->cstats.drop_len += qdisc_pkt_len(skb);
@@ -419,13 +470,16 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
 
        sch->limit = 10*1024;
        q->flows_cnt = 1024;
+       q->memory_limit = 32 << 20; /* 32 MBytes */
+       q->drop_batch_size = 64;
        q->quantum = psched_mtu(qdisc_dev(sch));
        q->perturbation = prandom_u32();
        INIT_LIST_HEAD(&q->new_flows);
        INIT_LIST_HEAD(&q->old_flows);
-       codel_params_init(&q->cparams, sch);
+       codel_params_init(&q->cparams);
        codel_stats_init(&q->cstats);
        q->cparams.ecn = true;
+       q->cparams.mtu = psched_mtu(qdisc_dev(sch));
 
        if (opt) {
                int err = fq_codel_change(sch, opt);
@@ -476,6 +530,10 @@ static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
                        q->cparams.ecn) ||
            nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
                        q->quantum) ||
+           nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE,
+                       q->drop_batch_size) ||
+           nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT,
+                       q->memory_limit) ||
            nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
                        q->flows_cnt))
                goto nla_put_failure;
@@ -504,6 +562,8 @@ static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
        st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
        st.qdisc_stats.new_flow_count = q->new_flow_count;
        st.qdisc_stats.ce_mark = q->cstats.ce_mark;
+       st.qdisc_stats.memory_usage  = q->memory_usage;
+       st.qdisc_stats.drop_overmemory = q->drop_overmemory;
 
        list_for_each(pos, &q->new_flows)
                st.qdisc_stats.new_flows_len++;
index f18c3502420730e87254a6b01f0a6c1e4fb96941..269dd71b3828c03867c5dbbe7b041ad4babcf1f1 100644 (file)
@@ -108,35 +108,6 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
        return skb;
 }
 
-static inline int handle_dev_cpu_collision(struct sk_buff *skb,
-                                          struct netdev_queue *dev_queue,
-                                          struct Qdisc *q)
-{
-       int ret;
-
-       if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
-               /*
-                * Same CPU holding the lock. It may be a transient
-                * configuration error, when hard_start_xmit() recurses. We
-                * detect it by checking xmit owner and drop the packet when
-                * deadloop is detected. Return OK to try the next skb.
-                */
-               kfree_skb_list(skb);
-               net_warn_ratelimited("Dead loop on netdevice %s, fix it urgently!\n",
-                                    dev_queue->dev->name);
-               ret = qdisc_qlen(q);
-       } else {
-               /*
-                * Another cpu is holding lock, requeue & delay xmits for
-                * some time.
-                */
-               __this_cpu_inc(softnet_data.cpu_collision);
-               ret = dev_requeue_skb(skb, q);
-       }
-
-       return ret;
-}
-
 /*
  * Transmit possibly several skbs, and handle the return status as
  * required. Holding the __QDISC___STATE_RUNNING bit guarantees that
@@ -159,21 +130,21 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
        if (validate)
                skb = validate_xmit_skb_list(skb, dev);
 
-       if (skb) {
+       if (likely(skb)) {
                HARD_TX_LOCK(dev, txq, smp_processor_id());
                if (!netif_xmit_frozen_or_stopped(txq))
                        skb = dev_hard_start_xmit(skb, dev, txq, &ret);
 
                HARD_TX_UNLOCK(dev, txq);
+       } else {
+               spin_lock(root_lock);
+               return qdisc_qlen(q);
        }
        spin_lock(root_lock);
 
        if (dev_xmit_complete(ret)) {
                /* Driver sent out skb successfully or skb was consumed */
                ret = qdisc_qlen(q);
-       } else if (ret == NETDEV_TX_LOCKED) {
-               /* Driver try lock failed */
-               ret = handle_dev_cpu_collision(skb, txq, q);
        } else {
                /* Driver returned NETDEV_TX_BUSY - requeue skb */
                if (unlikely(ret != NETDEV_TX_BUSY))
@@ -256,13 +227,12 @@ unsigned long dev_trans_start(struct net_device *dev)
 
        if (is_vlan_dev(dev))
                dev = vlan_dev_real_dev(dev);
-       res = dev->trans_start;
-       for (i = 0; i < dev->num_tx_queues; i++) {
+       res = netdev_get_tx_queue(dev, 0)->trans_start;
+       for (i = 1; i < dev->num_tx_queues; i++) {
                val = netdev_get_tx_queue(dev, i)->trans_start;
                if (val && time_after(val, res))
                        res = val;
        }
-       dev->trans_start = res;
 
        return res;
 }
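
The rewritten dev_trans_start() keeps the most recent trans_start across all TX queues using time_after(), which compares jiffies through a signed difference so the result stays correct even across counter wraparound. A minimal imitation of that comparison (the kernel's real macro lives in include/linux/jiffies.h):

    #include <stdio.h>

    /* True if a is later than b, even if the counter wrapped in between. */
    static int time_after_u32(unsigned int a, unsigned int b)
    {
        return (int)(b - a) < 0;
    }

    int main(void)
    {
        unsigned int just_wrapped = 5;            /* ticks after the wrap */
        unsigned int before_wrap  = 0xfffffff0u;  /* ticks before the wrap */

        printf("%d\n", time_after_u32(just_wrapped, before_wrap)); /* 1 */
        return 0;
    }
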
@@ -285,10 +255,7 @@ static void dev_watchdog(unsigned long arg)
                                struct netdev_queue *txq;
 
                                txq = netdev_get_tx_queue(dev, i);
-                               /*
-                                * old device drivers set dev->trans_start
-                                */
-                               trans_start = txq->trans_start ? : dev->trans_start;
+                               trans_start = txq->trans_start;
                                if (netif_xmit_stopped(txq) &&
                                    time_after(jiffies, (trans_start +
                                                         dev->watchdog_timeo))) {
@@ -804,7 +771,7 @@ void dev_activate(struct net_device *dev)
                transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);
 
        if (need_watchdog) {
-               dev->trans_start = jiffies;
+               netif_trans_update(dev);
                dev_watchdog_up(dev);
        }
 }
index 87b02ed3d5f212cee3e5bf871c71b72b6329a8aa..f6bf5818ed4d265feafe9fd44319371fbc5c1d26 100644 (file)
@@ -1122,10 +1122,12 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
        if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;
        if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
-           nla_put_u64(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps))
+           nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps,
+                             TCA_HTB_PAD))
                goto nla_put_failure;
        if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
-           nla_put_u64(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps))
+           nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps,
+                             TCA_HTB_PAD))
                goto nla_put_failure;
 
        return nla_nest_end(skb, nest);
index 9640bb39a5d293d55a96edc5164d369c1cded127..205bed00dd3463c62696ecc61eb78f2c97b3d0c9 100644 (file)
@@ -395,6 +395,25 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
        sch->q.qlen++;
 }
 
+/* netem can't properly corrupt a megapacket (like we get from GSO), so when
+ * we statistically choose to corrupt one, we instead segment it, returning
+ * the first packet to be corrupted, and re-enqueue the remaining frames.
+ */
+static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch)
+{
+       struct sk_buff *segs;
+       netdev_features_t features = netif_skb_features(skb);
+
+       segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+
+       if (IS_ERR_OR_NULL(segs)) {
+               qdisc_reshape_fail(skb, sch);
+               return NULL;
+       }
+       consume_skb(skb);
+       return segs;
+}
+
 /*
  * Insert one skb into qdisc.
  * Note: parent depends on return value to account for queue length.
@@ -407,7 +426,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        /* We don't fill cb now as skb_unshare() may invalidate it */
        struct netem_skb_cb *cb;
        struct sk_buff *skb2;
+       struct sk_buff *segs = NULL;
+       unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
+       int nb = 0;
        int count = 1;
+       int rc = NET_XMIT_SUCCESS;
 
        /* Random duplication */
        if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
@@ -453,10 +476,23 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         * do it now in software before we mangle it.
         */
        if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
+               if (skb_is_gso(skb)) {
+                       segs = netem_segment(skb, sch);
+                       if (!segs)
+                               return NET_XMIT_DROP;
+               } else {
+                       segs = skb;
+               }
+
+               skb = segs;
+               segs = segs->next;
+
                if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
                    (skb->ip_summed == CHECKSUM_PARTIAL &&
-                    skb_checksum_help(skb)))
-                       return qdisc_drop(skb, sch);
+                    skb_checksum_help(skb))) {
+                       rc = qdisc_drop(skb, sch);
+                       goto finish_segs;
+               }
 
                skb->data[prandom_u32() % skb_headlen(skb)] ^=
                        1<<(prandom_u32() % 8);
@@ -516,6 +552,27 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                sch->qstats.requeues++;
        }
 
+finish_segs:
+       if (segs) {
+               while (segs) {
+                       skb2 = segs->next;
+                       segs->next = NULL;
+                       qdisc_skb_cb(segs)->pkt_len = segs->len;
+                       last_len = segs->len;
+                       rc = qdisc_enqueue(segs, sch);
+                       if (rc != NET_XMIT_SUCCESS) {
+                               if (net_xmit_drop_count(rc))
+                                       qdisc_qstats_drop(sch);
+                       } else {
+                               nb++;
+                               len += last_len;
+                       }
+                       segs = skb2;
+               }
+               sch->q.qlen += nb;
+               if (nb > 1)
+                       qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
+       }
        return NET_XMIT_SUCCESS;
 }
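
Usage note, not part of the diff: with this change a standard corruption setup such as

        # tc qdisc add dev eth0 root netem corrupt 0.1%

works even when the stack hands netem GSO megapackets. The accounting at finish_segs is worth noting: one enqueued skb became nb segments, so qdisc_tree_reduce_backlog() is called with negative deltas (1 - nb packets, prev_len - len bytes), which increases the qlen and backlog recorded by ancestor qdiscs to match what was actually queued.
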
 
@@ -994,7 +1051,8 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
                goto nla_put_failure;
 
        if (q->rate >= (1ULL << 32)) {
-               if (nla_put_u64(skb, TCA_NETEM_RATE64, q->rate))
+               if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
+                                     TCA_NETEM_PAD))
                        goto nla_put_failure;
                rate.rate = ~0U;
        } else {
index c2fbde742f37347d6974ed87368fd3af4425de74..83b90b584fae4fcee779bdeb638df5c59f40f007 100644 (file)
@@ -472,11 +472,13 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
        if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;
        if (q->rate.rate_bytes_ps >= (1ULL << 32) &&
-           nla_put_u64(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps))
+           nla_put_u64_64bit(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps,
+                             TCA_TBF_PAD))
                goto nla_put_failure;
        if (tbf_peak_present(q) &&
            q->peak.rate_bytes_ps >= (1ULL << 32) &&
-           nla_put_u64(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps))
+           nla_put_u64_64bit(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps,
+                             TCA_TBF_PAD))
                goto nla_put_failure;
 
        return nla_nest_end(skb, nest);
index 71c1a598d9bc270c5567c5f69dd5058b86b15540..d9c04dc1b3f3a5e6e801380954c16dda22395b5e 100644 (file)
@@ -99,5 +99,9 @@ config SCTP_COOKIE_HMAC_SHA1
        select CRYPTO_HMAC if SCTP_COOKIE_HMAC_SHA1
        select CRYPTO_SHA1 if SCTP_COOKIE_HMAC_SHA1
 
+config INET_SCTP_DIAG
+       depends on INET_DIAG
+       def_tristate INET_DIAG
+
 
 endif # IP_SCTP
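
Kconfig note: def_tristate makes INET_SCTP_DIAG a promptless option that simply tracks INET_DIAG, e.g. (illustrative .config fragment):

        CONFIG_INET_DIAG=m
        CONFIG_INET_SCTP_DIAG=m    # follows automatically, builds sctp_diag.ko
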
index 3b4ffb021cf1728353b5311e519604759a03b617..0fca5824ad0e93c905e2cbd59ff2ff7e2077ca7c 100644 (file)
@@ -4,6 +4,7 @@
 
 obj-$(CONFIG_IP_SCTP) += sctp.o
 obj-$(CONFIG_NET_SCTPPROBE) += sctp_probe.o
+obj-$(CONFIG_INET_SCTP_DIAG) += sctp_diag.o
 
 sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \
          protocol.o endpointola.o associola.o \
index 958ef5f33f4b8165592f1631cb6c4bf035dcc702..1eb94bf18ef443eb7cefdf000ac2404587151572 100644 (file)
@@ -239,7 +239,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
        offset = 0;
 
        if ((whole > 1) || (whole && over))
-               SCTP_INC_STATS_USER(sock_net(asoc->base.sk), SCTP_MIB_FRAGUSRMSGS);
+               SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_FRAGUSRMSGS);
 
        /* Create chunks for all the full sized DATA chunks. */
        for (i = 0, len = first_len; i < whole; i++) {
index 00b8445364e3d5045f0ff71d9e836c18e7c6a401..a701527a9480faff1b8d91257e1dbf3c0f09ed68 100644 (file)
@@ -84,7 +84,7 @@ static inline int sctp_rcv_checksum(struct net *net, struct sk_buff *skb)
 
        if (val != cmp) {
                /* CRC failure, dump it. */
-               SCTP_INC_STATS_BH(net, SCTP_MIB_CHECKSUMERRORS);
+               __SCTP_INC_STATS(net, SCTP_MIB_CHECKSUMERRORS);
                return -1;
        }
        return 0;
@@ -122,7 +122,7 @@ int sctp_rcv(struct sk_buff *skb)
        if (skb->pkt_type != PACKET_HOST)
                goto discard_it;
 
-       SCTP_INC_STATS_BH(net, SCTP_MIB_INSCTPPACKS);
+       __SCTP_INC_STATS(net, SCTP_MIB_INSCTPPACKS);
 
        if (skb_linearize(skb))
                goto discard_it;
@@ -208,7 +208,7 @@ int sctp_rcv(struct sk_buff *skb)
         */
        if (!asoc) {
                if (sctp_rcv_ootb(skb)) {
-                       SCTP_INC_STATS_BH(net, SCTP_MIB_OUTOFBLUES);
+                       __SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
                        goto discard_release;
                }
        }
@@ -264,9 +264,9 @@ int sctp_rcv(struct sk_buff *skb)
                        skb = NULL; /* sctp_chunk_free already freed the skb */
                        goto discard_release;
                }
-               SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_BACKLOG);
+               __SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_BACKLOG);
        } else {
-               SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_SOFTIRQ);
+               __SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_SOFTIRQ);
                sctp_inq_push(&chunk->rcvr->inqueue, chunk);
        }
 
@@ -281,7 +281,7 @@ int sctp_rcv(struct sk_buff *skb)
        return 0;
 
 discard_it:
-       SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_DISCARDS);
+       __SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_DISCARDS);
        kfree_skb(skb);
        return 0;
 
@@ -532,7 +532,7 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
         * servers this needs to be solved differently.
         */
        if (sock_owned_by_user(sk))
-               NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
+               __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 
        *app = asoc;
        *tpp = transport;
@@ -589,7 +589,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
        skb->network_header = saveip;
        skb->transport_header = savesctp;
        if (!sk) {
-               ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
+               __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
                return;
        }
        /* Warning:  The sock lock is held.  Remember to call
index 7e8a16c77039e1b70ef89f3e862dbb332bcc614f..9d87bba0ff1d34134093f34e4db3dc1b7e3dafd6 100644 (file)
@@ -89,10 +89,12 @@ void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
         * Eventually, we should clean up inqueue to not rely
         * on the BH related data structures.
         */
+       local_bh_disable();
        list_add_tail(&chunk->list, &q->in_chunk_list);
        if (chunk->asoc)
                chunk->asoc->stats.ipackets++;
        q->immediate.func(&q->immediate);
+       local_bh_enable();
 }
 
 /* Peek at the next chunk on the inqueue. */
@@ -163,6 +165,9 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
                chunk->singleton = 1;
                ch = (sctp_chunkhdr_t *) chunk->skb->data;
                chunk->data_accepted = 0;
+
+               if (chunk->asoc)
+                       sock_rps_save_rxhash(chunk->asoc->base.sk, chunk->skb);
        }
 
        chunk->chunk_hdr = ch;
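
Saving the flow hash on the owning socket feeds RFS, so later packets of the same association can be steered to the CPU where the application runs; the matching sock_rps_record_flow() call is added to sctp_poll() further below. For context, the save helper is roughly (include/net/sock.h):

static inline void sock_rps_save_rxhash(struct sock *sk,
                                        const struct sk_buff *skb)
{
#ifdef CONFIG_RPS
        /* remember the RX flow hash so the syscall side can record
         * this flow in the global RFS table via sock_rps_record_flow()
         */
        if (unlikely(sk->sk_rxhash != skb->hash))
                sk->sk_rxhash = skb->hash;
#endif
}
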
index ce46f1c7f133ad5b114e4c2cd571d26c2b9ee901..0657d18a85bf7aa751a0456d0cc9adae3ff95e42 100644 (file)
@@ -162,7 +162,7 @@ static void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        skb->network_header   = saveip;
        skb->transport_header = savesctp;
        if (!sk) {
-               ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_INERRORS);
+               __ICMP6_INC_STATS(net, idev, ICMP6_MIB_INERRORS);
                goto out;
        }
 
index 97745351d58c2fb32b9f9b57d61831d7724d83b2..9844fe573029b9e262743440980f15277ddaf5a1 100644 (file)
@@ -705,7 +705,8 @@ static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
        /* Check whether this chunk and all the rest of pending data will fit
         * or delay in hopes of bundling a full sized packet.
         */
-       if (chunk->skb->len + q->out_qlen >= transport->pathmtu - packet->overhead)
+       if (chunk->skb->len + q->out_qlen >
+               transport->pathmtu - packet->overhead - sizeof(sctp_data_chunk_t) - 4)
                /* Enough data queued to fill a packet */
                return SCTP_XMIT_OK;
 
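
Illustrative arithmetic (assumed IPv4 numbers, not from the diff): with a 1500-byte path MTU, packet->overhead = 20 (IP header) + 12 (SCTP common header) = 32 leaves 1468 bytes of chunk space. The new test additionally reserves sizeof(sctp_data_chunk_t) = 16 bytes for one more DATA chunk header plus 4 bytes of padding slack, so the "enough data queued" branch now triggers once pending bytes exceed 1448 rather than 1468.
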
index 8d3d3625130ee0fd294998554a9290d57eae56e7..084718f9b3dad09e21e41e34b989e25627058c98 100644 (file)
@@ -866,8 +866,10 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
                                 * sender MUST assure that at least one T3-rtx
                                 * timer is running.
                                 */
-                               if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN)
-                                       sctp_transport_reset_timers(transport);
+                               if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) {
+                                       sctp_transport_reset_t3_rtx(transport);
+                                       transport->last_time_sent = jiffies;
+                               }
                        }
                        break;
 
@@ -924,8 +926,10 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
                        error = sctp_outq_flush_rtx(q, packet,
                                                    rtx_timeout, &start_timer);
 
-                       if (start_timer)
-                               sctp_transport_reset_timers(transport);
+                       if (start_timer) {
+                               sctp_transport_reset_t3_rtx(transport);
+                               transport->last_time_sent = jiffies;
+                       }
 
                        /* This can happen on COOKIE-ECHO resend.  Only
                         * one chunk can get bundled with a COOKIE-ECHO.
@@ -1062,7 +1066,8 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
                        list_add_tail(&chunk->transmitted_list,
                                      &transport->transmitted);
 
-                       sctp_transport_reset_timers(transport);
+                       sctp_transport_reset_t3_rtx(transport);
+                       transport->last_time_sent = jiffies;
 
                        /* Only let one DATA chunk get bundled with a
                         * COOKIE-ECHO chunk.
index 6d45d53321e6bf2559233e321eea19c0d6c2ba19..4cb5aedfe3ee2cf188385ef1f18cd63908a558ba 100644 (file)
@@ -280,83 +280,38 @@ void sctp_eps_proc_exit(struct net *net)
 struct sctp_ht_iter {
        struct seq_net_private p;
        struct rhashtable_iter hti;
+       int start_fail;
 };
 
-static struct sctp_transport *sctp_transport_get_next(struct seq_file *seq)
+static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos)
 {
        struct sctp_ht_iter *iter = seq->private;
-       struct sctp_transport *t;
-
-       t = rhashtable_walk_next(&iter->hti);
-       for (; t; t = rhashtable_walk_next(&iter->hti)) {
-               if (IS_ERR(t)) {
-                       if (PTR_ERR(t) == -EAGAIN)
-                               continue;
-                       break;
-               }
+       int err = sctp_transport_walk_start(&iter->hti);
 
-               if (net_eq(sock_net(t->asoc->base.sk), seq_file_net(seq)) &&
-                   t->asoc->peer.primary_path == t)
-                       break;
+       if (err) {
+               iter->start_fail = 1;
+               return ERR_PTR(err);
        }
 
-       return t;
+       return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos);
 }
 
-static struct sctp_transport *sctp_transport_get_idx(struct seq_file *seq,
-                                                    loff_t pos)
-{
-       void *obj = SEQ_START_TOKEN;
-
-       while (pos && (obj = sctp_transport_get_next(seq)) && !IS_ERR(obj))
-               pos--;
-
-       return obj;
-}
-
-static int sctp_transport_walk_start(struct seq_file *seq)
+static void sctp_transport_seq_stop(struct seq_file *seq, void *v)
 {
        struct sctp_ht_iter *iter = seq->private;
-       int err;
-
-       err = rhashtable_walk_init(&sctp_transport_hashtable, &iter->hti,
-                                  GFP_KERNEL);
-       if (err)
-               return err;
-
-       err = rhashtable_walk_start(&iter->hti);
 
-       return err == -EAGAIN ? 0 : err;
+       if (iter->start_fail)
+               return;
+       sctp_transport_walk_stop(&iter->hti);
 }
 
-static void sctp_transport_walk_stop(struct seq_file *seq)
+static void *sctp_transport_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
        struct sctp_ht_iter *iter = seq->private;
 
-       rhashtable_walk_stop(&iter->hti);
-       rhashtable_walk_exit(&iter->hti);
-}
-
-static void *sctp_assocs_seq_start(struct seq_file *seq, loff_t *pos)
-{
-       int err = sctp_transport_walk_start(seq);
-
-       if (err)
-               return ERR_PTR(err);
-
-       return sctp_transport_get_idx(seq, *pos);
-}
-
-static void sctp_assocs_seq_stop(struct seq_file *seq, void *v)
-{
-       sctp_transport_walk_stop(seq);
-}
-
-static void *sctp_assocs_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
        ++*pos;
 
-       return sctp_transport_get_next(seq);
+       return sctp_transport_get_next(seq_file_net(seq), &iter->hti);
 }
 
 /* Display sctp associations (/proc/net/sctp/assocs). */
@@ -417,9 +372,9 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
 }
 
 static const struct seq_operations sctp_assoc_ops = {
-       .start = sctp_assocs_seq_start,
-       .next  = sctp_assocs_seq_next,
-       .stop  = sctp_assocs_seq_stop,
+       .start = sctp_transport_seq_start,
+       .next  = sctp_transport_seq_next,
+       .stop  = sctp_transport_seq_stop,
        .show  = sctp_assocs_seq_show,
 };
 
@@ -456,28 +411,6 @@ void sctp_assocs_proc_exit(struct net *net)
        remove_proc_entry("assocs", net->sctp.proc_net_sctp);
 }
 
-static void *sctp_remaddr_seq_start(struct seq_file *seq, loff_t *pos)
-{
-       int err = sctp_transport_walk_start(seq);
-
-       if (err)
-               return ERR_PTR(err);
-
-       return sctp_transport_get_idx(seq, *pos);
-}
-
-static void *sctp_remaddr_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-       ++*pos;
-
-       return sctp_transport_get_next(seq);
-}
-
-static void sctp_remaddr_seq_stop(struct seq_file *seq, void *v)
-{
-       sctp_transport_walk_stop(seq);
-}
-
 static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
 {
        struct sctp_association *assoc;
@@ -551,9 +484,9 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
 }
 
 static const struct seq_operations sctp_remaddr_ops = {
-       .start = sctp_remaddr_seq_start,
-       .next  = sctp_remaddr_seq_next,
-       .stop  = sctp_remaddr_seq_stop,
+       .start = sctp_transport_seq_start,
+       .next  = sctp_transport_seq_next,
+       .stop  = sctp_transport_seq_stop,
        .show  = sctp_remaddr_seq_show,
 };
 
diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c
new file mode 100644 (file)
index 0000000..8e3e769
--- /dev/null
@@ -0,0 +1,500 @@
+#include <linux/module.h>
+#include <linux/inet_diag.h>
+#include <linux/sock_diag.h>
+#include <net/sctp/sctp.h>
+
+extern void inet_diag_msg_common_fill(struct inet_diag_msg *r,
+                                     struct sock *sk);
+extern int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
+                                   struct inet_diag_msg *r, int ext,
+                                   struct user_namespace *user_ns);
+
+static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
+                              void *info);
+
+/* define some functions to make asoc/ep fill look clean */
+static void inet_diag_msg_sctpasoc_fill(struct inet_diag_msg *r,
+                                       struct sock *sk,
+                                       struct sctp_association *asoc)
+{
+       union sctp_addr laddr, paddr;
+       struct dst_entry *dst;
+
+       laddr = list_entry(asoc->base.bind_addr.address_list.next,
+                          struct sctp_sockaddr_entry, list)->a;
+       paddr = asoc->peer.primary_path->ipaddr;
+       dst = asoc->peer.primary_path->dst;
+
+       r->idiag_family = sk->sk_family;
+       r->id.idiag_sport = htons(asoc->base.bind_addr.port);
+       r->id.idiag_dport = htons(asoc->peer.port);
+       r->id.idiag_if = dst ? dst->dev->ifindex : 0;
+       sock_diag_save_cookie(sk, r->id.idiag_cookie);
+
+#if IS_ENABLED(CONFIG_IPV6)
+       if (sk->sk_family == AF_INET6) {
+               *(struct in6_addr *)r->id.idiag_src = laddr.v6.sin6_addr;
+               *(struct in6_addr *)r->id.idiag_dst = paddr.v6.sin6_addr;
+       } else
+#endif
+       {
+               memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
+               memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
+
+               r->id.idiag_src[0] = laddr.v4.sin_addr.s_addr;
+               r->id.idiag_dst[0] = paddr.v4.sin_addr.s_addr;
+       }
+
+       r->idiag_state = asoc->state;
+       r->idiag_timer = SCTP_EVENT_TIMEOUT_T3_RTX;
+       r->idiag_retrans = asoc->rtx_data_chunks;
+       r->idiag_expires = jiffies_to_msecs(
+               asoc->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] - jiffies);
+}
+
+static int inet_diag_msg_sctpladdrs_fill(struct sk_buff *skb,
+                                        struct list_head *address_list)
+{
+       struct sctp_sockaddr_entry *laddr;
+       int addrlen = sizeof(struct sockaddr_storage);
+       int addrcnt = 0;
+       struct nlattr *attr;
+       void *info = NULL;
+
+       list_for_each_entry_rcu(laddr, address_list, list)
+               addrcnt++;
+
+       attr = nla_reserve(skb, INET_DIAG_LOCALS, addrlen * addrcnt);
+       if (!attr)
+               return -EMSGSIZE;
+
+       info = nla_data(attr);
+       list_for_each_entry_rcu(laddr, address_list, list) {
+               memcpy(info, &laddr->a, addrlen);
+               info += addrlen;
+       }
+
+       return 0;
+}
+
+static int inet_diag_msg_sctpaddrs_fill(struct sk_buff *skb,
+                                       struct sctp_association *asoc)
+{
+       int addrlen = sizeof(struct sockaddr_storage);
+       struct sctp_transport *from;
+       struct nlattr *attr;
+       void *info = NULL;
+
+       attr = nla_reserve(skb, INET_DIAG_PEERS,
+                          addrlen * asoc->peer.transport_count);
+       if (!attr)
+               return -EMSGSIZE;
+
+       info = nla_data(attr);
+       list_for_each_entry(from, &asoc->peer.transport_addr_list,
+                           transports) {
+               memcpy(info, &from->ipaddr, addrlen);
+               info += addrlen;
+       }
+
+       return 0;
+}
+
+/* sctp asoc/ep fill */
+static int inet_sctp_diag_fill(struct sock *sk, struct sctp_association *asoc,
+                              struct sk_buff *skb,
+                              const struct inet_diag_req_v2 *req,
+                              struct user_namespace *user_ns,
+                              int portid, u32 seq, u16 nlmsg_flags,
+                              const struct nlmsghdr *unlh)
+{
+       struct sctp_endpoint *ep = sctp_sk(sk)->ep;
+       struct list_head *addr_list;
+       struct inet_diag_msg *r;
+       struct nlmsghdr  *nlh;
+       int ext = req->idiag_ext;
+       struct sctp_infox infox;
+       void *info = NULL;
+
+       nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
+                       nlmsg_flags);
+       if (!nlh)
+               return -EMSGSIZE;
+
+       r = nlmsg_data(nlh);
+       BUG_ON(!sk_fullsock(sk));
+
+       if (asoc) {
+               inet_diag_msg_sctpasoc_fill(r, sk, asoc);
+       } else {
+               inet_diag_msg_common_fill(r, sk);
+               r->idiag_state = sk->sk_state;
+               r->idiag_timer = 0;
+               r->idiag_retrans = 0;
+       }
+
+       if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns))
+               goto errout;
+
+       if (ext & (1 << (INET_DIAG_SKMEMINFO - 1))) {
+               u32 mem[SK_MEMINFO_VARS];
+               int amt;
+
+               if (asoc && asoc->ep->sndbuf_policy)
+                       amt = asoc->sndbuf_used;
+               else
+                       amt = sk_wmem_alloc_get(sk);
+               mem[SK_MEMINFO_WMEM_ALLOC] = amt;
+               if (asoc && asoc->ep->rcvbuf_policy)
+                       amt = atomic_read(&asoc->rmem_alloc);
+               else
+                       amt = sk_rmem_alloc_get(sk);
+               mem[SK_MEMINFO_RMEM_ALLOC] = amt;
+               mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
+               mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
+               mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
+               mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
+               mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
+               mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
+               mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
+
+               if (nla_put(skb, INET_DIAG_SKMEMINFO, sizeof(mem), &mem) < 0)
+                       goto errout;
+       }
+
+       if (ext & (1 << (INET_DIAG_INFO - 1))) {
+               struct nlattr *attr;
+
+               attr = nla_reserve_64bit(skb, INET_DIAG_INFO,
+                                        sizeof(struct sctp_info),
+                                        INET_DIAG_PAD);
+               if (!attr)
+                       goto errout;
+
+               info = nla_data(attr);
+       }
+       infox.sctpinfo = (struct sctp_info *)info;
+       infox.asoc = asoc;
+       sctp_diag_get_info(sk, r, &infox);
+
+       addr_list = asoc ? &asoc->base.bind_addr.address_list
+                        : &ep->base.bind_addr.address_list;
+       if (inet_diag_msg_sctpladdrs_fill(skb, addr_list))
+               goto errout;
+
+       if (asoc && (ext & (1 << (INET_DIAG_CONG - 1))))
+               if (nla_put_string(skb, INET_DIAG_CONG, "reno") < 0)
+                       goto errout;
+
+       if (asoc && inet_diag_msg_sctpaddrs_fill(skb, asoc))
+               goto errout;
+
+       nlmsg_end(skb, nlh);
+       return 0;
+
+errout:
+       nlmsg_cancel(skb, nlh);
+       return -EMSGSIZE;
+}
+
+/* callback and param */
+struct sctp_comm_param {
+       struct sk_buff *skb;
+       struct netlink_callback *cb;
+       const struct inet_diag_req_v2 *r;
+       const struct nlmsghdr *nlh;
+};
+
+static size_t inet_assoc_attr_size(struct sctp_association *asoc)
+{
+       int addrlen = sizeof(struct sockaddr_storage);
+       int addrcnt = 0;
+       struct sctp_sockaddr_entry *laddr;
+
+       list_for_each_entry_rcu(laddr, &asoc->base.bind_addr.address_list,
+                               list)
+               addrcnt++;
+
+       return    nla_total_size(sizeof(struct sctp_info))
+               + nla_total_size(1) /* INET_DIAG_SHUTDOWN */
+               + nla_total_size(1) /* INET_DIAG_TOS */
+               + nla_total_size(1) /* INET_DIAG_TCLASS */
+               + nla_total_size(addrlen * asoc->peer.transport_count)
+               + nla_total_size(addrlen * addrcnt)
+               + nla_total_size(sizeof(struct inet_diag_meminfo))
+               + nla_total_size(sizeof(struct inet_diag_msg))
+               + 64;
+}
+
+static int sctp_tsp_dump_one(struct sctp_transport *tsp, void *p)
+{
+       struct sctp_association *assoc = tsp->asoc;
+       struct sock *sk = tsp->asoc->base.sk;
+       struct sctp_comm_param *commp = p;
+       struct sk_buff *in_skb = commp->skb;
+       const struct inet_diag_req_v2 *req = commp->r;
+       const struct nlmsghdr *nlh = commp->nlh;
+       struct net *net = sock_net(in_skb->sk);
+       struct sk_buff *rep;
+       int err;
+
+       err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
+       if (err)
+               goto out;
+
+       err = -ENOMEM;
+       rep = nlmsg_new(inet_assoc_attr_size(assoc), GFP_KERNEL);
+       if (!rep)
+               goto out;
+
+       lock_sock(sk);
+       if (sk != assoc->base.sk) {
+               release_sock(sk);
+               sk = assoc->base.sk;
+               lock_sock(sk);
+       }
+       err = inet_sctp_diag_fill(sk, assoc, rep, req,
+                                 sk_user_ns(NETLINK_CB(in_skb).sk),
+                                 NETLINK_CB(in_skb).portid,
+                                 nlh->nlmsg_seq, 0, nlh);
+       release_sock(sk);
+       if (err < 0) {
+               WARN_ON(err == -EMSGSIZE);
+               kfree_skb(rep);
+               goto out;
+       }
+
+       err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
+                             MSG_DONTWAIT);
+       if (err > 0)
+               err = 0;
+out:
+       return err;
+}
+
+static int sctp_tsp_dump(struct sctp_transport *tsp, void *p)
+{
+       struct sctp_endpoint *ep = tsp->asoc->ep;
+       struct sctp_comm_param *commp = p;
+       struct sock *sk = ep->base.sk;
+       struct sk_buff *skb = commp->skb;
+       struct netlink_callback *cb = commp->cb;
+       const struct inet_diag_req_v2 *r = commp->r;
+       struct sctp_association *assoc =
+               list_entry(ep->asocs.next, struct sctp_association, asocs);
+       int err = 0;
+
+       /* dump each ep only once: proceed only via its first asoc's transport */
+       if (tsp->asoc != assoc)
+               goto out;
+
+       if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family)
+               goto out;
+
+       lock_sock(sk);
+       if (sk != assoc->base.sk)
+               goto release;
+       list_for_each_entry(assoc, &ep->asocs, asocs) {
+               if (cb->args[4] < cb->args[1])
+                       goto next;
+
+               if (r->id.idiag_sport != htons(assoc->base.bind_addr.port) &&
+                   r->id.idiag_sport)
+                       goto next;
+               if (r->id.idiag_dport != htons(assoc->peer.port) &&
+                   r->id.idiag_dport)
+                       goto next;
+
+               if (!cb->args[3] &&
+                   inet_sctp_diag_fill(sk, NULL, skb, r,
+                                       sk_user_ns(NETLINK_CB(cb->skb).sk),
+                                       NETLINK_CB(cb->skb).portid,
+                                       cb->nlh->nlmsg_seq,
+                                       NLM_F_MULTI, cb->nlh) < 0) {
+                       cb->args[3] = 1;
+                       err = 2;
+                       goto release;
+               }
+               cb->args[3] = 1;
+
+               if (inet_sctp_diag_fill(sk, assoc, skb, r,
+                                       sk_user_ns(NETLINK_CB(cb->skb).sk),
+                                       NETLINK_CB(cb->skb).portid,
+                                       cb->nlh->nlmsg_seq, 0, cb->nlh) < 0) {
+                       err = 2;
+                       goto release;
+               }
+next:
+               cb->args[4]++;
+       }
+       cb->args[1] = 0;
+       cb->args[2]++;
+       cb->args[3] = 0;
+       cb->args[4] = 0;
+release:
+       release_sock(sk);
+       return err;
+out:
+       cb->args[2]++;
+       return err;
+}
+
+static int sctp_ep_dump(struct sctp_endpoint *ep, void *p)
+{
+       struct sctp_comm_param *commp = p;
+       struct sock *sk = ep->base.sk;
+       struct sk_buff *skb = commp->skb;
+       struct netlink_callback *cb = commp->cb;
+       const struct inet_diag_req_v2 *r = commp->r;
+       struct net *net = sock_net(skb->sk);
+       struct inet_sock *inet = inet_sk(sk);
+       int err = 0;
+
+       if (!net_eq(sock_net(sk), net))
+               goto out;
+
+       if (cb->args[4] < cb->args[1])
+               goto next;
+
+       if (r->sdiag_family != AF_UNSPEC &&
+           sk->sk_family != r->sdiag_family)
+               goto next;
+
+       if (r->id.idiag_sport != inet->inet_sport &&
+           r->id.idiag_sport)
+               goto next;
+
+       if (r->id.idiag_dport != inet->inet_dport &&
+           r->id.idiag_dport)
+               goto next;
+
+       if (inet_sctp_diag_fill(sk, NULL, skb, r,
+                               sk_user_ns(NETLINK_CB(cb->skb).sk),
+                               NETLINK_CB(cb->skb).portid,
+                               cb->nlh->nlmsg_seq, NLM_F_MULTI,
+                               cb->nlh) < 0) {
+               err = 2;
+               goto out;
+       }
+next:
+       cb->args[4]++;
+out:
+       return err;
+}
+
+/* define the functions for sctp_diag_handler */
+static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
+                              void *info)
+{
+       struct sctp_infox *infox = (struct sctp_infox *)info;
+
+       if (infox->asoc) {
+               r->idiag_rqueue = atomic_read(&infox->asoc->rmem_alloc);
+               r->idiag_wqueue = infox->asoc->sndbuf_used;
+       } else {
+               r->idiag_rqueue = sk->sk_ack_backlog;
+               r->idiag_wqueue = sk->sk_max_ack_backlog;
+       }
+       if (infox->sctpinfo)
+               sctp_get_sctp_info(sk, infox->asoc, infox->sctpinfo);
+}
+
+static int sctp_diag_dump_one(struct sk_buff *in_skb,
+                             const struct nlmsghdr *nlh,
+                             const struct inet_diag_req_v2 *req)
+{
+       struct net *net = sock_net(in_skb->sk);
+       union sctp_addr laddr, paddr;
+       struct sctp_comm_param commp = {
+               .skb = in_skb,
+               .r = req,
+               .nlh = nlh,
+       };
+
+       if (req->sdiag_family == AF_INET) {
+               laddr.v4.sin_port = req->id.idiag_sport;
+               laddr.v4.sin_addr.s_addr = req->id.idiag_src[0];
+               laddr.v4.sin_family = AF_INET;
+
+               paddr.v4.sin_port = req->id.idiag_dport;
+               paddr.v4.sin_addr.s_addr = req->id.idiag_dst[0];
+               paddr.v4.sin_family = AF_INET;
+       } else {
+               laddr.v6.sin6_port = req->id.idiag_sport;
+               memcpy(&laddr.v6.sin6_addr, req->id.idiag_src,
+                      sizeof(laddr.v6.sin6_addr));
+               laddr.v6.sin6_family = AF_INET6;
+
+               paddr.v6.sin6_port = req->id.idiag_dport;
+               memcpy(&paddr.v6.sin6_addr, req->id.idiag_dst,
+                      sizeof(paddr.v6.sin6_addr));
+               paddr.v6.sin6_family = AF_INET6;
+       }
+
+       return sctp_transport_lookup_process(sctp_tsp_dump_one,
+                                            net, &laddr, &paddr, &commp);
+}
+
+static void sctp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+                          const struct inet_diag_req_v2 *r, struct nlattr *bc)
+{
+       u32 idiag_states = r->idiag_states;
+       struct net *net = sock_net(skb->sk);
+       struct sctp_comm_param commp = {
+               .skb = skb,
+               .cb = cb,
+               .r = r,
+       };
+
+       /* eps hashtable dumps
+        * args:
+        * 0 : whether the listen socks have already been traversed
+        * 1 : the sock position recorded by the previous traversal
+        * 4 : temporary cursor used while traversing the list
+        */
+       if (cb->args[0] == 0) {
+               if (!(idiag_states & TCPF_LISTEN))
+                       goto skip;
+               if (sctp_for_each_endpoint(sctp_ep_dump, &commp))
+                       goto done;
+skip:
+               cb->args[0] = 1;
+               cb->args[1] = 0;
+               cb->args[4] = 0;
+       }
+
+       /* asocs by transport hashtable dump
+        * args:
+        * 1 : the assoc position recorded by the previous traversal
+        * 2 : the transport position recorded by the previous traversal
+        * 3 : marks whether the ep info of the current asoc has been dumped
+        * 4 : temporary cursor used while traversing the list
+        */
+       if (!(idiag_states & ~TCPF_LISTEN))
+               goto done;
+       sctp_for_each_transport(sctp_tsp_dump, net, cb->args[2], &commp);
+done:
+       cb->args[1] = cb->args[4];
+       cb->args[4] = 0;
+}
+
+static const struct inet_diag_handler sctp_diag_handler = {
+       .dump            = sctp_diag_dump,
+       .dump_one        = sctp_diag_dump_one,
+       .idiag_get_info  = sctp_diag_get_info,
+       .idiag_type      = IPPROTO_SCTP,
+       .idiag_info_size = sizeof(struct sctp_info),
+};
+
+static int __init sctp_diag_init(void)
+{
+       return inet_diag_register(&sctp_diag_handler);
+}
+
+static void __exit sctp_diag_exit(void)
+{
+       inet_diag_unregister(&sctp_diag_handler);
+}
+
+module_init(sctp_diag_init);
+module_exit(sctp_diag_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-132);
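
A hypothetical userspace sketch (illustrative, not part of the patch; only standard uapi names are used) that exercises the new handler by dumping all SCTP sockets over NETLINK_SOCK_DIAG:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/inet_diag.h>

int main(void)
{
        struct {
                struct nlmsghdr nlh;
                struct inet_diag_req_v2 req;
        } msg = {
                .nlh = {
                        .nlmsg_len   = sizeof(msg),
                        .nlmsg_type  = SOCK_DIAG_BY_FAMILY,
                        .nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
                },
                .req = {
                        .sdiag_family   = AF_INET,
                        .sdiag_protocol = IPPROTO_SCTP,
                        .idiag_states   = ~0U,  /* listeners and asocs */
                },
        };
        char buf[8192];
        ssize_t n;
        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);

        if (fd < 0 || send(fd, &msg, sizeof(msg), 0) < 0)
                return 1;
        while ((n = recv(fd, buf, sizeof(buf), 0)) > 0) {
                struct nlmsghdr *h = (struct nlmsghdr *)buf;

                for (; NLMSG_OK(h, n); h = NLMSG_NEXT(h, n)) {
                        if (h->nlmsg_type == NLMSG_DONE ||
                            h->nlmsg_type == NLMSG_ERROR) {
                                close(fd);
                                return 0;
                        }
                        /* payload: struct inet_diag_msg + nested attrs */
                        printf("diag message, len %u\n", h->nlmsg_len);
                }
        }
        close(fd);
        return 0;
}

Later iproute2 releases expose the same dump through ss (e.g. "ss -A sctp"), assuming this handler is available in the running kernel.
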
index 7f0bf798205ba3c5311bf57ace66aaaa57cb0367..56f364d8f93270f31867333585fb28317f9d87ad 100644 (file)
@@ -3080,8 +3080,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
                        return SCTP_ERROR_RSRC_LOW;
 
                /* Start the heartbeat timer. */
-               if (!mod_timer(&peer->hb_timer, sctp_transport_timeout(peer)))
-                       sctp_transport_hold(peer);
+               sctp_transport_reset_hb_timer(peer);
                asoc->new_transport = peer;
                break;
        case SCTP_PARAM_DEL_IP:
index 7fe56d0acabf66cfd8fe29dfdb45f7620b470ac7..aa37122593684d8501fdca15983fbd8620fabe07 100644 (file)
@@ -69,8 +69,6 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
                             sctp_cmd_seq_t *commands,
                             gfp_t gfp);
 
-static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
-                                    struct sctp_transport *t);
 /********************************************************************
  * Helper functions
  ********************************************************************/
@@ -367,6 +365,7 @@ void sctp_generate_heartbeat_event(unsigned long data)
        struct sctp_association *asoc = transport->asoc;
        struct sock *sk = asoc->base.sk;
        struct net *net = sock_net(sk);
+       u32 elapsed, timeout;
 
        bh_lock_sock(sk);
        if (sock_owned_by_user(sk)) {
@@ -378,6 +377,16 @@ void sctp_generate_heartbeat_event(unsigned long data)
                goto out_unlock;
        }
 
+       /* Check if we should still send the heartbeat or reschedule */
+       elapsed = jiffies - transport->last_time_sent;
+       timeout = sctp_transport_timeout(transport);
+       if (elapsed < timeout) {
+               elapsed = timeout - elapsed;
+               if (!mod_timer(&transport->hb_timer, jiffies + elapsed))
+                       sctp_transport_hold(transport);
+               goto out_unlock;
+       }
+
        error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
                           SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
                           asoc->state, asoc->ep, asoc,
@@ -507,7 +516,7 @@ static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands,
                                             0);
 
                /* Update the hb timer to resend a heartbeat every rto */
-               sctp_cmd_hb_timer_update(commands, transport);
+               sctp_transport_reset_hb_timer(transport);
        }
 
        if (transport->state != SCTP_INACTIVE &&
@@ -634,11 +643,8 @@ static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds,
         * hold a reference on the transport to make sure none of
         * the needed data structures go away.
         */
-       list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
-
-               if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
-                       sctp_transport_hold(t);
-       }
+       list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
+               sctp_transport_reset_hb_timer(t);
 }
 
 static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds,
@@ -669,15 +675,6 @@ static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds,
 }
 
 
-/* Helper function to update the heartbeat timer. */
-static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
-                                    struct sctp_transport *t)
-{
-       /* Update the heartbeat timer.  */
-       if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
-               sctp_transport_hold(t);
-}
-
 /* Helper function to handle the reception of a HEARTBEAT ACK.  */
 static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
                                  struct sctp_association *asoc,
@@ -742,8 +739,7 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
        sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));
 
        /* Update the heartbeat timer.  */
-       if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
-               sctp_transport_hold(t);
+       sctp_transport_reset_hb_timer(t);
 
        if (was_unconfirmed && asoc->peer.transport_count == 1)
                sctp_transport_immediate_rtx(t);
@@ -1222,6 +1218,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
                                sctp_cmd_seq_t *commands,
                                gfp_t gfp)
 {
+       struct sock *sk = ep->base.sk;
+       struct sctp_sock *sp = sctp_sk(sk);
        int error = 0;
        int force;
        sctp_cmd_t *cmd;
@@ -1614,7 +1612,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 
                case SCTP_CMD_HB_TIMER_UPDATE:
                        t = cmd->obj.transport;
-                       sctp_cmd_hb_timer_update(commands, t);
+                       sctp_transport_reset_hb_timer(t);
                        break;
 
                case SCTP_CMD_HB_TIMERS_STOP:
@@ -1742,6 +1740,10 @@ out:
                        error = sctp_outq_uncork(&asoc->outqueue, gfp);
        } else if (local_cork)
                error = sctp_outq_uncork(&asoc->outqueue, gfp);
+
+       if (sp->data_ready_signalled)
+               sp->data_ready_signalled = 0;
+
        return error;
 nomem:
        error = -ENOMEM;
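
Design note: the data_ready_signalled flag (added to struct sctp_sock in this series) lets the ulpqueue paths further down wake the reader at most once per state-machine run instead of once per queued event; clearing it here, after all side effects of the current event have been processed, re-arms the wakeup for the next inbound chunk.
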
index 878d28eda1a68dc639d7b9f3f663d7518f21bb32..777d0324594a33a407e9ec157a7634334b1292e2 100644 (file)
@@ -4202,6 +4202,222 @@ static void sctp_shutdown(struct sock *sk, int how)
        }
 }
 
+int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
+                      struct sctp_info *info)
+{
+       struct sctp_transport *prim;
+       struct list_head *pos;
+       int mask;
+
+       memset(info, 0, sizeof(*info));
+       if (!asoc) {
+               struct sctp_sock *sp = sctp_sk(sk);
+
+               info->sctpi_s_autoclose = sp->autoclose;
+               info->sctpi_s_adaptation_ind = sp->adaptation_ind;
+               info->sctpi_s_pd_point = sp->pd_point;
+               info->sctpi_s_nodelay = sp->nodelay;
+               info->sctpi_s_disable_fragments = sp->disable_fragments;
+               info->sctpi_s_v4mapped = sp->v4mapped;
+               info->sctpi_s_frag_interleave = sp->frag_interleave;
+
+               return 0;
+       }
+
+       info->sctpi_tag = asoc->c.my_vtag;
+       info->sctpi_state = asoc->state;
+       info->sctpi_rwnd = asoc->a_rwnd;
+       info->sctpi_unackdata = asoc->unack_data;
+       info->sctpi_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
+       info->sctpi_instrms = asoc->c.sinit_max_instreams;
+       info->sctpi_outstrms = asoc->c.sinit_num_ostreams;
+       list_for_each(pos, &asoc->base.inqueue.in_chunk_list)
+               info->sctpi_inqueue++;
+       list_for_each(pos, &asoc->outqueue.out_chunk_list)
+               info->sctpi_outqueue++;
+       info->sctpi_overall_error = asoc->overall_error_count;
+       info->sctpi_max_burst = asoc->max_burst;
+       info->sctpi_maxseg = asoc->frag_point;
+       info->sctpi_peer_rwnd = asoc->peer.rwnd;
+       info->sctpi_peer_tag = asoc->c.peer_vtag;
+
+       mask = asoc->peer.ecn_capable << 1;
+       mask = (mask | asoc->peer.ipv4_address) << 1;
+       mask = (mask | asoc->peer.ipv6_address) << 1;
+       mask = (mask | asoc->peer.hostname_address) << 1;
+       mask = (mask | asoc->peer.asconf_capable) << 1;
+       mask = (mask | asoc->peer.prsctp_capable) << 1;
+       mask = (mask | asoc->peer.auth_capable);
+       info->sctpi_peer_capable = mask;
+       mask = asoc->peer.sack_needed << 1;
+       mask = (mask | asoc->peer.sack_generation) << 1;
+       mask = (mask | asoc->peer.zero_window_announced);
+       info->sctpi_peer_sack = mask;
+
+       info->sctpi_isacks = asoc->stats.isacks;
+       info->sctpi_osacks = asoc->stats.osacks;
+       info->sctpi_opackets = asoc->stats.opackets;
+       info->sctpi_ipackets = asoc->stats.ipackets;
+       info->sctpi_rtxchunks = asoc->stats.rtxchunks;
+       info->sctpi_outofseqtsns = asoc->stats.outofseqtsns;
+       info->sctpi_idupchunks = asoc->stats.idupchunks;
+       info->sctpi_gapcnt = asoc->stats.gapcnt;
+       info->sctpi_ouodchunks = asoc->stats.ouodchunks;
+       info->sctpi_iuodchunks = asoc->stats.iuodchunks;
+       info->sctpi_oodchunks = asoc->stats.oodchunks;
+       info->sctpi_iodchunks = asoc->stats.iodchunks;
+       info->sctpi_octrlchunks = asoc->stats.octrlchunks;
+       info->sctpi_ictrlchunks = asoc->stats.ictrlchunks;
+
+       prim = asoc->peer.primary_path;
+       memcpy(&info->sctpi_p_address, &prim->ipaddr,
+              sizeof(struct sockaddr_storage));
+       info->sctpi_p_state = prim->state;
+       info->sctpi_p_cwnd = prim->cwnd;
+       info->sctpi_p_srtt = prim->srtt;
+       info->sctpi_p_rto = jiffies_to_msecs(prim->rto);
+       info->sctpi_p_hbinterval = prim->hbinterval;
+       info->sctpi_p_pathmaxrxt = prim->pathmaxrxt;
+       info->sctpi_p_sackdelay = jiffies_to_msecs(prim->sackdelay);
+       info->sctpi_p_ssthresh = prim->ssthresh;
+       info->sctpi_p_partial_bytes_acked = prim->partial_bytes_acked;
+       info->sctpi_p_flight_size = prim->flight_size;
+       info->sctpi_p_error = prim->error_count;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(sctp_get_sctp_info);
+
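
For reference, the two bitmasks built in sctp_get_sctp_info() above pack the peer booleans MSB-first, in shift order (derived from the code, not a uapi definition):

/* sctpi_peer_capable:            sctpi_peer_sack:
 *   bit 6  ecn_capable             bit 2  sack_needed
 *   bit 5  ipv4_address            bit 1  sack_generation
 *   bit 4  ipv6_address            bit 0  zero_window_announced
 *   bit 3  hostname_address
 *   bit 2  asconf_capable
 *   bit 1  prsctp_capable
 *   bit 0  auth_capable
 */
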
+/* use callback to avoid exporting the core structure */
+int sctp_transport_walk_start(struct rhashtable_iter *iter)
+{
+       int err;
+
+       err = rhashtable_walk_init(&sctp_transport_hashtable, iter,
+                                  GFP_KERNEL);
+       if (err)
+               return err;
+
+       err = rhashtable_walk_start(iter);
+       if (err && err != -EAGAIN) {
+               rhashtable_walk_exit(iter);
+               return err;
+       }
+
+       return 0;
+}
+
+void sctp_transport_walk_stop(struct rhashtable_iter *iter)
+{
+       rhashtable_walk_stop(iter);
+       rhashtable_walk_exit(iter);
+}
+
+struct sctp_transport *sctp_transport_get_next(struct net *net,
+                                              struct rhashtable_iter *iter)
+{
+       struct sctp_transport *t;
+
+       t = rhashtable_walk_next(iter);
+       for (; t; t = rhashtable_walk_next(iter)) {
+               if (IS_ERR(t)) {
+                       if (PTR_ERR(t) == -EAGAIN)
+                               continue;
+                       break;
+               }
+
+               if (net_eq(sock_net(t->asoc->base.sk), net) &&
+                   t->asoc->peer.primary_path == t)
+                       break;
+       }
+
+       return t;
+}
+
+struct sctp_transport *sctp_transport_get_idx(struct net *net,
+                                             struct rhashtable_iter *iter,
+                                             int pos)
+{
+       void *obj = SEQ_START_TOKEN;
+
+       while (pos && (obj = sctp_transport_get_next(net, iter)) &&
+              !IS_ERR(obj))
+               pos--;
+
+       return obj;
+}
+
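
A note on the rhashtable walker semantics relied on here: rhashtable_walk_start() returns -EAGAIN when the table was resized between init and start (the walk remains valid, entries may merely be seen twice), and rhashtable_walk_next() yields ERR_PTR(-EAGAIN) at a resize boundary. That is why sctp_transport_walk_start() above treats -EAGAIN as success and sctp_transport_get_next() skips -EAGAIN results instead of aborting the walk.
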
+int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
+                          void *p)
+{
+       int err = 0;
+       int hash = 0;
+       struct sctp_ep_common *epb;
+       struct sctp_hashbucket *head;
+
+       for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize;
+            hash++, head++) {
+               read_lock(&head->lock);
+               sctp_for_each_hentry(epb, &head->chain) {
+                       err = cb(sctp_ep(epb), p);
+                       if (err)
+                               break;
+               }
+               read_unlock(&head->lock);
+       }
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(sctp_for_each_endpoint);
+
+int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
+                                 struct net *net,
+                                 const union sctp_addr *laddr,
+                                 const union sctp_addr *paddr, void *p)
+{
+       struct sctp_transport *transport;
+       int err = 0;
+
+       rcu_read_lock();
+       transport = sctp_addrs_lookup_transport(net, laddr, paddr);
+       if (!transport || !sctp_transport_hold(transport))
+               goto out;
+       err = cb(transport, p);
+       sctp_transport_put(transport);
+
+out:
+       rcu_read_unlock();
+       return err;
+}
+EXPORT_SYMBOL_GPL(sctp_transport_lookup_process);
+
+int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
+                           struct net *net, int pos, void *p)
+{
+       struct rhashtable_iter hti;
+       void *obj;
+       int err;
+
+       err = sctp_transport_walk_start(&hti);
+       if (err)
+               return err;
+
+       sctp_transport_get_idx(net, &hti, pos);
+       obj = sctp_transport_get_next(net, &hti);
+       for (; obj && !IS_ERR(obj); obj = sctp_transport_get_next(net, &hti)) {
+               struct sctp_transport *transport = obj;
+
+               if (!sctp_transport_hold(transport))
+                       continue;
+               err = cb(transport, p);
+               sctp_transport_put(transport);
+               if (err)
+                       break;
+       }
+       sctp_transport_walk_stop(&hti);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(sctp_for_each_transport);
+
 /* 7.2.1 Association Status (SCTP_STATUS)
 
  * Applications can retrieve current status information about an
@@ -6430,6 +6646,8 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
 
        poll_wait(file, sk_sleep(sk), wait);
 
+       sock_rps_record_flow(sk);
+
        /* A TCP-style listening socket becomes readable when the accept queue
         * is not empty.
         */
@@ -6764,13 +6982,11 @@ struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
                 *  However, this function was correct in any case. 8)
                 */
                if (flags & MSG_PEEK) {
-                       spin_lock_bh(&sk->sk_receive_queue.lock);
                        skb = skb_peek(&sk->sk_receive_queue);
                        if (skb)
                                atomic_inc(&skb->users);
-                       spin_unlock_bh(&sk->sk_receive_queue.lock);
                } else {
-                       skb = skb_dequeue(&sk->sk_receive_queue);
+                       skb = __skb_dequeue(&sk->sk_receive_queue);
                }
 
                if (skb)
@@ -7186,6 +7402,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
        newsk->sk_lingertime = sk->sk_lingertime;
        newsk->sk_rcvtimeo = sk->sk_rcvtimeo;
        newsk->sk_sndtimeo = sk->sk_sndtimeo;
+       newsk->sk_rxhash = sk->sk_rxhash;
 
        newinet = inet_sk(newsk);
 
index 9b6b48c7524e4b441a151b80f0babec81f539d49..81b86678be4d6fccc527d3c3e509c12576b2194c 100644 (file)
@@ -183,7 +183,7 @@ static void sctp_transport_destroy(struct sctp_transport *transport)
 /* Start T3_rtx timer if it is not already running and update the heartbeat
  * timer.  This routine is called every time a DATA chunk is sent.
  */
-void sctp_transport_reset_timers(struct sctp_transport *transport)
+void sctp_transport_reset_t3_rtx(struct sctp_transport *transport)
 {
        /* RFC 2960 6.3.2 Retransmission Timer Rules
         *
@@ -197,11 +197,18 @@ void sctp_transport_reset_timers(struct sctp_transport *transport)
                if (!mod_timer(&transport->T3_rtx_timer,
                               jiffies + transport->rto))
                        sctp_transport_hold(transport);
+}
+
+void sctp_transport_reset_hb_timer(struct sctp_transport *transport)
+{
+       unsigned long expires;
 
        /* When a data chunk is sent, reset the heartbeat interval.  */
-       if (!mod_timer(&transport->hb_timer,
-                      sctp_transport_timeout(transport)))
-           sctp_transport_hold(transport);
+       expires = jiffies + sctp_transport_timeout(transport);
+       if (time_before(transport->hb_timer.expires, expires) &&
+           !mod_timer(&transport->hb_timer,
+                      expires + prandom_u32_max(transport->rto)))
+               sctp_transport_hold(transport);
 }
 
 /* This transport has been assigned to an association.
@@ -595,13 +602,13 @@ void sctp_transport_burst_reset(struct sctp_transport *t)
 unsigned long sctp_transport_timeout(struct sctp_transport *trans)
 {
        /* RTO + timer slack +/- 50% of RTO */
-       unsigned long timeout = (trans->rto >> 1) + prandom_u32_max(trans->rto);
+       unsigned long timeout = trans->rto >> 1;
 
        if (trans->state != SCTP_UNCONFIRMED &&
            trans->state != SCTP_PF)
                timeout += trans->hbinterval;
 
-       return timeout + jiffies;
+       return timeout;
 }
 
 /* Reset transport variables to their initial values */
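
With sctp_transport_timeout() now returning a relative interval, all randomization lives in sctp_transport_reset_hb_timer(), the one place that arms the timer. Illustrative numbers (assumed RFC-default settings, not from the diff): with an rto around 3 s and hbinterval 30 s on a confirmed transport, the base timeout is 30 s + 1.5 s, the timer is armed at jiffies + 31.5 s plus up to rto (3 s) of prandom jitter, and the time_before() check means a burst of DATA chunks can only push the expiry later, never rearm it earlier.
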
index ce469d648ffbe166f9ae1c5650f481256f31a7f8..ec166d2bd2d95d9aa69369da2ead9437da4ce8ed 100644 (file)
@@ -141,7 +141,8 @@ int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
                 */
                if (!skb_queue_empty(&sp->pd_lobby)) {
                        struct list_head *list;
-                       sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
+                       skb_queue_splice_tail_init(&sp->pd_lobby,
+                                                  &sk->sk_receive_queue);
                        list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
                        INIT_LIST_HEAD(list);
                        return 1;
@@ -193,6 +194,7 @@ static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
 int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 {
        struct sock *sk = ulpq->asoc->base.sk;
+       struct sctp_sock *sp = sctp_sk(sk);
        struct sk_buff_head *queue, *skb_list;
        struct sk_buff *skb = sctp_event2skb(event);
        int clear_pd = 0;
@@ -210,7 +212,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
                sk_incoming_cpu_update(sk);
        }
        /* Check if the user wishes to receive this event.  */
-       if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
+       if (!sctp_ulpevent_is_enabled(event, &sp->subscribe))
                goto out_free;
 
        /* If we are in partial delivery mode, post to the lobby until
@@ -218,7 +220,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
         * the association the cause of the partial delivery.
         */
 
-       if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) {
+       if (atomic_read(&sp->pd_mode) == 0) {
                queue = &sk->sk_receive_queue;
        } else {
                if (ulpq->pd_mode) {
@@ -230,7 +232,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
                        if ((event->msg_flags & MSG_NOTIFICATION) ||
                            (SCTP_DATA_NOT_FRAG ==
                                    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
-                               queue = &sctp_sk(sk)->pd_lobby;
+                               queue = &sp->pd_lobby;
                        else {
                                clear_pd = event->msg_flags & MSG_EOR;
                                queue = &sk->sk_receive_queue;
@@ -241,10 +243,10 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
                         * can queue this to the receive queue instead
                         * of the lobby.
                         */
-                       if (sctp_sk(sk)->frag_interleave)
+                       if (sp->frag_interleave)
                                queue = &sk->sk_receive_queue;
                        else
-                               queue = &sctp_sk(sk)->pd_lobby;
+                               queue = &sp->pd_lobby;
                }
        }
 
@@ -252,7 +254,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
         * collected on a list.
         */
        if (skb_list)
-               sctp_skb_list_tail(skb_list, queue);
+               skb_queue_splice_tail_init(skb_list, queue);
        else
                __skb_queue_tail(queue, skb);
 
@@ -263,8 +265,10 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
        if (clear_pd)
                sctp_ulpq_clear_pd(ulpq);
 
-       if (queue == &sk->sk_receive_queue)
+       if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {
+               sp->data_ready_signalled = 1;
                sk->sk_data_ready(sk);
+       }
        return 1;
 
 out_free:
@@ -1125,11 +1129,13 @@ void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
 {
        struct sctp_ulpevent *ev = NULL;
        struct sock *sk;
+       struct sctp_sock *sp;
 
        if (!ulpq->pd_mode)
                return;
 
        sk = ulpq->asoc->base.sk;
+       sp = sctp_sk(sk);
        if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
                                       &sctp_sk(sk)->subscribe))
                ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
@@ -1139,6 +1145,8 @@ void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
                __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
 
        /* If there is data waiting, send it up the socket now. */
-       if (sctp_ulpq_clear_pd(ulpq) || ev)
+       if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) {
+               sp->data_ready_signalled = 1;
                sk->sk_data_ready(sk);
+       }
 }
index afa3c3470717356ab89dd629537efbf60cf390d4..7789d79609dd1a5a80b22135c6983725d40f59a3 100644 (file)
@@ -600,9 +600,6 @@ void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags)
        if (tsflags & SOF_TIMESTAMPING_TX_SCHED)
                flags |= SKBTX_SCHED_TSTAMP;
 
-       if (tsflags & SOF_TIMESTAMPING_TX_ACK)
-               flags |= SKBTX_ACK_TSTAMP;
-
        *tx_flags = flags;
 }
 EXPORT_SYMBOL(__sock_tx_timestamp);
@@ -709,17 +706,16 @@ void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
 EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops);
 
 static inline int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg,
-                                    size_t size, int flags)
+                                    int flags)
 {
-       return sock->ops->recvmsg(sock, msg, size, flags);
+       return sock->ops->recvmsg(sock, msg, msg_data_left(msg), flags);
 }
 
-int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
-                int flags)
+int sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags)
 {
-       int err = security_socket_recvmsg(sock, msg, size, flags);
+       int err = security_socket_recvmsg(sock, msg, msg_data_left(msg), flags);
 
-       return err ?: sock_recvmsg_nosec(sock, msg, size, flags);
+       return err ?: sock_recvmsg_nosec(sock, msg, flags);
 }
 EXPORT_SYMBOL(sock_recvmsg);
 
@@ -746,7 +742,7 @@ int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
 
        iov_iter_kvec(&msg->msg_iter, READ | ITER_KVEC, vec, num, size);
        set_fs(KERNEL_DS);
-       result = sock_recvmsg(sock, msg, size, flags);
+       result = sock_recvmsg(sock, msg, flags);
        set_fs(oldfs);
        return result;
 }
@@ -796,7 +792,7 @@ static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to)
        if (!iov_iter_count(to))        /* Match SYS5 behaviour */
                return 0;
 
-       res = sock_recvmsg(sock, &msg, iov_iter_count(to), msg.msg_flags);
+       res = sock_recvmsg(sock, &msg, msg.msg_flags);
        *to = msg.msg_iter;
        return res;
 }
@@ -1696,7 +1692,7 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
        msg.msg_iocb = NULL;
        if (sock->file->f_flags & O_NONBLOCK)
                flags |= MSG_DONTWAIT;
-       err = sock_recvmsg(sock, &msg, iov_iter_count(&msg.msg_iter), flags);
+       err = sock_recvmsg(sock, &msg, flags);
 
        if (err >= 0 && addr != NULL) {
                err2 = move_addr_to_user(&address,
@@ -2073,7 +2069,7 @@ static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg,
        struct iovec iovstack[UIO_FASTIOV];
        struct iovec *iov = iovstack;
        unsigned long cmsg_ptr;
-       int total_len, len;
+       int len;
        ssize_t err;
 
        /* kernel mode address */
@@ -2091,7 +2087,6 @@ static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg,
                err = copy_msghdr_from_user(msg_sys, msg, &uaddr, &iov);
        if (err < 0)
                return err;
-       total_len = iov_iter_count(&msg_sys->msg_iter);
 
        cmsg_ptr = (unsigned long)msg_sys->msg_control;
        msg_sys->msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
@@ -2101,8 +2096,7 @@ static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg,
 
        if (sock->file->f_flags & O_NONBLOCK)
                flags |= MSG_DONTWAIT;
-       err = (nosec ? sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys,
-                                                         total_len, flags);
+       err = (nosec ? sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys, flags);
        if (err < 0)
                goto out_freeiov;
        len = err;
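
The net/socket.c changes all make the same point: callers were passing a byte count alongside the msghdr even though the iterator inside the msghdr already knows how much room is left, so the size parameter is dropped and derived via msg_data_left(), i.e. the remaining count of msg->msg_iter, at the one place it is needed. A sketch of the refactoring idea in plain C, with hypothetical names:

    #include <stddef.h>
    #include <stdio.h>

    struct iter {
        size_t count;               /* bytes remaining in the buffer */
    };

    struct msg {
        struct iter it;
    };

    /* before: length passed redundantly alongside the message */
    static int recv_old(struct msg *m, size_t size) { return (int)size; }

    /* after: length derived from the iterator, one source of truth */
    static size_t msg_left(const struct msg *m) { return m->it.count; }
    static int recv_new(struct msg *m) { return (int)msg_left(m); }

    int main(void)
    {
        struct msg m = { { 1500 } };

        /* both forms agree, but the second cannot get out of sync */
        printf("%d %d\n", recv_old(&m, m.it.count), recv_new(&m));
        return 0;
    }
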
index 8c6bc795f0602a9991fc2b0782b965d96d6b5bf9..15612ffa8d57271c2dc8cac81a148d910a3cff99 100644 (file)
@@ -1728,8 +1728,8 @@ alloc_enc_pages(struct rpc_rqst *rqstp)
                return 0;
        }
 
-       first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
-       last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_CACHE_SHIFT;
+       first = snd_buf->page_base >> PAGE_SHIFT;
+       last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_SHIFT;
        rqstp->rq_enc_pages_num = last - first + 1 + 1;
        rqstp->rq_enc_pages
                = kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *),
@@ -1775,10 +1775,10 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
        status = alloc_enc_pages(rqstp);
        if (status)
                return status;
-       first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
+       first = snd_buf->page_base >> PAGE_SHIFT;
        inpages = snd_buf->pages + first;
        snd_buf->pages = rqstp->rq_enc_pages;
-       snd_buf->page_base -= first << PAGE_CACHE_SHIFT;
+       snd_buf->page_base -= first << PAGE_SHIFT;
        /*
         * Give the tail its own page, in case we need extra space in the
         * head when wrapping:
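
The PAGE_CACHE_SHIFT/PAGE_CACHE_SIZE/PAGE_CACHE_MASK substitutions recurring through these sunrpc files (and page_cache_release() becoming put_page() below) are mechanical: the page-cache macros were defined as straight aliases of PAGE_SHIFT/PAGE_SIZE/PAGE_MASK and are being removed tree-wide, so the index/offset arithmetic is unchanged. A sketch of that arithmetic, assuming 4 KiB pages for concreteness:

    #include <stdio.h>

    #define PAGE_SHIFT 12                    /* assume 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long base = 9000;           /* byte offset into a page array */

        unsigned long page_index  = base >> PAGE_SHIFT;   /* which page */
        unsigned long page_offset = base & ~PAGE_MASK;    /* where in it */

        printf("index=%lu offset=%lu\n", page_index, page_offset); /* 2, 808 */
        return 0;
    }
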
index d94a8e1e9f05b37cdb3bc82a3b419a0b0b8c1bbf..244245bcbbd25554938ab099137799d47ef6791b 100644 (file)
@@ -78,6 +78,7 @@ krb5_encrypt(
        memcpy(out, in, length);
        sg_init_one(sg, out, length);
 
+       skcipher_request_set_tfm(req, tfm);
        skcipher_request_set_callback(req, 0, NULL, NULL);
        skcipher_request_set_crypt(req, sg, sg, length, local_iv);
 
@@ -115,6 +116,7 @@ krb5_decrypt(
        memcpy(out, in, length);
        sg_init_one(sg, out, length);
 
+       skcipher_request_set_tfm(req, tfm);
        skcipher_request_set_callback(req, 0, NULL, NULL);
        skcipher_request_set_crypt(req, sg, sg, length, local_iv);
 
@@ -465,7 +467,7 @@ encryptor(struct scatterlist *sg, void *data)
        page_pos = desc->pos - outbuf->head[0].iov_len;
        if (page_pos >= 0 && page_pos < outbuf->page_len) {
                /* pages are not in place: */
-               int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT;
+               int i = (page_pos + outbuf->page_base) >> PAGE_SHIFT;
                in_page = desc->pages[i];
        } else {
                in_page = sg_page(sg);
@@ -946,7 +948,8 @@ krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
                return PTR_ERR(hmac);
        }
 
-       desc = kmalloc(sizeof(*desc), GFP_KERNEL);
+       desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
+                      GFP_KERNEL);
        if (!desc) {
                dprintk("%s: failed to allocate shash descriptor for '%s'\n",
                        __func__, kctx->gk5e->cksum_name);
@@ -1012,7 +1015,8 @@ krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
                return PTR_ERR(hmac);
        }
 
-       desc = kmalloc(sizeof(*desc), GFP_KERNEL);
+       desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
+                      GFP_KERNEL);
        if (!desc) {
                dprintk("%s: failed to allocate shash descriptor for '%s'\n",
                        __func__, kctx->gk5e->cksum_name);
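
The kmalloc change in the two RC4 setup routines is a buffer-overrun fix: a shash descriptor is only a header, and the transform's operational state lives in a flexible region behind it whose size crypto_shash_descsize() reports, so allocating sizeof(*desc) alone is too small as soon as the hash writes its state. (The skcipher hunks in the same file similarly bind the transform to the on-stack request before it is used.) A userspace analogue of the header-plus-trailing-state layout, hedged and not the crypto API:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct desc_hdr {
        const char *alg;         /* header only, like struct shash_desc */
        unsigned char ctx[];     /* per-algorithm state follows in memory */
    };

    /* stand-in for crypto_shash_descsize(): state size of the algorithm */
    static size_t descsize(const char *alg)
    {
        return strcmp(alg, "hmac") ? 64 : 128;
    }

    static struct desc_hdr *alloc_desc(const char *alg)
    {
        /* the fix: header PLUS trailing state, never the header alone */
        struct desc_hdr *d = malloc(sizeof(*d) + descsize(alg));

        if (d)
            d->alg = alg;
        return d;
    }

    int main(void)
    {
        struct desc_hdr *d = alloc_desc("hmac");

        if (!d)
            return 1;
        memset(d->ctx, 0, descsize(d->alg));   /* safe: space was reserved */
        free(d);
        return 0;
    }
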
index 71341ccb989043acd4c6d449a6d89d9db8b98513..65427492b1c95f681f79345e7ce29c62d941e147 100644 (file)
@@ -451,7 +451,8 @@ context_derive_keys_rc4(struct krb5_ctx *ctx)
                goto out_err_free_hmac;
 
 
-       desc = kmalloc(sizeof(*desc), GFP_KERNEL);
+       desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
+                      GFP_KERNEL);
        if (!desc) {
                dprintk("%s: failed to allocate hash descriptor for '%s'\n",
                        __func__, ctx->gk5e->cksum_name);
index 765088e4ad84d073b3587917942b9059717875b3..a737c2da08373e33aabb42b994cf83a02182711a 100644 (file)
@@ -79,9 +79,9 @@ gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
                len -= buf->head[0].iov_len;
        if (len <= buf->page_len) {
                unsigned int last = (buf->page_base + len - 1)
-                                       >>PAGE_CACHE_SHIFT;
+                                       >>PAGE_SHIFT;
                unsigned int offset = (buf->page_base + len - 1)
-                                       & (PAGE_CACHE_SIZE - 1);
+                                       & (PAGE_SIZE - 1);
                ptr = kmap_atomic(buf->pages[last]);
                pad = *(ptr + offset);
                kunmap_atomic(ptr);
index 008c25d1b9f9c8c955e0da6fe631a2207966f2bb..553bf95f700301a4821d4317e9277f784370bd22 100644 (file)
@@ -881,7 +881,7 @@ static ssize_t cache_downcall(struct address_space *mapping,
        char *kaddr;
        ssize_t ret = -ENOMEM;
 
-       if (count >= PAGE_CACHE_SIZE)
+       if (count >= PAGE_SIZE)
                goto out_slow;
 
        page = find_or_create_page(mapping, 0, GFP_KERNEL);
@@ -892,7 +892,7 @@ static ssize_t cache_downcall(struct address_space *mapping,
        ret = cache_do_downcall(kaddr, buf, count, cd);
        kunmap(page);
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
        return ret;
 out_slow:
        return cache_slow_downcall(buf, count, cd);
index 31789ef3e614484a4d5c75721d4b9f63fc735f6a..fc48eca21fd2edb5a4b7ef9770cd03e106956b10 100644 (file)
@@ -1390,8 +1390,8 @@ rpc_fill_super(struct super_block *sb, void *data, int silent)
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
        int err;
 
-       sb->s_blocksize = PAGE_CACHE_SIZE;
-       sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+       sb->s_blocksize = PAGE_SIZE;
+       sb->s_blocksize_bits = PAGE_SHIFT;
        sb->s_magic = RPCAUTH_GSSMAGIC;
        sb->s_op = &s_ops;
        sb->s_d_op = &simple_dentry_operations;
index 2df87f78e518eab4f4fedd37eb38de1ae6c8aaea..f217c348b34100b4cd47e4864b22cb648c4fc141 100644 (file)
@@ -96,8 +96,8 @@ ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct
        if (base || xdr->page_base) {
                pglen -= base;
                base += xdr->page_base;
-               ppage += base >> PAGE_CACHE_SHIFT;
-               base &= ~PAGE_CACHE_MASK;
+               ppage += base >> PAGE_SHIFT;
+               base &= ~PAGE_MASK;
        }
        do {
                char *kaddr;
@@ -113,7 +113,7 @@ ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct
                        }
                }
 
-               len = PAGE_CACHE_SIZE;
+               len = PAGE_SIZE;
                kaddr = kmap_atomic(*ppage);
                if (base) {
                        len -= base;
@@ -155,7 +155,7 @@ int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
        struct xdr_skb_reader   desc;
 
        desc.skb = skb;
-       desc.offset = sizeof(struct udphdr);
+       desc.offset = 0;
        desc.count = skb->len - desc.offset;
 
        if (skb_csum_unnecessary(skb))
index 1413cdcc131c4a7ed49487e4a19691644e0ca65c..dadfec66dbd8abd301fa3ac307431dabe7999ec6 100644 (file)
@@ -85,8 +85,7 @@ static void svc_reclassify_socket(struct socket *sock)
 {
        struct sock *sk = sock->sk;
 
-       WARN_ON_ONCE(sock_owned_by_user(sk));
-       if (sock_owned_by_user(sk))
+       if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
                return;
 
        switch (sk->sk_family) {
@@ -617,7 +616,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
        svsk->sk_sk->sk_stamp = skb->tstamp;
        set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */
 
-       len  = skb->len - sizeof(struct udphdr);
+       len  = skb->len;
        rqstp->rq_arg.len = len;
 
        rqstp->rq_prot = IPPROTO_UDP;
@@ -641,8 +640,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
                skb_free_datagram_locked(svsk->sk_sk, skb);
        } else {
                /* we can use it in-place */
-               rqstp->rq_arg.head[0].iov_base = skb->data +
-                       sizeof(struct udphdr);
+               rqstp->rq_arg.head[0].iov_base = skb->data;
                rqstp->rq_arg.head[0].iov_len = len;
                if (skb_checksum_complete(skb))
                        goto out_free;
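
Both the checksum-copy helper and svc_udp_recvfrom stop skipping sizeof(struct udphdr); this is consistent with a UDP-core change in the same merge window that pulls the header before the skb reaches datagram consumers, so payload parsing now starts at offset 0 (the xprtsock hunks further down make the same adjustment). Under that assumption, the offset math shifts as in this toy check:

    #include <assert.h>
    #include <stddef.h>

    #define UDP_HDR_LEN 8   /* sizeof(struct udphdr) */

    /* before: the datagram still carried its UDP header */
    static size_t payload_len_old(size_t skb_len) { return skb_len - UDP_HDR_LEN; }
    static size_t payload_off_old(void) { return UDP_HDR_LEN; }

    /* after: the header was pulled before the skb reached us */
    static size_t payload_len_new(size_t skb_len) { return skb_len; }
    static size_t payload_off_new(void) { return 0; }

    int main(void)
    {
        size_t wire = 108;  /* 8-byte header + 100 payload bytes on the wire */

        /* both views agree on the payload itself */
        assert(payload_len_old(wire) == payload_len_new(wire - UDP_HDR_LEN));
        assert(payload_off_old() - UDP_HDR_LEN == payload_off_new());
        return 0;
    }
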
index 4439ac4c1b53fcaf12a8a06723b4a51330dddeae..6bdb3865212d2edee16454970e21903870fd5d18 100644 (file)
@@ -164,7 +164,7 @@ EXPORT_SYMBOL_GPL(xdr_inline_pages);
  * Note: the addresses pgto_base and pgfrom_base are both calculated in
  *       the same way:
  *            if a memory area starts at byte 'base' in page 'pages[i]',
- *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
+ *            then its address is given as (i << PAGE_SHIFT) + base
  * Also note: pgfrom_base must be < pgto_base, but the memory areas
  *     they point to may overlap.
  */
@@ -181,20 +181,20 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base,
        pgto_base += len;
        pgfrom_base += len;
 
-       pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
-       pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);
+       pgto = pages + (pgto_base >> PAGE_SHIFT);
+       pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);
 
-       pgto_base &= ~PAGE_CACHE_MASK;
-       pgfrom_base &= ~PAGE_CACHE_MASK;
+       pgto_base &= ~PAGE_MASK;
+       pgfrom_base &= ~PAGE_MASK;
 
        do {
                /* Are any pointers crossing a page boundary? */
                if (pgto_base == 0) {
-                       pgto_base = PAGE_CACHE_SIZE;
+                       pgto_base = PAGE_SIZE;
                        pgto--;
                }
                if (pgfrom_base == 0) {
-                       pgfrom_base = PAGE_CACHE_SIZE;
+                       pgfrom_base = PAGE_SIZE;
                        pgfrom--;
                }
 
@@ -236,11 +236,11 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
        char *vto;
        size_t copy;
 
-       pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
-       pgbase &= ~PAGE_CACHE_MASK;
+       pgto = pages + (pgbase >> PAGE_SHIFT);
+       pgbase &= ~PAGE_MASK;
 
        for (;;) {
-               copy = PAGE_CACHE_SIZE - pgbase;
+               copy = PAGE_SIZE - pgbase;
                if (copy > len)
                        copy = len;
 
@@ -253,7 +253,7 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
                        break;
 
                pgbase += copy;
-               if (pgbase == PAGE_CACHE_SIZE) {
+               if (pgbase == PAGE_SIZE) {
                        flush_dcache_page(*pgto);
                        pgbase = 0;
                        pgto++;
@@ -280,11 +280,11 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
        char *vfrom;
        size_t copy;
 
-       pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
-       pgbase &= ~PAGE_CACHE_MASK;
+       pgfrom = pages + (pgbase >> PAGE_SHIFT);
+       pgbase &= ~PAGE_MASK;
 
        do {
-               copy = PAGE_CACHE_SIZE - pgbase;
+               copy = PAGE_SIZE - pgbase;
                if (copy > len)
                        copy = len;
 
@@ -293,7 +293,7 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
                kunmap_atomic(vfrom);
 
                pgbase += copy;
-               if (pgbase == PAGE_CACHE_SIZE) {
+               if (pgbase == PAGE_SIZE) {
                        pgbase = 0;
                        pgfrom++;
                }
@@ -1038,8 +1038,8 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
        if (base < buf->page_len) {
                subbuf->page_len = min(buf->page_len - base, len);
                base += buf->page_base;
-               subbuf->page_base = base & ~PAGE_CACHE_MASK;
-               subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
+               subbuf->page_base = base & ~PAGE_MASK;
+               subbuf->pages = &buf->pages[base >> PAGE_SHIFT];
                len -= subbuf->page_len;
                base = 0;
        } else {
@@ -1297,9 +1297,9 @@ xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
                todo -= avail_here;
 
                base += buf->page_base;
-               ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
-               base &= ~PAGE_CACHE_MASK;
-               avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
+               ppages = buf->pages + (base >> PAGE_SHIFT);
+               base &= ~PAGE_MASK;
+               avail_page = min_t(unsigned int, PAGE_SIZE - base,
                                        avail_here);
                c = kmap(*ppages) + base;
 
@@ -1383,7 +1383,7 @@ xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
                        }
 
                        avail_page = min(avail_here,
-                                (unsigned int) PAGE_CACHE_SIZE);
+                                (unsigned int) PAGE_SIZE);
                }
                base = buf->page_len;  /* align to start of tail */
        }
@@ -1479,9 +1479,9 @@ xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
                if (page_len > len)
                        page_len = len;
                len -= page_len;
-               page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
-               i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
-               thislen = PAGE_CACHE_SIZE - page_offset;
+               page_offset = (offset + buf->page_base) & (PAGE_SIZE - 1);
+               i = (offset + buf->page_base) >> PAGE_SHIFT;
+               thislen = PAGE_SIZE - page_offset;
                do {
                        if (thislen > page_len)
                                thislen = page_len;
@@ -1492,7 +1492,7 @@ xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
                        page_len -= thislen;
                        i++;
                        page_offset = 0;
-                       thislen = PAGE_CACHE_SIZE;
+                       thislen = PAGE_SIZE;
                } while (page_len != 0);
                offset = 0;
        }
index 65e759569e4873619735b966cd055b9d634d2d3a..a6c68dc086af83233ee315642638f4a1990ee622 100644 (file)
@@ -995,15 +995,14 @@ static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
        u32 _xid;
        __be32 *xp;
 
-       repsize = skb->len - sizeof(struct udphdr);
+       repsize = skb->len;
        if (repsize < 4) {
                dprintk("RPC:       impossible RPC reply size %d!\n", repsize);
                return;
        }
 
        /* Copy the XID from the skb... */
-       xp = skb_header_pointer(skb, sizeof(struct udphdr),
-                               sizeof(_xid), &_xid);
+       xp = skb_header_pointer(skb, 0, sizeof(_xid), &_xid);
        if (xp == NULL)
                return;
 
@@ -1019,11 +1018,11 @@ static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
 
        /* Suck it into the iovec, verify checksum if not done by hw. */
        if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
-               UDPX_INC_STATS_BH(sk, UDP_MIB_INERRORS);
+               __UDPX_INC_STATS(sk, UDP_MIB_INERRORS);
                goto out_unlock;
        }
 
-       UDPX_INC_STATS_BH(sk, UDP_MIB_INDATAGRAMS);
+       __UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS);
 
        xprt_adjust_cwnd(xprt, task, copied);
        xprt_complete_rqst(task, copied);
@@ -1881,8 +1880,7 @@ static inline void xs_reclassify_socket6(struct socket *sock)
 
 static inline void xs_reclassify_socket(int family, struct socket *sock)
 {
-       WARN_ON_ONCE(sock_owned_by_user(sock->sk));
-       if (sock_owned_by_user(sock->sk))
+       if (WARN_ON_ONCE(!sock_allow_reclassification(sock->sk)))
                return;
 
        switch (family) {
index 2b9b98f1c2ff2a9092e0d84af5731ce5a458fd35..b7e01d88bdc5f74c85813b3fb9fa1bbf358abaaf 100644 (file)
@@ -305,6 +305,8 @@ static void switchdev_port_attr_set_deferred(struct net_device *dev,
        if (err && err != -EOPNOTSUPP)
                netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
                           err, attr->id);
+       if (attr->complete)
+               attr->complete(dev, err, attr->complete_priv);
 }
 
 static int switchdev_port_attr_set_defer(struct net_device *dev,
@@ -434,6 +436,8 @@ static void switchdev_port_obj_add_deferred(struct net_device *dev,
        if (err && err != -EOPNOTSUPP)
                netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
                           err, obj->id);
+       if (obj->complete)
+               obj->complete(dev, err, obj->complete_priv);
 }
 
 static int switchdev_port_obj_add_defer(struct net_device *dev,
@@ -502,6 +506,8 @@ static void switchdev_port_obj_del_deferred(struct net_device *dev,
        if (err && err != -EOPNOTSUPP)
                netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
                           err, obj->id);
+       if (obj->complete)
+               obj->complete(dev, err, obj->complete_priv);
 }
 
 static int switchdev_port_obj_del_defer(struct net_device *dev,
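
All three deferred switchdev paths (attr set, obj add, obj del) gain the same tail: if the request carries a completion callback, it is invoked with the final error once the deferred work has run, so a caller can wait for or account for asynchronous completion. A minimal sketch of this pattern with illustrative names:

    #include <stdio.h>

    struct op {
        int id;
        void (*complete)(int err, void *priv);   /* optional */
        void *complete_priv;
    };

    static int do_op(const struct op *o) { return o->id == 2 ? -95 : 0; }

    /* deferred worker: run the op, then signal completion if requested */
    static void op_deferred(struct op *o)
    {
        int err = do_op(o);

        if (err)
            fprintf(stderr, "op %d failed (err=%d)\n", o->id, err);
        if (o->complete)
            o->complete(err, o->complete_priv);
    }

    static void done(int err, void *priv) { *(int *)priv = err; }

    int main(void)
    {
        int result = 1;
        struct op o = { 2, done, &result };

        op_deferred(&o);
        printf("completed with %d\n", result);   /* -95, EOPNOTSUPP-like */
        return 0;
    }
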
index e31820516774037af2e6bf4ebef28a81f984607e..f686e41b5abb880cc09f9c1a707356047d32749f 100644 (file)
@@ -42,8 +42,6 @@
 #include <net/genetlink.h>
 
 #define MAX_MEDIA      3
-#define MAX_NODES      4096
-#define WSIZE          32
 
 /* Identifiers associated with TIPC message header media address info
  * - address info field is 32 bytes long
 #define TIPC_MEDIA_TYPE_IB     2
 #define TIPC_MEDIA_TYPE_UDP    3
 
-/**
- * struct tipc_node_map - set of node identifiers
- * @count: # of nodes in set
- * @map: bitmap of node identifiers that are in the set
- */
-struct tipc_node_map {
-       u32 count;
-       u32 map[MAX_NODES / WSIZE];
-};
-
 /**
  * struct tipc_media_addr - destination address used by TIPC bearers
  * @value: address info (format defined by media)
@@ -142,7 +130,6 @@ struct tipc_media {
  * @identity: array index of this bearer within TIPC bearer array
  * @link_req: ptr to (optional) structure making periodic link setup requests
  * @net_plane: network plane ('A' through 'H') currently associated with bearer
- * @nodes: indicates which nodes in cluster can be reached through bearer
  *
  * Note: media-specific code is responsible for initialization of the fields
  * indicated below when a bearer is enabled; TIPC's generic bearer code takes
@@ -163,8 +150,6 @@ struct tipc_bearer {
        u32 identity;
        struct tipc_link_req *link_req;
        char net_plane;
-       int node_cnt;
-       struct tipc_node_map nodes;
 };
 
 struct tipc_bearer_names {
index 03a842870c52d22ca5c8e72ce063f78ba8391521..fe1b062c4f18a96754c22001934f45eedb9ae8cc 100644 (file)
@@ -69,6 +69,7 @@ static int __net_init tipc_init_net(struct net *net)
        if (err)
                goto out_nametbl;
 
+       INIT_LIST_HEAD(&tn->dist_queue);
        err = tipc_topsrv_start(net);
        if (err)
                goto out_subscr;
@@ -111,11 +112,9 @@ static int __init tipc_init(void)
 
        pr_info("Activated (version " TIPC_MOD_VER ")\n");
 
-       sysctl_tipc_rmem[0] = TIPC_CONN_OVERLOAD_LIMIT >> 4 <<
-                             TIPC_LOW_IMPORTANCE;
-       sysctl_tipc_rmem[1] = TIPC_CONN_OVERLOAD_LIMIT >> 4 <<
-                             TIPC_CRITICAL_IMPORTANCE;
-       sysctl_tipc_rmem[2] = TIPC_CONN_OVERLOAD_LIMIT;
+       sysctl_tipc_rmem[0] = RCVBUF_MIN;
+       sysctl_tipc_rmem[1] = RCVBUF_DEF;
+       sysctl_tipc_rmem[2] = RCVBUF_MAX;
 
        err = tipc_netlink_start();
        if (err)
index 5504d63503df406f7dd1bb1245b85132fcd193ed..eff58dc53aa12b5d3644bd364b08f60f6d6cd7be 100644 (file)
@@ -103,6 +103,9 @@ struct tipc_net {
        spinlock_t nametbl_lock;
        struct name_table *nametbl;
 
+       /* Name dist queue */
+       struct list_head dist_queue;
+
        /* Topology subscription server */
        struct tipc_server *topsrv;
        atomic_t subscription_count;
index 7d2bb3e70baa8b673922a8d2e58a67304244e5e9..7059c94f33c55f03ce70e193a017cbf2bb441f3e 100644 (file)
@@ -140,6 +140,7 @@ struct tipc_link {
        char if_name[TIPC_MAX_IF_NAME];
        u32 priority;
        char net_plane;
+       u16 rst_cnt;
 
        /* Failover/synch */
        u16 drop_point;
@@ -699,42 +700,36 @@ static void link_profile_stats(struct tipc_link *l)
                l->stats.msg_length_profile[6]++;
 }
 
-/* tipc_link_timeout - perform periodic task as instructed from node timeout
- */
 /* tipc_link_timeout - perform periodic task as instructed from node timeout
  */
 int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
 {
-       int rc = 0;
-       int mtyp = STATE_MSG;
-       bool xmit = false;
-       bool prb = false;
+       int mtyp, rc = 0;
+       bool state = false;
+       bool probe = false;
+       bool setup = false;
        u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
        u16 bc_acked = l->bc_rcvlink->acked;
-       bool bc_up = link_is_up(l->bc_rcvlink);
 
        link_profile_stats(l);
 
        switch (l->state) {
        case LINK_ESTABLISHED:
        case LINK_SYNCHING:
-               if (!l->silent_intv_cnt) {
-                       if (bc_up && (bc_acked != bc_snt))
-                               xmit = true;
-               } else if (l->silent_intv_cnt <= l->abort_limit) {
-                       xmit = true;
-                       prb = true;
-               } else {
-                       rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
-               }
+               if (l->silent_intv_cnt > l->abort_limit)
+                       return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
+               mtyp = STATE_MSG;
+               state = bc_acked != bc_snt;
+               probe = l->silent_intv_cnt;
                l->silent_intv_cnt++;
                break;
        case LINK_RESET:
-               xmit = true;
+               setup = l->rst_cnt++ <= 4;
+               setup |= !(l->rst_cnt % 16);
                mtyp = RESET_MSG;
                break;
        case LINK_ESTABLISHING:
-               xmit = true;
+               setup = true;
                mtyp = ACTIVATE_MSG;
                break;
        case LINK_PEER_RESET:
@@ -745,8 +740,8 @@ int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
                break;
        }
 
-       if (xmit)
-               tipc_link_build_proto_msg(l, mtyp, prb, 0, 0, 0, xmitq);
+       if (state || probe || setup)
+               tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, xmitq);
 
        return rc;
 }
@@ -833,6 +828,7 @@ void tipc_link_reset(struct tipc_link *l)
        l->rcv_nxt = 1;
        l->acked = 0;
        l->silent_intv_cnt = 0;
+       l->rst_cnt = 0;
        l->stats.recv_info = 0;
        l->stale_count = 0;
        l->bc_peer_is_up = false;
@@ -1110,12 +1106,12 @@ static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
        return released;
 }
 
-/* tipc_link_build_ack_msg: prepare link acknowledge message for transmission
+/* tipc_link_build_state_msg: prepare link state message for transmission
  *
  * Note that sending of broadcast ack is coordinated among nodes, to reduce
  * risk of ack storms towards the sender
  */
-int tipc_link_build_ack_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
+int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
 {
        if (!l)
                return 0;
@@ -1140,11 +1136,17 @@ int tipc_link_build_ack_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
 void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
 {
        int mtyp = RESET_MSG;
+       struct sk_buff *skb;
 
        if (l->state == LINK_ESTABLISHING)
                mtyp = ACTIVATE_MSG;
 
        tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, xmitq);
+
+       /* Inform peer that this endpoint is going down if applicable */
+       skb = skb_peek_tail(xmitq);
+       if (skb && (l->state == LINK_RESET))
+               msg_set_peer_stopping(buf_msg(skb), 1);
 }
 
 /* tipc_link_build_nack_msg: prepare link nack message for transmission
@@ -1219,7 +1221,7 @@ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
                if (!tipc_data_input(l, skb, l->inputq))
                        rc |= tipc_link_input(l, skb, l->inputq);
                if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
-                       rc |= tipc_link_build_ack_msg(l, xmitq);
+                       rc |= tipc_link_build_state_msg(l, xmitq);
                if (unlikely(rc & ~TIPC_LINK_SND_BC_ACK))
                        break;
        } while ((skb = __skb_dequeue(defq)));
@@ -1411,7 +1413,9 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
                        l->priority = peers_prio;
 
                /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
-               if ((mtyp == RESET_MSG) || !link_is_up(l))
+               if (msg_peer_stopping(hdr))
+                       rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
+               else if ((mtyp == RESET_MSG) || !link_is_up(l))
                        rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
 
                /* ACTIVATE_MSG takes up link if it was already locally reset */
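
The tipc_link_timeout() rework above replaces the nested ifs with three independent reasons to emit a protocol message: state (broadcast acks outstanding), probe (the link has been silent for at least one interval), and setup (RESET/ACTIVATE retransmission, now backed off via the new rst_cnt after the first few attempts). A compressed userspace sketch of that decision table, illustrative only:

    #include <stdbool.h>
    #include <stdio.h>

    enum lstate { ESTABLISHED, RESET, ESTABLISHING };

    struct link {
        enum lstate st;
        unsigned int silent_intv_cnt, abort_limit, rst_cnt;
        bool bc_ack_pending;
    };

    /* returns true if a protocol message should be sent this tick */
    static bool timeout_tick(struct link *l)
    {
        bool state = false, probe = false, setup = false;

        switch (l->st) {
        case ESTABLISHED:
            if (l->silent_intv_cnt > l->abort_limit)
                return false;            /* link failure event instead */
            state = l->bc_ack_pending;
            probe = l->silent_intv_cnt > 0;
            l->silent_intv_cnt++;
            break;
        case RESET:
            /* eager at first, then only every 16th interval */
            setup = l->rst_cnt++ <= 4 || !(l->rst_cnt % 16);
            break;
        case ESTABLISHING:
            setup = true;
            break;
        }
        return state || probe || setup;
    }

    int main(void)
    {
        struct link l = { RESET, 0, 3, 0, false };

        for (int i = 0; i < 20; i++)
            printf("%d", timeout_tick(&l));   /* bursts, then backs off */
        printf("\n");
        return 0;
    }
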
index 6a94175ee20a983bbf6816f6c488a54066d85e78..d7e9d42fcb2d11855cd013382dfeef0ba7efd28a 100644 (file)
@@ -123,7 +123,7 @@ int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]);
 int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq);
 int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
                  struct sk_buff_head *xmitq);
-int tipc_link_build_ack_msg(struct tipc_link *l, struct sk_buff_head *xmitq);
+int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq);
 void tipc_link_add_bc_peer(struct tipc_link *snd_l,
                           struct tipc_link *uc_l,
                           struct sk_buff_head *xmitq);
index f34f639df6435fc222e6118b108b6711c190ee6c..024da8af91f0edf55e78c30edc682fe5741c6198 100644 (file)
@@ -715,6 +715,16 @@ static inline void msg_set_redundant_link(struct tipc_msg *m, u32 r)
        msg_set_bits(m, 5, 12, 0x1, r);
 }
 
+static inline u32 msg_peer_stopping(struct tipc_msg *m)
+{
+       return msg_bits(m, 5, 13, 0x1);
+}
+
+static inline void msg_set_peer_stopping(struct tipc_msg *m, u32 s)
+{
+       msg_set_bits(m, 5, 13, 0x1, s);
+}
+
 static inline char *msg_media_addr(struct tipc_msg *m)
 {
        return (char *)&m->hdr[TIPC_MEDIA_INFO_OFFSET];
@@ -733,16 +743,26 @@ static inline void msg_set_msgcnt(struct tipc_msg *m, u16 n)
        msg_set_bits(m, 9, 16, 0xffff, n);
 }
 
-static inline u32 msg_bcast_tag(struct tipc_msg *m)
+static inline u32 msg_conn_ack(struct tipc_msg *m)
 {
        return msg_bits(m, 9, 16, 0xffff);
 }
 
-static inline void msg_set_bcast_tag(struct tipc_msg *m, u32 n)
+static inline void msg_set_conn_ack(struct tipc_msg *m, u32 n)
 {
        msg_set_bits(m, 9, 16, 0xffff, n);
 }
 
+static inline u32 msg_adv_win(struct tipc_msg *m)
+{
+       return msg_bits(m, 9, 0, 0xffff);
+}
+
+static inline void msg_set_adv_win(struct tipc_msg *m, u32 n)
+{
+       msg_set_bits(m, 9, 0, 0xffff, n);
+}
+
 static inline u32 msg_max_pkt(struct tipc_msg *m)
 {
        return msg_bits(m, 9, 16, 0xffff) * 4;
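
TIPC headers pack several fields per 32-bit word, and msg_bits(m, w, pos, mask) reads a mask-wide field at bit position pos of word w. The msg.h hunks claim a spare bit (word 5, bit 13) for peer_stopping, and split word 9 so that the connection ack count keeps bits 16-31 (where msg_bcast_tag used to live) while the newly advertised window takes bits 0-15. A sketch of the accessor pattern in plain C:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t get_bits(uint32_t w, int pos, uint32_t mask)
    {
        return (w >> pos) & mask;
    }

    static uint32_t set_bits(uint32_t w, int pos, uint32_t mask, uint32_t v)
    {
        w &= ~(mask << pos);                /* clear the field */
        return w | ((v & mask) << pos);     /* write the new value */
    }

    int main(void)
    {
        uint32_t word9 = 0;

        /* conn_ack in the upper half, adv_win in the lower half */
        word9 = set_bits(word9, 16, 0xffff, 42);   /* acked messages/blocks */
        word9 = set_bits(word9, 0, 0xffff, 360);   /* advertised window */

        printf("ack=%u win=%u\n",
               get_bits(word9, 16, 0xffff),
               get_bits(word9, 0, 0xffff));        /* ack=42 win=360 */
        return 0;
    }
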
index ebe9d0ff6e9e9220621676e3384df3a28477d7a9..6b626a64b5179e9b7d8fc909be203bdf6facfbdd 100644 (file)
 
 int sysctl_tipc_named_timeout __read_mostly = 2000;
 
-/**
- * struct tipc_dist_queue - queue holding deferred name table updates
- */
-static struct list_head tipc_dist_queue = LIST_HEAD_INIT(tipc_dist_queue);
-
 struct distr_queue_item {
        struct distr_item i;
        u32 dtype;
@@ -229,12 +224,31 @@ static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
        kfree_rcu(p, rcu);
 }
 
+/**
+ * tipc_dist_queue_purge - remove deferred updates from a node that went down
+ */
+static void tipc_dist_queue_purge(struct net *net, u32 addr)
+{
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct distr_queue_item *e, *tmp;
+
+       spin_lock_bh(&tn->nametbl_lock);
+       list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
+               if (e->node != addr)
+                       continue;
+               list_del(&e->next);
+               kfree(e);
+       }
+       spin_unlock_bh(&tn->nametbl_lock);
+}
+
 void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr)
 {
        struct publication *publ, *tmp;
 
        list_for_each_entry_safe(publ, tmp, nsub_list, nodesub_list)
                tipc_publ_purge(net, publ, addr);
+       tipc_dist_queue_purge(net, addr);
 }
 
 /**
@@ -279,9 +293,11 @@ static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
  * tipc_named_add_backlog - add a failed name table update to the backlog
  *
  */
-static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
+static void tipc_named_add_backlog(struct net *net, struct distr_item *i,
+                                  u32 type, u32 node)
 {
        struct distr_queue_item *e;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        unsigned long now = get_jiffies_64();
 
        e = kzalloc(sizeof(*e), GFP_ATOMIC);
@@ -291,7 +307,7 @@ static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
        e->node = node;
        e->expires = now + msecs_to_jiffies(sysctl_tipc_named_timeout);
        memcpy(e, i, sizeof(*i));
-       list_add_tail(&e->next, &tipc_dist_queue);
+       list_add_tail(&e->next, &tn->dist_queue);
 }
 
 /**
@@ -301,10 +317,11 @@ static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
 void tipc_named_process_backlog(struct net *net)
 {
        struct distr_queue_item *e, *tmp;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        char addr[16];
        unsigned long now = get_jiffies_64();
 
-       list_for_each_entry_safe(e, tmp, &tipc_dist_queue, next) {
+       list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
                if (time_after(e->expires, now)) {
                        if (!tipc_update_nametbl(net, &e->i, e->node, e->dtype))
                                continue;
@@ -344,7 +361,7 @@ void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq)
                node = msg_orignode(msg);
                while (count--) {
                        if (!tipc_update_nametbl(net, item, node, mtype))
-                               tipc_named_add_backlog(item, mtype, node);
+                               tipc_named_add_backlog(net, item, mtype, node);
                        item++;
                }
                kfree_skb(skb);
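
Moving the backlog from a file-static list to the per-namespace tn->dist_queue lets tipc_dist_queue_purge() drop every deferred name-table update queued from a peer that went down, under the nametbl lock. The purge itself is the classic safe-iteration delete; a standalone sketch of the same shape:

    #include <stdio.h>
    #include <stdlib.h>

    struct item {
        unsigned int node;       /* originating node address */
        struct item *next;
    };

    /* remove every queued item that came from 'addr' */
    static void purge(struct item **head, unsigned int addr)
    {
        struct item **pp = head;

        while (*pp) {
            struct item *e = *pp;

            if (e->node == addr) {
                *pp = e->next;   /* unlink, then free */
                free(e);
            } else {
                pp = &e->next;
            }
        }
    }

    static struct item *push(struct item *head, unsigned int node)
    {
        struct item *e = malloc(sizeof(*e));

        e->node = node;
        e->next = head;
        return e;
    }

    int main(void)
    {
        struct item *q = push(push(push(NULL, 1), 2), 1);

        purge(&q, 1);
        for (struct item *e = q; e; e = e->next)
            printf("node %u survives\n", e->node);   /* only node 2 */
        return 0;
    }
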
index ace178fd385038523bc498222e9497245ccae4b0..d903f560e2fd28086febe52fbf216170b9ee2e19 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * net/tipc/node.c: TIPC node management routines
  *
- * Copyright (c) 2000-2006, 2012-2015, Ericsson AB
+ * Copyright (c) 2000-2006, 2012-2016, Ericsson AB
  * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
  * All rights reserved.
  *
@@ -191,6 +191,20 @@ int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel)
        tipc_node_put(n);
        return mtu;
 }
+
+u16 tipc_node_get_capabilities(struct net *net, u32 addr)
+{
+       struct tipc_node *n;
+       u16 caps;
+
+       n = tipc_node_find(net, addr);
+       if (unlikely(!n))
+               return TIPC_NODE_CAPABILITIES;
+       caps = n->capabilities;
+       tipc_node_put(n);
+       return caps;
+}
+
 /*
  * A trivial power-of-two bitmask technique is used for speed, since this
  * operation is done for every incoming TIPC packet. The number of hash table
@@ -304,8 +318,11 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
 
        spin_lock_bh(&tn->node_list_lock);
        n = tipc_node_find(net, addr);
-       if (n)
+       if (n) {
+               /* Same node may come back with new capabilities */
+               n->capabilities = capabilities;
                goto exit;
+       }
        n = kzalloc(sizeof(*n), GFP_ATOMIC);
        if (!n) {
                pr_warn("Node creation failed, no memory\n");
@@ -545,12 +562,16 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
        pr_debug("Established link <%s> on network plane %c\n",
                 tipc_link_name(nl), tipc_link_plane(nl));
 
+       /* Ensure that a STATE message goes first */
+       tipc_link_build_state_msg(nl, xmitq);
+
        /* First link? => give it both slots */
        if (!ol) {
                *slot0 = bearer_id;
                *slot1 = bearer_id;
                tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
                n->action_flags |= TIPC_NOTIFY_NODE_UP;
+               tipc_link_set_active(nl, true);
                tipc_bcast_add_peer(n->net, nl, xmitq);
                return;
        }
@@ -581,8 +602,12 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
 static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
                              struct sk_buff_head *xmitq)
 {
+       struct tipc_media_addr *maddr;
+
        tipc_node_write_lock(n);
        __tipc_node_link_up(n, bearer_id, xmitq);
+       maddr = &n->links[bearer_id].maddr;
+       tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr);
        tipc_node_write_unlock(n);
 }
 
@@ -1279,7 +1304,7 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id
        /* Broadcast ACKs are sent on a unicast link */
        if (rc & TIPC_LINK_SND_BC_ACK) {
                tipc_node_read_lock(n);
-               tipc_link_build_ack_msg(le->link, &xmitq);
+               tipc_link_build_state_msg(le->link, &xmitq);
                tipc_node_read_unlock(n);
        }
 
@@ -1444,6 +1469,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
        int bearer_id = b->identity;
        struct tipc_link_entry *le;
        u16 bc_ack = msg_bcast_ack(hdr);
+       u32 self = tipc_own_addr(net);
        int rc = 0;
 
        __skb_queue_head_init(&xmitq);
@@ -1460,6 +1486,10 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
                        return tipc_node_bc_rcv(net, skb, bearer_id);
        }
 
+       /* Discard unicast link messages destined for another node */
+       if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self)))
+               goto discard;
+
        /* Locate neighboring node that sent packet */
        n = tipc_node_find(net, msg_prevnode(hdr));
        if (unlikely(!n))
index f39d9d06e8bb0283f52562d17f67a5822d6797bd..8264b3d97dc45fe82110669d90dc48d481b2a6fa 100644 (file)
 /* Optional capabilities supported by this code version
  */
 enum {
-       TIPC_BCAST_SYNCH = (1 << 1)
+       TIPC_BCAST_SYNCH   = (1 << 1),
+       TIPC_BLOCK_FLOWCTL = (2 << 1)
 };
 
-#define TIPC_NODE_CAPABILITIES TIPC_BCAST_SYNCH
+#define TIPC_NODE_CAPABILITIES (TIPC_BCAST_SYNCH | TIPC_BLOCK_FLOWCTL)
 #define INVALID_BEARER_ID -1
 
 void tipc_node_stop(struct net *net);
@@ -70,6 +71,7 @@ void tipc_node_broadcast(struct net *net, struct sk_buff *skb);
 int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port);
 void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port);
 int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel);
+u16 tipc_node_get_capabilities(struct net *net, u32 addr);
 int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb);
 int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb);
 int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info);
index 2446bfbaa309284e9d23dd590650e1576ec6b072..7a0af2dc0406b8c1bf64315764f43dba5e451fe0 100644 (file)
@@ -86,6 +86,7 @@ struct outqueue_entry {
 static void tipc_recv_work(struct work_struct *work);
 static void tipc_send_work(struct work_struct *work);
 static void tipc_clean_outqueues(struct tipc_conn *con);
+static void tipc_sock_release(struct tipc_conn *con);
 
 static void tipc_conn_kref_release(struct kref *kref)
 {
@@ -102,6 +103,7 @@ static void tipc_conn_kref_release(struct kref *kref)
                }
                saddr->scope = -TIPC_NODE_SCOPE;
                kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr));
+               tipc_sock_release(con);
                sock_release(sock);
                con->sock = NULL;
        }
@@ -184,26 +186,31 @@ static void tipc_unregister_callbacks(struct tipc_conn *con)
        write_unlock_bh(&sk->sk_callback_lock);
 }
 
+static void tipc_sock_release(struct tipc_conn *con)
+{
+       struct tipc_server *s = con->server;
+
+       if (con->conid)
+               s->tipc_conn_release(con->conid, con->usr_data);
+
+       tipc_unregister_callbacks(con);
+}
+
 static void tipc_close_conn(struct tipc_conn *con)
 {
        struct tipc_server *s = con->server;
 
        if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
-               if (con->conid)
-                       s->tipc_conn_shutdown(con->conid, con->usr_data);
 
                spin_lock_bh(&s->idr_lock);
                idr_remove(&s->conn_idr, con->conid);
                s->idr_in_use--;
                spin_unlock_bh(&s->idr_lock);
 
-               tipc_unregister_callbacks(con);
-
                /* We shouldn't flush pending works as we may be in the
                 * thread. In fact the races with pending rx/tx work structs
                 * are harmless for us here as we have already deleted this
-                * connection from server connection list and set
-                * sk->sk_user_data to 0 before releasing connection object.
+                * connection from server connection list.
                 */
                kernel_sock_shutdown(con->sock, SHUT_RDWR);
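
The subscriber teardown callback moves out of tipc_close_conn() and into the kref release path: tipc_sock_release() now runs exactly once, when the last reference drops and just before sock_release(), apparently to ensure teardown cannot race with callbacks that still hold the connection. A minimal sketch of "release work in the refcount destructor", with illustrative names:

    #include <stdio.h>

    struct conn {
        int refs;
        void (*release_cb)(struct conn *);
    };

    static void conn_put(struct conn *c)
    {
        if (--c->refs)
            return;
        /* last reference: run user teardown, then free resources */
        if (c->release_cb)
            c->release_cb(c);
        printf("socket released\n");
    }

    static void my_release(struct conn *c) { printf("subscriber deleted\n"); }

    int main(void)
    {
        struct conn c = { 2, my_release };

        conn_put(&c);   /* close path drops one ref: nothing happens yet */
        conn_put(&c);   /* worker drops the last ref: teardown runs once */
        return 0;
    }
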
 
index 9015faedb1b0e7c9f4ea18b293aa3dc8b16f4bf3..34f8055afa3b77f41a6002a1f2d75cf6a674e4be 100644 (file)
@@ -53,7 +53,7 @@
  * @send_wq: send workqueue
  * @max_rcvbuf_size: maximum permitted receive message length
  * @tipc_conn_new: callback will be called when new connection is incoming
- * @tipc_conn_shutdown: callback will be called when connection is shut down
+ * @tipc_conn_release: callback will be called before releasing the connection
  * @tipc_conn_recvmsg: callback will be called when message arrives
  * @saddr: TIPC server address
  * @name: server name
@@ -70,7 +70,7 @@ struct tipc_server {
        struct workqueue_struct *send_wq;
        int max_rcvbuf_size;
        void *(*tipc_conn_new)(int conid);
-       void (*tipc_conn_shutdown)(int conid, void *usr_data);
+       void (*tipc_conn_release)(int conid, void *usr_data);
        void (*tipc_conn_recvmsg)(struct net *net, int conid,
                                  struct sockaddr_tipc *addr, void *usr_data,
                                  void *buf, size_t len);
index 3eeb50a27b89b6d9607b51b80a5ed5ce715235e4..12628890c2190b9cff66a12d2d2169c1192be761 100644 (file)
@@ -96,8 +96,11 @@ struct tipc_sock {
        uint conn_timeout;
        atomic_t dupl_rcvcnt;
        bool link_cong;
-       uint sent_unacked;
-       uint rcv_unacked;
+       u16 snt_unacked;
+       u16 snd_win;
+       u16 peer_caps;
+       u16 rcv_unacked;
+       u16 rcv_win;
        struct sockaddr_tipc remote;
        struct rhash_head node;
        struct rcu_head rcu;
@@ -227,9 +230,29 @@ static struct tipc_sock *tipc_sk(const struct sock *sk)
        return container_of(sk, struct tipc_sock, sk);
 }
 
-static int tsk_conn_cong(struct tipc_sock *tsk)
+static bool tsk_conn_cong(struct tipc_sock *tsk)
 {
-       return tsk->sent_unacked >= TIPC_FLOWCTRL_WIN;
+       return tsk->snt_unacked >= tsk->snd_win;
+}
+
+/* tsk_adv_blocks(): translate a buffer size in bytes to number of
+ * advertisable blocks, taking into account the ratio truesize(len)/len
+ * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ
+ */
+static u16 tsk_adv_blocks(int len)
+{
+       return len / FLOWCTL_BLK_SZ / 4;
+}
+
+/* tsk_inc(): increment counter for sent or received data
+ * - If block based flow control is not supported by the peer we
+ *   fall back to message based counting, incrementing the counter
+ */
+static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
+{
+       if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
+               return ((msglen / FLOWCTL_BLK_SZ) + 1);
+       return 1;
 }
 
 /**
@@ -377,9 +400,12 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
        sk->sk_write_space = tipc_write_space;
        sk->sk_destruct = tipc_sock_destruct;
        tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
-       tsk->sent_unacked = 0;
        atomic_set(&tsk->dupl_rcvcnt, 0);
 
+       /* Start out with safe limits until we receive an advertised window */
+       tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
+       tsk->rcv_win = tsk->snd_win;
+
        if (sock->state == SS_READY) {
                tsk_set_unreturnable(tsk, true);
                if (sock->type == SOCK_DGRAM)
@@ -775,7 +801,7 @@ static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb)
        struct sock *sk = &tsk->sk;
        struct tipc_msg *hdr = buf_msg(skb);
        int mtyp = msg_type(hdr);
-       int conn_cong;
+       bool conn_cong;
 
        /* Ignore if connection cannot be validated: */
        if (!tsk_peer_msg(tsk, hdr))
@@ -789,7 +815,9 @@ static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb)
                return;
        } else if (mtyp == CONN_ACK) {
                conn_cong = tsk_conn_cong(tsk);
-               tsk->sent_unacked -= msg_msgcnt(hdr);
+               tsk->snt_unacked -= msg_conn_ack(hdr);
+               if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
+                       tsk->snd_win = msg_adv_win(hdr);
                if (conn_cong)
                        sk->sk_write_space(sk);
        } else if (mtyp != CONN_PROBE_REPLY) {
@@ -1020,12 +1048,14 @@ static int __tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz)
        u32 dnode;
        uint mtu, send, sent = 0;
        struct iov_iter save;
+       int hlen = MIN_H_SIZE;
 
        /* Handle implied connection establishment */
        if (unlikely(dest)) {
                rc = __tipc_sendmsg(sock, m, dsz);
+               hlen = msg_hdr_sz(mhdr);
                if (dsz && (dsz == rc))
-                       tsk->sent_unacked = 1;
+                       tsk->snt_unacked = tsk_inc(tsk, dsz + hlen);
                return rc;
        }
        if (dsz > (uint)INT_MAX)
@@ -1054,7 +1084,7 @@ next:
                if (likely(!tsk_conn_cong(tsk))) {
                        rc = tipc_node_xmit(net, &pktchain, dnode, portid);
                        if (likely(!rc)) {
-                               tsk->sent_unacked++;
+                               tsk->snt_unacked += tsk_inc(tsk, send + hlen);
                                sent += send;
                                if (sent == dsz)
                                        return dsz;
@@ -1118,6 +1148,13 @@ static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
        sk_reset_timer(sk, &sk->sk_timer, jiffies + tsk->probing_intv);
        tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
        tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
+       tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
+       if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
+               return;
+
+       /* Fall back to message based flow control */
+       tsk->rcv_win = FLOWCTL_MSG_WIN;
+       tsk->snd_win = FLOWCTL_MSG_WIN;
 }
 
 /**
@@ -1214,7 +1251,7 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
        return 0;
 }
 
-static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack)
+static void tipc_sk_send_ack(struct tipc_sock *tsk)
 {
        struct net *net = sock_net(&tsk->sk);
        struct sk_buff *skb = NULL;
@@ -1230,7 +1267,14 @@ static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack)
        if (!skb)
                return;
        msg = buf_msg(skb);
-       msg_set_msgcnt(msg, ack);
+       msg_set_conn_ack(msg, tsk->rcv_unacked);
+       tsk->rcv_unacked = 0;
+
+       /* Adjust to and advertise the correct window limit */
+       if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
+               tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
+               msg_set_adv_win(msg, tsk->rcv_win);
+       }
        tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
 }
 
@@ -1288,7 +1332,7 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m, size_t buf_len,
        long timeo;
        unsigned int sz;
        u32 err;
-       int res;
+       int res, hlen;
 
        /* Catch invalid receive requests */
        if (unlikely(!buf_len))
@@ -1313,6 +1357,7 @@ restart:
        buf = skb_peek(&sk->sk_receive_queue);
        msg = buf_msg(buf);
        sz = msg_data_sz(msg);
+       hlen = msg_hdr_sz(msg);
        err = msg_errcode(msg);
 
        /* Discard an empty non-errored message & try again */
@@ -1335,7 +1380,7 @@ restart:
                        sz = buf_len;
                        m->msg_flags |= MSG_TRUNC;
                }
-               res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg), m, sz);
+               res = skb_copy_datagram_msg(buf, hlen, m, sz);
                if (res)
                        goto exit;
                res = sz;
@@ -1347,15 +1392,15 @@ restart:
                        res = -ECONNRESET;
        }
 
-       /* Consume received message (optional) */
-       if (likely(!(flags & MSG_PEEK))) {
-               if ((sock->state != SS_READY) &&
-                   (++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) {
-                       tipc_sk_send_ack(tsk, tsk->rcv_unacked);
-                       tsk->rcv_unacked = 0;
-               }
-               tsk_advance_rx_queue(sk);
+       if (unlikely(flags & MSG_PEEK))
+               goto exit;
+
+       if (likely(sock->state != SS_READY)) {
+               tsk->rcv_unacked += tsk_inc(tsk, hlen + sz);
+               if (unlikely(tsk->rcv_unacked >= (tsk->rcv_win / 4)))
+                       tipc_sk_send_ack(tsk);
        }
+       tsk_advance_rx_queue(sk);
 exit:
        release_sock(sk);
        return res;
@@ -1384,7 +1429,7 @@ static int tipc_recv_stream(struct socket *sock, struct msghdr *m,
        int sz_to_copy, target, needed;
        int sz_copied = 0;
        u32 err;
-       int res = 0;
+       int res = 0, hlen;
 
        /* Catch invalid receive attempts */
        if (unlikely(!buf_len))
@@ -1410,6 +1455,7 @@ restart:
        buf = skb_peek(&sk->sk_receive_queue);
        msg = buf_msg(buf);
        sz = msg_data_sz(msg);
+       hlen = msg_hdr_sz(msg);
        err = msg_errcode(msg);
 
        /* Discard an empty non-errored message & try again */
@@ -1434,8 +1480,7 @@ restart:
                needed = (buf_len - sz_copied);
                sz_to_copy = (sz <= needed) ? sz : needed;
 
-               res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg) + offset,
-                                           m, sz_to_copy);
+               res = skb_copy_datagram_msg(buf, hlen + offset, m, sz_to_copy);
                if (res)
                        goto exit;
 
@@ -1457,20 +1502,18 @@ restart:
                        res = -ECONNRESET;
        }
 
-       /* Consume received message (optional) */
-       if (likely(!(flags & MSG_PEEK))) {
-               if (unlikely(++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) {
-                       tipc_sk_send_ack(tsk, tsk->rcv_unacked);
-                       tsk->rcv_unacked = 0;
-               }
-               tsk_advance_rx_queue(sk);
-       }
+       if (unlikely(flags & MSG_PEEK))
+               goto exit;
+
+       tsk->rcv_unacked += tsk_inc(tsk, hlen + sz);
+       if (unlikely(tsk->rcv_unacked >= (tsk->rcv_win / 4)))
+               tipc_sk_send_ack(tsk);
+       tsk_advance_rx_queue(sk);
 
        /* Loop around if more data is required */
        if ((sz_copied < buf_len) &&    /* didn't get all requested data */
            (!skb_queue_empty(&sk->sk_receive_queue) ||
            (sz_copied < target)) &&    /* and more is ready or required */
-           (!(flags & MSG_PEEK)) &&    /* and aren't just peeking at data */
            (!err))                     /* and haven't reached a FIN */
                goto restart;
 
@@ -1602,30 +1645,33 @@ static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
 /**
  * rcvbuf_limit - get proper overload limit of socket receive queue
  * @sk: socket
- * @buf: message
+ * @skb: message
  *
- * For all connection oriented messages, irrespective of importance,
- * the default overload value (i.e. 67MB) is set as limit.
+ * For connection oriented messages, irrespective of importance,
+ * default queue limit is 2 MB.
  *
- * For all connectionless messages, by default new queue limits are
- * as belows:
+ * For connectionless messages, queue limits are based on message
+ * importance as follows:
  *
- * TIPC_LOW_IMPORTANCE       (4 MB)
- * TIPC_MEDIUM_IMPORTANCE    (8 MB)
- * TIPC_HIGH_IMPORTANCE      (16 MB)
- * TIPC_CRITICAL_IMPORTANCE  (32 MB)
+ * TIPC_LOW_IMPORTANCE       (2 MB)
+ * TIPC_MEDIUM_IMPORTANCE    (4 MB)
+ * TIPC_HIGH_IMPORTANCE      (8 MB)
+ * TIPC_CRITICAL_IMPORTANCE  (16 MB)
  *
  * Returns overload limit according to corresponding message importance
  */
-static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
+static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
 {
-       struct tipc_msg *msg = buf_msg(buf);
+       struct tipc_sock *tsk = tipc_sk(sk);
+       struct tipc_msg *hdr = buf_msg(skb);
+
+       if (unlikely(!msg_connected(hdr)))
+               return sk->sk_rcvbuf << msg_importance(hdr);
 
-       if (msg_connected(msg))
-               return sysctl_tipc_rmem[2];
+       if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
+               return sk->sk_rcvbuf;
 
-       return sk->sk_rcvbuf >> TIPC_CRITICAL_IMPORTANCE <<
-               msg_importance(msg);
+       return FLOWCTL_MSG_LIM;
 }
 
 /**
@@ -1748,7 +1794,7 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
 
                /* Try backlog, compensating for double-counted bytes */
                dcnt = &tipc_sk(sk)->dupl_rcvcnt;
-               if (sk->sk_backlog.len)
+               if (!sk->sk_backlog.len)
                        atomic_set(dcnt, 0);
                lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
                if (likely(!sk_add_backlog(sk, skb, lim)))
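
Taken together, the socket.c hunks replace the fixed message-count window with byte-based accounting in 1 KiB blocks: the sender charges tsk_inc(len + hlen) per message against snd_win, the receiver accumulates the same quantity in rcv_unacked, and once a quarter of rcv_win is pending it sends a CONN_ACK carrying both the consumed count and a freshly advertised window. A compact userspace model of both ends, illustrative and not the kernel API:

    #include <stdio.h>

    #define BLK_SZ 1024   /* FLOWCTL_BLK_SZ */

    /* truesize(len)/len is < 4 for len >= BLK_SZ, hence the division by 4 */
    static unsigned adv_blocks(unsigned rcvbuf_bytes)
    {
        return rcvbuf_bytes / BLK_SZ / 4;
    }

    /* one message of msglen bytes costs this many blocks of window */
    static unsigned blk_inc(unsigned msglen) { return msglen / BLK_SZ + 1; }

    int main(void)
    {
        unsigned snd_win = adv_blocks(512 * 1024);  /* RCVBUF_MIN start: 128 */
        unsigned rcv_win = snd_win;
        unsigned snt_unacked = 0, rcv_unacked = 0;
        unsigned sent = 0, blocked = 0;

        for (int i = 0; i < 200; i++) {
            unsigned msg = 1500;                    /* payload + header bytes */

            if (snt_unacked + blk_inc(msg) > snd_win) {  /* would congest */
                blocked++;
                continue;
            }
            snt_unacked += blk_inc(msg);            /* sender charges window */
            rcv_unacked += blk_inc(msg);            /* receiver counts same */
            sent++;

            if (rcv_unacked >= rcv_win / 4) {       /* CONN_ACK threshold */
                snt_unacked -= rcv_unacked;         /* ack releases blocks */
                rcv_unacked = 0;
            }
        }
        printf("sent=%u blocked=%u\n", sent, blocked);  /* sent=200 blocked=0 */
        return 0;
    }
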
index 4241f22069dc93270f9760c2f30ec9d36ede84d7..06fb5944cf76947cab964fbcf2b53b52b1f517aa 100644 (file)
@@ -1,6 +1,6 @@
 /* net/tipc/socket.h: Include file for TIPC socket code
  *
- * Copyright (c) 2014-2015, Ericsson AB
+ * Copyright (c) 2014-2016, Ericsson AB
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #include <net/sock.h>
 #include <net/genetlink.h>
 
-#define TIPC_CONNACK_INTV         256
-#define TIPC_FLOWCTRL_WIN        (TIPC_CONNACK_INTV * 2)
-#define TIPC_CONN_OVERLOAD_LIMIT ((TIPC_FLOWCTRL_WIN * 2 + 1) * \
-                                 SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE))
+/* Compatibility values for deprecated message based flow control */
+#define FLOWCTL_MSG_WIN 512
+#define FLOWCTL_MSG_LIM ((FLOWCTL_MSG_WIN * 2 + 1) * SKB_TRUESIZE(MAX_MSG_SIZE))
+
+#define FLOWCTL_BLK_SZ 1024
+
+/* Socket receive buffer sizes */
+#define RCVBUF_MIN  (FLOWCTL_BLK_SZ * 512)
+#define RCVBUF_DEF  (FLOWCTL_BLK_SZ * 1024 * 2)
+#define RCVBUF_MAX  (FLOWCTL_BLK_SZ * 1024 * 16)
+
 int tipc_socket_init(void);
 void tipc_socket_stop(void);
 void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq);
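
With FLOWCTL_BLK_SZ = 1024, the new constants work out as follows (the 128-block figure follows from tsk_adv_blocks() in the socket.c hunks above):

    RCVBUF_MIN = 1024 * 512       =  512 KiB   (initial window: 512 KiB / 1 KiB / 4 = 128 blocks)
    RCVBUF_DEF = 1024 * 1024 * 2  =    2 MiB   (the "2 MB" default limit cited in rcvbuf_limit())
    RCVBUF_MAX = 1024 * 1024 * 16 =   16 MiB

These are the values tipc_init() now writes into sysctl_tipc_rmem[0..2] in the core.c hunk earlier, replacing the old TIPC_CONN_OVERLOAD_LIMIT derivation that this file deletes.
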
index e6cb386fbf3469017f805e5db135d04b86a30d78..0dd02244e21d72b8e53e371d51eeae53e4b15a41 100644 (file)
@@ -302,7 +302,7 @@ static void tipc_subscrp_subscribe(struct net *net, struct tipc_subscr *s,
 }
 
 /* Handle one termination request for the subscriber */
-static void tipc_subscrb_shutdown_cb(int conid, void *usr_data)
+static void tipc_subscrb_release_cb(int conid, void *usr_data)
 {
        tipc_subscrb_delete((struct tipc_subscriber *)usr_data);
 }
@@ -326,8 +326,7 @@ static void tipc_subscrb_rcv_cb(struct net *net, int conid,
                return tipc_subscrp_cancel(s, subscriber);
        }
 
-       if (s)
-               tipc_subscrp_subscribe(net, s, subscriber, swap);
+       tipc_subscrp_subscribe(net, s, subscriber, swap);
 }
 
 /* Handle one request to establish a new subscriber */
@@ -365,7 +364,7 @@ int tipc_topsrv_start(struct net *net)
        topsrv->max_rcvbuf_size         = sizeof(struct tipc_subscr);
        topsrv->tipc_conn_recvmsg       = tipc_subscrb_rcv_cb;
        topsrv->tipc_conn_new           = tipc_subscrb_connect_cb;
-       topsrv->tipc_conn_shutdown      = tipc_subscrb_shutdown_cb;
+       topsrv->tipc_conn_release       = tipc_subscrb_release_cb;
 
        strncpy(topsrv->name, name, strlen(name) + 1);
        tn->topsrv = topsrv;
index 3dce53ebea9240fd2f46f9c428170a2e6c345d7b..b5f1221f48d4859156aa640066e1fd80cf2927dc 100644 (file)
@@ -1808,27 +1808,8 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
        else if (sk->sk_shutdown & RCV_SHUTDOWN)
                err = 0;
 
-       if (copied > 0) {
-               /* We only do these additional bookkeeping/notification steps
-                * if we actually copied something out of the queue pair
-                * instead of just peeking ahead.
-                */
-
-               if (!(flags & MSG_PEEK)) {
-                       /* If the other side has shutdown for sending and there
-                        * is nothing more to read, then modify the socket
-                        * state.
-                        */
-                       if (vsk->peer_shutdown & SEND_SHUTDOWN) {
-                               if (vsock_stream_has_data(vsk) <= 0) {
-                                       sk->sk_state = SS_UNCONNECTED;
-                                       sock_set_flag(sk, SOCK_DONE);
-                                       sk->sk_state_change(sk);
-                               }
-                       }
-               }
+       if (copied > 0)
                err = copied;
-       }
 
 out:
        release_sock(sk);
index 0a369bb440e77e03262ae48a7e3519db35c87835..4120b7a538be933dae935a6785e6d20ecf1afa32 100644 (file)
@@ -842,7 +842,7 @@ static void vmci_transport_peer_detach_cb(u32 sub_id,
         * qp_handle.
         */
        if (vmci_handle_is_invalid(e_payload->handle) ||
-           vmci_handle_is_equal(trans->qp_handle, e_payload->handle))
+           !vmci_handle_is_equal(trans->qp_handle, e_payload->handle))
                return;
 
        /* We don't ask for delayed CBs when we subscribe to this event (we
@@ -1735,11 +1735,8 @@ static int vmci_transport_dgram_dequeue(struct vsock_sock *vsk,
        /* Retrieve the head sk_buff from the socket's receive queue. */
        err = 0;
        skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
-       if (err)
-               return err;
-
        if (!skb)
-               return -EAGAIN;
+               return err;
 
        dg = (struct vmci_datagram *)skb->data;
        if (!dg)
@@ -2054,7 +2051,7 @@ static u32 vmci_transport_get_local_cid(void)
        return vmci_get_context_id();
 }
 
-static struct vsock_transport vmci_transport = {
+static const struct vsock_transport vmci_transport = {
        .init = vmci_transport_socket_init,
        .destruct = vmci_transport_destruct,
        .release = vmci_transport_release,
@@ -2154,7 +2151,7 @@ module_exit(vmci_transport_exit);
 
 MODULE_AUTHOR("VMware, Inc.");
 MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
-MODULE_VERSION("1.0.2.0-k");
+MODULE_VERSION("1.0.4.0-k");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("vmware_vsock");
 MODULE_ALIAS_NETPROTO(PF_VSOCK);
index 59cabc9bce693f5f39f1d2db8f436a18ac2efcaa..a6631fb319c1ed06cfd6be2f215c5f05febfd0a4 100644 (file)
@@ -768,7 +768,7 @@ static bool cfg80211_ir_permissive_chan(struct wiphy *wiphy,
                if (chan == other_chan)
                        return true;
 
-               if (chan->band != IEEE80211_BAND_5GHZ)
+               if (chan->band != NL80211_BAND_5GHZ)
                        continue;
 
                r1 = cfg80211_get_unii(chan->center_freq);
index 5327e4b974fa10ea4d9fc242d46e04bc626542c2..7f7b9409bf4c2f5cc76f6411eb4de73fd5032c89 100644 (file)
@@ -557,7 +557,7 @@ int wiphy_register(struct wiphy *wiphy)
 {
        struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        int res;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        struct ieee80211_supported_band *sband;
        bool have_band = false;
        int i;
@@ -647,7 +647,7 @@ int wiphy_register(struct wiphy *wiphy)
                return res;
 
        /* sanity check supported bands/channels */
-       for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+       for (band = 0; band < NUM_NL80211_BANDS; band++) {
                sband = wiphy->bands[band];
                if (!sband)
                        continue;
@@ -659,7 +659,7 @@ int wiphy_register(struct wiphy *wiphy)
                 * on 60GHz band, there are no legacy rates, so
                 * n_bitrates is 0
                 */
-               if (WARN_ON(band != IEEE80211_BAND_60GHZ &&
+               if (WARN_ON(band != NL80211_BAND_60GHZ &&
                            !sband->n_bitrates))
                        return -EINVAL;
 
@@ -669,7 +669,7 @@ int wiphy_register(struct wiphy *wiphy)
                 * global structure for that.
                 */
                if (cfg80211_disable_40mhz_24ghz &&
-                   band == IEEE80211_BAND_2GHZ &&
+                   band == NL80211_BAND_2GHZ &&
                    sband->ht_cap.ht_supported) {
                        sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
                        sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SGI_40;
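The ieee80211_band -> nl80211_band conversion repeated throughout these wireless hunks is purely mechanical; the enum values match, only the cfg80211-private names go away. A hedged sketch of the post-rename iteration idiom, assuming a wiphy whose bands[] array the driver has populated:

	enum nl80211_band band;

	for (band = 0; band < NUM_NL80211_BANDS; band++) {
		struct ieee80211_supported_band *sband = wiphy->bands[band];

		if (!sband)		/* band not supported by this device */
			continue;
		/* ... per-band work ... */
	}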
index 454157717efaf8654cae9cdee39e827381dd20c3..5d453916a4179877ff2f7d0b45f908905b4e5d22 100644 (file)
@@ -69,7 +69,7 @@ static ssize_t ht40allow_map_read(struct file *file,
        struct wiphy *wiphy = file->private_data;
        char *buf;
        unsigned int offset = 0, buf_size = PAGE_SIZE, i, r;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        struct ieee80211_supported_band *sband;
 
        buf = kzalloc(buf_size, GFP_KERNEL);
@@ -78,7 +78,7 @@ static ssize_t ht40allow_map_read(struct file *file,
 
        rtnl_lock();
 
-       for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+       for (band = 0; band < NUM_NL80211_BANDS; band++) {
                sband = wiphy->bands[band];
                if (!sband)
                        continue;
index 4c55fab9b4e46c68ee480eb71aec969088aa7920..4a4dda53bdf140e1142987410033ab5a4d61a497 100644 (file)
@@ -104,7 +104,7 @@ static int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
                struct ieee80211_supported_band *sband =
                        rdev->wiphy.bands[params->chandef.chan->band];
                int j;
-               u32 flag = params->chandef.chan->band == IEEE80211_BAND_5GHZ ?
+               u32 flag = params->chandef.chan->band == NL80211_BAND_5GHZ ?
                        IEEE80211_RATE_MANDATORY_A :
                        IEEE80211_RATE_MANDATORY_B;
 
@@ -236,7 +236,7 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
                            struct wireless_dev *wdev)
 {
        struct cfg80211_cached_keys *ck = NULL;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        int i, err;
 
        ASSERT_WDEV_LOCK(wdev);
@@ -248,7 +248,7 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
        if (!wdev->wext.ibss.chandef.chan) {
                struct ieee80211_channel *new_chan = NULL;
 
-               for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+               for (band = 0; band < NUM_NL80211_BANDS; band++) {
                        struct ieee80211_supported_band *sband;
                        struct ieee80211_channel *chan;
 
index 092300b30c372ddc03115638a83bc65ce7a49997..fa2066b56f36c469e5686b7bea36a33d58bc13d3 100644 (file)
@@ -128,9 +128,9 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
 
        if (!setup->chandef.chan) {
                /* if we don't have that either, use the first usable channel */
-               enum ieee80211_band band;
+               enum nl80211_band band;
 
-               for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+               for (band = 0; band < NUM_NL80211_BANDS; band++) {
                        struct ieee80211_supported_band *sband;
                        struct ieee80211_channel *chan;
                        int i;
index ff328250bc442db6a9e8e5136496098d4270bf6f..c284d883c349bf732d7fa0ad60615aff4abf0b4a 100644 (file)
@@ -726,7 +726,7 @@ void cfg80211_dfs_channels_update_work(struct work_struct *work)
        wiphy = &rdev->wiphy;
 
        rtnl_lock();
-       for (bandid = 0; bandid < IEEE80211_NUM_BANDS; bandid++) {
+       for (bandid = 0; bandid < NUM_NL80211_BANDS; bandid++) {
                sband = wiphy->bands[bandid];
                if (!sband)
                        continue;
index 4f45a2913104e42e692bde95a8e4ecb621767b15..afeb1ef1b1992a1fe2e4ff2772badeb1f7e83c24 100644 (file)
@@ -1277,7 +1277,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
        struct nlattr *nl_bands, *nl_band;
        struct nlattr *nl_freqs, *nl_freq;
        struct nlattr *nl_cmds;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        struct ieee80211_channel *chan;
        int i;
        const struct ieee80211_txrx_stypes *mgmt_stypes =
@@ -1410,7 +1410,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
                        goto nla_put_failure;
 
                for (band = state->band_start;
-                    band < IEEE80211_NUM_BANDS; band++) {
+                    band < NUM_NL80211_BANDS; band++) {
                        struct ieee80211_supported_band *sband;
 
                        sband = rdev->wiphy.bands[band];
@@ -1472,7 +1472,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
                }
                nla_nest_end(msg, nl_bands);
 
-               if (band < IEEE80211_NUM_BANDS)
+               if (band < NUM_NL80211_BANDS)
                        state->band_start = band + 1;
                else
                        state->band_start = 0;
@@ -2429,7 +2429,8 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
 
        if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
            nla_put_u32(msg, NL80211_ATTR_IFTYPE, wdev->iftype) ||
-           nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) ||
+           nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
+                             NL80211_ATTR_PAD) ||
            nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, wdev_address(wdev)) ||
            nla_put_u32(msg, NL80211_ATTR_GENERATION,
                        rdev->devlist_generation ^
@@ -3493,7 +3494,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
        }
 
        params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
-       if (params.pbss && !rdev->wiphy.bands[IEEE80211_BAND_60GHZ])
+       if (params.pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ])
                return -EOPNOTSUPP;
 
        wdev_lock(wdev);
@@ -5821,9 +5822,9 @@ static int validate_scan_freqs(struct nlattr *freqs)
        return n_channels;
 }
 
-static bool is_band_valid(struct wiphy *wiphy, enum ieee80211_band b)
+static bool is_band_valid(struct wiphy *wiphy, enum nl80211_band b)
 {
-       return b < IEEE80211_NUM_BANDS && wiphy->bands[b];
+       return b < NUM_NL80211_BANDS && wiphy->bands[b];
 }
 
 static int parse_bss_select(struct nlattr *nla, struct wiphy *wiphy,
@@ -6018,10 +6019,10 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
                        i++;
                }
        } else {
-               enum ieee80211_band band;
+               enum nl80211_band band;
 
                /* all channels */
-               for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+               for (band = 0; band < NUM_NL80211_BANDS; band++) {
                        int j;
                        if (!wiphy->bands[band])
                                continue;
@@ -6066,7 +6067,7 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
                       request->ie_len);
        }
 
-       for (i = 0; i < IEEE80211_NUM_BANDS; i++)
+       for (i = 0; i < NUM_NL80211_BANDS; i++)
                if (wiphy->bands[i])
                        request->rates[i] =
                                (1 << wiphy->bands[i]->n_bitrates) - 1;
@@ -6075,9 +6076,9 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
                nla_for_each_nested(attr,
                                    info->attrs[NL80211_ATTR_SCAN_SUPP_RATES],
                                    tmp) {
-                       enum ieee80211_band band = nla_type(attr);
+                       enum nl80211_band band = nla_type(attr);
 
-                       if (band < 0 || band >= IEEE80211_NUM_BANDS) {
+                       if (band < 0 || band >= NUM_NL80211_BANDS) {
                                err = -EINVAL;
                                goto out_free;
                        }
@@ -6265,7 +6266,7 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
        struct cfg80211_sched_scan_request *request;
        struct nlattr *attr;
        int err, tmp, n_ssids = 0, n_match_sets = 0, n_channels, i, n_plans = 0;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        size_t ie_len;
        struct nlattr *tb[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1];
        s32 default_match_rssi = NL80211_SCAN_RSSI_THOLD_OFF;
@@ -6430,7 +6431,7 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
                }
        } else {
                /* all channels */
-               for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+               for (band = 0; band < NUM_NL80211_BANDS; band++) {
                        int j;
                        if (!wiphy->bands[band])
                                continue;
@@ -6874,7 +6875,8 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
        if (wdev->netdev &&
            nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex))
                goto nla_put_failure;
-       if (nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)))
+       if (nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
+                             NL80211_ATTR_PAD))
                goto nla_put_failure;
 
        bss = nla_nest_start(msg, NL80211_ATTR_BSS);
@@ -6895,7 +6897,8 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
         */
        ies = rcu_dereference(res->ies);
        if (ies) {
-               if (nla_put_u64(msg, NL80211_BSS_TSF, ies->tsf))
+               if (nla_put_u64_64bit(msg, NL80211_BSS_TSF, ies->tsf,
+                                     NL80211_BSS_PAD))
                        goto fail_unlock_rcu;
                if (ies->len && nla_put(msg, NL80211_BSS_INFORMATION_ELEMENTS,
                                        ies->len, ies->data))
@@ -6905,7 +6908,8 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
        /* and this pointer is always (unless driver didn't know) beacon data */
        ies = rcu_dereference(res->beacon_ies);
        if (ies && ies->from_beacon) {
-               if (nla_put_u64(msg, NL80211_BSS_BEACON_TSF, ies->tsf))
+               if (nla_put_u64_64bit(msg, NL80211_BSS_BEACON_TSF, ies->tsf,
+                                     NL80211_BSS_PAD))
                        goto fail_unlock_rcu;
                if (ies->len && nla_put(msg, NL80211_BSS_BEACON_IES,
                                        ies->len, ies->data))
@@ -6924,8 +6928,8 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
                goto nla_put_failure;
 
        if (intbss->ts_boottime &&
-           nla_put_u64(msg, NL80211_BSS_LAST_SEEN_BOOTTIME,
-                       intbss->ts_boottime))
+           nla_put_u64_64bit(msg, NL80211_BSS_LAST_SEEN_BOOTTIME,
+                             intbss->ts_boottime, NL80211_BSS_PAD))
                goto nla_put_failure;
 
        switch (rdev->wiphy.signal_type) {
@@ -7045,28 +7049,28 @@ static int nl80211_send_survey(struct sk_buff *msg, u32 portid, u32 seq,
            nla_put_flag(msg, NL80211_SURVEY_INFO_IN_USE))
                goto nla_put_failure;
        if ((survey->filled & SURVEY_INFO_TIME) &&
-           nla_put_u64(msg, NL80211_SURVEY_INFO_TIME,
-                       survey->time))
+           nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME,
+                       survey->time, NL80211_SURVEY_INFO_PAD))
                goto nla_put_failure;
        if ((survey->filled & SURVEY_INFO_TIME_BUSY) &&
-           nla_put_u64(msg, NL80211_SURVEY_INFO_TIME_BUSY,
-                       survey->time_busy))
+           nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME_BUSY,
+                             survey->time_busy, NL80211_SURVEY_INFO_PAD))
                goto nla_put_failure;
        if ((survey->filled & SURVEY_INFO_TIME_EXT_BUSY) &&
-           nla_put_u64(msg, NL80211_SURVEY_INFO_TIME_EXT_BUSY,
-                       survey->time_ext_busy))
+           nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME_EXT_BUSY,
+                             survey->time_ext_busy, NL80211_SURVEY_INFO_PAD))
                goto nla_put_failure;
        if ((survey->filled & SURVEY_INFO_TIME_RX) &&
-           nla_put_u64(msg, NL80211_SURVEY_INFO_TIME_RX,
-                       survey->time_rx))
+           nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME_RX,
+                             survey->time_rx, NL80211_SURVEY_INFO_PAD))
                goto nla_put_failure;
        if ((survey->filled & SURVEY_INFO_TIME_TX) &&
-           nla_put_u64(msg, NL80211_SURVEY_INFO_TIME_TX,
-                       survey->time_tx))
+           nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME_TX,
+                             survey->time_tx, NL80211_SURVEY_INFO_PAD))
                goto nla_put_failure;
        if ((survey->filled & SURVEY_INFO_TIME_SCAN) &&
-           nla_put_u64(msg, NL80211_SURVEY_INFO_TIME_SCAN,
-                       survey->time_scan))
+           nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME_SCAN,
+                             survey->time_scan, NL80211_SURVEY_INFO_PAD))
                goto nla_put_failure;
 
        nla_nest_end(msg, infoattr);
@@ -7538,14 +7542,14 @@ static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info)
 
 static bool
 nl80211_parse_mcast_rate(struct cfg80211_registered_device *rdev,
-                        int mcast_rate[IEEE80211_NUM_BANDS],
+                        int mcast_rate[NUM_NL80211_BANDS],
                         int rateval)
 {
        struct wiphy *wiphy = &rdev->wiphy;
        bool found = false;
        int band, i;
 
-       for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+       for (band = 0; band < NUM_NL80211_BANDS; band++) {
                struct ieee80211_supported_band *sband;
 
                sband = wiphy->bands[band];
@@ -7725,7 +7729,7 @@ static int nl80211_set_mcast_rate(struct sk_buff *skb, struct genl_info *info)
 {
        struct cfg80211_registered_device *rdev = info->user_ptr[0];
        struct net_device *dev = info->user_ptr[1];
-       int mcast_rate[IEEE80211_NUM_BANDS];
+       int mcast_rate[NUM_NL80211_BANDS];
        u32 nla_rate;
        int err;
 
@@ -7786,8 +7790,8 @@ __cfg80211_alloc_vendor_skb(struct cfg80211_registered_device *rdev,
        }
 
        if (wdev) {
-               if (nla_put_u64(skb, NL80211_ATTR_WDEV,
-                               wdev_id(wdev)))
+               if (nla_put_u64_64bit(skb, NL80211_ATTR_WDEV,
+                                     wdev_id(wdev), NL80211_ATTR_PAD))
                        goto nla_put_failure;
                if (wdev->netdev &&
                    nla_put_u32(skb, NL80211_ATTR_IFINDEX,
@@ -8130,7 +8134,7 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
        }
 
        connect.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
-       if (connect.pbss && !rdev->wiphy.bands[IEEE80211_BAND_60GHZ]) {
+       if (connect.pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ]) {
                kzfree(connkeys);
                return -EOPNOTSUPP;
        }
@@ -8380,7 +8384,8 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
        if (err)
                goto free_msg;
 
-       if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
+       if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie,
+                             NL80211_ATTR_PAD))
                goto nla_put_failure;
 
        genlmsg_end(msg, hdr);
@@ -8550,7 +8555,7 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
 
        memset(&mask, 0, sizeof(mask));
        /* Default to all rates enabled */
-       for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
+       for (i = 0; i < NUM_NL80211_BANDS; i++) {
                sband = rdev->wiphy.bands[i];
 
                if (!sband)
@@ -8574,14 +8579,14 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
 
        /*
         * The nested attribute uses enum nl80211_band as the index. This maps
-        * directly to the enum ieee80211_band values used in cfg80211.
+        * directly to the enum nl80211_band values used in cfg80211.
         */
        BUILD_BUG_ON(NL80211_MAX_SUPP_HT_RATES > IEEE80211_HT_MCS_MASK_LEN * 8);
        nla_for_each_nested(tx_rates, info->attrs[NL80211_ATTR_TX_RATES], rem) {
-               enum ieee80211_band band = nla_type(tx_rates);
+               enum nl80211_band band = nla_type(tx_rates);
                int err;
 
-               if (band < 0 || band >= IEEE80211_NUM_BANDS)
+               if (band < 0 || band >= NUM_NL80211_BANDS)
                        return -EINVAL;
                sband = rdev->wiphy.bands[band];
                if (sband == NULL)
@@ -8792,7 +8797,8 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
                goto free_msg;
 
        if (msg) {
-               if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
+               if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie,
+                                     NL80211_ATTR_PAD))
                        goto nla_put_failure;
 
                genlmsg_end(msg, hdr);
@@ -10078,7 +10084,8 @@ static int nl80211_probe_client(struct sk_buff *skb,
        if (err)
                goto free_msg;
 
-       if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
+       if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie,
+                             NL80211_ATTR_PAD))
                goto nla_put_failure;
 
        genlmsg_end(msg, hdr);
@@ -10503,8 +10510,9 @@ static int nl80211_vendor_cmd_dump(struct sk_buff *skb,
                        break;
 
                if (nla_put_u32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
-                   (wdev && nla_put_u64(skb, NL80211_ATTR_WDEV,
-                                        wdev_id(wdev)))) {
+                   (wdev && nla_put_u64_64bit(skb, NL80211_ATTR_WDEV,
+                                              wdev_id(wdev),
+                                              NL80211_ATTR_PAD))) {
                        genlmsg_cancel(skb, hdr);
                        break;
                }
@@ -10746,7 +10754,7 @@ static int nl80211_tdls_channel_switch(struct sk_buff *skb,
         * section 10.22.6.2.1. Disallow 5/10Mhz channels as well for now, the
         * specification is not defined for them.
         */
-       if (chandef.chan->band == IEEE80211_BAND_2GHZ &&
+       if (chandef.chan->band == NL80211_BAND_2GHZ &&
            chandef.width != NL80211_CHAN_WIDTH_20_NOHT &&
            chandef.width != NL80211_CHAN_WIDTH_20)
                return -EINVAL;
@@ -11711,7 +11719,8 @@ static int nl80211_send_scan_msg(struct sk_buff *msg,
        if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
            (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
                                         wdev->netdev->ifindex)) ||
-           nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)))
+           nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
+                             NL80211_ATTR_PAD))
                goto nla_put_failure;
 
        /* ignore errors and send incomplete event anyway */
@@ -12378,11 +12387,13 @@ static void nl80211_send_remain_on_chan_event(
        if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
            (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
                                         wdev->netdev->ifindex)) ||
-           nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) ||
+           nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
+                             NL80211_ATTR_PAD) ||
            nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq) ||
            nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE,
                        NL80211_CHAN_NO_HT) ||
-           nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
+           nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie,
+                             NL80211_ATTR_PAD))
                goto nla_put_failure;
 
        if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL &&
@@ -12616,7 +12627,8 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
        if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
            (netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
                                        netdev->ifindex)) ||
-           nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) ||
+           nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
+                             NL80211_ATTR_PAD) ||
            nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq) ||
            (sig_dbm &&
             nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) ||
@@ -12659,9 +12671,11 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
        if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
            (netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
                                   netdev->ifindex)) ||
-           nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) ||
+           nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
+                             NL80211_ATTR_PAD) ||
            nla_put(msg, NL80211_ATTR_FRAME, len, buf) ||
-           nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie) ||
+           nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie,
+                             NL80211_ATTR_PAD) ||
            (ack && nla_put_flag(msg, NL80211_ATTR_ACK)))
                goto nla_put_failure;
 
@@ -13041,7 +13055,8 @@ nl80211_radar_notify(struct cfg80211_registered_device *rdev,
                struct wireless_dev *wdev = netdev->ieee80211_ptr;
 
                if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
-                   nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)))
+                   nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
+                                     NL80211_ATTR_PAD))
                        goto nla_put_failure;
        }
 
@@ -13086,7 +13101,8 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
        if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
            nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
            nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) ||
-           nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie) ||
+           nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie,
+                             NL80211_ATTR_PAD) ||
            (acked && nla_put_flag(msg, NL80211_ATTR_ACK)))
                goto nla_put_failure;
 
@@ -13231,7 +13247,8 @@ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev,
                goto free_msg;
 
        if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
-           nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)))
+           nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
+                             NL80211_ATTR_PAD))
                goto free_msg;
 
        if (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
@@ -13372,7 +13389,7 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
        struct wireless_dev *wdev;
        struct cfg80211_beacon_registration *reg, *tmp;
 
-       if (state != NETLINK_URELEASE)
+       if (state != NETLINK_URELEASE || notify->protocol != NETLINK_GENERIC)
                return NOTIFY_DONE;
 
        rcu_read_lock();
@@ -13506,7 +13523,8 @@ void cfg80211_crit_proto_stopped(struct wireless_dev *wdev, gfp_t gfp)
                goto nla_put_failure;
 
        if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
-           nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)))
+           nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
+                             NL80211_ATTR_PAD))
                goto nla_put_failure;
 
        genlmsg_end(msg, hdr);
@@ -13539,7 +13557,8 @@ void nl80211_send_ap_stopped(struct wireless_dev *wdev)
 
        if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
            nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex) ||
-           nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)))
+           nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
+                             NL80211_ATTR_PAD))
                goto out;
 
        genlmsg_end(msg, hdr);
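All of the nla_put_u64() -> nla_put_u64_64bit() conversions above follow one pattern: a dedicated pad attribute (NL80211_ATTR_PAD, NL80211_BSS_PAD, NL80211_SURVEY_INFO_PAD) lets netlink insert filler so the 64-bit payload lands on an 8-byte boundary. An illustrative sketch, not taken from the patch:

	/* Sketch: emit a 64-bit cookie attribute with explicit padding so
	 * userspace can read it with an aligned 64-bit load.
	 */
	static int put_cookie(struct sk_buff *msg, u64 cookie)
	{
		return nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie,
					 NL80211_ATTR_PAD);
	}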
index 8ae0c04f9fc7d476919be7dc3b4e6c8e9a528496..85ff30bee2b921c3a132f212d3d4898b2da26660 100644 (file)
@@ -1048,7 +1048,7 @@ rdev_start_radar_detection(struct cfg80211_registered_device *rdev,
 static inline int
 rdev_set_mcast_rate(struct cfg80211_registered_device *rdev,
                    struct net_device *dev,
-                   int mcast_rate[IEEE80211_NUM_BANDS])
+                   int mcast_rate[NUM_NL80211_BANDS])
 {
        int ret = -ENOTSUPP;
 
index c5fb317eee68f15a4665a090f7aee16b08ec6765..e271dea6bc0226562b018ff788030a61dcec1289 100644 (file)
@@ -1546,12 +1546,12 @@ static void reg_process_ht_flags_band(struct wiphy *wiphy,
 
 static void reg_process_ht_flags(struct wiphy *wiphy)
 {
-       enum ieee80211_band band;
+       enum nl80211_band band;
 
        if (!wiphy)
                return;
 
-       for (band = 0; band < IEEE80211_NUM_BANDS; band++)
+       for (band = 0; band < NUM_NL80211_BANDS; band++)
                reg_process_ht_flags_band(wiphy, wiphy->bands[band]);
 }
 
@@ -1673,7 +1673,7 @@ static void reg_check_channels(void)
 static void wiphy_update_regulatory(struct wiphy *wiphy,
                                    enum nl80211_reg_initiator initiator)
 {
-       enum ieee80211_band band;
+       enum nl80211_band band;
        struct regulatory_request *lr = get_last_request();
 
        if (ignore_reg_update(wiphy, initiator)) {
@@ -1690,7 +1690,7 @@ static void wiphy_update_regulatory(struct wiphy *wiphy,
 
        lr->dfs_region = get_cfg80211_regdom()->dfs_region;
 
-       for (band = 0; band < IEEE80211_NUM_BANDS; band++)
+       for (band = 0; band < NUM_NL80211_BANDS; band++)
                handle_band(wiphy, initiator, wiphy->bands[band]);
 
        reg_process_beacons(wiphy);
@@ -1786,14 +1786,14 @@ static void handle_band_custom(struct wiphy *wiphy,
 void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
                                   const struct ieee80211_regdomain *regd)
 {
-       enum ieee80211_band band;
+       enum nl80211_band band;
        unsigned int bands_set = 0;
 
        WARN(!(wiphy->regulatory_flags & REGULATORY_CUSTOM_REG),
             "wiphy should have REGULATORY_CUSTOM_REG\n");
        wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
 
-       for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+       for (band = 0; band < NUM_NL80211_BANDS; band++) {
                if (!wiphy->bands[band])
                        continue;
                handle_band_custom(wiphy, wiphy->bands[band], regd);
@@ -2228,7 +2228,7 @@ static void reg_process_self_managed_hints(void)
        struct wiphy *wiphy;
        const struct ieee80211_regdomain *tmp;
        const struct ieee80211_regdomain *regd;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        struct regulatory_request request = {};
 
        list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
@@ -2246,7 +2246,7 @@ static void reg_process_self_managed_hints(void)
                rcu_assign_pointer(wiphy->regd, regd);
                rcu_free_regdom(tmp);
 
-               for (band = 0; band < IEEE80211_NUM_BANDS; band++)
+               for (band = 0; band < NUM_NL80211_BANDS; band++)
                        handle_band_custom(wiphy, wiphy->bands[band], regd);
 
                reg_process_ht_flags(wiphy);
@@ -2404,7 +2404,7 @@ int regulatory_hint(struct wiphy *wiphy, const char *alpha2)
 }
 EXPORT_SYMBOL(regulatory_hint);
 
-void regulatory_hint_country_ie(struct wiphy *wiphy, enum ieee80211_band band,
+void regulatory_hint_country_ie(struct wiphy *wiphy, enum nl80211_band band,
                                const u8 *country_ie, u8 country_ie_len)
 {
        char alpha2[2];
@@ -2504,11 +2504,11 @@ static void restore_alpha2(char *alpha2, bool reset_user)
 static void restore_custom_reg_settings(struct wiphy *wiphy)
 {
        struct ieee80211_supported_band *sband;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        struct ieee80211_channel *chan;
        int i;
 
-       for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+       for (band = 0; band < NUM_NL80211_BANDS; band++) {
                sband = wiphy->bands[band];
                if (!sband)
                        continue;
@@ -2623,9 +2623,9 @@ void regulatory_hint_disconnect(void)
 
 static bool freq_is_chan_12_13_14(u16 freq)
 {
-       if (freq == ieee80211_channel_to_frequency(12, IEEE80211_BAND_2GHZ) ||
-           freq == ieee80211_channel_to_frequency(13, IEEE80211_BAND_2GHZ) ||
-           freq == ieee80211_channel_to_frequency(14, IEEE80211_BAND_2GHZ))
+       if (freq == ieee80211_channel_to_frequency(12, NL80211_BAND_2GHZ) ||
+           freq == ieee80211_channel_to_frequency(13, NL80211_BAND_2GHZ) ||
+           freq == ieee80211_channel_to_frequency(14, NL80211_BAND_2GHZ))
                return true;
        return false;
 }
@@ -2650,7 +2650,7 @@ int regulatory_hint_found_beacon(struct wiphy *wiphy,
 
        if (beacon_chan->beacon_found ||
            beacon_chan->flags & IEEE80211_CHAN_RADAR ||
-           (beacon_chan->band == IEEE80211_BAND_2GHZ &&
+           (beacon_chan->band == NL80211_BAND_2GHZ &&
             !freq_is_chan_12_13_14(beacon_chan->center_freq)))
                return 0;
 
index 9f495d76eca075d6f779078196f9adddd98a34aa..f6ced316b5a49e204632ef42675309614237cc54 100644 (file)
@@ -104,7 +104,7 @@ int regulatory_hint_found_beacon(struct wiphy *wiphy,
  * information for a band the BSS is not present in it will be ignored.
  */
 void regulatory_hint_country_ie(struct wiphy *wiphy,
-                        enum ieee80211_band band,
+                        enum nl80211_band band,
                         const u8 *country_ie,
                         u8 country_ie_len);
 
index 50ea8e3fcbeb022dd4062d04d7f562c0cf750b52..abdf651a70d9a26b442b7de9d0b9b5bd2d938166 100644 (file)
@@ -531,7 +531,7 @@ static int cmp_bss(struct cfg80211_bss *a,
 }
 
 static bool cfg80211_bss_type_match(u16 capability,
-                                   enum ieee80211_band band,
+                                   enum nl80211_band band,
                                    enum ieee80211_bss_type bss_type)
 {
        bool ret = true;
@@ -540,7 +540,7 @@ static bool cfg80211_bss_type_match(u16 capability,
        if (bss_type == IEEE80211_BSS_TYPE_ANY)
                return ret;
 
-       if (band == IEEE80211_BAND_60GHZ) {
+       if (band == NL80211_BAND_60GHZ) {
                mask = WLAN_CAPABILITY_DMG_TYPE_MASK;
                switch (bss_type) {
                case IEEE80211_BSS_TYPE_ESS:
@@ -1006,7 +1006,7 @@ cfg80211_inform_bss_data(struct wiphy *wiphy,
        if (!res)
                return NULL;
 
-       if (channel->band == IEEE80211_BAND_60GHZ) {
+       if (channel->band == NL80211_BAND_60GHZ) {
                bss_type = res->pub.capability & WLAN_CAPABILITY_DMG_TYPE_MASK;
                if (bss_type == WLAN_CAPABILITY_DMG_TYPE_AP ||
                    bss_type == WLAN_CAPABILITY_DMG_TYPE_PBSS)
@@ -1089,7 +1089,7 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
        if (!res)
                return NULL;
 
-       if (channel->band == IEEE80211_BAND_60GHZ) {
+       if (channel->band == NL80211_BAND_60GHZ) {
                bss_type = res->pub.capability & WLAN_CAPABILITY_DMG_TYPE_MASK;
                if (bss_type == WLAN_CAPABILITY_DMG_TYPE_AP ||
                    bss_type == WLAN_CAPABILITY_DMG_TYPE_PBSS)
@@ -1185,7 +1185,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
        struct iw_scan_req *wreq = NULL;
        struct cfg80211_scan_request *creq = NULL;
        int i, err, n_channels = 0;
-       enum ieee80211_band band;
+       enum nl80211_band band;
 
        if (!netif_running(dev))
                return -ENETDOWN;
@@ -1229,7 +1229,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
 
        /* translate "Scan on frequencies" request */
        i = 0;
-       for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+       for (band = 0; band < NUM_NL80211_BANDS; band++) {
                int j;
 
                if (!wiphy->bands[band])
@@ -1289,7 +1289,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
                        creq->n_ssids = 0;
        }
 
-       for (i = 0; i < IEEE80211_NUM_BANDS; i++)
+       for (i = 0; i < NUM_NL80211_BANDS; i++)
                if (wiphy->bands[i])
                        creq->rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1;
 
index 1fba41676428ce5e3a54d797cc494d06e432ebaa..e22e5b83cfa94a94ea6ea302beaebfa07217c067 100644 (file)
@@ -81,7 +81,7 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
                return -ENOMEM;
 
        if (wdev->conn->params.channel) {
-               enum ieee80211_band band = wdev->conn->params.channel->band;
+               enum nl80211_band band = wdev->conn->params.channel->band;
                struct ieee80211_supported_band *sband =
                        wdev->wiphy->bands[band];
 
@@ -93,11 +93,11 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
                request->rates[band] = (1 << sband->n_bitrates) - 1;
        } else {
                int i = 0, j;
-               enum ieee80211_band band;
+               enum nl80211_band band;
                struct ieee80211_supported_band *bands;
                struct ieee80211_channel *channel;
 
-               for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+               for (band = 0; band < NUM_NL80211_BANDS; band++) {
                        bands = wdev->wiphy->bands[band];
                        if (!bands)
                                continue;
index 8da1fae23cfb5e36eb60b6dcdebc63295acb338d..3c1091ae6c36c3a69f25c3e287d4c45e3a72d45f 100644 (file)
                                conf->dot11MeshHWMPconfirmationInterval;      \
        } while (0)
 
-#define CHAN_ENTRY __field(enum ieee80211_band, band) \
+#define CHAN_ENTRY __field(enum nl80211_band, band) \
                   __field(u16, center_freq)
 #define CHAN_ASSIGN(chan)                                        \
        do {                                                      \
 #define CHAN_PR_FMT "band: %d, freq: %u"
 #define CHAN_PR_ARG __entry->band, __entry->center_freq
 
-#define CHAN_DEF_ENTRY __field(enum ieee80211_band, band)              \
+#define CHAN_DEF_ENTRY __field(enum nl80211_band, band)                \
                       __field(u32, control_freq)                       \
                       __field(u32, width)                              \
                       __field(u32, center_freq1)                       \
@@ -2647,7 +2647,7 @@ TRACE_EVENT(cfg80211_scan_done,
        TP_STRUCT__entry(
                __field(u32, n_channels)
                __dynamic_array(u8, ie, request ? request->ie_len : 0)
-               __array(u32, rates, IEEE80211_NUM_BANDS)
+               __array(u32, rates, NUM_NL80211_BANDS)
                __field(u32, wdev_id)
                MAC_ENTRY(wiphy_mac)
                __field(bool, no_cck)
@@ -2658,7 +2658,7 @@ TRACE_EVENT(cfg80211_scan_done,
                        memcpy(__get_dynamic_array(ie), request->ie,
                               request->ie_len);
                        memcpy(__entry->rates, request->rates,
-                              IEEE80211_NUM_BANDS);
+                              NUM_NL80211_BANDS);
                        __entry->wdev_id = request->wdev ?
                                        request->wdev->identifier : 0;
                        if (request->wiphy)
@@ -2883,25 +2883,25 @@ TRACE_EVENT(rdev_start_radar_detection,
 
 TRACE_EVENT(rdev_set_mcast_rate,
        TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
-                int mcast_rate[IEEE80211_NUM_BANDS]),
+                int mcast_rate[NUM_NL80211_BANDS]),
        TP_ARGS(wiphy, netdev, mcast_rate),
        TP_STRUCT__entry(
                WIPHY_ENTRY
                NETDEV_ENTRY
-               __array(int, mcast_rate, IEEE80211_NUM_BANDS)
+               __array(int, mcast_rate, NUM_NL80211_BANDS)
        ),
        TP_fast_assign(
                WIPHY_ASSIGN;
                NETDEV_ASSIGN;
                memcpy(__entry->mcast_rate, mcast_rate,
-                      sizeof(int) * IEEE80211_NUM_BANDS);
+                      sizeof(int) * NUM_NL80211_BANDS);
        ),
        TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", "
                  "mcast_rates [2.4GHz=0x%x, 5.2GHz=0x%x, 60GHz=0x%x]",
                  WIPHY_PR_ARG, NETDEV_PR_ARG,
-                 __entry->mcast_rate[IEEE80211_BAND_2GHZ],
-                 __entry->mcast_rate[IEEE80211_BAND_5GHZ],
-                 __entry->mcast_rate[IEEE80211_BAND_60GHZ])
+                 __entry->mcast_rate[NL80211_BAND_2GHZ],
+                 __entry->mcast_rate[NL80211_BAND_5GHZ],
+                 __entry->mcast_rate[NL80211_BAND_60GHZ])
 );
 
 TRACE_EVENT(rdev_set_coalesce,
index 9f440a9de63b486e5fd565533fa01975621abbbb..f36039888eb566ce2b116ed613added32689a3b1 100644 (file)
@@ -47,7 +47,7 @@ u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband,
        if (WARN_ON(!sband))
                return 1;
 
-       if (sband->band == IEEE80211_BAND_2GHZ) {
+       if (sband->band == NL80211_BAND_2GHZ) {
                if (scan_width == NL80211_BSS_CHAN_WIDTH_5 ||
                    scan_width == NL80211_BSS_CHAN_WIDTH_10)
                        mandatory_flag = IEEE80211_RATE_MANDATORY_G;
@@ -65,26 +65,26 @@ u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband,
 }
 EXPORT_SYMBOL(ieee80211_mandatory_rates);
 
-int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band)
+int ieee80211_channel_to_frequency(int chan, enum nl80211_band band)
 {
        /* see 802.11 17.3.8.3.2 and Annex J
         * there are overlapping channel numbers in 5GHz and 2GHz bands */
        if (chan <= 0)
                return 0; /* not supported */
        switch (band) {
-       case IEEE80211_BAND_2GHZ:
+       case NL80211_BAND_2GHZ:
                if (chan == 14)
                        return 2484;
                else if (chan < 14)
                        return 2407 + chan * 5;
                break;
-       case IEEE80211_BAND_5GHZ:
+       case NL80211_BAND_5GHZ:
                if (chan >= 182 && chan <= 196)
                        return 4000 + chan * 5;
                else
                        return 5000 + chan * 5;
                break;
-       case IEEE80211_BAND_60GHZ:
+       case NL80211_BAND_60GHZ:
                if (chan < 5)
                        return 56160 + chan * 2160;
                break;
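A few worked values for the mapping above (a sketch, consistent with 802.11 Annex J):

	/* channel  6, 2.4 GHz: 2407  + 6 * 5     = 2437 MHz
	 * channel 36, 5 GHz:   5000  + 36 * 5    = 5180 MHz
	 * channel  2, 60 GHz:  56160 + 2 * 2160  = 60480 MHz
	 */
	int freq = ieee80211_channel_to_frequency(36, NL80211_BAND_5GHZ); /* 5180 */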
@@ -116,11 +116,11 @@ EXPORT_SYMBOL(ieee80211_frequency_to_channel);
 struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy,
                                                  int freq)
 {
-       enum ieee80211_band band;
+       enum nl80211_band band;
        struct ieee80211_supported_band *sband;
        int i;
 
-       for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+       for (band = 0; band < NUM_NL80211_BANDS; band++) {
                sband = wiphy->bands[band];
 
                if (!sband)
@@ -137,12 +137,12 @@ struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy,
 EXPORT_SYMBOL(__ieee80211_get_channel);
 
 static void set_mandatory_flags_band(struct ieee80211_supported_band *sband,
-                                    enum ieee80211_band band)
+                                    enum nl80211_band band)
 {
        int i, want;
 
        switch (band) {
-       case IEEE80211_BAND_5GHZ:
+       case NL80211_BAND_5GHZ:
                want = 3;
                for (i = 0; i < sband->n_bitrates; i++) {
                        if (sband->bitrates[i].bitrate == 60 ||
@@ -155,7 +155,7 @@ static void set_mandatory_flags_band(struct ieee80211_supported_band *sband,
                }
                WARN_ON(want);
                break;
-       case IEEE80211_BAND_2GHZ:
+       case NL80211_BAND_2GHZ:
                want = 7;
                for (i = 0; i < sband->n_bitrates; i++) {
                        if (sband->bitrates[i].bitrate == 10) {
@@ -185,12 +185,12 @@ static void set_mandatory_flags_band(struct ieee80211_supported_band *sband,
                }
                WARN_ON(want != 0 && want != 3 && want != 6);
                break;
-       case IEEE80211_BAND_60GHZ:
+       case NL80211_BAND_60GHZ:
                /* check for mandatory HT MCS 1..4 */
                WARN_ON(!sband->ht_cap.ht_supported);
                WARN_ON((sband->ht_cap.mcs.rx_mask[0] & 0x1e) != 0x1e);
                break;
-       case IEEE80211_NUM_BANDS:
+       case NUM_NL80211_BANDS:
                WARN_ON(1);
                break;
        }
@@ -198,9 +198,9 @@ static void set_mandatory_flags_band(struct ieee80211_supported_band *sband,
 
 void ieee80211_set_bitrate_flags(struct wiphy *wiphy)
 {
-       enum ieee80211_band band;
+       enum nl80211_band band;
 
-       for (band = 0; band < IEEE80211_NUM_BANDS; band++)
+       for (band = 0; band < NUM_NL80211_BANDS; band++)
                if (wiphy->bands[band])
                        set_mandatory_flags_band(wiphy->bands[band], band);
 }
@@ -1399,22 +1399,22 @@ size_t ieee80211_ie_split_ric(const u8 *ies, size_t ielen,
 EXPORT_SYMBOL(ieee80211_ie_split_ric);
 
 bool ieee80211_operating_class_to_band(u8 operating_class,
-                                      enum ieee80211_band *band)
+                                      enum nl80211_band *band)
 {
        switch (operating_class) {
        case 112:
        case 115 ... 127:
        case 128 ... 130:
-               *band = IEEE80211_BAND_5GHZ;
+               *band = NL80211_BAND_5GHZ;
                return true;
        case 81:
        case 82:
        case 83:
        case 84:
-               *band = IEEE80211_BAND_2GHZ;
+               *band = NL80211_BAND_2GHZ;
                return true;
        case 180:
-               *band = IEEE80211_BAND_60GHZ;
+               *band = NL80211_BAND_60GHZ;
                return true;
        }
 
@@ -1726,10 +1726,10 @@ int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
 
 unsigned int ieee80211_get_num_supported_channels(struct wiphy *wiphy)
 {
-       enum ieee80211_band band;
+       enum nl80211_band band;
        unsigned int n_channels = 0;
 
-       for (band = 0; band < IEEE80211_NUM_BANDS; band++)
+       for (band = 0; band < NUM_NL80211_BANDS; band++)
                if (wiphy->bands[band])
                        n_channels += wiphy->bands[band]->n_channels;
 
index fd682832a0e3635d52c734871d5402d270336dc3..4c89f0ca61ba0d2dde70efdf579062a3a90cced5 100644 (file)
@@ -32,13 +32,13 @@ int cfg80211_wext_giwname(struct net_device *dev,
        if (!wdev)
                return -EOPNOTSUPP;
 
-       sband = wdev->wiphy->bands[IEEE80211_BAND_5GHZ];
+       sband = wdev->wiphy->bands[NL80211_BAND_5GHZ];
        if (sband) {
                is_a = true;
                is_ht |= sband->ht_cap.ht_supported;
        }
 
-       sband = wdev->wiphy->bands[IEEE80211_BAND_2GHZ];
+       sband = wdev->wiphy->bands[NL80211_BAND_2GHZ];
        if (sband) {
                int i;
                /* Check for mandatory rates */
@@ -143,7 +143,7 @@ int cfg80211_wext_giwrange(struct net_device *dev,
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct iw_range *range = (struct iw_range *) extra;
-       enum ieee80211_band band;
+       enum nl80211_band band;
        int i, c = 0;
 
        if (!wdev)
@@ -215,7 +215,7 @@ int cfg80211_wext_giwrange(struct net_device *dev,
                }
        }
 
-       for (band = 0; band < IEEE80211_NUM_BANDS; band ++) {
+       for (band = 0; band < NUM_NL80211_BANDS; band ++) {
                struct ieee80211_supported_band *sband;
 
                sband = wdev->wiphy->bands[band];
@@ -265,11 +265,11 @@ int cfg80211_wext_freq(struct iw_freq *freq)
         * -EINVAL for impossible things.
         */
        if (freq->e == 0) {
-               enum ieee80211_band band = IEEE80211_BAND_2GHZ;
+               enum nl80211_band band = NL80211_BAND_2GHZ;
                if (freq->m < 0)
                        return 0;
                if (freq->m > 14)
-                       band = IEEE80211_BAND_5GHZ;
+                       band = NL80211_BAND_5GHZ;
                return ieee80211_channel_to_frequency(freq->m, band);
        } else {
                int i, div = 1000000;
@@ -1245,7 +1245,7 @@ static int cfg80211_wext_siwrate(struct net_device *dev,
                maxrate = rate->value / 100000;
        }
 
-       for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+       for (band = 0; band < NUM_NL80211_BANDS; band++) {
                sband = wdev->wiphy->bands[band];
                if (sband == NULL)
                        continue;
index ff4a91fcab9fd291b09eb48487690301c4388ae2..637387bbaaea33f62a1a970c9a361c895c4e5f2d 100644 (file)
@@ -99,6 +99,9 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
 
                skb_dst_force(skb);
 
+               /* Inner headers are invalid now. */
+               skb->encapsulation = 0;
+
                err = x->type->output(x, skb);
                if (err == -EINPROGRESS)
                        goto out;
index 2cc7af858c6f2a599aac11926d018d937622b236..d516845e16e30b69d2d47160ef9f45e6a4587a6b 100644 (file)
@@ -809,7 +809,8 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
                        goto out;
        }
        if (x->lastused) {
-               ret = nla_put_u64(skb, XFRMA_LASTUSED, x->lastused);
+               ret = nla_put_u64_64bit(skb, XFRMA_LASTUSED, x->lastused,
+                                       XFRMA_PAD);
                if (ret)
                        goto out;
        }
@@ -1813,7 +1814,7 @@ static inline size_t xfrm_aevent_msgsize(struct xfrm_state *x)
 
        return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id))
               + nla_total_size(replay_size)
-              + nla_total_size(sizeof(struct xfrm_lifetime_cur))
+              + nla_total_size_64bit(sizeof(struct xfrm_lifetime_cur))
               + nla_total_size(sizeof(struct xfrm_mark))
               + nla_total_size(4) /* XFRM_AE_RTHR */
               + nla_total_size(4); /* XFRM_AE_ETHR */
@@ -1848,7 +1849,8 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct
        }
        if (err)
                goto out_cancel;
-       err = nla_put(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft);
+       err = nla_put_64bit(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft,
+                           XFRMA_PAD);
        if (err)
                goto out_cancel;
 
@@ -2617,7 +2619,7 @@ static inline size_t xfrm_sa_len(struct xfrm_state *x)
                l += nla_total_size(sizeof(x->props.extra_flags));
 
        /* Must count x->lastused as it may become non-zero behind our back. */
-       l += nla_total_size(sizeof(u64));
+       l += nla_total_size_64bit(sizeof(u64));
 
        return l;
 }
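The xfrm changes pair each 64-bit attribute emission with matching size accounting: nla_total_size_64bit() reserves room for the extra pad attribute that nla_put_64bit()/nla_put_u64_64bit() may emit. A minimal sketch, assuming an xfrm_state *x as in the patch:

	/* Sketch: sizing and emitting must both use the _64bit helpers,
	 * or the computed skb size will be short by one pad attribute.
	 */
	static int put_lastused(struct sk_buff *skb, struct xfrm_state *x)
	{
		/* caller reserved nla_total_size_64bit(sizeof(u64)) */
		return nla_put_u64_64bit(skb, XFRMA_LASTUSED, x->lastused,
					 XFRMA_PAD);
	}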
index 9959771bf808877a2365c5f5457e666222ea7130..0bf2478cb7dfeed534a5fad022adc86c866b0c39 100644 (file)
@@ -60,6 +60,7 @@ always += spintest_kern.o
 always += map_perf_test_kern.o
 always += test_overhead_tp_kern.o
 always += test_overhead_kprobe_kern.o
+always += parse_varlen.o parse_simple.o parse_ldabs.o
 
 HOSTCFLAGS += -I$(objtree)/usr/include
 
@@ -81,16 +82,44 @@ HOSTLOADLIBES_spintest += -lelf
 HOSTLOADLIBES_map_perf_test += -lelf -lrt
 HOSTLOADLIBES_test_overhead += -lelf -lrt
 
-# point this to your LLVM backend with bpf support
-LLC=$(srctree)/tools/bpf/llvm/bld/Debug+Asserts/bin/llc
+# Allow pointing LLC/CLANG to an LLVM backend with bpf support; redefine on the cmdline:
+#  make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang
+LLC ?= llc
+CLANG ?= clang
 
-# asm/sysreg.h inline assmbly used by it is incompatible with llvm.
-# But, ehere is not easy way to fix it, so just exclude it since it is
+# Trick to allow make to be run from this directory
+all:
+       $(MAKE) -C ../../ $$PWD/
+
+clean:
+       $(MAKE) -C ../../ M=$$PWD clean
+       @rm -f *~
+
+# Verify LLVM compiler tools are available and bpf target is supported by llc
+.PHONY: verify_cmds verify_target_bpf $(CLANG) $(LLC)
+
+verify_cmds: $(CLANG) $(LLC)
+       @for TOOL in $^ ; do \
+               if ! (which -- "$${TOOL}" > /dev/null 2>&1); then \
+                       echo "*** ERROR: Cannot find LLVM tool $${TOOL}" ;\
+                       exit 1; \
+               else true; fi; \
+       done
+
+verify_target_bpf: verify_cmds
+       @if ! (${LLC} -march=bpf -mattr=help > /dev/null 2>&1); then \
+               echo "*** ERROR: LLVM (${LLC}) does not support 'bpf' target" ;\
+               echo "   NOTICE: LLVM version >= 3.7.1 required" ;\
+               exit 2; \
+       else true; fi
+
+$(src)/*.c: verify_target_bpf
+
+# asm/sysreg.h - inline assembly used by it is incompatible with llvm.
+# But, there is no easy way to fix it, so just exclude it since it is
 # useless for BPF samples.
 $(obj)/%.o: $(src)/%.c
-       clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
+       $(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
                -D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \
+               -Wno-compare-distinct-pointer-types \
                -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@
-       clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
-               -D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \
-               -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=asm -o $@.s
diff --git a/samples/bpf/README.rst b/samples/bpf/README.rst
new file mode 100644 (file)
index 0000000..a43eae3
--- /dev/null
@@ -0,0 +1,66 @@
+eBPF sample programs
+====================
+
+This directory contains a mini eBPF library, test stubs, verifier
+test-suite and examples for using eBPF.
+
+Build dependencies
+==================
+
+Compiling requires that you have installed:
+ * clang >= version 3.4.0
+ * llvm >= version 3.7.1
+
+Note that LLVM's tool 'llc' must support target 'bpf'; list the
+version and supported targets with the command: ``llc --version``
+
+Kernel headers
+--------------
+
+There are usually dependencies on header files of the current kernel.
+To avoid installing devel kernel headers system wide, simply call, as
+a normal user::
+
+ make headers_install
+
+This creates a local "usr/include" directory in the git/build
+top-level directory, which the make system automatically picks up
+first.
+
+Compiling
+=========
+
+To build the BPF samples, issue the following command from the kernel
+top-level directory::
+
+ make samples/bpf/
+
+Note the trailing "/" after the directory name.
+
+It is also possible to call make from this directory.  This simply
+wraps the invocation of make shown above, with the trailing "/"
+appended.
+
+Manually compiling LLVM with 'bpf' support
+------------------------------------------
+
+Since version 3.7.0, LLVM has included a proper backend target for
+the BPF bytecode architecture.
+
+By default, LLVM builds all non-experimental backends, including bpf.
+To generate a smaller llc binary, one can use::
+
+ -DLLVM_TARGETS_TO_BUILD="BPF"
+
+A quick snippet for manually compiling LLVM and clang
+(build dependencies: cmake and gcc-c++)::
+
+ $ git clone http://llvm.org/git/llvm.git
+ $ cd llvm/tools
+ $ git clone --depth 1 http://llvm.org/git/clang.git
+ $ cd ..; mkdir build; cd build
+ $ cmake .. -DLLVM_TARGETS_TO_BUILD="BPF;X86"
+ $ make -j $(getconf _NPROCESSORS_ONLN)
+
+It is also possible to point make at the newly compiled 'llc' or
+'clang' commands by redefining LLC or CLANG on the make command line::
+
+ make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang
index 9363500131a777f98cd7016a35b9cf9858dbf9a6..7904a2a493de43f69c0cf868d78c38d8509790af 100644 (file)
@@ -82,6 +82,7 @@ static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flag
 #define PT_REGS_FP(x) ((x)->bp)
 #define PT_REGS_RC(x) ((x)->ax)
 #define PT_REGS_SP(x) ((x)->sp)
+#define PT_REGS_IP(x) ((x)->ip)
 
 #elif defined(__s390x__)
 
@@ -94,6 +95,7 @@ static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flag
 #define PT_REGS_FP(x) ((x)->gprs[11]) /* Works only with CONFIG_FRAME_POINTER */
 #define PT_REGS_RC(x) ((x)->gprs[2])
 #define PT_REGS_SP(x) ((x)->gprs[15])
+#define PT_REGS_IP(x) ((x)->ip)
 
 #elif defined(__aarch64__)
 
@@ -106,6 +108,30 @@ static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flag
 #define PT_REGS_FP(x) ((x)->regs[29]) /* Works only with CONFIG_FRAME_POINTER */
 #define PT_REGS_RC(x) ((x)->regs[0])
 #define PT_REGS_SP(x) ((x)->sp)
+#define PT_REGS_IP(x) ((x)->pc)
+
+#elif defined(__powerpc__)
+
+#define PT_REGS_PARM1(x) ((x)->gpr[3])
+#define PT_REGS_PARM2(x) ((x)->gpr[4])
+#define PT_REGS_PARM3(x) ((x)->gpr[5])
+#define PT_REGS_PARM4(x) ((x)->gpr[6])
+#define PT_REGS_PARM5(x) ((x)->gpr[7])
+#define PT_REGS_RC(x) ((x)->gpr[3])
+#define PT_REGS_SP(x) ((x)->sp)
+#define PT_REGS_IP(x) ((x)->nip)
 
 #endif
+
+#ifdef __powerpc__
+#define BPF_KPROBE_READ_RET_IP(ip, ctx)                ({ (ip) = (ctx)->link; })
+#define BPF_KRETPROBE_READ_RET_IP              BPF_KPROBE_READ_RET_IP
+#else
+#define BPF_KPROBE_READ_RET_IP(ip, ctx)                ({                              \
+               bpf_probe_read(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); })
+#define BPF_KRETPROBE_READ_RET_IP(ip, ctx)     ({                              \
+               bpf_probe_read(&(ip), sizeof(ip),                               \
+                               (void *)(PT_REGS_FP(ctx) + sizeof(ip))); })
+#endif
+
 #endif
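
The PT_REGS_*() and BPF_K*PROBE_READ_RET_IP() macros above hide the
per-architecture register layout from the tracing samples. A minimal sketch
of their intended use, mirroring the tracex4 change further below (the
section name is taken from that sample; the program body is illustrative):

	SEC("kretprobe/kmem_cache_alloc_node")
	int sketch_prog(struct pt_regs *ctx)
	{
		long ip = 0;

		/* caller address: ctx->link on powerpc, otherwise a
		 * bpf_probe_read() of the word above the frame pointer */
		BPF_KRETPROBE_READ_RET_IP(ip, ctx);
		return 0;
	}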
index 95af56ec573989b9ce06783942fc1c21ebc5f5ed..3147377e8fd3c98fe455c6d96f5c774cb5392f11 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/bpf.h>
 #include <string.h>
 #include <time.h>
+#include <sys/resource.h>
 #include "libbpf.h"
 #include "bpf_load.h"
 
index 983629a31c79f6c4d0b4a00808d202cabe45c114..e7d9a0a3d45b12cfa68ace224be590709aba2008 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/version.h>
 #include <linux/sched.h>
 
-#define _(P) ({typeof(P) val = 0; bpf_probe_read(&val, sizeof(val), &P); val;})
+#define _(P) ({typeof(P) val; bpf_probe_read(&val, sizeof(val), &P); val;})
 
 #define MINBLOCK_US    1
 
@@ -61,7 +61,7 @@ SEC("kprobe/try_to_wake_up")
 int waker(struct pt_regs *ctx)
 {
        struct task_struct *p = (void *) PT_REGS_PARM1(ctx);
-       struct wokeby_t woke = {};
+       struct wokeby_t woke;
        u32 pid;
 
        pid = _(p->pid);
@@ -75,17 +75,19 @@ int waker(struct pt_regs *ctx)
 
 static inline int update_counts(void *ctx, u32 pid, u64 delta)
 {
-       struct key_t key = {};
        struct wokeby_t *woke;
        u64 zero = 0, *val;
+       struct key_t key;
 
+       __builtin_memset(&key.waker, 0, sizeof(key.waker));
        bpf_get_current_comm(&key.target, sizeof(key.target));
        key.tret = bpf_get_stackid(ctx, &stackmap, STACKID_FLAGS);
+       key.wret = 0;
 
        woke = bpf_map_lookup_elem(&wokeby, &pid);
        if (woke) {
                key.wret = woke->ret;
-               __builtin_memcpy(&key.waker, woke->name, TASK_COMM_LEN);
+               __builtin_memcpy(&key.waker, woke->name, sizeof(key.waker));
                bpf_map_delete_elem(&wokeby, &pid);
        }
 
diff --git a/samples/bpf/parse_ldabs.c b/samples/bpf/parse_ldabs.c
new file mode 100644 (file)
index 0000000..d175501
--- /dev/null
@@ -0,0 +1,41 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <uapi/linux/bpf.h>
+#include "bpf_helpers.h"
+
+#define DEFAULT_PKTGEN_UDP_PORT        9
+#define IP_MF                  0x2000
+#define IP_OFFSET              0x1FFF
+
+static inline int ip_is_fragment(struct __sk_buff *ctx, __u64 nhoff)
+{
+       return load_half(ctx, nhoff + offsetof(struct iphdr, frag_off))
+               & (IP_MF | IP_OFFSET);
+}
+
+SEC("ldabs")
+int handle_ingress(struct __sk_buff *skb)
+{
+       __u64 troff = ETH_HLEN + sizeof(struct iphdr);
+
+       if (load_half(skb, offsetof(struct ethhdr, h_proto)) != ETH_P_IP)
+               return 0;
+       if (load_byte(skb, ETH_HLEN + offsetof(struct iphdr, protocol)) != IPPROTO_UDP ||
+           load_byte(skb, ETH_HLEN) != 0x45)
+               return 0;
+       if (ip_is_fragment(skb, ETH_HLEN))
+               return 0;
+       if (load_half(skb, troff + offsetof(struct udphdr, dest)) == DEFAULT_PKTGEN_UDP_PORT)
+               return TC_ACT_SHOT;
+       return 0;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/parse_simple.c b/samples/bpf/parse_simple.c
new file mode 100644 (file)
index 0000000..cf2511c
--- /dev/null
@@ -0,0 +1,48 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <uapi/linux/bpf.h>
+#include <net/ip.h>
+#include "bpf_helpers.h"
+
+#define DEFAULT_PKTGEN_UDP_PORT 9
+
+/* copy of 'struct ethhdr' without __packed */
+struct eth_hdr {
+       unsigned char   h_dest[ETH_ALEN];
+       unsigned char   h_source[ETH_ALEN];
+       unsigned short  h_proto;
+};
+
+SEC("simple")
+int handle_ingress(struct __sk_buff *skb)
+{
+       void *data = (void *)(long)skb->data;
+       struct eth_hdr *eth = data;
+       struct iphdr *iph = data + sizeof(*eth);
+       struct udphdr *udp = data + sizeof(*eth) + sizeof(*iph);
+       void *data_end = (void *)(long)skb->data_end;
+
+       /* single length check */
+       if (data + sizeof(*eth) + sizeof(*iph) + sizeof(*udp) > data_end)
+               return 0;
+
+       if (eth->h_proto != htons(ETH_P_IP))
+               return 0;
+       if (iph->protocol != IPPROTO_UDP || iph->ihl != 5)
+               return 0;
+       if (ip_is_fragment(iph))
+               return 0;
+       if (udp->dest == htons(DEFAULT_PKTGEN_UDP_PORT))
+               return TC_ACT_SHOT;
+       return 0;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/parse_varlen.c b/samples/bpf/parse_varlen.c
new file mode 100644 (file)
index 0000000..edab34d
--- /dev/null
@@ -0,0 +1,153 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <uapi/linux/bpf.h>
+#include <net/ip.h>
+#include "bpf_helpers.h"
+
+#define DEFAULT_PKTGEN_UDP_PORT 9
+#define DEBUG 0
+
+static int tcp(void *data, uint64_t tp_off, void *data_end)
+{
+       struct tcphdr *tcp = data + tp_off;
+
+       if (tcp + 1 > data_end)
+               return 0;
+       if (tcp->dest == htons(80) || tcp->source == htons(80))
+               return TC_ACT_SHOT;
+       return 0;
+}
+
+static int udp(void *data, uint64_t tp_off, void *data_end)
+{
+       struct udphdr *udp = data + tp_off;
+
+       if (udp + 1 > data_end)
+               return 0;
+       if (udp->dest == htons(DEFAULT_PKTGEN_UDP_PORT) ||
+           udp->source == htons(DEFAULT_PKTGEN_UDP_PORT)) {
+               if (DEBUG) {
+                       char fmt[] = "udp port 9 indeed\n";
+
+                       bpf_trace_printk(fmt, sizeof(fmt));
+               }
+               return TC_ACT_SHOT;
+       }
+       return 0;
+}
+
+static int parse_ipv4(void *data, uint64_t nh_off, void *data_end)
+{
+       struct iphdr *iph;
+       uint64_t ihl_len;
+
+       iph = data + nh_off;
+       if (iph + 1 > data_end)
+               return 0;
+
+       if (ip_is_fragment(iph))
+               return 0;
+       ihl_len = iph->ihl * 4;
+
+       if (iph->protocol == IPPROTO_IPIP) {
+               iph = data + nh_off + ihl_len;
+               if (iph + 1 > data_end)
+                       return 0;
+               ihl_len += iph->ihl * 4;
+       }
+
+       if (iph->protocol == IPPROTO_TCP)
+               return tcp(data, nh_off + ihl_len, data_end);
+       else if (iph->protocol == IPPROTO_UDP)
+               return udp(data, nh_off + ihl_len, data_end);
+       return 0;
+}
+
+static int parse_ipv6(void *data, uint64_t nh_off, void *data_end)
+{
+       struct ipv6hdr *ip6h;
+       struct iphdr *iph;
+       uint64_t ihl_len = sizeof(struct ipv6hdr);
+       uint64_t nexthdr;
+
+       ip6h = data + nh_off;
+       if (ip6h + 1 > data_end)
+               return 0;
+
+       nexthdr = ip6h->nexthdr;
+
+       if (nexthdr == IPPROTO_IPIP) {
+               iph = data + nh_off + ihl_len;
+               if (iph + 1 > data_end)
+                       return 0;
+               ihl_len += iph->ihl * 4;
+               nexthdr = iph->protocol;
+       } else if (nexthdr == IPPROTO_IPV6) {
+               ip6h = data + nh_off + ihl_len;
+               if (ip6h + 1 > data_end)
+                       return 0;
+               ihl_len += sizeof(struct ipv6hdr);
+               nexthdr = ip6h->nexthdr;
+       }
+
+       if (nexthdr == IPPROTO_TCP)
+               return tcp(data, nh_off + ihl_len, data_end);
+       else if (nexthdr == IPPROTO_UDP)
+               return udp(data, nh_off + ihl_len, data_end);
+       return 0;
+}
+
+struct vlan_hdr {
+       uint16_t h_vlan_TCI;
+       uint16_t h_vlan_encapsulated_proto;
+};
+
+SEC("varlen")
+int handle_ingress(struct __sk_buff *skb)
+{
+       void *data = (void *)(long)skb->data;
+       struct ethhdr *eth = data;
+       void *data_end = (void *)(long)skb->data_end;
+       uint64_t h_proto, nh_off;
+
+       nh_off = sizeof(*eth);
+       if (data + nh_off > data_end)
+               return 0;
+
+       h_proto = eth->h_proto;
+
+       if (h_proto == ETH_P_8021Q || h_proto == ETH_P_8021AD) {
+               struct vlan_hdr *vhdr;
+
+               vhdr = data + nh_off;
+               nh_off += sizeof(struct vlan_hdr);
+               if (data + nh_off > data_end)
+                       return 0;
+               h_proto = vhdr->h_vlan_encapsulated_proto;
+       }
+       if (h_proto == ETH_P_8021Q || h_proto == ETH_P_8021AD) {
+               struct vlan_hdr *vhdr;
+
+               vhdr = data + nh_off;
+               nh_off += sizeof(struct vlan_hdr);
+               if (data + nh_off > data_end)
+                       return 0;
+               h_proto = vhdr->h_vlan_encapsulated_proto;
+       }
+       if (h_proto == htons(ETH_P_IP))
+               return parse_ipv4(data, nh_off, data_end);
+       else if (h_proto == htons(ETH_P_IPV6))
+               return parse_ipv6(data, nh_off, data_end);
+       return 0;
+}
+char _license[] SEC("license") = "GPL";
index 4b27619d91a46c680a01b91bf05ac6fd6b5ce1d7..ce0167d09cdc6f1432e687933bf38ebef59bad21 100644 (file)
@@ -34,7 +34,7 @@ struct bpf_map_def SEC("maps") stackmap = {
 #define PROG(foo) \
 int foo(struct pt_regs *ctx) \
 { \
-       long v = ctx->ip, *val; \
+       long v = PT_REGS_IP(ctx), *val; \
 \
        val = bpf_map_lookup_elem(&my_map, &v); \
        bpf_map_update_elem(&my_map, &v, &v, BPF_ANY); \
diff --git a/samples/bpf/test_cls_bpf.sh b/samples/bpf/test_cls_bpf.sh
new file mode 100755 (executable)
index 0000000..0365d5e
--- /dev/null
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+function pktgen {
+    ../pktgen/pktgen_bench_xmit_mode_netif_receive.sh -i $IFC -s 64 \
+        -m 90:e2:ba:ff:ff:ff -d 192.168.0.1 -t 4
+    local dropped=`tc -s qdisc show dev $IFC | tail -3 | awk '/drop/{print $7}'`
+    if [ "$dropped" == "0," ]; then
+        echo "FAIL"
+    else
+        echo "Successfully filtered " $dropped " packets"
+    fi
+}
+
+function test {
+    echo -n "Loading bpf program '$2'... "
+    tc qdisc add dev $IFC clsact
+    tc filter add dev $IFC ingress bpf da obj $1 sec $2
+    local status=$?
+    if [ $status -ne 0 ]; then
+        echo "FAIL"
+    else
+        echo "ok"
        pktgen
+    fi
+    tc qdisc del dev $IFC clsact
+}
+
+IFC=test_veth
+
+ip link add name $IFC type veth peer name pair_$IFC
+ip link set $IFC up
+ip link set pair_$IFC up
+
+test ./parse_simple.o simple
+test ./parse_varlen.o varlen
+test ./parse_ldabs.o ldabs
+ip link del dev $IFC
index 4b51a9039c0d4abd9a4995b2df0d178e0422ff19..fe2fcec98c1ff087a308219e370de3040df11ee5 100644 (file)
@@ -308,6 +308,19 @@ static struct bpf_test tests[] = {
                .result = ACCEPT,
                .result_unpriv = REJECT,
        },
+       {
+               "check valid spill/fill, skb mark",
+               .insns = {
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+                                   offsetof(struct __sk_buff, mark)),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .result_unpriv = ACCEPT,
+       },
        {
                "check corrupted spill/fill",
                .insns = {
@@ -1180,6 +1193,341 @@ static struct bpf_test tests[] = {
                .result_unpriv = REJECT,
                .result = ACCEPT,
        },
+       {
+               "raw_stack: no skb_load_bytes",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_2, 4),
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+                       BPF_MOV64_IMM(BPF_REG_4, 8),
+                       /* Call to skb_load_bytes() omitted. */
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "invalid read from stack off -8+0 size 8",
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "raw_stack: skb_load_bytes, no init",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_2, 4),
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+                       BPF_MOV64_IMM(BPF_REG_4, 8),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "raw_stack: skb_load_bytes, init",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_2, 4),
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+                       BPF_MOV64_IMM(BPF_REG_4, 8),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "raw_stack: skb_load_bytes, spilled regs around bounds",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_2, 4),
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), /* spill ctx from R1 */
+                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8), /* spill ctx from R1 */
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+                       BPF_MOV64_IMM(BPF_REG_4, 8),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), /* fill ctx into R0 */
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8), /* fill ctx into R2 */
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+                                   offsetof(struct __sk_buff, mark)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
+                                   offsetof(struct __sk_buff, priority)),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "raw_stack: skb_load_bytes, spilled regs corruption",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_2, 4),
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), /* spill ctx from R1 */
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+                       BPF_MOV64_IMM(BPF_REG_4, 8),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), /* fill ctx into R0 */
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+                                   offsetof(struct __sk_buff, mark)),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "R0 invalid mem access 'inv'",
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "raw_stack: skb_load_bytes, spilled regs corruption 2",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_2, 4),
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), /* spill ctx from R1 */
+                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  0), /* spill ctx from R1 */
+                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8), /* spill ctx from R1 */
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+                       BPF_MOV64_IMM(BPF_REG_4, 8),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), /* fill ctx into R0 */
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8), /* fill ctx into R2 */
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,  0), /* fill ctx into R3 */
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+                                   offsetof(struct __sk_buff, mark)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
+                                   offsetof(struct __sk_buff, priority)),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
+                                   offsetof(struct __sk_buff, pkt_type)),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "R3 invalid mem access 'inv'",
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "raw_stack: skb_load_bytes, spilled regs + data",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_2, 4),
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), /* spill ctx from R1 */
+                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  0), /* spill ctx from R1 */
+                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8), /* spill ctx from R1 */
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+                       BPF_MOV64_IMM(BPF_REG_4, 8),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), /* fill ctx into R0 */
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8), /* fill ctx into R2 */
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,  0), /* fill data into R3 */
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+                                   offsetof(struct __sk_buff, mark)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
+                                   offsetof(struct __sk_buff, priority)),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "raw_stack: skb_load_bytes, invalid access 1",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_2, 4),
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+                       BPF_MOV64_IMM(BPF_REG_4, 8),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "invalid stack type R3 off=-513 access_size=8",
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "raw_stack: skb_load_bytes, invalid access 2",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_2, 4),
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+                       BPF_MOV64_IMM(BPF_REG_4, 8),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "invalid stack type R3 off=-1 access_size=8",
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "raw_stack: skb_load_bytes, invalid access 3",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_2, 4),
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+                       BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "invalid stack type R3 off=-1 access_size=-1",
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "raw_stack: skb_load_bytes, invalid access 4",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_2, 4),
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+                       BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "invalid stack type R3 off=-1 access_size=2147483647",
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "raw_stack: skb_load_bytes, invalid access 5",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_2, 4),
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+                       BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "invalid stack type R3 off=-512 access_size=2147483647",
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "raw_stack: skb_load_bytes, invalid access 6",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_2, 4),
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+                       BPF_MOV64_IMM(BPF_REG_4, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "invalid stack type R3 off=-512 access_size=0",
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "raw_stack: skb_load_bytes, large access",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_2, 4),
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+                       BPF_MOV64_IMM(BPF_REG_4, 512),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "pkt: test1",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "pkt: test2",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
+                       BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 48),
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 48),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "pkt: test3",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "invalid bpf_context access off=76",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+       },
+       {
+               "pkt: test4",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+                       BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "cannot write",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
 };
 
 static int probe_filter_length(struct bpf_insn *fp)
index 8d8d1ec429eb6761b0d29b30299536ac782779c5..9b96f4fb8cea64f9b98c835b045bd8c840719433 100644 (file)
@@ -18,7 +18,6 @@ int bpf_prog1(struct pt_regs *ctx)
                u64 cookie;
        } data;
 
-       memset(&data, 0, sizeof(data));
        data.pid = bpf_get_current_pid_tgid();
        data.cookie = 0x12345678;
 
index 3f450a8fa1f3487b2cf2d9b261f63dc3d25f6599..107da148820fc6cdaadcd6e55ab38b8fa78015b5 100644 (file)
@@ -23,16 +23,14 @@ int bpf_prog1(struct pt_regs *ctx)
        /* attaches to kprobe netif_receive_skb,
         * looks for packets on the loopback device and prints them
         */
-       char devname[IFNAMSIZ] = {};
+       char devname[IFNAMSIZ];
        struct net_device *dev;
        struct sk_buff *skb;
        int len;
 
        /* non-portable! works for the given kernel only */
        skb = (struct sk_buff *) PT_REGS_PARM1(ctx);
-
        dev = _(skb->dev);
-
        len = _(skb->len);
 
        bpf_probe_read(devname, sizeof(devname), dev->name);
index 09c1adc27d426ed4adec7408a6fbae9193c39bb0..5e11c20ce5ecd9558c4a01f34ad7f994e3418809 100644 (file)
@@ -27,10 +27,10 @@ int bpf_prog2(struct pt_regs *ctx)
        long init_val = 1;
        long *value;
 
-       /* x64/s390x specific: read ip of kfree_skb caller.
+       /* read ip of kfree_skb caller.
         * non-portable version of __builtin_return_address(0)
         */
-       bpf_probe_read(&loc, sizeof(loc), (void *)PT_REGS_RET(ctx));
+       BPF_KPROBE_READ_RET_IP(loc, ctx);
 
        value = bpf_map_lookup_elem(&my_map, &loc);
        if (value)
@@ -66,7 +66,7 @@ struct hist_key {
        char comm[16];
        u64 pid_tgid;
        u64 uid_gid;
-       u32 index;
+       u64 index;
 };
 
 struct bpf_map_def SEC("maps") my_hist_map = {
@@ -82,7 +82,7 @@ int bpf_prog3(struct pt_regs *ctx)
        long write_size = PT_REGS_PARM3(ctx);
        long init_val = 1;
        long *value;
-       struct hist_key key = {};
+       struct hist_key key;
 
        key.index = log2l(write_size);
        key.pid_tgid = bpf_get_current_pid_tgid();
index ac4671420cf15949c4087b8c2847c69978646650..6dd8e384de961b66269f5da840b5d4b22d057ea8 100644 (file)
@@ -40,7 +40,7 @@ int bpf_prog2(struct pt_regs *ctx)
        long ip = 0;
 
        /* get ip address of kmem_cache_alloc_node() caller */
-       bpf_probe_read(&ip, sizeof(ip), (void *)(PT_REGS_FP(ctx) + sizeof(ip)));
+       BPF_KRETPROBE_READ_RET_IP(ip, ctx);
 
        struct pair v = {
                .val = bpf_ktime_get_ns(),
index b3f4295bf288536c1f9ae7500b542a77cf8aaec1..f95f232cbab9e308cd65396e1a1f85d3ac9544b9 100644 (file)
@@ -22,7 +22,7 @@ struct bpf_map_def SEC("maps") progs = {
 SEC("kprobe/seccomp_phase1")
 int bpf_prog1(struct pt_regs *ctx)
 {
-       struct seccomp_data sd = {};
+       struct seccomp_data sd;
 
        bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM1(ctx));
 
@@ -40,7 +40,7 @@ int bpf_prog1(struct pt_regs *ctx)
 /* we jump here when syscall number == __NR_write */
 PROG(__NR_write)(struct pt_regs *ctx)
 {
-       struct seccomp_data sd = {};
+       struct seccomp_data sd;
 
        bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM1(ctx));
        if (sd.args[2] == 512) {
@@ -53,7 +53,7 @@ PROG(__NR_write)(struct pt_regs *ctx)
 
 PROG(__NR_read)(struct pt_regs *ctx)
 {
-       struct seccomp_data sd = {};
+       struct seccomp_data sd;
 
        bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM1(ctx));
        if (sd.args[2] > 128 && sd.args[2] <= 1024) {
index e000f44e37b8775f45023464b765a460d92d1204..c1b7ef3e24c1c055cc90d1e986b11308ff7d2e02 100644 (file)
@@ -650,7 +650,7 @@ int main(int argc, char **argv)
        }
 
        hdr = fopen(headername, "w");
-       if (!out) {
+       if (!hdr) {
                perror(headername);
                exit(1);
        }
index 161dd0d67da8a84f0062672cd9125c491a2f58e3..a9155077feefb957d38dc29b4823a9529a7ee0e9 100644 (file)
@@ -371,6 +371,49 @@ static void do_usb_table(void *symval, unsigned long size,
                do_usb_entry_multi(symval + i, mod);
 }
 
+static void do_of_entry_multi(void *symval, struct module *mod)
+{
+       char alias[500];
+       int len;
+       char *tmp;
+
+       DEF_FIELD_ADDR(symval, of_device_id, name);
+       DEF_FIELD_ADDR(symval, of_device_id, type);
+       DEF_FIELD_ADDR(symval, of_device_id, compatible);
+
+       len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*",
+                     (*type)[0] ? *type : "*");
+
+       if (compatible[0])
+               sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "",
+                       *compatible);
+
+       /* Replace all whitespace with underscores */
+       for (tmp = alias; tmp && *tmp; tmp++)
+               if (isspace(*tmp))
+                       *tmp = '_';
+
+       buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"%s\");\n", alias);
+       strcat(alias, "C");
+       add_wildcard(alias);
+       buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"%s\");\n", alias);
+}
+
+static void do_of_table(void *symval, unsigned long size,
+                       struct module *mod)
+{
+       unsigned int i;
+       const unsigned long id_size = SIZE_of_device_id;
+
+       device_id_check(mod->name, "of", size, id_size, symval);
+
+       /* Leave last one: it's the terminator. */
+       size -= id_size;
+
+       for (i = 0; i < size; i += id_size)
+               do_of_entry_multi(symval + i, mod);
+}
+
 /* Looks like: hid:bNvNpN */
 static int do_hid_entry(const char *filename,
                             void *symval, char *alias)
@@ -684,30 +727,6 @@ static int do_pcmcia_entry(const char *filename,
 }
 ADD_TO_DEVTABLE("pcmcia", pcmcia_device_id, do_pcmcia_entry);
 
-static int do_of_entry (const char *filename, void *symval, char *alias)
-{
-       int len;
-       char *tmp;
-       DEF_FIELD_ADDR(symval, of_device_id, name);
-       DEF_FIELD_ADDR(symval, of_device_id, type);
-       DEF_FIELD_ADDR(symval, of_device_id, compatible);
-
-       len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*",
-                     (*type)[0] ? *type : "*");
-
-       if (compatible[0])
-               sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "",
-                       *compatible);
-
-       /* Replace all whitespace with underscores */
-       for (tmp = alias; tmp && *tmp; tmp++)
-               if (isspace (*tmp))
-                       *tmp = '_';
-
-       return 1;
-}
-ADD_TO_DEVTABLE("of", of_device_id, do_of_entry);
-
 static int do_vio_entry(const char *filename, void *symval,
                char *alias)
 {
@@ -1348,6 +1367,8 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
        /* First handle the "special" cases */
        if (sym_is(name, namelen, "usb"))
                do_usb_table(symval, sym->st_size, mod);
+       if (sym_is(name, namelen, "of"))
+               do_of_table(symval, sym->st_size, mod);
        else if (sym_is(name, namelen, "pnp"))
                do_pnp_device_entry(symval, sym->st_size, mod);
        else if (sym_is(name, namelen, "pnp_card"))
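
As a hedged illustration of the do_of_table()/do_of_entry_multi() path added
above, a hypothetical OF match table and the aliases it would now emit (the
driver and compatible string are invented for the example):

	/* static const struct of_device_id foo_of_match[] = {
	 *	{ .compatible = "acme,foo" },
	 *	{ }
	 * };
	 *
	 * Empty name and type fields become "*", and both an exact and a
	 * wildcarded alias are generated:
	 *
	 * MODULE_ALIAS("of:N*T*Cacme,foo");
	 * MODULE_ALIAS("of:N*T*Cacme,fooC*");
	 */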
index be09e2cacf828d32c75028ccf04f31e88657da70..3cd0a58672dd1df684e12d0a4331b56fd4fa12fb 100644 (file)
@@ -884,10 +884,10 @@ static char *func_tokens[] = {
        "BPRM_CHECK",
        "MODULE_CHECK",
        "FIRMWARE_CHECK",
+       "POST_SETATTR",
        "KEXEC_KERNEL_CHECK",
        "KEXEC_INITRAMFS_CHECK",
-       "POLICY_CHECK",
-       "POST_SETATTR"
+       "POLICY_CHECK"
 };
 
 void *ima_policy_start(struct seq_file *m, loff_t *pos)
index 8495b93681906bd39f4065723461cddac2e7d347..2ca9cde939d44976365aa67fe72f51be9b92897d 100644 (file)
@@ -76,6 +76,8 @@ static struct nlmsg_perm nlmsg_route_perms[] =
        { RTM_NEWNSID,          NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
        { RTM_DELNSID,          NETLINK_ROUTE_SOCKET__NLMSG_READ  },
        { RTM_GETNSID,          NETLINK_ROUTE_SOCKET__NLMSG_READ  },
+       { RTM_NEWSTATS,         NETLINK_ROUTE_SOCKET__NLMSG_READ },
+       { RTM_GETSTATS,         NETLINK_ROUTE_SOCKET__NLMSG_READ  },
 };
 
 static struct nlmsg_perm nlmsg_tcpdiag_perms[] =
@@ -155,7 +157,7 @@ int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm)
        switch (sclass) {
        case SECCLASS_NETLINK_ROUTE_SOCKET:
                /* RTM_MAX always points to RTM_SETxxxx, i.e. RTM_NEWxxx + 3 */
-               BUILD_BUG_ON(RTM_MAX != (RTM_NEWNSID + 3));
+               BUILD_BUG_ON(RTM_MAX != (RTM_NEWSTATS + 3));
                err = nlmsg_perm(nlmsg_type, perm, nlmsg_route_perms,
                                 sizeof(nlmsg_route_perms));
                break;
index 023cc4cad5c1965b31a334503d94c37752e733e4..626f3bb24c55da1411f3616566c94253c89ece79 100644 (file)
@@ -104,12 +104,11 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_init_all);
  */
 void snd_hdac_stream_free_all(struct hdac_ext_bus *ebus)
 {
-       struct hdac_stream *s;
+       struct hdac_stream *s, *_s;
        struct hdac_ext_stream *stream;
        struct hdac_bus *bus = ebus_to_hbus(ebus);
 
-       while (!list_empty(&bus->stream_list)) {
-               s = list_first_entry(&bus->stream_list, struct hdac_stream, list);
+       list_for_each_entry_safe(s, _s, &bus->stream_list, list) {
                stream = stream_to_hdac_ext_stream(s);
                snd_hdac_ext_stream_decouple(ebus, stream, false);
                list_del(&s->list);
index d1a4d697333077126f99f4951c00f647d06aaf5e..03c9872c31cfe4a2a723d3e4bb0f4896b313dc32 100644 (file)
@@ -299,13 +299,11 @@ EXPORT_SYMBOL_GPL(_snd_hdac_read_parm);
 int snd_hdac_read_parm_uncached(struct hdac_device *codec, hda_nid_t nid,
                                int parm)
 {
-       int val;
+       unsigned int cmd, val;
 
-       if (codec->regmap)
-               regcache_cache_bypass(codec->regmap, true);
-       val = snd_hdac_read_parm(codec, nid, parm);
-       if (codec->regmap)
-               regcache_cache_bypass(codec->regmap, false);
+       cmd = snd_hdac_regmap_encode_verb(nid, AC_VERB_PARAMETERS) | parm;
+       if (snd_hdac_regmap_read_raw_uncached(codec, cmd, &val) < 0)
+               return -1;
        return val;
 }
 EXPORT_SYMBOL_GPL(snd_hdac_read_parm_uncached);
index fb96aead825707e7bca496bf9fcd1af389873dbc..607bbeaebddf784aadb062c7b3934b4fa506c1e0 100644 (file)
@@ -20,6 +20,7 @@
 #include <sound/core.h>
 #include <sound/hdaudio.h>
 #include <sound/hda_i915.h>
+#include <sound/hda_register.h>
 
 static struct i915_audio_component *hdac_acomp;
 
@@ -97,26 +98,65 @@ int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
 }
 EXPORT_SYMBOL_GPL(snd_hdac_display_power);
 
+#define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \
+                               ((pci)->device == 0x0c0c) || \
+                               ((pci)->device == 0x0d0c) || \
+                               ((pci)->device == 0x160c))
+
 /**
- * snd_hdac_get_display_clk - Get CDCLK in kHz
+ * snd_hdac_i915_set_bclk - Reprogram BCLK for HSW/BDW
  * @bus: HDA core bus
  *
- * This function is supposed to be used only by a HD-audio controller
- * driver that needs the interaction with i915 graphics.
+ * The Intel HSW/BDW display HDA controller is in the GPU. Both its power and
+ * link BCLK depend on the GPU. Two Extended Mode registers, EM4 (M value) and
+ * EM5 (N value), are used to convert CDCLK (Core Display Clock) to 24MHz BCLK:
+ * BCLK = CDCLK * M / N
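+ * (e.g. the default CDCLK of 450000 kHz with M=4 and N=75 gives
+ *  450000 * 4 / 75 = 24000 kHz, i.e. 24MHz)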
+ * The values will be lost when the display power well is disabled and need to
+ * be restored to avoid abnormal playback speed.
  *
- * This function queries CDCLK value in kHz from the graphics driver and
- * returns the value.  A negative code is returned in error.
+ * Call this function when initializing and when changing the power well, as
+ * well as from the ELD notifier on hotplug.
  */
-int snd_hdac_get_display_clk(struct hdac_bus *bus)
+void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
 {
        struct i915_audio_component *acomp = bus->audio_component;
+       struct pci_dev *pci = to_pci_dev(bus->dev);
+       int cdclk_freq;
+       unsigned int bclk_m, bclk_n;
+
+       if (!acomp || !acomp->ops || !acomp->ops->get_cdclk_freq)
+               return; /* only for i915 binding */
+       if (!CONTROLLER_IN_GPU(pci))
+               return; /* only HSW/BDW */
+
+       cdclk_freq = acomp->ops->get_cdclk_freq(acomp->dev);
+       switch (cdclk_freq) {
+       case 337500:
+               bclk_m = 16;
+               bclk_n = 225;
+               break;
+
+       case 450000:
+       default: /* default CDCLK 450MHz */
+               bclk_m = 4;
+               bclk_n = 75;
+               break;
+
+       case 540000:
+               bclk_m = 4;
+               bclk_n = 90;
+               break;
+
+       case 675000:
+               bclk_m = 8;
+               bclk_n = 225;
+               break;
+       }
 
-       if (!acomp || !acomp->ops)
-               return -ENODEV;
-
-       return acomp->ops->get_cdclk_freq(acomp->dev);
+       snd_hdac_chip_writew(bus, HSW_EM4, bclk_m);
+       snd_hdac_chip_writew(bus, HSW_EM5, bclk_n);
 }
-EXPORT_SYMBOL_GPL(snd_hdac_get_display_clk);
+EXPORT_SYMBOL_GPL(snd_hdac_i915_set_bclk);
 
 /* There is a fixed mapping between audio pin node and display port
  * on current Intel platforms:
@@ -267,6 +307,18 @@ int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops
 }
 EXPORT_SYMBOL_GPL(snd_hdac_i915_register_notifier);
 
+/* check whether intel graphics is present */
+static bool i915_gfx_present(void)
+{
+       static struct pci_device_id ids[] = {
+               { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_ANY_ID),
+                 .class = PCI_BASE_CLASS_DISPLAY << 16,
+                 .class_mask = 0xff << 16 },
+               {}
+       };
+       return pci_dev_present(ids);
+}
+
 /**
  * snd_hdac_i915_init - Initialize i915 audio component
  * @bus: HDA core bus
@@ -286,6 +338,9 @@ int snd_hdac_i915_init(struct hdac_bus *bus)
        struct i915_audio_component *acomp;
        int ret;
 
+       if (!i915_gfx_present())
+               return -ENODEV;
+
        acomp = kzalloc(sizeof(*acomp), GFP_KERNEL);
        if (!acomp)
                return -ENOMEM;
index bdbcd6b75ff61cb0495e36072eaefa2ec2478dc2..87041ddd29cbcbf2c63fca9ed90ffdbc0e5f249d 100644 (file)
@@ -453,14 +453,30 @@ int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg,
 EXPORT_SYMBOL_GPL(snd_hdac_regmap_write_raw);
 
 static int reg_raw_read(struct hdac_device *codec, unsigned int reg,
-                       unsigned int *val)
+                       unsigned int *val, bool uncached)
 {
-       if (!codec->regmap)
+       if (uncached || !codec->regmap)
                return hda_reg_read(codec, reg, val);
        else
                return regmap_read(codec->regmap, reg, val);
 }
 
+static int __snd_hdac_regmap_read_raw(struct hdac_device *codec,
+                                     unsigned int reg, unsigned int *val,
+                                     bool uncached)
+{
+       int err;
+
+       err = reg_raw_read(codec, reg, val, uncached);
+       if (err == -EAGAIN) {
+               err = snd_hdac_power_up_pm(codec);
+               if (!err)
+                       err = reg_raw_read(codec, reg, val, uncached);
+               snd_hdac_power_down_pm(codec);
+       }
+       return err;
+}
+
 /**
  * snd_hdac_regmap_read_raw - read a pseudo register with power mgmt
  * @codec: the codec object
@@ -472,19 +488,19 @@ static int reg_raw_read(struct hdac_device *codec, unsigned int reg,
 int snd_hdac_regmap_read_raw(struct hdac_device *codec, unsigned int reg,
                             unsigned int *val)
 {
-       int err;
-
-       err = reg_raw_read(codec, reg, val);
-       if (err == -EAGAIN) {
-               err = snd_hdac_power_up_pm(codec);
-               if (!err)
-                       err = reg_raw_read(codec, reg, val);
-               snd_hdac_power_down_pm(codec);
-       }
-       return err;
+       return __snd_hdac_regmap_read_raw(codec, reg, val, false);
 }
 EXPORT_SYMBOL_GPL(snd_hdac_regmap_read_raw);
 
+/* Works like snd_hdac_regmap_read_raw(), but always reads via HDA verbs
+ * instead of going through the regmap cache.
+ */
+int snd_hdac_regmap_read_raw_uncached(struct hdac_device *codec,
+                                     unsigned int reg, unsigned int *val)
+{
+       return __snd_hdac_regmap_read_raw(codec, reg, val, true);
+}
+
 /**
  * snd_hdac_regmap_update_raw - update a pseudo register with power mgmt
  * @codec: the codec object
index 7b248cdf06e2166d82dfc69ea318247fc8eca6f7..fdcfa29e220551b473534938d8995c7abf7ebe84 100644 (file)
@@ -591,7 +591,7 @@ static int sscape_upload_microcode(struct snd_card *card, int version)
        }
        err = upload_dma_data(sscape, init_fw->data, init_fw->size);
        if (err == 0)
-               snd_printk(KERN_INFO "sscape: MIDI firmware loaded %d KBs\n",
+               snd_printk(KERN_INFO "sscape: MIDI firmware loaded %zu KBs\n",
                                init_fw->size >> 10);
 
        release_firmware(init_fw);
index 7ca5b89f088a6922e6acd09c12befae864994320..dfaf1a93fb8a3b8aba4fee3f3090a48b08ed2734 100644 (file)
@@ -826,7 +826,7 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
                                   bool allow_powerdown)
 {
        hda_nid_t nid, changed = 0;
-       int i, state;
+       int i, state, power;
 
        for (i = 0; i < path->depth; i++) {
                nid = path->path[i];
@@ -838,7 +838,9 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
                        state = AC_PWRST_D0;
                else
                        state = AC_PWRST_D3;
-               if (!snd_hda_check_power_state(codec, nid, state)) {
+               power = snd_hda_codec_read(codec, nid, 0,
+                                          AC_VERB_GET_POWER_STATE, 0);
+               if (power != (state | (state << 4))) {
                        snd_hda_codec_write(codec, nid, 0,
                                            AC_VERB_SET_POWER_STATE, state);
                        changed = nid;
index b680be0e937d9cd7b77b8e21365a73fe9a8c925b..9a0d1445ca5cf85c774a6f4e4be1d5a44770adad 100644 (file)
@@ -857,50 +857,6 @@ static int param_set_xint(const char *val, const struct kernel_param *kp)
 #define azx_del_card_list(chip) /* NOP */
 #endif /* CONFIG_PM */
 
-/* Intel HSW/BDW display HDA controller is in GPU. Both its power and link BCLK
- * depends on GPU. Two Extended Mode registers EM4 (M value) and EM5 (N Value)
- * are used to convert CDClk (Core Display Clock) to 24MHz BCLK:
- * BCLK = CDCLK * M / N
- * The values will be lost when the display power well is disabled and need to
- * be restored to avoid abnormal playback speed.
- */
-static void haswell_set_bclk(struct hda_intel *hda)
-{
-       struct azx *chip = &hda->chip;
-       int cdclk_freq;
-       unsigned int bclk_m, bclk_n;
-
-       if (!hda->need_i915_power)
-               return;
-
-       cdclk_freq = snd_hdac_get_display_clk(azx_bus(chip));
-       switch (cdclk_freq) {
-       case 337500:
-               bclk_m = 16;
-               bclk_n = 225;
-               break;
-
-       case 450000:
-       default: /* default CDCLK 450MHz */
-               bclk_m = 4;
-               bclk_n = 75;
-               break;
-
-       case 540000:
-               bclk_m = 4;
-               bclk_n = 90;
-               break;
-
-       case 675000:
-               bclk_m = 8;
-               bclk_n = 225;
-               break;
-       }
-
-       azx_writew(chip, HSW_EM4, bclk_m);
-       azx_writew(chip, HSW_EM5, bclk_n);
-}
-
 #if defined(CONFIG_PM_SLEEP) || defined(SUPPORT_VGA_SWITCHEROO)
 /*
  * power management
@@ -958,7 +914,7 @@ static int azx_resume(struct device *dev)
        if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL
                && hda->need_i915_power) {
                snd_hdac_display_power(azx_bus(chip), true);
-               haswell_set_bclk(hda);
+               snd_hdac_i915_set_bclk(azx_bus(chip));
        }
        if (chip->msi)
                if (pci_enable_msi(pci) < 0)
@@ -1058,7 +1014,7 @@ static int azx_runtime_resume(struct device *dev)
                bus = azx_bus(chip);
                if (hda->need_i915_power) {
                        snd_hdac_display_power(bus, true);
-                       haswell_set_bclk(hda);
+                       snd_hdac_i915_set_bclk(bus);
                } else {
                        /* toggle codec wakeup bit for STATESTS read */
                        snd_hdac_set_codec_wakeup(bus, true);
@@ -1796,12 +1752,8 @@ static int azx_first_init(struct azx *chip)
        /* initialize chip */
        azx_init_pci(chip);
 
-       if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
-               struct hda_intel *hda;
-
-               hda = container_of(chip, struct hda_intel, chip);
-               haswell_set_bclk(hda);
-       }
+       if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
+               snd_hdac_i915_set_bclk(bus);
 
        hda_intel_init_chip(chip, (probe_only[dev] & 2) == 0);
 
@@ -2232,6 +2184,9 @@ static const struct pci_device_id azx_ids[] = {
        /* Broxton-P(Apollolake) */
        { PCI_DEVICE(0x8086, 0x5a98),
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
+       /* Broxton-T */
+       { PCI_DEVICE(0x8086, 0x1a98),
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
        /* Haswell */
        { PCI_DEVICE(0x8086, 0x0a0c),
          .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
index a47e8ae0eb308355416886d4753c9f950bd0cd56..80bbadc83721447754392238118eee98484616b6 100644 (file)
@@ -361,6 +361,7 @@ static int cs_parse_auto_config(struct hda_codec *codec)
 {
        struct cs_spec *spec = codec->spec;
        int err;
+       int i;
 
        err = snd_hda_parse_pin_defcfg(codec, &spec->gen.autocfg, NULL, 0);
        if (err < 0)
@@ -370,6 +371,19 @@ static int cs_parse_auto_config(struct hda_codec *codec)
        if (err < 0)
                return err;
 
+       /* keep the ADCs powered up when it's dynamically switchable */
+       if (spec->gen.dyn_adc_switch) {
+               unsigned int done = 0;
+               for (i = 0; i < spec->gen.input_mux.num_items; i++) {
+                       int idx = spec->gen.dyn_adc_idx[i];
+                       if (done & (1 << idx))
+                               continue;
+                       snd_hda_gen_fix_pin_power(codec,
+                                                 spec->gen.adc_nids[idx]);
+                       done |= 1 << idx;
+               }
+       }
+
        return 0;
 }
 
index 5af372d018346751f1dc264b413218ff08adcd22..1483f85999ecd82d1b9215f5a634011819fd368a 100644 (file)
@@ -1396,7 +1396,6 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
        struct hda_codec *codec = per_pin->codec;
        struct hdmi_spec *spec = codec->spec;
        struct hdmi_eld *eld = &spec->temp_eld;
-       struct hdmi_eld *pin_eld = &per_pin->sink_eld;
        hda_nid_t pin_nid = per_pin->pin_nid;
        /*
         * Always execute a GetPinSense verb here, even when called from
@@ -1413,15 +1412,15 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
        present = snd_hda_pin_sense(codec, pin_nid);
 
        mutex_lock(&per_pin->lock);
-       pin_eld->monitor_present = !!(present & AC_PINSENSE_PRESENCE);
-       if (pin_eld->monitor_present)
+       eld->monitor_present = !!(present & AC_PINSENSE_PRESENCE);
+       if (eld->monitor_present)
                eld->eld_valid  = !!(present & AC_PINSENSE_ELDV);
        else
                eld->eld_valid = false;
 
        codec_dbg(codec,
                "HDMI status: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n",
-               codec->addr, pin_nid, pin_eld->monitor_present, eld->eld_valid);
+               codec->addr, pin_nid, eld->monitor_present, eld->eld_valid);
 
        if (eld->eld_valid) {
                if (spec->ops.pin_get_eld(codec, pin_nid, eld->eld_buffer,
@@ -1441,7 +1440,7 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
        else
                update_eld(codec, per_pin, eld);
 
-       ret = !repoll || !pin_eld->monitor_present || pin_eld->eld_valid;
+       ret = !repoll || !eld->monitor_present || eld->eld_valid;
 
        jack = snd_hda_jack_tbl_get(codec, pin_nid);
        if (jack)
@@ -1859,6 +1858,8 @@ static void hdmi_set_chmap(struct hdac_device *hdac, int pcm_idx,
        struct hdmi_spec *spec = codec->spec;
        struct hdmi_spec_per_pin *per_pin = pcm_idx_to_pin(spec, pcm_idx);
 
+       if (!per_pin)
+               return;
        mutex_lock(&per_pin->lock);
        per_pin->chmap_set = true;
        memcpy(per_pin->chmap, chmap, ARRAY_SIZE(per_pin->chmap));
@@ -2231,6 +2232,7 @@ static void intel_pin_eld_notify(void *audio_ptr, int port)
        if (atomic_read(&(codec)->core.in_pm))
                return;
 
+       snd_hdac_i915_set_bclk(&codec->bus->core);
        check_presence_and_report(codec, pin_nid);
 }
 
index fefe83f2beabd662ecf882a832cfc66c03580bd2..ac4490a968638ff7eed3b4007ccdcda8a7b81cea 100644 (file)
@@ -4760,6 +4760,7 @@ enum {
        ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
        ALC280_FIXUP_HP_HEADSET_MIC,
        ALC221_FIXUP_HP_FRONT_MIC,
+       ALC292_FIXUP_TPT460,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -5409,6 +5410,12 @@ static const struct hda_fixup alc269_fixups[] = {
                        { }
                },
        },
+       [ALC292_FIXUP_TPT460] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc_fixup_tpt440_dock,
+               .chained = true,
+               .chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE,
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -5442,6 +5449,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13),
+       SND_PCI_QUIRK(0x1028, 0x0669, "Dell Optiplex 9020m", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x069a, "Dell Vostro 5480", ALC290_FIXUP_SUBWOOFER_HSJACK),
        SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -5563,7 +5571,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
-       SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
+       SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460),
        SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
        SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
        SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
@@ -5576,6 +5584,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
        SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
@@ -5658,6 +5667,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {.id = ALC283_FIXUP_SENSE_COMBO_JACK, .name = "alc283-sense-combo"},
        {.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"},
        {.id = ALC292_FIXUP_TPT440, .name = "tpt440"},
+       {.id = ALC292_FIXUP_TPT460, .name = "tpt460"},
        {}
 };
 #define ALC225_STANDARD_PINS \
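Note on the hunk above: the new ALC292_FIXUP_TPT460 entry reuses the TPT440 dock hook and chains to the existing Lenovo speaker-noise fixup, so the single quirk entry for 0x17aa:0x2233 now applies both fixes. A minimal sketch of the chaining pattern follows; the EXAMPLE_FIXUP_* names are illustrative only, and alc_fixup_disable_aamix is assumed to be the speaker-noise hook.

        /* Sketch of HDA fixup chaining: after a chained entry's own fixup
         * runs, the core follows .chain_id and applies that fixup too, so
         * one PCI quirk entry can stack several fixes. */
        enum { EXAMPLE_FIXUP_SPK_NOISE, EXAMPLE_FIXUP_COMBINED };

        static const struct hda_fixup example_fixups[] = {
                [EXAMPLE_FIXUP_SPK_NOISE] = {
                        .type = HDA_FIXUP_FUNC,
                        .v.func = alc_fixup_disable_aamix,   /* assumed hook */
                },
                [EXAMPLE_FIXUP_COMBINED] = {
                        .type = HDA_FIXUP_FUNC,
                        .v.func = alc_fixup_tpt440_dock,
                        .chained = true,
                        .chain_id = EXAMPLE_FIXUP_SPK_NOISE, /* applied next */
                },
        };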
index c5194f5b150aadb7d0b08cd5831aab2a03f2392d..d7e71f3092998cdaa3894adefdd9468c236bb24e 100644 (file)
@@ -1341,5 +1341,6 @@ irqreturn_t pcxhr_threaded_irq(int irq, void *dev_id)
        }
 
        pcxhr_msg_thread(mgr);
+       mutex_unlock(&mgr->lock);
        return IRQ_HANDLED;
 }
index 649e92a252aec7699351d02d87f96f6965c4190a..7ef3a0c16478d707b1ef843aecf0631a51813fb1 100644 (file)
@@ -629,6 +629,7 @@ config SND_SOC_RT5514
 
 config SND_SOC_RT5616
        tristate "Realtek RT5616 CODEC"
+       depends on I2C
 
 config SND_SOC_RT5631
        tristate "Realtek ALC5631/RT5631 CODEC"
index 92d22a018d68bf13bf57def12d6c2c82e1e9d933..83959312f7a0f5d5e1fcee33385fd1cba0e4db6f 100644 (file)
@@ -249,6 +249,18 @@ int arizona_init_spk(struct snd_soc_codec *codec)
 }
 EXPORT_SYMBOL_GPL(arizona_init_spk);
 
+int arizona_free_spk(struct snd_soc_codec *codec)
+{
+       struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
+       struct arizona *arizona = priv->arizona;
+
+       arizona_free_irq(arizona, ARIZONA_IRQ_SPK_OVERHEAT_WARN, arizona);
+       arizona_free_irq(arizona, ARIZONA_IRQ_SPK_OVERHEAT, arizona);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(arizona_free_spk);
+
 static const struct snd_soc_dapm_route arizona_mono_routes[] = {
        { "OUT1R", NULL, "OUT1L" },
        { "OUT2R", NULL, "OUT2L" },
index 1ea8e4ecf8d41bbf9d3a792f4d88cf66383052a4..ce0531b8c6329ed4af58246f5eaa115193090b66 100644 (file)
@@ -307,6 +307,8 @@ extern int arizona_init_spk(struct snd_soc_codec *codec);
 extern int arizona_init_gpio(struct snd_soc_codec *codec);
 extern int arizona_init_mono(struct snd_soc_codec *codec);
 
+extern int arizona_free_spk(struct snd_soc_codec *codec);
+
 extern int arizona_init_dai(struct arizona_priv *priv, int dai);
 
 int arizona_set_output_mode(struct snd_soc_codec *codec, int output,
index 44c30fe3e3151dc6621c308da5c1f4759a6d3f65..287d13740be4e08bbbf7ce29526a97c4eed7b814 100644 (file)
@@ -274,7 +274,9 @@ static int cs35l32_handle_of_data(struct i2c_client *i2c_client,
        if (of_property_read_u32(np, "cirrus,sdout-share", &val) >= 0)
                pdata->sdout_share = val;
 
-       of_property_read_u32(np, "cirrus,boost-manager", &val);
+       if (of_property_read_u32(np, "cirrus,boost-manager", &val))
+               val = -1u;
+
        switch (val) {
        case CS35L32_BOOST_MGR_AUTO:
        case CS35L32_BOOST_MGR_AUTO_AUDIO:
@@ -282,13 +284,15 @@ static int cs35l32_handle_of_data(struct i2c_client *i2c_client,
        case CS35L32_BOOST_MGR_FIXED:
                pdata->boost_mng = val;
                break;
+       case -1u:
        default:
                dev_err(&i2c_client->dev,
                        "Wrong cirrus,boost-manager DT value %d\n", val);
                pdata->boost_mng = CS35L32_BOOST_MGR_BYPASS;
        }
 
-       of_property_read_u32(np, "cirrus,sdout-datacfg", &val);
+       if (of_property_read_u32(np, "cirrus,sdout-datacfg", &val))
+               val = -1u;
        switch (val) {
        case CS35L32_DATA_CFG_LR_VP:
        case CS35L32_DATA_CFG_LR_STAT:
@@ -296,13 +300,15 @@ static int cs35l32_handle_of_data(struct i2c_client *i2c_client,
        case CS35L32_DATA_CFG_LR_VPSTAT:
                pdata->sdout_datacfg = val;
                break;
+       case -1u:
        default:
                dev_err(&i2c_client->dev,
                        "Wrong cirrus,sdout-datacfg DT value %d\n", val);
                pdata->sdout_datacfg = CS35L32_DATA_CFG_LR;
        }
 
-       of_property_read_u32(np, "cirrus,battery-threshold", &val);
+       if (of_property_read_u32(np, "cirrus,battery-threshold", &val))
+               val = -1u;
        switch (val) {
        case CS35L32_BATT_THRESH_3_1V:
        case CS35L32_BATT_THRESH_3_2V:
@@ -310,13 +316,15 @@ static int cs35l32_handle_of_data(struct i2c_client *i2c_client,
        case CS35L32_BATT_THRESH_3_4V:
                pdata->batt_thresh = val;
                break;
+       case -1u:
        default:
                dev_err(&i2c_client->dev,
                        "Wrong cirrus,battery-threshold DT value %d\n", val);
                pdata->batt_thresh = CS35L32_BATT_THRESH_3_3V;
        }
 
-       of_property_read_u32(np, "cirrus,battery-recovery", &val);
+       if (of_property_read_u32(np, "cirrus,battery-recovery", &val))
+               val = -1u;
        switch (val) {
        case CS35L32_BATT_RECOV_3_1V:
        case CS35L32_BATT_RECOV_3_2V:
@@ -326,6 +334,7 @@ static int cs35l32_handle_of_data(struct i2c_client *i2c_client,
        case CS35L32_BATT_RECOV_3_6V:
                pdata->batt_recov = val;
                break;
+       case -1u:
        default:
                dev_err(&i2c_client->dev,
                        "Wrong cirrus,battery-recovery DT value %d\n", val);
index 576087bda330ce840bdb4f90777ee7c9f1d580d9..00e9b6fc1b5cab0ddf8b4c3d945a3b2ab27dc749 100644 (file)
@@ -1108,6 +1108,9 @@ static int cs47l24_codec_remove(struct snd_soc_codec *codec)
        priv->core.arizona->dapm = NULL;
 
        arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv);
+
+       arizona_free_spk(codec);
+
        return 0;
 }
 
index 26f9459cb3bc8e6bb60aa2b71ddeaf427cb869ab..aaa038ffc8a50173a500483b307a76ac2b58b09c 100644 (file)
@@ -1420,32 +1420,39 @@ static int hdmi_codec_remove(struct snd_soc_codec *codec)
 }
 
 #ifdef CONFIG_PM
-static int hdmi_codec_resume(struct snd_soc_codec *codec)
+static int hdmi_codec_prepare(struct device *dev)
 {
-       struct hdac_ext_device *edev = snd_soc_codec_get_drvdata(codec);
+       struct hdac_ext_device *edev = to_hda_ext_device(dev);
+       struct hdac_device *hdac = &edev->hdac;
+
+       pm_runtime_get_sync(&edev->hdac.dev);
+
+       /*
+        * Power down afg.
+        * codec_read is preferred over codec_write to set the power state:
+        * this way the verb that sets the power state is sent and the
+        * response is received, so the state change is ensured without a
+        * loop that polls the state back.
+        */
+       snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
+                                                       AC_PWRST_D3);
+
+       return 0;
+}
+
+static void hdmi_codec_complete(struct device *dev)
+{
+       struct hdac_ext_device *edev = to_hda_ext_device(dev);
        struct hdac_hdmi_priv *hdmi = edev->private_data;
        struct hdac_hdmi_pin *pin;
        struct hdac_device *hdac = &edev->hdac;
-       struct hdac_bus *bus = hdac->bus;
-       int err;
-       unsigned long timeout;
-
-       hdac_hdmi_skl_enable_all_pins(&edev->hdac);
-       hdac_hdmi_skl_enable_dp12(&edev->hdac);
 
        /* Power up afg */
-       if (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D0)) {
-
-               snd_hdac_codec_write(hdac, hdac->afg, 0,
-                       AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
+       snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
+                                                       AC_PWRST_D0);
 
-               /* Wait till power state is set to D0 */
-               timeout = jiffies + msecs_to_jiffies(1000);
-               while (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D0)
-                               && time_before(jiffies, timeout)) {
-                       msleep(50);
-               }
-       }
+       hdac_hdmi_skl_enable_all_pins(&edev->hdac);
+       hdac_hdmi_skl_enable_dp12(&edev->hdac);
 
        /*
         * As the ELD notify callback request is not entertained while the
@@ -1455,28 +1462,16 @@ static int hdmi_codec_resume(struct snd_soc_codec *codec)
        list_for_each_entry(pin, &hdmi->pin_list, head)
                hdac_hdmi_present_sense(pin, 1);
 
-       /*
-        * Codec power is turned ON during controller resume.
-        * Turn it OFF here
-        */
-       err = snd_hdac_display_power(bus, false);
-       if (err < 0) {
-               dev_err(bus->dev,
-                       "Cannot turn OFF display power on i915, err: %d\n",
-                       err);
-               return err;
-       }
-
-       return 0;
+       pm_runtime_put_sync(&edev->hdac.dev);
 }
 #else
-#define hdmi_codec_resume NULL
+#define hdmi_codec_prepare NULL
+#define hdmi_codec_complete NULL
 #endif
 
 static struct snd_soc_codec_driver hdmi_hda_codec = {
        .probe          = hdmi_codec_probe,
        .remove         = hdmi_codec_remove,
-       .resume         = hdmi_codec_resume,
        .idle_bias_off  = true,
 };
 
@@ -1561,7 +1556,6 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
        struct hdac_ext_device *edev = to_hda_ext_device(dev);
        struct hdac_device *hdac = &edev->hdac;
        struct hdac_bus *bus = hdac->bus;
-       unsigned long timeout;
        int err;
 
        dev_dbg(dev, "Enter: %s\n", __func__);
@@ -1570,20 +1564,15 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
        if (!bus)
                return 0;
 
-       /* Power down afg */
-       if (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D3)) {
-               snd_hdac_codec_write(hdac, hdac->afg, 0,
-                       AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
-
-               /* Wait till power state is set to D3 */
-               timeout = jiffies + msecs_to_jiffies(1000);
-               while (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D3)
-                               && time_before(jiffies, timeout)) {
-
-                       msleep(50);
-               }
-       }
-
+       /*
+        * Power down afg.
+        * codec_read is preferred over codec_write to set the power state:
+        * this way the verb that sets the power state is sent and the
+        * response is received, so the state change is ensured without a
+        * loop that polls the state back.
+        */
+       snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
+                                                       AC_PWRST_D3);
        err = snd_hdac_display_power(bus, false);
        if (err < 0) {
                dev_err(bus->dev, "Cannot turn on display power on i915\n");
@@ -1616,9 +1605,8 @@ static int hdac_hdmi_runtime_resume(struct device *dev)
        hdac_hdmi_skl_enable_dp12(&edev->hdac);
 
        /* Power up afg */
-       if (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D0))
-               snd_hdac_codec_write(hdac, hdac->afg, 0,
-                       AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
+       snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
+                                                       AC_PWRST_D0);
 
        return 0;
 }
@@ -1629,6 +1617,8 @@ static int hdac_hdmi_runtime_resume(struct device *dev)
 
 static const struct dev_pm_ops hdac_hdmi_pm = {
        SET_RUNTIME_PM_OPS(hdac_hdmi_runtime_suspend, hdac_hdmi_runtime_resume, NULL)
+       .prepare = hdmi_codec_prepare,
+       .complete = hdmi_codec_complete,
 };
 
 static const struct hda_device_id hdmi_list[] = {
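Note on the hdac_hdmi hunks above: the poll-and-sleep loops are replaced by a single snd_hdac_codec_read(), and the codec-level resume moves into system PM .prepare/.complete callbacks alongside the existing runtime PM ops. A short sketch of why the read alone is a synchronization point:

        /* Sketch: snd_hdac_codec_read() sends the verb and waits for the
         * codec's response before returning, while the write variant
         * returns as soon as the verb is queued -- hence the old polling
         * loop around snd_hdac_check_power_state(). */
        static void example_afg_power(struct hdac_device *hdac, int pwr_state)
        {
                /* returns only after the codec has answered the verb */
                snd_hdac_codec_read(hdac, hdac->afg, 0,
                                    AC_VERB_SET_POWER_STATE, pwr_state);
        }

Pairing .prepare/.complete with SET_RUNTIME_PM_OPS means the AFG is forced to D3 before system sleep and every pin is re-sensed on wake, instead of relying on the codec resume path.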
index 1c8729984c2b6332502a0f90df457b1df59f22f0..683769f0f24693bae0ce89eee8938b815721a6fd 100644 (file)
@@ -343,9 +343,12 @@ static const struct snd_soc_dapm_widget nau8825_dapm_widgets[] = {
        SND_SOC_DAPM_SUPPLY("ADC Power", NAU8825_REG_ANALOG_ADC_2, 6, 0, NULL,
                0),
 
-       /* ADC for button press detection */
-       SND_SOC_DAPM_ADC("SAR", NULL, NAU8825_REG_SAR_CTRL,
-               NAU8825_SAR_ADC_EN_SFT, 0),
+       /* ADC for button press detection. A dapm supply widget is used to
+        * prevent dapm_power_widgets from keeping the codec at
+        * SND_SOC_BIAS_ON during suspend.
+        */
+       SND_SOC_DAPM_SUPPLY("SAR", NAU8825_REG_SAR_CTRL,
+               NAU8825_SAR_ADC_EN_SFT, 0, NULL, 0),
 
        SND_SOC_DAPM_PGA_S("ADACL", 2, NAU8825_REG_RDAC, 12, 0, NULL, 0),
        SND_SOC_DAPM_PGA_S("ADACR", 2, NAU8825_REG_RDAC, 13, 0, NULL, 0),
@@ -607,6 +610,16 @@ static bool nau8825_is_jack_inserted(struct regmap *regmap)
 
 static void nau8825_restart_jack_detection(struct regmap *regmap)
 {
+       /* Chip needs one FSCLK cycle in order to generate interrupts,
+        * as we cannot guarantee one will be provided by the system. Turning
+        * master mode on then off enables us to generate that FSCLK cycle
+        * with a minimum of contention on the clock bus.
+        */
+       regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
+               NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_MASTER);
+       regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
+               NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_SLAVE);
+
        /* this will restart the entire jack detection process including MIC/GND
         * switching and create interrupts. We have to go from 0 to 1 and back
         * to 0 to restart.
@@ -728,7 +741,10 @@ static irqreturn_t nau8825_interrupt(int irq, void *data)
        struct regmap *regmap = nau8825->regmap;
        int active_irq, clear_irq = 0, event = 0, event_mask = 0;
 
-       regmap_read(regmap, NAU8825_REG_IRQ_STATUS, &active_irq);
+       if (regmap_read(regmap, NAU8825_REG_IRQ_STATUS, &active_irq)) {
+               dev_err(nau8825->dev, "failed to read irq status\n");
+               return IRQ_NONE;
+       }
 
        if ((active_irq & NAU8825_JACK_EJECTION_IRQ_MASK) ==
                NAU8825_JACK_EJECTION_DETECTED) {
@@ -1141,33 +1157,74 @@ static int nau8825_set_bias_level(struct snd_soc_codec *codec,
                                        return ret;
                                }
                        }
-
-                       ret = regcache_sync(nau8825->regmap);
-                       if (ret) {
-                               dev_err(codec->dev,
-                                       "Failed to sync cache: %d\n", ret);
-                               return ret;
-                       }
                }
-
                break;
 
        case SND_SOC_BIAS_OFF:
                if (nau8825->mclk_freq)
                        clk_disable_unprepare(nau8825->mclk);
-
-               regcache_mark_dirty(nau8825->regmap);
                break;
        }
        return 0;
 }
 
+#ifdef CONFIG_PM
+static int nau8825_suspend(struct snd_soc_codec *codec)
+{
+       struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
+
+       disable_irq(nau8825->irq);
+       regcache_cache_only(nau8825->regmap, true);
+       regcache_mark_dirty(nau8825->regmap);
+
+       return 0;
+}
+
+static int nau8825_resume(struct snd_soc_codec *codec)
+{
+       struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
+
+       /* The chip may lose power and reset in S3. regcache_sync restores
+        * register values including configurations for sysclk, irq, and
+        * jack/button detection.
+        */
+       regcache_cache_only(nau8825->regmap, false);
+       regcache_sync(nau8825->regmap);
+
+       /* Check the jack plug status directly. If the headset is unplugged
+        * during S3 when the chip has no power, there will be no jack
+        * detection irq even after the nau8825_restart_jack_detection below,
+        * because the chip just thinks no headset has ever been plugged in.
+        */
+       if (!nau8825_is_jack_inserted(nau8825->regmap)) {
+               nau8825_eject_jack(nau8825);
+               snd_soc_jack_report(nau8825->jack, 0, SND_JACK_HEADSET);
+       }
+
+       enable_irq(nau8825->irq);
+
+       /* Run jack detection to check the type (OMTP or CTIA) of the headset
+        * if there is one. This handles the case where a different type of
+        * headset is plugged in during S3. This triggers an IRQ iff a headset
+        * is already plugged in.
+        */
+       nau8825_restart_jack_detection(nau8825->regmap);
+
+       return 0;
+}
+#else
+#define nau8825_suspend NULL
+#define nau8825_resume NULL
+#endif
+
 static struct snd_soc_codec_driver nau8825_codec_driver = {
        .probe = nau8825_codec_probe,
        .set_sysclk = nau8825_set_sysclk,
        .set_pll = nau8825_set_pll,
        .set_bias_level = nau8825_set_bias_level,
        .suspend_bias_off = true,
+       .suspend = nau8825_suspend,
+       .resume = nau8825_resume,
 
        .controls = nau8825_controls,
        .num_controls = ARRAY_SIZE(nau8825_controls),
@@ -1277,16 +1334,6 @@ static int nau8825_setup_irq(struct nau8825 *nau8825)
        regmap_update_bits(regmap, NAU8825_REG_ENA_CTRL,
                NAU8825_ENABLE_DACR, NAU8825_ENABLE_DACR);
 
-       /* Chip needs one FSCLK cycle in order to generate interrupts,
-        * as we cannot guarantee one will be provided by the system. Turning
-        * master mode on then off enables us to generate that FSCLK cycle
-        * with a minimum of contention on the clock bus.
-        */
-       regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
-               NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_MASTER);
-       regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
-               NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_SLAVE);
-
        ret = devm_request_threaded_irq(nau8825->dev, nau8825->irq, NULL,
                nau8825_interrupt, IRQF_TRIGGER_LOW | IRQF_ONESHOT,
                "nau8825", nau8825);
@@ -1354,36 +1401,6 @@ static int nau8825_i2c_remove(struct i2c_client *client)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int nau8825_suspend(struct device *dev)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-       struct nau8825 *nau8825 = dev_get_drvdata(dev);
-
-       disable_irq(client->irq);
-       regcache_cache_only(nau8825->regmap, true);
-       regcache_mark_dirty(nau8825->regmap);
-
-       return 0;
-}
-
-static int nau8825_resume(struct device *dev)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-       struct nau8825 *nau8825 = dev_get_drvdata(dev);
-
-       regcache_cache_only(nau8825->regmap, false);
-       regcache_sync(nau8825->regmap);
-       enable_irq(client->irq);
-
-       return 0;
-}
-#endif
-
-static const struct dev_pm_ops nau8825_pm = {
-       SET_SYSTEM_SLEEP_PM_OPS(nau8825_suspend, nau8825_resume)
-};
-
 static const struct i2c_device_id nau8825_i2c_ids[] = {
        { "nau8825", 0 },
        { }
@@ -1410,7 +1427,6 @@ static struct i2c_driver nau8825_driver = {
                .name = "nau8825",
                .of_match_table = of_match_ptr(nau8825_of_ids),
                .acpi_match_table = ACPI_PTR(nau8825_acpi_match),
-               .pm = &nau8825_pm,
        },
        .probe = nau8825_i2c_probe,
        .remove = nau8825_i2c_remove,
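Note on the nau8825 hunks above: the suspend/resume pair moves from the I2C driver's dev_pm_ops into codec-level callbacks, and the regcache ordering is what makes it safe across S3. A condensed sketch of that ordering, assuming the driver's struct nau8825 from the hunks above and eliding the jack re-detection:

        /* Condensed sketch of the regcache ordering: quiesce the IRQ and
         * defer all register writes while the chip may be unpowered, then
         * replay the full cache before interrupts are re-enabled. */
        static int example_suspend(struct nau8825 *nau8825)
        {
                disable_irq(nau8825->irq);                  /* quiesce first */
                regcache_cache_only(nau8825->regmap, true); /* defer writes */
                regcache_mark_dirty(nau8825->regmap);       /* full resync later */
                return 0;
        }

        static int example_resume(struct nau8825 *nau8825)
        {
                regcache_cache_only(nau8825->regmap, false);
                regcache_sync(nau8825->regmap);             /* replay registers */
                enable_irq(nau8825->irq);                   /* state is valid now */
                return 0;
        }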
index e8b5ba04417a9dee6ace5d136fb1c97c752254a2..09e8988bbb2d0fb3ddb4fcea1667fe8b56703a39 100644 (file)
@@ -359,7 +359,7 @@ static const DECLARE_TLV_DB_RANGE(bst_tlv,
 
 /* Interface data select */
 static const char * const rt5640_data_select[] = {
-       "Normal", "left copy to right", "right copy to left", "Swap"};
+       "Normal", "Swap", "left copy to right", "right copy to left"};
 
 static SOC_ENUM_SINGLE_DECL(rt5640_if1_dac_enum, RT5640_DIG_INF_DATA,
                            RT5640_IF1_DAC_SEL_SFT, rt5640_data_select);
index 1761c3a98b76234f7d0ffd2edac3c38fa1119148..58b664b06c166caecdac1400be9d9f0e4eebb190 100644 (file)
 #define RT5640_IF1_DAC_SEL_MASK                        (0x3 << 14)
 #define RT5640_IF1_DAC_SEL_SFT                 14
 #define RT5640_IF1_DAC_SEL_NOR                 (0x0 << 14)
-#define RT5640_IF1_DAC_SEL_L2R                 (0x1 << 14)
-#define RT5640_IF1_DAC_SEL_R2L                 (0x2 << 14)
-#define RT5640_IF1_DAC_SEL_SWAP                        (0x3 << 14)
+#define RT5640_IF1_DAC_SEL_SWAP                        (0x1 << 14)
+#define RT5640_IF1_DAC_SEL_L2R                 (0x2 << 14)
+#define RT5640_IF1_DAC_SEL_R2L                 (0x3 << 14)
 #define RT5640_IF1_ADC_SEL_MASK                        (0x3 << 12)
 #define RT5640_IF1_ADC_SEL_SFT                 12
 #define RT5640_IF1_ADC_SEL_NOR                 (0x0 << 12)
-#define RT5640_IF1_ADC_SEL_L2R                 (0x1 << 12)
-#define RT5640_IF1_ADC_SEL_R2L                 (0x2 << 12)
-#define RT5640_IF1_ADC_SEL_SWAP                        (0x3 << 12)
+#define RT5640_IF1_ADC_SEL_SWAP                        (0x1 << 12)
+#define RT5640_IF1_ADC_SEL_L2R                 (0x2 << 12)
+#define RT5640_IF1_ADC_SEL_R2L                 (0x3 << 12)
 #define RT5640_IF2_DAC_SEL_MASK                        (0x3 << 10)
 #define RT5640_IF2_DAC_SEL_SFT                 10
 #define RT5640_IF2_DAC_SEL_NOR                 (0x0 << 10)
-#define RT5640_IF2_DAC_SEL_L2R                 (0x1 << 10)
-#define RT5640_IF2_DAC_SEL_R2L                 (0x2 << 10)
-#define RT5640_IF2_DAC_SEL_SWAP                        (0x3 << 10)
+#define RT5640_IF2_DAC_SEL_SWAP                        (0x1 << 10)
+#define RT5640_IF2_DAC_SEL_L2R                 (0x2 << 10)
+#define RT5640_IF2_DAC_SEL_R2L                 (0x3 << 10)
 #define RT5640_IF2_ADC_SEL_MASK                        (0x3 << 8)
 #define RT5640_IF2_ADC_SEL_SFT                 8
 #define RT5640_IF2_ADC_SEL_NOR                 (0x0 << 8)
-#define RT5640_IF2_ADC_SEL_L2R                 (0x1 << 8)
-#define RT5640_IF2_ADC_SEL_R2L                 (0x2 << 8)
-#define RT5640_IF2_ADC_SEL_SWAP                        (0x3 << 8)
+#define RT5640_IF2_ADC_SEL_SWAP                        (0x1 << 8)
+#define RT5640_IF2_ADC_SEL_L2R                 (0x2 << 8)
+#define RT5640_IF2_ADC_SEL_R2L                 (0x3 << 8)
 #define RT5640_IF3_DAC_SEL_MASK                        (0x3 << 6)
 #define RT5640_IF3_DAC_SEL_SFT                 6
 #define RT5640_IF3_DAC_SEL_NOR                 (0x0 << 6)
-#define RT5640_IF3_DAC_SEL_L2R                 (0x1 << 6)
-#define RT5640_IF3_DAC_SEL_R2L                 (0x2 << 6)
-#define RT5640_IF3_DAC_SEL_SWAP                        (0x3 << 6)
+#define RT5640_IF3_DAC_SEL_SWAP                        (0x1 << 6)
+#define RT5640_IF3_DAC_SEL_L2R                 (0x2 << 6)
+#define RT5640_IF3_DAC_SEL_R2L                 (0x3 << 6)
 #define RT5640_IF3_ADC_SEL_MASK                        (0x3 << 4)
 #define RT5640_IF3_ADC_SEL_SFT                 4
 #define RT5640_IF3_ADC_SEL_NOR                 (0x0 << 4)
-#define RT5640_IF3_ADC_SEL_L2R                 (0x1 << 4)
-#define RT5640_IF3_ADC_SEL_R2L                 (0x2 << 4)
-#define RT5640_IF3_ADC_SEL_SWAP                        (0x3 << 4)
+#define RT5640_IF3_ADC_SEL_SWAP                        (0x1 << 4)
+#define RT5640_IF3_ADC_SEL_L2R                 (0x2 << 4)
+#define RT5640_IF3_ADC_SEL_R2L                 (0x3 << 4)
 
 /* REC Left Mixer Control 1 (0x3b) */
 #define RT5640_G_HP_L_RM_L_MASK                        (0x7 << 13)
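Note on the rt5640 hunks above: the control texts and the register encodings are reordered together because 0x1 in these fields means "Swap", not "left copy to right". With a plain SOC_ENUM the i-th string is written as value i into the bitfield, so the two tables must stay in lockstep:

        /* Sketch: a plain SOC_ENUM writes the index of the chosen string
         * into the field, so the string order must mirror the hardware
         * encoding.  Values reflect the corrected IF1 DAC field above. */
        static const char * const example_if1_dac_texts[] = {
                "Normal",               /* 0x0 << RT5640_IF1_DAC_SEL_SFT */
                "Swap",                 /* 0x1 */
                "left copy to right",   /* 0x2 */
                "right copy to left",   /* 0x3 */
        };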
index a8b3e3f701f964bdd3b54869a655d9cbfec74264..1bae17ee88175ed2556cebe4bf3f554bf721acab 100644 (file)
@@ -1955,11 +1955,16 @@ err_adsp2_codec_probe:
 static int wm5102_codec_remove(struct snd_soc_codec *codec)
 {
        struct wm5102_priv *priv = snd_soc_codec_get_drvdata(codec);
+       struct arizona *arizona = priv->core.arizona;
 
        wm_adsp2_codec_remove(&priv->core.adsp[0], codec);
 
        priv->core.arizona->dapm = NULL;
 
+       arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv);
+
+       arizona_free_spk(codec);
+
        return 0;
 }
 
index 83ba70fe16e69488a473a68bcc97d4cc2408874f..2728ac545ffe64463a44a2f4b2b70df52df7b9e8 100644 (file)
@@ -2298,6 +2298,8 @@ static int wm5110_codec_remove(struct snd_soc_codec *codec)
 
        arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv);
 
+       arizona_free_spk(codec);
+
        return 0;
 }
 
index 88223608a33f270adbcbec29e69385aba8b147f2..720a14e0687d8140f95279b4bbe662ded13ff2e2 100644 (file)
@@ -2471,7 +2471,7 @@ static void wm8962_configure_bclk(struct snd_soc_codec *codec)
                break;
        default:
                dev_warn(codec->dev, "Unknown DSPCLK divisor read back\n");
-               dspclk = wm8962->sysclk;
+               dspclk = wm8962->sysclk_rate;
        }
 
        dev_dbg(codec->dev, "DSPCLK is %dHz, BCLK %d\n", dspclk, wm8962->bclk);
index 52d766efe14f0d318844d43bc19df2dcf16bee0d..6b0785b5a5c5a52b5fe92f48ba89985ddf450a8c 100644 (file)
@@ -1072,6 +1072,8 @@ static int wm8997_codec_remove(struct snd_soc_codec *codec)
 
        priv->core.arizona->dapm = NULL;
 
+       arizona_free_spk(codec);
+
        return 0;
 }
 
index 012396074a8a6da52d416d5edf10cc092bed16e5..449f66636205a823b372e17e2b297fedfcd7ee1b 100644 (file)
@@ -1324,6 +1324,8 @@ static int wm8998_codec_remove(struct snd_soc_codec *codec)
 
        priv->core.arizona->dapm = NULL;
 
+       arizona_free_spk(codec);
+
        return 0;
 }
 
index b3e6c230045792555fcb7f5a6cab38edd65266d4..1120f4f4d011cdc188dff6edd112d00ab027a769 100644 (file)
@@ -163,7 +163,6 @@ config SND_SOC_INTEL_SKYLAKE
        tristate
        select SND_HDA_EXT_CORE
        select SND_SOC_TOPOLOGY
-       select SND_HDA_I915
        select SND_SOC_INTEL_SST
 
 config SND_SOC_INTEL_SKL_RT286_MACH
index ac60f1301e2102543a24b09319dd8b3233702743..91565229d07422414418dc7495a3f655f321e7a1 100644 (file)
@@ -1345,7 +1345,7 @@ int sst_hsw_stream_reset(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
                return 0;
 
        /* wait for pause to complete before we reset the stream */
-       while (stream->running && tries--)
+       while (stream->running && --tries)
                msleep(1);
        if (!tries) {
                dev_err(hsw->dev, "error: reset stream %d still running\n",
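Note on the hunk above: with the post-decrement, a timed-out wait leaves tries at -1, so the following !tries check never fires and the error path is skipped; the pre-decrement exits the loop with tries == 0. A standalone sketch, where still_running() is a stand-in for the driver's condition:

        /* Sketch of the off-by-one: with "tries--" a timed-out loop exits
         * with tries == -1 and "!tries" is false; "--tries" exits with 0,
         * so the timeout is reported. */
        int tries = 10;

        while (still_running() && --tries)
                msleep(1);
        if (!tries)
                pr_err("error: wait timed out\n");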
index a5267e8a96e0533bd8671f595f35ce53268b0a7e..2962ef22fc84bf4d018505bfa8c0cc3b86f46f3c 100644 (file)
@@ -336,6 +336,11 @@ void skl_dsp_free(struct sst_dsp *dsp)
        skl_ipc_int_disable(dsp);
 
        free_irq(dsp->irq, dsp);
+       dsp->cl_dev.ops.cl_cleanup_controller(dsp);
+       skl_cldma_int_disable(dsp);
+       skl_ipc_op_int_disable(dsp);
+       skl_ipc_int_disable(dsp);
+
        skl_dsp_disable_core(dsp);
 }
 EXPORT_SYMBOL_GPL(skl_dsp_free);
index 545b4e77b8aaeaaaa72662d1953281e0d3440c7a..cdb78b7e5a145d8f7a3ca0bd90c39f910643677f 100644 (file)
@@ -239,6 +239,7 @@ static void skl_tplg_update_buffer_size(struct skl_sst *ctx,
 {
        int multiplier = 1;
        struct skl_module_fmt *in_fmt, *out_fmt;
+       int in_rate, out_rate;
 
 
        /* Since fixups are applied to pin 0 only, ibs and obs need
         * to change for pin 0 only
         */
        in_fmt = &mcfg->in_fmt[0];
        out_fmt = &mcfg->out_fmt[0];
 
 
        if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
                multiplier = 5;
-       mcfg->ibs = (in_fmt->s_freq / 1000) *
-                               (mcfg->in_fmt->channels) *
-                               (mcfg->in_fmt->bit_depth >> 3) *
-                               multiplier;
-
-       mcfg->obs = (mcfg->out_fmt->s_freq / 1000) *
-                               (mcfg->out_fmt->channels) *
-                               (mcfg->out_fmt->bit_depth >> 3) *
-                               multiplier;
+
+       if (in_fmt->s_freq % 1000)
+               in_rate = (in_fmt->s_freq / 1000) + 1;
+       else
+               in_rate = (in_fmt->s_freq / 1000);
+
+       mcfg->ibs = in_rate * (mcfg->in_fmt->channels) *
+                       (mcfg->in_fmt->bit_depth >> 3) *
+                       multiplier;
+
+       if (mcfg->out_fmt->s_freq % 1000)
+               out_rate = (mcfg->out_fmt->s_freq / 1000) + 1;
+       else
+               out_rate = (mcfg->out_fmt->s_freq / 1000);
+
+       mcfg->obs = out_rate * (mcfg->out_fmt->channels) *
+                       (mcfg->out_fmt->bit_depth >> 3) *
+                       multiplier;
 }
 
 static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
@@ -485,11 +495,15 @@ skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
                if (!skl_is_pipe_mcps_avail(skl, mconfig))
                        return -ENOMEM;
 
+               skl_tplg_alloc_pipe_mcps(skl, mconfig);
+
                if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) {
                        ret = ctx->dsp->fw_ops.load_mod(ctx->dsp,
                                mconfig->id.module_id, mconfig->guid);
                        if (ret < 0)
                                return ret;
+
+                       mconfig->m_state = SKL_MODULE_LOADED;
                }
 
                /* update blob if blob is null for be with default value */
@@ -509,7 +523,6 @@ skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
                ret = skl_tplg_set_module_params(w, ctx);
                if (ret < 0)
                        return ret;
-               skl_tplg_alloc_pipe_mcps(skl, mconfig);
        }
 
        return 0;
@@ -524,7 +537,8 @@ static int skl_tplg_unload_pipe_modules(struct skl_sst *ctx,
        list_for_each_entry(w_module, &pipe->w_list, node) {
                mconfig  = w_module->w->priv;
 
-               if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod)
+               if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod &&
+                       mconfig->m_state > SKL_MODULE_UNINIT)
                        return ctx->dsp->fw_ops.unload_mod(ctx->dsp,
                                                mconfig->id.module_id);
        }
@@ -558,6 +572,9 @@ static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
        if (!skl_is_pipe_mem_avail(skl, mconfig))
                return -ENOMEM;
 
+       skl_tplg_alloc_pipe_mem(skl, mconfig);
+       skl_tplg_alloc_pipe_mcps(skl, mconfig);
+
        /*
         * Create a list of modules for pipe.
         * This list contains modules from source to sink
@@ -601,9 +618,6 @@ static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
                src_module = dst_module;
        }
 
-       skl_tplg_alloc_pipe_mem(skl, mconfig);
-       skl_tplg_alloc_pipe_mcps(skl, mconfig);
-
        return 0;
 }
 
index de3c401284d9df312b13d8a1549f274a08f5a754..d2d923002d5cfdaeba501d739e14ecc66fcb470b 100644 (file)
@@ -274,10 +274,10 @@ struct skl_pipe {
 
 enum skl_module_state {
        SKL_MODULE_UNINIT = 0,
-       SKL_MODULE_INIT_DONE = 1,
-       SKL_MODULE_LOADED = 2,
-       SKL_MODULE_UNLOADED = 3,
-       SKL_MODULE_BIND_DONE = 4
+       SKL_MODULE_LOADED = 1,
+       SKL_MODULE_INIT_DONE = 2,
+       SKL_MODULE_BIND_DONE = 3,
+       SKL_MODULE_UNLOADED = 4,
 };
 
 struct skl_module_cfg {
index ab5e25aaeee38f1284701873946a81d48163fd23..3982f5536f2d82b55837c4358e0b2754ac95e2c4 100644 (file)
@@ -222,6 +222,7 @@ static int skl_suspend(struct device *dev)
        struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
        struct skl *skl  = ebus_to_skl(ebus);
        struct hdac_bus *bus = ebus_to_hbus(ebus);
+       int ret = 0;
 
        /*
         * Do not suspend if streams which are marked ignore suspend are
@@ -232,10 +233,20 @@ static int skl_suspend(struct device *dev)
                enable_irq_wake(bus->irq);
                pci_save_state(pci);
                pci_disable_device(pci);
-               return 0;
        } else {
-               return _skl_suspend(ebus);
+               ret = _skl_suspend(ebus);
+               if (ret < 0)
+                       return ret;
+       }
+
+       if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
+               ret = snd_hdac_display_power(bus, false);
+               if (ret < 0)
+                       dev_err(bus->dev,
+                               "Cannot turn OFF display power on i915\n");
        }
+
+       return ret;
 }
 
 static int skl_resume(struct device *dev)
@@ -316,17 +327,20 @@ static int skl_free(struct hdac_ext_bus *ebus)
 
        if (bus->irq >= 0)
                free_irq(bus->irq, (void *)bus);
-       if (bus->remap_addr)
-               iounmap(bus->remap_addr);
-
        snd_hdac_bus_free_stream_pages(bus);
        snd_hdac_stream_free_all(ebus);
        snd_hdac_link_free_all(ebus);
+
+       if (bus->remap_addr)
+               iounmap(bus->remap_addr);
+
        pci_release_regions(skl->pci);
        pci_disable_device(skl->pci);
 
        snd_hdac_ext_bus_exit(ebus);
 
+       if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
+               snd_hdac_i915_exit(&ebus->bus);
        return 0;
 }
 
@@ -719,12 +733,12 @@ static void skl_remove(struct pci_dev *pci)
        if (skl->tplg)
                release_firmware(skl->tplg);
 
-       if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
-               snd_hdac_i915_exit(&ebus->bus);
-
        if (pci_dev_run_wake(pci))
                pm_runtime_get_noresume(&pci->dev);
-       pci_dev_put(pci);
+
+       /* codec removal, invoke bus_device_remove */
+       snd_hdac_ext_bus_device_remove(ebus);
+
        skl_platform_unregister(&pci->dev);
        skl_free_dsp(skl);
        skl_machine_device_unregister(skl);
index 801ae1a81dfd8eab907b1273b641493c74f9ff58..c4464858bf0160397c3dcbf484a4bd6e3897c2bb 100644 (file)
@@ -2188,6 +2188,13 @@ static ssize_t dapm_widget_show_component(struct snd_soc_component *cmpnt,
        int count = 0;
        char *state = "not set";
 
+       /* The card won't be set for the dummy component. As a spot fix
+        * we check for that case specifically here; in future we will
+        * ensure that the dummy component looks like the others.
+        */
+       if (!cmpnt->card)
+               return 0;
+
        list_for_each_entry(w, &cmpnt->card->widgets, list) {
                if (w->dapm != dapm)
                        continue;
index d14bf411515b57195db8fcb10dd9e283738dee8c..a452ad7cec4016f603142e722b5d2535959c4581 100644 (file)
@@ -15,7 +15,6 @@ config SND_USB_AUDIO
        select SND_RAWMIDI
        select SND_PCM
        select BITREVERSE
-       select SND_USB_AUDIO_USE_MEDIA_CONTROLLER if MEDIA_CONTROLLER && (MEDIA_SUPPORT=y || MEDIA_SUPPORT=SND_USB_AUDIO)
        help
          Say Y here to include support for USB audio and USB MIDI
          devices.
@@ -23,9 +22,6 @@ config SND_USB_AUDIO
          To compile this driver as a module, choose M here: the module
          will be called snd-usb-audio.
 
-config SND_USB_AUDIO_USE_MEDIA_CONTROLLER
-       bool
-
 config SND_USB_UA101
        tristate "Edirol UA-101/UA-1000 driver"
        select SND_PCM
index 8dca3c407f5a77446c1f7ea8d36db4e207bf112c..2d2d122b069f37974b7353d781023cf99cbe11ef 100644 (file)
@@ -15,8 +15,6 @@ snd-usb-audio-objs :=         card.o \
                        quirks.o \
                        stream.o
 
-snd-usb-audio-$(CONFIG_SND_USB_AUDIO_USE_MEDIA_CONTROLLER) += media.o
-
 snd-usbmidi-lib-objs := midi.o
 
 # Toplevel Module Dependency
index 63244bbba8c76d7e738a3a0c962b3f623cdd9d7f..3fc63583a5372dbec8d09e7ba7cf7399abb54cd1 100644 (file)
@@ -66,7 +66,6 @@
 #include "format.h"
 #include "power.h"
 #include "stream.h"
-#include "media.h"
 
 MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");
 MODULE_DESCRIPTION("USB Audio");
@@ -612,11 +611,6 @@ static int usb_audio_probe(struct usb_interface *intf,
        if (err < 0)
                goto __error;
 
-       if (quirk->media_device) {
-               /* don't want to fail when media_snd_device_create() fails */
-               media_snd_device_create(chip, intf);
-       }
-
        usb_chip[chip->index] = chip;
        chip->num_interfaces++;
        usb_set_intfdata(intf, chip);
@@ -673,14 +667,6 @@ static void usb_audio_disconnect(struct usb_interface *intf)
                list_for_each(p, &chip->midi_list) {
                        snd_usbmidi_disconnect(p);
                }
-               /*
-                * Nice to check quirk && quirk->media_device
-                * need some special handlings. Doesn't look like
-                * we have access to quirk here
-                * Acceses mixer_list
-               */
-               media_snd_device_delete(chip);
-
                /* release mixer resources */
                list_for_each_entry(mixer, &chip->mixer_list, list) {
                        snd_usb_mixer_disconnect(mixer);
index 34a0898e2238c004c9791eb97f0ca250bd8bc5f7..71778ca4b26aafcb3dacedcc660947a5df61f7e2 100644 (file)
@@ -105,8 +105,6 @@ struct snd_usb_endpoint {
        struct list_head list;
 };
 
-struct media_ctl;
-
 struct snd_usb_substream {
        struct snd_usb_stream *stream;
        struct usb_device *dev;
@@ -158,7 +156,6 @@ struct snd_usb_substream {
        } dsd_dop;
 
        bool trigger_tstamp_pending_update; /* trigger timestamp being updated from initial estimate */
-       struct media_ctl *media_ctl;
 };
 
 struct snd_usb_stream {
diff --git a/sound/usb/media.c b/sound/usb/media.c
deleted file mode 100644 (file)
index 93a50d0..0000000
+++ /dev/null
@@ -1,318 +0,0 @@
-/*
- * media.c - Media Controller specific ALSA driver code
- *
- * Copyright (c) 2016 Shuah Khan <shuahkh@osg.samsung.com>
- * Copyright (c) 2016 Samsung Electronics Co., Ltd.
- *
- * This file is released under the GPLv2.
- */
-
-/*
- * This file adds Media Controller support to ALSA driver
- * to use the Media Controller API to share tuner with DVB
- * and V4L2 drivers that control media device. Media device
- * is created based on existing quirks framework. Using this
- * approach, the media controller API usage can be added for
- * a specific device.
-*/
-
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/usb.h>
-
-#include <sound/pcm.h>
-#include <sound/core.h>
-
-#include "usbaudio.h"
-#include "card.h"
-#include "mixer.h"
-#include "media.h"
-
-static int media_snd_enable_source(struct media_ctl *mctl)
-{
-       if (mctl && mctl->media_dev->enable_source)
-               return mctl->media_dev->enable_source(&mctl->media_entity,
-                                                     &mctl->media_pipe);
-       return 0;
-}
-
-static void media_snd_disable_source(struct media_ctl *mctl)
-{
-       if (mctl && mctl->media_dev->disable_source)
-               mctl->media_dev->disable_source(&mctl->media_entity);
-}
-
-int media_snd_stream_init(struct snd_usb_substream *subs, struct snd_pcm *pcm,
-                       int stream)
-{
-       struct media_device *mdev;
-       struct media_ctl *mctl;
-       struct device *pcm_dev = &pcm->streams[stream].dev;
-       u32 intf_type;
-       int ret = 0;
-       u16 mixer_pad;
-       struct media_entity *entity;
-
-       mdev = subs->stream->chip->media_dev;
-       if (!mdev)
-               return -ENODEV;
-
-       if (subs->media_ctl)
-               return 0;
-
-       /* allocate media_ctl */
-       mctl = kzalloc(sizeof(*mctl), GFP_KERNEL);
-       if (!mctl)
-               return -ENOMEM;
-
-       mctl->media_dev = mdev;
-       if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
-               intf_type = MEDIA_INTF_T_ALSA_PCM_PLAYBACK;
-               mctl->media_entity.function = MEDIA_ENT_F_AUDIO_PLAYBACK;
-               mctl->media_pad.flags = MEDIA_PAD_FL_SOURCE;
-               mixer_pad = 1;
-       } else {
-               intf_type = MEDIA_INTF_T_ALSA_PCM_CAPTURE;
-               mctl->media_entity.function = MEDIA_ENT_F_AUDIO_CAPTURE;
-               mctl->media_pad.flags = MEDIA_PAD_FL_SINK;
-               mixer_pad = 2;
-       }
-       mctl->media_entity.name = pcm->name;
-       media_entity_pads_init(&mctl->media_entity, 1, &mctl->media_pad);
-       ret =  media_device_register_entity(mctl->media_dev,
-                                           &mctl->media_entity);
-       if (ret)
-               goto free_mctl;
-
-       mctl->intf_devnode = media_devnode_create(mdev, intf_type, 0,
-                                                 MAJOR(pcm_dev->devt),
-                                                 MINOR(pcm_dev->devt));
-       if (!mctl->intf_devnode) {
-               ret = -ENOMEM;
-               goto unregister_entity;
-       }
-       mctl->intf_link = media_create_intf_link(&mctl->media_entity,
-                                                &mctl->intf_devnode->intf,
-                                                MEDIA_LNK_FL_ENABLED);
-       if (!mctl->intf_link) {
-               ret = -ENOMEM;
-               goto devnode_remove;
-       }
-
-       /* create link between mixer and audio */
-       media_device_for_each_entity(entity, mdev) {
-               switch (entity->function) {
-               case MEDIA_ENT_F_AUDIO_MIXER:
-                       ret = media_create_pad_link(entity, mixer_pad,
-                                                   &mctl->media_entity, 0,
-                                                   MEDIA_LNK_FL_ENABLED);
-                       if (ret)
-                               goto remove_intf_link;
-                       break;
-               }
-       }
-
-       subs->media_ctl = mctl;
-       return 0;
-
-remove_intf_link:
-       media_remove_intf_link(mctl->intf_link);
-devnode_remove:
-       media_devnode_remove(mctl->intf_devnode);
-unregister_entity:
-       media_device_unregister_entity(&mctl->media_entity);
-free_mctl:
-       kfree(mctl);
-       return ret;
-}
-
-void media_snd_stream_delete(struct snd_usb_substream *subs)
-{
-       struct media_ctl *mctl = subs->media_ctl;
-
-       if (mctl && mctl->media_dev) {
-               struct media_device *mdev;
-
-               mdev = subs->stream->chip->media_dev;
-               if (mdev && media_devnode_is_registered(&mdev->devnode)) {
-                       media_devnode_remove(mctl->intf_devnode);
-                       media_device_unregister_entity(&mctl->media_entity);
-                       media_entity_cleanup(&mctl->media_entity);
-               }
-               kfree(mctl);
-               subs->media_ctl = NULL;
-       }
-}
-
-int media_snd_start_pipeline(struct snd_usb_substream *subs)
-{
-       struct media_ctl *mctl = subs->media_ctl;
-
-       if (mctl)
-               return media_snd_enable_source(mctl);
-       return 0;
-}
-
-void media_snd_stop_pipeline(struct snd_usb_substream *subs)
-{
-       struct media_ctl *mctl = subs->media_ctl;
-
-       if (mctl)
-               media_snd_disable_source(mctl);
-}
-
-int media_snd_mixer_init(struct snd_usb_audio *chip)
-{
-       struct device *ctl_dev = &chip->card->ctl_dev;
-       struct media_intf_devnode *ctl_intf;
-       struct usb_mixer_interface *mixer;
-       struct media_device *mdev = chip->media_dev;
-       struct media_mixer_ctl *mctl;
-       u32 intf_type = MEDIA_INTF_T_ALSA_CONTROL;
-       int ret;
-
-       if (!mdev)
-               return -ENODEV;
-
-       ctl_intf = chip->ctl_intf_media_devnode;
-       if (!ctl_intf) {
-               ctl_intf = media_devnode_create(mdev, intf_type, 0,
-                                               MAJOR(ctl_dev->devt),
-                                               MINOR(ctl_dev->devt));
-               if (!ctl_intf)
-                       return -ENOMEM;
-               chip->ctl_intf_media_devnode = ctl_intf;
-       }
-
-       list_for_each_entry(mixer, &chip->mixer_list, list) {
-
-               if (mixer->media_mixer_ctl)
-                       continue;
-
-               /* allocate media_mixer_ctl */
-               mctl = kzalloc(sizeof(*mctl), GFP_KERNEL);
-               if (!mctl)
-                       return -ENOMEM;
-
-               mctl->media_dev = mdev;
-               mctl->media_entity.function = MEDIA_ENT_F_AUDIO_MIXER;
-               mctl->media_entity.name = chip->card->mixername;
-               mctl->media_pad[0].flags = MEDIA_PAD_FL_SINK;
-               mctl->media_pad[1].flags = MEDIA_PAD_FL_SOURCE;
-               mctl->media_pad[2].flags = MEDIA_PAD_FL_SOURCE;
-               media_entity_pads_init(&mctl->media_entity, MEDIA_MIXER_PAD_MAX,
-                                 mctl->media_pad);
-               ret =  media_device_register_entity(mctl->media_dev,
-                                                   &mctl->media_entity);
-               if (ret) {
-                       kfree(mctl);
-                       return ret;
-               }
-
-               mctl->intf_link = media_create_intf_link(&mctl->media_entity,
-                                                        &ctl_intf->intf,
-                                                        MEDIA_LNK_FL_ENABLED);
-               if (!mctl->intf_link) {
-                       media_device_unregister_entity(&mctl->media_entity);
-                       media_entity_cleanup(&mctl->media_entity);
-                       kfree(mctl);
-                       return -ENOMEM;
-               }
-               mctl->intf_devnode = ctl_intf;
-               mixer->media_mixer_ctl = mctl;
-       }
-       return 0;
-}
-
-static void media_snd_mixer_delete(struct snd_usb_audio *chip)
-{
-       struct usb_mixer_interface *mixer;
-       struct media_device *mdev = chip->media_dev;
-
-       if (!mdev)
-               return;
-
-       list_for_each_entry(mixer, &chip->mixer_list, list) {
-               struct media_mixer_ctl *mctl;
-
-               mctl = mixer->media_mixer_ctl;
-               if (!mixer->media_mixer_ctl)
-                       continue;
-
-               if (media_devnode_is_registered(&mdev->devnode)) {
-                       media_device_unregister_entity(&mctl->media_entity);
-                       media_entity_cleanup(&mctl->media_entity);
-               }
-               kfree(mctl);
-               mixer->media_mixer_ctl = NULL;
-       }
-       if (media_devnode_is_registered(&mdev->devnode))
-               media_devnode_remove(chip->ctl_intf_media_devnode);
-       chip->ctl_intf_media_devnode = NULL;
-}
-
-int media_snd_device_create(struct snd_usb_audio *chip,
-                       struct usb_interface *iface)
-{
-       struct media_device *mdev;
-       struct usb_device *usbdev = interface_to_usbdev(iface);
-       int ret;
-
-       mdev = media_device_get_devres(&usbdev->dev);
-       if (!mdev)
-               return -ENOMEM;
-       if (!mdev->dev) {
-               /* register media device */
-               mdev->dev = &usbdev->dev;
-               if (usbdev->product)
-                       strlcpy(mdev->model, usbdev->product,
-                               sizeof(mdev->model));
-               if (usbdev->serial)
-                       strlcpy(mdev->serial, usbdev->serial,
-                               sizeof(mdev->serial));
-               strcpy(mdev->bus_info, usbdev->devpath);
-               mdev->hw_revision = le16_to_cpu(usbdev->descriptor.bcdDevice);
-               media_device_init(mdev);
-       }
-       if (!media_devnode_is_registered(&mdev->devnode)) {
-               ret = media_device_register(mdev);
-               if (ret) {
-                       dev_err(&usbdev->dev,
-                               "Couldn't register media device. Error: %d\n",
-                               ret);
-                       return ret;
-               }
-       }
-
-       /* save media device - avoid lookups */
-       chip->media_dev = mdev;
-
-       /* Create media entities for mixer and control dev */
-       ret = media_snd_mixer_init(chip);
-       if (ret) {
-               dev_err(&usbdev->dev,
-                       "Couldn't create media mixer entities. Error: %d\n",
-                       ret);
-
-               /* clear saved media_dev */
-               chip->media_dev = NULL;
-
-               return ret;
-       }
-       return 0;
-}
-
-void media_snd_device_delete(struct snd_usb_audio *chip)
-{
-       struct media_device *mdev = chip->media_dev;
-
-       media_snd_mixer_delete(chip);
-
-       if (mdev) {
-               if (media_devnode_is_registered(&mdev->devnode))
-                       media_device_unregister(mdev);
-               chip->media_dev = NULL;
-       }
-}
diff --git a/sound/usb/media.h b/sound/usb/media.h
deleted file mode 100644 (file)
index 1dcdcdc..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * media.h - Media Controller specific ALSA driver code
- *
- * Copyright (c) 2016 Shuah Khan <shuahkh@osg.samsung.com>
- * Copyright (c) 2016 Samsung Electronics Co., Ltd.
- *
- * This file is released under the GPLv2.
- */
-
-/*
- * This file adds Media Controller support to ALSA driver
- * to use the Media Controller API to share tuner with DVB
- * and V4L2 drivers that control media device. Media device
- * is created based on existing quirks framework. Using this
- * approach, the media controller API usage can be added for
- * a specific device.
-*/
-#ifndef __MEDIA_H
-
-#ifdef CONFIG_SND_USB_AUDIO_USE_MEDIA_CONTROLLER
-
-#include <media/media-device.h>
-#include <media/media-entity.h>
-#include <sound/asound.h>
-
-struct media_ctl {
-       struct media_device *media_dev;
-       struct media_entity media_entity;
-       struct media_intf_devnode *intf_devnode;
-       struct media_link *intf_link;
-       struct media_pad media_pad;
-       struct media_pipeline media_pipe;
-};
-
-/*
- * One source pad each for SNDRV_PCM_STREAM_CAPTURE and
- * SNDRV_PCM_STREAM_PLAYBACK. One for sink pad to link
- * to AUDIO Source
-*/
-#define MEDIA_MIXER_PAD_MAX    (SNDRV_PCM_STREAM_LAST + 2)
-
-struct media_mixer_ctl {
-       struct media_device *media_dev;
-       struct media_entity media_entity;
-       struct media_intf_devnode *intf_devnode;
-       struct media_link *intf_link;
-       struct media_pad media_pad[MEDIA_MIXER_PAD_MAX];
-       struct media_pipeline media_pipe;
-};
-
-int media_snd_device_create(struct snd_usb_audio *chip,
-                           struct usb_interface *iface);
-void media_snd_device_delete(struct snd_usb_audio *chip);
-int media_snd_stream_init(struct snd_usb_substream *subs, struct snd_pcm *pcm,
-                         int stream);
-void media_snd_stream_delete(struct snd_usb_substream *subs);
-int media_snd_start_pipeline(struct snd_usb_substream *subs);
-void media_snd_stop_pipeline(struct snd_usb_substream *subs);
-#else
-static inline int media_snd_device_create(struct snd_usb_audio *chip,
-                                         struct usb_interface *iface)
-                                               { return 0; }
-static inline void media_snd_device_delete(struct snd_usb_audio *chip) { }
-static inline int media_snd_stream_init(struct snd_usb_substream *subs,
-                                       struct snd_pcm *pcm, int stream)
-                                               { return 0; }
-static inline void media_snd_stream_delete(struct snd_usb_substream *subs) { }
-static inline int media_snd_start_pipeline(struct snd_usb_substream *subs)
-                                       { return 0; }
-static inline void media_snd_stop_pipeline(struct snd_usb_substream *subs) { }
-#endif
-#endif /* __MEDIA_H */
index f3789446ab9c49717e1dc0fa1c93e56162a08fbb..3417ef347e40432482b84de271a3bb98c8724297 100644 (file)
@@ -3,8 +3,6 @@
 
 #include <sound/info.h>
 
-struct media_mixer_ctl;
-
 struct usb_mixer_interface {
        struct snd_usb_audio *chip;
        struct usb_host_interface *hostif;
@@ -24,7 +22,6 @@ struct usb_mixer_interface {
        struct urb *rc_urb;
        struct usb_ctrlrequest *rc_setup_packet;
        u8 rc_buffer[6];
-       struct media_mixer_ctl *media_mixer_ctl;
 };
 
 #define MAX_CHANNELS   16      /* max logical channels */
index ddca6547399b0103b37abbdf73cfa5c9e710a7fd..1f8fb0d904e059d4e6084d0c221489d598e9e4c1 100644 (file)
@@ -348,6 +348,16 @@ static struct usbmix_name_map bose_companion5_map[] = {
        { 0 }   /* terminator */
 };
 
+/*
+ * The Dell USB dock with the ALC4020 codec has a firmware problem: it
+ * gets screwed up when a zero volume is passed. Just skip these
+ * controls as a workaround.
+ */
+static const struct usbmix_name_map dell_alc4020_map[] = {
+       { 16, NULL },
+       { 19, NULL },
+       { 0 }
+};
+
 /*
  * Control map entries
  */
@@ -430,6 +440,10 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
                .id = USB_ID(0x0ccd, 0x0028),
                .map = aureon_51_2_map,
        },
+       {
+               .id = USB_ID(0x0bda, 0x4014),
+               .map = dell_alc4020_map,
+       },
        {
                .id = USB_ID(0x0dba, 0x1000),
                .map = mbox1_map,
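Note on the dell_alc4020_map added above: it relies on the name-map convention that an entry whose name is NULL suppresses the control entirely, which is how mixer units 16 and 19 are hidden for USB ID 0x0bda:0x4014. A sketch of the two behaviours, with illustrative unit IDs:

        /* Sketch of the usbmix name-map convention assumed above: a NULL
         * name drops the unit's control, a non-NULL name renames it. */
        static const struct usbmix_name_map example_map[] = {
                { 16, NULL },           /* hide unit 16's control */
                { 19, "Headphone" },    /* rename unit 19's control */
                { 0 }                   /* terminator */
        };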
index 0e4e0640c50486b608b4a3df4c83b18fd11cae17..44d178ee9177565bba3fde9d2ac2b7308e8dd7ae 100644 (file)
@@ -35,7 +35,6 @@
 #include "pcm.h"
 #include "clock.h"
 #include "power.h"
-#include "media.h"
 
 #define SUBSTREAM_FLAG_DATA_EP_STARTED 0
 #define SUBSTREAM_FLAG_SYNC_EP_STARTED 1
@@ -718,14 +717,10 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
        struct audioformat *fmt;
        int ret;
 
-       ret = media_snd_start_pipeline(subs);
-       if (ret)
-               return ret;
-
        ret = snd_pcm_lib_alloc_vmalloc_buffer(substream,
                                               params_buffer_bytes(hw_params));
        if (ret < 0)
-               goto err_ret;
+               return ret;
 
        subs->pcm_format = params_format(hw_params);
        subs->period_bytes = params_period_bytes(hw_params);
@@ -739,27 +734,22 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
                dev_dbg(&subs->dev->dev,
                        "cannot set format: format = %#x, rate = %d, channels = %d\n",
                           subs->pcm_format, subs->cur_rate, subs->channels);
-               ret = -EINVAL;
-               goto err_ret;
+               return -EINVAL;
        }
 
        ret = snd_usb_lock_shutdown(subs->stream->chip);
        if (ret < 0)
-               goto err_ret;
+               return ret;
        ret = set_format(subs, fmt);
        snd_usb_unlock_shutdown(subs->stream->chip);
        if (ret < 0)
-               goto err_ret;
+               return ret;
 
        subs->interface = fmt->iface;
        subs->altset_idx = fmt->altset_idx;
        subs->need_setup_ep = true;
 
        return 0;
-
-err_ret:
-       media_snd_stop_pipeline(subs);
-       return ret;
 }
 
 /*
@@ -771,7 +761,6 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream)
 {
        struct snd_usb_substream *subs = substream->runtime->private_data;
 
-       media_snd_stop_pipeline(subs);
        subs->cur_audiofmt = NULL;
        subs->cur_rate = 0;
        subs->period_bytes = 0;
@@ -1232,7 +1221,6 @@ static int snd_usb_pcm_open(struct snd_pcm_substream *substream, int direction)
        struct snd_usb_stream *as = snd_pcm_substream_chip(substream);
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct snd_usb_substream *subs = &as->substream[direction];
-       int ret;
 
        subs->interface = -1;
        subs->altset_idx = 0;
@@ -1246,12 +1234,7 @@ static int snd_usb_pcm_open(struct snd_pcm_substream *substream, int direction)
        subs->dsd_dop.channel = 0;
        subs->dsd_dop.marker = 1;
 
-       ret = setup_hw_info(runtime, subs);
-       if (ret == 0)
-               ret = media_snd_stream_init(subs, as->pcm, direction);
-       if (ret)
-               snd_usb_autosuspend(subs->stream->chip);
-       return ret;
+       return setup_hw_info(runtime, subs);
 }
 
 static int snd_usb_pcm_close(struct snd_pcm_substream *substream, int direction)
@@ -1260,7 +1243,6 @@ static int snd_usb_pcm_close(struct snd_pcm_substream *substream, int direction)
        struct snd_usb_substream *subs = &as->substream[direction];
 
        stop_endpoints(subs, true);
-       media_snd_stop_pipeline(subs);
 
        if (subs->interface >= 0 &&
            !snd_usb_lock_shutdown(subs->stream->chip)) {
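
With the media pipeline hooks removed, snd_usb_hw_params() no longer owns a resource that must be unwound on failure, so the shared err_ret label can become direct returns. A minimal sketch of the two idioms (illustrative only, not kernel code):

    #include <stdio.h>

    /* Error handling with a cleanup label: needed while the function owns
     * a resource (the media pipeline) that must be released on failure. */
    static int hw_params_with_pipeline(int fail)
    {
            int ret = 0;

            printf("start pipeline\n");             /* acquire */
            if (fail) {
                    ret = -22;                      /* -EINVAL */
                    goto err_ret;
            }
            return 0;

    err_ret:
            printf("stop pipeline\n");              /* release on error */
            return ret;
    }

    /* After the removal there is nothing to unwind, so every failure can
     * simply return. */
    static int hw_params_plain(int fail)
    {
            if (fail)
                    return -22;
            return 0;
    }

    int main(void)
    {
            return hw_params_with_pipeline(1) == -22 &&
                   hw_params_plain(0) == 0 ? 0 : 1;
    }
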
index 9d087b19c70c861e9a4bb219570c88aa2d15dba9..c60a776e815d72f14b9b6345f2e8a0266f8ec1b6 100644 (file)
@@ -2886,7 +2886,6 @@ YAMAHA_DEVICE(0x7010, "UB99"),
                .product_name = pname, \
                .ifnum = QUIRK_ANY_INTERFACE, \
                .type = QUIRK_AUDIO_ALIGN_TRANSFER, \
-               .media_device = 1, \
        } \
 }
 
index 6178bb5d07318ac7781b1f27fdb2b41a54f1f032..0adfd9537cf766bbe130c71c4cc029091b2d4118 100644 (file)
@@ -1134,9 +1134,11 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
        case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
        case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
        case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
+       case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */
        case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
        case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
        case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
+       case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
        case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
                return true;
        }
index 6fe7f210bd4eae0a57095ad46da85b3ec4726adf..8e9548bc1f1a922e6ceb5c99450da8ba28ee22ff 100644 (file)
@@ -36,7 +36,6 @@
 #include "format.h"
 #include "clock.h"
 #include "stream.h"
-#include "media.h"
 
 /*
  * free a substream
@@ -53,7 +52,6 @@ static void free_substream(struct snd_usb_substream *subs)
                kfree(fp);
        }
        kfree(subs->rate_list.list);
-       media_snd_stream_delete(subs);
 }
 
 
index a161c7c1b126651c558b0140678ad0d1e6f432d8..b665d85555cb3aad0c9a621232f28d8e1f75344a 100644 (file)
@@ -30,9 +30,6 @@
  *
  */
 
-struct media_device;
-struct media_intf_devnode;
-
 struct snd_usb_audio {
        int index;
        struct usb_device *dev;
@@ -63,8 +60,6 @@ struct snd_usb_audio {
        bool autoclock;                 /* from the 'autoclock' module param */
 
        struct usb_host_interface *ctrl_intf;   /* the audio control interface */
-       struct media_device *media_dev;
-       struct media_intf_devnode *ctl_intf_media_devnode;
 };
 
 #define usb_audio_err(chip, fmt, args...) \
@@ -115,7 +110,6 @@ struct snd_usb_audio_quirk {
        const char *product_name;
        int16_t ifnum;
        uint16_t type;
-       bool media_device;
        const void *data;
 };
 
index 5334ad9d39b7fc1853d4049b8ed8ce6358c27879..1069d96248c168960b225fcda8a7bad60edf0db0 100755 (executable)
@@ -3,7 +3,7 @@
 make &> /dev/null
 
 for i in `ls tests/*.c`; do
-       testname=$(basename -s .c "$i")
+       testname=$(basename "$i" .c)
        gcc -o tests/$testname -pthread -lpthread $i liblockdep.a -Iinclude -D__USE_LIBLOCKDEP &> /dev/null
        echo -ne "$testname... "
        if [ $(timeout 1 ./tests/$testname | wc -l) -gt 0 ]; then
@@ -11,11 +11,13 @@ for i in `ls tests/*.c`; do
        else
                echo "FAILED!"
        fi
-       rm tests/$testname
+       if [ -f "tests/$testname" ]; then
+               rm tests/$testname
+       fi
 done
 
 for i in `ls tests/*.c`; do
-       testname=$(basename -s .c "$i")
+       testname=$(basename "$i" .c)
        gcc -o tests/$testname -pthread -lpthread -Iinclude $i &> /dev/null
        echo -ne "(PRELOAD) $testname... "
        if [ $(timeout 1 ./lockdep ./tests/$testname | wc -l) -gt 0 ]; then
@@ -23,5 +25,7 @@ for i in `ls tests/*.c`; do
        else
                echo "FAILED!"
        fi
-       rm tests/$testname
+       if [ -f "tests/$testname" ]; then
+               rm tests/$testname
+       fi
 done
index 5b32413409459ce165e2f56990dbc0f52aa8bd31..544b05a53b7057bbe9092e31f600065d47e3b24e 100644 (file)
@@ -98,6 +98,9 @@ static char *get_klog_buff(unsigned int *klen)
        char *buff;
 
        len = klogctl(CMD_ACTION_SIZE_BUFFER, NULL, 0);
+       if (len < 0)
+               return NULL;
+
        buff = malloc(len);
        if (!buff)
                return NULL;
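
The check added above matters because klogctl() returns -1 on failure, and a negative length converted to size_t would ask malloc() for an absurd allocation. A standalone equivalent, assuming the local CMD_ACTION_SIZE_BUFFER macro means SYSLOG_ACTION_SIZE_BUFFER:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/klog.h>

    #define SYSLOG_ACTION_SIZE_BUFFER 10    /* assumed meaning of CMD_ACTION_SIZE_BUFFER */

    int main(void)
    {
            int len = klogctl(SYSLOG_ACTION_SIZE_BUFFER, NULL, 0);

            if (len < 0) {                  /* the check the hunk adds */
                    perror("klogctl");
                    return 1;
            }

            char *buff = malloc(len);
            if (!buff)
                    return 1;
            printf("kernel log buffer: %d bytes\n", len);
            free(buff);
            return 0;
    }
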
index 5a95896105bc62fbd707f2feae9178d0c69f08ca..55a60d331f47400065d7c37cf32dd97d11f83ff7 100644 (file)
@@ -299,18 +299,38 @@ they mean, and suggestions for how to fix them.
 Errors in .c files
 ------------------
 
-If you're getting an objtool error in a compiled .c file, chances are
-the file uses an asm() statement which has a "call" instruction.  An
-asm() statement with a call instruction must declare the use of the
-stack pointer in its output operand.  For example, on x86_64:
+1. c_file.o: warning: objtool: funcA() falls through to next function funcB()
 
-   register void *__sp asm("rsp");
-   asm volatile("call func" : "+r" (__sp));
+   This means that funcA() doesn't end with a return instruction or an
+   unconditional jump, and that objtool has determined that the function
+   can fall through into the next function.  There could be different
+   reasons for this:
 
-Otherwise the stack frame may not get created before the call.
+   1) funcA()'s last instruction is a call to a "noreturn" function like
+      panic().  In this case the noreturn function needs to be added to
+      objtool's hard-coded global_noreturns array.  Feel free to bug the
+      objtool maintainer, or you can submit a patch.
 
-Another possible cause for errors in C code is if the Makefile removes
--fno-omit-frame-pointer or adds -fomit-frame-pointer to the gcc options.
+   2) funcA() uses the unreachable() annotation in a section of code
+      that is actually reachable.
+
+   3) If funcA() calls an inline function, the object code for funcA()
+      might be corrupt due to a gcc bug.  For more details, see:
+      https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70646
+
+2. If you're getting any other objtool error in a compiled .c file, it
+   may be because the file uses an asm() statement which has a "call"
+   instruction.  An asm() statement with a call instruction must declare
+   the use of the stack pointer in its output operand.  For example, on
+   x86_64:
+
+     register void *__sp asm("rsp");
+     asm volatile("call func" : "+r" (__sp));
+
+   Otherwise the stack frame may not get created before the call.
+
+3. Another possible cause for errors in C code is if the Makefile removes
+   -fno-omit-frame-pointer or adds -fomit-frame-pointer to the gcc options.
 
 Also see the above section on .S file errors for more information about
 what the individual error messages mean.
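
For case 1) above, a minimal sketch of the pattern that triggers the warning (not from the kernel tree): the function's last instruction is a call with no ret after it, so unless the checker knows the callee never returns, execution appears to fall through into whatever function the linker places next:

    #include <stdlib.h>

    static void __attribute__((noreturn)) fatal_error(const char *msg)
    {
            (void)msg;
            exit(1);
    }

    /* funcA() ends in a call to a noreturn function; the compiler emits
     * no ret after it, which looks like a fall-through to the next
     * function unless fatal_error() is known to be noreturn. */
    void funcA(void)
    {
            fatal_error("unrecoverable");
    }

    int main(void)
    {
            funcA();
    }
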
index 7515cb2e879a1440414d51da7239aedf00d8ae0b..e8a1e69eb92c5235735d42847e4f34b2e737a972 100644 (file)
@@ -54,6 +54,7 @@ struct instruction {
        struct symbol *call_dest;
        struct instruction *jump_dest;
        struct list_head alts;
+       struct symbol *func;
 };
 
 struct alternative {
@@ -66,6 +67,7 @@ struct objtool_file {
        struct list_head insn_list;
        DECLARE_HASHTABLE(insn_hash, 16);
        struct section *rodata, *whitelist;
+       bool ignore_unreachables, c_file;
 };
 
 const char *objname;
@@ -228,7 +230,7 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
                        }
                }
 
-               if (insn->type == INSN_JUMP_DYNAMIC)
+               if (insn->type == INSN_JUMP_DYNAMIC && list_empty(&insn->alts))
                        /* sibling call */
                        return 0;
        }
@@ -248,6 +250,7 @@ static int dead_end_function(struct objtool_file *file, struct symbol *func)
 static int decode_instructions(struct objtool_file *file)
 {
        struct section *sec;
+       struct symbol *func;
        unsigned long offset;
        struct instruction *insn;
        int ret;
@@ -281,6 +284,21 @@ static int decode_instructions(struct objtool_file *file)
                        hash_add(file->insn_hash, &insn->hash, insn->offset);
                        list_add_tail(&insn->list, &file->insn_list);
                }
+
+               list_for_each_entry(func, &sec->symbol_list, list) {
+                       if (func->type != STT_FUNC)
+                               continue;
+
+                       if (!find_insn(file, sec, func->offset)) {
+                               WARN("%s(): can't find starting instruction",
+                                    func->name);
+                               return -1;
+                       }
+
+                       func_for_each_insn(file, func, insn)
+                               if (!insn->func)
+                                       insn->func = func;
+               }
        }
 
        return 0;
@@ -664,13 +682,40 @@ static int add_func_switch_tables(struct objtool_file *file,
                                                text_rela->addend);
 
                /*
-                * TODO: Document where this is needed, or get rid of it.
-                *
                 * rare case:   jmpq *[addr](%rip)
+                *
+                * This check is for a rare gcc quirk, currently only seen in
+                * three driver functions in the kernel, only with certain
+                * obscure non-distro configs.
+                *
+                * As part of an optimization, gcc makes a copy of an existing
+                * switch jump table, modifies it, and then hard-codes the jump
+                * (albeit with an indirect jump) to use a single entry in the
+                * table.  The rest of the jump table and some of its jump
+                * targets remain as dead code.
+                *
+                * In such a case we can just crudely ignore all unreachable
+                * instruction warnings for the entire object file.  Ideally we
+                * would just ignore them for the function, but that would
+                * require redesigning the code quite a bit.  And honestly
+                * that's just not worth doing: unreachable instruction
+                * warnings are of questionable value anyway, and this is such
+                * a rare issue.
+                *
+                * kbuild reports:
+                * - https://lkml.kernel.org/r/201603231906.LWcVUpxm%25fengguang.wu@intel.com
+                * - https://lkml.kernel.org/r/201603271114.K9i45biy%25fengguang.wu@intel.com
+                * - https://lkml.kernel.org/r/201603291058.zuJ6ben1%25fengguang.wu@intel.com
+                *
+                * gcc bug:
+                * - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70604
                 */
-               if (!rodata_rela)
+               if (!rodata_rela) {
                        rodata_rela = find_rela_by_dest(file->rodata,
                                                        text_rela->addend + 4);
+                       if (rodata_rela)
+                               file->ignore_unreachables = true;
+               }
 
                if (!rodata_rela)
                        continue;
@@ -732,9 +777,6 @@ static int decode_sections(struct objtool_file *file)
 {
        int ret;
 
-       file->whitelist = find_section_by_name(file->elf, "__func_stack_frame_non_standard");
-       file->rodata = find_section_by_name(file->elf, ".rodata");
-
        ret = decode_instructions(file);
        if (ret)
                return ret;
@@ -799,6 +841,7 @@ static int validate_branch(struct objtool_file *file,
        struct alternative *alt;
        struct instruction *insn;
        struct section *sec;
+       struct symbol *func = NULL;
        unsigned char state;
        int ret;
 
@@ -813,6 +856,16 @@ static int validate_branch(struct objtool_file *file,
        }
 
        while (1) {
+               if (file->c_file && insn->func) {
+                       if (func && func != insn->func) {
+                               WARN("%s() falls through to next function %s()",
+                                    func->name, insn->func->name);
+                               return 1;
+                       }
+
+                       func = insn->func;
+               }
+
                if (insn->visited) {
                        if (frame_state(insn->state) != frame_state(state)) {
                                WARN_FUNC("frame pointer state mismatch",
@@ -823,13 +876,6 @@ static int validate_branch(struct objtool_file *file,
                        return 0;
                }
 
-               /*
-                * Catch a rare case where a noreturn function falls through to
-                * the next function.
-                */
-               if (is_fentry_call(insn) && (state & STATE_FENTRY))
-                       return 0;
-
                insn->visited = true;
                insn->state = state;
 
@@ -1035,12 +1081,8 @@ static int validate_functions(struct objtool_file *file)
                                continue;
 
                        insn = find_insn(file, sec, func->offset);
-                       if (!insn) {
-                               WARN("%s(): can't find starting instruction",
-                                    func->name);
-                               warnings++;
+                       if (!insn)
                                continue;
-                       }
 
                        ret = validate_branch(file, insn, 0);
                        warnings += ret;
@@ -1056,13 +1098,14 @@ static int validate_functions(struct objtool_file *file)
                                if (insn->visited)
                                        continue;
 
-                               if (!ignore_unreachable_insn(func, insn) &&
-                                   !warnings) {
-                                       WARN_FUNC("function has unreachable instruction", insn->sec, insn->offset);
-                                       warnings++;
-                               }
-
                                insn->visited = true;
+
+                               if (file->ignore_unreachables || warnings ||
+                                   ignore_unreachable_insn(func, insn))
+                                       continue;
+
+                               WARN_FUNC("function has unreachable instruction", insn->sec, insn->offset);
+                               warnings++;
                        }
                }
        }
@@ -1133,6 +1176,10 @@ int cmd_check(int argc, const char **argv)
 
        INIT_LIST_HEAD(&file.insn_list);
        hash_init(file.insn_hash);
+       file.whitelist = find_section_by_name(file.elf, "__func_stack_frame_non_standard");
+       file.rodata = find_section_by_name(file.elf, ".rodata");
+       file.ignore_unreachables = false;
+       file.c_file = find_section_by_name(file.elf, ".comment");
 
        ret = decode_sections(&file);
        if (ret < 0)
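
The hunks above work together: decode_instructions() now tags every instruction with the function whose symbol range contains it, and validate_branch() warns when it walks from one tagged function into another. A toy model with assumed, simplified data structures (not objtool's real types):

    #include <stdio.h>

    struct func { const char *name; unsigned long start, end; };
    struct insn { unsigned long offset; const struct func *func; };

    /* Tag each instruction with the function whose range contains it. */
    static void tag_insns(struct insn *insns, int n,
                          const struct func *funcs, int nfuncs)
    {
            for (int i = 0; i < n; i++)
                    for (int f = 0; f < nfuncs; f++)
                            if (insns[i].offset >= funcs[f].start &&
                                insns[i].offset < funcs[f].end)
                                    insns[i].func = &funcs[f];
    }

    int main(void)
    {
            const struct func funcs[] = { {"funcA", 0, 8}, {"funcB", 8, 16} };
            struct insn insns[] = { {0}, {4}, {8}, {12} };
            const struct func *cur = NULL;

            tag_insns(insns, 4, funcs, 2);

            /* Walking straight through the instructions crosses a function
             * boundary at offset 8 -- the condition validate_branch() warns
             * about when no return or jump intervenes. */
            for (int i = 0; i < 4; i++) {
                    if (cur && insns[i].func != cur)
                            printf("%s() falls through to next function %s()\n",
                                   cur->name, insns[i].func->name);
                    cur = insns[i].func;
            }
            return 0;
    }
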
index 2e1fa2357528a01d293fa00a8c61c9f0ec29bad6..8c8c6b9ce915341dfc01a02185a53b4198b7d790 100644 (file)
@@ -74,6 +74,7 @@ arch/*/include/uapi/asm/unistd*.h
 arch/*/include/uapi/asm/perf_regs.h
 arch/*/lib/memcpy*.S
 arch/*/lib/memset*.S
+arch/*/include/asm/*features.h
 include/linux/poison.h
 include/linux/hw_breakpoint.h
 include/uapi/linux/perf_event.h
index 6138bdef6e63d9f3da37716866f2a38544c2a0c1..f8ccee132867c566dabf4bb727e9c17d78637894 100644 (file)
@@ -4,6 +4,8 @@
 #include <stdlib.h>
 #include <string.h>
 #include <linux/stringify.h>
+#include "header.h"
+#include "util.h"
 
 #define mfspr(rn)       ({unsigned long rval; \
                         asm volatile("mfspr %0," __stringify(rn) \
index 238aa3927c71e1ad8c57510641e303cc2a92d6f8..f2d9c5fe58e09f62bc517025bcbb85f15fce429a 100755 (executable)
@@ -15,7 +15,7 @@ TMP_DEST=$(mktemp -d)
 tar xf ${TARBALL} -C $TMP_DEST
 rm -f ${TARBALL}
 cd - > /dev/null
-make -C $TMP_DEST/perf*/tools/perf > /dev/null 2>&1
+make -C $TMP_DEST/perf*/tools/perf > /dev/null
 RC=$?
 rm -rf ${TMP_DEST}
 exit $RC
index 4b98165559462025ded4db4a41fe8b5e66a9f151..2a83414159a65a026195f102c164ed6f6eae45be 100644 (file)
@@ -337,7 +337,7 @@ static void callchain_node__init_have_children(struct callchain_node *node,
        chain = list_entry(node->val.next, struct callchain_list, list);
        chain->has_children = has_sibling;
 
-       if (node->val.next != node->val.prev) {
+       if (!list_empty(&node->val)) {
                chain = list_entry(node->val.prev, struct callchain_list, list);
                chain->has_children = !RB_EMPTY_ROOT(&node->rb_root);
        }
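
The rewrite above is not a pure cleanup: on a single-entry list, next == prev (both point at the one node), so the old comparison was false while !list_empty() is true, and the last-entry fixup now also runs for that case. A standalone demonstration with a minimal stand-in for the kernel's struct list_head:

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    static int list_empty(const struct list_head *head)
    {
            return head->next == head;      /* same test as the kernel's */
    }

    int main(void)
    {
            struct list_head head, entry;

            /* circular list with exactly one entry: head <-> entry */
            head.next = head.prev = &entry;
            entry.next = entry.prev = &head;

            printf("next != prev: %d\n", head.next != head.prev);   /* 0 */
            printf("!list_empty : %d\n", !list_empty(&head));       /* 1 */
            return 0;
    }
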
index 52cf479bc5935d3f74edbf7d3c8733d4191e268a..dad55d04ffdd5074c212fac7dbcd306444f008e6 100644 (file)
@@ -56,13 +56,22 @@ const char *perf_event__name(unsigned int id)
        return perf_event__names[id];
 }
 
-static struct perf_sample synth_sample = {
+static int perf_tool__process_synth_event(struct perf_tool *tool,
+                                         union perf_event *event,
+                                         struct machine *machine,
+                                         perf_event__handler_t process)
+{
+       struct perf_sample synth_sample = {
        .pid       = -1,
        .tid       = -1,
        .time      = -1,
        .stream_id = -1,
        .cpu       = -1,
        .period    = 1,
+       .cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
+       };
+
+       return process(tool, event, &synth_sample, machine);
 };
 
 /*
@@ -186,7 +195,7 @@ pid_t perf_event__synthesize_comm(struct perf_tool *tool,
        if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
                return -1;
 
-       if (process(tool, event, &synth_sample, machine) != 0)
+       if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
                return -1;
 
        return tgid;
@@ -218,7 +227,7 @@ static int perf_event__synthesize_fork(struct perf_tool *tool,
 
        event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
 
-       if (process(tool, event, &synth_sample, machine) != 0)
+       if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
                return -1;
 
        return 0;
@@ -344,7 +353,7 @@ out:
                event->mmap2.pid = tgid;
                event->mmap2.tid = pid;
 
-               if (process(tool, event, &synth_sample, machine) != 0) {
+               if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
                        rc = -1;
                        break;
                }
@@ -402,7 +411,7 @@ int perf_event__synthesize_modules(struct perf_tool *tool,
 
                memcpy(event->mmap.filename, pos->dso->long_name,
                       pos->dso->long_name_len + 1);
-               if (process(tool, event, &synth_sample, machine) != 0) {
+               if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
                        rc = -1;
                        break;
                }
@@ -472,7 +481,7 @@ static int __event__synthesize_thread(union perf_event *comm_event,
                /*
                 * Send the prepared comm event
                 */
-               if (process(tool, comm_event, &synth_sample, machine) != 0)
+               if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
                        break;
 
                rc = 0;
@@ -701,7 +710,7 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
        event->mmap.len   = map->end - event->mmap.start;
        event->mmap.pid   = machine->pid;
 
-       err = process(tool, event, &synth_sample, machine);
+       err = perf_tool__process_synth_event(tool, event, machine, process);
        free(event);
 
        return err;
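
The new helper builds the synthesized sample on the stack and derives cpumode from the low bits of the event header's misc field. A standalone sketch of that mask extraction (macro values copied from the perf UAPI header, shown here only for illustration):

    #include <stdint.h>
    #include <stdio.h>

    /* Values as in uapi/linux/perf_event.h. */
    #define PERF_RECORD_MISC_CPUMODE_MASK   (7 << 0)
    #define PERF_RECORD_MISC_USER           (2 << 0)
    #define PERF_RECORD_MISC_MMAP_DATA      (1 << 13)

    int main(void)
    {
            /* a header whose misc field carries mode bits plus a flag */
            uint16_t misc = PERF_RECORD_MISC_USER | PERF_RECORD_MISC_MMAP_DATA;

            /* keep only the cpumode bits, as the new helper does */
            uint16_t cpumode = misc & PERF_RECORD_MISC_CPUMODE_MASK;

            printf("misc=0x%04x cpumode=%u\n", misc, cpumode);      /* cpumode=2 */
            return 0;
    }
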
index cd67e64a0494be00b750e057545a916402de5a19..2fbeb59c4bdd91b2b9de943d153c3674c46ca125 100644 (file)
@@ -9,36 +9,32 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent
 
 #if   defined(__arm__)
 #define GEN_ELF_ARCH   EM_ARM
-#define GEN_ELF_ENDIAN ELFDATA2LSB
 #define GEN_ELF_CLASS  ELFCLASS32
 #elif defined(__aarch64__)
 #define GEN_ELF_ARCH   EM_AARCH64
-#define GEN_ELF_ENDIAN ELFDATA2LSB
 #define GEN_ELF_CLASS  ELFCLASS64
 #elif defined(__x86_64__)
 #define GEN_ELF_ARCH   EM_X86_64
-#define GEN_ELF_ENDIAN ELFDATA2LSB
 #define GEN_ELF_CLASS  ELFCLASS64
 #elif defined(__i386__)
 #define GEN_ELF_ARCH   EM_386
-#define GEN_ELF_ENDIAN ELFDATA2LSB
 #define GEN_ELF_CLASS  ELFCLASS32
-#elif defined(__ppcle__)
-#define GEN_ELF_ARCH   EM_PPC
-#define GEN_ELF_ENDIAN ELFDATA2LSB
-#define GEN_ELF_CLASS  ELFCLASS64
-#elif defined(__powerpc__)
-#define GEN_ELF_ARCH   EM_PPC64
-#define GEN_ELF_ENDIAN ELFDATA2MSB
-#define GEN_ELF_CLASS  ELFCLASS64
-#elif defined(__powerpcle__)
+#elif defined(__powerpc64__)
 #define GEN_ELF_ARCH   EM_PPC64
-#define GEN_ELF_ENDIAN ELFDATA2LSB
 #define GEN_ELF_CLASS  ELFCLASS64
+#elif defined(__powerpc__)
+#define GEN_ELF_ARCH   EM_PPC
+#define GEN_ELF_CLASS  ELFCLASS32
 #else
 #error "unsupported architecture"
 #endif
 
+#if __BYTE_ORDER == __BIG_ENDIAN
+#define GEN_ELF_ENDIAN ELFDATA2MSB
+#else
+#define GEN_ELF_ENDIAN ELFDATA2LSB
+#endif
+
 #if GEN_ELF_CLASS == ELFCLASS64
 #define elf_newehdr    elf64_newehdr
 #define elf_getshdr    elf64_getshdr
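
Keying GEN_ELF_ENDIAN off __BYTE_ORDER instead of per-architecture macros lets the same header cover both PowerPC byte orders. The same derivation in a standalone program:

    #include <endian.h>
    #include <stdio.h>

    #if __BYTE_ORDER == __BIG_ENDIAN
    # define GEN_ELF_ENDIAN_STR "ELFDATA2MSB"
    #else
    # define GEN_ELF_ENDIAN_STR "ELFDATA2LSB"
    #endif

    int main(void)
    {
            printf("ELF data encoding for this target: %s\n", GEN_ELF_ENDIAN_STR);
            return 0;
    }
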
index 6bc3ecd2e7ca3d8e61b2d5ec9fa0714b319182a9..abf1366e2a24d3bcf439434f96075320000e49a9 100644 (file)
@@ -279,6 +279,7 @@ static int intel_bts_synth_branch_sample(struct intel_bts_queue *btsq,
        event.sample.header.misc = PERF_RECORD_MISC_USER;
        event.sample.header.size = sizeof(struct perf_event_header);
 
+       sample.cpumode = PERF_RECORD_MISC_USER;
        sample.ip = le64_to_cpu(branch->from);
        sample.pid = btsq->pid;
        sample.tid = btsq->tid;
index 05d815851be19bd40e00672c913a83ad2003175a..6175784409896425154dcfd9c49560e9f2cb7e28 100644 (file)
@@ -979,6 +979,7 @@ static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
        if (!pt->timeless_decoding)
                sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
 
+       sample.cpumode = PERF_RECORD_MISC_USER;
        sample.ip = ptq->state->from_ip;
        sample.pid = ptq->pid;
        sample.tid = ptq->tid;
@@ -1035,6 +1036,7 @@ static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
        if (!pt->timeless_decoding)
                sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
 
+       sample.cpumode = PERF_RECORD_MISC_USER;
        sample.ip = ptq->state->from_ip;
        sample.pid = ptq->pid;
        sample.tid = ptq->tid;
@@ -1092,6 +1094,7 @@ static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
        if (!pt->timeless_decoding)
                sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
 
+       sample.cpumode = PERF_RECORD_MISC_USER;
        sample.ip = ptq->state->from_ip;
        sample.pid = ptq->pid;
        sample.tid = ptq->tid;
@@ -1127,7 +1130,7 @@ static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
                pr_err("Intel Processor Trace: failed to deliver transaction event, error %d\n",
                       ret);
 
-       if (pt->synth_opts.callchain)
+       if (pt->synth_opts.last_branch)
                intel_pt_reset_last_branch_rb(ptq);
 
        return ret;
index cd272cc21e05b1325c68f4ff4aae442c45a0364a..ad0c0bb1fbc78f5d131a8b831ecaf2e323ce0150 100644 (file)
@@ -417,6 +417,7 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
         * use first address as sample address
         */
        memset(&sample, 0, sizeof(sample));
+       sample.cpumode = PERF_RECORD_MISC_USER;
        sample.pid  = pid;
        sample.tid  = tid;
        sample.time = id->time;
@@ -505,6 +506,7 @@ static int jit_repipe_code_move(struct jit_buf_desc *jd, union jr_entry *jr)
         * use first address as sample address
         */
        memset(&sample, 0, sizeof(sample));
+       sample.cpumode = PERF_RECORD_MISC_USER;
        sample.pid  = pid;
        sample.tid  = tid;
        sample.time = id->time;
index 20a257a12ea5000707edd0ba34a4368df79e35e2..acbf7ff2ee6eed9b1ea38548d3ac5f96c50b65be 100644 (file)
@@ -66,6 +66,8 @@ unsigned int do_slm_cstates;
 unsigned int use_c1_residency_msr;
 unsigned int has_aperf;
 unsigned int has_epb;
+unsigned int do_irtl_snb;
+unsigned int do_irtl_hsw;
 unsigned int units = 1000000;  /* MHz etc */
 unsigned int genuine_intel;
 unsigned int has_invariant_tsc;
@@ -187,7 +189,7 @@ struct pkg_data {
        unsigned long long pkg_any_core_c0;
        unsigned long long pkg_any_gfxe_c0;
        unsigned long long pkg_both_core_gfxe_c0;
-       unsigned long long gfx_rc6_ms;
+       long long gfx_rc6_ms;
        unsigned int gfx_mhz;
        unsigned int package_id;
        unsigned int energy_pkg;        /* MSR_PKG_ENERGY_STATUS */
@@ -621,8 +623,14 @@ int format_counters(struct thread_data *t, struct core_data *c,
                outp += sprintf(outp, "%8d", p->pkg_temp_c);
 
        /* GFXrc6 */
-       if (do_gfx_rc6_ms)
-               outp += sprintf(outp, "%8.2f", 100.0 * p->gfx_rc6_ms / 1000.0 / interval_float);
+       if (do_gfx_rc6_ms) {
+               if (p->gfx_rc6_ms == -1) {      /* detect counter reset */
+                       outp += sprintf(outp, "  ***.**");
+               } else {
+                       outp += sprintf(outp, "%8.2f",
+                               p->gfx_rc6_ms / 10.0 / interval_float);
+               }
+       }
 
        /* GFXMHz */
        if (do_gfx_mhz)
@@ -766,7 +774,12 @@ delta_package(struct pkg_data *new, struct pkg_data *old)
        old->pc10 = new->pc10 - old->pc10;
        old->pkg_temp_c = new->pkg_temp_c;
 
-       old->gfx_rc6_ms = new->gfx_rc6_ms - old->gfx_rc6_ms;
+       /* flag an error when rc6 counter resets/wraps */
+       if (old->gfx_rc6_ms >  new->gfx_rc6_ms)
+               old->gfx_rc6_ms = -1;
+       else
+               old->gfx_rc6_ms = new->gfx_rc6_ms - old->gfx_rc6_ms;
+
        old->gfx_mhz = new->gfx_mhz;
 
        DELTA_WRAP32(new->energy_pkg, old->energy_pkg);
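
gfx_rc6_ms is read from sysfs and can reset between samples; making the field signed lets delta_package() store -1 as a sentinel that format_counters() renders as ***.**. A minimal sketch of the idiom:

    #include <stdio.h>

    /* A monotonic counter that went backwards must have reset mid-interval;
     * store -1 as a sentinel rather than a bogus huge delta. */
    static long long counter_delta(long long old, long long cur)
    {
            return old > cur ? -1 : cur - old;
    }

    static void show(long long delta_ms, double interval_sec)
    {
            if (delta_ms == -1)
                    printf("  ***.**\n");
            else    /* ms busy per second of interval, scaled to percent */
                    printf("%8.2f\n", delta_ms / 10.0 / interval_sec);
    }

    int main(void)
    {
            show(counter_delta(1000, 1500), 5.0);   /* normal interval */
            show(counter_delta(1500, 20), 5.0);     /* counter reset */
            return 0;
    }
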
@@ -1296,6 +1309,7 @@ int hsw_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL__3, PCL__6, PCL__7, PCL_7S,
 int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
 int amt_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
 int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
+int bxt_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
 
 
 static void
@@ -1579,6 +1593,47 @@ dump_config_tdp(void)
        fprintf(outf, " lock=%d", (unsigned int)(msr >> 31) & 1);
        fprintf(outf, ")\n");
 }
+
+unsigned int irtl_time_units[] = {1, 32, 1024, 32768, 1048576, 33554432, 0, 0 };
+
+void print_irtl(void)
+{
+       unsigned long long msr;
+
+       get_msr(base_cpu, MSR_PKGC3_IRTL, &msr);
+       fprintf(outf, "cpu%d: MSR_PKGC3_IRTL: 0x%08llx (", base_cpu, msr);
+       fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
+               (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
+
+       get_msr(base_cpu, MSR_PKGC6_IRTL, &msr);
+       fprintf(outf, "cpu%d: MSR_PKGC6_IRTL: 0x%08llx (", base_cpu, msr);
+       fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
+               (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
+
+       get_msr(base_cpu, MSR_PKGC7_IRTL, &msr);
+       fprintf(outf, "cpu%d: MSR_PKGC7_IRTL: 0x%08llx (", base_cpu, msr);
+       fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
+               (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
+
+       if (!do_irtl_hsw)
+               return;
+
+       get_msr(base_cpu, MSR_PKGC8_IRTL, &msr);
+       fprintf(outf, "cpu%d: MSR_PKGC8_IRTL: 0x%08llx (", base_cpu, msr);
+       fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
+               (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
+
+       get_msr(base_cpu, MSR_PKGC9_IRTL, &msr);
+       fprintf(outf, "cpu%d: MSR_PKGC9_IRTL: 0x%08llx (", base_cpu, msr);
+       fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
+               (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
+
+       get_msr(base_cpu, MSR_PKGC10_IRTL, &msr);
+       fprintf(outf, "cpu%d: MSR_PKGC10_IRTL: 0x%08llx (", base_cpu, msr);
+       fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
+               (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
+
+}
 void free_fd_percpu(void)
 {
        int i;
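
print_irtl() decodes each IRTL MSR the same way: bits 9:0 hold the latency value, the field at bit 10 selects a time unit, and bit 15 is the valid flag (field widths as used by the code above, not restated from the SDM). A standalone decode of a hypothetical register value:

    #include <stdio.h>

    static const unsigned long long irtl_time_units[] = {1, 32, 1024, 32768};

    int main(void)
    {
            unsigned long long msr = 0x8842;        /* hypothetical value */

            unsigned long long ns = (msr & 0x3FF) *
                                    irtl_time_units[(msr >> 10) & 0x3];

            printf("%svalid, %llu ns\n",
                   (msr & (1 << 15)) ? "" : "NOT ", ns);   /* valid, 67584 ns */
            return 0;
    }
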
@@ -2144,6 +2199,9 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
        case 0x56:      /* BDX-DE */
        case 0x4E:      /* SKL */
        case 0x5E:      /* SKL */
+       case 0x8E:      /* KBL */
+       case 0x9E:      /* KBL */
+       case 0x55:      /* SKX */
                pkg_cstate_limits = hsw_pkg_cstate_limits;
                break;
        case 0x37:      /* BYT */
@@ -2156,6 +2214,9 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
        case 0x57:      /* PHI */
                pkg_cstate_limits = phi_pkg_cstate_limits;
                break;
+       case 0x5C:      /* BXT */
+               pkg_cstate_limits = bxt_pkg_cstate_limits;
+               break;
        default:
                return 0;
        }
@@ -2248,6 +2309,9 @@ int has_config_tdp(unsigned int family, unsigned int model)
        case 0x56:      /* BDX-DE */
        case 0x4E:      /* SKL */
        case 0x5E:      /* SKL */
+       case 0x8E:      /* KBL */
+       case 0x9E:      /* KBL */
+       case 0x55:      /* SKX */
 
        case 0x57:      /* Knights Landing */
                return 1;
@@ -2585,13 +2649,19 @@ void rapl_probe(unsigned int family, unsigned int model)
        case 0x47:      /* BDW */
                do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO;
                break;
+       case 0x5C:      /* BXT */
+               do_rapl = RAPL_PKG | RAPL_PKG_POWER_INFO;
+               break;
        case 0x4E:      /* SKL */
        case 0x5E:      /* SKL */
+       case 0x8E:      /* KBL */
+       case 0x9E:      /* KBL */
                do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
                break;
        case 0x3F:      /* HSX */
        case 0x4F:      /* BDX */
        case 0x56:      /* BDX-DE */
+       case 0x55:      /* SKX */
        case 0x57:      /* KNL */
                do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
                break;
@@ -2871,6 +2941,10 @@ int has_snb_msrs(unsigned int family, unsigned int model)
        case 0x56:      /* BDX-DE */
        case 0x4E:      /* SKL */
        case 0x5E:      /* SKL */
+       case 0x8E:      /* KBL */
+       case 0x9E:      /* KBL */
+       case 0x55:      /* SKX */
+       case 0x5C:      /* BXT */
                return 1;
        }
        return 0;
@@ -2879,9 +2953,14 @@ int has_snb_msrs(unsigned int family, unsigned int model)
 /*
  * HSW adds support for additional MSRs:
  *
- * MSR_PKG_C8_RESIDENCY            0x00000630
- * MSR_PKG_C9_RESIDENCY            0x00000631
- * MSR_PKG_C10_RESIDENCY           0x00000632
+ * MSR_PKG_C8_RESIDENCY                0x00000630
+ * MSR_PKG_C9_RESIDENCY                0x00000631
+ * MSR_PKG_C10_RESIDENCY       0x00000632
+ *
+ * MSR_PKGC8_IRTL              0x00000633
+ * MSR_PKGC9_IRTL              0x00000634
+ * MSR_PKGC10_IRTL             0x00000635
+ *
  */
 int has_hsw_msrs(unsigned int family, unsigned int model)
 {
@@ -2893,6 +2972,9 @@ int has_hsw_msrs(unsigned int family, unsigned int model)
        case 0x3D:      /* BDW */
        case 0x4E:      /* SKL */
        case 0x5E:      /* SKL */
+       case 0x8E:      /* KBL */
+       case 0x9E:      /* KBL */
+       case 0x5C:      /* BXT */
                return 1;
        }
        return 0;
@@ -2914,6 +2996,8 @@ int has_skl_msrs(unsigned int family, unsigned int model)
        switch (model) {
        case 0x4E:      /* SKL */
        case 0x5E:      /* SKL */
+       case 0x8E:      /* KBL */
+       case 0x9E:      /* KBL */
                return 1;
        }
        return 0;
@@ -3187,7 +3271,7 @@ void process_cpuid()
        if (debug)
                decode_misc_enable_msr();
 
-       if (max_level >= 0x7) {
+       if (max_level >= 0x7 && debug) {
                int has_sgx;
 
                ecx = 0;
@@ -3221,7 +3305,15 @@ void process_cpuid()
                                switch(model) {
                                case 0x4E:      /* SKL */
                                case 0x5E:      /* SKL */
-                                       crystal_hz = 24000000;  /* 24 MHz */
+                               case 0x8E:      /* KBL */
+                               case 0x9E:      /* KBL */
+                                       crystal_hz = 24000000;  /* 24.0 MHz */
+                                       break;
+                               case 0x55:      /* SKX */
+                                       crystal_hz = 25000000;  /* 25.0 MHz */
+                                       break;
+                               case 0x5C:      /* BXT */
+                                       crystal_hz = 19200000;  /* 19.2 MHz */
                                        break;
                                default:
                                        crystal_hz = 0;
@@ -3254,11 +3346,13 @@ void process_cpuid()
 
        do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model);
        do_snb_cstates = has_snb_msrs(family, model);
+       do_irtl_snb = has_snb_msrs(family, model);
        do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2);
        do_pc3 = (pkg_cstate_limit >= PCL__3);
        do_pc6 = (pkg_cstate_limit >= PCL__6);
        do_pc7 = do_snb_cstates && (pkg_cstate_limit >= PCL__7);
        do_c8_c9_c10 = has_hsw_msrs(family, model);
+       do_irtl_hsw = has_hsw_msrs(family, model);
        do_skl_residency = has_skl_msrs(family, model);
        do_slm_cstates = is_slm(family, model);
        do_knl_cstates  = is_knl(family, model);
@@ -3564,6 +3658,9 @@ void turbostat_init()
 
        if (debug)
                for_all_cpus(print_thermal, ODD_COUNTERS);
+
+       if (debug && do_irtl_snb)
+               print_irtl();
 }
 
 int fork_it(char **argv)
@@ -3629,7 +3726,7 @@ int get_and_dump_counters(void)
 }
 
 void print_version() {
-       fprintf(outf, "turbostat version 4.11 27 Feb 2016"
+       fprintf(outf, "turbostat version 4.12 5 Apr 2016"
                " - Len Brown <lenb@kernel.org>\n");
 }
 
index 69bb3fc38fb2b71ba9f10070d7331bf8d2e158ce..0840684deb7d474578050f9c230fe9196e46a486 100644 (file)
@@ -3,3 +3,4 @@ psock_fanout
 psock_tpacket
 reuseport_bpf
 reuseport_bpf_cpu
+reuseport_dualstack
index c658792d47b495badb3470e5d2f1143bbb94827a..0e5340742620bc332df4ad49737fac072738d338 100644 (file)
@@ -4,7 +4,7 @@ CFLAGS = -Wall -O2 -g
 
 CFLAGS += -I../../../../usr/include/
 
-NET_PROGS = socket psock_fanout psock_tpacket reuseport_bpf reuseport_bpf_cpu
+NET_PROGS = socket psock_fanout psock_tpacket reuseport_bpf reuseport_bpf_cpu reuseport_dualstack
 
 all: $(NET_PROGS)
 %: %.c
diff --git a/tools/testing/selftests/net/reuseport_dualstack.c b/tools/testing/selftests/net/reuseport_dualstack.c
new file mode 100644 (file)
index 0000000..90958aa
--- /dev/null
@@ -0,0 +1,208 @@
+/*
+ * It is possible to use SO_REUSEPORT to open multiple sockets bound to
+ * equivalent local addresses using AF_INET and AF_INET6 at the same time.  If
+ * the AF_INET6 socket has IPV6_V6ONLY set, it's clear which socket should
+ * receive a given incoming packet.  However, when it is not set, incoming v4
+ * packets should prefer the AF_INET socket(s).  This behavior was defined with
+ * the original SO_REUSEPORT implementation, but broke with
+ * e32ea7e74727 ("soreuseport: fast reuseport UDP socket selection").
+ * This test creates these mixed AF_INET/AF_INET6 sockets and asserts the
+ * AF_INET preference for v4 packets.
+ */
+
+#define _GNU_SOURCE
+
+#include <arpa/inet.h>
+#include <errno.h>
+#include <error.h>
+#include <linux/in.h>
+#include <linux/unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/epoll.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+static const int PORT = 8888;
+
+static void build_rcv_fd(int family, int proto, int *rcv_fds, int count)
+{
+       struct sockaddr_storage addr;
+       struct sockaddr_in  *addr4;
+       struct sockaddr_in6 *addr6;
+       int opt, i;
+
+       switch (family) {
+       case AF_INET:
+               addr4 = (struct sockaddr_in *)&addr;
+               addr4->sin_family = AF_INET;
+               addr4->sin_addr.s_addr = htonl(INADDR_ANY);
+               addr4->sin_port = htons(PORT);
+               break;
+       case AF_INET6:
+               addr6 = (struct sockaddr_in6 *)&addr;
+               addr6->sin6_family = AF_INET6;
+               addr6->sin6_addr = in6addr_any;
+               addr6->sin6_port = htons(PORT);
+               break;
+       default:
+               error(1, 0, "Unsupported family %d", family);
+       }
+
+       for (i = 0; i < count; ++i) {
+               rcv_fds[i] = socket(family, proto, 0);
+               if (rcv_fds[i] < 0)
+                       error(1, errno, "failed to create receive socket");
+
+               opt = 1;
+               if (setsockopt(rcv_fds[i], SOL_SOCKET, SO_REUSEPORT, &opt,
+                              sizeof(opt)))
+                       error(1, errno, "failed to set SO_REUSEPORT");
+
+               if (bind(rcv_fds[i], (struct sockaddr *)&addr, sizeof(addr)))
+                       error(1, errno, "failed to bind receive socket");
+
+               if (proto == SOCK_STREAM && listen(rcv_fds[i], 10))
+                       error(1, errno, "failed to listen on receive port");
+       }
+}
+
+static void send_from_v4(int proto)
+{
+       struct sockaddr_in  saddr, daddr;
+       int fd;
+
+       saddr.sin_family = AF_INET;
+       saddr.sin_addr.s_addr = htonl(INADDR_ANY);
+       saddr.sin_port = 0;
+
+       daddr.sin_family = AF_INET;
+       daddr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+       daddr.sin_port = htons(PORT);
+
+       fd = socket(AF_INET, proto, 0);
+       if (fd < 0)
+               error(1, errno, "failed to create send socket");
+
+       if (bind(fd, (struct sockaddr *)&saddr, sizeof(saddr)))
+               error(1, errno, "failed to bind send socket");
+
+       if (connect(fd, (struct sockaddr *)&daddr, sizeof(daddr)))
+               error(1, errno, "failed to connect send socket");
+
+       if (send(fd, "a", 1, 0) < 0)
+               error(1, errno, "failed to send message");
+
+       close(fd);
+}
+
+static int receive_once(int epfd, int proto)
+{
+       struct epoll_event ev;
+       int i, fd;
+       char buf[8];
+
+       i = epoll_wait(epfd, &ev, 1, -1);
+       if (i < 0)
+               error(1, errno, "epoll_wait failed");
+
+       if (proto == SOCK_STREAM) {
+               fd = accept(ev.data.fd, NULL, NULL);
+               if (fd < 0)
+                       error(1, errno, "failed to accept");
+               i = recv(fd, buf, sizeof(buf), 0);
+               close(fd);
+       } else {
+               i = recv(ev.data.fd, buf, sizeof(buf), 0);
+       }
+
+       if (i < 0)
+               error(1, errno, "failed to recv");
+
+       return ev.data.fd;
+}
+
+static void test(int *rcv_fds, int count, int proto)
+{
+       struct epoll_event ev;
+       int epfd, i, test_fd;
+       uint16_t test_family;
+       socklen_t len;
+
+       epfd = epoll_create(1);
+       if (epfd < 0)
+               error(1, errno, "failed to create epoll");
+
+       ev.events = EPOLLIN;
+       for (i = 0; i < count; ++i) {
+               ev.data.fd = rcv_fds[i];
+               if (epoll_ctl(epfd, EPOLL_CTL_ADD, rcv_fds[i], &ev))
+                       error(1, errno, "failed to register sock epoll");
+       }
+
+       send_from_v4(proto);
+
+       test_fd = receive_once(epfd, proto);
+       len = sizeof(test_family);
+       if (getsockopt(test_fd, SOL_SOCKET, SO_DOMAIN, &test_family, &len))
+               error(1, errno, "failed to read socket domain");
+       if (test_family != AF_INET)
+               error(1, 0, "expected to receive on v4 socket but got v6 (%d)",
+                     test_family);
+
+       close(epfd);
+}
+
+int main(void)
+{
+       int rcv_fds[32], i;
+
+       fprintf(stderr, "---- UDP IPv4 created before IPv6 ----\n");
+       build_rcv_fd(AF_INET, SOCK_DGRAM, rcv_fds, 5);
+       build_rcv_fd(AF_INET6, SOCK_DGRAM, &(rcv_fds[5]), 5);
+       test(rcv_fds, 10, SOCK_DGRAM);
+       for (i = 0; i < 10; ++i)
+               close(rcv_fds[i]);
+
+       fprintf(stderr, "---- UDP IPv6 created before IPv4 ----\n");
+       build_rcv_fd(AF_INET6, SOCK_DGRAM, rcv_fds, 5);
+       build_rcv_fd(AF_INET, SOCK_DGRAM, &(rcv_fds[5]), 5);
+       test(rcv_fds, 10, SOCK_DGRAM);
+       for (i = 0; i < 10; ++i)
+               close(rcv_fds[i]);
+
+       /* NOTE: UDP socket lookups traverse a different code path when there
+        * are > 10 sockets in a group.
+        */
+       fprintf(stderr, "---- UDP IPv4 created before IPv6 (large) ----\n");
+       build_rcv_fd(AF_INET, SOCK_DGRAM, rcv_fds, 16);
+       build_rcv_fd(AF_INET6, SOCK_DGRAM, &(rcv_fds[16]), 16);
+       test(rcv_fds, 32, SOCK_DGRAM);
+       for (i = 0; i < 32; ++i)
+               close(rcv_fds[i]);
+
+       fprintf(stderr, "---- UDP IPv6 created before IPv4 (large) ----\n");
+       build_rcv_fd(AF_INET6, SOCK_DGRAM, rcv_fds, 16);
+       build_rcv_fd(AF_INET, SOCK_DGRAM, &(rcv_fds[16]), 16);
+       test(rcv_fds, 32, SOCK_DGRAM);
+       for (i = 0; i < 32; ++i)
+               close(rcv_fds[i]);
+
+       fprintf(stderr, "---- TCP IPv4 created before IPv6 ----\n");
+       build_rcv_fd(AF_INET, SOCK_STREAM, rcv_fds, 5);
+       build_rcv_fd(AF_INET6, SOCK_STREAM, &(rcv_fds[5]), 5);
+       test(rcv_fds, 10, SOCK_STREAM);
+       for (i = 0; i < 10; ++i)
+               close(rcv_fds[i]);
+
+       fprintf(stderr, "---- TCP IPv6 created before IPv4 ----\n");
+       build_rcv_fd(AF_INET6, SOCK_STREAM, rcv_fds, 5);
+       build_rcv_fd(AF_INET, SOCK_STREAM, &(rcv_fds[5]), 5);
+       test(rcv_fds, 10, SOCK_STREAM);
+       for (i = 0; i < 10; ++i)
+               close(rcv_fds[i]);
+
+       fprintf(stderr, "SUCCESS\n");
+       return 0;
+}
index b9453b838162b44248b973fe34ad1b3579e8dd28..150829dd799876dc11a64bddff1760efb4a13066 100644 (file)
@@ -1497,15 +1497,15 @@ TEST_F(TRACE_syscall, syscall_dropped)
 #define SECCOMP_SET_MODE_FILTER 1
 #endif
 
-#ifndef SECCOMP_FLAG_FILTER_TSYNC
-#define SECCOMP_FLAG_FILTER_TSYNC 1
+#ifndef SECCOMP_FILTER_FLAG_TSYNC
+#define SECCOMP_FILTER_FLAG_TSYNC 1
 #endif
 
 #ifndef seccomp
-int seccomp(unsigned int op, unsigned int flags, struct sock_fprog *filter)
+int seccomp(unsigned int op, unsigned int flags, void *args)
 {
        errno = 0;
-       return syscall(__NR_seccomp, op, flags, filter);
+       return syscall(__NR_seccomp, op, flags, args);
 }
 #endif
 
@@ -1613,7 +1613,7 @@ TEST(TSYNC_first)
                TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
        }
 
-       ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
+       ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
                      &prog);
        ASSERT_NE(ENOSYS, errno) {
                TH_LOG("Kernel does not support seccomp syscall!");
@@ -1831,7 +1831,7 @@ TEST_F(TSYNC, two_siblings_with_ancestor)
                self->sibling_count++;
        }
 
-       ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
+       ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
                      &self->apply_prog);
        ASSERT_EQ(0, ret) {
                TH_LOG("Could install filter on all threads!");
@@ -1892,7 +1892,7 @@ TEST_F(TSYNC, two_siblings_with_no_filter)
                TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
        }
 
-       ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
+       ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
                      &self->apply_prog);
        ASSERT_NE(ENOSYS, errno) {
                TH_LOG("Kernel does not support seccomp syscall!");
@@ -1940,7 +1940,7 @@ TEST_F(TSYNC, two_siblings_with_one_divergence)
                self->sibling_count++;
        }
 
-       ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
+       ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
                      &self->apply_prog);
        ASSERT_EQ(self->sibling[0].system_tid, ret) {
                TH_LOG("Did not fail on diverged sibling.");
@@ -1992,7 +1992,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter)
                TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
        }
 
-       ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
+       ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
                      &self->apply_prog);
        ASSERT_EQ(ret, self->sibling[0].system_tid) {
                TH_LOG("Did not fail on diverged sibling.");
@@ -2021,7 +2021,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter)
        /* Switch to the remaining sibling */
        sib = !sib;
 
-       ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
+       ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
                      &self->apply_prog);
        ASSERT_EQ(0, ret) {
                TH_LOG("Expected the remaining sibling to sync");
@@ -2044,7 +2044,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter)
        while (!kill(self->sibling[sib].system_tid, 0))
                sleep(0.1);
 
-       ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
+       ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
                      &self->apply_prog);
        ASSERT_EQ(0, ret);  /* just us chickens */
 }
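
The wrapper above now takes void * because the third seccomp(2) argument's type depends on the operation, and the flag is spelled with its real UAPI name, SECCOMP_FILTER_FLAG_TSYNC. A standalone sketch of the wrapper (the probe call is expected to fail without a filter program; it only exercises the shape):

    #include <errno.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef SECCOMP_SET_MODE_FILTER
    #define SECCOMP_SET_MODE_FILTER 1
    #endif
    #ifndef SECCOMP_FILTER_FLAG_TSYNC
    #define SECCOMP_FILTER_FLAG_TSYNC 1
    #endif

    /* void * rather than struct sock_fprog *: the argument type varies
     * with the operation. */
    static int seccomp(unsigned int op, unsigned int flags, void *args)
    {
            errno = 0;
            return syscall(__NR_seccomp, op, flags, args);
    }

    int main(void)
    {
            if (seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
                        NULL) == -1)
                    perror("seccomp");      /* expected without a filter */
            return 0;
    }
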
index a9ad4fe3f68f07848876403985e19e4c544ef1d0..9aaa35dd9144030464d1d038ea5b229c4bb33d0a 100644 (file)
@@ -91,6 +91,8 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
        vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
        vcpu->arch.timer_cpu.armed = false;
 
+       WARN_ON(!kvm_timer_should_fire(vcpu));
+
        /*
         * If the vcpu is blocked we want to wake it up so that it will see
         * the timer has expired when entering the guest.
@@ -98,10 +100,46 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
        kvm_vcpu_kick(vcpu);
 }
 
+static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu)
+{
+       cycle_t cval, now;
+
+       cval = vcpu->arch.timer_cpu.cntv_cval;
+       now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
+
+       if (now < cval) {
+               u64 ns;
+
+               ns = cyclecounter_cyc2ns(timecounter->cc,
+                                        cval - now,
+                                        timecounter->mask,
+                                        &timecounter->frac);
+               return ns;
+       }
+
+       return 0;
+}
+
 static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
 {
        struct arch_timer_cpu *timer;
+       struct kvm_vcpu *vcpu;
+       u64 ns;
+
        timer = container_of(hrt, struct arch_timer_cpu, timer);
+       vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);
+
+       /*
+        * Check that the timer has really expired from the guest's
+        * PoV (NTP on the host may have forced it to expire
+        * early). If we should have slept longer, restart it.
+        */
+       ns = kvm_timer_compute_delta(vcpu);
+       if (unlikely(ns)) {
+               hrtimer_forward_now(hrt, ns_to_ktime(ns));
+               return HRTIMER_RESTART;
+       }
+
        queue_work(wqueue, &timer->expired);
        return HRTIMER_NORESTART;
 }
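
The callback above now treats an early wakeup as a request to re-arm: kvm_timer_compute_delta() returns the nanoseconds still outstanding, and a nonzero result means hrtimer_forward_now() plus HRTIMER_RESTART instead of queueing work. A userspace analogue of that control flow (stand-in types, not KVM code):

    #include <stdio.h>

    enum restart { NORESTART, RESTART };

    /* nanoseconds still outstanding; 0 means the deadline really passed */
    static unsigned long long compute_delta(unsigned long long now,
                                            unsigned long long cval)
    {
            return now < cval ? cval - now : 0;
    }

    static enum restart timer_expire(unsigned long long now,
                                     unsigned long long cval)
    {
            unsigned long long ns = compute_delta(now, cval);

            if (ns) {
                    /* woke up early: push the deadline forward and retry,
                     * as hrtimer_forward_now() + HRTIMER_RESTART do */
                    printf("early by %llu ns, re-arming\n", ns);
                    return RESTART;
            }
            printf("really expired, queueing work\n");
            return NORESTART;
    }

    int main(void)
    {
            timer_expire(90, 100);          /* woken early */
            timer_expire(105, 100);         /* genuinely expired */
            return 0;
    }
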
@@ -176,8 +214,6 @@ static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
 void kvm_timer_schedule(struct kvm_vcpu *vcpu)
 {
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
-       u64 ns;
-       cycle_t cval, now;
 
        BUG_ON(timer_is_armed(timer));
 
@@ -197,14 +233,7 @@ void kvm_timer_schedule(struct kvm_vcpu *vcpu)
                return;
 
        /*  The timer has not yet expired, schedule a background timer */
-       cval = timer->cntv_cval;
-       now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
-
-       ns = cyclecounter_cyc2ns(timecounter->cc,
-                                cval - now,
-                                timecounter->mask,
-                                &timecounter->frac);
-       timer_arm(timer, ns);
+       timer_arm(timer, kvm_timer_compute_delta(vcpu));
 }
 
 void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
index b5754c6c5508f9ac77aa73df6a908ae43ee00467..575c7aa30d7e64538bb72c11289c360c2fa884a4 100644 (file)
@@ -193,11 +193,12 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
 {
        u64 reg = 0;
 
-       if ((vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
+       if ((vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
                reg = vcpu_sys_reg(vcpu, PMOVSSET_EL0);
                reg &= vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
                reg &= vcpu_sys_reg(vcpu, PMINTENSET_EL1);
                reg &= kvm_pmu_valid_counter_mask(vcpu);
+       }
 
        return reg;
 }
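
Without the braces only the first assignment was guarded; the three reg &= lines always ran. Since reg starts at zero the final value happened to be the same, but the code no longer matched its indentation. A small illustration of the bug class:

    #include <stdio.h>

    /* Without braces only the first statement is conditional, despite
     * the indentation. */
    static int without_braces(int enabled)
    {
            int reg = 0;

            if (enabled)
                    reg = 0xff;
            reg &= 0x0f;            /* runs unconditionally */
            return reg;
    }

    static int with_braces(int enabled)
    {
            int reg = 0;

            if (enabled) {
                    reg = 0xff;
                    reg &= 0x0f;    /* now guarded, matching the indentation */
            }
            return reg;
    }

    int main(void)
    {
            printf("%d %d\n", without_braces(0), with_braces(0));   /* 0 0 */
            printf("%d %d\n", without_braces(1), with_braces(1));   /* 15 15 */
            return 0;
    }
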